id (int64, 0–6k) | code (string, lengths 4k–8k) | code_compressed (null)
---|---|---|
700 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from proton import Message
from proton.handlers import MessagingHandler
from proton.reactor import Container
from system_test import TestCase, Qdrouterd, main_module
from system_test import unittest
from skupper_router_internal.policy.policy_util import is_ipv6_enabled
class SocketAddressFamilyTest(TestCase):
@classmethod
def setUpClass(cls):
"""
Starts three routers with various listeners and connectors.
There is a call to wait_router_connected to make sure that the routers are able to communicate with each
other on ports using the assigned protocol family.
"""
super(SocketAddressFamilyTest, cls).setUpClass()
def METHOD_NAME(name, connection):
config = [
('router', {'mode': 'interior', 'id': 'QDR.%s' % name}),
# This will test if the router defaults host to 127.0.0.1 and if the router auto-detects protocol family
('listener', {'port': cls.tester.get_port()}),
# Specify host as 127.0.0.1 and protocol family as IPv4
('listener', {'host': '127.0.0.1', 'socketAddressFamily': 'IPv4', 'port': cls.tester.get_port()}),
# Specify protocol family as IPv4 but don't specify any host
# This will test if the router defaults the host field to 127.0.0.1
('listener', {'socketAddressFamily': 'IPv4', 'port': cls.tester.get_port()}),
# Specify the host as 127.0.0.1
# This will test router's auto-detection of protocol family
('listener', {'host': '127.0.0.1', 'port': cls.tester.get_port()}),
# Specify host as ::1 and protocol family as IPv6
('listener', {'host': '::1', 'socketAddressFamily': 'IPv6',
'port': cls.tester.get_port(socket_address_family='IPv6')}),
] + connection
config = Qdrouterd.Config(config)
# The wait=True attempts to connect to each listening port with the appropriate protocol family
# and tests each connector
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))
if not is_ipv6_enabled():
return
cls.routers = []
inter_router_port = cls.tester.get_port(socket_address_family='IPv6')
inter_router_ipv4_port = cls.tester.get_port(socket_address_family='IPv4')
METHOD_NAME('A',
[
('listener', {'host': '::1', 'role': 'inter-router', 'socketAddressFamily': 'IPv6', 'port': inter_router_port})
]
)
METHOD_NAME('B',
[
# Tests an IPv6 connector
('connector', {'host': '::1', 'role': 'inter-router', 'socketAddressFamily': 'IPv6', 'port': inter_router_port}),
('listener', {'host': '127.0.0.1', 'role': 'inter-router', 'port': inter_router_ipv4_port})
]
)
METHOD_NAME('C',
[
# Tests an IPv4 connector
('connector', {'host': '127.0.0.1', 'role': 'inter-router', 'port': inter_router_ipv4_port})
]
)
cls.routers[0].wait_router_connected('QDR.B')
cls.routers[1].wait_router_connected('QDR.A')
cls.routers[2].wait_router_connected('QDR.B')
# Without at least one test, setUpClass() does not execute.
# If this test has started executing, setUpClass() must have completed successfully,
# which means the routers were able to communicate with each other using the specified protocol families.
def test_simple_send_receive(self):
if not is_ipv6_enabled():
return self.skipTest("Skipping test..IPV6 not enabled")
simple_send_receive_test = SimpleSndRecv(self.routers[0].addresses[4])
simple_send_receive_test.run()
self.assertTrue(simple_send_receive_test.message_received)
class SimpleSndRecv(MessagingHandler):
def __init__(self, address):
super(SimpleSndRecv, self).__init__()
self.address = address
self.sender = None
self.receiver = None
self.conn = None
self.message_received = False
def on_start(self, event):
self.conn = event.container.connect(self.address)
self.receiver = event.container.create_receiver(self.conn, "test_addr")
self.sender = event.container.create_sender(self.conn, "test_addr")
def on_sendable(self, event):
msg = Message(body="Hello World")
event.sender.send(msg)
def on_message(self, event):
if "Hello World" == event.message.body:
self.message_received = True
self.conn.close()
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
701 | #/*##########################################################################
# Copyright (C) 2004-2015 V.A. Sole, European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF Data Analysis"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
import sys
import os
import numpy
import traceback
from PyMca5.PyMcaGui import PyMcaQt as qt
from PyMca5.PyMcaGui.misc import SelectionTable
from PyMca5.PyMcaGui.plotting import MaskScatterWidget
class ScatterPlotCorrelatorWidget(MaskScatterWidget.MaskScatterWidget):
def __init__(self, parent=None,
labels=("Legend", "X", "Y"),
types=("Text","RadioButton", "RadioButton"),
toolbar=False,
**kw):
super(ScatterPlotCorrelatorWidget, self).__init__(None, **kw)
self._splitter = qt.QSplitter(parent)
self._splitter.setOrientation(qt.Qt.Horizontal)
self.container = qt.QWidget(self._splitter)
self.container.mainLayout = qt.QVBoxLayout(self.container)
self.container.mainLayout.setContentsMargins(0, 0, 0, 0)
self.container.mainLayout.setSpacing(0)
# add a toolbar on top of the table
if toolbar:
self.toolBar = qt.QToolBar(self.container)
# the selection table
self.table = SelectionTable.SelectionTable(self.container,
labels=labels,
types=types)
if toolbar:
self.container.mainLayout.addWidget(self.toolBar)
self.container.mainLayout.addWidget(self.table)
self._splitter.addWidget(self.container)
self._splitter.addWidget(self)
# internal variables
self._itemList = []
self._itemLabels = []
# connect
self.table.sigSelectionTableSignal.connect(self.selectionTableSlot)
def METHOD_NAME(self):
if self._splitter.isHidden():
self._splitter.METHOD_NAME()
else:
super(ScatterPlotCorrelatorWidget, self).METHOD_NAME()
def setSelectableItemList(self, items, labels=None, copy=True):
self._itemList = []
self._itemLabels = []
if labels is None:
labels = [None] * len(items)
for i in range(len(items)):
self.addSelectableItem(items[i], label=labels[i], copy=copy)
def addSelectableItem(self, item, label=None, copy=True):
# we always keep a copy by default
item = numpy.array(item, dtype=numpy.float32, copy=copy)
if label is None:
label = "Unnamed 00"
i = 0
while label in self._itemLabels:
i += 1
label = "Unnamed %02d" % i
if len(self._itemList):
if item.size != self._itemList[0].size:
raise IndexError("Invalid size")
if label in self._itemLabels:
self._itemList[self._itemLabels.index(label)] = item
else:
self._itemList.append(item)
self._itemLabels.append(label)
nItems = len(self._itemList)
self.table.setRowCount(nItems)
self.table.fillLine(nItems - 1, [label, "", ""])
self.table.resizeColumnToContents(0)
self.table.resizeColumnToContents(1)
self.table.resizeColumnToContents(2)
ddict = self.table.getSelection()
index = self._itemLabels.index(label)
xKey = qt.safe_str(self.table.horizontalHeaderItem(1).text()).lower()
yKey = qt.safe_str(self.table.horizontalHeaderItem(2).text()).lower()
if index in (ddict[xKey] + ddict[yKey]):
self.selectionTableSlot(ddict)
def selectionTableSlot(self, ddict):
legendKey = qt.safe_str(self.table.horizontalHeaderItem(0).text()).lower()
xKey = qt.safe_str(self.table.horizontalHeaderItem(1).text()).lower()
yKey = qt.safe_str(self.table.horizontalHeaderItem(2).text()).lower()
if len(ddict[xKey]):
x0 = self._itemList[ddict[xKey][0]]
else:
return
if len(ddict[yKey]):
y0 = self._itemList[ddict[yKey][0]]
else:
return
x = x0[:]
x.shape = -1
y = y0[:]
y.shape = -1
xLabel = self._itemLabels[ddict[xKey][0]]
yLabel = self._itemLabels[ddict[yKey][0]]
# active curve handling is disabled
self.setGraphXLabel(xLabel)
self.setGraphYLabel(yLabel)
self.setSelectionCurveData(x, y, legend=None,
color="k",
symbol=".",
replot=False,
replace=True,
xlabel=xLabel,
ylabel=yLabel,
selectable=False)
self._updatePlot(replot=False, replace=True)
#matplotlib needs a zoom reset to update the scales
# that problem does not seem to be present with OpenGL
self.resetZoom()
if __name__ == "__main__":
if "opengl" in sys.argv:
backend = "opengl"
else:
backend = None
app = qt.QApplication([])
w = ScatterPlotCorrelatorWidget(labels=["Legend",
"X",
"Y"],
types=["Text",
"RadioButton",
"RadioButton"],
maxNRois=1,
backend=backend)
w.METHOD_NAME()
# fill some data
import numpy
import numpy.random
import time
t0 = time.time()
x = numpy.arange(1000000.)
w.addSelectableItem(x, "range(%d)" % x.size)
print("elapsed = ", time.time() - t0)
w.addSelectableItem(x * x, "range(%d) ** 2" % x.size)
x = numpy.random.random(x.size)
w.addSelectableItem(x, "random(%d)" % x.size)
x = numpy.random.normal(500000., 1.0, 1000000)
w.addSelectableItem(x, "Gauss 0")
x = numpy.random.normal(500000., 1.0, 1000000)
w.addSelectableItem(x, "Gauss 1")
w.setPolygonSelectionMode()
def theSlot(ddict):
print(ddict['event'])
w.sigMaskScatterWidgetSignal.connect(theSlot)
app.exec()
702 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkmse.endpoint import endpoint_data
class UpdateConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'mse', '2019-05-31', 'UpdateConfig','mse')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ConfigAuthEnabled(self): # Boolean
return self.get_query_params().get('ConfigAuthEnabled')
def set_ConfigAuthEnabled(self, ConfigAuthEnabled): # Boolean
self.add_query_param('ConfigAuthEnabled', ConfigAuthEnabled)
def get_PassWord(self): # String
return self.get_query_params().get('PassWord')
def set_PassWord(self, PassWord): # String
self.add_query_param('PassWord', PassWord)
def get_MaxClientCnxns(self): # String
return self.get_query_params().get('MaxClientCnxns')
def set_MaxClientCnxns(self, MaxClientCnxns): # String
self.add_query_param('MaxClientCnxns', MaxClientCnxns)
def get_RequestPars(self): # String
return self.get_query_params().get('RequestPars')
def set_RequestPars(self, RequestPars): # String
self.add_query_param('RequestPars', RequestPars)
def get_NamingAuthEnabled(self): # Boolean
return self.get_query_params().get('NamingAuthEnabled')
def set_NamingAuthEnabled(self, NamingAuthEnabled): # Boolean
self.add_query_param('NamingAuthEnabled', NamingAuthEnabled)
def get_ExtendedTypesEnable(self): # String
return self.get_query_params().get('ExtendedTypesEnable')
def set_ExtendedTypesEnable(self, ExtendedTypesEnable): # String
self.add_query_param('ExtendedTypesEnable', ExtendedTypesEnable)
def get_AutopurgeSnapRetainCount(self): # String
return self.get_query_params().get('AutopurgeSnapRetainCount')
def set_AutopurgeSnapRetainCount(self, AutopurgeSnapRetainCount): # String
self.add_query_param('AutopurgeSnapRetainCount', AutopurgeSnapRetainCount)
def get_ConfigSecretEnabled(self): # Boolean
return self.get_query_params().get('ConfigSecretEnabled')
def set_ConfigSecretEnabled(self, ConfigSecretEnabled): # Boolean
self.add_query_param('ConfigSecretEnabled', ConfigSecretEnabled)
def get_MCPEnabled(self): # Boolean
return self.get_query_params().get('MCPEnabled')
def set_MCPEnabled(self, MCPEnabled): # Boolean
self.add_query_param('MCPEnabled', MCPEnabled)
def get_SyncLimit(self): # String
return self.get_query_params().get('SyncLimit')
def set_SyncLimit(self, SyncLimit): # String
self.add_query_param('SyncLimit', SyncLimit)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_TLSEnabled(self): # Boolean
return self.get_query_params().get('TLSEnabled')
def METHOD_NAME(self, TLSEnabled): # Boolean
self.add_query_param('TLSEnabled', TLSEnabled)
def get_OpenSuperAcl(self): # String
return self.get_body_params().get('OpenSuperAcl')
def set_OpenSuperAcl(self, OpenSuperAcl): # String
self.add_body_params('OpenSuperAcl', OpenSuperAcl)
def get_EurekaSupported(self): # Boolean
return self.get_query_params().get('EurekaSupported')
def set_EurekaSupported(self, EurekaSupported): # Boolean
self.add_query_param('EurekaSupported', EurekaSupported)
def get_SnapshotCount(self): # String
return self.get_query_params().get('SnapshotCount')
def set_SnapshotCount(self, SnapshotCount): # String
self.add_query_param('SnapshotCount', SnapshotCount)
def get_MinSessionTimeout(self): # String
return self.get_query_params().get('MinSessionTimeout')
def set_MinSessionTimeout(self, MinSessionTimeout): # String
self.add_query_param('MinSessionTimeout', MinSessionTimeout)
def get_JuteMaxbuffer(self): # String
return self.get_query_params().get('JuteMaxbuffer')
def set_JuteMaxbuffer(self, JuteMaxbuffer): # String
self.add_query_param('JuteMaxbuffer', JuteMaxbuffer)
def get_ConfigType(self): # String
return self.get_query_params().get('ConfigType')
def set_ConfigType(self, ConfigType): # String
self.add_query_param('ConfigType', ConfigType)
def get_MaxSessionTimeout(self): # String
return self.get_query_params().get('MaxSessionTimeout')
def set_MaxSessionTimeout(self, MaxSessionTimeout): # String
self.add_query_param('MaxSessionTimeout', MaxSessionTimeout)
def get_TickTime(self): # String
return self.get_query_params().get('TickTime')
def set_TickTime(self, TickTime): # String
self.add_query_param('TickTime', TickTime)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_AutopurgePurgeInterval(self): # String
return self.get_query_params().get('AutopurgePurgeInterval')
def set_AutopurgePurgeInterval(self, AutopurgePurgeInterval): # String
self.add_query_param('AutopurgePurgeInterval', AutopurgePurgeInterval)
def get_AcceptLanguage(self): # String
return self.get_query_params().get('AcceptLanguage')
def set_AcceptLanguage(self, AcceptLanguage): # String
self.add_query_param('AcceptLanguage', AcceptLanguage)
def get_InitLimit(self): # String
return self.get_query_params().get('InitLimit')
def set_InitLimit(self, InitLimit): # String
self.add_query_param('InitLimit', InitLimit)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName)
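The row above follows the standard Alibaba Cloud SDK request pattern: each setter records one query parameter, and `__init__` fixes the action and the POST method. A hedged usage sketch follows; `AcsClient` and `do_action_with_exception` are the standard aliyunsdkcore API, but the credentials, region, and parameter values below are placeholders, not values from the source.

```python
# Sketch only: sending an UpdateConfigRequest with the standard aliyunsdkcore
# client. Credentials, region, and all parameter values are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = UpdateConfigRequest()
request.set_InstanceId('mse-xxxxxxxx')   # placeholder instance id
request.set_ConfigType('zookeeper')      # placeholder config type
request.set_TickTime('2000')

# Each setter above only adds a query parameter; the client serializes the
# accumulated parameters and performs the signed POST declared in __init__.
response = client.do_action_with_exception(request)
print(response)
```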
703 | # -*- coding: utf-8 -*-
"""Handle notification related signals.
Copyright (C) 2021 Gitcoin Core
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from html import escape
from django.contrib.auth import get_user_model
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.urls import reverse
from app.utils import get_profiles_from_text
from dashboard.models import Activity
from inbox.utils import (
comment_notification, mentioned_users_notification, send_mention_notification_to_users, send_notification_to_user,
)
from townsquare.models import Comment, Like
def create_notification(sender, **kwargs):
activity = kwargs['instance']
if activity.activity_type == 'new_tip':
tip = activity.tip
if tip.recipient_profile:
send_notification_to_user(
activity.profile.user,
tip.recipient_profile.user,
tip.receive_url,
'new_tip',
f'<b>New Tip</b> worth {tip.value_in_usdt_now} USD ' +
f'received from {tip.from_username}'
)
if activity.activity_type == 'worker_applied':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
bounty.url,
'worker_applied',
f'<b>{activity.profile.user} applied</b> to work on {escape(bounty.title)}'
)
if activity.activity_type == 'worker_approved':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=activity.metadata['worker_handle']),
bounty.url,
'worker_approved',
f'You have been <b>approved to work on {escape(bounty.title)}</b>'
)
if activity.activity_type == 'worker_rejected':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=activity.metadata['worker_handle']),
bounty.url,
'worker_rejected',
f'Your request to work on <b>{escape(bounty.title)} has been rejected</b>'
)
if activity.activity_type == 'start_work':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
bounty.url,
'start_work',
f'<b>{activity.profile.user} has started work</b> on {escape(bounty.title)}'
)
if activity.activity_type == 'work_submitted':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
bounty.url,
'work_submitted',
f'<b>{activity.profile.user} has submitted work</b> for {escape(bounty.title)}'
)
if activity.activity_type == 'work_done':
bounty = activity.bounty
amount_paid = activity.metadata['new_bounty']['value_in_usdt_now']
send_notification_to_user(
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
activity.profile.user,
bounty.url,
'work_done',
f'<b>{bounty.bounty_owner_github_username}</b> has paid out ' +
f'{amount_paid} USD for your work on {escape(bounty.title)}'
)
if activity.activity_type == 'stop_work':
bounty = activity.bounty
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
bounty.url,
'stop_work',
f'<b>{activity.profile.user} has stopped work</b> on {escape(bounty.title)}'
)
if activity.activity_type == 'new_crowdfund':
bounty = activity.bounty
amount = activity.metadata['value_in_usdt_now']
send_notification_to_user(
activity.profile.user,
get_user_model().objects.get(username__iexact=bounty.bounty_owner_github_username),
bounty.url,
'new_crowdfund',
f'A <b>crowdfunding contribution worth {amount} USD</b> has been attached for {escape(bounty.title)}'
)
if activity.activity_type == 'new_kudos':
kudos_url = reverse('profile_min', args=[
activity.kudos_transfer.recipient_profile.handle,
'kudos'
])
if activity.kudos_transfer and activity.kudos_transfer.recipient_profile:
kudos_url = activity.kudos_transfer.receive_url_for_recipient
send_notification_to_user(
activity.profile.user,
activity.kudos_transfer.recipient_profile.user,
kudos_url,
'new_kudos',
f'You received a <b>new kudos from {activity.profile.user}</b>'
)
if activity.activity_type == 'status_update':
text = activity.metadata['title']
mentioned_profiles = get_profiles_from_text(text).exclude(id__in=[activity.profile_id])
send_mention_notification_to_users(activity, mentioned_profiles)
# Added because comments and likes aren't direct members of Activity,
# so new likes and comments don't trigger the Activity post_save signal.
def create_comment_notification(sender, **kwargs):
comment = kwargs['instance']
comment_notification(comment)
mentioned_users_notification(comment)
def create_like_notification(sender, **kwargs):
like = kwargs['instance']
activity = like.activity
if activity.profile_id == like.profile_id:
return
send_notification_to_user(
like.profile.user,
activity.profile.user,
activity.url,
'new_like',
f'❤️ <b>{like.profile.user} liked your comment</b>: {activity.metadata.get("title", "")}'
)
@receiver(post_save, sender=Activity, dispatch_uid="psave_activitiy")
def psave_activitiy(sender, instance, created, **kwargs):
if created:
create_notification(sender=Activity, instance=instance)
@receiver(post_save, sender=Comment, dispatch_uid="psave_comment")
def psave_comment(sender, instance, created, **kwargs):
if created:
create_comment_notification(sender=Comment, instance=instance)
@receiver(post_save, sender=Like, dispatch_uid="psave_like")
def METHOD_NAME(sender, instance, created, **kwargs):
if created:
create_like_notification(sender=Like, instance=instance)
704 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class SubmitHotExpandTaskRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2019-01-23', 'SubmitHotExpandTask','drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Mappings(self):
return self.get_query_params().get('Mapping')
def set_Mappings(self, Mappings):
for depth1 in range(len(Mappings)):
if Mappings[depth1].get('DbShardColumn') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.DbShardColumn', Mappings[depth1].get('DbShardColumn'))
if Mappings[depth1].get('TbShardColumn') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.TbShardColumn', Mappings[depth1].get('TbShardColumn'))
if Mappings[depth1].get('ShardTbValue') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.ShardTbValue', Mappings[depth1].get('ShardTbValue'))
if Mappings[depth1].get('HotDbName') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.HotDbName', Mappings[depth1].get('HotDbName'))
if Mappings[depth1].get('ShardDbValue') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.ShardDbValue', Mappings[depth1].get('ShardDbValue'))
if Mappings[depth1].get('HotTableName') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.HotTableName', Mappings[depth1].get('HotTableName'))
if Mappings[depth1].get('LogicTable') is not None:
self.add_query_param('Mapping.' + str(depth1 + 1) + '.LogicTable', Mappings[depth1].get('LogicTable'))
def get_TaskDesc(self):
return self.get_query_params().get('TaskDesc')
def METHOD_NAME(self,TaskDesc):
self.add_query_param('TaskDesc',TaskDesc)
def get_SupperAccountMappings(self):
return self.get_query_params().get('SupperAccountMapping')
def set_SupperAccountMappings(self, SupperAccountMappings):
for depth1 in range(len(SupperAccountMappings)):
if SupperAccountMappings[depth1].get('InstanceName') is not None:
self.add_query_param('SupperAccountMapping.' + str(depth1 + 1) + '.InstanceName', SupperAccountMappings[depth1].get('InstanceName'))
if SupperAccountMappings[depth1].get('SupperAccount') is not None:
self.add_query_param('SupperAccountMapping.' + str(depth1 + 1) + '.SupperAccount', SupperAccountMappings[depth1].get('SupperAccount'))
if SupperAccountMappings[depth1].get('SupperPassword') is not None:
self.add_query_param('SupperAccountMapping.' + str(depth1 + 1) + '.SupperPassword', SupperAccountMappings[depth1].get('SupperPassword'))
def get_ExtendedMappings(self):
return self.get_query_params().get('ExtendedMapping')
def set_ExtendedMappings(self, ExtendedMappings):
for depth1 in range(len(ExtendedMappings)):
if ExtendedMappings[depth1].get('SrcInstanceId') is not None:
self.add_query_param('ExtendedMapping.' + str(depth1 + 1) + '.SrcInstanceId', ExtendedMappings[depth1].get('SrcInstanceId'))
if ExtendedMappings[depth1].get('SrcDb') is not None:
self.add_query_param('ExtendedMapping.' + str(depth1 + 1) + '.SrcDb', ExtendedMappings[depth1].get('SrcDb'))
def get_TaskName(self):
return self.get_query_params().get('TaskName')
def set_TaskName(self,TaskName):
self.add_query_param('TaskName',TaskName)
def get_DrdsInstanceId(self):
return self.get_query_params().get('DrdsInstanceId')
def set_DrdsInstanceId(self,DrdsInstanceId):
self.add_query_param('DrdsInstanceId',DrdsInstanceId)
def get_InstanceDbMappings(self):
return self.get_query_params().get('InstanceDbMapping')
def set_InstanceDbMappings(self, InstanceDbMappings):
for depth1 in range(len(InstanceDbMappings)):
if InstanceDbMappings[depth1].get('DbList') is not None:
self.add_query_param('InstanceDbMapping.' + str(depth1 + 1) + '.DbList', InstanceDbMappings[depth1].get('DbList'))
if InstanceDbMappings[depth1].get('InstanceName') is not None:
self.add_query_param('InstanceDbMapping.' + str(depth1 + 1) + '.InstanceName', InstanceDbMappings[depth1].get('InstanceName'))
def get_DbName(self):
return self.get_query_params().get('DbName')
def set_DbName(self,DbName):
self.add_query_param('DbName', DbName)
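All the `set_*Mappings` loops in this row implement the same convention: a list of dicts is flattened into 1-indexed dotted query keys (`Mapping.1.HotDbName`, `Mapping.2.LogicTable`, ...), with `None` values skipped. Below is a standalone sketch of that convention; `flatten_repeat_list` is a hypothetical helper, not part of the SDK.

```python
# Standalone illustration of the 'Mapping.N.Field' flattening used above.
# flatten_repeat_list is a hypothetical helper, not part of the aliyun SDK.
def flatten_repeat_list(prefix, items):
    params = {}
    for i, item in enumerate(items):
        for key, value in item.items():
            if value is not None:  # None values are skipped, as in set_Mappings
                # Indices are 1-based, matching the SDK's str(depth1 + 1).
                params[f"{prefix}.{i + 1}.{key}"] = value
    return params

mappings = [
    {"HotDbName": "db0", "LogicTable": "orders"},
    {"HotDbName": "db1", "LogicTable": None},
]
print(flatten_repeat_list("Mapping", mappings))
# {'Mapping.1.HotDbName': 'db0', 'Mapping.1.LogicTable': 'orders', 'Mapping.2.HotDbName': 'db1'}
```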
705 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import abc
import logging
from typing import Callable, Optional
import gym
import numpy as np
import reagent.core.types as rlt
import torch
from gym import spaces
from reagent.core.dataclasses import dataclass
from reagent.core.parameters import CONTINUOUS_TRAINING_ACTION_RANGE
from reagent.core.registry_meta import RegistryMeta
from reagent.training.utils import rescale_actions
# types for reference
ObsPreprocessor = Callable[[np.ndarray], rlt.FeatureData]
ServingObsPreprocessor = Callable[[np.ndarray], rlt.ServingFeatureData]
ActionExtractor = Callable[[rlt.ActorOutput], np.ndarray]
ServingActionExtractor = ActionExtractor
CONTINUOUS_MODEL_LOW = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[0])
CONTINUOUS_MODEL_HIGH = torch.tensor(CONTINUOUS_TRAINING_ACTION_RANGE[1])
logger = logging.getLogger(__name__)
@dataclass
class EnvWrapper(gym.core.Wrapper, metaclass=RegistryMeta):
"""Wrapper around it's environment, to simplify configuration."""
def __post_init_post_parse__(self):
super().__init__(self.make())
logger.info(
f"Env: {self.env};\n"
f"observation_space: {self.env.observation_space};\n"
f"action_space: {self.env.action_space};"
)
@abc.abstractmethod
def make(self) -> gym.Env:
pass
@abc.abstractmethod
def obs_preprocessor(self, obs: np.ndarray) -> rlt.FeatureData:
pass
@abc.abstractmethod
def serving_obs_preprocessor(self, obs: np.ndarray) -> rlt.ServingFeatureData:
pass
def METHOD_NAME(self, *ctor_args, **ctor_kwargs):
# ctor_args go to .to call
ctor_kwargs["non_blocking"] = True
return lambda *args, **kwargs: self.obs_preprocessor(*args, **kwargs).to(
*ctor_args, **ctor_kwargs
)
def get_serving_obs_preprocessor(self):
return lambda *args, **kwargs: self.serving_obs_preprocessor(*args, **kwargs)
def action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
action = actor_output.action
action_space = self.action_space
# Canonical rule to return one-hot encoded actions for discrete
assert (
len(action.shape) == 2 and action.shape[0] == 1
), f"{action} (shape: {action.shape}) is not a single action!"
if isinstance(action_space, spaces.Discrete):
return action.squeeze(0).argmax()
elif isinstance(action_space, spaces.MultiDiscrete):
return action.squeeze(0)
# Canonical rule to scale actions to CONTINUOUS_TRAINING_ACTION_RANGE
elif isinstance(action_space, spaces.Box):
assert len(action_space.shape) == 1, f"{action_space} not supported."
return rescale_actions(
action.squeeze(0),
new_min=torch.tensor(action_space.low),
new_max=torch.tensor(action_space.high),
prev_min=CONTINUOUS_MODEL_LOW,
prev_max=CONTINUOUS_MODEL_HIGH,
)
else:
raise NotImplementedError(f"Unsupported action space: {action_space}")
def serving_action_extractor(self, actor_output: rlt.ActorOutput) -> torch.Tensor:
action = actor_output.action
action_space = self.action_space
assert (
len(action.shape) == 2 and action.shape[0] == 1
), f"{action.shape} isn't (1, action_dim)"
if isinstance(action_space, spaces.Discrete):
return action.squeeze(0).argmax().view([])
elif isinstance(action_space, spaces.MultiDiscrete):
return action.squeeze(0)
elif isinstance(action_space, spaces.Box):
assert (
len(action_space.shape) == 1
), f"Unsupported Box with shape {action_space.shape}"
return action.squeeze(0)
else:
raise NotImplementedError(f"Unsupported action space: {action_space}")
def get_action_extractor(self):
return (
lambda *args, **kwargs: self.action_extractor(*args, **kwargs).cpu().numpy()
)
def get_serving_action_extractor(self):
return (
lambda *args, **kwargs: self.serving_action_extractor(*args, **kwargs)
.cpu()
.numpy()
)
# TODO: add more methods to simplify gym code
# e.g. normalization, specific preprocessor, etc.
# This can move a lot of the if statements from create_from_env methods.
@property
def max_steps(self) -> Optional[int]:
possible_keys = [
# gym should have _max_episode_steps
"_max_episode_steps",
# Minigrid should have max_steps
"max_steps",
]
for key in possible_keys:
res = getattr(self.env, key, None)
if res is not None:
return res
return None
@property
def possible_actions_mask(self) -> Optional[np.ndarray]:
ret = getattr(self.env, "possible_actions_mask", None)
if ret is not None:
ret = ret.copy()
return ret
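In `action_extractor` above, Box actions are mapped from the model's training range onto the environment's `[low, high]` via `rescale_actions`. The real helper lives in `reagent.training.utils`; the standalone re-derivation below only illustrates the affine arithmetic it is expected to perform.

```python
# Illustrative re-derivation of the affine rescaling behind action_extractor.
# The real implementation is reagent.training.utils.rescale_actions; this
# standalone sketch only shows the arithmetic.
import torch

def rescale(x, prev_min, prev_max, new_min, new_max):
    # Map x linearly from [prev_min, prev_max] onto [new_min, new_max].
    ratio = (x - prev_min) / (prev_max - prev_min)
    return ratio * (new_max - new_min) + new_min

# E.g. a model trained on [-1, 1] driving an env whose Box is [0, 10]:
x = torch.tensor([-1.0, 0.0, 1.0])
print(rescale(x, torch.tensor(-1.0), torch.tensor(1.0),
              torch.tensor(0.0), torch.tensor(10.0)))
# tensor([ 0.,  5., 10.])
```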
706 | from typing import List, Optional
from boa3.internal.model.operation.binary.additional import *
from boa3.internal.model.operation.binary.arithmetic import *
from boa3.internal.model.operation.binary.binaryoperation import BinaryOperation
from boa3.internal.model.operation.binary.logical import *
from boa3.internal.model.operation.binary.relational import *
from boa3.internal.model.operation.operation import IOperation
from boa3.internal.model.operation.operator import Operator
from boa3.internal.model.operation.unary.noneidentity import NoneIdentity
from boa3.internal.model.operation.unary.nonenotidentity import NoneNotIdentity
from boa3.internal.model.type.itype import IType
class BinaryOp:
# Arithmetic operations
Add = Addition()
Sub = Subtraction()
Mul = Multiplication()
Div = Division()
IntDiv = FloorDivision()
ListAdd = ListAddition()
Mod = Modulo()
Pow = Power()
Concat = Concat()
StrBytesMul = StrBytesMultiplication()
# Relational operations
NumEq = NumericEquality()
NumNotEq = NumericInequality()
Lt = LessThan()
LtE = LessThanOrEqual()
Gt = GreaterThan()
GtE = GreaterThanOrEqual()
IsNone = NoneIdentity()
IsNotNone = NoneNotIdentity()
Is = Identity()
IsNot = NotIdentity()
Eq = ObjectEquality()
NotEq = ObjectInequality()
# Logical operations
And = BooleanAnd()
Or = BooleanOr()
BitAnd = LogicAnd()
BitOr = LogicOr()
Xor = LogicXor()
LShift = LeftShift()
RShift = RightShift()
# Other operations
In = CollectionMembership()
NotIn = CollectionNotMembership()
@classmethod
def validate_type(cls, operator: Operator, left: IType, right: IType) -> Optional[BinaryOperation]:
"""
Gets a binary operation given the operator and the operands types.
:param operator: binary operator
:param left: type of the left operand
:param right: type of the right operand
:return: The operation if it exists; None otherwise
:rtype: BinaryOperation or None
"""
for id, op in vars(cls).items():
if isinstance(op, IOperation) and op.is_valid(operator, left, right):
if isinstance(op, BinaryOperation):
return op.build(left, right)
else:
from boa3.internal.model.type.type import Type
operand = right if left is Type.none else left
return op.build(operand)
@classmethod
def METHOD_NAME(cls, operator: Operator, left_operand: IType,
right_operand: Optional[IType] = None) -> Optional[BinaryOperation]:
"""
Gets a binary operation given the operator.
:param operator: binary operator
:param left_operand: left operand of the operator
:param right_operand: right operand of the operator
:return: The operation if it exists. If more than one operation exists with the same operator, returns the one
whose left operand matches; if none matches the left operand, returns the first found. None otherwise
:rtype: BinaryOperation or None
"""
valid_operations: List[BinaryOperation] = []
for id, op in vars(cls).items():
if isinstance(op, BinaryOperation) and op.operator is operator:
left, right = op.get_valid_operand_for_validation(left_operand, right_operand)
if left is not None:
return op.build(left_operand if right_operand is None else left, right)
else:
valid_operations.append(op)
return valid_operations[0] if len(valid_operations) > 0 else None
@classmethod
def get_operation(cls, operation: BinaryOperation) -> Optional[BinaryOperation]:
"""
Gets a binary operation given another operation.
:param operation: binary operation
:return: The operation if it exists; None otherwise
:rtype: BinaryOperation or None
"""
for id, op in vars(cls).items():
if type(operation) == type(op):
return op
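All three classmethods above share one lookup idiom: iterate `vars(cls)` over the class attributes and filter by operation type. Here is a self-contained sketch of that registry-by-class-attribute pattern; the `Op` and `Registry` names are hypothetical and independent of boa3.

```python
# Self-contained sketch of the vars(cls) registry idiom used by BinaryOp.
# Op and Registry are hypothetical names, independent of boa3.
class Op:
    def __init__(self, symbol):
        self.symbol = symbol

class Registry:
    Add = Op('+')
    Sub = Op('-')

    @classmethod
    def find(cls, symbol):
        # vars(cls) yields every class attribute; keeping only Op instances
        # mirrors how BinaryOp filters for IOperation/BinaryOperation.
        for _name, op in vars(cls).items():
            if isinstance(op, Op) and op.symbol == symbol:
                return op
        return None

print(Registry.find('+').symbol)  # prints: +
```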
707 | # Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Proxy design pattern utils."""
from abc import ABC
from functools import wraps
from typing import Any, Callable, Type, TypeVar, cast
C = TypeVar("C", bound=Type[ABC])
F = TypeVar("F", bound=Callable[..., Any])
def METHOD_NAME(interface: Type[ABC], attribute: str) -> Callable[[C], C]:
"""Proxy class decorator.
Use this decorator to transform the decorated class into a proxy that
forwards all calls defined in the `interface` interface to the `attribute`
class attribute that implements the same interface.
This class is useful in cases where you need to have a base class that acts
as a proxy or facade for one or more other classes. Both the decorated class
and the class attribute must inherit from the same ABC interface for this to
work. Only regular methods are supported, not class methods or attributes.
Example: Let's say you have an interface called `BodyBuilder`, a base class
called `FatBob` and another class called `BigJim`. `BigJim` implements the
`BodyBuilder` interface, but `FatBob` does not. And let's say you want
`FatBob` to look as if it implements the `BodyBuilder` interface, but in
fact it just forwards all calls to `BigJim`. You could do this:
```python
from abc import ABC, abstractmethod
class BodyBuilder(ABC):
@abstractmethod
def build_body(self):
pass
class BigJim(BodyBuilder):
def build_body(self):
print("Looks fit!")
class FatBob(BodyBuilder):
def __init__(self):
self.big_jim = BigJim()
def build_body(self):
self.big_jim.build_body()
fat_bob = FatBob()
fat_bob.build_body()
```
But this leads to a lot of boilerplate code with bigger interfaces and
makes everything harder to maintain. This is where the proxy class
decorator comes in handy. Here's how to use it:
```python
from zenml.utils.proxy_utils import make_proxy_class
from typing import Optional
@make_proxy_class(BodyBuilder, "big_jim")
class FatBob(BodyBuilder):
big_jim: Optional[BodyBuilder] = None
def __init__(self):
self.big_jim = BigJim()
fat_bob = FatBob()
fat_bob.build_body()
```
This is the same as implementing FatBob to call BigJim explicitly, but it
has the advantage that you don't need to write a lot of boilerplate code
or modify the FatBob class every time you change something in the
BodyBuilder interface.
This proxy decorator also allows classes to be extended dynamically at runtime:
if the `attribute` class attribute is set to None, the proxy class
will assume that the interface is not implemented by the class and will
raise a NotImplementedError:
```python
@make_proxy_class(BodyBuilder, "big_jim")
class FatBob(BodyBuilder):
big_jim: Optional[BodyBuilder] = None
def __init__(self):
self.big_jim = None
fat_bob = FatBob()
# Raises NotImplementedError, class not extended yet:
fat_bob.build_body()
fat_bob.big_jim = BigJim()
# Now it works:
fat_bob.build_body()
```
Args:
interface: The interface to implement.
attribute: The attribute of the base class to forward calls to.
Returns:
The proxy class.
"""
def make_proxy_method(cls: C, _method: F) -> F:
"""Proxy method decorator.
Used to transform a method into a proxy that forwards all calls to the
given class attribute.
Args:
cls: The class to use as the base.
_method: The method to replace.
Returns:
The proxy method.
"""
@wraps(_method)
def proxy_method(*args: Any, **kw: Any) -> Any:
"""Proxy method.
Args:
*args: The arguments to pass to the method.
**kw: The keyword arguments to pass to the method.
Returns:
The return value of the proxied method.
Raises:
TypeError: If the class does not have the attribute specified
in the decorator or if the attribute does not implement
the specified interface.
NotImplementedError: If the attribute specified in the
decorator is None, i.e. the interface is not implemented.
"""
self = args[0]
if not hasattr(self, attribute):
raise TypeError(
f"Class '{cls.__name__}' does not have a '{attribute}' "
f"as specified in the 'make_proxy_class' decorator."
)
proxied_obj = getattr(self, attribute)
if proxied_obj is None:
raise NotImplementedError(
f"This '{cls.__name__}' instance does not implement the "
f"'{interface.__name__}' interface."
)
if not isinstance(proxied_obj, interface):
raise TypeError(
f"Interface '{interface.__name__}' must be implemented by "
f"the '{cls.__name__}' '{attribute}' attribute."
)
proxied_method = getattr(proxied_obj, _method.__name__)
return proxied_method(*args[1:], **kw)
return cast(F, proxy_method)
def _inner_decorator(_cls: C) -> C:
"""Inner proxy class decorator.
Args:
_cls: The class to decorate.
Returns:
The decorated class.
Raises:
TypeError: If the decorated class does not implement the specified
interface.
"""
if not issubclass(_cls, interface):
raise TypeError(
f"Interface '{interface.__name__}' must be implemented by "
f"the '{_cls.__name__}' class."
)
for method_name in interface.__abstractmethods__:
original_method = getattr(_cls, method_name)
method_proxy = make_proxy_method(_cls, original_method)
# Make sure the proxy method is not considered abstract.
method_proxy.__isabstractmethod__ = False
setattr(_cls, method_name, method_proxy)
# Remove the abstract methods in the interface from the decorated class.
_cls.__abstractmethods__ = frozenset(
method_name
for method_name in _cls.__abstractmethods__
if method_name not in interface.__abstractmethods__
)
return cast(C, _cls)
return _inner_decorator
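Putting the docstring's pieces together, here is a runnable end-to-end check of the decorator, assuming the masked `METHOD_NAME` above is bound as `make_proxy_class`, the name its own docstring uses:

```python
# Runnable consolidation of the docstring examples above; assumes the masked
# METHOD_NAME is available as make_proxy_class, per its docstring.
from abc import ABC, abstractmethod
from typing import Optional

class BodyBuilder(ABC):
    @abstractmethod
    def build_body(self):
        pass

class BigJim(BodyBuilder):
    def build_body(self):
        print("Looks fit!")

@make_proxy_class(BodyBuilder, "big_jim")
class FatBob(BodyBuilder):
    big_jim: Optional[BodyBuilder] = None
    def __init__(self):
        self.big_jim = BigJim()

FatBob().build_body()  # prints "Looks fit!" via the proxied BigJim call
```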
708 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class InvokeCommandRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'InvokeCommand','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_ContainerName(self): # String
return self.get_query_params().get('ContainerName')
def set_ContainerName(self, ContainerName): # String
self.add_query_param('ContainerName', ContainerName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_CommandId(self): # String
return self.get_query_params().get('CommandId')
def set_CommandId(self, CommandId): # String
self.add_query_param('CommandId', CommandId)
def get_Timeout(self): # Long
return self.get_query_params().get('Timeout')
def set_Timeout(self, Timeout): # Long
self.add_query_param('Timeout', Timeout)
def get_Frequency(self): # String
return self.get_query_params().get('Frequency')
def set_Frequency(self, Frequency): # String
self.add_query_param('Frequency', Frequency)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RepeatMode(self): # String
return self.get_query_params().get('RepeatMode')
def set_RepeatMode(self, RepeatMode): # String
self.add_query_param('RepeatMode', RepeatMode)
def get_WindowsPasswordName(self): # String
return self.get_query_params().get('WindowsPasswordName')
def set_WindowsPasswordName(self, WindowsPasswordName): # String
self.add_query_param('WindowsPasswordName', WindowsPasswordName)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_Timed(self): # Boolean
return self.get_query_params().get('Timed')
def set_Timed(self, Timed): # Boolean
self.add_query_param('Timed', Timed)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def METHOD_NAME(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceIds(self): # RepeatList
return self.get_query_params().get('InstanceId')
def set_InstanceIds(self, InstanceId): # RepeatList
for depth1 in range(len(InstanceId)):
self.add_query_param('InstanceId.' + str(depth1 + 1), InstanceId[depth1])
def get_ContainerId(self): # String
return self.get_query_params().get('ContainerId')
def set_ContainerId(self, ContainerId): # String
self.add_query_param('ContainerId', ContainerId)
def get_Parameters(self): # Json
return self.get_query_params().get('Parameters')
def set_Parameters(self, Parameters): # Json
self.add_query_param('Parameters', Parameters)
def get_Username(self): # String
return self.get_query_params().get('Username')
def set_Username(self, Username): # String
self.add_query_param('Username', Username)
709 | import tarfile
from celery import shared_task
from sqlalchemy import select
from galaxy.celery import galaxy_task
from galaxy.celery.tasks import (
prepare_pdf_download,
purge_hda,
)
from galaxy.model import HistoryDatasetAssociation
from galaxy.schema import PdfDocumentType
from galaxy.schema.schema import CreatePagePayload
from galaxy.schema.tasks import GeneratePdfDownload
from galaxy.web.short_term_storage import ShortTermStorageAllocator
from galaxy_test.base.populators import (
DatasetPopulator,
wait_on,
)
from galaxy_test.driver.integration_util import IntegrationTestCase
@shared_task
def mul(x, y):
return x * y
@galaxy_task
def process_page(request: CreatePagePayload):
# an example task that consumes a pydantic model
return f"content_format is {request.content_format} with annotation {request.annotation}"
class TestCeleryTasksIntegration(IntegrationTestCase):
dataset_populator: DatasetPopulator
def setUp(self):
super().setUp()
self.dataset_populator = DatasetPopulator(self.galaxy_interactor)
def test_random_simple_task_to_verify_framework_for_testing(self):
assert mul.delay(4, 4).get(timeout=10) == 16
def test_task_with_pydantic_argument(self):
request = CreatePagePayload(
content_format="markdown",
title="my cool title",
slug="my-cool-title",
annotation="my cool annotation",
)
assert (
process_page.delay(request).get(timeout=10)
== "content_format is markdown with annotation my cool annotation"
)
def test_galaxy_task(self):
history_id = self.dataset_populator.new_history()
dataset = self.dataset_populator.new_dataset(history_id, wait=True)
hda = self._latest_hda
assert hda
def hda_purged():
latest_details = self.dataset_populator.get_history_dataset_details(
history_id, dataset=dataset, assert_ok=False, wait=False
)
return True if latest_details["purged"] else None
assert not hda_purged()
purge_hda.delay(hda_id=hda.id).get(timeout=10)
wait_on(hda_purged, "dataset to become purged")
assert hda_purged()
def test_pdf_download(self):
short_term_storage_allocator = self._app[ShortTermStorageAllocator] # type: ignore[type-abstract]
short_term_storage_target = short_term_storage_allocator.new_target("moo.pdf", "application/pdf")
request_id = short_term_storage_target.request_id
pdf_download_request = GeneratePdfDownload(
basic_markdown="*Hello World!*",
document_type=PdfDocumentType.page,
short_term_storage_request_id=request_id,
)
prepare_pdf_download.delay(request=pdf_download_request)
contents = self.dataset_populator.wait_on_download_request(request_id)
contents.raise_for_status()
assert "application/pdf" in contents.headers["content-type"]
assert contents.content[0:4] == b"%PDF"
def METHOD_NAME(self):
history_id = self.dataset_populator.new_history()
hda1 = self.dataset_populator.new_dataset(history_id, wait=True)
contents = hda1
temp_tar = self.dataset_populator.download_contents_to_store(history_id, contents, "tgz")
with tarfile.open(name=temp_tar) as tf:
assert "datasets_attrs.txt" in tf.getnames()
second_history_id = self.dataset_populator.new_history()
as_list = self.dataset_populator.create_contents_from_store(
second_history_id,
store_path=temp_tar,
)
assert len(as_list) == 1
new_hda = as_list[0]
assert new_hda["model_class"] == "HistoryDatasetAssociation"
assert new_hda["state"] == "discarded"
assert not new_hda["deleted"]
@property
def _latest_hda(self):
stmt = select(HistoryDatasetAssociation).order_by(HistoryDatasetAssociation.table.c.id.desc()).limit(1)
return self._app.model.session.scalars(stmt).first()
710 | import os
import re
from copy import deepcopy
from datetime import datetime, timezone
import pytest
from ggshield.core.config import Config
from ggshield.core.config.auth_config import (
InstanceConfig,
prepare_auth_config_dict_for_save,
)
from ggshield.core.config.utils import get_auth_config_filepath, replace_in_keys
from ggshield.core.errors import UnknownInstanceError
from tests.unit.conftest import write_text, write_yaml
from tests.unit.core.config.conftest import TEST_AUTH_CONFIG
@pytest.fixture(autouse=True)
def env_vars(monkeypatch):
monkeypatch.setenv("GITGUARDIAN_API_URL", "https://api.gitguardian.com")
@pytest.mark.usefixtures("isolated_fs")
class TestAuthConfig:
def test_load(self):
"""
GIVEN a default auth config
WHEN loading the config
THEN when serializing it again, it matches the data.
"""
write_yaml(get_auth_config_filepath(), TEST_AUTH_CONFIG)
config = Config()
assert config.auth_config.instances[0].account.token_name == "my_token"
assert config.auth_config.instances[0].default_token_lifetime == 1
assert config.auth_config.default_token_lifetime == 2
config_data = config.auth_config.to_dict()
config_data = prepare_auth_config_dict_for_save(config_data)
replace_in_keys(config_data, old_char="_", new_char="-")
assert config_data == TEST_AUTH_CONFIG
@pytest.mark.parametrize("n", [0, 2])
def test_no_account(self, n):
"""
GIVEN an auth config with an instance with 0 or more than 1 accounts
WHEN loading the AuthConfig
THEN it raises
"""
raw_config = deepcopy(TEST_AUTH_CONFIG)
raw_config["instances"][0]["accounts"] = (
raw_config["instances"][0]["accounts"] * n
)
write_yaml(get_auth_config_filepath(), raw_config)
with pytest.raises(
AssertionError,
match="Each GitGuardian instance should have exactly one account",
):
Config()
def test_invalid_format(self):
"""
GIVEN an auth config file with invalid content
WHEN loading AuthConfig
THEN it raises
"""
write_text(get_auth_config_filepath(), "Not a:\nyaml file.\n")
expected_output = (
f"{re.escape(get_auth_config_filepath())} is not a valid YAML file:"
)
with pytest.raises(
ValueError,
match=expected_output,
):
Config()
def test_token_not_expiring(self):
"""
GIVEN an auth config file with a token never expiring
WHEN loading the AuthConfig
THEN it works
"""
raw_config = deepcopy(TEST_AUTH_CONFIG)
raw_config["instances"][0]["accounts"][0]["expire-at"] = None
write_yaml(get_auth_config_filepath(), raw_config)
config = Config()
assert config.auth_config.instances[0].account.expire_at is None
def test_update(self):
"""
GIVEN -
WHEN modifying the default config
THEN it's not persisted until .save() is called
"""
config = Config()
config.auth_config.get_or_create_instance("custom")
with pytest.raises(UnknownInstanceError):
Config().auth_config.get_instance("custom")
config.save()
instance = Config().auth_config.get_instance("custom")
assert instance.url == "custom"
def test_load_file_not_existing(self):
"""
GIVEN the auth config file not existing
WHEN loading the config
THEN it works and has the default configuration
"""
config = Config()
assert config.instance_name == "https://dashboard.gitguardian.com"
assert config.auth_config.instances == []
def test_save_file_not_existing(self):
"""
GIVEN a config object and the auth config file not existing
WHEN saving the config
THEN it works
AND when loading the config again it has the correct values
"""
config = Config()
assert not os.path.exists(get_auth_config_filepath())
config.auth_config.get_or_create_instance("custom")
config.save()
updated_config = Config()
instance = updated_config.auth_config.get_instance("custom")
assert instance.url == "custom"
def test_timezone_aware_expired(self):
"""
GIVEN a config with a configured instance
WHEN loading the config
THEN the instance expiration date is timezone aware
"""
write_yaml(get_auth_config_filepath(), TEST_AUTH_CONFIG)
config = Config()
assert config.auth_config.instances[0].account.expire_at.tzinfo is not None
def METHOD_NAME(self):
token_data = {
"type": "personal_access_token",
"account_id": 8,
"name": "ggshield token 2022-10-13",
"scope": ["scan"],
"expire_at": "2022-10-17T11:55:06Z",
}
instance = InstanceConfig(account=None, url="u")
instance.init_account(token="tok", token_data=token_data)
assert instance.account.expire_at == datetime(
2022, 10, 17, 11, 55, 6, tzinfo=timezone.utc
)
711 | # Copyright 2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import numpy as np
from nnabla.logger import logger
def set_random_pseed(comm):
import nnabla as nn
tc = comm.ctx.backend[0].split(":")[1]
x = nn.Variable.from_numpy_array(np.random.randint(
low=0, high=1 << 16 if tc == "half" else 1 << 30))
comm.broadcast(x)
from nnabla.random import set_parameter_seed
set_parameter_seed(int(x.d))
from nnabla.random import pseed
logger.info(f"[rank {comm.rank}] seed: {pseed}")
def init_nnabla(conf=None, ext_name=None, device_id=None, type_config=None, random_pseed=True):
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from .comm import CommunicatorWrapper
if conf is None:
conf = AttrDict()
if ext_name is not None:
conf.ext_name = ext_name
if device_id is not None:
conf.device_id = device_id
if type_config is not None:
conf.type_config = type_config
# set context
ctx = get_extension_context(
ext_name=conf.ext_name, device_id=conf.device_id, type_config=conf.type_config)
# init communicator
comm = CommunicatorWrapper(ctx)
nn.set_default_context(comm.ctx)
# set random seed for parameter
if random_pseed:
set_random_pseed(comm)
# disable outputs from logger except rank==0
if comm.rank > 0:
import logging
logger.setLevel(logging.ERROR)
return comm
class AttrDict(dict):
# special internal variable used for error message.
_parent = []
def __setattr__(self, key, value):
if key == "_parent":
self.__dict__["_parent"] = value
return
self[key] = value
def __getattr__(self, key):
if key not in self:
raise AttributeError(
"dict (AttrDict) has no chain of attributes '{}'".format(".".join(self._parent + [key])))
if isinstance(self[key], dict):
self[key] = AttrDict(self[key])
self[key]._parent = self._parent + [key]
return self[key]
def METHOD_NAME(self):
self.dump()
def dump(self, file=sys.stdout, sort_keys=True):
opened_here = False
if not hasattr(file, "write"):
assert isinstance(file, str)
file = open(file, 'w')
opened_here = True
out = "\n================================configs================================\n"
iterator = self.items()
if sort_keys:
iterator = sorted(iterator, key=lambda x: x[0])
for k, v in iterator:
out += "{}: {}\n".format(k, v)
out += "======================================================================="
print(out, file=file)
if opened_here:
# close the handle only if it was opened here, to avoid leaking it
file.close()
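# Minimal AttrDict usage sketch (hypothetical values):
#
#     conf = AttrDict({"model": {"depth": 50}})
#     conf.model.depth   # -> 50 (nested dicts are wrapped lazily)
#     conf.lr = 0.1      # attribute writes become dict entries
#     conf.model.width   # -> AttributeError naming the chain "model.width"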
class DictInterfaceFactory(object):
'''Creating a single dict interface of any function or class.
Example:
.. code-block:: python
# Define a function.
def foo(a, b=1, c=None):
for k, v in locals().items():
print(k, v)
# Register the function to the factory.
dictif = DictInterfaceFactory()
dictif.register(foo)
# You can call the registered function by name and a dict representing the arguments.
cfg = dict(a=1, c='hello')
dictif.call('foo', cfg)
# The following will fail because the `foo` function requires `a`.
# cfg = dict(c='hello')
# dictif.call('foo', cfg)
# Any argument not required will be just ignored.
cfg = dict(a=1, aaa=0)
dictif.call('foo', cfg)
# You can also use it for class initializer (we use it as a class decorator).
@dictif.register
class Bar:
def __init__(self, a, b, c=None):
for k, v in locals().items():
print(k, v)
bar = dictif.call('Bar', dict(a=0, b=0))
'''
def __init__(self):
self._factory = {}
def register(self, cls):
import inspect
# config interface function
def func(cfg):
sig = inspect.signature(cls)
# Handle all arguments of the created class
args = {}
for p in sig.parameters.values():
# Positional argument
if p.default is p.empty and p.name not in cfg:
raise ValueError(
f'`{cls.__name__}` requires an argument `{p.name}`. Not found in cfg={cfg}.')
args[p.name] = cfg.get(p.name, p.default)
return cls(**args)
# Register config interface function
self._factory[cls.__name__] = func
return cls
def call(self, name, cfg):
if name in self._factory:
return self._factory[name](cfg)
raise ValueError(
f'`{name}` not found in `{list(self._factory.keys())}`.')
def makedirs(dirpath):
if os.path.exists(dirpath):
if os.path.isdir(dirpath):
return
else:
raise ValueError(
"{} already exists as a file not a directory.".format(dirpath))
os.makedirs(dirpath)
def get_current_time():
from datetime import datetime
return datetime.now().strftime('%y%m%d_%H%M%S')
def get_iteration_per_epoch(dataset_size, batch_size, round="ceil"):
"""
Calculate a number of iterations to see whole images in dataset (= 1 epoch).
Args:
dataset_size (int): A number of images in dataset
batch_size (int): A number of batch_size.
round (str): Round method. One of ["ceil", "floor"].
return: int
"""
import numpy as np
round_func = {"ceil": np.ceil, "floor": np.floor}
if round not in round_func:
raise ValueError("Unknown rounding method {}. must be one of {}.".format(round,
list(round_func.keys())))
ipe = float(dataset_size) / batch_size
return int(round_func[round](ipe)) | null |
712 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class InsertApplicationRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'InsertApplication', 'Edas')
self.set_uri_pattern('/pop/v5/changeorder/co_create_app')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WebContainer(self): # String
return self.get_query_params().get('WebContainer')
def set_WebContainer(self, WebContainer): # String
self.add_query_param('WebContainer', WebContainer)
def get_EcuInfo(self): # String
return self.get_query_params().get('EcuInfo')
def set_EcuInfo(self, EcuInfo): # String
self.add_query_param('EcuInfo', EcuInfo)
def get_MinHeapSize(self): # Integer
return self.get_query_params().get('MinHeapSize')
def set_MinHeapSize(self, MinHeapSize): # Integer
self.add_query_param('MinHeapSize', MinHeapSize)
def get_BuildPackId(self): # Integer
return self.get_query_params().get('BuildPackId')
def set_BuildPackId(self, BuildPackId): # Integer
self.add_query_param('BuildPackId', BuildPackId)
def get_ComponentIds(self): # String
return self.get_query_params().get('ComponentIds')
def set_ComponentIds(self, ComponentIds): # String
self.add_query_param('ComponentIds', ComponentIds)
def get_HealthCheckUrl(self): # String
return self.get_query_params().get('HealthCheckUrl')
def set_HealthCheckUrl(self, HealthCheckUrl): # String
self.add_query_param('HealthCheckUrl', HealthCheckUrl)
def get_ReservedPortStr(self): # String
return self.get_query_params().get('ReservedPortStr')
def set_ReservedPortStr(self, ReservedPortStr): # String
self.add_query_param('ReservedPortStr', ReservedPortStr)
def get_JvmOptions(self): # String
return self.get_query_params().get('JvmOptions')
def set_JvmOptions(self, JvmOptions): # String
self.add_query_param('JvmOptions', JvmOptions)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Cpu(self): # Integer
return self.get_query_params().get('Cpu')
def set_Cpu(self, Cpu): # Integer
self.add_query_param('Cpu', Cpu)
def get_MaxPermSize(self): # Integer
return self.get_query_params().get('MaxPermSize')
def set_MaxPermSize(self, MaxPermSize): # Integer
self.add_query_param('MaxPermSize', MaxPermSize)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_MaxHeapSize(self): # Integer
return self.get_query_params().get('MaxHeapSize')
def METHOD_NAME(self, MaxHeapSize): # Integer
self.add_query_param('MaxHeapSize', MaxHeapSize)
def get_EnablePortCheck(self): # Boolean
return self.get_query_params().get('EnablePortCheck')
def set_EnablePortCheck(self, EnablePortCheck): # Boolean
self.add_query_param('EnablePortCheck', EnablePortCheck)
def get_ApplicationName(self): # String
return self.get_query_params().get('ApplicationName')
def set_ApplicationName(self, ApplicationName): # String
self.add_query_param('ApplicationName', ApplicationName)
def get_Jdk(self): # String
return self.get_query_params().get('Jdk')
def set_Jdk(self, Jdk): # String
self.add_query_param('Jdk', Jdk)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Mem(self): # Integer
return self.get_query_params().get('Mem')
def set_Mem(self, Mem): # Integer
self.add_query_param('Mem', Mem)
def get_LogicalRegionId(self): # String
return self.get_query_params().get('LogicalRegionId')
def set_LogicalRegionId(self, LogicalRegionId): # String
self.add_query_param('LogicalRegionId', LogicalRegionId)
def get_EnableUrlCheck(self): # Boolean
return self.get_query_params().get('EnableUrlCheck')
def set_EnableUrlCheck(self, EnableUrlCheck): # Boolean
self.add_query_param('EnableUrlCheck', EnableUrlCheck)
def get_PackageType(self): # String
return self.get_query_params().get('PackageType')
def set_PackageType(self, PackageType): # String
self.add_query_param('PackageType', PackageType)
def get_Hooks(self): # String
return self.get_query_params().get('Hooks')
def set_Hooks(self, Hooks): # String
self.add_query_param('Hooks', Hooks) | null |
713 | import difflib
import math
import re
import string
from collections import Counter
def remove_words(phrase):
# Removes words and punctuation that don't help the diff comparison.
stop_words = (
r"a|an|and|as|at|but|by|en|etc|for|if|in|is|of|on|or|the|to|v\.?|via"
+ r"|vs\.?|united|states?|et|al|appellants?|defendants?|administrator|plaintiffs?|error"
+ r"|others|against|ex|parte|complainants?|original|claimants?|devisee"
+ r"|executrix|executor"
)
stop_words_reg = re.compile(r"^(%s)$" % stop_words, re.IGNORECASE)
# strips punctuation
exclude = set(string.punctuation)
phrase = "".join(ch for ch in phrase if ch not in exclude)
words = re.split("[\t ]", phrase)
result = []
for word in words:
word = stop_words_reg.sub("", word)
result.append(word)
return "".join(result)
def gen_diff_ratio(left, right):
"""
Generates a difference between two strings.
Returns a value between 0 and 1. 0 means the strings are totally different.
1 means they are identical.
This is a case sensitive comparison. If you want case-insensitive, ensure
that you run lower() on your strings before passing them in.
"""
# Remove common strings from all case names /before/ comparison.
# Doing so lowers the opportunity for false positives.
left = remove_words(left)
right = remove_words(right)
# compute the difference value
diff = difflib.SequenceMatcher(None, left.strip(), right.strip()).ratio()
return diff
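# Example: stop words ("v.") and punctuation are stripped before comparison,
# so these two case names are treated as identical:
#
#     gen_diff_ratio("Smith v. Jones", "Smith v Jones")  # -> 1.0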
def find_best_match(items, s, case_sensitive=True):
"""Find the string in the list that is the closest match to the string
:param items: The list to search within
:param s: The string to attempt to match
:param case_sensitive: Whether comparisons should honor case
:return dict with the index of the best matching value, its value, and its
match ratio.
"""
diff_ratios = []
if not case_sensitive:
s = s.lower()
for item in items:
# Calculate its diff_ratio, and add it to an array
if not case_sensitive:
item = item.lower()
diff = gen_diff_ratio(item, s)
diff_ratios.append(diff)
# Find the max ratio, and grab the corresponding result
max_ratio = max(diff_ratios)
i = diff_ratios.index(max_ratio)
return {
"match_index": i,
"match_str": items[i],
"ratio": max_ratio,
}
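# Example (hypothetical case names):
#
#     find_best_match(["State v. Smith", "Smith v. Jones"],
#                     "smith v jones", case_sensitive=False)
#     # -> {"match_index": 1, "match_str": "Smith v. Jones", "ratio": 1.0}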
def METHOD_NAME(results, case_name):
"""Returns all matches above a threshold.
This is nearly identical to find_best_match, but returns any good matches
in an array, and returns their confidence thresholds in a second array.
"""
diff_ratios = []
for result in results:
# Calculate its diff_ratio, and add it to an array
candidate_case_name = result["caseName"]
diff = gen_diff_ratio(candidate_case_name, case_name)
diff_ratios.append(diff)
return diff_ratios
def string_to_vector(text: str) -> Counter:
"""Convert strings to counter dict.
:param text: Text to vectorize
:return: A dictionary of words by count
"""
WORD = re.compile(r"\w+")
words = WORD.findall(text)
return Counter(words)
def get_cosine_similarity(left_str: str, right_str: str) -> float:
"""Calculate the cosine similarity of two strings.
This can be useful in circumstances when the counts of the words in the
strings have more meaning that the order of the characters or the edit
distances of individual words.
Better for long strings with sentence-length differences, where diff_lib's
ratio() can fall down.
"""
left, right = string_to_vector(left_str), string_to_vector(right_str)
intersection = set(left.keys()) & set(right.keys())
numerator = sum([left[x] * right[x] for x in intersection])
sum1 = sum([left[x] ** 2 for x in left.keys()])
sum2 = sum([right[x] ** 2 for x in right.keys()])
denominator = math.sqrt(sum1) * math.sqrt(sum2)
if not denominator:
return 0.0
else:
return float(numerator) / denominator | null |
714 | from __future__ import print_function
import numpy as np
try:
import scipy.special
except ImportError:
scipy = None
import IMP
import IMP.test
import IMP.algebra
import pickle
class UnitSimplexDTests(IMP.test.TestCase):
types = [
(1, IMP.algebra.UnitSimplex1D, (), IMP.algebra.Vector1D),
(2, IMP.algebra.UnitSimplex2D, (), IMP.algebra.Vector2D),
(3, IMP.algebra.UnitSimplex3D, (), IMP.algebra.Vector3D),
(4, IMP.algebra.UnitSimplex4D, (), IMP.algebra.Vector4D),
(5, IMP.algebra.UnitSimplex5D, (), IMP.algebra.Vector5D),
(6, IMP.algebra.UnitSimplex6D, (), IMP.algebra.Vector6D),
]
types += [
(d, IMP.algebra.UnitSimplexKD, (d,), IMP.algebra.VectorKD)
for d in range(1, 11)
]
@staticmethod
def _get_normal_thresh(tailprob, sigma=1, dim=1):
alpha = (1 - tailprob) ** dim
return sigma * np.sqrt(2) * scipy.special.erfinv(alpha)
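# e.g. tailprob=1e-3, sigma=1, dim=1 yields the two-sided normal quantile t
# with P(|X| <= t) = 0.999, roughly 3.29.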
def test_construction(self):
"""Check that fixed-dimension simplices are constructed correctly"""
for d, st, args, vt in self.types:
s = st(*args)
self.assertEqual(s.get_dimension(), d)
def test_construct_kd_with_wrong_dimension_raises_error(self):
self.assertRaisesUsageException(IMP.algebra.UnitSimplexKD, 0)
self.assertRaisesUsageException(IMP.algebra.UnitSimplexKD, -1)
def METHOD_NAME(self):
for d, st, args, vt in self.types:
s = st(*args)
v = s.get_barycenter()
self.assertIsInstance(v, vt)
self.assertSequenceAlmostEqual(list(s.get_barycenter()), [1.0 / d] * d)
def test_get_contains(self):
for d, st, args, vt in self.types:
s = st(*args)
for i in range(10):
# vt is the vector class itself, so check identity rather than isinstance
if vt is IMP.algebra.VectorKD:
v = -np.log(np.random.uniform(size=d + 1))
v /= np.sum(v)
self.assertFalse(s.get_contains(vt(v)))
v = -np.log(np.random.uniform(size=d))
self.assertFalse(s.get_contains(vt(v)))
v /= np.sum(v)
self.assertTrue(s.get_contains(vt(v)))
def test_get_vertices(self):
for d, st, args, vt in self.types:
s = st(*args)
vs = IMP.algebra.get_vertices(s)
I = np.eye(d)
self.assertEqual(len(vs), d)
for i, v in enumerate(vs):
self.assertIsInstance(v, vt)
self.assertSequenceAlmostEqual(list(v), list(I[i, :]))
def test_get_increasing_from_embedded(self):
for d, st, args, vt in self.types:
s = st(*args)
for i in range(10):
v = -np.log(np.random.uniform(size=d))
v /= np.sum(v)
inc = IMP.algebra.get_increasing_from_embedded(s, vt(v))
self.assertIsInstance(inc, vt)
self.assertSequenceAlmostEqual(list(inc), list(np.cumsum(v)))
def test_get_embedded_from_increasing(self):
for d, st, args, vt in self.types:
s = st(*args)
for i in range(10):
v = -np.log(np.random.uniform(size=d))
v /= np.sum(v)
inc = np.cumsum(v)
v2 = IMP.algebra.get_embedded_from_increasing(s, vt(inc))
self.assertIsInstance(v2, vt)
self.assertSequenceAlmostEqual(list(v2), list(v))
def test_get_projected(self):
for d, st, args, vt in self.types:
s = st(*args)
v = np.random.normal(size=d)
v_proj = IMP.algebra.get_projected(s, vt(v))
self.assertIsInstance(v_proj, vt)
v_proj = np.array(v_proj, dtype=np.double)
pos_inds = v_proj != 0.0
vshift = v[pos_inds] - v_proj[pos_inds]
self.assertTrue(np.all(v_proj >= 0))
self.assertAlmostEqual(np.sum(v_proj), 1)
# projection has cut point
if len(v[~pos_inds]) > 0:
min_pos = np.amin(v[pos_inds])
max_zero = np.amax(v[~pos_inds])
self.assertGreater(min_pos, max_zero)
# projection is rigid shift
self.assertSequenceAlmostEqual(
list(vshift), [vshift[0]] * len(vshift)
)
def test_get_random_vector_on(self):
for d, st, args, vt in self.types:
s = st(*args)
for i in range(10):
v = IMP.algebra.get_random_vector_on(s)
self.assertIsInstance(v, vt)
self.assertEqual(v.get_dimension(), d)
print(v)
print(np.sum(list(v)))
self.assertAlmostEqual(np.sum(v), 1)
@IMP.test.skipIf(scipy is None, "Requires SciPy")
def test_get_random_vector_on_is_uniform(self):
"""Test that result of get_random_vector_on is uniform on simplex.
Checks that each component of the Monte Carlo estimate of the mean
follows the central limit theorem.
"""
n = 1000
fail_prob = 1e-3 # Probability of all tests failing.
each_fail_prob = 1 - (1 - fail_prob) ** (1.0 / len(self.types))
for d, st, args, vt in self.types:
s = st(*args)
bary_vs = []
c = s.get_barycenter()
for i in range(n):
v = IMP.algebra.get_random_vector_on(s)
bary_vs.append(np.array(v - c, dtype=np.double))
if scipy:
mean_bary_vs = np.mean(bary_vs, axis=0)
mcse = ((d - 1.0) / (d + 1.0) / n) ** 0.5 / d
mean_thresh = self._get_normal_thresh(
each_fail_prob, dim=d, sigma=mcse
)
for i in range(d):
self.assertLessEqual(mean_bary_vs[i], mean_thresh)
def test_pickle(self):
"""Test (un-)pickle of UnitSimplexD"""
for d, st, args, vt in self.types:
s1 = st(*args)
s2 = st(*args)
s2.foo = 'bar'
dump = pickle.dumps((s1, s2))
news1, news2 = pickle.loads(dump)
self.assertLess(IMP.algebra.get_distance(
s1.get_barycenter(), news1.get_barycenter()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
s2.get_barycenter(), news2.get_barycenter()), 1e-4)
self.assertEqual(news2.foo, 'bar')
self.assertRaises(TypeError, s1._set_from_binary, 42)
if __name__ == "__main__":
IMP.test.main() | null |
715 | """Unit Tests for the MPA Dataset Pipelines Transforms Augments."""
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import random
import numpy as np
import pytest
from PIL import Image
from otx.algorithms.classification.adapters.mmcls.datasets.pipelines.transforms.augmix import (
AugMixAugment,
OpsFabric,
)
from otx.algorithms.common.adapters.mmcv.pipelines.transforms.augments import (
CythonAugments,
)
@pytest.fixture
def METHOD_NAME() -> OpsFabric:
return OpsFabric("Rotate", 5, {"img_mean": 128})
@pytest.mark.xfail(reason="random may not return the same value on different machines.")
class TestOpsFabric:
def test_init(self, METHOD_NAME: OpsFabric) -> None:
"""Test OpsFabric initialization."""
assert METHOD_NAME.prob == 1.0
assert METHOD_NAME.hparams == {"img_mean": 128}
assert METHOD_NAME.aug_kwargs == {
"fillcolor": 128,
"resample": (Image.BILINEAR, Image.BICUBIC),
}
assert METHOD_NAME.aug_factory.magnitude == 5
assert METHOD_NAME.aug_factory.magnitude_std == float("inf")
assert METHOD_NAME.aug_factory.level_fn == METHOD_NAME._rotate_level_to_arg
assert METHOD_NAME.aug_factory.aug_fn == CythonAugments.rotate
def test_randomly_negate(self) -> None:
"""Test randomly_negate function."""
random.seed(1234)
assert OpsFabric.randomly_negate(5) == -5
assert OpsFabric.randomly_negate(5) == 5
assert OpsFabric.randomly_negate(5) == -5
def test_rotate_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test rotate_level_to_arg function."""
assert METHOD_NAME._rotate_level_to_arg(0, METHOD_NAME.hparams) == (0,)
assert METHOD_NAME._rotate_level_to_arg(5, METHOD_NAME.hparams) == (5 / 10 * 30,)
def test_enhance_increasing_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test enhance_increasing_level_to_arg function."""
assert METHOD_NAME._enhance_increasing_level_to_arg(0, METHOD_NAME.hparams) == (1.0,)
assert METHOD_NAME._enhance_increasing_level_to_arg(5, METHOD_NAME.hparams) == (1.0 + 5 / 10 * 0.9,)
def test_shear_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test shear_level_to_arg function."""
assert METHOD_NAME._shear_level_to_arg(0, METHOD_NAME.hparams) == (0,)
assert METHOD_NAME._shear_level_to_arg(5, METHOD_NAME.hparams) == (5 / 10 * 0.3,)
def test_translate_rel_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test translate_rel_level_to_arg function."""
assert METHOD_NAME._translate_rel_level_to_arg(0, METHOD_NAME.hparams) == (0,)
assert METHOD_NAME._translate_rel_level_to_arg(5, {"translate_pct": 0.5}) == (5 / 10 * 0.5,)
def test_posterize_increasing_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test posterize_increasing_level_to_arg function."""
assert METHOD_NAME._posterize_increasing_level_to_arg(0, METHOD_NAME.hparams) == (4,)
assert METHOD_NAME._posterize_increasing_level_to_arg(5, METHOD_NAME.hparams) == (4 - int(5 / 10 * 4),)
def test_solarize_increasing_level_to_arg(self, METHOD_NAME: OpsFabric) -> None:
"""Test solarize_increasing_level_to_arg function."""
assert METHOD_NAME._solarize_increasing_level_to_arg(0, METHOD_NAME.hparams) == (0,)
assert METHOD_NAME._solarize_increasing_level_to_arg(5, METHOD_NAME.hparams) == (256 - int(5 / 10 * 256),)
def test_call(self, METHOD_NAME: OpsFabric) -> None:
"""Test __call__ function."""
img = Image.new("RGB", (256, 256))
transformed_img = METHOD_NAME(img)
assert transformed_img != img # make sure the image was actually transformed
class TestAugMixAugment:
def test_init(self) -> None:
"""Test AugMixAugment initialization."""
aug_mix_augment = AugMixAugment(config_str="augmix-m5-w3")
assert isinstance(aug_mix_augment, AugMixAugment)
assert len(aug_mix_augment.ops) > 0
def test_apply_basic(self) -> None:
"""Test _apply_basic function."""
aug_mix_augment = AugMixAugment(config_str="augmix-m5-w3")
img = Image.new("RGB", (224, 224), color=(255, 0, 0))
mixing_weights = np.float32(np.random.dirichlet([aug_mix_augment.alpha] * aug_mix_augment.width))
m = np.float32(np.random.beta(aug_mix_augment.alpha, aug_mix_augment.alpha))
mixed_img = aug_mix_augment._apply_basic(img, mixing_weights, m)
assert isinstance(mixed_img, Image.Image)
def test_augmix_ops(self) -> None:
"""Test augmix_ops function."""
aug_mix_augment = AugMixAugment(config_str="augmix-m5-w3")
assert len(aug_mix_augment.ops) > 0
assert isinstance(aug_mix_augment.alpha, float)
assert isinstance(aug_mix_augment.width, int)
assert isinstance(aug_mix_augment.depth, int)
def test_call(self) -> None:
"""Test __call__ method."""
aug_mix_augment = AugMixAugment(config_str="augmix-m5-w3")
data = {"img": np.random.randint(0, 255, size=(224, 224, 3)).astype(np.uint8)}
results = aug_mix_augment(data)
assert "augmix" in results
assert isinstance(results["img"], Image.Image) | null |
716 | # Copyright 2021-2023 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unified_planning.model.walkers.generic import Walker
from unified_planning.model.fnode import FNode
class DagWalker(Walker):
"""DagWalker treats the expression as a DAG and performs memoization of the
intermediate results.
This should be used when the result of applying the function to an
expression is always the same, independently of where the expression has
been found; examples include substitution and solving.
Due to memoization, a few more things need to be taken into
account when using the DagWalker.
:func _get_key needs to be defined if additional arguments via
keywords need to be shared. This function should return the key to
be used in memoization. See substituter for an example.
"""
def __init__(self, invalidate_memoization=False):
"""The flag ``invalidate_memoization`` can be used to clear the cache
after the walk has been completed: the cache is one-time use.
"""
Walker.__init__(self)
self.memoization = {}
self.invalidate_memoization = invalidate_memoization
self.stack = []
return
def _get_children(self, expression: FNode):
return expression.args
def _push_with_children_to_stack(self, expression: FNode, **kwargs):
"""Add children to the stack."""
self.stack.append((True, expression))
for s in self._get_children(expression):
# Add only if not memoized already
key = self._get_key(s, **kwargs)
if key not in self.memoization:
self.stack.append((False, s))
def _compute_node_result(self, expression: FNode, **kwargs):
"""Apply function to the node and memoize the result.
Note: This function assumes that the results for the children
are already available.
"""
key = self._get_key(expression, **kwargs)
if key not in self.memoization:
try:
f = self.functions[expression.node_type]
except KeyError:
f = self.walk_error
args = [
self.memoization[self._get_key(s, **kwargs)]
for s in self._get_children(expression)
]
self.memoization[key] = f(expression, args=args, **kwargs)
def _process_stack(self, **kwargs):
"""Empties the stack by processing every node in it.
Processing is performed in two steps.
1- A node is expanded and all its children are pushed onto the stack
2- Once all children have been processed, the result for the node
is computed and memoized.
"""
while self.stack:
(was_expanded, expression) = self.stack.pop()
if was_expanded:
self._compute_node_result(expression, **kwargs)
else:
self._push_with_children_to_stack(expression, **kwargs)
def METHOD_NAME(self, expression: FNode, **kwargs):
"""Performs an iterative walk of the DAG"""
self.stack.append((False, expression))
self._process_stack(**kwargs)
res_key = self._get_key(expression, **kwargs)
return self.memoization[res_key]
def walk(self, expression: FNode, **kwargs):
if expression in self.memoization:
return self.memoization[expression]
res = self.METHOD_NAME(expression, **kwargs)
if self.invalidate_memoization:
self.memoization.clear()
return res
def _get_key(self, expression: FNode, **kwargs):
if not kwargs:
return expression
raise NotImplementedError(
"DagWalker should redefine '_get_key'" + " when using keywords arguments"
)
def walk_true(self, expression: FNode, args, **kwargs):
# pylint: disable=unused-argument
"""Returns True, independently from the children's value."""
return True
def walk_false(self, expression: FNode, args, **kwargs):
# pylint: disable=unused-argument
"""Returns False, independently from the children's value."""
return False
def walk_none(self, expression: FNode, args, **kwargs):
# pylint: disable=unused-argument
"""Returns None, independently from the children's value."""
return None
def walk_identity(self, expression: FNode, **kwargs):
# pylint: disable=unused-argument
"""Returns expression, independently from the childrens's value."""
return expression
def walk_any(self, expression: FNode, args, **kwargs):
# pylint: disable=unused-argument
"""Returns True if any of the children returned True."""
return any(args)
def walk_all(self, expression: FNode, args, **kwargs):
# pylint: disable=unused-argument
"""Returns True if all the children returned True."""
return all(args)
# EOC DagWalker | null |
717 | #
# Copyright (c) 2021-2022 Red Hat, Inc.
#
# This file is part of nmstate
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import os
import pytest
import time
from contextlib import contextmanager
import libnmstate
from libnmstate.schema import Bond
from libnmstate.schema import Interface
from libnmstate.schema import InterfaceState
from libnmstate.schema import InterfaceType
from libnmstate.schema import LLDP
from libnmstate.schema import VLAN
from .testlib import cmdlib
from .testlib import statelib
from .testlib.veth import veth_interface
LLDPTEST1 = "lldptest1"
LLDPTEST1_PEER = "lldptest1.peer"
LLDPTEST2 = "lldptest2"
LLDPTEST2_PEER = "lldptest2.peer"
LLDPTEST3 = "lldptest3"
LLDPTEST3_PEER = "lldptest3.peer"
BOND50 = "bond50"
VLAN_PRODNET = "prod-net"
VLAN_MGMTNET = "mgmt-net"
AUTOCONF_CMD = "nmstate-autoconf"
LLDP_BASIC_STATE = {
Interface.KEY: [
{
Interface.NAME: LLDPTEST1,
Interface.STATE: InterfaceState.UP,
Interface.TYPE: InterfaceType.VETH,
},
{
Interface.NAME: LLDPTEST2,
Interface.STATE: InterfaceState.UP,
Interface.TYPE: InterfaceType.VETH,
},
{
Interface.NAME: LLDPTEST3,
Interface.STATE: InterfaceState.UP,
Interface.TYPE: InterfaceType.VETH,
},
]
}
@pytest.fixture(scope="module")
def lldpifaces_env():
with veth_interface(LLDPTEST1, LLDPTEST1_PEER), veth_interface(
LLDPTEST2, LLDPTEST2_PEER
), veth_interface(LLDPTEST3, LLDPTEST3_PEER):
yield
_iface_cleanup(BOND50)
def test_autoconf_prodnet_and_mgmtnet(lldpifaces_env):
with METHOD_NAME(LLDP_BASIC_STATE):
_send_lldp_packet(LLDPTEST1_PEER, "lldp_prodnet.pcap")
_send_lldp_packet(LLDPTEST2_PEER, "lldp_prodnet.pcap")
_send_lldp_packet(LLDPTEST3_PEER, "lldp_mgmtnet.pcap")
cmdlib.exec_cmd(AUTOCONF_CMD.split(), check=True)
bond_cstate = statelib.show_only((BOND50,))[Interface.KEY][0]
assert LLDPTEST1 in bond_cstate[Bond.CONFIG_SUBTREE][Bond.PORT]
assert LLDPTEST2 in bond_cstate[Bond.CONFIG_SUBTREE][Bond.PORT]
vlan_prod = statelib.show_only((VLAN_PRODNET,))[Interface.KEY][0]
assert BOND50 == vlan_prod[VLAN.CONFIG_SUBTREE][VLAN.BASE_IFACE]
vlan_mgmt = statelib.show_only((VLAN_MGMTNET,))[Interface.KEY][0]
assert LLDPTEST3 == vlan_mgmt[VLAN.CONFIG_SUBTREE][VLAN.BASE_IFACE]
def test_autoconf_all_prodnet(lldpifaces_env):
with METHOD_NAME(LLDP_BASIC_STATE):
_send_lldp_packet(LLDPTEST1_PEER, "lldp_prodnet.pcap")
_send_lldp_packet(LLDPTEST2_PEER, "lldp_prodnet.pcap")
_send_lldp_packet(LLDPTEST3_PEER, "lldp_prodnet.pcap")
cmdlib.exec_cmd(AUTOCONF_CMD.split(), check=True)
bond_cstate = statelib.show_only((BOND50,))[Interface.KEY][0]
assert LLDPTEST1 in bond_cstate[Bond.CONFIG_SUBTREE][Bond.PORT]
assert LLDPTEST2 in bond_cstate[Bond.CONFIG_SUBTREE][Bond.PORT]
assert LLDPTEST3 in bond_cstate[Bond.CONFIG_SUBTREE][Bond.PORT]
vlan_prod = statelib.show_only((VLAN_PRODNET,))[Interface.KEY][0]
assert BOND50 == vlan_prod[VLAN.CONFIG_SUBTREE][VLAN.BASE_IFACE]
@contextmanager
def METHOD_NAME(ifstate):
for iface in ifstate.get(Interface.KEY, []):
iface[LLDP.CONFIG_SUBTREE] = {LLDP.ENABLED: True}
libnmstate.apply(ifstate)
try:
yield
finally:
for iface in ifstate.get(Interface.KEY, []):
iface[LLDP.CONFIG_SUBTREE][LLDP.ENABLED] = False
libnmstate.apply(ifstate)
def _send_lldp_packet(ifname, pcap):
test_dir = os.path.dirname(os.path.realpath(__file__))
cmdlib.exec_cmd(
f"tcpreplay --intf1={ifname} "
f"{test_dir}/test_captures/{pcap}".split(),
check=True,
)
time.sleep(1)
def _iface_cleanup(ifname):
ifstate = {
Interface.KEY: [
{
Interface.NAME: ifname,
Interface.STATE: InterfaceState.ABSENT,
},
]
}
libnmstate.apply(ifstate) | null |
718 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a copy from tensorflow/python/util/protobuf/compare.py
# which is based on the original internal google sources with changes
# reapplied by hand.
"""Test utility functions for comparing proto2 messages in Python.
assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:
outer {
inner {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
...compared to the default output from assertEqual() that looks like this:
AssertionError: <my.Msg object at 0x9fb353c> != <my.Msg object at 0x9fb35cc>
Call it inside your unit test's absltest.TestCase subclasses like this:
from fhir.py.test_util import protobuf_compare
class MyTest(absltest.TestCase):
...
def testXXX(self):
...
compare.assertProtoEqual(self, a, b)
Alternatively:
from fhir.py.test_util import protobuf_compare
class MyTest(protobuf_compare.ProtoAssertions, absltest.TestCase):
...
def testXXX(self):
...
self.assertProtoEqual(a, b)
"""
from typing import cast, Any, Optional, TypeVar
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format
from absl.testing import absltest
_T = TypeVar('_T', bound=message.Message)
# pylint: disable=invalid-name
def assertProtoEqual(self,
a: message.Message,
b: message.Message,
check_initialized: bool = True,
normalize_numbers: bool = False,
msg: Optional[str] = None) -> None:
"""Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter.
Args:
self: absltest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
"""
pool = descriptor_pool.Default()
if isinstance(a, str):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in a, b:
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
cast(absltest.TestCase,
self).fail(f'Initialization errors: {errors}\n{pb}')
if normalize_numbers:
METHOD_NAME(pb)
cast(absltest.TestCase, self).assertMultiLineEqual(
text_format.MessageToString(a, descriptor_pool=pool),
text_format.MessageToString(b, descriptor_pool=pool),
msg=msg)
def METHOD_NAME(pb: _T) -> _T:
"""Normalizes types and precisions of number fields in a protocol buffer.
Due to subtleties in the python protocol buffer implementation, it is possible
for values to have different types and precision depending on whether they
were set and retrieved directly or deserialized from a protobuf. This function
normalizes integer values to plain ints regardless of wire width, 32-bit
floats to six digits of precision to account for python always storing them
as 64-bit, and ensures doubles are floating point for when they're set to
integers.
Modifies pb in place. Recurses into nested objects.
Args:
pb: proto2 message.
Returns:
the given pb, modified in place.
"""
for desc, values in pb.ListFields():
is_repeated = True
if desc.label != descriptor.FieldDescriptor.LABEL_REPEATED:
is_repeated = False
values = [values]
normalized_values = None
# Normalize integer fields to plain Python ints regardless of wire width;
# this keeps comparisons simple across alternate implementations where the
# distinction is more significant (e.g. the C++ implementation).
if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_SINT64):
normalized_values = [int(x) for x in values]
elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_ENUM):
normalized_values = [int(x) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
normalized_values = [round(x, 6) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
normalized_values = [round(float(x), 7) for x in values]
if normalized_values is not None:
if is_repeated:
pb.ClearField(desc.name)
getattr(pb, desc.name).extend(normalized_values)
else:
setattr(pb, desc.name, normalized_values[0])
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
desc.message_type.has_options and
desc.message_type.GetOptions().map_entry):
# This is a map, only recurse if the values have a message type.
if (desc.message_type.fields_by_number[2].type ==
descriptor.FieldDescriptor.TYPE_MESSAGE):
for v in values:
METHOD_NAME(v)
else:
for v in values:
# recursive step
METHOD_NAME(v)
return pb
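# Example: a TYPE_FLOAT field deserialized from a 32-bit wire value such as
# 0.30000001192092896 normalizes to round(x, 6) == 0.3, so it compares equal
# to a message whose field was assigned 0.3 directly.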
class ProtoAssertions:
"""Mix this into a absltest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(protobuf_compare.ProtoAssertions, absltest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args: Any, **kwargs: Any) -> None:
assertProtoEqual(self, *args, **kwargs) | null |
719 | # Copyright 2021-2023 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unified_planning
from unified_planning.shortcuts import *
from unified_planning.test import TestCase, main
from unified_planning.test.examples import get_example_problems
from unified_planning.engines import SequentialPlanValidator, ValidationResultStatus
from unified_planning.environment import get_environment
class TestProblem(TestCase):
def setUp(self):
TestCase.setUp(self)
self.problems = get_example_problems()
def test_all(self):
pv = SequentialPlanValidator(environment=get_environment())
for p in self.problems.values():
if not pv.supports(p.problem.kind):
continue
problem, plan = p.problem, p.plan
if SequentialPlanValidator.supports(problem.kind):
validation_result = pv.validate(problem, plan)
self.assertEqual(validation_result.status, ValidationResultStatus.VALID)
def test_all_from_factory(self):
with PlanValidator(name="sequential_plan_validator") as pv:
self.assertEqual(pv.name, "sequential_plan_validator")
for p in self.problems.values():
if not pv.supports(p.problem.kind):
continue
problem, plan = p.problem, p.plan
validation_result = pv.validate(problem, plan)
self.assertEqual(validation_result.status, ValidationResultStatus.VALID)
def test_all_from_factory_with_problem_kind(self):
for p in self.problems.values():
problem, plan = p.problem, p.plan
pk = problem.kind
if SequentialPlanValidator.supports(pk):
environment = unified_planning.environment.Environment()
environment.factory.preference_list = [
e for e in environment.factory.preference_list if e != "tamer"
]
with environment.factory.PlanValidator(
problem_kind=pk, plan_kind=plan.kind
) as pv:
self.assertEqual(pv.name, "sequential_plan_validator")
validation_result = pv.validate(problem, plan)
self.assertEqual(
validation_result.status, ValidationResultStatus.VALID
)
def test_quality_metric(self):
pv = SequentialPlanValidator()
problem, plan = self.problems["basic"]
problem = problem.clone()
problem.add_quality_metric(MinimizeSequentialPlanLength())
res = pv.validate(problem, plan)
me = res.metric_evaluations
assert me is not None
self.assertEqual(len(me), 1)
for qm, val in me.items():
self.assertIsInstance(qm, MinimizeSequentialPlanLength)
self.assertEqual(val, 1)
problem, plan = self.problems["locations_connected_visited_oversubscription"]
res = pv.validate(problem, plan)
me = res.metric_evaluations
assert me is not None
self.assertEqual(len(me), 1)
for qm, val in me.items():
self.assertIsInstance(qm, Oversubscription)
self.assertEqual(val, 15)
problem, plan = self.problems["locations_connected_cost_minimize"]
res = pv.validate(problem, plan)
me = res.metric_evaluations
assert me is not None
self.assertEqual(len(me), 1)
for qm, val in me.items():
self.assertIsInstance(qm, MinimizeActionCosts)
self.assertEqual(val, 10)
def METHOD_NAME(self):
problem = self.problems["robot_loader_weak_bridge"].problem
move = problem.action("move")
load = problem.action("load")
unload = problem.action("unload")
l1, l2, l3 = [problem.object(f"l{i}") for i in range(1, 4)]
# the plan is bad because going loaded from l3 to l1 violates a global constraint
invalid_action = up.plans.ActionInstance(move, (ObjectExp(l3), ObjectExp(l1)))
bad_plan = up.plans.SequentialPlan(
[
up.plans.ActionInstance(move, (ObjectExp(l1), ObjectExp(l3))),
up.plans.ActionInstance(load, (ObjectExp(l3),)),
invalid_action,
up.plans.ActionInstance(unload, (ObjectExp(l1),)),
]
)
with PlanValidator(name="sequential_plan_validator") as pv:
self.assertEqual(pv.name, "sequential_plan_validator")
self.assertTrue(pv.supports(problem.kind))
validation_result = pv.validate(problem, bad_plan)
self.assertEqual(validation_result.status, ValidationResultStatus.INVALID)
self.assertEqual(invalid_action, validation_result.inapplicable_action)
# when removing the trajectory constraints, the bad plan should become valid
problem = problem.clone()
problem.clear_trajectory_constraints()
validation_result = pv.validate(problem, bad_plan)
self.assertEqual(validation_result.status, ValidationResultStatus.VALID) | null |
720 | from typing import Union
from .registry import DATASET_COLLECTION_TYPES_REGISTRY
class CollectionTypeDescriptionFactory:
def __init__(self, type_registry=DATASET_COLLECTION_TYPES_REGISTRY):
# taking in type_registry though not using it, because we will someday
# I think.
self.type_registry = type_registry
def for_collection_type(self, collection_type):
assert collection_type is not None
return CollectionTypeDescription(collection_type, self)
class CollectionTypeDescription:
"""Abstraction over dataset collection type that ties together string
representation in database/model with type registry.
>>> factory = CollectionTypeDescriptionFactory(None)
>>> nested_type_description = factory.for_collection_type("list:paired")
>>> paired_type_description = factory.for_collection_type("paired")
>>> nested_type_description.has_subcollections_of_type("list")
False
>>> nested_type_description.has_subcollections_of_type("list:paired")
False
>>> nested_type_description.has_subcollections_of_type("paired")
True
>>> nested_type_description.has_subcollections_of_type(paired_type_description)
True
>>> nested_type_description.has_subcollections()
True
>>> paired_type_description.has_subcollections()
False
>>> paired_type_description.rank_collection_type()
'paired'
>>> nested_type_description.rank_collection_type()
'list'
>>> nested_type_description.effective_collection_type(paired_type_description)
'list'
>>> nested_type_description.effective_collection_type_description(paired_type_description).collection_type
'list'
>>> nested_type_description.child_collection_type()
'paired'
"""
collection_type: str
def __init__(self, collection_type: Union[str, "CollectionTypeDescription"], collection_type_description_factory):
if isinstance(collection_type, CollectionTypeDescription):
self.collection_type = collection_type.collection_type
else:
self.collection_type = collection_type
self.collection_type_description_factory = collection_type_description_factory
self.__has_subcollections = self.collection_type.find(":") > 0
def child_collection_type(self):
rank_collection_type = self.rank_collection_type()
return self.collection_type[len(rank_collection_type) + 1 :]
def child_collection_type_description(self):
child_collection_type = self.child_collection_type()
return self.collection_type_description_factory.for_collection_type(child_collection_type)
def effective_collection_type_description(self, subcollection_type):
effective_collection_type = self.effective_collection_type(subcollection_type)
return self.collection_type_description_factory.for_collection_type(effective_collection_type)
def effective_collection_type(self, subcollection_type):
if hasattr(subcollection_type, "collection_type"):
subcollection_type = subcollection_type.collection_type
if not self.has_subcollections_of_type(subcollection_type):
raise ValueError(f"Cannot compute effective subcollection type of {subcollection_type} over {self}")
return self.collection_type[: -(len(subcollection_type) + 1)]
def has_subcollections_of_type(self, other_collection_type):
"""Take in another type (either flat string or another
CollectionTypeDescription) and determine if this collection contains
subcollections matching that type.
The way this is used in map/reduce it seems to make the most sense
for this to return True if these subtypes are proper (i.e. a type
is not considered to have subcollections of its own type).
"""
if hasattr(other_collection_type, "collection_type"):
other_collection_type = other_collection_type.collection_type
collection_type = self.collection_type
return collection_type.endswith(other_collection_type) and collection_type != other_collection_type
def is_subcollection_of_type(self, other_collection_type):
if not hasattr(other_collection_type, "collection_type"):
other_collection_type = self.collection_type_description_factory.for_collection_type(other_collection_type)
return other_collection_type.has_subcollections_of_type(self)
def METHOD_NAME(self, other_collection_type):
if hasattr(other_collection_type, "collection_type"):
other_collection_type = other_collection_type.collection_type
collection_type = self.collection_type
return other_collection_type == collection_type
def subcollection_type_description(self):
if not self.__has_subcollections:
raise ValueError(f"Cannot generate subcollection type description for flat type {self.collection_type}")
subcollection_type = self.collection_type.split(":", 1)[1]
return self.collection_type_description_factory.for_collection_type(subcollection_type)
def has_subcollections(self):
return self.__has_subcollections
def rank_collection_type(self):
"""Return the top-level collection type corresponding to this
collection type. For instance the "rank" type of a list of paired
data ("list:paired") is "list".
"""
return self.collection_type.split(":")[0]
def rank_type_plugin(self):
return self.collection_type_description_factory.type_registry.get(self.rank_collection_type())
@property
def dimension(self):
return len(self.collection_type.split(":")) + 1
def multiply(self, other_collection_type):
collection_type = map_over_collection_type(self, other_collection_type)
return self.collection_type_description_factory.for_collection_type(collection_type)
def __str__(self):
return f"CollectionTypeDescription[{self.collection_type}]"
def map_over_collection_type(mapped_over_collection_type, target_collection_type):
if hasattr(mapped_over_collection_type, "collection_type"):
mapped_over_collection_type = mapped_over_collection_type.collection_type
if not target_collection_type:
return mapped_over_collection_type
else:
if hasattr(target_collection_type, "collection_type"):
target_collection_type = target_collection_type.collection_type
return f"{mapped_over_collection_type}:{target_collection_type}"
COLLECTION_TYPE_DESCRIPTION_FACTORY = CollectionTypeDescriptionFactory() | null |
721 | import os.path
from core import constants, log
import xml.etree.ElementTree as ET
import re
def validate(source_file, template):
log.log_subline_bold(f"Reading source file : '{source_file}'.")
source_file_string_placeholders = METHOD_NAME(
source_file
)
source_file_plurals_placeholders = get_source_file_plurals_placeholders(
source_file
)
for language in constants.languages.values():
translation_file = template.replace('<lang>', language)
if not os.path.exists(translation_file):
log.log_warning(f"'{translation_file}' doesn't exist.")
continue
log.log_subline(f"Validating '{translation_file}'.")
validate_string_translation(
source_file_string_placeholders, translation_file
)
validate_plurals_translation(
source_file_plurals_placeholders, translation_file
)
def validate_string_translation(source_file_placeholders, translation_file):
root = ET.parse(translation_file).getroot()
for string in root.findall('string'):
name = string.get('name')
value = ''.join(string.itertext())
if name in source_file_placeholders.keys():
for placeholder in source_file_placeholders[name]:
if not placeholder in value:
log.log_error(placeholder + " doesn't exist in '" +
name + "'\n File: " + translation_file)
def validate_plurals_translation(source_file_placeholders, translation_file):
for plural in ET.parse(translation_file).getroot().findall('plurals'):
if plural.get('name') in source_file_placeholders:
validate_item(plural, source_file_placeholders, translation_file)
def validate_item(plural, source_file_placeholders, translation_file):
items = plural.findall('item')
# compare every item against the first quantity's placeholders
source_placeholder = list(source_file_placeholders[plural.get('name')].values())[0]
for item in items:
if get_placeholders(item.text) != source_placeholder:
log.log_error("Plural '" + plural.get('name') + "': item '" +
item.get('quantity') + "' contain different placeholder " +
"or format specifier from default string \n File: " + translation_file)
for item in items:
if item.get('quantity') in source_file_placeholders[plural.get('name')]:
validate_quantity(plural, item, source_file_placeholders, translation_file)
def validate_quantity(plural, item, source_file_placeholders, translation_file):
plural_name = plural.get('name')
quantity = item.get('quantity')
for placeholder in source_file_placeholders[plural_name][quantity]:
if placeholder not in item.text:
log.log_error(placeholder + " doesn't exist in item '" +
quantity + "' of plural '" + plural_name +
"'\n File: " + translation_file)
def METHOD_NAME(file):
"""Reads the source xml file and return a dictionary having string name as
key and tuple of existing placeholders as values"""
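# Example: a hypothetical entry
#     <string name="greeting">Hello %s, you have {count} items</string>
# yields {"greeting": ["%s", "{count}"]}.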
placeholders = {}
root = ET.parse(file).getroot()
for element in root.findall('string'):
name = element.get('name')
value = ''.join(element.itertext())
placeholder = get_placeholders(value)
if placeholder:
placeholders[name] = placeholder
return placeholders
def get_source_file_plurals_placeholders(file):
placeholders = {}
root = ET.parse(file).getroot()
for plural in root.findall('plurals'):
placeholders.update(get_plural_items_placeholders(plural))
return placeholders
def get_plural_items_placeholders(plural):
plural_name = plural.get('name')
placeholders = {
plural_name: {
item.get("quantity"): get_placeholders(item.text)
for item in plural.findall("item")
if get_placeholders(item.text)
}
}
return placeholders if placeholders[plural_name] else {}
def get_placeholders(text):
# Matches brace-style placeholders such as "{count}" (the opening brace is
# optional in this pattern) and the %d/%f/%s format specifiers.
return re.findall("{?[a-zA-Z0-9_]+}|%d|%f|%s", text)
722 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""ViT + BatchEnsemble.
"""
# pylint: enable=line-too-long
import ml_collections
from experiments import common_fewshot # local file import from baselines.jft
def get_config():
"""Config."""
config = ml_collections.ConfigDict()
config.seed = 0
# JFT parameters.
config.dataset = 'jft/entity:1.0.0'
config.val_split = 'test[:49511]' # aka tiny_test/test[:5%] in task_adapt
config.train_split = 'train' # task_adapt used train+validation so +64167
config.num_classes = 18291
config.init_head_bias = -10.0 # ~= ln(1/18k) ~= ln(1/num_classes)
pp_common = '|value_range(-1, 1)'
pp_common += f'|onehot({config.num_classes})'
# To use ancestor 'smearing', use this line instead:
# pp_common += f'|onehot({config.num_classes}, key='labels_extended', key_result='labels') # pylint: disable=line-too-long
pp_common += '|keep(["image", "labels"])'
config.pp_train = 'decode_jpeg_and_inception_crop(224)|flip_lr' + pp_common
config.pp_eval = 'decode|resize_small(256)|central_crop(224)' + pp_common
config.shuffle_buffer_size = 250_000 # Per host, so small-ish is ok.
# Model parameters.
config.model = ml_collections.ConfigDict()
config.model.patches = ml_collections.ConfigDict()
config.model.patches.size = [32, 32]
config.model.hidden_size = 1024
config.model.representation_size = 1024
config.model.classifier = 'token'
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.num_layers = 24
config.model.transformer.dropout_rate = 0.0
config.model.transformer.mlp_dim = 4096
config.model.transformer.num_heads = 16
config.model.transformer.attention_dropout_rate = 0.0
# BatchEnsemble parameters.
config.model.transformer.be_layers = (22, 23)
config.model.transformer.ens_size = 3
config.model.transformer.random_sign_init = -0.5
# TODO(trandustin): Remove `ensemble_attention` hparam once we no longer
# need checkpoints that only apply BE on the FF block.
config.model.transformer.ensemble_attention = True
config.fast_weight_lr_multiplier = 1.0
# Optimizer parameters.
config.optim_name = 'Adam'
config.optim = ml_collections.ConfigDict()
config.optim.beta1 = 0.9
config.optim.beta2 = 0.999
# TODO(trandustin): Note BE uses config.weight_decay instead of
# config.optim.weight_decay as in typical ViT. xid/34376832 shows
# config.weight_decay is better for BE. Still need to sweep over LR though.
config.weight_decay = 0.1
config.lr = ml_collections.ConfigDict()
config.lr.base = 4e-4 # LR likely has to be lower for larger models!
config.lr.warmup_steps = 10_000
config.lr.decay_type = 'linear'
config.lr.linear_end = 1e-5
config.disable_preemption_reproducibility = True
config.batch_size = 4096 # Global batch size.
config.num_epochs = 7
config.log_training_steps = 50
config.log_eval_steps = 1000
config.checkpoint_steps = 5000
config.checkpoint_timeout = 10
config.prefetch_to_device = 2
config.trial = 0
# Few-shot eval section
config.fewshot = common_fewshot.get_fewshot()
config.fewshot.log_steps = 25_000
return config
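# Consumption sketch (assumed harness, not defined in this file): configs
# like this one are typically loaded through ml_collections' config_flags:
#
#     from ml_collections import config_flags
#     _CONFIG = config_flags.DEFINE_config_file("config")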
def METHOD_NAME(hyper):
# Use this as a sensible sweep over hyperparameters.
return hyper.product([
hyper.chainit([
hyper.product([
hyper.sweep('config.model.transformer.ens_size', [2]),
hyper.sweep('config.model.transformer.be_layers',
[(20, 21, 22, 23), (21, 22, 23)]),
]),
hyper.product([
hyper.sweep('config.model.transformer.ens_size', [3]),
hyper.sweep('config.model.transformer.be_layers',
[(21, 22, 23), (22, 23)]),
]),
]),
hyper.sweep('config.lr.base', [4e-4, 6e-4]),
hyper.sweep('config.model.transformer.random_sign_init',
[-0.5, 0.5]),
]) | null |
723 | # Drakkar-Software OctoBot-Trading
# Copyright (c) Drakkar-Software, All rights reserved.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library.
import pytest
import pytest_asyncio
import mock
import decimal
import sys
import asyncio
import octobot_commons.asyncio_tools as asyncio_tools
import octobot_trading.modes.script_keywords.context_management as context_management
import octobot_trading.exchanges as trading_exchanges
import octobot_trading.enums as enums
@pytest.fixture
def null_context():
context = context_management.Context(
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
None,
)
yield context
@pytest_asyncio.fixture
async def METHOD_NAME(backtesting_trader):
_, exchange_manager, trader_inst = backtesting_trader
context = context_management.Context(
mock.Mock(),
exchange_manager,
trader_inst,
mock.Mock(),
"BTC/USDT",
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
mock.Mock(),
)
context.signal_builder = mock.Mock()
context.is_trading_signal_emitter = mock.Mock(return_value=False)
context.orders_writer = mock.Mock(log_many=mock.AsyncMock())
portfolio_manager = exchange_manager.exchange_personal_data.portfolio_manager
# init portfolio with 0.5 BTC, 20 ETH and 30000 USDT and only 0.1 available BTC
portfolio_manager.portfolio.update_portfolio_from_balance({
'BTC': {'available': decimal.Decimal("0.1"), 'total': decimal.Decimal("0.5")},
'ETH': {'available': decimal.Decimal("20"), 'total': decimal.Decimal("20")},
'USDT': {'available': decimal.Decimal("30000"), 'total': decimal.Decimal("30000")}
}, True)
exchange_manager.client_symbols.append("BTC/USDT")
exchange_manager.client_symbols.append("ETH/USDT")
exchange_manager.client_symbols.append("ETH/BTC")
# init prices with BTC/USDT = 40000, ETH/BTC = 0.1 and ETH/USDT = 4000
portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["BTC/USDT"] = \
decimal.Decimal("40000")
portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["ETH/USDT"] = \
decimal.Decimal("4000")
portfolio_manager.portfolio_value_holder.value_converter.last_prices_by_trading_pair["ETH/BTC"] = \
decimal.Decimal("0.1")
portfolio_manager.handle_balance_updated()
yield context
@pytest.fixture
def symbol_market():
return {
enums.ExchangeConstantsMarketStatusColumns.LIMITS.value: {
enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT.value: {
enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT_MIN.value: 0.5,
enums.ExchangeConstantsMarketStatusColumns.LIMITS_AMOUNT_MAX.value: 100,
},
enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST.value: {
enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST_MIN.value: 1,
enums.ExchangeConstantsMarketStatusColumns.LIMITS_COST_MAX.value: 200
},
enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE.value: {
enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE_MIN.value: 0.5,
enums.ExchangeConstantsMarketStatusColumns.LIMITS_PRICE_MAX.value: 50
},
},
enums.ExchangeConstantsMarketStatusColumns.PRECISION.value: {
enums.ExchangeConstantsMarketStatusColumns.PRECISION_PRICE.value: 8,
enums.ExchangeConstantsMarketStatusColumns.PRECISION_AMOUNT.value: 8
}
}
@pytest.fixture
def event_loop():
# re-configure async loop each time this fixture is called
_configure_async_test_loop()
loop = asyncio.new_event_loop()
# use ErrorContainer to catch otherwise hidden exceptions occurring in async scheduled tasks
error_container = asyncio_tools.ErrorContainer()
loop.set_exception_handler(error_container.exception_handler)
yield loop
# will fail if exceptions have been silently raised
loop.run_until_complete(error_container.check())
loop.close()
@pytest.fixture
def skip_if_octobot_trading_mocking_disabled(request):
try:
with mock.patch.object(trading_exchanges.Trader, "cancel_order", mock.AsyncMock()):
pass
# mocking is available
except TypeError:
pytest.skip(reason=f"Disabled {request.node.name} [OctoBot-Trading mocks not allowed]")
def _configure_async_test_loop():
if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith('win'):
        # use WindowsSelectorEventLoopPolicy to avoid aiohttp connection close warnings
# https://github.com/encode/httpx/issues/914#issuecomment-622586610
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# set default values for async loop
_configure_async_test_loop() | null |
724 | # Copyright 2018-2022 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import bpy
import mathutils
from ...com import gltf2_blender_math
class Keyframe:
def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):
self.seconds = frame / bpy.context.scene.render.fps
self.frame = frame
self.fps = bpy.context.scene.render.fps
self.__length_morph = 0
        # Note: channels has some None items only for shape keys (SK) when some shape keys are not animated
if bake_channel is None:
self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]
if self.target != "value":
self.__indices = [c.array_index for c in channels]
else:
self.__indices = [i for i, c in enumerate(channels) if c is not None]
self.__length_morph = len(channels)
else:
if bake_channel == "value":
self.__length_morph = len(channels)
self.target = bake_channel
self.__indices = []
for i in range(self.METHOD_NAME()):
self.__indices.append(i)
# Data holders for virtual properties
self.__value = None
self.__in_tangent = None
self.__out_tangent = None
def METHOD_NAME(self):
length = {
"delta_location": 3,
"delta_rotation_euler": 3,
"delta_rotation_quaternion": 4,
"delta_scale": 3,
"location": 3,
"rotation_axis_angle": 4,
"rotation_euler": 3,
"rotation_quaternion": 4,
"scale": 3,
"value": self.__length_morph
}.get(self.target)
if length is None:
raise RuntimeError("Animations with target type '{}' are not supported.".format(self.target))
return length
def __set_indexed(self, value):
        # Sometimes Blender animations only reference a subset of components of a data target. A Keyframe
        # should always contain a complete Vector/Quaternion --> use the array_index value of the keyframe
        # to set components in such structures.
        # For shape keys, the result must contain all SK values.
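        # e.g. a "location" target with __indices == [0, 2] and value == (x, z)
        # yields [x, 0.0, z]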
result = [0.0] * self.METHOD_NAME()
for i, v in zip(self.__indices, value):
result[i] = v
return result
def get_indices(self):
return self.__indices
def set_value_index(self, idx, val):
self.__value[idx] = val
def set_value_index_in(self, idx, val):
self.__in_tangent[idx] = val
def set_value_index_out(self, idx, val):
self.__out_tangent[idx] = val
def set_first_tangent(self):
self.__in_tangent = self.__value
def set_last_tangent(self):
self.__out_tangent = self.__value
@property
def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.target == "value":
return self.__value
return gltf2_blender_math.list_to_mathutils(self.__value, self.target)
@value.setter
def value(self, value: typing.List[float]):
self.__value = self.__set_indexed(value)
@value.setter
def value_total(self, value: typing.List[float]):
self.__value = value
@property
def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.__in_tangent is None:
return None
if self.target == "value":
return self.__in_tangent
return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)
@in_tangent.setter
def in_tangent(self, value: typing.List[float]):
self.__in_tangent = self.__set_indexed(value)
@property
def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.__out_tangent is None:
return None
if self.target == "value":
return self.__out_tangent
return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)
@out_tangent.setter
def out_tangent(self, value: typing.List[float]):
self.__out_tangent = self.__set_indexed(value) | null |
725 | #!/usr/bin/env python
"""
Test all Mappings
"""
from unittest import TestCase, main
from g2p.log import LOGGER
from g2p.mappings import Mapping
from g2p.mappings.create_ipa_mapping import (
DISTANCE_METRICS,
create_mapping,
create_multi_mapping,
)
from g2p.transducer import Transducer
class MappingCreationTest(TestCase):
def METHOD_NAME(self):
self.mappings = [
{"in": "ɑ", "out": "AA"},
{"in": "eː", "out": "EY"},
{"in": "i", "out": "IY"},
{"in": "u", "out": "UW"},
{"in": "tʃ", "out": "CH"},
{"in": "p", "out": "P"},
{"in": "t", "out": "T"},
{"in": "k", "out": "K"},
{"in": "w", "out": "W"},
{"in": "ɡ", "out": "G"},
{"in": "ʒ", "out": "ZH"},
]
self.target_mapping = Mapping(
self.mappings, in_lang="eng-ipa", out_lang="eng-arpabet", out_delimiter=" "
)
def test_unigram_mappings(self):
src_mappings = [
{"in": "ᐃ", "out": "i"},
{"in": "ᐅ", "out": "u"},
{"in": "ᐊ", "out": "a"},
]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
mapping = create_mapping(src_mapping, self.target_mapping, quiet=True)
transducer = Transducer(mapping)
self.assertEqual(transducer("a").output_string, "ɑ")
self.assertEqual(transducer("i").output_string, "i")
self.assertEqual(transducer("u").output_string, "u")
def test_bigram_mappings(self):
src_mappings = [
{"in": "ᐱ", "out": "pi"},
{"in": "ᑎ", "out": "ti"},
{"in": "ᑭ", "out": "ki"},
]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
mapping = create_mapping(src_mapping, self.target_mapping, quiet=True)
transducer = Transducer(mapping)
self.assertEqual(transducer("pi").output_string, "pi")
self.assertEqual(transducer("ti").output_string, "ti")
self.assertEqual(transducer("ki").output_string, "ki")
def test_trigram_mappings(self):
src_mappings = [
{"in": "ᒋ", "out": "t͡ʃi"},
{"in": "ᒍ", "out": "t͡ʃu"},
{"in": "ᒐ", "out": "t͡ʃa"},
]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
mapping = create_mapping(src_mapping, self.target_mapping, quiet=True)
transducer = Transducer(mapping)
self.assertEqual(transducer("t͡ʃi").output_string, "tʃi")
self.assertEqual(transducer("t͡ʃu").output_string, "tʃu")
self.assertEqual(transducer("t͡ʃa").output_string, "tʃɑ")
def test_long_mappings(self):
src_mappings = [
{"in": "ᐧᐯ", "out": "pʷeː"},
{"in": "ᐧᑌ", "out": "tʷeː"},
{"in": "ᐧᑫ", "out": "kʷeː"},
]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
mapping = create_mapping(src_mapping, self.target_mapping, quiet=True)
transducer = Transducer(mapping)
self.assertEqual(transducer("pʷeː").output_string, "pweː")
self.assertEqual(transducer("tʷeː").output_string, "tweː")
self.assertEqual(transducer("kʷeː").output_string, "kweː")
def test_distance_errors(self):
src_mappings = [{"in": "ᐃ", "out": "i"}]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
# Exercise looking up distances in the known list
with self.assertRaises(ValueError):
_ = create_mapping(
src_mapping, self.target_mapping, distance="not_a_distance", quiet=True
)
with self.assertRaises(ValueError):
_ = create_multi_mapping(
[(src_mapping, "out")],
[(self.target_mapping, "in")],
distance="not_a_distance",
quiet=True,
)
# White box testing: monkey-patch an invalid distance to validate the
# second way we make sure distances are supported
DISTANCE_METRICS.append("not_a_real_distance")
with self.assertRaises(ValueError), self.assertLogs(LOGGER, level="ERROR"):
_ = create_mapping(
src_mapping,
self.target_mapping,
distance="not_a_real_distance",
quiet=True,
)
with self.assertRaises(ValueError), self.assertLogs(LOGGER, level="ERROR"):
_ = create_multi_mapping(
[(src_mapping, "out")],
[(self.target_mapping, "in")],
distance="not_a_real_distance",
quiet=True,
)
DISTANCE_METRICS.pop()
def test_distances(self):
        # These mappings are chosen to create different generated mappings
# from the various distances.
src_mappings = [
{"in": "ᐧᐯ", "out": "pʷeː"},
{"in": "ᒋ", "out": "t͡ʃi"},
{"in": "ᕃ", "out": "ʁaj"},
]
src_mapping = Mapping(src_mappings, in_lang="crj", out_lang="crj-ipa")
mapping = create_mapping(src_mapping, self.target_mapping, quiet=True)
# print("mapping", mapping, list(mapping), "distance", "default")
self.assertTrue(isinstance(mapping, Mapping))
set_of_mappings = {tuple(m["out"] for m in mapping)}
for distance in DISTANCE_METRICS:
mapping = create_mapping(
src_mapping, self.target_mapping, distance=distance, quiet=True
)
# print("mapping", mapping, list(mapping), "distance", distance)
self.assertTrue(isinstance(mapping, Mapping))
set_of_mappings.add(tuple(m["out"] for m in mapping))
mapping = create_multi_mapping(
[(src_mapping, "out")],
[(self.target_mapping, "in")],
distance=distance,
quiet=True,
)
self.assertTrue(isinstance(mapping, Mapping))
set_of_mappings.add(tuple(m["out"] for m in mapping))
self.assertGreater(len(set_of_mappings), 3)
if __name__ == "__main__":
main() | null |
726 | #!/usr/bin/env python3
import argparse
import time
import numpy as np
import arkouda as ak
TYPES = ("int64", "float64", "bool")
def time_ak_gather(isize, vsize, trials, dtype, random, seed):
print(">>> arkouda {} gather".format(dtype))
cfg = ak.get_config()
Ni = isize * cfg["numLocales"]
Nv = vsize * cfg["numLocales"]
print("numLocales = {}, num_indices = {:,} ; num_values = {:,}".format(cfg["numLocales"], Ni, Nv))
# Index vector is always random
i = ak.randint(0, Nv, Ni, seed=seed)
if seed is not None:
seed += 1
if random or seed is not None:
if dtype == "int64":
v = ak.randint(0, 2**32, Nv, seed=seed)
elif dtype == "float64":
v = ak.randint(0, 1, Nv, dtype=ak.float64, seed=seed)
elif dtype == "bool":
v = ak.randint(0, 1, Nv, dtype=ak.bool, seed=seed)
elif dtype == "str":
v = ak.random_strings_uniform(1, 16, Nv, seed=seed)
else:
if dtype == "str":
v = ak.cast(ak.arange(Nv), "str")
else:
v = ak.ones(Nv, dtype=dtype)
timings = []
for _ in range(trials):
start = time.time()
c = v[i]
end = time.time()
timings.append(end - start)
tavg = sum(timings) / trials
print("Average time = {:.4f} sec".format(tavg))
if dtype == "str":
offsets_transferred = 3 * c.size * 8
bytes_transferred = (c.size * 8) + (2 * c.nbytes)
bytes_per_sec = (offsets_transferred + bytes_transferred) / tavg
else:
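        # The factor of 3 approximates bytes moved per element: an index read,
        # a value read, and a result write.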
bytes_per_sec = (c.size * c.itemsize * 3) / tavg
print("Average rate = {:.2f} GiB/sec".format(bytes_per_sec / 2**30))
def time_np_gather(Ni, Nv, trials, dtype, random, seed):
print(">>> numpy {} gather".format(dtype))
print("num_indices = {:,} ; num_values = {:,}".format(Ni, Nv))
# Index vector is always random
if seed is not None:
np.random.seed(seed)
i = np.random.randint(0, Nv, Ni)
if random or seed is not None:
if dtype == "int64":
v = np.random.randint(0, 2**32, Nv)
elif dtype == "float64":
v = np.random.random(Nv)
elif dtype == "bool":
            v = np.random.randint(0, 1, Nv, dtype=bool)
elif dtype == "str":
v = np.array(np.random.randint(0, 2**32, Nv), dtype="str")
else:
v = np.ones(Nv, dtype=dtype)
timings = []
for _ in range(trials):
start = time.time()
c = v[i]
end = time.time()
timings.append(end - start)
tavg = sum(timings) / trials
print("Average time = {:.4f} sec".format(tavg))
bytes_per_sec = (c.size * c.itemsize * 3) / tavg
print("Average rate = {:.2f} GiB/sec".format(bytes_per_sec / 2**30))
def METHOD_NAME(dtype, random, seed):
Ni = 10**4
Nv = 10**4
if seed is not None:
np.random.seed(seed)
npi = np.random.randint(0, Nv, Ni)
aki = ak.array(npi)
if random or seed is not None:
if dtype == "int64":
npv = np.random.randint(0, 2**32, Nv)
elif dtype == "float64":
npv = np.random.random(Nv)
elif dtype == "bool":
            npv = np.random.randint(0, 1, Nv, dtype=bool)
elif dtype == "str":
npv = np.array([str(x) for x in np.random.randint(0, 2**32, Nv)])
else:
npv = np.ones(Nv, dtype=dtype)
akv = ak.array(npv)
npc = npv[npi]
akc = akv[aki]
if dtype == "str":
assert (npc == akc.to_ndarray()).all()
else:
assert np.allclose(npc, akc.to_ndarray())
def create_parser():
parser = argparse.ArgumentParser(description="Measure the performance of random gather: C = V[I]")
parser.add_argument("hostname", help="Hostname of arkouda server")
parser.add_argument("port", type=int, help="Port of arkouda server")
parser.add_argument(
"-n", "--size", type=int, default=10**8, help="Problem size: length of index and gather arrays"
)
parser.add_argument(
"-i", "--index-size", type=int, help="Length of index array (number of gathers to perform)"
)
parser.add_argument(
"-v", "--value-size", type=int, help="Length of array from which values are gathered"
)
parser.add_argument(
"-t", "--trials", type=int, default=6, help="Number of times to run the benchmark"
)
parser.add_argument(
"-d", "--dtype", default="int64", help="Dtype of value array ({})".format(", ".join(TYPES))
)
parser.add_argument(
"-r", "--randomize", default=False, action="store_true", help="Use random values instead of ones"
)
parser.add_argument(
"--numpy",
default=False,
action="store_true",
help="Run the same operation in NumPy to compare performance.",
)
parser.add_argument(
"--correctness-only",
default=False,
action="store_true",
help="Only check correctness, not performance.",
)
parser.add_argument(
"-s", "--seed", default=None, type=int, help="Value to initialize random number generator"
)
return parser
if __name__ == "__main__":
import sys
parser = create_parser()
args = parser.parse_args()
args.index_size = args.size if args.index_size is None else args.index_size
args.value_size = args.size if args.value_size is None else args.value_size
if args.dtype not in TYPES:
raise ValueError("Dtype must be {}, not {}".format("/".join(TYPES), args.dtype))
ak.verbose = False
ak.connect(args.hostname, args.port)
if args.correctness_only:
for dtype in TYPES:
METHOD_NAME(dtype, args.randomize, args.seed)
sys.exit(0)
print("size of index array = {:,}".format(args.index_size))
print("size of values array = {:,}".format(args.value_size))
print("number of trials = ", args.trials)
time_ak_gather(args.index_size, args.value_size, args.trials, args.dtype, args.randomize, args.seed)
if args.numpy:
time_np_gather(
args.index_size, args.value_size, args.trials, args.dtype, args.randomize, args.seed
)
print("Verifying agreement between arkouda and NumPy on small problem... ", end="")
METHOD_NAME(args.dtype, args.randomize, args.seed)
print("CORRECT")
sys.exit(0) | null |
727 | """
Natural Questions: a Benchmark for Question Answering Research
https://storage.googleapis.com/pub-tools-public-publication-data/pdf/1f7b46b5378d757553d3e92ead36bda2e4254244.pdf
The Natural Questions (NQ) corpus is a question-answering dataset that contains
questions from real users and requires QA systems to read and comprehend an entire
Wikipedia article that may or may not contain the answer to the question. The
inclusion of real user questions, and the requirement that solutions should read
an entire page to find the answer, cause NQ to be a more realistic and challenging
task than prior QA datasets.
TODO: NaturalQS has a *really* large train set that huggingface just automatically
downloads even if you don't use it. We should try to download only the val set and
not even bother with the train set.
Homepage: https://ai.google.com/research/NaturalQuestions
"""
from lm_eval.base import Task
from itertools import islice
_CITATION = """
@article{47761,
title={Natural Questions: a Benchmark for Question Answering Research},
author={Tom Kwiatkowski and Jennimaria Palomaki and Olivia Redfield and Michael Collins and Ankur Parikh and Chris Alberti and Danielle Epstein and Illia Polosukhin and Matthew Kelcey and Jacob Devlin and Kenton Lee and Kristina N. Toutanova and Llion Jones and Ming-Wei Chang and Andrew Dai and Jakob Uszkoreit and Quoc Le and Slav Petrov},
year={2019},
journal={Transactions of the Association of Computational Linguistics}
}
"""
class NaturalQs(Task):
VERSION = 0
DATASET_PATH = "natural_questions"
DATASET_NAME = None
def has_training_docs(self):
return True
def has_validation_docs(self):
return True
def METHOD_NAME(self):
return False
def training_docs(self):
# Cache training for faster few-shot.
# Data is too large to fit in memory.
if self._training_docs is None:
self._training_docs = list(self.dataset["train"])
return self._training_docs
def validation_docs(self):
return self.dataset["validation"]
def fewshot_examples(self, k, rnd):
# Data is too large to fit in memory. We just sample from the first bit.
if self._training_docs is None:
self._training_docs = list(islice(self.training_docs(), 0, 100000))
return rnd.sample(self._training_docs, k)
def doc_to_text(self, doc):
return "Q: " + doc["question"]["text"] + "\n\n" + "A:"
def should_decontaminate(self):
return True
def doc_to_decontamination_query(self, doc):
return doc["question"]["text"]
def doc_to_target(self, doc):
# There's a short answer and a long answer. Based on the paper, I'm using the long answer.
# short_answer = doc["annotations"]["short_answers"][0]["text"]
long_answer_start = doc["annotations"]["long_answer"][0]["start_token"]
long_answer_end = doc["annotations"]["long_answer"][0]["end_token"]
long_answer_span = doc["document"]["tokens"]["token"][
long_answer_start:long_answer_end
]
long_answer_is_html = doc["document"]["tokens"]["is_html"][
long_answer_start:long_answer_end
]
long_answer_chars = [
tok
for (tok, is_html) in zip(long_answer_span, long_answer_is_html)
if not is_html
]
long_answer = " ".join(long_answer_chars)
return long_answer # Replace with short_answer[0] for short answer
def construct_requests(self, doc, ctx):
"""Uses RequestFactory to construct Requests and returns an iterable of
Requests which will be sent to the LM.
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param ctx: str
The context string, generated by fewshot_context. This includes the natural
language description, as well as the few shot examples, and the question
part of the document for `doc`.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def process_results(self, doc, results):
"""Take a single document and the LM results and evaluates, returning a
dict where keys are the names of submetrics and values are the values of
the metric for that one document
:param doc:
The document as returned from training_docs, validation_docs, or test_docs.
:param results:
The results of the requests created in construct_requests.
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def aggregation(self):
"""
:returns: {str: [float] -> float}
A dictionary where keys are the names of submetrics and values are
functions that aggregate a list of metrics
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented")
def higher_is_better(self):
"""
:returns: {str: bool}
A dictionary where keys are the names of submetrics and values are
whether a higher value of the submetric is better
"""
# TODO: implement evaluation.
raise NotImplementedError("Evaluation not implemented") | null |
728 | import pytest
from lark.exceptions import UnexpectedToken
from pharmpy.model import ModelSyntaxError
def test_data_filename_get(parser):
record = parser.parse('$DATA "pheno.dta"').records[0]
assert record.filename == 'pheno.dta'
record = parser.parse('$DATA /home/full/pheno.dta').records[0]
assert record.filename == '/home/full/pheno.dta'
record = parser.parse("$DATA 'pheno.dta'").records[0]
assert str(record.root.subtree('filename')) == "'pheno.dta'"
assert record.filename == "pheno.dta"
record = parser.parse(r'$DATA "C:\windowspath\with space in.csv"').records[0]
assert record.filename == r'C:\windowspath\with space in.csv'
record = parser.parse('$DATA \n pheno.dta \n; comment\n').records[0]
assert record.filename == 'pheno.dta'
record = parser.parse('$DATA ; comment\n ; some comment line\n pheno.dta\n\n').records[0]
assert record.filename == 'pheno.dta'
def test_data_filename_set(parser):
record = parser.parse('$DATA DUMMY ; comment').records[0]
assert record.filename == 'DUMMY'
assert str(record) == '$DATA DUMMY ; comment'
# simple replace
record = record.set_filename('/new/path/to_file.txt')
assert record.filename == '/new/path/to_file.txt'
assert str(record) == '$DATA /new/path/to_file.txt ; comment'
# force quoting
record = record.set_filename('MUST=QUOTE')
assert record.filename == 'MUST=QUOTE'
assert str(record) == "$DATA 'MUST=QUOTE' ; comment"
# more complex example
text = '$DATA ; comment\n ; some comment line\n pheno.dta\n\n'
record = parser.parse(text).records[0]
assert record.filename == 'pheno.dta'
assert str(record) == text
# more complex replace
record = record.set_filename("'IGNORE'")
assert record.filename == "'IGNORE'"
assert str(record) == text.replace('pheno.dta', '"\'IGNORE\'"')
# *
record = parser.parse('$DATA DUMMY ; comment').records[0]
record = record.set_filename(None)
assert str(record) == '$DATA * ; comment'
def test_option_record(parser):
record = parser.parse('$DATA pheno.dta NOWIDE').records[0]
assert record.option_pairs == {'NOWIDE': None}
def METHOD_NAME(parser):
record = parser.parse('$DATA pheno.dta').records[0]
assert record.ignore_character is None
record = record.set_ignore_character('I')
assert record.ignore_character == 'I'
record = parser.parse('$DATA pheno.dta IGNORE=@').records[0]
assert record.filename == 'pheno.dta'
assert record.ignore_character == '@'
record = record.set_ignore_character('K')
assert record.ignore_character == 'K'
record = parser.parse('$DATA pheno.dta IGNORE="I"').records[0]
assert record.ignore_character == 'I'
record = parser.parse('$DATA pheno.dta IGNORE=\'"\'').records[0]
assert record.ignore_character == '"'
record = parser.parse('$DATA pheno.dta IGNORE=K IGNORE=(ID.EQ.2)').records[0]
assert record.ignore_character == 'K'
record = parser.parse('$DATA pheno.dta IGNORE=(DV==3) IGNORE=C').records[0]
assert record.ignore_character == 'C'
record = record.set_ignore_character('@')
assert record.ignore_character == '@'
assert str(record.ignore[0]) == 'DV==3'
record = parser.parse('$DATA pheno.dta IGNORE=,').records[0]
assert record.ignore_character == ','
record = parser.parse('$DATA pheno.dta IGNORE="').records[0]
assert record.ignore_character == '"'
record = record.set_ignore_character('"')
assert record.ignore_character == '"'
assert str(record) == '$DATA pheno.dta IGNORE="'
with pytest.raises(UnexpectedToken):
record = parser.parse('$DATA pheno.dta IGNORE=""').records[0]
record = parser.parse('$DATA pheno.dta IGNORE=c IGNORE=@').records[0]
with pytest.raises(ModelSyntaxError):
record.ignore_character
def test_ignore_character_from_header(parser):
record = parser.parse('$DATA pheno.dta').records[0]
assert record.ignore_character is None
record = record.set_ignore_character_from_header("ID")
assert record.ignore_character == '@'
record = record.set_ignore_character_from_header("_ID")
assert record.ignore_character == '_'
def test_null_value(parser):
record = parser.parse('$DATA pheno.dta NULL=1').records[0]
assert record.null_value == 1
record = parser.parse('$DATA pheno.dta NULL=+').records[0]
assert record.null_value == 0
def test_ignore_accept(parser):
record = parser.parse('$DATA pheno.dta IGNORE=(DV.EQ.1)').records[0]
assert str(record.ignore[0]) == 'DV.EQ.1'
assert record.accept == []
record = record.remove_ignore()
assert record.ignore == []
assert record.accept == []
record = parser.parse('$DATA pheno.dta ACCEPT=(DV.EQ.1, MDV.NEN.23)').records[0]
assert str(record.accept[0]) == 'DV.EQ.1'
assert str(record.accept[1]) == 'MDV.NEN.23'
assert record.ignore == []
record = record.remove_accept()
assert record.ignore == []
assert record.accept == []
record = parser.parse('$DATA pheno.dta IGNORE=(WGT < 1 ,\n ID\n.EQ."lk")').records[0]
    assert str(record.ignore[0]) == 'WGT < 1'
    assert str(record.ignore[1]) == 'ID\n.EQ."lk"'
record = parser.parse('$DATA ../pheno.dta IGNORE=@ IGNORE(APGR.GT.23)\n').records[0]
record = record.remove_ignore().remove_accept()
assert str(record) == '$DATA ../pheno.dta IGNORE=@ \n'
def test_comments(parser):
record = parser.parse('$DATA pheno.dta IGNORE=@;MYCOMMENT').records[0]
assert str(record) == '$DATA pheno.dta IGNORE=@;MYCOMMENT'
def test_data_infile(parser):
record = parser.parse('$INFILE pheno.dta').records[0]
assert record.name == 'DATA'
assert record.filename == 'pheno.dta'
assert record.raw_name == '$INFILE'
def test_comment(parser):
contents = r"""$DATA cpt7.dta IGNORE= #
; Dataset
"""
record = parser.parse(contents).records[0]
record = record.set_ignore_character("A")
assert str(record) == '$DATA cpt7.dta \n; Dataset\nIGNORE=A\n' | null |
729 | # Copyright (C) 2018 - Today: GRAP (http://www.grap.coop)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# Copyright 2020 Camptocamp SA (http://www.camptocamp.com)
# @author Simone Orsi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo.addons.shopinvader.tests.common import ProductCommonCase
class ProductLinkCaseBase(ProductCommonCase):
@classmethod
def METHOD_NAME(cls):
super().METHOD_NAME()
cls.env = cls.env(
context=dict(
cls.env.context,
# compatibility flag when you run tests on a db
# where `product_variant_multi_link` is installed.
_product_variant_link_bypass_check=True,
)
)
cls.template_1 = cls.env.ref(
"shopinvader.product_template_armchair_mid_century"
)
cls.template_1.product_template_link_ids.unlink()
cls.template_2 = cls.env.ref("shopinvader.product_template_chair_mid_century")
cls.template_2.product_template_link_ids.unlink()
cls.template_3 = cls.env.ref(
"shopinvader.product_template_tv_cabinet_shaker_wood"
)
cls.template_3.product_template_link_ids.unlink()
cls.variant_1_1 = cls.template_1.product_variant_ids[0]
cls.variant_1_2 = cls.template_1.product_variant_ids[1]
cls.variant_2_1 = cls.template_2.product_variant_ids[0]
cls.variant_2_2 = cls.template_2.product_variant_ids[1]
cls.variant_3_1 = cls.template_3.product_variant_ids[0]
cls.variant_3_2 = cls.template_3.product_variant_ids[1]
cls.shopinvader_variant_1_1 = cls.variant_1_1._get_invader_variant(
cls.backend, "en_US"
)
cls.shopinvader_variant_1_2 = cls.variant_1_2._get_invader_variant(
cls.backend, "en_US"
)
cls.shopinvader_variant_2_1 = cls.variant_2_1._get_invader_variant(
cls.backend, "en_US"
)
cls.shopinvader_variant_2_2 = cls.variant_2_2._get_invader_variant(
cls.backend, "en_US"
)
cls.shopinvader_variant_3_1 = cls.variant_3_1._get_invader_variant(
cls.backend, "en_US"
)
cls.shopinvader_variant_3_2 = cls.variant_3_2._get_invader_variant(
cls.backend, "en_US"
)
cls.link_type_asym = cls.env["product.template.link.type"].create(
{"name": "One way link", "code": "one-way", "is_symmetric": False}
)
cls.cross_selling_type = cls.env["product.template.link.type"].get_by_code(
"cross-selling"
)
cls.up_selling_type = cls.env["product.template.link.type"].get_by_code(
"up-selling"
)
cls._create_links()
@classmethod
def _create_links(cls):
cls.link_upselling_1_2 = cls.env["product.template.link"].create(
{
"left_product_tmpl_id": cls.template_1.id,
"right_product_tmpl_id": cls.template_2.id,
"type_id": cls.up_selling_type.id,
}
)
cls.link_crosselling_1_3 = cls.env["product.template.link"].create(
{
"left_product_tmpl_id": cls.template_1.id,
"right_product_tmpl_id": cls.template_3.id,
"type_id": cls.cross_selling_type.id,
}
)
cls.link_crosselling_2_3 = cls.env["product.template.link"].create(
{
"left_product_tmpl_id": cls.template_2.id,
"right_product_tmpl_id": cls.template_3.id,
"type_id": cls.env.ref(
"product_template_multi_link."
"product_template_link_type_cross_selling"
).id,
}
)
cls.link_one_way_3_2 = cls.env["product.template.link"].create(
{
"left_product_tmpl_id": cls.template_3.id,
"right_product_tmpl_id": cls.template_2.id,
"type_id": cls.link_type_asym.id,
}
) | null |
730 | # -*- coding: utf-8 -*-
#
# This file is part of INGInious. See the LICENSE and the COPYRIGHTS files for
# more information about the licensing of this file.
""" TemplateManager """
import os
from functools import lru_cache
from jinja2 import Environment, FileSystemLoader, select_autoescape
import inginious
import json
class TemplateHelper(object):
""" Class accessible from templates that calls function defined in the Python part of the code. """
def __init__(self, plugin_manager, user_manager, use_minified=True):
"""
Init the Template Helper
:param plugin_manager: an instance of a PluginManager
:param user_manager: an instance of UserManager.
        :param use_minified: whether to use minified js/css or not. Use True in production, False in dev envs.
"""
self._base_helpers = {"header_hook": (lambda **kwargs: self._generic_hook('header_html', **kwargs)),
"main_menu": (lambda **kwargs: self._generic_hook('main_menu', **kwargs)),
"course_menu": (lambda **kwargs: self._generic_hook('course_menu', **kwargs)),
"submission_admin_menu": (lambda **kwargs: self._generic_hook('submission_admin_menu', **kwargs)),
"task_list_item": (lambda **kwargs: self._generic_hook('task_list_item', **kwargs)),
"task_menu": (lambda **kwargs: self._generic_hook('task_menu', **kwargs)),
"welcome_text": (lambda **kwargs: self._generic_hook('welcome_text', **kwargs)),
"javascript_header": (lambda **_: self._javascript_helper("header")),
"javascript_footer": (lambda **_: self._javascript_helper("footer")),
"css": (lambda **_: self._css_helper())}
self._plugin_manager = plugin_manager
self._template_dir = 'frontend/templates'
self._user_manager = user_manager # can be None!
self._layout_old = 'frontend/templates/layout_old'
self._template_globals = {}
self._ctx = {"javascript": {"footer": [], "header": []}, "css": []}
self.add_to_template_globals("template_helper", self)
self.add_to_template_globals("plugin_manager", plugin_manager)
self.add_to_template_globals("use_minified", use_minified)
self.add_to_template_globals("is_lti", self.is_lti)
self.add_to_template_globals("json", self.METHOD_NAME)
def is_lti(self):
""" True if the current session is an LTI one """
return self._user_manager is not None and self._user_manager.session_lti_info() is not None
def add_to_template_globals(self, name, value):
""" Add a variable to will be accessible in the templates """
self._template_globals[name] = value
def render(self, path, template_folder="", **tpl_kwargs):
"""
Parse the Jinja template named "path" and render it with args ``*tpl_args`` and ``**tpl_kwargs``
:param path: Path of the template, relative to the base folder
:param template_folder: add the specified folder to the templates PATH.
:param tpl_kwargs: named args sent to the template
:return: the rendered template, as a str
"""
return self._get_jinja_renderer(template_folder).get_template(path).render(**tpl_kwargs)
@lru_cache(None)
def _get_jinja_renderer(self, template_folder=""):
# Always include the main template folder
template_folders = [os.path.join(inginious.get_root_path(), self._template_dir)]
# Include the additional template folder if specified
if template_folder:
template_folders += [os.path.join(inginious.get_root_path(), template_folder)]
env = Environment(loader=FileSystemLoader(template_folders),
autoescape=select_autoescape(['html', 'htm', 'xml']))
env.globals.update(self._template_globals)
return env
def call(self, name, **kwargs):
helpers = dict(list(self._base_helpers.items()) + self._plugin_manager.call_hook("template_helper"))
if helpers.get(name, None) is None:
return ""
else:
return helpers[name](**kwargs)
def add_javascript(self, link, position="footer"):
""" Add a javascript file to load. Position can either be "header" or "footer" """
self._get_ctx()["javascript"][position].append(link)
def add_css(self, link):
""" Add a css file to load """
self._get_ctx()["css"].append(link)
def add_other(self, name, func):
""" Add another callback to the template helper """
self._base_helpers[name] = func
def _javascript_helper(self, position):
""" Add javascript links for the current page and for the plugins """
if position not in ["header", "footer"]:
position = "footer"
# Load javascript files from plugins
if position == "header":
entries = [entry for entry in self._plugin_manager.call_hook("javascript_header") if entry is not None]
else:
entries = [entry for entry in self._plugin_manager.call_hook("javascript_footer") if entry is not None]
# Load javascript for the current page
entries += self._get_ctx()["javascript"][position]
entries = ["<script src='" + entry + "' type='text/javascript' charset='utf-8'></script>" for entry in entries]
return "\n".join(entries)
def _css_helper(self):
""" Add CSS links for the current page and for the plugins """
entries = [entry for entry in self._plugin_manager.call_hook("css") if entry is not None]
# Load javascript for the current page
entries += self._get_ctx()["css"]
entries = ["<link href='" + entry + "' rel='stylesheet'>" for entry in entries]
return "\n".join(entries)
def _get_ctx(self):
""" Get web.ctx object for the Template helper """
return self._ctx
def _generic_hook(self, name, **kwargs):
""" A generic hook that links the TemplateHelper with PluginManager """
entries = [entry for entry in self._plugin_manager.call_hook(name, **kwargs) if entry is not None]
return "\n".join(entries)
def METHOD_NAME(self, data):
""" Make a json dump of `data`, that can be used directly in a `<script>` tag. Available as json() inside templates """
return json.dumps(data).replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027' | null |
731 | # Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from collections import defaultdict
import glob
import os
import unittest
from sublime import active_window
from NeoVintageous.tests.unittest import Region
_path_to_test_specs = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
# NOTE
#
# Command tests are declared in a special text format in files with the
# .cmd-test extension. Several tests can be declared in the same file. This
# makes it easier to group tests.
#
# Special attention must be paid to whitespace: it counts for tests.
_TEST_HEADER_DELIM = '***\n' # Comes after the header.
_TEST_DELIM = '\n---///---\n' # Delimits tests.
_TEST_RESULTS_DELIM = '\n---\n' # Separates the test declaration from the expected result.
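#
# A hypothetical spec in this format (the command name, args and text below are
# assumptions for illustration, not taken from a real test file):
#
#   _some_motion mode:mode_normal count:1
#   moves the caret to the next word
#   ***
#   ^fizz$ buzz
#   ---
#   fizz ^buzz$
#   ---///---
#   ...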
_CONVERTERS = defaultdict(lambda: (lambda x: str(x))) # type: dict
_CONVERTERS['mode'] = str
_CONVERTERS['count'] = int
def _make_args(args):
arg_dict = {}
for a in args:
name, value = a.split(':', 1)
arg_dict[name] = _CONVERTERS[name](value)
return arg_dict
def _process_notation(text, sel_start_token='^', sel_end_token='$'):
"""
Process @text assuming it contains markers defining selections.
@text
Text that contains @sel_start_token's and @sel_end_token's to define
selection regions.
@sel_start_token
    Marks the start of a selection region. Removed from the text.
@sel_end_token
Marks the end of a selection region. Removed from the text.
Reversed selections can be defined too.
Returns (selections, processed_text), where `selections` are valid ST
ranges, and `processed_text` is @text without the special symbols.
"""
deletions = 0
start = None
selections = []
chars = []
pos = 0
while pos < len(text):
c = text[pos]
if c == sel_start_token:
if start == sel_start_token:
                raise ValueError('unexpected token %s at %d' % (c, pos))
if start is None:
start = pos - deletions
else:
selections.append(Region(start, pos - deletions)) # type: ignore[unreachable]
start = None
deletions += 1
elif c == sel_end_token:
if start == sel_end_token:
                raise ValueError('unexpected token %s at %d' % (c, pos))
if start is None:
start = pos - deletions
else:
selections.append(Region(start, pos - deletions))
start = None
deletions += 1
else:
chars.append(c)
pos += 1
if start is not None:
        raise ValueError('wrong format, orphan ^ at %d' % (start + deletions))
return selections, ''.join(chars)
class CommandTest(object):
def __init__(self, cmd_name, args, description, before_text, after_text, file_name, test_nr, options=None):
self.cmd_name = cmd_name
self.args = args
self.description = description
self.before_text = before_text
self.after_text = after_text
self.file_name = file_name
self.test_nr = test_nr
self.options = options
@property
def message(self):
return "Failure in File: {0} Test Nr.: {1} -- {2}".format(self.file_name, self.test_nr, self.description)
@staticmethod
def METHOD_NAME(text, file_name, test_nr):
"""Create a test instance from a textual representation."""
header, body = text.split(_TEST_HEADER_DELIM, 1)
header, description = header.split('\n', 1)
description, options = CommandTest.process_description(description)
cmd_name, args = header.split(' ', 1)
args = _make_args(args.split())
assert 'mode' in args, 'all commands need to know the current mode'
before, after = body.split(_TEST_RESULTS_DELIM)
return CommandTest(cmd_name, args, description, before, after, file_name, test_nr, options)
@staticmethod
def process_description(text):
lines = text.split('\n')
description = lines
options_line = lines[0]
opts = {} # type: dict
if options_line.startswith('//options: '):
description = lines[1:]
raw_opts = options_line[11:].split()
opts = _make_args(raw_opts)
return '\n'.join(description), opts
def run_with(self, runner):
before_sels, before_text = _process_notation(self.before_text)
runner.append(before_text)
runner.set_sels(before_sels)
view = runner.view
view.run_command(self.cmd_name, self.args)
after_sels, after_text = _process_notation(self.after_text)
runner.assertEqual(view.substr(Region(0, view.size())), after_text, self.message)
runner.assertEqual(list(view.sel()), after_sels, self.message)
class CommandTestCase(unittest.TestCase):
"""
    Runs tests based on cmd-test spec files (*.cmd-test / *.motion-test).
Subclasses must implement setUp() and in it set self.path_to_test_specs.
"""
def get_motion_tests(self):
specs = self.get_tests("*.motion-test")
return specs
def get_action_tests(self):
specs = self.get_tests("*.cmd-test")
return specs
def get_tests(self, ext):
"""Yield `CommandTest`s found under the self.path_to_test_specs dir."""
specs = glob.glob(os.path.join(self.path_to_test_specs, ext + "-solo"))
if specs:
specs = specs[0:1]
else:
specs = glob.glob(os.path.join(self.path_to_test_specs, ext))
return specs
def iter_tests(self):
specs = self.get_motion_tests() + self.get_action_tests()
for spec_path in specs:
spec_path = os.path.abspath(spec_path)
content = None
with open(spec_path, 'rt') as f:
content = f.read()
tests = content.split(_TEST_DELIM)
for i, test in enumerate(tests):
if not test:
continue
yield CommandTest.METHOD_NAME(test, spec_path, i)
def append(self, text):
self.view.run_command('append', {'characters': text}) # type: ignore[has-type]
def reset(self):
if getattr(self, "view", None):
self.view.close() # type: ignore[has-type]
self.view = active_window().new_file()
self.view.set_scratch(True)
def set_sels(self, sels):
"""
        Replace the view's selections with @sels (a list of Regions).
        Test specs declare selections with the ^...$ notation, which
        _process_notation() converts into the regions passed here.
"""
self.view.sel().clear()
self.view.sel().add_all(sels)
class TestAllCommands(CommandTestCase):
def setUp(self):
self.path_to_test_specs = _path_to_test_specs
def test_all(self):
self.reset()
for test in self.iter_tests():
test.run_with(self)
self.reset()
if self.view.is_scratch():
self.view.close()
def tearDown(self):
if self.view.is_scratch():
self.view.close()
super().tearDown() | null |
732 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkemr.endpoint import endpoint_data
class RunClusterServiceActionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Emr', '2016-04-08', 'RunClusterServiceAction')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HostGroupIdLists(self):
return self.get_query_params().get('HostGroupIdList')
def set_HostGroupIdLists(self, HostGroupIdLists):
for depth1 in range(len(HostGroupIdLists)):
if HostGroupIdLists[depth1] is not None:
self.add_query_param('HostGroupIdList.' + str(depth1 + 1) , HostGroupIdLists[depth1])
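        # e.g. ['hg-1', 'hg-2'] becomes query params HostGroupIdList.1=hg-1, HostGroupIdList.2=hg-2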
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def METHOD_NAME(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ServiceActionName(self):
return self.get_query_params().get('ServiceActionName')
def set_ServiceActionName(self,ServiceActionName):
self.add_query_param('ServiceActionName',ServiceActionName)
def get_IsRolling(self):
return self.get_query_params().get('IsRolling')
def set_IsRolling(self,IsRolling):
self.add_query_param('IsRolling',IsRolling)
def get_TotlerateFailCount(self):
return self.get_query_params().get('TotlerateFailCount')
def set_TotlerateFailCount(self,TotlerateFailCount):
self.add_query_param('TotlerateFailCount',TotlerateFailCount)
def get_ServiceName(self):
return self.get_query_params().get('ServiceName')
def set_ServiceName(self,ServiceName):
self.add_query_param('ServiceName',ServiceName)
def get_ExecuteStrategy(self):
return self.get_query_params().get('ExecuteStrategy')
def set_ExecuteStrategy(self,ExecuteStrategy):
self.add_query_param('ExecuteStrategy',ExecuteStrategy)
def get_OnlyRestartStaleConfigNodes(self):
return self.get_query_params().get('OnlyRestartStaleConfigNodes')
def set_OnlyRestartStaleConfigNodes(self,OnlyRestartStaleConfigNodes):
self.add_query_param('OnlyRestartStaleConfigNodes',OnlyRestartStaleConfigNodes)
def get_NodeCountPerBatch(self):
return self.get_query_params().get('NodeCountPerBatch')
def set_NodeCountPerBatch(self,NodeCountPerBatch):
self.add_query_param('NodeCountPerBatch',NodeCountPerBatch)
def get_ClusterId(self):
return self.get_query_params().get('ClusterId')
def set_ClusterId(self,ClusterId):
self.add_query_param('ClusterId',ClusterId)
def get_CustomCommand(self):
return self.get_query_params().get('CustomCommand')
def set_CustomCommand(self,CustomCommand):
self.add_query_param('CustomCommand',CustomCommand)
def get_ComponentNameList(self):
return self.get_query_params().get('ComponentNameList')
def set_ComponentNameList(self,ComponentNameList):
self.add_query_param('ComponentNameList',ComponentNameList)
def get_Comment(self):
return self.get_query_params().get('Comment')
def set_Comment(self,Comment):
self.add_query_param('Comment',Comment)
def get_CustomParams(self):
return self.get_query_params().get('CustomParams')
def set_CustomParams(self,CustomParams):
self.add_query_param('CustomParams',CustomParams)
def get_Interval(self):
return self.get_query_params().get('Interval')
def set_Interval(self,Interval):
self.add_query_param('Interval',Interval)
def get_HostIdList(self):
return self.get_query_params().get('HostIdList')
def set_HostIdList(self,HostIdList):
self.add_query_param('HostIdList',HostIdList)
def get_TurnOnMaintenanceMode(self):
return self.get_query_params().get('TurnOnMaintenanceMode')
def set_TurnOnMaintenanceMode(self,TurnOnMaintenanceMode):
        self.add_query_param('TurnOnMaintenanceMode',TurnOnMaintenanceMode) | null
733 | #
# This file is part of LiteDRAM.
#
# Copyright (c) 2020 Antmicro <www.antmicro.com>
# SPDX-License-Identifier: BSD-2-Clause
import unittest
from migen import *
from litex.gen.sim import *
from litedram.common import *
from litedram.frontend.dma import *
from test.common import *
class DMAWriterDriver:
def __init__(self, dma):
self.dma = dma
def write(self, pattern):
yield self.dma.sink.valid.eq(1)
for adr, data in pattern:
yield self.dma.sink.address.eq(adr)
yield self.dma.sink.data.eq(data)
while not (yield self.dma.sink.ready):
yield
yield
yield self.dma.sink.valid.eq(0)
@staticmethod
def wait_complete(port, n):
for _ in range(n):
while not (yield port.wdata.ready):
yield
yield
class DMAReaderDriver:
def __init__(self, dma):
self.dma = dma
self.data = []
def read(self, address_list):
n_last = len(self.data)
yield self.dma.sink.valid.eq(1)
for adr in address_list:
yield self.dma.sink.address.eq(adr)
while not (yield self.dma.sink.ready):
yield
while (yield self.dma.sink.ready):
yield
yield self.dma.sink.valid.eq(0)
while len(self.data) < n_last + len(address_list):
yield
@passive
def read_handler(self):
yield self.dma.source.ready.eq(1)
while True:
if (yield self.dma.source.valid):
self.data.append((yield self.dma.source.data))
yield
class TestDMA(MemoryTestDataMixin, unittest.TestCase):
# LiteDRAMDMAWriter ----------------------------------------------------------------------------
def dma_writer_test(self, pattern, mem_expected, data_width, **kwargs):
class DUT(Module):
def __init__(self):
self.port = LiteDRAMNativeWritePort(address_width=32, data_width=data_width)
self.submodules.dma = LiteDRAMDMAWriter(self.port, **kwargs)
dut = DUT()
driver = DMAWriterDriver(dut.dma)
mem = DRAMMemory(data_width, len(mem_expected))
generators = [
driver.write(pattern),
driver.wait_complete(dut.port, len(pattern)),
mem.write_handler(dut.port),
]
run_simulation(dut, generators)
self.assertEqual(mem.mem, mem_expected)
def test_dma_writer_single(self):
        # Verify DMAWriter with a single 32-bit value.
pattern = [(0x04, 0xdeadc0de)]
mem_expected = [0] * 32
mem_expected[0x04] = 0xdeadc0de
self.dma_writer_test(pattern, mem_expected, data_width=32)
def test_dma_writer_multiple(self):
        # Verify DMAWriter with multiple 32-bit values.
data = self.pattern_test_data["32bit"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
def test_dma_writer_sequential(self):
        # Verify DMAWriter with sequential 32-bit values.
data = self.pattern_test_data["32bit_sequential"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
def test_dma_writer_long_sequential(self):
        # Verify DMAWriter with long sequential 32-bit values.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
def test_dma_writer_no_fifo(self):
# Verify DMAWriter without FIFO.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32, fifo_depth=1)
def test_dma_writer_fifo_buffered(self):
# Verify DMAWriter with a buffered FIFO.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32, fifo_buffered=True)
def test_dma_writer_duplicates(self):
        # Verify DMAWriter with duplicate addresses.
data = self.pattern_test_data["32bit_duplicates"]
self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
# LiteDRAMDMAReader ----------------------------------------------------------------------------
def dma_reader_test(self, pattern, mem_expected, data_width, **kwargs):
class DUT(Module):
def __init__(self):
self.port = LiteDRAMNativeReadPort(address_width=32, data_width=data_width)
self.submodules.dma = LiteDRAMDMAReader(self.port, **kwargs)
dut = DUT()
driver = DMAReaderDriver(dut.dma)
mem = DRAMMemory(data_width, len(mem_expected), init=mem_expected)
generators = [
driver.read([adr for adr, data in pattern]),
driver.read_handler(),
mem.read_handler(dut.port),
]
run_simulation(dut, generators)
self.assertEqual(driver.data, [data for adr, data in pattern])
def METHOD_NAME(self):
        # Verify DMAReader with a single 32-bit value.
pattern = [(0x04, 0xdeadc0de)]
mem_expected = [0] * 32
mem_expected[0x04] = 0xdeadc0de
self.dma_reader_test(pattern, mem_expected, data_width=32)
def test_dma_reader_multiple(self):
        # Verify DMAReader with multiple 32-bit values.
data = self.pattern_test_data["32bit"]
self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
def test_dma_reader_sequential(self):
        # Verify DMAReader with sequential 32-bit values.
data = self.pattern_test_data["32bit_sequential"]
self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
def test_dma_reader_long_sequential(self):
        # Verify DMAReader with long sequential 32-bit values.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
def test_dma_reader_no_fifo(self):
# Verify DMAReader without FIFO.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_reader_test(data["pattern"], data["expected"], data_width=32, fifo_depth=1)
def test_dma_reader_fifo_buffered(self):
# Verify DMAReader with a buffered FIFO.
data = self.pattern_test_data["32bit_long_sequential"]
self.dma_reader_test(data["pattern"], data["expected"], data_width=32, fifo_buffered=True) | null |
734 | """
very simple ini parser and tools
tested on python 3.6
contact: adalca at csail.mit.edu
TODO: consider using collections.namedtuple instead of Struct
"""
# built-in modules
# we'll need python's ini parser: 'configparser'
import configparser
def ini_to_struct(file):
"""
very simple ini parser that expands on configparser
    tries to cast values from string wherever possible
parsed data ini can be accessed with
data = ini_to_struct(file)
value = data.section.key
does not support hierarchical sections
Parameters:
file: string full filename of the ini file.
Returns:
stuct: a Struct that allows ini data to be access in the manner of data.section.key
"""
# read the file via config.
conf = configparser.ConfigParser()
confout = conf.read(file)
assert len(confout) > 0, 'Cannot read file %s ' % file
# prepare the Struct
strct = Struct()
# go through the sections in the ini file
for sec in conf.sections():
# each section is its own struct
secstrct = Struct()
# go through the keys
for key in conf[sec]:
val = conf[sec][key]
# try to cast the data
ret, done = str_convert_single(val)
# if couldn't cast, try a comma/whitespace separated list
if not done:
lst = METHOD_NAME(val)
# if the size of the list is 1, we didn't achieve anything
if len(lst) == 1:
ret = lst[0] # still not done
# if we actually get a list, only keep it if we can cast its elements to something
# otherwise keep the entry as an entire string
else:
# make sure all elements in the list convert to something
done = all([str_convert_single(v)[1] for v in lst])
if done:
ret = [str_convert_single(v)[0] for v in lst]
# defeated, accept the entry as just a simple string...
if not done:
ret = val # accept string
# assign secstrct.key = ret
setattr(secstrct, key, ret)
# assign strct.sec = secstrct
setattr(strct, sec, secstrct)
return strct
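# Illustrative usage (the file name and keys below are hypothetical):
#   data = ini_to_struct('settings.ini')  # containing: [model] \n layers = 3
#   data.model.layers  # -> 3 (cast to int)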
class Struct():
"""
a simple struct class to allow for the following syntax:
data = Struct()
data.foo = 'bar'
"""
def __str__(self):
return self.__dict__.__str__()
def str_to_none(val):
"""
cast a string to a None
Parameters:
val: the string to cast
Returns:
(casted_val, success)
casted val: the casted value if successful, or None
    success: True if casting was successful, False otherwise
"""
if val == 'None':
return (None, True)
else:
return (None, False)
def str_to_type(val, ctype):
"""
cast a string to a type (e.g. int('8')), with try/except
    do *not* use for bool casting; instead see str_to_bool
Parameters:
val: the string to cast
Returns:
(casted_val, success)
casted val: the casted value if successful, or None
success: bool if casting was successful
"""
    assert ctype is not bool, 'use str_to_bool() for casting to bool'
ret = None
success = True
try:
ret = ctype(val)
except ValueError:
success = False
return (ret, success)
def str_to_bool(val):
"""
cast a string to a bool
Parameters:
val: the string to cast
Returns:
(casted_val, success)
casted val: the casted value if successful, or None
success: bool if casting was successful
"""
if val == 'True':
return (True, True)
elif val == 'False':
return (False, True)
else:
return (None, False)
def METHOD_NAME(val):
"""
Split a string to a list of elements, where elements are separated by whitespace or commas
    Enclosing parentheses and square brackets are stripped.
    Parameters:
    val: the string to split
    Returns:
    lst: the list of string elements
"""
val = val.replace('[', '')
val = val.replace('(', '')
val = val.replace(']', '')
val = val.replace(')', '')
if ',' in val:
lst = val.split(',')
else:
lst = val.split()
return lst
def str_convert_single(val):
"""
try to cast a string to an int, float or bool (in that order)
Parameters:
val: the string to cast
Returns:
(casted_val, success)
casted val: the casted value if successful, or None
success: bool if casting was successful
"""
val = val.strip()
# try int
ret, done = str_to_type(val, int)
# try float
if not done:
ret, done = str_to_type(val, float)
# try bool
if not done:
ret, done = str_to_bool(val)
# try None
if not done:
ret, done = str_to_none(val)
return (ret, done)
| null |
735 | """Utilities for finding Galaxy's configuration file.
This is for use by web framework code and scripts (e.g. scripts/galaxy_main.py).
"""
import os
from typing import (
List,
NamedTuple,
Optional,
)
from galaxy.util.properties import find_config_file
from galaxy.web_stack import get_app_kwds
DEFAULT_INI_APP = "main"
DEFAULT_CONFIG_SECTION = "galaxy"
def default_relative_config_paths_for(app_name: str) -> List[str]:
paths = [f"config/{app_name}.yml", f"config/{app_name}.ini", "universe_wsgi.ini"]
# Do not load sample config for galaxy
if app_name != "galaxy":
paths.append(f"config/{app_name}.yml.sample")
return paths
def absolute_config_path(path, galaxy_root: Optional[str]) -> Optional[str]:
if path and not os.path.isabs(path) and galaxy_root:
path = os.path.join(galaxy_root, path)
return path
def METHOD_NAME(config_file: Optional[str]) -> bool:
return bool(config_file and (config_file.endswith(".ini") or config_file.endswith(".ini.sample")))
def find_config(supplied_config: Optional[str], galaxy_root: Optional[str], app_name: str = "galaxy") -> Optional[str]:
if supplied_config:
return supplied_config
if galaxy_root is None:
return os.path.abspath(f"{app_name}.yml")
for filename in default_relative_config_paths_for(app_name):
config_path = os.path.join(galaxy_root, filename)
if os.path.exists(config_path):
return config_path
return None
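# Resolution sketch (hypothetical paths, following the branches above):
#   find_config("/etc/galaxy.yml", "/srv/galaxy") -> "/etc/galaxy.yml" (supplied wins)
#   find_config(None, None) -> os.path.abspath("galaxy.yml")
#   find_config(None, "/srv/galaxy") -> first existing default path, e.g.
#       "/srv/galaxy/config/galaxy.yml", else None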
class WebappSetupProps(NamedTuple):
"""Basic properties to provide information about the App and the environment variables
used to resolve the App configuration."""
app_name: str
default_section_name: str
env_config_file: str
env_config_section: Optional[str] = None
check_galaxy_root: bool = False
class WebappConfig(NamedTuple):
"""The resolved configuration values for a Webapp."""
global_conf: dict
load_app_kwds: dict
wsgi_preflight: bool = False
class WebappConfigResolver:
def __init__(self, props: WebappSetupProps) -> None:
self.props = props
self.app_kwds = get_app_kwds(props.default_section_name, props.app_name)
self.config_file = self._resolve_config_file_path()
self.is_ini_file = METHOD_NAME(self.config_file)
self.config_section = self._resolve_section_name()
self._update_kwds()
os.environ["IS_WEBAPP"] = "1"
def resolve_config(self) -> WebappConfig:
global_conf = {}
if self.is_ini_file:
global_conf["__file__"] = self.config_file
return WebappConfig(global_conf=global_conf, load_app_kwds=self.app_kwds)
def _resolve_config_file_path(self) -> Optional[str]:
config_file = self.app_kwds.get("config_file")
if not config_file and os.environ.get(self.props.env_config_file):
config_file = os.path.abspath(os.environ[self.props.env_config_file])
elif self.props.check_galaxy_root:
galaxy_root = self.app_kwds.get("galaxy_root") or os.environ.get("GALAXY_ROOT_DIR")
config_file = find_config(config_file, galaxy_root, app_name=self.props.app_name)
config_file = absolute_config_path(config_file, galaxy_root=galaxy_root)
else:
config_file = find_config_file([self.props.app_name])
return config_file
def _resolve_section_name(self) -> str:
config_section = self.props.default_section_name
if self.props.env_config_section and self.props.env_config_section in os.environ:
config_section = os.environ[self.props.env_config_section]
elif self.is_ini_file:
config_section = f"app:{DEFAULT_INI_APP}"
return config_section
def _update_kwds(self) -> None:
if "config_file" not in self.app_kwds:
self.app_kwds["config_file"] = self.config_file
if "config_section" not in self.app_kwds:
self.app_kwds["config_section"] = self.config_section | null |
736 | from collections.abc import Collection, Container, Iterable, Iterator, Sequence, Sized
from inspect import signature
from typing import (
Any,
List,
Literal,
Optional,
Tuple,
Type,
Union,
get_args,
get_origin,
get_type_hints,
)
def METHOD_NAME(fn):
sig = signature(fn)
parameters = sig.parameters.values()
type_hints = None
def _wrapped(*args, **kwargs):
# NOTE This delays loading annotations until first use
nonlocal type_hints
if type_hints is None:
type_hints = get_type_hints(fn)
for i, parameter in enumerate(parameters):
if parameter.annotation is parameter.empty:
# NOTE Do not check anything if there is no annotation
continue
name = parameter.name
value = _arg(parameter, i, args, kwargs)
# NOTE We cannot use parameter.annotation as this does not work
# in combination with `from __future__ import annotations`.
# See https://peps.python.org/pep-0563/#introducing-a-new-dictionary-for-the-string-literal-form-instead
expected_types = _annotation_to_types(type_hints[name])
if not any(map(lambda expected_type: _match(expected_type, value), expected_types)):
raise TypeError(
f'Invalid `{parameter.name}`: got `{value}` of type {_value_type(value)},'
+ (
                        f' expected {expected_types[0]}.'
if len(expected_types) == 1
else f' expected one of {expected_types}.'
)
)
return fn(*args, **kwargs)
return _wrapped
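# Usage sketch (hypothetical function; the annotation checks run at call
# time, when the type hints are first resolved):
#
#   @METHOD_NAME
#   def greet(name: str, times: int = 1) -> None:
#       print("hello " + name * times)
#
#   greet("world", 2)    # passes
#   greet("world", "2")  # raises TypeError: Invalid `times`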
def _type(t):
return type(None) if t is None else t
def _annotation_to_types(annotation):
types = annotation if isinstance(annotation, list) else [annotation]
return [_type(t) for t in types]
def _value_type(value):
return (
Type[value] # pyright: ignore [reportGeneralTypeIssues]
if isinstance(value, type)
else type(value)
)
def _kwargs(parameter, kwargs):
try:
return kwargs[parameter.name]
except KeyError:
default = parameter.default
if default == parameter.empty:
raise KeyError
return default
def _arg(parameter, i, args, kwargs):
if parameter.kind == parameter.POSITIONAL_ONLY:
return args[i]
elif parameter.kind == parameter.POSITIONAL_OR_KEYWORD:
if i < len(args):
return args[i]
return _kwargs(parameter, kwargs)
elif parameter.kind == parameter.VAR_POSITIONAL:
raise NotImplementedError(parameter.kind)
elif parameter.kind == parameter.KEYWORD_ONLY:
return _kwargs(parameter, kwargs)
else:
assert parameter.kind == parameter.VAR_KEYWORD
raise NotImplementedError(parameter.kind)
def _match_sequence_items(args, value):
if args:
assert len(args) == 1
t = args[0]
return all(map(lambda v: _match(t, v), value))
else:
return True
def _match(typing, value):
origin = get_origin(typing)
if origin is None:
if typing is Any or typing is Optional:
return True
return isinstance(value, typing)
if origin is Literal:
# NOTE Empty literals return False
return any(map(lambda t: value == t, get_args(typing)))
if origin is list or origin is List:
return isinstance(value, list) and _match_sequence_items(get_args(typing), value)
if origin is Sequence:
return isinstance(value, Sequence) and _match_sequence_items(get_args(typing), value)
if origin is tuple or origin is Tuple:
if not isinstance(value, tuple):
return False
args = get_args(typing)
n = len(args)
if n == 2 and args[1] is Ellipsis:
return _match_sequence_items((args[0],), value)
else:
return len(value) == n and all(map(_match, args, value))
if origin is Union:
# NOTE Empty unions return False
return any(map(lambda t: _match(t, value), get_args(typing)))
if origin is Optional:
args = get_args(typing)
if args:
assert len(args) == 1
t = args[0]
return value is None or _match(t, value)
else:
return True
if origin is Sized:
try:
return isinstance(len(value), int)
except TypeError:
return False
if origin is Container:
# NOTE Cannot check value type because we do not know any candidate key
return hasattr(value, '__contains__')
if origin is Iterator:
# NOTE Cannot check value type because we do not know any candidate key
return hasattr(value, '__next__') and hasattr(value, '__iter__')
if origin is Iterable:
# NOTE Cannot check value type because of risk of side-effect
try:
iter(value)
return True
except TypeError:
return False
if origin is Collection:
t = get_args(typing)
return (
_match(Sized, value)
and _match(
Container[t] if t else Container, # pyright: ignore [reportGeneralTypeIssues]
value,
)
and _match(
Iterable[t] if t else Iterable, # pyright: ignore [reportGeneralTypeIssues]
value,
)
)
raise NotImplementedError(origin) | null |
737 | '''
Copyright (C) 2017-2023 Bryant Moscon - [email protected]
Please see the LICENSE file for the terms and conditions
associated with this software.
'''
import logging
from decimal import Decimal
from typing import Dict, Tuple
from yapic import json
from cryptofeed.connection import AsyncConnection, RestEndpoint, Routes, WebsocketEndpoint
from cryptofeed.defines import BID, ASK, BLOCKCHAIN, BUY, L2_BOOK, L3_BOOK, SELL, TRADES
from cryptofeed.exceptions import MissingSequenceNumber
from cryptofeed.feed import Feed
from cryptofeed.symbols import Symbol
from cryptofeed.types import OrderBook, Trade
LOG = logging.getLogger('feedhandler')
class Blockchain(Feed):
id = BLOCKCHAIN
websocket_endpoints = [WebsocketEndpoint('wss://ws.blockchain.info/mercury-gateway/v1/ws', options={'origin': 'https://exchange.blockchain.com'})]
rest_endpoints = [RestEndpoint('https://api.blockchain.com', routes=Routes('/mercury-gateway/v1/instruments'))]
websocket_channels = {
L3_BOOK: 'l3',
L2_BOOK: 'l2',
TRADES: 'trades',
}
@classmethod
def _parse_symbol_data(cls, data: dict) -> Tuple[Dict, Dict]:
info = {'instrument_type': {}}
ret = {}
for entry in data:
if entry['status'] != 'open':
continue
base, quote = entry['symbol'].split("-")
s = Symbol(base, quote)
ret[s.normalized] = entry['symbol']
info['instrument_type'][s.normalized] = s.type
return ret, info
def __reset(self):
self.seq_no = None
self._l2_book = {}
self._l3_book = {}
    async def _pair_l2_update(self, msg: dict, timestamp: float):
delta = {BID: [], ASK: []}
pair = self.exchange_symbol_to_std_symbol(msg['symbol'])
if msg['event'] == 'snapshot':
# Reset the book
self._l2_book[pair] = OrderBook(self.id, pair, max_depth=self.max_depth)
for side in (BID, ASK):
for update in msg[side + 's']:
price = update['px']
qty = update['qty']
self._l2_book[pair].book[side][price] = qty
if qty <= 0:
del self._l2_book[pair].book[side][price]
delta[side].append((price, qty))
await self.book_callback(L2_BOOK, self._l2_book[pair], timestamp, raw=msg, delta=delta if msg['event'] != 'snapshot' else None, sequence_number=msg['seqnum'])
    async def METHOD_NAME(self, msg: dict, timestamp: float):
"""
Subscribed message
{
"seqnum": 1,
"event": "subscribed",
"channel": "l2",
"symbol": "BTC-USD"
}
"""
if msg['event'] == 'subscribed':
LOG.debug("%s: Subscribed to L2 data for %s", self.id, msg['symbol'])
elif msg['event'] in ['snapshot', 'updated']:
await self._pair_l2_update(msg, timestamp)
else:
LOG.warning("%s: Unexpected message %s", self.id, msg)
    async def _pair_l3_update(self, msg: dict, timestamp: float):
delta = {BID: [], ASK: []}
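        # NOTE L3 deltas carry (order_id, price, qty) tuples, whereas the L2
        # handler above emits (price, qty) pairs.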
pair = self.exchange_symbol_to_std_symbol(msg['symbol'])
if msg['event'] == 'snapshot':
# Reset the book
self._l3_book[pair] = OrderBook(self.id, pair, max_depth=self.max_depth)
for side in (BID, ASK):
for update in msg[side + 's']:
price = update['px']
qty = update['qty']
order_id = update['id']
if qty <= 0:
del self._l3_book[pair].book[side][price][order_id]
else:
if price in self._l3_book[pair].book[side]:
self._l3_book[pair].book[side][price][order_id] = qty
else:
self._l3_book[pair].book[side][price] = {order_id: qty}
if len(self._l3_book[pair].book[side][price]) == 0:
del self._l3_book[pair].book[side][price]
delta[side].append((order_id, price, qty))
await self.book_callback(L3_BOOK, self._l3_book[pair], timestamp, raw=msg, delta=delta if msg['event'] != 'snapshot' else None, sequence_number=msg['seqnum'])
    async def _handle_l3_msg(self, msg: dict, timestamp: float):
if msg['event'] == 'subscribed':
LOG.debug("%s: Subscribed to L3 data for %s", self.id, msg['symbol'])
elif msg['event'] in ['snapshot', 'updated']:
await self._pair_l3_update(msg, timestamp)
else:
LOG.warning("%s: Unexpected message %s", self.id, msg)
async def _trade(self, msg: dict, timestamp: float):
"""
trade msg example
{
"seqnum": 21,
"event": "updated",
"channel": "trades",
"symbol": "BTC-USD",
"timestamp": "2019-08-13T11:30:06.100140Z",
"side": "sell",
"qty": 8.5E-5,
"price": 11252.4,
"trade_id": "12884909920"
}
"""
t = Trade(
self.id,
msg['symbol'],
BUY if msg['side'] == 'buy' else SELL,
msg['qty'],
msg['price'],
self.timestamp_normalize(msg['timestamp']),
id=msg['trade_id'],
)
await self.callback(TRADES, t, timestamp)
    async def _handle_trade_msg(self, msg: dict, timestamp: float):
if msg['event'] == 'subscribed':
LOG.debug("%s: Subscribed to trades channel for %s", self.id, msg['symbol'])
elif msg['event'] == 'updated':
await self._trade(msg, timestamp)
else:
LOG.warning("%s: Invalid message type %s", self.id, msg)
async def message_handler(self, msg: str, conn, timestamp: float):
msg = json.loads(msg, parse_float=Decimal)
if self.seq_no is not None and msg['seqnum'] != self.seq_no + 1:
LOG.warning("%s: Missing sequence number detected!", self.id)
raise MissingSequenceNumber("Missing sequence number, restarting")
self.seq_no = msg['seqnum']
if 'channel' in msg:
if msg['channel'] == 'l2':
await self.METHOD_NAME(msg, timestamp)
elif msg['channel'] == 'l3':
await self._handle_l3_msg(msg, timestamp)
elif msg['channel'] == 'trades':
await self._handle_trade_msg(msg, timestamp)
else:
LOG.warning("%s: Invalid message type %s", self.id, msg)
async def subscribe(self, conn: AsyncConnection):
self.__reset()
for chan in self.subscription:
for pair in self.subscription[chan]:
await conn.write(json.dumps({"action": "subscribe",
"symbol": pair,
"channel": chan
})) | null |
738 | """ test for app action functionality """
from unittest.mock import patch
from django.contrib.auth.models import AnonymousUser
from django.http import Http404
from django.template.response import TemplateResponse
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import timezone
from bookwyrm import models, views
from bookwyrm.tests.validate_html import validate_html
class GoalViews(TestCase):
"""viewing and creating statuses"""
def METHOD_NAME(self):
"""we need basic test data and mocks"""
self.factory = RequestFactory()
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"[email protected]",
"[email protected]",
"mouseword",
local=True,
localname="mouse",
remote_id="https://example.com/users/mouse",
)
self.rat = models.User.objects.create_user(
"[email protected]",
"[email protected]",
"ratword",
local=True,
localname="rat",
remote_id="https://example.com/users/rat",
)
self.book = models.Edition.objects.create(
title="Example Edition",
remote_id="https://example.com/book/1",
)
self.anonymous_user = AnonymousUser
self.anonymous_user.is_authenticated = False
self.year = timezone.now().year
models.SiteSettings.objects.create()
def test_goal_page_no_goal(self):
"""view a reading goal page for another's unset goal"""
view = views.Goal.as_view()
request = self.factory.get("")
request.user = self.rat
result = view(request, self.local_user.localname, self.year)
self.assertEqual(result.status_code, 404)
def test_goal_page_no_goal_self(self):
"""view a reading goal page for your own unset goal"""
view = views.Goal.as_view()
request = self.factory.get("")
request.user = self.local_user
result = view(request, self.local_user.localname, self.year)
validate_html(result.render())
self.assertIsInstance(result, TemplateResponse)
def test_goal_page_anonymous(self):
"""can't view it without login"""
view = views.Goal.as_view()
request = self.factory.get("")
request.user = self.anonymous_user
result = view(request, self.local_user.localname, self.year)
self.assertEqual(result.status_code, 302)
def test_goal_page_public(self):
"""view a user's public goal"""
models.ReadThrough.objects.create(
finish_date=timezone.now(),
user=self.local_user,
book=self.book,
)
models.AnnualGoal.objects.create(
user=self.local_user,
year=timezone.now().year,
goal=128937123,
privacy="public",
)
view = views.Goal.as_view()
request = self.factory.get("")
request.user = self.rat
result = view(request, self.local_user.localname, timezone.now().year)
validate_html(result.render())
self.assertIsInstance(result, TemplateResponse)
def test_goal_page_private(self):
"""view a user's private goal"""
models.AnnualGoal.objects.create(
user=self.local_user, year=self.year, goal=15, privacy="followers"
)
view = views.Goal.as_view()
request = self.factory.get("")
request.user = self.rat
with self.assertRaises(Http404):
view(request, self.local_user.localname, self.year)
@patch("bookwyrm.activitystreams.add_status_task.delay")
def test_create_goal(self, _):
"""create a new goal"""
view = views.Goal.as_view()
request = self.factory.post(
"",
{
"user": self.local_user.id,
"goal": 10,
"year": self.year,
"privacy": "unlisted",
"post-status": True,
},
)
request.user = self.local_user
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
view(request, self.local_user.localname, self.year)
goal = models.AnnualGoal.objects.get()
self.assertEqual(goal.user, self.local_user)
self.assertEqual(goal.goal, 10)
self.assertEqual(goal.year, self.year)
self.assertEqual(goal.privacy, "unlisted")
status = models.GeneratedNote.objects.get()
self.assertEqual(status.user, self.local_user)
self.assertEqual(status.privacy, "unlisted") | null |
739 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateNodeRequest(RpcRequest):
def __init__(self):
        RpcRequest.__init__(self, 'Dds', '2015-12-01', 'CreateNode', 'dds')
self.set_method('POST')
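    # Hypothetical usage sketch (region and credentials are placeholders):
    #   from aliyunsdkcore.client import AcsClient
    #   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    #   request = CreateNodeRequest()
    #   request.set_DBInstanceId('dds-bp1xxxxxxxxxxxxx')
    #   request.set_NodeClass('dds.shard.mid')
    #   request.set_NodeStorage(10)
    #   response = client.do_action_with_exception(request)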
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_NodeType(self): # String
return self.get_query_params().get('NodeType')
def set_NodeType(self, NodeType): # String
self.add_query_param('NodeType', NodeType)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_ReadonlyReplicas(self): # Integer
return self.get_query_params().get('ReadonlyReplicas')
def set_ReadonlyReplicas(self, ReadonlyReplicas): # Integer
self.add_query_param('ReadonlyReplicas', ReadonlyReplicas)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_NodeClass(self): # String
return self.get_query_params().get('NodeClass')
def set_NodeClass(self, NodeClass): # String
self.add_query_param('NodeClass', NodeClass)
def get_ShardDirect(self): # Boolean
return self.get_query_params().get('ShardDirect')
def set_ShardDirect(self, ShardDirect): # Boolean
self.add_query_param('ShardDirect', ShardDirect)
def get_AccountName(self): # String
return self.get_query_params().get('AccountName')
def set_AccountName(self, AccountName): # String
self.add_query_param('AccountName', AccountName)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def METHOD_NAME(self): # String
return self.get_query_params().get('BusinessInfo')
def set_BusinessInfo(self, BusinessInfo): # String
self.add_query_param('BusinessInfo', BusinessInfo)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_NodeStorage(self): # Integer
return self.get_query_params().get('NodeStorage')
def set_NodeStorage(self, NodeStorage): # Integer
self.add_query_param('NodeStorage', NodeStorage)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_AccountPassword(self): # String
return self.get_query_params().get('AccountPassword')
def set_AccountPassword(self, AccountPassword): # String
self.add_query_param('AccountPassword', AccountPassword) | null |
740 | # Copyright (c) 2020 The Regents of the University of California
# All Rights Reserved.
#
# Copyright (c) 2020 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2017 Mark D. Hill and David A. Wood
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Tests which run simple binaries in gem5's SE mode. The stdlib's SimpleBoard
is used to run these tests.
"""
from testlib import *
import re
isa_str_map = {
constants.gcn3_x86_tag: "x86",
constants.arm_tag: "arm",
constants.mips_tag: "mips",
constants.riscv_tag: "riscv",
constants.sparc_tag: "sparc",
constants.vega_x86_tag: "x86",
}
static_progs = {
constants.vega_x86_tag: ("x86-hello64-static", "x86-hello32-static"),
constants.arm_tag: ("arm-hello64-static", "arm-hello32-static"),
constants.mips_tag: ("mips-hello",),
constants.riscv_tag: ("riscv-hello",),
constants.sparc_tag: ("sparc-hello",),
}
take_params_progs = {
constants.vega_x86_tag: ("x86-print-this",),
constants.riscv_tag: ("riscv-print-this",),
}
dynamic_progs = {constants.vega_x86_tag: ("x86-hello64-dynamic",)}
cpu_types = {
constants.vega_x86_tag: ("timing", "atomic", "o3"),
constants.arm_tag: ("timing", "atomic", "o3", "minor"),
constants.mips_tag: ("timing", "atomic", "o3"),
constants.riscv_tag: ("timing", "atomic", "o3", "minor"),
constants.sparc_tag: ("timing", "atomic"),
}
if config.bin_path:
resource_path = config.bin_path
else:
resource_path = joinpath(absdirpath(__file__), "..", "..", "resources")
regex = re.compile(r"Hello world!")
stdout_verifier = verifier.MatchRegex(regex)
def METHOD_NAME(isa, binary, cpu, hosts, verifier, input):
gem5_verify_config(
name="test-" + binary + "-" + cpu,
fixtures=(),
verifiers=(verifier,),
config=joinpath(
config.base_dir, "tests", "gem5", "configs", "simple_binary_run.py"
),
config_args=[
binary,
cpu,
"--resource-directory",
resource_path,
isa_str_map[isa],
]
+ input,
valid_isas=(constants.all_compiled_tag,),
valid_hosts=hosts,
length=constants.quick_tag,
)
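# For example, with binary="x86-hello64-static" and cpu="timing" the
# registered test is named "test-x86-hello64-static-timing".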
# Run statically linked hello worlds
for isa in static_progs:
for binary in static_progs[isa]:
for cpu in cpu_types[isa]:
METHOD_NAME(
isa,
binary,
cpu,
constants.supported_hosts,
stdout_verifier,
[],
)
# Run dynamically linked hello worlds
for isa in dynamic_progs:
for binary in dynamic_progs[isa]:
for cpu in cpu_types[isa]:
METHOD_NAME(
isa,
binary,
cpu,
constants.target_host[isa],
stdout_verifier,
[],
)
regex = re.compile(r"1 print this")
stdout_verifier = verifier.MatchRegex(regex)
args = ["--arguments", "print this", "--arguments", "2000"]
for isa in take_params_progs:
for binary in take_params_progs[isa]:
for cpu in cpu_types[isa]:
METHOD_NAME(
isa,
binary,
cpu,
constants.target_host[isa],
stdout_verifier,
args,
) | null |
741 | #! /usr/bin/env python
'''test schedule module.'''
import pytest
from jmclient import (get_schedule, get_tumble_schedule,
tweak_tumble_schedule, load_test_config)
import os
pytestmark = pytest.mark.usefixtures("setup_regtest_bitcoind")
valids = """#sample for testing
1, 110000000, 3, INTERNAL, 0, 16, 1
0, 20000000, 2, mnsquzxrHXpFsZeL42qwbKdCP2y1esN3qw, 9.88, 16, 0
"""
invalids1 = """#sample for testing
1, 110000000, 3, 5, INTERNAL, 16, 0
#pointless comment here; following line has trailing spaces
0, 20000000, 2, mnsquzxrHXpFsZeL42qwbKdCP2y1esN3qw ,0, 16, 0,
"""
invalids2 = """#sample for testing
1, 110000000, notinteger, INTERNAL, 0, 16, 0
0, 20000000, 2, mnsquzxrHXpFsZeL42qwbKdCP2y1esN3qw, 0, 16, 0
"""
invalids3 = """#sample for testing
1, 110000000, 3, INTERNAL, 0, 16, 0
0, notinteger, 2, mnsquzxrHXpFsZeL42qwbKdCP2y1esN3qw, 0, 16, 0
"""
#invalid address
invalids4 = """#sample for testing
1, 110000000, 3, INTERNAL, 0, 16, 0
0, 20000000, 2, mnsquzxrHXpFsZeL42qwbKdCP2y1esN3qq, 0, 16, 0
"""
def METHOD_NAME():
load_test_config()
tsf = "schedulefortesting"
for s in [valids, invalids1, invalids2, invalids3, invalids4]:
if os.path.exists(tsf):
os.remove(tsf)
with open(tsf, "wb") as f:
f.write(s.encode('utf-8'))
result = get_schedule(tsf)
        if s == valids:
assert result[0]
assert len(result[1])==2
else:
assert not result[0]
class Options(object):
pass
def get_options():
options = Options()
options.mixdepthcount = 4
options.txcountparams = (18, 3)
options.minmakercount = 2
options.makercountrange = (6, 0)
options.txfee = 5000
options.addrcount = 3
options.mintxcount = 1
options.timelambda = 0.2
options.waittime = 10
options.stage1_timelambda_increase = 3
options.mincjamount = 1000000
options.liquiditywait = 5
options.rounding_chance = 0.25
options.rounding_sigfig_weights = (55, 15, 25, 65, 40)
options = vars(options)
return options
@pytest.mark.parametrize(
"destaddrs, txcparams, mixdepthcount, mixdepthbal",
[
# very simple case
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (3,0), 3,
{0:1}),
# with 2 non-empty mixdepths
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (7,0), 3,
{2:1, 3: 1}),
#intended to trigger txcount=1 bump to 2
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (3,2), 8,
{2:1, 3: 1}),
#slightly larger version
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5",
"bcrt1qcnv26w889eum5sekz5h8we45rxnr4sj5k08phv",
"bcrt1qgs0t239gj2kqgnsrvetvsv2qdva8y3j74cta4d"], (4,3), 8,
{0:2, 1: 1, 3: 1, 4: 1}),
])
def test_tumble_schedule(destaddrs, txcparams, mixdepthcount, mixdepthbal):
# note that these tests are currently only leaving the default
# value for the final argument to get_tumble_schedule, i.e. 4,
# and will fail if this is changed:
wallet_total_mixdepths = 5
options = get_options()
options['addrcount'] = len(destaddrs)
options['mixdepthcount'] = mixdepthcount
options['txcountparams'] = txcparams
schedule = get_tumble_schedule(options, destaddrs, mixdepthbal)
# first, examine the destination addresses; all the requested
# ones should be in the list, and all the others should be one
# of the two standard 'code' alternatives.
dests = [x[3] for x in schedule]
dests = [x for x in dests if x not in ["INTERNAL", "addrask"]]
assert len(dests) == len(destaddrs)
assert set(destaddrs) == set(dests)
nondestaddrs = [x[3] for x in schedule if x[3] not in destaddrs]
assert all([x in ["INTERNAL", "addrask"] for x in nondestaddrs])
# check that the source mixdepths for the phase 1 transactions are the
# expected, and that they are all sweeps:
for i, s in enumerate(schedule[:len(mixdepthbal)]):
assert s[1] == 0
assert s[0] in mixdepthbal.keys()
# check that the list of created transactions in Phase 2 only
# progresses forward, one mixdepth at a time.
# Note that due to the use of sdev calculation, we cannot check that
# the number of transactions per mixdepth is anything in particular.
for first, second in zip(schedule[len(mixdepthbal):-1],
schedule[len(mixdepthbal) + 1:]):
assert (second[0] - first[0]) % wallet_total_mixdepths in [1, 0]
# check that the amount fractions are always total < 1
last_s = []
for s in schedule:
if last_s == []:
last_s = s
total_amt = 0
continue
if s[0] == last_s[0]:
total_amt += s[1]
else:
assert total_amt < 1
total_amt = 0
last_s = s
@pytest.mark.parametrize(
"destaddrs, txcparams, mixdepthcount, lastcompleted, makercountrange",
[
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (6,0), 5, 17, (6,0)),
#edge case: very first transaction
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (3,0), 4, -1, (6,0)),
#edge case: hit minimum_makers limit
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (3,0), 4, -1, (2,0)),
#edge case: it's a sweep
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (3,0), 4, 1, (5,0)),
#mid-run case in 2nd mixdepth
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (6,0), 4, 7, (5,0)),
#sanity check, typical parameters
(["mzzAYbtPpANxpNVGCVBAhZYzrxyZtoix7i",
"mifCWfmygxKhsP3qM3HZi3ZjBEJu7m39h8",
"mnTn9KVQQT9zy9R4E2ZGzWPK4EfcEcV9Y5"], (4,1), 7, 6, (6,1)),
])
def test_tumble_tweak(destaddrs, txcparams, mixdepthcount, lastcompleted,
makercountrange):
load_test_config()
options = get_options()
options['mixdepthcount'] = mixdepthcount
options['txcountparams'] = txcparams
options['makercountrange'] = makercountrange
schedule = get_tumble_schedule(options, destaddrs, {0:1})
dests = [x[3] for x in schedule]
assert set(destaddrs).issubset(set(dests))
new_schedule = tweak_tumble_schedule(options, schedule, lastcompleted)
#sanity check: each amount fraction list should add up to near 1.0,
#so some is left over for sweep
tally = 0
current_mixdepth = new_schedule[0][0]
for i in range(mixdepthcount):
if new_schedule[i][0] != current_mixdepth:
print('got total frac for mixdepth: ', tally)
#TODO spurious failure is possible here, not an ideal check
assert tally < 0.999
tally = 0
tally += new_schedule[i][1] | null |
742 | #!/usr/bin/env python
from __future__ import print_function
import matplotlib.pyplot
import numpy as np
from IMP.isd.History import History
class Analysis:
"""Class that produces analysis-related output, and is able to parse the
output of a file produced by the Statistics class.
"""
def create_entry(self, h, lineno, el):
"""adds a new entry to the hierarchy by parsing a title entry"""
if lineno == len(self.correspondences):
self.correspondences.append([])
entryno, cat, name = el.split(':')
entryno = int(entryno) - 1
if len(self.correspondences[lineno]) == entryno:
self.correspondences[lineno].append((cat, name))
else:
self.correspondences[lineno][entryno] = (cat, name)
h.create_entry(cat, name)
def add_data(self, h, lineno, colno, data):
"""adds data point to hierarchy"""
cat, name = self.correspondences[lineno][colno]
if data.isdigit():
data = int(data)
else:
try:
data = float(data)
except ValueError:
pass
h.add_data(cat, name, data)
def read_variables(self, statfile):
"""reads a *_stats.txt file and returns variables present in it"""
h = History(statfile)
# read title and setup history
self.correspondences = []
for line in open(statfile):
if line.startswith('*'):
continue
tokens = line.split()
if not tokens[0][1].isdigit():
continue
lineno = int(tokens[0][1:]) - 1
if line.startswith('#'):
for el in tokens[1:]:
self.create_entry(h, lineno, el)
continue
break
return self.correspondences
def read_AMBER_variables(self, statfile):
"""reads an AMBER mden file and returns variables present in it"""
h = History(statfile)
# read title and setup history
self.correspondences = []
oldnum = -1
for line in open(statfile):
tokens = line.split()
lineno = int(tokens[0][1:])
if lineno < oldnum:
break
oldnum = lineno
for i, el in enumerate(tokens[1:]):
self.create_entry(h, lineno, '%d:global:%s' % (i + 1, el))
return self.correspondences
def read_AMBER_stats(self, statfile):
"""reads an AMBER mden file and returns a History instance"""
h = History(statfile)
# read title and setup history
read_title = True
oldnum = -1
self.correspondences = []
for line in open(statfile):
tokens = line.split()
lineno = int(tokens[0][1:])
if lineno < oldnum and read_title:
read_title = False
oldnum = lineno
if read_title:
for i, el in enumerate(tokens[1:]):
self.create_entry(h, lineno, '%d:global:%s' % (i + 1, el))
continue
# from here on, the line contains data
for i, el in enumerate(tokens[1:]):
self.add_data(h, lineno, i, el)
# h.sanity_check()
return h
def read_stats(self, statfile):
"""reads a *_stats.txt file and returns a History instance"""
h = History(statfile)
# read title and setup history
read_title = True
self.correspondences = []
for line in open(statfile):
if line.startswith('*'):
continue
tokens = line.split()
if not tokens[0][1].isdigit():
continue
lineno = int(tokens[0][1:]) - 1
if line.startswith('#'):
if read_title:
for el in tokens[1:]:
self.create_entry(h, lineno, el)
continue
elif read_title:
read_title = False
# from here on, the line starts with 'L'
for i, el in enumerate(tokens[1:]):
self.add_data(h, lineno, i, el)
# h.sanity_check()
return h
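    # Sketch of the stats-file layout implied by the parser above
    # (hypothetical lines):
    #   #1 1:global:step 2:global:score    <- title line, "entry:category:name" columns
    #   L1 100 42.0                        <- data line, matched column by column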
def plot(self, h, *datums, **kwargs):
"""plots datum (cat,name) from hierarchy h, optionally specifying a
range. To plot multiple data at the same time, add them sequentially.
Takes x axis from the 'step' entry of the first datum. TODO.
"""
data = [np.array(h.get_data(cat, name), dtype=float)
for (cat, name) in datums]
x = h.get_data(datums[0][0], 'step')
toplot = []
for i in range(len(data)):
toplot.extend([x, data[i]])
        matplotlib.pyplot.plot(*toplot, **kwargs)
matplotlib.pyplot.grid(True)
matplotlib.pyplot.legend()
matplotlib.pyplot.show()
def histogram(self, h, *datums, **kwargs):
"""plots histogram of datum (cat,name) from hierarchy h, optionally
specifying a range. To plot multiple data at the same time, add them
sequentially.
"""
data = [np.array(h.get_data(*dat), dtype=float)
for dat in datums]
matplotlib.pyplot.hist(*data, **kwargs)
matplotlib.pyplot.grid(True)
matplotlib.pyplot.legend()
matplotlib.pyplot.show()
def METHOD_NAME(self, h, dest, *args):
""""dump float data from history h to file dest.
args can be either strings corresponding to categories, or tuples
corresponding to entries of a certain category. Only one counter will
be output for the whole dump, it corresponds to the counter of the
first entry's category. You can always specify additional counters
if needed.
"""
# parse args
cats = []
names = []
for arg in args:
if isinstance(arg, str):
# get rid of counter
ent = h.get_entries(arg)[1:]
names.extend(ent)
cats.extend([arg] * len(ent))
else:
# argument should be (cat, entry)
names.append(arg[1])
cats.append(arg[0])
# write data
steps = h.get_data(cats[0], 'step')
fl = open(dest, 'w')
fl.write("# %s:step\t" % cats[0])
fl.write('\t'.join(['%s:%s' % (i, j) for (i, j) in zip(cats, names)]))
fl.write('\n')
data = [h.get_data(i, j) for (i, j) in zip(cats, names)]
for i, st in enumerate(steps):
fl.write("%10d\t" % st)
for j in data:
fl.write('%10f\t' % j[i])
fl.write('\n')
fl.close()
if __name__ == '__main__':
import sys
a = Analysis()
h = a.read_stats(sys.argv[1])
h.toc()
matplotlib.pyplot.ion() # interactive | null |
744 | # A simple CLI runner for LSF that can be used when running Galaxy from a
# non-submit host and using an LSF cluster.
from logging import getLogger
from os import path
from . import (
BaseJobExec,
job_states,
)
from ... import runner_states
log = getLogger(__name__)
argmap = {
"memory": "-M", # There is code in job_script_kwargs relying on this name's setting
"cores": "-n",
"queue": "-q",
"working_dir": "-cwd",
"project": "-P",
}
class LSF(BaseJobExec):
def METHOD_NAME(self, ofile, efile, job_name):
scriptargs = {"-o": ofile, "-e": efile, "-J": job_name}
# Map arguments using argmap.
for k, v in self.params.items():
if k == "plugin" or k == "excluded_hosts":
continue
try:
if k == "memory":
# Memory requires both -m and -R rusage[mem=v] request
scriptargs["-R"] = f'"rusage[mem={v}]"'
if not k.startswith("-"):
k = argmap[k]
scriptargs[k] = v
except Exception:
log.warning(f"Unrecognized long argument passed to LSF CLI plugin: {k}")
# Generated template.
template_scriptargs = ""
for k, v in scriptargs.items():
template_scriptargs += f"#BSUB {k} {v}\n"
# Excluded hosts use the same -R option already in use for mem, so easier adding here.
for host in self._get_excluded_hosts():
template_scriptargs += f"#BSUB -R \"select[hname!='{host}']\"\n"
return dict(headers=template_scriptargs)
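    # Sketch of the resulting header block for params {"memory": 16000,
    # "cores": 4} (hypothetical values; note "memory" expands to both -R and -M):
    #   #BSUB -o /data/out.log
    #   #BSUB -e /data/err.log
    #   #BSUB -J galaxy_job
    #   #BSUB -R "rusage[mem=16000]"
    #   #BSUB -M 16000
    #   #BSUB -n 4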
def submit(self, script_file):
# bsub returns Job <9147983> is submitted to default queue <research-rh7>.
# This should be really handled outside with something like
# parse_external. Currently CLI runner expect this to just send it in the last position
# of the string.
return "bsub <%s | awk '{ print $2}' | sed 's/[<>]//g'" % script_file
def delete(self, job_id):
return f"bkill {job_id}"
def get_status(self, job_ids=None):
return 'bjobs -a -o "id stat" -noheader' # check this
def get_single_status(self, job_id):
return f"bjobs -o stat -noheader {job_id}"
def parse_status(self, status, job_ids):
# Get status for each job, skipping header.
rval = {}
for line in status.splitlines():
job_id, state = line.split()
if job_id in job_ids:
# map job states to Galaxy job states.
rval[job_id] = self._get_job_state(state)
return rval
def parse_single_status(self, status, job_id):
if not status:
# Job not found in LSF, most probably finished and forgotten.
# lsf outputs: Job <num> is not found -- but that is on the stderr
# Note: a very old failed job job will not be shown here either,
# which would be badly handled here. So this only works well when Galaxy
# is constantly monitoring the jobs. The logic here is that DONE jobs get forgotten
# faster than failed jobs.
log.warning(f"Job id '{job_id}' not found LSF status check")
return job_states.OK
return self._get_job_state(status)
def get_failure_reason(self, job_id):
return f"bjobs -l {job_id}"
def parse_failure_reason(self, reason, job_id):
# LSF will produce the following in the job output file:
# TERM_MEMLIMIT: job killed after reaching LSF memory usage limit.
# Exited with exit code 143.
for line in reason.splitlines():
if "TERM_MEMLIMIT" in line:
return runner_states.MEMORY_LIMIT_REACHED
if "TERM_RUNLIMIT" in line:
return runner_states.WALLTIME_REACHED
return None
def _get_job_state(self, state):
# based on:
# https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.3/lsf_admin/job_state_lsf.html
# https://www.ibm.com/support/knowledgecenter/en/SSETD4_9.1.2/lsf_command_ref/bjobs.1.html
try:
return {
"EXIT": job_states.ERROR,
"RUN": job_states.RUNNING,
"PEND": job_states.QUEUED,
"DONE": job_states.OK,
"PSUSP": job_states.ERROR,
"USUSP": job_states.ERROR,
"SSUSP": job_states.ERROR,
"UNKWN": job_states.ERROR,
"WAIT": job_states.QUEUED,
"ZOMBI": job_states.ERROR,
}.get(state)
except KeyError:
raise KeyError(f"Failed to map LSF status code [{state}] to job state.")
def _get_excluded_hosts(self):
"""
Reads a file in the set path with one node name per line. All these nodes will be added
to the exclusion list for execution.
The path can be added to destinations like this:
<destination id="lsf_8cpu_16GbRam" runner="cli">
<param id="shell_plugin">LocalShell</param>
<param id="job_plugin">LSF</param>
<param id="job_memory">16000</param>
<param id="job_cores">7</param>
<param id="job_excluded_hosts">/path/to/file/with/hosts/to/exclude/one/per/line.txt</param>
</destination>
:param pathExcludedNodes:
:return: list with node names
"""
if "excluded_hosts" in self.params:
path_excluded = self.params["excluded_hosts"]
if path.isfile(path_excluded):
with open(path_excluded) as f:
return f.read().splitlines()
return []
__all__ = ("LSF",) | null |
744 | #/*##########################################################################
# Copyright (C) 2004-2023 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__ = "V.A. Sole"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
import sys
import logging
import time
import weakref
from PyMca5.PyMcaGui import PyMcaQt as qt
QTVERSION = qt.qVersion()
_logger = logging.getLogger(__name__)
SOURCE_EVENT = qt.QEvent.registerEventType()
try:
import thread
except ImportError:
import _thread as thread
class SourceEvent(qt.QEvent):
def __init__(self, ddict=None):
if ddict is None:
ddict = {}
self.dict = ddict
qt.QEvent.__init__(self, SOURCE_EVENT)
class QSource(qt.QObject):
sigUpdated = qt.pyqtSignal(object)
def __init__(self):
qt.QObject.__init__(self, None) #no parent
self.surveyDict = {}
self.selections = {}
self.METHOD_NAME(0.7) # 700 milliseconds
self.pollerThreadId = None
def METHOD_NAME(self, pollTime):
"""Set polling time (in seconds)"""
self._pollTime = max(pollTime, 0.1)
return self._pollTime
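    # e.g. METHOD_NAME(0.05) stores max(0.05, 0.1) == 0.1 seconds and returns
    # the poll time actually set.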
def getPollTime(self):
return self._pollTime
def addToPoller(self, dataObject):
"""Set polling for data object"""
sourceName = dataObject.info['SourceName']
if sourceName != self.sourceName:
raise KeyError("Trying to survey key %s on wrong source %s" % (self.sourceName,dataObject.info['SourceName']))
#that is general to any source
key = dataObject.info['Key']
reference = id(dataObject)
def dataObjectDestroyed(ref, dataObjectKey=key, dataObjectRef=reference):
_logger.debug('data object destroyed, key was %s', dataObjectKey)
_logger.debug('data object destroyed, ref was 0x%x', dataObjectRef)
_logger.debug("self.surveyDict[key] = %s", self.surveyDict[key])
n = len(self.surveyDict[dataObjectKey])
if n > 0:
ns = list(range(n))
newlist = []
for i in ns:
try:
if len(dir(self.surveyDict[dataObjectKey][i])):
newlist.append(self.surveyDict[dataObjectKey][i])
except ReferenceError:
pass
self.surveyDict[dataObjectKey] = newlist
if len(self.surveyDict[dataObjectKey]) == 0:
del self.surveyDict[dataObjectKey]
_logger.debug("SURVEY DICT AFTER DELETION = %s", self.surveyDict)
return
        # create a weak reference to the dataObject; we call it dataObjectRef
dataObjectRef=weakref.proxy(dataObject, dataObjectDestroyed)
try:
_logger.debug("Dealing with data object reference %s", dataObjectRef)
if key not in self.surveyDict:
self.surveyDict[key] = [dataObjectRef]
self.selections[key] = [(id(dataObjectRef), dataObjectRef.info)]
elif dataObjectRef not in self.surveyDict[key]:
_logger.debug("dataObject reference ADDED")
self.surveyDict[key].append(dataObjectRef)
self.selections[key].append((id(dataObjectRef), dataObjectRef.info))
else:
_logger.debug("dataObject reference IGNORED")
except KeyError:
print("ADDING BECAUSE OF KEY ERROR")
self.surveyDict[key] = [dataObjectRef]
self.selections[key] = [(id(dataObjectRef), dataObjectRef.info)]
except ReferenceError:
_logger.debug("NOT ADDED TO THE POLL dataObject = %s", dataObject)
return
if self.pollerThreadId is None:
# start a new polling thread
_logger.debug("starting new thread")
self.pollerThreadId = thread.start_new_thread(self.__run, ())
def __run(self):
_logger.debug("In QSource __run method")
while len(self.surveyDict) > 0:
#for key in self.surveyDict is dangerous
# runtime error: dictionary changed during iteration
# a mutex is needed
_logger.debug("In loop")
dummy = list(self.surveyDict.keys())
eventsToPost = {}
#for key in self.surveyDict:
for key in dummy:
if key not in eventsToPost:
eventsToPost[key] = []
if self.isUpdated(self.sourceName, key):
_logger.debug("%s %s is updated", self.sourceName, key)
try:
if len(self.surveyDict[key]):
#there are still instances of dataObjects
event = SourceEvent()
event.dict['Key'] = key
event.dict['event'] = 'updated'
event.dict['id'] = self.surveyDict[key]
scanselection = False
info = self.surveyDict[key][0].info
if "scanselection" in info:
scanselection = info['scanselection']
elif "selectiontype" in info:
_logger.debug("selectiontype %s", info["selectiontype"])
if info["selectiontype"] == "1D":
scanselection = True
if (key == 'SCAN_D') or scanselection:
event.dict['scanselection'] = True
else:
event.dict['scanselection'] = False
eventsToPost[key].append(event)
else:
del self.surveyDict[key]
del self.selections[key]
except Exception:
_logger.debug("error in loop %s", sys.exc_info())
del self.surveyDict[key]
del self.selections[key]
pass
for key in eventsToPost:
for event in eventsToPost[key]:
qt.QApplication.postEvent(self, event)
qt.QApplication.instance().processEvents()
time.sleep(self._pollTime)
_logger.debug("woke up")
self.pollerThreadId = None
self.selections = {} | null |
745 | ##########################################################################
#
# Copyright (c) 2022, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import os
import shutil
import tempfile
import nuke
import imath
import IECore
import IECoreScene
import IECoreNuke
class SceneCacheWriterTest( IECoreNuke.TestCase ) :
def setUp( self ) :
self.__temporaryDirectory = None
def tearDown( self ) :
if self.__temporaryDirectory is not None :
shutil.rmtree( self.__temporaryDirectory )
def temporaryDirectory( self ) :
if self.__temporaryDirectory is None :
self.__temporaryDirectory = tempfile.mkdtemp( prefix = "ieCoreNukeTest" )
return self.__temporaryDirectory
def testWriteEmptyScene( self ):
outputFile = os.path.join( self.temporaryDirectory(), "empty.scc" )
writer = nuke.createNode( "WriteGeo" )
writer["file"].fromScript( outputFile )
nuke.execute( writer, 1001, 1001 )
self.assertTrue( os.path.exists( outputFile ) )
scene = IECoreScene.SharedSceneInterfaces.get( outputFile )
self.assertEqual( scene.childNames(), [] )
def METHOD_NAME( self ):
outputFile = os.path.join( self.temporaryDirectory(), "sphere.scc" )
sphere = nuke.createNode( "Sphere" )
writer = nuke.createNode( "WriteGeo" )
writer["file"].fromScript( outputFile )
nuke.execute( writer, 1001, 1001 )
self.assertTrue( os.path.exists( outputFile ) )
scene = IECoreScene.SharedSceneInterfaces.get( outputFile )
self.assertEqual( scene.childNames(), ["object0"] )
self.assertEqual( scene.readTransform( 0 ).value, imath.M44d() )
liveSceneHolder = nuke.createNode( "ieLiveScene" )
liveSceneHolder.setInput( 0, sphere )
liveScene = liveSceneHolder["scene"].getValue()
liveSceneMesh = liveScene.scene( ["object0"] ).readObject( 0 )
mesh = scene.scene( ["object0"] ).readObject( 0 )
self.assertEqual( mesh.topologyHash(), liveSceneMesh.topologyHash() )
def testWriteSceneCacheReader( self ):
import random
outputFile = os.path.join( self.temporaryDirectory(), "scene.scc" )
sceneFile = "test/IECoreNuke/scripts/data/liveSceneData.scc"
sceneReader = nuke.createNode( "ieSceneCacheReader" )
sceneReader.knob( "file" ).setValue( sceneFile )
expectedScene = IECoreScene.SharedSceneInterfaces.get( sceneFile )
sceneReader.forceValidate()
widget = sceneReader.knob( "sceneView" )
widget.setSelectedItems( ['/root/A/a', '/root/B/b'] )
writer = nuke.createNode( "WriteGeo" )
writer["file"].fromScript( outputFile )
nuke.execute( writer, 1, 48 )
scene = IECoreScene.SharedSceneInterfaces.get( outputFile )
self.assertEqual( scene.childNames(), expectedScene.childNames() )
for time in range( 0, 3 ):
self.assertAlmostEqual( scene.readBound( time ).min(), expectedScene.readBound( time ).min() )
mesh = scene.scene( ["B", "b"] ).readObject( time )
expectedMesh = expectedScene.scene( ["B", "b"] ).readObject( time )
random.seed( 12 )
for i in range( 12 ):
pointIndex = random.choice( range( len( mesh["P"].data ) ) )
self.assertAlmostEqual( mesh["P"].data[pointIndex], expectedMesh["P"].data[pointIndex], 4 )
def testWriteParticle( self ):
outputFile = os.path.join( self.temporaryDirectory(), "particle.scc" )
noise = nuke.createNode( "Noise")
card = nuke.createNode( "Card2" )
card.setInput( 0, noise )
particle = nuke.createNode( "ParticleEmitter" )
particle.setInput( 1, card )
particle["size_variation"].setValue( 2 )
particle["color_from_texture"].setValue( True )
particle["spread"].setValue( .3 )
writer = nuke.createNode( "WriteGeo" )
writer["file"].fromScript( outputFile )
nuke.execute( writer, 0, 24 )
self.assertTrue( os.path.exists( outputFile ) )
scene = IECoreScene.SharedSceneInterfaces.get( outputFile )
self.assertEqual( scene.childNames(), ["object0"] )
pointsPrim = scene.scene( ["object0",] ).readObject( 1 )
self.assertEqual( set( pointsPrim.keys() ), set( ["Cs", "P", "pid", "width", "velocity", "alpha"] ) )
self.assertEqual( pointsPrim.numPoints, 100 )
self.assertEqual( scene.scene( ["object0",] ).readObject( 0.04 ).numPoints, 10 )
self.assertEqual( scene.scene( ["object0",] ).readObject( 0.5 ).numPoints, 100 )
self.assertAlmostEqual( pointsPrim["P"].data[12], imath.V3f(-0.559, 1.797, 1.677), delta=.015 )
self.assertAlmostEqual( pointsPrim["Cs"].data[21], imath.Color4f(0.241325, 0.241325, 0.241325, 1), delta=.015 )
self.assertAlmostEqual( pointsPrim["alpha"].data[72], 1.0, delta=.015 )
self.assertAlmostEqual( pointsPrim["width"].data[99], .105, delta=.015 )
self.assertAlmostEqual( pointsPrim["pid"].data[92], 197, delta=.015 )
self.assertAlmostEqual( pointsPrim["velocity"].data[72], imath.V3f(-18.424, 4.602, 14.675), delta=.015 )
def assertAlmostEqual( self, left, right, delta=None ):
if isinstance( left, ( imath.V3f, imath.Color3f, imath.Color4f ) ):
for index, _ in enumerate( left ):
super( SceneCacheWriterTest, self ).assertAlmostEqual( left[index], right[index], delta=delta )
else:
super( SceneCacheWriterTest, self ).assertAlmostEqual( left, right, delta=delta )
if __name__ == "__main__":
unittest.main()
| null |
746 | # coding: utf8
"""
This file contains a set of functional tests designed to check the correct
execution of the dataset-to-BIDS converters available in Clinica
"""
import warnings
from os import PathLike
from pathlib import Path
from test.nonregression.testing_tools import compare_folders
import pytest
# Silence warnings during the functional tests
warnings.filterwarnings("ignore")
@pytest.fixture(
params=[
# TODO: Update NIFD reference dataset.
"Nifd2Bids",
"Oasis2Bids",
"Oasis3ToBids",
"Adni2Bids",
"Aibl2Bids",
"HabsToBids",
"UkbToBids",
"GenfiToBids",
]
)
def METHOD_NAME(request):
return request.param
def run_nifd2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from pathlib import PurePath
from tempfile import TemporaryDirectory
from clinica.iotools.converters.nifd_to_bids.nifd_to_bids import convert_images
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Act
_ = convert_images(
path_to_clinical=input_dir / "clinical_data",
path_to_dataset=input_dir / "unorganized",
bids_dir=output_dir,
)
# Assert
with TemporaryDirectory() as td:
compare_folders(output_dir, ref_dir, td)
def run_oasis2bids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from pathlib import PurePath
from clinica.iotools.converters.oasis_to_bids.oasis_to_bids import OasisToBids
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Arrange
clinical_data_directory = input_dir / "clinical_data"
# Act
oasis_to_bids = OasisToBids()
oasis_to_bids.convert_images(input_dir / "unorganized", output_dir / "bids")
oasis_to_bids.convert_clinical_data(clinical_data_directory, output_dir / "bids")
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_oasis3tobids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from pathlib import PurePath
from clinica.iotools.converters.oasis3_to_bids.oasis3_to_bids import convert_images
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Arrange
clinical_data_directory = input_dir / "clinical_data"
# Act
convert_images(
input_dir / "unorganized", output_dir / "bids", clinical_data_directory
)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_adni2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from pathlib import PurePath
from clinica.iotools.converters.adni_to_bids.adni_to_bids import AdniToBids
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Arrange
clinical_data_directory = input_dir / "clinical_data"
xml_directory = input_dir / "xml_metadata"
dataset_directory = input_dir / "unorganized_data"
subjects_list = input_dir / "subjects.txt"
modalities = ["T1", "PET_FDG", "PET_AMYLOID", "PET_TAU", "DWI", "FLAIR", "fMRI"]
# Act
adni_to_bids = AdniToBids()
adni_to_bids.check_adni_dependencies()
adni_to_bids.convert_images(
dataset_directory,
clinical_data_directory,
output_dir / "bids",
subjects_list,
modalities,
)
adni_to_bids.convert_clinical_data(
clinical_data_directory,
output_dir / "bids",
xml_path=xml_directory,
)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_aibl2bids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from pathlib import PurePath
from clinica.iotools.converters.aibl_to_bids.aibl_to_bids import (
convert_clinical_data,
convert_images,
)
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Arrange
clinical_data_directory = input_dir / "Data_extract_3.2.5"
dataset_directory = input_dir / "unorganized_data"
# Act
convert_images(
dataset_directory,
clinical_data_directory,
output_dir / "bids",
)
convert_clinical_data(output_dir / "bids", clinical_data_directory)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir)
def run_habs_to_bids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from click.testing import CliRunner
from clinica.iotools.converters.habs_to_bids.habs_to_bids_cli import cli
runner = CliRunner()
result = runner.invoke(cli, [str(input_dir), str(output_dir)])
assert result.exit_code == 0
compare_folders(output_dir, ref_dir, output_dir)
def run_ukbtobids(input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike) -> None:
from pathlib import PurePath
from clinica.iotools.converters.ukb_to_bids.ukb_to_bids import convert_images
from clinica.utils.check_dependency import check_dcm2niix
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Arrange
clinical_data_directory = input_dir / "clinical_data"
# Act
check_dcm2niix()
convert_images(
input_dir / "unorganized", output_dir / "bids", clinical_data_directory
)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir / "bids")
def run_genfitobids(
input_dir: PathLike, output_dir: PathLike, ref_dir: PathLike
) -> None:
from pathlib import PurePath
from clinica.iotools.converters.genfi_to_bids.genfi_to_bids import convert_images
from clinica.utils.check_dependency import check_dcm2niix
# Convert
input_dir = PurePath(input_dir)
output_dir = PurePath(output_dir)
ref_dir = PurePath(ref_dir)
# Act
check_dcm2niix()
convert_images(
input_dir / "unorganized",
output_dir / "bids",
path_to_clinical=None,
gif=False,
)
# Assert
compare_folders(output_dir / "bids", ref_dir / "bids", output_dir / "bids")
def test_run_convertors(cmdopt, tmp_path, METHOD_NAME):
base_dir = Path(cmdopt["input"])
input_dir = base_dir / METHOD_NAME / "in"
ref_dir = base_dir / METHOD_NAME / "ref"
tmp_out_dir = tmp_path / METHOD_NAME / "out"
tmp_out_dir.mkdir(parents=True)
if METHOD_NAME == "Nifd2Bids":
run_nifd2bids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "Oasis2Bids":
run_oasis2bids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "Oasis3ToBids":
run_oasis3tobids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "Adni2Bids":
run_adni2bids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "Aibl2Bids":
run_aibl2bids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "HabsToBids":
run_habs_to_bids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "UkbToBids":
run_ukbtobids(input_dir, tmp_out_dir, ref_dir)
elif METHOD_NAME == "GenfiToBids":
run_genfitobids(input_dir, tmp_out_dir, ref_dir)
else:
print(f"Test {METHOD_NAME} not available.")
assert 0 | null |
747 | import IMP.algebra
import IMP.test
import math
class Tests(IMP.test.TestCase):
def test_magnitude(self):
"""Check Vector4D magnitude"""
v = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
self.assertEqual(v.get_squared_magnitude(), 30.0)
self.assertAlmostEqual(v.get_magnitude(), math.sqrt(30.0), places=1)
def test_component(self):
"""Check Vector4D components"""
v = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
self.assertEqual(v[0], 1.0)
self.assertEqual(v[1], 2.0)
self.assertEqual(v[2], 3.0)
self.assertEqual(v[3], 4.0)
v[0] = 10.0
self.assertEqual(v[0], 10.0)
def test_len(self):
"""Check Vector4D length"""
v1 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
self.assertEqual(len(v1), 4)
def test_scalar_product(self):
"""Check Vector4D scalar product"""
v1 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
v2 = IMP.algebra.Vector4D(10.0, 1.0, 2.0, 3.0)
self.assertAlmostEqual(v1 * v2, v2.get_scalar_product(v1), delta=.1)
        self.assertAlmostEqual(v1 * v2, v2 * v1, delta=.1)
self.assertAlmostEqual(v1.get_scalar_product(v2), v2 * v1, delta=.1)
self.assertAlmostEqual(v1.get_scalar_product(v2), 30.0, delta=.1)
def test_difference(self):
"""Check Vector4D difference"""
v1 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
v2 = IMP.algebra.Vector4D(10.0, 1.0, 2.0, 3.0)
diff = v1 - v2
v1 -= v2
expected_diff = IMP.algebra.Vector4D(-9.0, 1.0, 1.0, 1.0)
self.assertAlmostEqual((diff - expected_diff).get_magnitude(),
0, delta=.1)
self.assertAlmostEqual((v1 - expected_diff).get_magnitude(),
0, delta=.1)
def METHOD_NAME(self):
"""Check Vector4D addition"""
v1 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
idv1 = id(v1)
cppobj = str(v1.this)
v2 = IMP.algebra.Vector4D(10.0, 1.0, 2.0, 3.0)
sum = v1 + v2
v1 += v2
# Inplace addition should not change the Python object identity:
self.assertEqual(id(v1), idv1)
# The underlying C++ object pointer should be unchanged too:
self.assertEqual(str(v1.this), cppobj)
expected_sum = IMP.algebra.Vector4D(11.0, 3.0, 5.0, 7.0)
self.assertAlmostEqual((sum - expected_sum).get_magnitude(),
0, delta=.1)
self.assertAlmostEqual((v1 - expected_sum).get_magnitude(),
0, delta=.1)
def test_scalar_multiplication(self):
"""Check Vector4D multiplication by a scalar"""
v1 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 4.0)
idv1 = id(v1)
s1 = 3.0
prod = v1 * s1
v1 *= s1
# Inplace multiplication should not change the Python object identity:
self.assertEqual(id(v1), idv1)
expected_prod = (3.0, 6.0, 9.0, 12.0)
        for i in range(4):
            self.assertAlmostEqual(prod[i], expected_prod[i], delta=.1)
            self.assertAlmostEqual(v1[i], expected_prod[i], delta=.1)
def test_scalar_division(self):
"""Check Vector4D division by a scalar"""
v1 = IMP.algebra.Vector4D(3.0, 6.0, 9.0, 27.0)
idv1 = id(v1)
s1 = 3.0
prod = v1 / s1
v1 /= s1
# Inplace division should not change the Python object identity:
self.assertEqual(id(v1), idv1)
expected_prod = (1.0, 2.0, 3.0, 9.0)
        for i in range(4):
            self.assertEqual(prod[i], expected_prod[i])
            self.assertEqual(v1[i], expected_prod[i])
def test_rotation_from_vector4d(self):
"""Check creation of a rotation from a 4D vector"""
r = IMP.algebra.Rotation3D(1, 0, 0, 0)
v1 = r.get_quaternion()
v2 = IMP.algebra.Vector4D(1.0, 2.0, 3.0, 10.0)
r2 = IMP.algebra.get_rotation_from_vector4d(v2)
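        # Smoke test only: checks that construction from a non-unit 4D vector
        # succeeds; the resulting quaternion is expected to be the normalised
        # form of v2.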
if __name__ == '__main__':
IMP.test.main() | null |
748 | import asyncio
import logging
import time
from typing import Any, Dict, Optional
import numpy as np
from hummingbot.core.network_iterator import NetworkStatus, safe_ensure_future
from hummingbot.core.web_assistant.connections.data_types import WSJSONRequest
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.data_feed.candles_feed.candles_base import CandlesBase
from hummingbot.data_feed.candles_feed.gate_io_spot_candles import constants as CONSTANTS
from hummingbot.logger import HummingbotLogger
class GateioSpotCandles(CandlesBase):
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, trading_pair: str, interval: str = "1m", max_records: int = 150):
super().__init__(trading_pair, interval, max_records)
@property
def name(self):
return f"gate_io_spot_{self._trading_pair}"
@property
def METHOD_NAME(self):
return CONSTANTS.REST_URL
@property
def wss_url(self):
return CONSTANTS.WSS_URL
@property
def health_check_url(self):
return self.METHOD_NAME + CONSTANTS.HEALTH_CHECK_ENDPOINT
@property
def candles_url(self):
return self.METHOD_NAME + CONSTANTS.CANDLES_ENDPOINT
@property
def rate_limits(self):
return CONSTANTS.RATE_LIMITS
@property
def intervals(self):
return CONSTANTS.INTERVALS
async def check_network(self) -> NetworkStatus:
rest_assistant = await self._api_factory.get_rest_assistant()
await rest_assistant.execute_request(url=self.health_check_url,
throttler_limit_id=CONSTANTS.HEALTH_CHECK_ENDPOINT)
return NetworkStatus.CONNECTED
def get_exchange_trading_pair(self, trading_pair):
return trading_pair.replace("-", "_")
async def fetch_candles(self,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = 500):
rest_assistant = await self._api_factory.get_rest_assistant()
params = {"currency_pair": self._ex_trading_pair, "interval": self.interval, "limit": limit}
if start_time:
params["from"] = start_time
if end_time:
params["to"] = end_time
candles = await rest_assistant.execute_request(url=self.candles_url,
throttler_limit_id=CONSTANTS.CANDLES_ENDPOINT,
params=params)
new_hb_candles = []
for i in candles:
timestamp_ms = i[0] + "000"
open = i[5]
high = i[3]
low = i[4]
close = i[2]
volume = i[6]
quote_asset_volume = i[1]
# no data field
n_trades = 0
taker_buy_base_volume = 0
taker_buy_quote_volume = 0
new_hb_candles.append([timestamp_ms, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume])
return np.array(new_hb_candles).astype(float)
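    # Gate.io REST klines arrive as
    #   [timestamp, quote_volume, close, high, low, open, base_volume]
    # (all strings); the loop above reorders them into Hummingbot's
    #   [timestamp_ms, open, high, low, close, volume, quote_asset_volume, ...]
    # layout, padding the fields Gate.io does not provide with zeros.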
async def fill_historical_candles(self):
max_request_needed = (self._candles.maxlen // 1000) + 1
requests_executed = 0
while not self.is_ready:
missing_records = self._candles.maxlen - len(self._candles)
end_timestamp = int(int(self._candles[0][0]) * 1e-3)
try:
if requests_executed < max_request_needed:
                    # Request one extra candle, since the last row returned is excluded below.
candles = await self.fetch_candles(end_time=end_timestamp, limit=missing_records + 1)
                    # Recompute the number of missing records, since the websocket task may
                    # have appended to the deque in the meantime; extending past maxlen would
                    # silently drop the newest observations.
missing_records = self._candles.maxlen - len(self._candles)
self._candles.extendleft(candles[-(missing_records + 1):-1][::-1])
requests_executed += 1
else:
self.logger().error(f"There is no data available for the quantity of "
f"candles requested for {self.name}.")
                    raise ValueError
except asyncio.CancelledError:
raise
except Exception:
self.logger().exception(
"Unexpected error occurred when getting historical klines. Retrying in 1 seconds...",
)
await self._sleep(1.0)
async def _subscribe_channels(self, ws: WSAssistant):
"""
Subscribes to the candles events through the provided websocket connection.
:param ws: the websocket assistant used to connect to the exchange
"""
try:
payload = {
"time": int(time.time()),
"channel": CONSTANTS.WS_CANDLES_ENDPOINT,
"event": "subscribe",
"payload": [self.interval, self._ex_trading_pair]
}
subscribe_candles_request: WSJSONRequest = WSJSONRequest(payload=payload)
await ws.send(subscribe_candles_request)
self.logger().info("Subscribed to public klines...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to public klines...",
exc_info=True
)
raise
async def _process_websocket_messages(self, websocket_assistant: WSAssistant):
async for ws_response in websocket_assistant.iter_messages():
data: Dict[str, Any] = ws_response.data
if data.get("event") == "update" and data.get("channel") == "spot.candlesticks":
timestamp_ms = int(data["result"]["t"] + "000")
open = data["result"]["o"]
high = data["result"]["h"]
low = data["result"]["l"]
close = data["result"]["c"]
volume = data["result"]["v"]
quote_asset_volume = data["result"]["a"]
# no data field
n_trades = 0
taker_buy_base_volume = 0
taker_buy_quote_volume = 0
if len(self._candles) == 0:
self._candles.append(np.array([timestamp_ms, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume]))
safe_ensure_future(self.fill_historical_candles())
elif timestamp_ms > int(self._candles[-1][0]):
# TODO: validate also that the diff of timestamp == interval (issue with 30d interval).
self._candles.append(np.array([timestamp_ms, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume]))
elif timestamp_ms == int(self._candles[-1][0]):
self._candles.pop()
self._candles.append(np.array([timestamp_ms, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume])) | null |
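# Example usage (a rough sketch; assumes a running asyncio event loop, and that
# the CandlesBase parent exposes `start_network`/`candles_df`, which may vary
# between Hummingbot versions):
#
#     candles = GateioSpotCandles(trading_pair="BTC-USDT", interval="1m")
#     await candles.start_network()
#     df = candles.candles_df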
749 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class AddTagsToResourceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'AddTagsToResource')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Tag4value(self): # String
return self.get_query_params().get('Tag.4.value')
def set_Tag4value(self, Tag4value): # String
self.add_query_param('Tag.4.value', Tag4value)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Tag2key(self): # String
return self.get_query_params().get('Tag.2.key')
def set_Tag2key(self, Tag2key): # String
self.add_query_param('Tag.2.key', Tag2key)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def METHOD_NAME(self): # String
return self.get_query_params().get('Tag.3.key')
def set_Tag3key(self, Tag3key): # String
self.add_query_param('Tag.3.key', Tag3key)
def get_Tag1value(self): # String
return self.get_query_params().get('Tag.1.value')
def set_Tag1value(self, Tag1value): # String
self.add_query_param('Tag.1.value', Tag1value)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def get_Tag3value(self): # String
return self.get_query_params().get('Tag.3.value')
def set_Tag3value(self, Tag3value): # String
self.add_query_param('Tag.3.value', Tag3value)
def get_proxyId(self): # String
return self.get_query_params().get('proxyId')
def set_proxyId(self, proxyId): # String
self.add_query_param('proxyId', proxyId)
def get_Tag5key(self): # String
return self.get_query_params().get('Tag.5.key')
def set_Tag5key(self, Tag5key): # String
self.add_query_param('Tag.5.key', Tag5key)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Tag5value(self): # String
return self.get_query_params().get('Tag.5.value')
def set_Tag5value(self, Tag5value): # String
self.add_query_param('Tag.5.value', Tag5value)
def get_Tags(self): # String
return self.get_query_params().get('Tags')
def set_Tags(self, Tags): # String
self.add_query_param('Tags', Tags)
def get_Tag1key(self): # String
return self.get_query_params().get('Tag.1.key')
def set_Tag1key(self, Tag1key): # String
self.add_query_param('Tag.1.key', Tag1key)
def get_Tag2value(self): # String
return self.get_query_params().get('Tag.2.value')
def set_Tag2value(self, Tag2value): # String
self.add_query_param('Tag.2.value', Tag2value)
def get_Tag4key(self): # String
return self.get_query_params().get('Tag.4.key')
def set_Tag4key(self, Tag4key): # String
self.add_query_param('Tag.4.key', Tag4key) | null |
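# Example usage (a minimal sketch; the AcsClient credentials, region, and
# instance id are placeholders):
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = AddTagsToResourceRequest()
#     request.set_DBInstanceId('rm-xxxxxxxx')
#     request.set_Tag1key('env')
#     request.set_Tag1value('prod')
#     response = client.do_action_with_exception(request)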
750 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcams.endpoint import endpoint_data
import json
class SendChatappMessageRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'cams', '2020-06-06', 'SendChatappMessage')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_MessageType(self): # String
return self.get_body_params().get('MessageType')
def set_MessageType(self, MessageType): # String
self.add_body_params('MessageType', MessageType)
def get_Language(self): # String
return self.get_body_params().get('Language')
def set_Language(self, Language): # String
self.add_body_params('Language', Language)
def get_CustWabaId(self): # String
return self.get_body_params().get('CustWabaId')
def set_CustWabaId(self, CustWabaId): # String
self.add_body_params('CustWabaId', CustWabaId)
def get_Type(self): # String
return self.get_body_params().get('Type')
def set_Type(self, Type): # String
self.add_body_params('Type', Type)
def get_FallBackContent(self): # String
return self.get_body_params().get('FallBackContent')
def set_FallBackContent(self, FallBackContent): # String
self.add_body_params('FallBackContent', FallBackContent)
def get_Content(self): # String
return self.get_query_params().get('Content')
def set_Content(self, Content): # String
self.add_query_param('Content', Content)
def get_TemplateParams(self): # Map
return self.get_body_params().get('TemplateParams')
def set_TemplateParams(self, TemplateParams): # Map
self.add_body_params("TemplateParams", json.dumps(TemplateParams))
def get_Payload(self): # Array
return self.get_query_params().get('Payload')
def set_Payload(self, Payload): # Array
self.add_query_param("Payload", json.dumps(Payload))
def get_ChannelType(self): # String
return self.get_body_params().get('ChannelType')
def set_ChannelType(self, ChannelType): # String
self.add_body_params('ChannelType', ChannelType)
def get_From(self): # String
return self.get_body_params().get('From')
def set_From(self, _From): # String
self.add_body_params('From', _From)
def get_Tag(self): # String
return self.get_body_params().get('Tag')
def set_Tag(self, Tag): # String
self.add_body_params('Tag', Tag)
def get_TrackingData(self): # String
return self.get_body_params().get('TrackingData')
def set_TrackingData(self, TrackingData): # String
self.add_body_params('TrackingData', TrackingData)
def get_TaskId(self): # String
return self.get_body_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_body_params('TaskId', TaskId)
def get_IsvCode(self): # String
return self.get_body_params().get('IsvCode')
def set_IsvCode(self, IsvCode): # String
self.add_body_params('IsvCode', IsvCode)
def get_ContextMessageId(self): # String
return self.get_body_params().get('ContextMessageId')
def set_ContextMessageId(self, ContextMessageId): # String
self.add_body_params('ContextMessageId', ContextMessageId)
def get_Label(self): # String
return self.get_body_params().get('Label')
def set_Label(self, Label): # String
self.add_body_params('Label', Label)
def get_FallBackId(self): # String
return self.get_body_params().get('FallBackId')
def set_FallBackId(self, FallBackId): # String
self.add_body_params('FallBackId', FallBackId)
def METHOD_NAME(self): # Integer
return self.get_body_params().get('Ttl')
def set_Ttl(self, Ttl): # Integer
self.add_body_params('Ttl', Ttl)
def get_FallBackDuration(self): # Integer
return self.get_body_params().get('FallBackDuration')
def set_FallBackDuration(self, FallBackDuration): # Integer
self.add_body_params('FallBackDuration', FallBackDuration)
def get_CustSpaceId(self): # String
return self.get_body_params().get('CustSpaceId')
def set_CustSpaceId(self, CustSpaceId): # String
self.add_body_params('CustSpaceId', CustSpaceId)
def get_To(self): # String
return self.get_body_params().get('To')
def set_To(self, To): # String
self.add_body_params('To', To)
def get_TemplateCode(self): # String
return self.get_body_params().get('TemplateCode')
def set_TemplateCode(self, TemplateCode): # String
self.add_body_params('TemplateCode', TemplateCode) | null |
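# Example usage (a minimal sketch; channel, template code, and phone numbers
# are placeholders):
#
#     from aliyunsdkcore.client import AcsClient
#
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = SendChatappMessageRequest()
#     request.set_ChannelType('whatsapp')
#     request.set_Type('template')
#     request.set_TemplateCode('<template-code>')
#     request.set_TemplateParams({'1': 'value'})  # serialised to JSON internally
#     request.set_From('<sender>')
#     request.set_To('<recipient>')
#     response = client.do_action_with_exception(request)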
751 | import pytest
from lhotse.testing.dummies import dummy_cut, dummy_multi_cut, dummy_supervision
# Note: dummy_cut, dummy_multi_cut, and dummy_supervision have a duration of 1.0 by default.
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_identity(cut):
fcut = cut.fill_supervision()
assert cut == fcut
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_expand(cut):
cut.duration = 7.51
fcut = cut.fill_supervision()
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 7.51
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def test_cut_fill_supervision_shrink(cut):
cut.duration = 0.5
fcut = cut.fill_supervision(shrink_ok=True)
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 0.5
@pytest.mark.parametrize(
"cut",
[
# MonoCut with single supervision
dummy_cut(0, supervisions=[dummy_supervision(0)]),
# MultiCut with single supervision
dummy_multi_cut(0, supervisions=[dummy_supervision(0)]),
],
)
def METHOD_NAME(cut):
cut.duration = 0.5
with pytest.raises(ValueError):
        cut.fill_supervision()
@pytest.mark.parametrize(
"cut",
[
# MonoCut with no supervision
dummy_cut(0, supervisions=[]),
# MultiCut with no supervision
dummy_multi_cut(0, supervisions=[]),
],
)
def test_cut_fill_supervision_add_empty_true(cut):
fcut = cut.fill_supervision()
# Original is not modified
assert len(cut.supervisions) == 0
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 1
@pytest.mark.parametrize(
"cut",
[
# MonoCut with no supervision
dummy_cut(0, supervisions=[]),
# MultiCut with no supervision
dummy_multi_cut(0, supervisions=[]),
],
)
def test_cut_fill_supervision_add_empty_false(cut):
fcut = cut.fill_supervision(add_empty=False)
assert cut == fcut
def test_mono_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
with pytest.raises(AssertionError):
        cut.fill_supervision()
def test_multi_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_multi_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
with pytest.raises(AssertionError):
        cut.fill_supervision()
def test_mixed_cut_fill_supervision_identity():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)) # cuts are 100% overlapping
fcut = cut.fill_supervision()
assert cut == fcut
def test_mixed_cut_fill_supervision_expand():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.pad(duration=7.51)
fcut = cut.fill_supervision()
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 7.51
def test_mixed_cut_fill_supervision_shrink():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)).truncate(duration=0.5) # cuts are 100% overlapping
fcut = cut.fill_supervision(shrink_ok=True)
# Original is not modified
assert cut.supervisions[0].start == 0
assert cut.supervisions[0].duration == 1
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 0.5
def test_mixed_cut_fill_supervision_shrink_raises_default():
cut = dummy_cut(0, supervisions=[dummy_supervision(0)])
cut = cut.mix(dummy_cut(1)).truncate(duration=0.5) # cuts are 100% overlapping
with pytest.raises(ValueError):
        cut.fill_supervision()
def test_mixed_cut_fill_supervision_add_empty_true():
cut = dummy_cut(0)
cut = cut.pad(duration=10)
fcut = cut.fill_supervision()
# Original is not modified
assert len(cut.supervisions) == 0
# Result is modified
assert fcut.supervisions[0].start == 0
assert fcut.supervisions[0].duration == 10
def test_mixed_cut_fill_supervision_add_empty_false():
cut = dummy_cut(0)
cut = cut.pad(duration=10)
fcut = cut.fill_supervision(add_empty=False)
assert cut == fcut
def test_mixed_cut_fill_supervision_raises_on_two_supervisions():
cut = dummy_cut(0, supervisions=[dummy_supervision(0), dummy_supervision(1)])
cut = cut.pad(duration=10)
with pytest.raises(AssertionError):
        cut.fill_supervision()
752 | #
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test extensions functionality."""
import os
import sys
from typing import Type
from google.protobuf import message
from absl.testing import absltest
from proto.google.fhir.proto.r4 import fhirproto_extensions_pb2
from proto.google.fhir.proto.r4 import ml_extensions_pb2
from proto.google.fhir.proto.r4.core import datatypes_pb2
from proto.google.fhir.proto.r4.core import extensions_pb2
from proto.google.fhir.proto.r4.core.resources import patient_pb2
from google.fhir.core import extensions
from google.fhir.core import extensions_test
try:
from testdata.r4.profiles import test_extensions_pb2
except ImportError:
# TODO(b/173534909): Add test protos to PYTHONPATH during dist testing.
pass # Fall through
_EXTENSIONS_DIR = os.path.join('testdata', 'r4', 'extensions')
class ExtensionsTest(extensions_test.ExtensionsTest):
"""Tests functionality provided by the extensions module."""
@property
def extension_cls(self) -> Type[message.Message]:
return datatypes_pb2.Extension
@property
def testdata_dir(self) -> str:
return _EXTENSIONS_DIR
def METHOD_NAME(self):
"""Tests get_fhir_extensions returns an empty list with no extensions."""
patient = patient_pb2.Patient()
self.assertEmpty(extensions.get_fhir_extensions(patient))
def test_get_fhir_extensions_with_extensions_returns_list(self):
"""Tests get_fhir_extensions returns a non-empty list with extensions."""
patient = patient_pb2.Patient()
patient.extension.add(
url=datatypes_pb2.Uri(value='abcd'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)
),
)
self.assertLen(extensions.get_fhir_extensions(patient), 1)
def test_clear_fhir_extensions_with_multiple_extensions_succeeds(self):
"""Tests ClearFhirExtensions when a message has multiple extensions."""
arbitrary_string = datatypes_pb2.String()
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='first'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)
),
)
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='second'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)
),
)
arbitrary_string.extension.add(
url=datatypes_pb2.Uri(value='third'),
value=datatypes_pb2.Extension.ValueX(
boolean=datatypes_pb2.Boolean(value=True)
),
)
self.assertLen(extensions.get_fhir_extensions(arbitrary_string), 3)
# Remove middle extension
extensions.clear_fhir_extensions_with_url(arbitrary_string, 'second')
remaining_extensions = extensions.get_fhir_extensions(arbitrary_string)
self.assertLen(remaining_extensions, 2)
remaining_urls = [
extension.url.value
for extension in remaining_extensions
if isinstance(extension, datatypes_pb2.Extension)
]
self.assertEqual(remaining_urls, ['first', 'third'])
def test_extension_to_message_with_event_trigger_succeeds(self):
self.assert_extension_to_message_equals_golden(
'trigger', ml_extensions_pb2.EventTrigger
)
def test_message_to_extension_with_event_trigger_succeeds(self):
self.assert_message_to_extension_equals_golden(
'trigger', ml_extensions_pb2.EventTrigger
)
def test_extension_to_message_with_event_label_succeeds(self):
self.assert_extension_to_message_equals_golden(
'label', ml_extensions_pb2.EventLabel
)
def test_message_to_extension_with_event_label_succeeds(self):
self.assert_message_to_extension_equals_golden(
'label', ml_extensions_pb2.EventLabel
)
def test_extension_to_message_with_primitive_has_no_value_succeeds(self):
self.assert_extension_to_message_equals_golden(
'primitive_has_no_value', fhirproto_extensions_pb2.PrimitiveHasNoValue
)
def test_message_to_extension_with_primitive_has_no_value_succeeds(self):
self.assert_message_to_extension_equals_golden(
'primitive_has_no_value', fhirproto_extensions_pb2.PrimitiveHasNoValue
)
def test_extension_to_message_with_empty_primitive_has_no_value_succeeds(
self,
):
self.assert_extension_to_message_equals_golden(
'empty', fhirproto_extensions_pb2.PrimitiveHasNoValue
)
def test_message_to_extension_with_empty_primitive_has_no_value_succeeds(
self,
):
self.assert_message_to_extension_equals_golden(
'empty', fhirproto_extensions_pb2.PrimitiveHasNoValue
)
def test_extension_to_message_with_capability_statement_search_parameter_combination_succeeds(
self,
):
self.assert_extension_to_message_equals_golden(
'capability',
extensions_pb2.CapabilityStatementSearchParameterCombination,
)
def test_message_to_extension_with_capability_statement_search_parameter_combination_succeeds(
self,
):
self.assert_message_to_extension_equals_golden(
'capability',
extensions_pb2.CapabilityStatementSearchParameterCombination,
)
@absltest.skipIf(
'testdata' not in sys.modules,
'google-fhir package does not build+install tertiary testdata protos.',
)
def test_extension_to_message_with_digital_media_type_succeeds(self):
self.assert_extension_to_message_equals_golden(
'digital_media_type', test_extensions_pb2.DigitalMediaType
)
@absltest.skipIf(
'testdata' not in sys.modules,
'google-fhir package does not build+install tertiary testdata protos.',
)
def test_message_to_extension_with_digital_media_type_succeeds(self):
self.assert_message_to_extension_equals_golden(
'digital_media_type', test_extensions_pb2.DigitalMediaType
)
if __name__ == '__main__':
absltest.main() | null |
753 | ## @file
# This file is used to define the FMMT dependent external tool management class.
#
# Copyright (c) 2021-, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
##
import glob
import logging
import os
import shutil
import sys
import tempfile
import uuid
from FirmwareStorageFormat.Common import *
from utils.FmmtLogger import FmmtLogger as logger
import subprocess
def ExecuteCommand(cmd: list) -> None:
subprocess.run(cmd,stdout=subprocess.DEVNULL)
class GUIDTool:
def __init__(self, guid: str, short_name: str, command: str) -> None:
self.guid: str = guid
self.short_name: str = short_name
self.command: str = command
self.ifexist: bool = False
def pack(self, buffer: bytes) -> bytes:
"""
        Compress `buffer` with the external GUID tool and return the packed bytes.
"""
tool = self.command
if tool:
tmp = tempfile.mkdtemp(dir=os.environ.get('tmp'))
ToolInputFile = os.path.join(tmp, "pack_uncompress_sec_file")
ToolOuputFile = os.path.join(tmp, "pack_sec_file")
try:
file = open(ToolInputFile, "wb")
file.write(buffer)
file.close()
                command = [tool, '-e', '-o', ToolOutputFile, ToolInputFile]
ExecuteCommand(command)
buf = open(ToolOuputFile, "rb")
res_buffer = buf.read()
except Exception as msg:
logger.error(msg)
return ""
else:
buf.close()
if os.path.exists(tmp):
shutil.rmtree(tmp)
return res_buffer
else:
logger.error(
"Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time.")
logger.info("Its GUID is: %s" % self.guid)
return ""
def unpack(self, buffer: bytes) -> bytes:
"""
        Decompress `buffer` (section data with the common header already removed)
        with the external GUID tool and return the unpacked bytes.
"""
tool = self.command
if tool:
tmp = tempfile.mkdtemp(dir=os.environ.get('tmp'))
ToolInputFile = os.path.join(tmp, "unpack_sec_file")
ToolOuputFile = os.path.join(tmp, "unpack_uncompress_sec_file")
try:
file = open(ToolInputFile, "wb")
file.write(buffer)
file.close()
                command = [tool, '-d', '-o', ToolOutputFile, ToolInputFile]
ExecuteCommand(command)
buf = open(ToolOuputFile, "rb")
res_buffer = buf.read()
except Exception as msg:
logger.error(msg)
return ""
else:
buf.close()
if os.path.exists(tmp):
shutil.rmtree(tmp)
return res_buffer
else:
logger.error("Error parsing section: EFI_SECTION_GUID_DEFINED cannot be parsed at this time.")
logger.info("Its GUID is: %s" % self.guid)
return ""
class GUIDTools:
'''
GUIDTools is responsible for reading FMMTConfig.ini, verify the tools and provide interfaces to access those tools.
'''
default_tools = {
struct2stream(ModifyGuidFormat("a31280ad-481e-41b6-95e8-127f4c984779")): GUIDTool("a31280ad-481e-41b6-95e8-127f4c984779", "TIANO", "TianoCompress"),
struct2stream(ModifyGuidFormat("ee4e5898-3914-4259-9d6e-dc7bd79403cf")): GUIDTool("ee4e5898-3914-4259-9d6e-dc7bd79403cf", "LZMA", "LzmaCompress"),
struct2stream(ModifyGuidFormat("fc1bcdb0-7d31-49aa-936a-a4600d9dd083")): GUIDTool("fc1bcdb0-7d31-49aa-936a-a4600d9dd083", "CRC32", "GenCrc32"),
struct2stream(ModifyGuidFormat("d42ae6bd-1352-4bfb-909a-ca72a6eae889")): GUIDTool("d42ae6bd-1352-4bfb-909a-ca72a6eae889", "LZMAF86", "LzmaF86Compress"),
struct2stream(ModifyGuidFormat("3d532050-5cda-4fd0-879e-0f7f630d5afb")): GUIDTool("3d532050-5cda-4fd0-879e-0f7f630d5afb", "BROTLI", "BrotliCompress"),
}
def __init__(self, tooldef_file: str=None) -> None:
self.dir = os.path.join(os.path.dirname(__file__), "..")
self.tooldef_file = tooldef_file if tooldef_file else os.path.join(self.dir, "FmmtConf.ini")
self.tooldef = dict()
def METHOD_NAME(self) -> None:
        if os.environ.get('FmmtConfPath'):
self.tooldef_file = os.path.join(os.environ['FmmtConfPath'], 'FmmtConf.ini')
else:
            PathList = os.environ.get('PATH', '').split(os.pathsep)
            for CurrentPath in PathList:
if os.path.exists(os.path.join(CurrentPath, 'FmmtConf.ini')):
self.tooldef_file = os.path.join(CurrentPath, 'FmmtConf.ini')
break
def VerifyTools(self, guidtool) -> None:
"""
Verify Tools and Update Tools path.
"""
path_env = os.environ.get("PATH")
path_env_list = path_env.split(os.pathsep)
path_env_list.append(os.path.dirname(__file__))
path_env_list = list(set(path_env_list))
cmd = guidtool.command
if os.path.isabs(cmd):
if not os.path.exists(cmd):
if guidtool not in self.default_tools:
logger.error("Tool Not found %s, which causes compress/uncompress process error." % cmd)
logger.error("Please goto edk2 repo in current console, run 'edksetup.bat rebuild' command, and try again.\n")
else:
logger.error("Tool Not found %s, which causes compress/uncompress process error." % cmd)
else:
guidtool.ifexist = True
else:
for syspath in path_env_list:
if glob.glob(os.path.join(syspath, cmd+"*")):
guidtool.ifexist = True
break
else:
if guidtool not in self.default_tools:
logger.error("Tool Not found %s, which causes compress/uncompress process error." % cmd)
logger.error("Please goto edk2 repo in current console, run 'edksetup.bat rebuild' command, and try again.\n")
else:
logger.error("Tool Not found %s, which causes compress/uncompress process error." % cmd)
def LoadingTools(self) -> None:
self.METHOD_NAME()
if os.path.exists(self.tooldef_file):
with open(self.tooldef_file, "r") as fd:
config_data = fd.readlines()
for line in config_data:
try:
if not line.startswith("#"):
guid, short_name, command = line.split()
new_format_guid = struct2stream(ModifyGuidFormat(guid.strip()))
self.tooldef[new_format_guid] = GUIDTool(
guid.strip(), short_name.strip(), command.strip())
                    except Exception:
logger.error("GuidTool load error!")
continue
else:
self.tooldef.update(self.default_tools)
def __getitem__(self, guid):
if not self.tooldef:
self.LoadingTools()
guid_tool = self.tooldef.get(guid)
if guid_tool:
self.VerifyTools(guid_tool)
return guid_tool
else:
logger.error("{} GuidTool is not defined!".format(guid))
raise Exception("Process Failed: is not defined!")
guidtools = GUIDTools()
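# The FmmtConf.ini consumed by LoadingTools() is whitespace-separated, one tool
# per line ("guid short_name command"); lines starting with '#' are skipped.
# A hypothetical entry, mirroring the defaults above:
#
#     ee4e5898-3914-4259-9d6e-dc7bd79403cf LZMA LzmaCompress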
| null |
754 | # -*- coding: utf-8 -*-
"""
This documentation accompanies the video tutorial: `youtube link <https://youtu.be/imQWZ0HhYjk>`_
##############################################################################
.. _lightly-tutorial-pizza-filter:
Tutorial 1: Curate Pizza Images
===============================
.. warning::
**Tutorial is outdated**
This tutorial uses a deprecated workflow of the Lightly Solution and will be removed in the future.
Please refer to the `new documentation and tutorials <https://docs.lightly.ai>`_ instead.
In this tutorial, you will learn how to upload a dataset to the Lightly platform,
curate the data, and finally use the curated data to train a model.
What you will learn
-------------------
* Create and upload a new dataset
* Curate a dataset using simple image metrics such as Width, Height, Sharpness, Signal-to-Noise ratio, File Size
* Download images based on a tag from a dataset
* Train an image classifier with the filtered dataset
Requirements
------------
You can use your dataset or use the one we provide with this tutorial:
:download:`pizzas.zip <../../../_data/pizzas.zip>`.
If you use your own dataset, please make sure the images are smaller than
2048 pixels in width and height, and that you use fewer than 1000 images.
.. note:: For this tutorial, we provide you with a small dataset of pizza images.
We chose a small dataset because it's easy to ship and train.
Upload the data
---------------
We start by uploading the dataset to the `Lightly Platform <https://app.lightly.ai>`_.
Create a new account if you do not have one yet.
Go to your user Preferences and copy your API token.
Now install lightly if you haven't already, and upload your dataset.
.. code-block:: console
# install Lightly
pip3 install lightly
# upload your DATA directory
lightly-upload token=MY_TOKEN new_dataset_name='NEW_DATASET_NAME' input_dir='DATA/'
Filter the dataset using metadata
---------------------------------
Once the dataset is created and the
images uploaded, you can head to 'Metadata' under the 'Analyze & Filter' menu.
Move the sliders below the histograms to define filter rules for the dataset.
Once you are satisfied with the filtered dataset, create a new tag using the tag menu
on the left side.
Download the curated dataset
----------------------------
We have filtered the dataset and want to download it now to train a model.
Therefore, click on the download menu on the left.
We can now download the filtered images by clicking on the 'DOWNLOAD IMAGES' button.
In our case, the images are stored in the 'pizzas' folder. We now have to
annotate the images. We can do this by moving the individual images to
subfolders corresponding to the class. E.g. we move salami pizza images to the
'salami' folder and Margherita pizza images to the 'margherita' folder.
##############################################################################
Training a model using the curated data
---------------------------------------
"""
# %%
# Now we can start training our model using PyTorch Lightning
# We start by importing the necessary dependencies
import os
import pytorch_lightning as pl
import torch
import torchmetrics
from torch.utils.data import DataLoader, random_split
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.models import resnet18
# %%
# We use a small batch size to make sure we can run the training on all kinds
# of machines. Feel free to adjust the value to one that works on your machine.
batch_size = 8
seed = 42
# %%
# Set the seed to make the experiment reproducible
pl.seed_everything(seed)
# %%
# Let's set up the augmentations for the train and the test data.
train_transform = transforms.Compose(
[
transforms.RandomResizedCrop((224, 224), scale=(0.7, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
# we don't do any resizing or mirroring for the test data
test_transform = transforms.Compose(
[
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
# %%
# We load our data and split it into train/test with a 70/30 ratio.
# Please make sure the data folder contains subfolders for each class
#
# pizzas
# L salami
# L margherita
dset = ImageFolder("pizzas", transform=train_transform)
# to use the random_split method we need to obtain the length
# of the train and test set
full_len = len(dset)
train_len = int(full_len * 0.7)
test_len = int(full_len - train_len)
dataset_train, dataset_test = random_split(dset, [train_len, test_len])
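# Note: random_split returns Subset views of the same underlying ImageFolder,
# so the assignment below does not actually switch the test split to
# test_transform; a fully correct setup would build a second ImageFolder with
# test_transform and index it with the same split indices.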
dataset_test.transforms = test_transform
print("Training set consists of {} images".format(len(dataset_train)))
print("Test set consists of {} images".format(len(dataset_test)))
# %%
# We can create our data loaders to fetch the data from the training and test
# set and pack them into batches.
dataloader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=True)
dataloader_test = DataLoader(dataset_test, batch_size=batch_size)
# %%
# PyTorch Lightning allows us to pack the loss as well as the
# optimizer into a single module.
class MyModel(pl.LightningModule):
def __init__(self, num_classes=2):
super().__init__()
self.save_hyperparameters()
# load a pretrained resnet from torchvision
self.model = resnet18(pretrained=True)
# add new linear output layer (transfer learning)
num_ftrs = self.model.fc.in_features
        self.model.fc = torch.nn.Linear(num_ftrs, num_classes)
self.accuracy = torchmetrics.Accuracy()
def forward(self, x):
return self.model(x)
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = torch.nn.functional.cross_entropy(y_hat, y)
self.log("train_loss", loss, prog_bar=True)
return loss
def METHOD_NAME(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = torch.nn.functional.cross_entropy(y_hat, y)
y_hat = torch.nn.functional.softmax(y_hat, dim=1)
self.accuracy(y_hat, y)
self.log("val_loss", loss, on_epoch=True, prog_bar=True)
self.log("val_acc", self.accuracy.compute(), on_epoch=True, prog_bar=True)
def configure_optimizers(self):
return torch.optim.SGD(self.model.fc.parameters(), lr=0.001, momentum=0.9)
# %%
# Finally, we can create the model and use the Trainer
# to train our model.
model = MyModel()
trainer = pl.Trainer(max_epochs=4, devices=1)
trainer.fit(model, dataloader_train, dataloader_test) | null |
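# %%
# Optionally evaluate once more after training. This is a minimal sketch;
# `Trainer.validate` is available in recent PyTorch Lightning releases.
trainer.validate(model, dataloader_test)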
755 | from boa3_test.tests.boa_test import BoaTest # needs to be the first import to avoid circular imports
from boa3.internal import constants
from boa3.internal.exception import CompilerError
from boa3.internal.neo.vm.opcode.Opcode import Opcode
from boa3.internal.neo.vm.type.Integer import Integer
from boa3.internal.neo.vm.type.String import String
from boa3.internal.neo3.vm import VMState
from boa3_test.test_drive.testrunner.neo_test_runner import NeoTestRunner
class TestNativeContracts(BoaTest):
default_folder: str = 'test_sc/native_test/oracle'
ORACLE_CONTRACT_NAME = 'OracleContract'
def test_get_hash(self):
path, _ = self.get_deploy_file_paths('GetHash.py')
runner = NeoTestRunner(runner_id=self.method_name())
invokes = []
expected_results = []
invokes.append(runner.call_contract(path, 'main'))
expected_results.append(constants.ORACLE_SCRIPT)
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
for x in range(len(invokes)):
self.assertEqual(expected_results[x], invokes[x].result)
def test_oracle_request(self):
path, _ = self.get_deploy_file_paths('OracleRequestCall.py')
runner = NeoTestRunner(runner_id=self.method_name())
invokes = []
expected_results = []
test_url = 'abc'
request_filter = 'ABC'
callback = '123'
gas_for_response = 1_0000000
oracle_invoke = runner.call_contract(path, 'oracle_call',
test_url, request_filter, callback, None, gas_for_response)
invokes.append(oracle_invoke)
expected_results.append(None)
runner.execute(clear_invokes=False)
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
contract_script = oracle_invoke.invoke.contract.script_hash
oracle_requests = runner.get_events('OracleRequest', constants.ORACLE_SCRIPT)
self.assertEqual(1, len(oracle_requests))
self.assertEqual(4, len(oracle_requests[0].arguments))
self.assertEqual(contract_script, oracle_requests[0].arguments[1])
self.assertEqual(test_url, oracle_requests[0].arguments[2])
self.assertEqual(request_filter, oracle_requests[0].arguments[3])
test_url = 'abc'
request_filter = 'ABC'
callback = 'test_callback'
gas_for_response = 1_0000000
invokes.append(runner.call_contract(path, 'oracle_call',
test_url, request_filter, callback, None, gas_for_response))
expected_results.append(None)
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
oracle_requests = runner.get_events('OracleRequest', constants.ORACLE_SCRIPT)
self.assertEqual(2, len(oracle_requests))
self.assertEqual(4, len(oracle_requests[1].arguments))
for x in range(len(invokes)):
self.assertEqual(expected_results[x], invokes[x].result)
def test_oracle_request_url_mismatched_type(self):
path = self.get_contract_path('OracleRequestUrlMismatchedType.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_oracle_request_filter_mismatched_type(self):
path = self.get_contract_path('OracleRequestFilterMismatchedType.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_oracle_request_callback_mismatched_type(self):
path = self.get_contract_path('OracleRequestCallCallbackMismatchedType.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_oracle_request_gas_mismatched_type(self):
path = self.get_contract_path('OracleRequestGasMismatchedType.py')
self.assertCompilerLogs(CompilerError.MismatchedTypes, path)
def test_import_interop_oracle(self):
path, _ = self.get_deploy_file_paths('ImportOracle.py')
runner = NeoTestRunner(runner_id=self.method_name())
invokes = []
expected_results = []
test_url = 'abc'
request_filter = 'ABC'
callback = '123'
gas_for_response = 1_0000000
oracle_invoke = runner.call_contract(path, 'oracle_call',
test_url, request_filter, callback, None, gas_for_response)
invokes.append(oracle_invoke)
expected_results.append(None)
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
contract_script = oracle_invoke.invoke.contract.script_hash
oracle_requests = runner.get_events('OracleRequest', constants.ORACLE_SCRIPT)
self.assertEqual(1, len(oracle_requests))
self.assertEqual(4, len(oracle_requests[0].arguments))
self.assertEqual(contract_script, oracle_requests[0].arguments[1])
self.assertEqual(test_url, oracle_requests[0].arguments[2])
self.assertEqual(request_filter, oracle_requests[0].arguments[3])
for x in range(len(invokes)):
self.assertEqual(expected_results[x], invokes[x].result)
def test_import_interop_oracle_package(self):
path, _ = self.get_deploy_file_paths('ImportInteropOracle.py')
runner = NeoTestRunner(runner_id=self.method_name())
invokes = []
expected_results = []
test_url = 'abc'
request_filter = 'ABC'
callback = '123'
gas_for_response = 1_0000000
oracle_invoke = runner.call_contract(path, 'oracle_call',
test_url, request_filter, callback, None, gas_for_response)
invokes.append(oracle_invoke)
expected_results.append(None)
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
contract_script = oracle_invoke.invoke.contract.script_hash
oracle_requests = runner.get_events('OracleRequest', constants.ORACLE_SCRIPT)
self.assertEqual(1, len(oracle_requests))
self.assertEqual(4, len(oracle_requests[0].arguments))
self.assertEqual(contract_script, oracle_requests[0].arguments[1])
self.assertEqual(test_url, oracle_requests[0].arguments[2])
self.assertEqual(request_filter, oracle_requests[0].arguments[3])
for x in range(len(invokes)):
self.assertEqual(expected_results[x], invokes[x].result)
def METHOD_NAME(self):
from boa3.internal.neo3.contracts import CallFlags
from boa3.internal.model.builtin.interop.oracle.oraclegetpricemethod import OracleGetPriceMethod
call_flags = Integer(CallFlags.ALL).to_byte_array(signed=True, min_length=1)
method = String(OracleGetPriceMethod().method_name).to_bytes()
expected_output = (
Opcode.CALLT + b'\x00\x00'
+ Opcode.RET
)
path = self.get_contract_path('OracleGetPrice.py')
output, manifest = self.compile_and_save(path)
self.assertEqual(expected_output, output)
path, _ = self.get_deploy_file_paths(path)
runner = NeoTestRunner(runner_id=self.method_name())
invoke = runner.call_contract(path, 'main')
runner.execute()
self.assertEqual(VMState.HALT, runner.vm_state, msg=runner.error)
self.assertIsInstance(invoke.result, int) | null |
756 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkservicemesh.endpoint import endpoint_data
class CreateServiceMeshRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'servicemesh', '2020-01-11', 'CreateServiceMesh','servicemesh')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProxyRequestCPU(self):
return self.get_body_params().get('ProxyRequestCPU')
def set_ProxyRequestCPU(self,ProxyRequestCPU):
self.add_body_params('ProxyRequestCPU', ProxyRequestCPU)
def get_OPALimitCPU(self):
return self.get_body_params().get('OPALimitCPU')
def set_OPALimitCPU(self,OPALimitCPU):
self.add_body_params('OPALimitCPU', OPALimitCPU)
def get_OpenAgentPolicy(self):
return self.get_body_params().get('OpenAgentPolicy')
def set_OpenAgentPolicy(self,OpenAgentPolicy):
self.add_body_params('OpenAgentPolicy', OpenAgentPolicy)
def get_OpaEnabled(self):
return self.get_body_params().get('OpaEnabled')
def set_OpaEnabled(self,OpaEnabled):
self.add_body_params('OpaEnabled', OpaEnabled)
def get_ProxyLimitMemory(self):
return self.get_body_params().get('ProxyLimitMemory')
def set_ProxyLimitMemory(self,ProxyLimitMemory):
self.add_body_params('ProxyLimitMemory', ProxyLimitMemory)
def get_OPALogLevel(self):
return self.get_body_params().get('OPALogLevel')
def set_OPALogLevel(self,OPALogLevel):
self.add_body_params('OPALogLevel', OPALogLevel)
def get_ExcludeIPRanges(self):
return self.get_body_params().get('ExcludeIPRanges')
def set_ExcludeIPRanges(self,ExcludeIPRanges):
self.add_body_params('ExcludeIPRanges', ExcludeIPRanges)
def get_IstioVersion(self):
return self.get_body_params().get('IstioVersion')
def set_IstioVersion(self,IstioVersion):
self.add_body_params('IstioVersion', IstioVersion)
def get_Tracing(self):
return self.get_body_params().get('Tracing')
def METHOD_NAME(self,Tracing):
self.add_body_params('Tracing', Tracing)
def get_IncludeIPRanges(self):
return self.get_body_params().get('IncludeIPRanges')
def set_IncludeIPRanges(self,IncludeIPRanges):
self.add_body_params('IncludeIPRanges', IncludeIPRanges)
def get_ExcludeInboundPorts(self):
return self.get_body_params().get('ExcludeInboundPorts')
def set_ExcludeInboundPorts(self,ExcludeInboundPorts):
self.add_body_params('ExcludeInboundPorts', ExcludeInboundPorts)
def get_OPALimitMemory(self):
return self.get_body_params().get('OPALimitMemory')
def set_OPALimitMemory(self,OPALimitMemory):
self.add_body_params('OPALimitMemory', OPALimitMemory)
def get_VSwitches(self):
return self.get_body_params().get('VSwitches')
def set_VSwitches(self,VSwitches):
self.add_body_params('VSwitches', VSwitches)
def get_ProxyLimitCPU(self):
return self.get_body_params().get('ProxyLimitCPU')
def set_ProxyLimitCPU(self,ProxyLimitCPU):
self.add_body_params('ProxyLimitCPU', ProxyLimitCPU)
def get_ProxyRequestMemory(self):
return self.get_body_params().get('ProxyRequestMemory')
def set_ProxyRequestMemory(self,ProxyRequestMemory):
self.add_body_params('ProxyRequestMemory', ProxyRequestMemory)
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name)
def get_Telemetry(self):
return self.get_body_params().get('Telemetry')
def set_Telemetry(self,Telemetry):
self.add_body_params('Telemetry', Telemetry)
def get_OPARequestCPU(self):
return self.get_body_params().get('OPARequestCPU')
def set_OPARequestCPU(self,OPARequestCPU):
self.add_body_params('OPARequestCPU', OPARequestCPU)
def get_OPARequestMemory(self):
return self.get_body_params().get('OPARequestMemory')
def set_OPARequestMemory(self,OPARequestMemory):
self.add_body_params('OPARequestMemory', OPARequestMemory)
def get_EnableAudit(self):
return self.get_body_params().get('EnableAudit')
def set_EnableAudit(self,EnableAudit):
self.add_body_params('EnableAudit', EnableAudit)
def get_LocalityLoadBalancing(self):
return self.get_body_params().get('LocalityLoadBalancing')
def set_LocalityLoadBalancing(self,LocalityLoadBalancing):
self.add_body_params('LocalityLoadBalancing', LocalityLoadBalancing)
def get_ApiServerPublicEip(self):
return self.get_body_params().get('ApiServerPublicEip')
def set_ApiServerPublicEip(self,ApiServerPublicEip):
self.add_body_params('ApiServerPublicEip', ApiServerPublicEip)
def get_TraceSampling(self):
return self.get_body_params().get('TraceSampling')
def set_TraceSampling(self,TraceSampling):
self.add_body_params('TraceSampling', TraceSampling)
def get_PilotPublicEip(self):
return self.get_body_params().get('PilotPublicEip')
def set_PilotPublicEip(self,PilotPublicEip):
self.add_body_params('PilotPublicEip', PilotPublicEip)
def get_AuditProject(self):
return self.get_body_params().get('AuditProject')
def set_AuditProject(self,AuditProject):
self.add_body_params('AuditProject', AuditProject)
def get_VpcId(self):
return self.get_body_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_body_params('VpcId', VpcId)
def get_ExcludeOutboundPorts(self):
return self.get_body_params().get('ExcludeOutboundPorts')
def set_ExcludeOutboundPorts(self,ExcludeOutboundPorts):
		self.add_body_params('ExcludeOutboundPorts', ExcludeOutboundPorts)
757 | from galaxy.security import idencoding
test_helper_1 = idencoding.IdEncodingHelper(id_secret="secu1")
test_helper_2 = idencoding.IdEncodingHelper(id_secret="secu2")
def METHOD_NAME():
# Test that id secrets can be up to 56 characters long.
longest_id_secret = "m" * idencoding.MAXIMUM_ID_SECRET_LENGTH
helper = idencoding.IdEncodingHelper(id_secret=longest_id_secret)
helper.encode_id(1)
# Test that security helper will catch if the id secret is too long.
threw_exception = False
longer_id_secret = "m" * (idencoding.MAXIMUM_ID_SECRET_LENGTH + 1)
try:
idencoding.IdEncodingHelper(id_secret=longer_id_secret)
except Exception:
threw_exception = True
assert threw_exception
# Test that different kinds produce different keys even when id secret
# is very long.
e11 = helper.encode_id(1, kind="moo")
e12 = helper.encode_id(1, kind="moo2")
assert e11 != e12
    # Test that long kinds are rejected because they use up "too much" of the
    # randomness from id_secret values. This isn't a strict requirement, but
    # let's just enforce the best practice.
assertion_error_raised = False
try:
helper.encode_id(1, kind="this is a really long kind")
except AssertionError:
assertion_error_raised = True
assert assertion_error_raised
def test_maximum_length_handling_nonascii():
longest_id_secret = "◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎"
helper = idencoding.IdEncodingHelper(id_secret=longest_id_secret)
helper.encode_id(1)
# Test that security helper will catch if the id secret is too long.
threw_exception = False
longer_id_secret = "◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎◎"
try:
idencoding.IdEncodingHelper(id_secret=longer_id_secret)
except Exception:
threw_exception = True
assert threw_exception
# Test that different kinds produce different keys even when id secret
# is very long.
e11 = helper.encode_id(1, kind="moo")
e12 = helper.encode_id(1, kind="moo2")
assert e11 != e12
def test_unicode_null_decoding():
encoded_id = test_helper_1.encode_id(1)
threw_exception = False
try:
test_helper_1.decode_guid(f"{encoded_id[:-1]}\0")
except Exception:
threw_exception = True
assert threw_exception
def test_encode_decode():
# Different ids are encoded differently
assert test_helper_1.encode_id(1) != test_helper_1.encode_id(2)
    # But decoding an encoded id brings back the original id
assert 1 == test_helper_1.decode_id(test_helper_1.encode_id(1))
def test_nested_encoding():
# Does nothing if not a dict
assert test_helper_1.encode_all_ids(1) == 1
# Encodes top-level things ending in _id
assert test_helper_1.encode_all_ids(dict(history_id=1))["history_id"] == test_helper_1.encode_id(1)
# ..except tool_id
assert test_helper_1.encode_all_ids(dict(tool_id=1))["tool_id"] == 1
    # Encodes lists at the top level if they end in _ids
expected_ids = [test_helper_1.encode_id(1), test_helper_1.encode_id(2)]
assert test_helper_1.encode_all_ids(dict(history_ids=[1, 2]))["history_ids"] == expected_ids
# Encodes nested stuff if and only if recursive set to true.
nested_dict = dict(objects=dict(history_ids=[1, 2]))
assert test_helper_1.encode_all_ids(nested_dict)["objects"]["history_ids"] == [1, 2]
assert test_helper_1.encode_all_ids(nested_dict, recursive=False)["objects"]["history_ids"] == [1, 2]
assert test_helper_1.encode_all_ids(nested_dict, recursive=True)["objects"]["history_ids"] == expected_ids
def test_per_kind_encode_decode():
# Different ids are encoded differently
assert test_helper_1.encode_id(1, kind="k1") != test_helper_1.encode_id(2, kind="k1")
    # But decoding an encoded id brings back the original id
assert 1 == test_helper_1.decode_id(test_helper_1.encode_id(1, kind="k1"), kind="k1")
def test_different_secrets_encode_differently():
assert test_helper_1.encode_id(1) != test_helper_2.encode_id(1)
def test_per_kind_encodes_id_differently():
assert test_helper_1.encode_id(1) != test_helper_2.encode_id(1, kind="new_kind")
def test_encode_dict():
test_dict = dict(
id=1,
other=2,
history_id=3,
)
encoded_dict = test_helper_1.encode_dict_ids(test_dict)
assert encoded_dict["id"] == test_helper_1.encode_id(1)
assert encoded_dict["other"] == 2
assert encoded_dict["history_id"] == test_helper_1.encode_id(3)
def test_guid_generation():
guids = set()
for _ in range(100):
guids.add(test_helper_1.get_new_guid())
    assert len(guids) == 100  # No duplicate guids were generated.
def test_encode_decode_guid():
session_key = test_helper_1.get_new_guid()
encoded_key = test_helper_1.encode_guid(session_key)
decoded_key = test_helper_1.decode_guid(encoded_key)
assert session_key == decoded_key, f"{session_key} != {decoded_key}" | null |
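def test_example_round_trip():
    # Illustrative sketch, not part of the original suite: per-kind encoding
    # derives an independent key per object type from the same id_secret.
    helper = idencoding.IdEncodingHelper(id_secret="example-secret")
    encoded = helper.encode_id(42, kind="history")
    assert helper.decode_id(encoded, kind="history") == 42
    assert encoded != helper.encode_id(42, kind="workflow")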
758 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class CreateVirtualBorderRouterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'CreateVirtualBorderRouter','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_CircuitCode(self): # String
return self.get_query_params().get('CircuitCode')
def set_CircuitCode(self, CircuitCode): # String
self.add_query_param('CircuitCode', CircuitCode)
def get_VlanId(self): # Integer
return self.get_query_params().get('VlanId')
def set_VlanId(self, VlanId): # Integer
self.add_query_param('VlanId', VlanId)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_EnableIpv6(self): # Boolean
return self.get_query_params().get('EnableIpv6')
def set_EnableIpv6(self, EnableIpv6): # Boolean
self.add_query_param('EnableIpv6', EnableIpv6)
def get_Description(self): # String
return self.get_query_params().get('Description')
def METHOD_NAME(self, Description): # String
self.add_query_param('Description', Description)
def get_PeerGatewayIp(self): # String
return self.get_query_params().get('PeerGatewayIp')
def set_PeerGatewayIp(self, PeerGatewayIp): # String
self.add_query_param('PeerGatewayIp', PeerGatewayIp)
def get_PeerIpv6GatewayIp(self): # String
return self.get_query_params().get('PeerIpv6GatewayIp')
def set_PeerIpv6GatewayIp(self, PeerIpv6GatewayIp): # String
self.add_query_param('PeerIpv6GatewayIp', PeerIpv6GatewayIp)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PeeringSubnetMask(self): # String
return self.get_query_params().get('PeeringSubnetMask')
def set_PeeringSubnetMask(self, PeeringSubnetMask): # String
self.add_query_param('PeeringSubnetMask', PeeringSubnetMask)
def get_LocalGatewayIp(self): # String
return self.get_query_params().get('LocalGatewayIp')
def set_LocalGatewayIp(self, LocalGatewayIp): # String
self.add_query_param('LocalGatewayIp', LocalGatewayIp)
def get_PeeringIpv6SubnetMask(self): # String
return self.get_query_params().get('PeeringIpv6SubnetMask')
def set_PeeringIpv6SubnetMask(self, PeeringIpv6SubnetMask): # String
self.add_query_param('PeeringIpv6SubnetMask', PeeringIpv6SubnetMask)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_Bandwidth(self): # Long
return self.get_query_params().get('Bandwidth')
def set_Bandwidth(self, Bandwidth): # Long
self.add_query_param('Bandwidth', Bandwidth)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Tagss(self): # RepeatList
return self.get_query_params().get('Tags')
def set_Tagss(self, Tags): # RepeatList
for depth1 in range(len(Tags)):
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
def get_PhysicalConnectionId(self): # String
return self.get_query_params().get('PhysicalConnectionId')
def set_PhysicalConnectionId(self, PhysicalConnectionId): # String
self.add_query_param('PhysicalConnectionId', PhysicalConnectionId)
def get_LocalIpv6GatewayIp(self): # String
return self.get_query_params().get('LocalIpv6GatewayIp')
def set_LocalIpv6GatewayIp(self, LocalIpv6GatewayIp): # String
self.add_query_param('LocalIpv6GatewayIp', LocalIpv6GatewayIp)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_VbrOwnerId(self): # Long
return self.get_query_params().get('VbrOwnerId')
def set_VbrOwnerId(self, VbrOwnerId): # Long
self.add_query_param('VbrOwnerId', VbrOwnerId) | null |
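if __name__ == "__main__":
	# Illustrative sketch, not from the original file: constructing the
	# request only builds query parameters; sending it requires an AcsClient
	# with real credentials (client.do_action_with_exception(request)).
	# All values below are hypothetical.
	request = CreateVirtualBorderRouterRequest()
	request.set_PhysicalConnectionId("pc-example")
	request.set_VlanId(100)
	request.set_LocalGatewayIp("10.0.0.1")
	request.set_PeerGatewayIp("10.0.0.2")
	request.set_PeeringSubnetMask("255.255.255.252")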
759 | # -*- coding: utf-8 -*-
"""
debug.py - Functions to aid in debugging
Copyright 2010 Luke Campagnola
Distributed under MIT/X11 license. See license.txt for more information.
"""
from __future__ import print_function
import sys
from pyqtgraph.debug import *
import pyqtgraph.debug as pgdebug
from pyqtgraph.exceptionHandling import original_excepthook
LOG_UI = None
def __reload__(old):
# preserve old log window
global LOG_UI
LOG_UI = old["LOG_UI"]
def installExceptionHandler():
# install global exception handler for others to hook into.
import pyqtgraph.exceptionHandling as exceptionHandling
exceptionHandling.setTracebackClearing(True)
exceptionHandling.register(METHOD_NAME)
def createLogWindow(manager):
from .LogWindow import LogWindow
global LOG_UI
assert LOG_UI is None
LOG_UI = LogWindow(manager)
return LOG_UI
def printExc(msg="", indent=4, prefix="|", msgType="error"):
"""Print an error message followed by an indented exception backtrace
(This function is intended to be called within except: blocks)"""
pgdebug.printExc(msg, indent, prefix)
try:
import acq4.Manager
if hasattr(acq4, "Manager"):
acq4.Manager.logExc(msg=msg, msgType=msgType)
except Exception:
pgdebug.printExc(f"[failed to log this error to manager] {msgType}: {msg}")
def logMsg(msg, **kwargs):
"""msg: the text of the log message
msgTypes: user, status, error, warning (status is default)
importance: 0-9 (0 is low importance, 9 is high, 5 is default)
other supported keywords:
exception: a tuple (type, exception, traceback) as returned by sys.exc_info()
docs: a list of strings where documentation related to the message can be found
reasons: a list of reasons (as strings) for the message
traceback: a list of formatted callstack/traceback objects (formatting a traceback/callstack returns a list of strings), usually looks like [['line 1', 'line 2', 'line3'], ['line1', 'line2']]
Feel free to add your own keyword arguments. These will be saved in the log.txt file, but will not affect the content or way that messages are displayed.
"""
global LOG_UI
if LOG_UI is not None:
try:
LOG_UI.logMsg(msg, **kwargs)
except:
print("Error logging message:")
print(" " + "\n ".join(msg.split("\n")))
print(" " + str(kwargs))
sys.excepthook(*sys.exc_info())
else:
print("Can't log message; no log created yet.")
# print args
print(kwargs)
def logExc(msg, *args, **kwargs):
"""Calls logMsg, but adds in the current exception and callstack. Must be called within an except block, and should only be called if the exception is not re-raised. Unhandled exceptions, or exceptions that reach the top of the callstack are automatically logged, so logging an exception that will be re-raised can cause the exception to be logged twice. Takes the same arguments as logMsg."""
global LOG_UI
if LOG_UI is not None:
try:
LOG_UI.logExc(msg, *args, **kwargs)
except:
print("Error logging exception:")
print(" " + "\n ".join(msg.split("\n")))
print(" " + str(kwargs))
sys.excepthook(*sys.exc_info())
else:
print("Can't log error message; no log created yet.")
print(args)
print(kwargs)
blockLogging = False
def METHOD_NAME(*args):
# Called whenever there is an unhandled exception.
# unhandled exceptions generate an error message by default, but this
# can be overridden by raising HelpfulException(msgType='...')
global blockLogging
if not blockLogging:
# if an error occurs *while* trying to log another exception, disable any further logging to prevent recursion.
try:
blockLogging = True
logMsg("Unexpected error: ", exception=args, msgType="error")
except:
print("Error: Exception could no be logged.")
original_excepthook(*sys.exc_info())
finally:
blockLogging = False | null |
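if __name__ == "__main__":
    # Minimal sketch, assuming pyqtgraph is importable: printExc is meant to
    # be called from inside an except block.
    try:
        1 / 0
    except ZeroDivisionError:
        printExc("Example failure while dividing")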
760 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = guess_content_type(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.METHOD_NAME(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def METHOD_NAME(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location | null |
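if __name__ == "__main__":
    # Illustrative sketch: build a field from an old-style tuple and render
    # the multipart headers a client would emit for it.
    field = RequestField.from_tuples("fakefile", ("foofile.txt", "contents of foofile"))
    print(field.render_headers())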
761 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RoaRequest
from aliyunsdkedas.endpoint import endpoint_data
class GetServiceMethodPageRequest(RoaRequest):
def __init__(self):
RoaRequest.__init__(self, 'Edas', '2017-08-01', 'GetServiceMethodPage','Edas')
self.set_uri_pattern('/pop/sp/api/mseForOam/getServiceMethodPage')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_registryType(self): # String
return self.get_query_params().get('registryType')
def set_registryType(self, registryType): # String
self.add_query_param('registryType', registryType)
def get_origin(self): # String
return self.get_query_params().get('origin')
def set_origin(self, origin): # String
self.add_query_param('origin', origin)
def get_ip(self): # String
return self.get_query_params().get('ip')
def set_ip(self, ip): # String
self.add_query_param('ip', ip)
def get_source(self): # String
return self.get_query_params().get('source')
def set_source(self, source): # String
self.add_query_param('source', source)
def get_pageNumber(self): # Integer
return self.get_query_params().get('pageNumber')
def set_pageNumber(self, pageNumber): # Integer
self.add_query_param('pageNumber', pageNumber)
def get_path(self): # String
return self.get_query_params().get('path')
def set_path(self, path): # String
self.add_query_param('path', path)
def get_serviceType(self): # String
return self.get_query_params().get('serviceType')
def set_serviceType(self, serviceType): # String
self.add_query_param('serviceType', serviceType)
def get_appId(self): # String
return self.get_query_params().get('appId')
def set_appId(self, appId): # String
self.add_query_param('appId', appId)
def get_namespace(self): # String
return self.get_query_params().get('namespace')
def set_namespace(self, namespace): # String
self.add_query_param('namespace', namespace)
def METHOD_NAME(self): # String
return self.get_query_params().get('serviceVersion')
def set_serviceVersion(self, serviceVersion): # String
self.add_query_param('serviceVersion', serviceVersion)
def get_pageSize(self): # Integer
return self.get_query_params().get('pageSize')
def set_pageSize(self, pageSize): # Integer
self.add_query_param('pageSize', pageSize)
def get_name(self): # String
return self.get_query_params().get('name')
def set_name(self, name): # String
self.add_query_param('name', name)
def get_serviceName(self): # String
return self.get_query_params().get('serviceName')
def set_serviceName(self, serviceName): # String
self.add_query_param('serviceName', serviceName)
def get_region(self): # String
return self.get_query_params().get('region')
def set_region(self, region): # String
self.add_query_param('region', region)
def get_serviceId(self): # String
return self.get_query_params().get('serviceId')
def set_serviceId(self, serviceId): # String
self.add_query_param('serviceId', serviceId)
def get_methodController(self): # String
return self.get_query_params().get('methodController')
def set_methodController(self, methodController): # String
self.add_query_param('methodController', methodController)
def get_group(self): # String
return self.get_query_params().get('group')
def set_group(self, group): # String
self.add_query_param('group', group) | null |
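if __name__ == "__main__":
	# Illustrative sketch, not from the original file: ROA-style requests
	# carry a fixed URI pattern, so only the query parameters vary per call.
	# All values below are hypothetical.
	request = GetServiceMethodPageRequest()
	request.set_appId("example-app-id")
	request.set_serviceName("com.example.DemoService")
	request.set_pageNumber(1)
	request.set_pageSize(20)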
762 | from __future__ import annotations
import os
import shutil
from dataclasses import dataclass
from mmap import mmap
from typing import Dict, Hashable, Sequence, Tuple, Union
import numpy as np
from meerkat.block.ref import BlockRef
from meerkat.columns.abstract import Column
from meerkat.errors import ConsolidationError
from meerkat.tools.lazy_loader import LazyLoader
from .abstract import AbstractBlock, BlockIndex, BlockView
torch = LazyLoader("torch")
class NumPyBlock(AbstractBlock):
@dataclass(eq=True, frozen=True)
class Signature:
dtype: np.dtype
nrows: int
shape: Tuple[int]
klass: type
mmap: Union[bool, int]
def __init__(self, data, *args, **kwargs):
super(NumPyBlock, self).__init__(*args, **kwargs)
if len(data.shape) <= 1:
raise ValueError(
"Cannot create a `NumpyBlock` from data with less than 2 axes."
)
self.data = data
@property
def signature(self) -> Hashable:
return self.Signature(
klass=NumPyBlock,
# don't want to consolidate any mmaped blocks
mmap=id(self) if isinstance(self.data, np.memmap) else False,
nrows=self.data.shape[0],
shape=self.data.shape[2:],
dtype=self.data.dtype,
)
def _get_data(self, index: BlockIndex, materialize: bool = True) -> np.ndarray:
return self.data[:, index]
@classmethod
    def from_column_data(cls, data: np.ndarray) -> BlockView:
        """Create a block view over a column's underlying array.
        Args:
            data (np.ndarray): The column data; 1-D arrays are expanded to a
                single-column 2-D array.
        Returns:
            BlockView: A view over the newly created block.
        """
if len(data.shape) == 1:
data = np.expand_dims(data, axis=1)
block_index = 0
elif data.shape[1] == 1:
block_index = slice(0, 1)
else:
block_index = slice(0, data.shape[1])
block = cls(data)
return BlockView(block=block, block_index=block_index)
@classmethod
def _consolidate(
cls,
block_refs: Sequence[BlockRef],
consolidated_inputs: Dict[int, "Column"] = None,
) -> BlockRef:
offset = 0
new_indices = {}
columns = {}
to_concat = []
for block_ref in block_refs:
for name, col in block_ref.items():
# keep track of all the columns in the block_refs
if name in columns:
raise ConsolidationError(
"Cannot consolidate two block refs containing the same column."
)
columns[name] = col
# add block and compute new indices
block_index = col._block_index
if isinstance(block_index, slice):
block_view = col._block.data[:, block_index]
new_indices[name] = slice(
# need to update slice offset and remove step
offset,
block_view.shape[1] + offset,
1,
)
elif isinstance(block_index, int):
# keep block axis
block_view = col._block.data[:, block_index : block_index + 1]
new_indices[name] = offset
to_concat.append(block_view)
offset += block_view.shape[1]
block = cls(np.concatenate(to_concat, axis=1))
# create columns
new_columns = {
name: columns[name]._clone(data=block[block_index])
for name, block_index in new_indices.items()
}
return BlockRef(block=block, columns=new_columns)
@staticmethod
def _convert_index(index):
if torch.is_tensor(index):
# need to convert to numpy for boolean indexing
return index.numpy()
return index
def _get(
self, index, block_ref: BlockRef, materialize: bool = True
) -> Union[BlockRef, dict]:
index = self._convert_index(index)
# TODO: check if they're trying to index more than just the row dimension
data = self.data[index]
if isinstance(index, int):
# if indexing a single row, we do not return a block manager, just a dict
return {
name: data[col._block_index] for name, col in block_ref.columns.items()
}
block = self.__class__(data)
columns = {
name: col._clone(data=block[col._block_index])
for name, col in block_ref.columns.items()
}
# note that the new block may share memory with the old block
return BlockRef(block=block, columns=columns)
@property
def is_mmap(self):
# important to check if .base is a python mmap object, since a view of a mmap
# is also a memmap object, but should not be symlinked or copied
return isinstance(self.data, np.memmap) and isinstance(self.data.base, mmap)
def _write_data(self, path: str, link: bool = True):
path = os.path.join(path, "data.npy")
if self.is_mmap:
if link:
os.symlink(self.data.filename, path)
else:
shutil.copy(self.data.filename, path)
else:
np.save(path, self.data)
@staticmethod
def METHOD_NAME(
path: str, mmap: bool = False, read_inputs: Dict[str, Column] = None
):
data_path = os.path.join(path, "data.npy")
if mmap:
return np.load(data_path, mmap_mode="r")
return np.load(data_path, allow_pickle=True) | null |
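if __name__ == "__main__":
    # Illustrative sketch: `from_column_data` wraps an array in a BlockView;
    # the `block` attribute access below assumes the BlockView interface
    # imported from `.abstract`.
    view = NumPyBlock.from_column_data(np.arange(12).reshape(4, 3))
    print(view.block.data.shape)  # -> (4, 3)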
763 | # container-service-extension
# Copyright (c) 2020 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import click
from vcd_cli.utils import stderr
from vcd_cli.utils import stdout
import yaml
from container_service_extension.client.system import System
import container_service_extension.client.utils as client_utils
import container_service_extension.common.constants.shared_constants as shared_constants # noqa: E501
from container_service_extension.logging.logger import CLIENT_LOGGER
@click.group(name='system', short_help='Manage CSE service (system daemon)')
@click.pass_context
def system_group(ctx):
"""Manage CSE server remotely.
\b
Examples
vcd cse system info
Display detailed information of the CSE server.
\b
vcd cse system enable --yes
Enable CSE server without prompting.
\b
vcd cse system stop --yes
Stop CSE server without prompting.
\b
vcd cse system disable --yes
Disable CSE server without prompting.
"""
pass
@system_group.command('info', short_help='Display info of CSE server')
@click.pass_context
def system_info(ctx):
"""Display CSE server info."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
system = System(client)
result = system.get_info()
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@system_group.command('stop', short_help='Gracefully stop CSE server')
@click.pass_context
@click.confirmation_option(prompt='Are you sure you want to stop the server?')
def stop_service(ctx):
"""Stop CSE server."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
system = System(client)
result = system.update_service_status(action=shared_constants.ServerAction.STOP) # noqa: E501
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@system_group.command('enable', short_help='Enable CSE server')
@click.pass_context
def enable_service(ctx):
"""Enable CSE server."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
system = System(client)
result = system.update_service_status(action=shared_constants.ServerAction.ENABLE) # noqa: E501
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@system_group.command('disable', short_help='Disable CSE server')
@click.pass_context
def METHOD_NAME(ctx):
"""Disable CSE server."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
system = System(client)
result = system.update_service_status(action=shared_constants.ServerAction.DISABLE) # noqa: E501
stdout(result, ctx)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True)
@system_group.command('config', short_help='Display CSE server configuration')
@click.pass_context
def system_config(ctx):
"""Display CSE server info."""
CLIENT_LOGGER.debug(f'Executing command: {ctx.command_path}')
try:
client_utils.cse_restore_session(ctx)
client = ctx.obj['client']
system = System(client)
result = system.get_runtime_config()
result = yaml.safe_dump(result)
click.secho(result)
CLIENT_LOGGER.debug(result)
except Exception as e:
stderr(e, ctx)
CLIENT_LOGGER.error(str(e), exc_info=True) | null |
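if __name__ == "__main__":
    # Minimal sketch, assuming this package's dependencies are importable:
    # exercise the group's help text with click's test runner; no CSE server
    # or vCD session is needed for --help.
    from click.testing import CliRunner
    result = CliRunner().invoke(system_group, ["--help"])
    print(result.output)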
764 | # Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
from __future__ import annotations
import json
from pathlib import Path
from typing import Callable, ContextManager
import pytest
from conda.common.compat import on_win
from conda.testing import CondaCLIFixture, PathFactoryFixture, TmpEnvFixture
@pytest.fixture
def environment_yml(path_factory: PathFactoryFixture) -> Path:
path = path_factory(name="environment.yml")
path.write_text(
"name: scratch\n"
"channels:\n"
" - defaults\n"
"dependencies:\n"
" - ca-certificates=2023\n"
)
return path
def test_clean(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("clean", "--all", "--yes")
assert out
assert not err
assert not code
def test_create(conda_cli: CondaCLIFixture, path_factory: PathFactoryFixture):
out, err, code = conda_cli("create", "--prefix", path_factory(), "--yes")
assert out
assert not err
assert not code
def test_compare(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
environment_yml: Path,
):
with tmp_env() as prefix:
out, err, code = conda_cli("compare", "--prefix", prefix, environment_yml)
assert out
assert not err
assert code
def test_config(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("config", "--show-sources")
assert out
assert not err
assert not code
def test_doctor(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("doctor")
assert out
assert not err
assert not code
def test_info(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("info")
assert out
assert not err
assert not code
def test_info_json(conda_cli: CondaCLIFixture):
out1, err, code = conda_cli("info", "--json")
assert json.loads(out1)
assert not err
assert not code
out2, err, code = conda_cli("--json", "info")
assert json.loads(out2)
assert not err
assert not code
assert out1 == out2
def test_init(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("init", "--dry-run")
assert out
assert not err
assert not code
def test_install(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(
"install",
*("--prefix", prefix),
"ca-certificates",
"--yes",
)
assert out
assert not err
assert not code
def test_list(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("ca-certificates") as prefix:
out, err, code = conda_cli("list", "--prefix", prefix)
assert out
assert not err
assert not code
def test_notices(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("notices")
assert out
assert not err
assert not code
def test_package(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli("package", "--prefix", prefix)
assert out
assert not err
assert not code
@pytest.mark.parametrize("subcommand", ["remove", "uninstall"])
def test_remove(subcommand: str, conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(subcommand, "--prefix", prefix, "--all", "--yes")
assert out
assert not err
assert not code
def test_rename(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
path_factory: PathFactoryFixture,
):
with tmp_env() as prefix:
out, err, code = conda_cli("rename", "--prefix", prefix, path_factory())
assert out
assert not err
assert not code
def test_run(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("m2-patch" if on_win else "patch") as prefix:
out, err, code = conda_cli("run", "--prefix", prefix, "patch", "--help")
assert out
assert not err
assert not code
def METHOD_NAME(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("search", "python")
assert out
assert not err
assert not code
@pytest.mark.parametrize("subcommand", ["update", "upgrade"])
def test_update(subcommand: str, conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env("ca-certificates<2023") as prefix:
out, err, code = conda_cli(subcommand, "--prefix", prefix, "--all", "--yes")
assert out
assert not err
assert not code
def test_env_list(conda_cli: CondaCLIFixture):
assert conda_cli("env", "list") == conda_cli("info", "--envs")
def test_env_export(conda_cli: CondaCLIFixture):
out, err, code = conda_cli("env", "export")
assert out
assert not err
assert not code
def test_env_remove(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli("env", "remove", "--prefix", prefix, "--yes")
assert out
assert not err
assert not code
def test_env_create(
conda_cli: CondaCLIFixture,
path_factory: PathFactoryFixture,
environment_yml: Path,
):
out, err, code = conda_cli(
"env",
"create",
*("--prefix", path_factory()),
*("--file", environment_yml),
)
assert out
assert not err
assert not code
def test_env_update(
conda_cli: CondaCLIFixture,
tmp_env: TmpEnvFixture,
environment_yml: Path,
):
with tmp_env("ca-certificates<2023") as prefix:
out, err, code = conda_cli(
"env",
"update",
*("--prefix", prefix),
*("--file", environment_yml),
)
assert out
assert not err
assert not code
def test_env_config_vars(conda_cli: CondaCLIFixture, tmp_env: TmpEnvFixture):
with tmp_env() as prefix:
out, err, code = conda_cli(
"env",
"config",
"vars",
"set",
*("--prefix", prefix),
"FOO=foo",
)
assert not out
assert not err
assert not code
out, err, code = conda_cli("env", "config", "vars", "list", "--prefix", prefix)
assert out
assert not err
assert not code
out, err, code = conda_cli(
"env",
"config",
"vars",
"unset",
*("--prefix", prefix),
"FOO",
)
assert not out
assert not err
assert not code | null |
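def test_create_install_remove_roundtrip(
    conda_cli: CondaCLIFixture,
    path_factory: PathFactoryFixture,
):
    # Illustrative sketch, not part of the original suite: chain the same
    # fixtures used above through a create/install/remove lifecycle.
    prefix = path_factory()
    for args in (
        ("create", "--prefix", prefix, "--yes"),
        ("install", "--prefix", prefix, "ca-certificates", "--yes"),
        ("remove", "--prefix", prefix, "--all", "--yes"),
    ):
        out, err, code = conda_cli(*args)
        assert out
        assert not err
        assert not code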
765 | # -*- coding: utf-8 -*-
import logging
from addons.base.models import (BaseOAuthNodeSettings, BaseOAuthUserSettings,
BaseStorageAddon)
from django.db import models
from framework.auth import Auth
from osf.models.files import File, Folder, BaseFileNode
from owncloud import Client as OwnCloudClient
from addons.base import exceptions
from addons.owncloud import settings
from addons.owncloud.serializer import OwnCloudSerializer
from addons.owncloud.settings import DEFAULT_HOSTS, USE_SSL
from osf.models.external import BasicAuthProviderMixin
from website.util import api_v2_url
logger = logging.getLogger(__name__)
class OwncloudFileNode(BaseFileNode):
_provider = 'owncloud'
class OwncloudFolder(OwncloudFileNode, Folder):
pass
class OwncloudFile(OwncloudFileNode, File):
@property
def _hashes(self):
# ownCloud API doesn't provide this metadata
return None
class OwnCloudProvider(BasicAuthProviderMixin):
"""An alternative to `ExternalProvider` not tied to OAuth"""
name = 'ownCloud'
short_name = 'owncloud'
def __init__(self, account=None, host=None, username=None, password=None):
if username:
username = username.lower()
return super(OwnCloudProvider, self).__init__(account=account, host=host, username=username, password=password)
def __repr__(self):
return '<{name}: {status}>'.format(
name=self.__class__.__name__,
status=self.account.display_name if self.account else 'anonymous'
)
class UserSettings(BaseOAuthUserSettings):
oauth_provider = OwnCloudProvider
serializer = OwnCloudSerializer
def to_json(self, user):
ret = super(UserSettings, self).to_json(user)
ret['hosts'] = DEFAULT_HOSTS
return ret
class NodeSettings(BaseOAuthNodeSettings, BaseStorageAddon):
oauth_provider = OwnCloudProvider
serializer = OwnCloudSerializer
folder_id = models.TextField(blank=True, null=True)
user_settings = models.ForeignKey(UserSettings, null=True, blank=True, on_delete=models.CASCADE)
_api = None
@property
def api(self):
if self._api is None:
self._api = OwnCloudProvider(self.external_account)
return self._api
@property
def folder_path(self):
return self.folder_id
@property
def folder_name(self):
return self.folder_id
def set_folder(self, folder, auth=None):
if folder == '/ (Full ownCloud)':
folder = '/'
self.folder_id = folder
self.save()
self.nodelogger.log(action='folder_selected', save=True)
def fetch_folder_name(self):
if self.folder_id == '/':
return '/ (Full ownCloud)'
return self.folder_id.strip('/').split('/')[-1]
def clear_settings(self):
self.folder_id = None
def deauthorize(self, auth=None, add_log=True):
"""Remove user authorization from this node and log the event."""
self.clear_settings()
if add_log:
self.nodelogger.log(action='node_deauthorized')
self.clear_auth() # Also performs a .save()
def serialize_waterbutler_credentials(self):
if not self.has_auth:
raise exceptions.AddonError('Addon is not authorized')
provider = OwnCloudProvider(self.external_account)
return {
'host': provider.host,
'username': provider.username,
'password': provider.password
}
def serialize_waterbutler_settings(self):
if not self.folder_id:
raise exceptions.AddonError('ownCloud is not configured')
return {
'folder': self.folder_id,
'verify_ssl': USE_SSL
}
def create_waterbutler_log(self, auth, action, metadata):
url = self.owner.web_url_for('addon_view_or_download_file',
path=metadata['path'], provider='owncloud')
self.owner.add_log(
'owncloud_{0}'.format(action),
auth=auth,
params={
'project': self.owner.parent_id,
'node': self.owner._id,
'folder': self.folder_id,
'path': metadata['materialized'].lstrip('/'),
'urls': {
'view': url,
'download': url + '?action=download'
},
},
)
def after_delete(self, user):
self.deauthorize(Auth(user=user), add_log=True)
self.save()
def on_delete(self):
self.deauthorize(add_log=False)
self.save()
def METHOD_NAME(self, **kwargs):
path = kwargs.get('path')
if path is None:
return [{
'addon': 'owncloud',
'path': '/',
'kind': 'folder',
'id': '/',
'name': '/ (Full ownCloud)',
'urls': {
'folders': api_v2_url('nodes/{}/addons/owncloud/folders/'.format(self.owner._id),
params={
'path': '/',
})
}
}]
provider = OwnCloudProvider(account=self.external_account)
c = OwnCloudClient(provider.host, verify_certs=settings.USE_SSL)
c.login(provider.username, provider.password)
ret = []
for item in c.list(path):
            if item.file_type == 'dir':
ret.append({
'addon': 'owncloud',
'path': item.path,
'kind': 'folder',
'id': item.path,
'name': item.path.strip('/').split('/')[-1],
'urls': {
'folders': api_v2_url('nodes/{}/addons/owncloud/folders/'.format(self.owner._id),
params={
'path': item.path,
})
}
})
return ret | null |
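# Illustrative sketch (hypothetical values; these Django models need a
# configured OSF database, so this is not runnable standalone):
#
#     node_settings.set_folder('/Documents', auth=auth)
#     node_settings.serialize_waterbutler_settings()
#     # -> {'folder': '/Documents', 'verify_ssl': USE_SSL}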
766 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class QueryAdvancedDomainListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'QueryAdvancedDomainList')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ProductDomainType(self): # String
return self.get_query_params().get('ProductDomainType')
def set_ProductDomainType(self, ProductDomainType): # String
self.add_query_param('ProductDomainType', ProductDomainType)
def get_PageNum(self): # Integer
return self.get_query_params().get('PageNum')
def set_PageNum(self, PageNum): # Integer
self.add_query_param('PageNum', PageNum)
def get_Excluded(self): # String
return self.get_query_params().get('Excluded')
def set_Excluded(self, Excluded): # String
self.add_query_param('Excluded', Excluded)
def get_StartLength(self): # Integer
return self.get_query_params().get('StartLength')
def set_StartLength(self, StartLength): # Integer
self.add_query_param('StartLength', StartLength)
def get_ExcludedSuffix(self): # Boolean
return self.get_query_params().get('ExcludedSuffix')
def set_ExcludedSuffix(self, ExcludedSuffix): # Boolean
self.add_query_param('ExcludedSuffix', ExcludedSuffix)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def METHOD_NAME(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_ExcludedPrefix(self): # Boolean
return self.get_query_params().get('ExcludedPrefix')
def set_ExcludedPrefix(self, ExcludedPrefix): # Boolean
self.add_query_param('ExcludedPrefix', ExcludedPrefix)
def get_KeyWord(self): # String
return self.get_query_params().get('KeyWord')
def set_KeyWord(self, KeyWord): # String
self.add_query_param('KeyWord', KeyWord)
def get_ProductDomainTypeSort(self): # Boolean
return self.get_query_params().get('ProductDomainTypeSort')
def set_ProductDomainTypeSort(self, ProductDomainTypeSort): # Boolean
self.add_query_param('ProductDomainTypeSort', ProductDomainTypeSort)
def get_EndExpirationDate(self): # Long
return self.get_query_params().get('EndExpirationDate')
def set_EndExpirationDate(self, EndExpirationDate): # Long
self.add_query_param('EndExpirationDate', EndExpirationDate)
def get_Suffixs(self): # String
return self.get_query_params().get('Suffixs')
def set_Suffixs(self, Suffixs): # String
self.add_query_param('Suffixs', Suffixs)
def get_DomainNameSort(self): # Boolean
return self.get_query_params().get('DomainNameSort')
def set_DomainNameSort(self, DomainNameSort): # Boolean
self.add_query_param('DomainNameSort', DomainNameSort)
def get_ExpirationDateSort(self): # Boolean
return self.get_query_params().get('ExpirationDateSort')
def set_ExpirationDateSort(self, ExpirationDateSort): # Boolean
self.add_query_param('ExpirationDateSort', ExpirationDateSort)
def get_StartExpirationDate(self): # Long
return self.get_query_params().get('StartExpirationDate')
def set_StartExpirationDate(self, StartExpirationDate): # Long
self.add_query_param('StartExpirationDate', StartExpirationDate)
def get_DomainStatus(self): # Integer
return self.get_query_params().get('DomainStatus')
def set_DomainStatus(self, DomainStatus): # Integer
self.add_query_param('DomainStatus', DomainStatus)
def get_DomainGroupId(self): # Long
return self.get_query_params().get('DomainGroupId')
def set_DomainGroupId(self, DomainGroupId): # Long
self.add_query_param('DomainGroupId', DomainGroupId)
def get_KeyWordSuffix(self): # Boolean
return self.get_query_params().get('KeyWordSuffix')
def set_KeyWordSuffix(self, KeyWordSuffix): # Boolean
self.add_query_param('KeyWordSuffix', KeyWordSuffix)
def get_KeyWordPrefix(self): # Boolean
return self.get_query_params().get('KeyWordPrefix')
def set_KeyWordPrefix(self, KeyWordPrefix): # Boolean
self.add_query_param('KeyWordPrefix', KeyWordPrefix)
def get_TradeType(self): # Integer
return self.get_query_params().get('TradeType')
def set_TradeType(self, TradeType): # Integer
self.add_query_param('TradeType', TradeType)
def get_EndRegistrationDate(self): # Long
return self.get_query_params().get('EndRegistrationDate')
def set_EndRegistrationDate(self, EndRegistrationDate): # Long
self.add_query_param('EndRegistrationDate', EndRegistrationDate)
def get_Form(self): # Integer
return self.get_query_params().get('Form')
def set_Form(self, Form): # Integer
self.add_query_param('Form', Form)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_RegistrationDateSort(self): # Boolean
return self.get_query_params().get('RegistrationDateSort')
def set_RegistrationDateSort(self, RegistrationDateSort): # Boolean
self.add_query_param('RegistrationDateSort', RegistrationDateSort)
def get_StartRegistrationDate(self): # Long
return self.get_query_params().get('StartRegistrationDate')
def set_StartRegistrationDate(self, StartRegistrationDate): # Long
self.add_query_param('StartRegistrationDate', StartRegistrationDate)
def get_EndLength(self): # Integer
return self.get_query_params().get('EndLength')
def set_EndLength(self, EndLength): # Integer
self.add_query_param('EndLength', EndLength) | null |
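if __name__ == "__main__":
	# Illustrative sketch, not from the original file: a paged, sorted query.
	# All values below are hypothetical; sending the request requires an
	# AcsClient with real credentials.
	request = QueryAdvancedDomainListRequest()
	request.set_PageNum(1)
	request.set_PageSize(50)
	request.set_KeyWord("example")
	request.set_ExpirationDateSort(True)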
767 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveSingleTaskForCreatingOrderActivateRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveSingleTaskForCreatingOrderActivate')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Country(self): # String
return self.get_query_params().get('Country')
def set_Country(self, Country): # String
self.add_query_param('Country', Country)
def get_SubscriptionDuration(self): # Integer
return self.get_query_params().get('SubscriptionDuration')
def set_SubscriptionDuration(self, SubscriptionDuration): # Integer
self.add_query_param('SubscriptionDuration', SubscriptionDuration)
def get_PermitPremiumActivation(self): # Boolean
return self.get_query_params().get('PermitPremiumActivation')
def set_PermitPremiumActivation(self, PermitPremiumActivation): # Boolean
self.add_query_param('PermitPremiumActivation', PermitPremiumActivation)
def get_City(self): # String
return self.get_query_params().get('City')
def set_City(self, City): # String
self.add_query_param('City', City)
def get_Dns2(self): # String
return self.get_query_params().get('Dns2')
def set_Dns2(self, Dns2): # String
self.add_query_param('Dns2', Dns2)
def get_Dns1(self): # String
return self.get_query_params().get('Dns1')
def set_Dns1(self, Dns1): # String
self.add_query_param('Dns1', Dns1)
def get_RegistrantProfileId(self): # Long
return self.get_query_params().get('RegistrantProfileId')
def set_RegistrantProfileId(self, RegistrantProfileId): # Long
self.add_query_param('RegistrantProfileId', RegistrantProfileId)
def get_CouponNo(self): # String
return self.get_query_params().get('CouponNo')
def set_CouponNo(self, CouponNo): # String
self.add_query_param('CouponNo', CouponNo)
def get_AliyunDns(self): # Boolean
return self.get_query_params().get('AliyunDns')
def set_AliyunDns(self, AliyunDns): # Boolean
self.add_query_param('AliyunDns', AliyunDns)
def get_ZhCity(self): # String
return self.get_query_params().get('ZhCity')
def set_ZhCity(self, ZhCity): # String
self.add_query_param('ZhCity', ZhCity)
def get_TelExt(self): # String
return self.get_query_params().get('TelExt')
def set_TelExt(self, TelExt): # String
self.add_query_param('TelExt', TelExt)
def get_ZhRegistrantName(self): # String
return self.get_query_params().get('ZhRegistrantName')
def set_ZhRegistrantName(self, ZhRegistrantName): # String
self.add_query_param('ZhRegistrantName', ZhRegistrantName)
def get_Province(self): # String
return self.get_query_params().get('Province')
def set_Province(self, Province): # String
self.add_query_param('Province', Province)
def get_PostalCode(self): # String
return self.get_query_params().get('PostalCode')
def set_PostalCode(self, PostalCode): # String
self.add_query_param('PostalCode', PostalCode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Email(self): # String
return self.get_query_params().get('Email')
def set_Email(self, Email): # String
self.add_query_param('Email', Email)
def get_ZhRegistrantOrganization(self): # String
return self.get_query_params().get('ZhRegistrantOrganization')
def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String
self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization)
def get_Address(self): # String
return self.get_query_params().get('Address')
def set_Address(self, Address): # String
self.add_query_param('Address', Address)
def get_TelArea(self): # String
return self.get_query_params().get('TelArea')
def set_TelArea(self, TelArea): # String
self.add_query_param('TelArea', TelArea)
def get_DomainName(self): # String
return self.get_query_params().get('DomainName')
def set_DomainName(self, DomainName): # String
self.add_query_param('DomainName', DomainName)
def get_ZhAddress(self): # String
return self.get_query_params().get('ZhAddress')
def set_ZhAddress(self, ZhAddress): # String
self.add_query_param('ZhAddress', ZhAddress)
def METHOD_NAME(self): # String
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self, RegistrantType): # String
self.add_query_param('RegistrantType', RegistrantType)
def get_Telephone(self): # String
return self.get_query_params().get('Telephone')
def set_Telephone(self, Telephone): # String
self.add_query_param('Telephone', Telephone)
def get_TrademarkDomainActivation(self): # Boolean
return self.get_query_params().get('TrademarkDomainActivation')
def set_TrademarkDomainActivation(self, TrademarkDomainActivation): # Boolean
self.add_query_param('TrademarkDomainActivation', TrademarkDomainActivation)
def get_UseCoupon(self): # Boolean
return self.get_query_params().get('UseCoupon')
def set_UseCoupon(self, UseCoupon): # Boolean
self.add_query_param('UseCoupon', UseCoupon)
def get_ZhProvince(self): # String
return self.get_query_params().get('ZhProvince')
def set_ZhProvince(self, ZhProvince): # String
self.add_query_param('ZhProvince', ZhProvince)
def get_RegistrantOrganization(self): # String
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self, RegistrantOrganization): # String
self.add_query_param('RegistrantOrganization', RegistrantOrganization)
def get_PromotionNo(self): # String
return self.get_query_params().get('PromotionNo')
def set_PromotionNo(self, PromotionNo): # String
self.add_query_param('PromotionNo', PromotionNo)
def get_EnableDomainProxy(self): # Boolean
return self.get_query_params().get('EnableDomainProxy')
def set_EnableDomainProxy(self, EnableDomainProxy): # Boolean
self.add_query_param('EnableDomainProxy', EnableDomainProxy)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_RegistrantName(self): # String
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self, RegistrantName): # String
self.add_query_param('RegistrantName', RegistrantName)
def get_UsePromotion(self): # Boolean
return self.get_query_params().get('UsePromotion')
def set_UsePromotion(self, UsePromotion): # Boolean
self.add_query_param('UsePromotion', UsePromotion) | null |
768 | #
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This is the base Formatter class. Its purpose is to convert a content
# processor's data into specific documents (i.e., table of contents, global
# index, and individual API reference indices).
#
# You need to sub-class it to output anything sensible. For example, the
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
# to output HTML.
#
from sources import *
from content import *
from utils import *
################################################################
##
## FORMATTER CLASS
##
class Formatter:
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index.sort( key = index_key )
# also add section names to dictionary (without making them appear
# in the index)
for section in self.sections:
self.add_identifier( section.name, section )
def add_identifier( self, name, block ):
if name in self.identifiers:
# duplicate name!
sys.stderr.write( "WARNING: duplicate definition for"
+ " '" + name + "' "
+ "in " + block.location() + ", "
+ "previous definition in "
+ self.identifiers[name].location()
+ "\n" )
else:
self.identifiers[name] = block
#
# formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def METHOD_NAME( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = open_output( toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.METHOD_NAME()
if output:
close_output( output )
#
# formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = open_output( index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
close_output( output )
#
# formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block = None ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block = None ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
if section_filename:
output = open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
skip_entry = 0
try:
block = self.identifiers[name]
# `block_names' can contain field names also,
# which we filter out
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name == name:
skip_entry = 1
            except KeyError:
skip_entry = 1 # this happens e.g. for `/empty/' entries
if skip_entry:
continue
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
close_output( output )
def section_dump_all( self ):
for section in self.sections:
self.section_dump( section )
# eof | null |
769 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeInstancesOverviewRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeInstancesOverview','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def METHOD_NAME(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_EditionType(self): # String
return self.get_query_params().get('EditionType')
def set_EditionType(self, EditionType): # String
self.add_query_param('EditionType', EditionType)
def get_InstanceStatus(self): # String
return self.get_query_params().get('InstanceStatus')
def set_InstanceStatus(self, InstanceStatus): # String
self.add_query_param('InstanceStatus', InstanceStatus)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_PrivateIp(self): # String
return self.get_query_params().get('PrivateIp')
def set_PrivateIp(self, PrivateIp): # String
self.add_query_param('PrivateIp', PrivateIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceIds(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_ArchitectureType(self): # String
return self.get_query_params().get('ArchitectureType')
def set_ArchitectureType(self, ArchitectureType): # String
self.add_query_param('ArchitectureType', ArchitectureType)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
770 | import argparse
import json
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
p = argparse.ArgumentParser()
p.add_argument("--datadir", default="data", help="Location of MNIST data")
p.add_argument(
"--rundir",
default=".",
help="Location to write prepared data, logs and checkpoints",
)
p.add_argument(
"--batch_size", type=int, default=100, help="Batch size used for training"
)
p.add_argument("--epochs", type=int, default=10, help="Number of epochs to train")
p.add_argument(
"--prepare",
dest='just_data',
action="store_true",
help="Just prepare data - don't train",
)
p.add_argument(
"--test", action="store_true", help="Evaluate a trained model with test data"
)
FLAGS = p.parse_args()
def METHOD_NAME():
global mnist
mnist = input_data.read_data_sets(FLAGS.datadir, one_hot=True)
def init_train():
init_model()
init_train_op()
init_eval_op()
init_summaries()
init_collections()
init_session()
def init_model():
global x, y, W, b
x = tf.compat.v1.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
def init_train_op():
global y_, loss, train_op
y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])
    loss = tf.reduce_mean(-tf.reduce_sum(y_ * tf.math.log(y), axis=[1]))
train_op = tf.compat.v1.train.GradientDescentOptimizer(0.5).minimize(loss)
def init_eval_op():
global accuracy
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def init_summaries():
init_inputs_summary()
init_variable_summaries(W, "weights")
init_variable_summaries(b, "biases")
init_op_summaries()
init_summary_writers()
def init_inputs_summary():
tf.compat.v1.summary.image("inputs", tf.reshape(x, [-1, 28, 28, 1]), 10)
def init_variable_summaries(var, name):
with tf.name_scope(name):
mean = tf.reduce_mean(var)
tf.compat.v1.summary.scalar("mean", mean)
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.compat.v1.summary.scalar("stddev", stddev)
tf.compat.v1.summary.scalar("max", tf.reduce_max(var))
tf.compat.v1.summary.scalar("min", tf.reduce_min(var))
tf.compat.v1.summary.histogram(name, var)
def init_op_summaries():
tf.compat.v1.summary.scalar("loss", loss)
tf.compat.v1.summary.scalar("acc", accuracy)
def init_summary_writers():
global summaries, train_writer, validate_writer
summaries = tf.compat.v1.summary.merge_all()
train_writer = tf.compat.v1.summary.FileWriter(
FLAGS.rundir, tf.compat.v1.get_default_graph()
)
validate_writer = tf.compat.v1.summary.FileWriter(FLAGS.rundir + "/val")
def init_collections():
tf.compat.v1.add_to_collection("inputs", json.dumps({"image": x.name}))
tf.compat.v1.add_to_collection("outputs", json.dumps({"prediction": y.name}))
tf.compat.v1.add_to_collection("x", x.name)
tf.compat.v1.add_to_collection("y_", y_.name)
tf.compat.v1.add_to_collection("acc", accuracy.name)
def init_session():
global sess
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
def train():
steps = (mnist.train.num_examples // FLAGS.batch_size) * FLAGS.epochs
for step in range(steps):
images, labels = mnist.train.next_batch(FLAGS.batch_size)
batch = {x: images, y_: labels}
sess.run(train_op, batch)
maybe_log_accuracy(step, batch)
maybe_save_model(step)
save_model()
def maybe_log_accuracy(step, last_training_batch):
if step % 20 == 0:
evaluate(step, last_training_batch, train_writer, "training")
validate_data = {x: mnist.validation.images, y_: mnist.validation.labels}
evaluate(step, validate_data, validate_writer, "validate")
def evaluate(step, data, writer, name):
accuracy_val, summary = sess.run([accuracy, summaries], data)
writer.add_summary(summary, step)
writer.flush()
print("Step %i: %s=%f" % (step, name, accuracy_val))
def maybe_save_model(step):
    epoch_step = mnist.train.num_examples // FLAGS.batch_size
if step != 0 and step % epoch_step == 0:
save_model()
def save_model():
print("Saving trained model")
tf.io.gfile.makedirs(FLAGS.rundir + "/model")
tf.compat.v1.train.Saver().save(sess, FLAGS.rundir + "/model/export")
def init_test():
init_session()
init_exported_collections()
init_test_writer()
def init_exported_collections():
global x, y_, accuracy
    saver = tf.compat.v1.train.import_meta_graph(FLAGS.rundir + "/model/export.meta")
saver.restore(sess, FLAGS.rundir + "/model/export")
x = tensor_by_collection_name("x")
y_ = tensor_by_collection_name("y_")
accuracy = tensor_by_collection_name("acc")
def tensor_by_collection_name(name):
    tensor_name = tf.compat.v1.get_collection(name)[0].decode("UTF-8")
return sess.graph.get_tensor_by_name(tensor_name)
def init_test_writer():
global summaries, writer
    summaries = tf.compat.v1.summary.merge_all()
    writer = tf.compat.v1.summary.FileWriter(FLAGS.rundir)
def test():
data = {x: mnist.test.images, y_: mnist.test.labels}
test_accuracy, summary = sess.run([accuracy, summaries], data)
writer.add_summary(summary)
writer.flush()
print("Test accuracy=%f" % test_accuracy)
if __name__ == "__main__":
METHOD_NAME()
if FLAGS.just_data:
pass
elif FLAGS.test:
init_test()
test()
else:
init_train()
train() | null |
771 | # -*- mode: python; coding: utf-8 -*-
"""
Format the UVData object parameters into a sphinx rst file.
"""
import inspect
import os
from astropy.time import Time
from pyuvdata import UVData
def METHOD_NAME(write_file=None):
UV = UVData()
out = "UVData\n======\n\n"
out += (
"UVData is the main user class for intereferometric data (visibilities).\n"
"It provides import and export functionality to and from the supported file\n"
"formats (UVFITS, MeasurementSets, Miriad, uvh5, FHD, MIR) as well as\n"
"numerous methods for transforming the data (phasing, averaging, selecting,\n"
"sorting) and can be interacted with directly.\n\n"
"Attributes\n----------\n"
"The attributes on UVData hold all of the metadata and data required to\n"
"analyze interferometric data sets. Under the hood, the attributes are\n"
"implemented as properties based on :class:`pyuvdata.parameter.UVParameter`\n"
"objects but this is fairly transparent to users.\n\n"
"UVData objects can be initialized from a file using the\n"
":meth:`pyuvdata.UVData.from_file` class method\n"
"(as ``uvd = UVData.from_file(<filename>)``) or be initialized as an empty\n"
"object (as ``uvd = UVData()``). When an empty UVData object is initialized,\n"
"it has all of these attributes defined but set to ``None``. The attributes\n"
"can be set by reading in a data file using the :meth:`pyuvdata.UVData.read`\n"
"method or by setting them directly on the object. Some of these attributes\n"
"are `required`_ to be set to have a fully defined data set while others are\n"
"`optional`_. The :meth:`pyuvdata.UVData.check` method can be called on the\n"
"object to verify that all of the required attributes have been set in a\n"
"consistent way.\n\n"
'Note that objects can be in a "metadata only" state where\n'
"all of the metadata is defined but the data-like attributes (``data_array``,\n"
"``flag_array``, ``nsample_array``) are not. The\n"
":meth:`pyuvdata.UVData.check` method will still pass for metadata only\n"
"objects.\n\n"
"Note that angle type attributes also have convenience properties named the\n"
"same thing with ``_degrees`` appended through which you can get or set the\n"
"value in degrees. Similarly location type attributes (which are given in\n"
"topocentric xyz coordinates) have convenience properties named the\n"
"same thing with ``_lat_lon_alt`` and ``_lat_lon_alt_degrees`` appended\n"
"through which you can get or set the values using latitude, longitude and\n"
"altitude values in radians or degrees and meters.\n\n"
)
out += "Required\n********\n"
out += (
"These parameters are required to have a sensible UVData object and\n"
"are required for most kinds of interferometric data files."
)
out += "\n\n"
for thing in UV.required():
obj = getattr(UV, thing)
out += "**{name}**\n".format(name=obj.name)
out += " {desc}\n".format(desc=obj.description)
out += "\n"
out += "Optional\n********\n"
out += (
"These parameters are defined by one or more file standard but are not\n"
"always required. Some of them are required depending on the\n"
"phase_type (as noted below)."
)
out += "\n\n"
for thing in UV.extra():
obj = getattr(UV, thing)
out += "**{name}**\n".format(name=obj.name)
out += " {desc}\n".format(desc=obj.description)
out += "\n"
out += "Methods\n-------\n.. autoclass:: pyuvdata.UVData\n :members:\n\n"
t = Time.now()
t.format = "iso"
t.out_subfmt = "date"
out += "last updated: {date}".format(date=t.iso)
if write_file is None:
write_path = os.path.dirname(os.path.abspath(inspect.stack()[0][1]))
write_file = os.path.join(write_path, "uvdata.rst")
F = open(write_file, "w")
F.write(out)
print("wrote " + write_file) | null |
772 | import codecs
import collections
import logging
from typing import (
Optional,
Union,
)
from Crypto.Cipher import Blowfish
from Crypto.Random import get_random_bytes
import galaxy.exceptions
from galaxy.util import (
smart_str,
unicodify,
)
log = logging.getLogger(__name__)
MAXIMUM_ID_SECRET_BITS = 448
MAXIMUM_ID_SECRET_LENGTH = int(MAXIMUM_ID_SECRET_BITS / 8)
KIND_TOO_LONG_MESSAGE = (
"Galaxy coding error, keep encryption 'kinds' smaller to utilize more bites of randomness from id_secret values."
)
class IdEncodingHelper:
def __init__(self, **config):
id_secret = config["id_secret"]
self.id_secret = id_secret
self.id_cipher = Blowfish.new(smart_str(self.id_secret), mode=Blowfish.MODE_ECB)
per_kind_id_secret_base = config.get("per_kind_id_secret_base", self.id_secret)
self.id_ciphers_for_kind = _cipher_cache(per_kind_id_secret_base)
def encode_id(self, obj_id, kind=None):
if obj_id is None:
raise galaxy.exceptions.MalformedId("Attempted to encode None id")
id_cipher = self.__id_cipher(kind)
# Convert to bytes
s = smart_str(obj_id)
# Pad to a multiple of 8 with leading "!"
s = (b"!" * (8 - len(s) % 8)) + s
# Encrypt
return unicodify(codecs.encode(id_cipher.encrypt(s), "hex"))
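    # Illustrative round-trip (the secret and id below are made up):
    #   helper = IdEncodingHelper(id_secret="some configured secret")
    #   encoded = helper.encode_id(42)          # hex string
    #   assert helper.decode_id(encoded) == 42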
def encode_dict_ids(self, a_dict, kind=None, skip_startswith=None):
"""
Encode all ids in dictionary. Ids are identified by (a) an 'id' key or
(b) a key that ends with '_id'
"""
for key, val in a_dict.items():
if key == "id" or key.endswith("_id") and (skip_startswith is None or not key.startswith(skip_startswith)):
a_dict[key] = self.encode_id(val, kind=kind)
return a_dict
def METHOD_NAME(self, rval, recursive=False):
"""
Encodes all integer values in the dict rval whose keys are 'id' or end
        with '_id', excluding `tool_id` and `external_id`, which are consumed and produced as-is
via the API.
"""
if not isinstance(rval, dict):
return rval
for k, v in rval.items():
if (k == "id" or k.endswith("_id")) and v is not None and k not in ["tool_id", "external_id"]:
try:
rval[k] = self.encode_id(v)
except Exception:
pass # probably already encoded
if k.endswith("_ids") and isinstance(v, list):
try:
o = []
for i in v:
o.append(self.encode_id(i))
rval[k] = o
except Exception:
pass
else:
if recursive and isinstance(v, dict):
rval[k] = self.METHOD_NAME(v, recursive)
elif recursive and isinstance(v, list):
rval[k] = [self.METHOD_NAME(el, True) for el in v]
return rval
def decode_id(self, obj_id, kind=None, object_name: Optional[str] = None):
try:
id_cipher = self.__id_cipher(kind)
return int(unicodify(id_cipher.decrypt(codecs.decode(obj_id, "hex"))).lstrip("!"))
except TypeError:
raise galaxy.exceptions.MalformedId(
f"Malformed {object_name if object_name is not None else ''} id ( {obj_id} ) specified, unable to decode."
)
except ValueError:
raise galaxy.exceptions.MalformedId(
f"Wrong {object_name if object_name is not None else ''} id ( {obj_id} ) specified, unable to decode."
)
def encode_guid(self, session_key):
# Session keys are strings
# Pad to a multiple of 8 with leading "!"
session_key = smart_str(session_key)
s = (b"!" * (8 - len(session_key) % 8)) + session_key
# Encrypt
return codecs.encode(self.id_cipher.encrypt(s), "hex")
def decode_guid(self, session_key: Union[bytes, str]) -> str:
# Session keys are strings
try:
decoded_session_key = codecs.decode(session_key, "hex")
stripped_decoded_session_key = unicodify(self.id_cipher.decrypt(decoded_session_key)).lstrip("!")
# Ensure session key is hexadecimal value
int(stripped_decoded_session_key, 16)
return stripped_decoded_session_key
except TypeError:
raise galaxy.exceptions.MalformedId(f"Malformed guid '{session_key!r}' specified, unable to decode.")
except ValueError:
raise galaxy.exceptions.MalformedId(f"Wrong guid '{session_key!r}' specified, unable to decode.")
def get_new_guid(self):
# Generate a unique, high entropy 128 bit random number
return unicodify(codecs.encode(get_random_bytes(16), "hex"))
def __id_cipher(self, kind):
if not kind:
id_cipher = self.id_cipher
else:
id_cipher = self.id_ciphers_for_kind[kind]
return id_cipher
class _cipher_cache(collections.defaultdict):
def __init__(self, secret_base):
self.secret_base = secret_base
    def __missing__(self, key):
        assert len(key) < 15, KIND_TOO_LONG_MESSAGE
        secret = f"{self.secret_base}__{key}"
        # Store the cipher so later lookups for the same kind reuse it
        # (matching the class name) instead of rebuilding it each time.
        cipher = Blowfish.new(_last_bits(secret), mode=Blowfish.MODE_ECB)
        self[key] = cipher
        return cipher
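# Illustrative use of the cache above (secret value is made up):
#   ciphers = _cipher_cache("some base secret")
#   cipher = ciphers["history"]   # a Blowfish cipher keyed on "some base secret__history"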
def _last_bits(secret):
"""We append the kind at the end, so just use the bits at the end."""
last_bits = smart_str(secret)
if len(last_bits) > MAXIMUM_ID_SECRET_LENGTH:
last_bits = last_bits[-MAXIMUM_ID_SECRET_LENGTH:]
return last_bits | null |
773 | """
API operations on Quota objects.
"""
import logging
from fastapi import Path
from fastapi.param_functions import Body
from galaxy.managers.context import ProvidesUserContext
from galaxy.quota._schema import (
CreateQuotaParams,
CreateQuotaResult,
DeleteQuotaPayload,
QuotaDetails,
QuotaSummaryList,
UpdateQuotaParams,
)
from galaxy.schema.fields import DecodedDatabaseIdField
from galaxy.webapps.galaxy.api import (
depends,
DependsOnTrans,
Router,
)
from galaxy.webapps.galaxy.services.quotas import QuotasService
log = logging.getLogger(__name__)
router = Router(tags=["quotas"])
QuotaIdPathParam: DecodedDatabaseIdField = Path(
..., title="Quota ID", description="The encoded identifier of the Quota." # Required
)
@router.cbv
class FastAPIQuota:
service: QuotasService = depends(QuotasService)
@router.get(
"/api/quotas",
summary="Displays a list with information of quotas that are currently active.",
require_admin=True,
)
def METHOD_NAME(
self,
trans: ProvidesUserContext = DependsOnTrans,
) -> QuotaSummaryList:
"""Displays a list with information of quotas that are currently active."""
return self.service.METHOD_NAME(trans)
@router.get(
"/api/quotas/deleted",
summary="Displays a list with information of quotas that have been deleted.",
require_admin=True,
)
def index_deleted(
self,
trans: ProvidesUserContext = DependsOnTrans,
) -> QuotaSummaryList:
"""Displays a list with information of quotas that have been deleted."""
return self.service.METHOD_NAME(trans, deleted=True)
@router.get(
"/api/quotas/{id}",
name="quota",
summary="Displays details on a particular active quota.",
require_admin=True,
)
def show(
self, trans: ProvidesUserContext = DependsOnTrans, id: DecodedDatabaseIdField = QuotaIdPathParam
) -> QuotaDetails:
"""Displays details on a particular active quota."""
return self.service.show(trans, id)
@router.get(
"/api/quotas/deleted/{id}",
name="deleted_quota",
summary="Displays details on a particular quota that has been deleted.",
require_admin=True,
)
def show_deleted(
self,
trans: ProvidesUserContext = DependsOnTrans,
id: DecodedDatabaseIdField = QuotaIdPathParam,
) -> QuotaDetails:
"""Displays details on a particular quota that has been deleted."""
return self.service.show(trans, id, deleted=True)
@router.post(
"/api/quotas",
summary="Creates a new quota.",
require_admin=True,
)
def create(
self,
payload: CreateQuotaParams,
trans: ProvidesUserContext = DependsOnTrans,
) -> CreateQuotaResult:
"""Creates a new quota."""
return self.service.create(trans, payload)
@router.put(
"/api/quotas/{id}",
summary="Updates an existing quota.",
require_admin=True,
)
def update(
self,
payload: UpdateQuotaParams,
id: DecodedDatabaseIdField = QuotaIdPathParam,
trans: ProvidesUserContext = DependsOnTrans,
) -> str:
"""Updates an existing quota."""
return self.service.update(trans, id, payload)
@router.delete(
"/api/quotas/{id}",
summary="Deletes an existing quota.",
require_admin=True,
)
def delete(
self,
id: DecodedDatabaseIdField = QuotaIdPathParam,
trans: ProvidesUserContext = DependsOnTrans,
payload: DeleteQuotaPayload = Body(None), # Optional
) -> str:
"""Deletes an existing quota."""
return self.service.delete(trans, id, payload)
@router.post(
"/api/quotas/deleted/{id}/undelete",
summary="Restores a previously deleted quota.",
require_admin=True,
)
def undelete(
self,
id: DecodedDatabaseIdField = QuotaIdPathParam,
trans: ProvidesUserContext = DependsOnTrans,
) -> str:
"""Restores a previously deleted quota."""
return self.service.undelete(trans, id) | null |
774 | # Copyright 2017-2023 Posit Software, PBC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from guild import click_util
from . import ac_support
def _ac_python(_ctx, _param, incomplete):
return ac_support.ac_python(incomplete)
def _ac_guild_version_or_path(_ctx, _param, incomplete):
versions = [ver for ver in _guild_versions() if ver.startswith(incomplete)]
return versions + ac_support.ac_filename(["whl"], incomplete)
def _guild_versions():
import json
from urllib.request import urlopen
def f():
resp = urlopen("https://pypi.org/pypi/guildai/json")
data = json.loads(resp.read())
return sorted(data.get("releases") or {})
return ac_support.ac_safe_apply(f, []) or []
def _ac_guild_home(_ctx, _param, incomplete):
return ac_support.ac_dir(incomplete)
def METHOD_NAME(_ctx, _param, incomplete):
return ac_support.ac_filename(["txt"], incomplete)
def _ac_dir(_ctx, _param, incomplete):
return ac_support.ac_dir(incomplete)
@click.command()
@click.argument("dir", default=None, required=False, shell_complete=_ac_dir)
@click.option(
"--venv",
is_flag=True,
help=(
"Creates a virtual environment in DIR. This option "
"enabled pre-0.9 init behavior and is implied when "
"specifying any of the virtual environment settings below."
),
)
@click.option(
"-n",
"--name",
metavar="NAME",
help=("Environment name (default is env parent directory name). Implies `--venv`."),
)
@click.option(
"-p",
"--python",
metavar="VERSION",
help=("Version of Python to use for the environment. Implies `--venv`."),
shell_complete=_ac_python,
)
@click.option(
"-g",
"--guild",
metavar="VERSION_OR_PATH",
help=(
"Version of Guild AI to use for the environment. The activate "
"version of Guild is installed by default. This value may "
"alternatively be a path to a Guild wheel distribution. Implies "
"`--venv`."
),
shell_complete=_ac_guild_version_or_path,
)
@click.option(
"-s",
"--system-site-packages",
is_flag=True,
help="Give environment access to system site packages. Implies `--venv`.",
)
@click.option(
"-H",
"--no-isolate",
is_flag=True,
help=(
"Use current Guild home for the environment. Ignored if `--guild-home` "
"is specified."
),
)
@click.option(
"-h",
"--guild-home",
metavar="PATH",
help=(
"Alternative Guild home location for with the environment. "
"By default, Guild home is '.guild' in `DIR`."
),
shell_complete=_ac_guild_home,
)
@click.option(
"-r",
"--requirements",
metavar="REQ",
multiple=True,
help=(
"Install required package or packages defined in a file. May be "
"used multiple times. Implies `--venv`."
),
shell_complete=METHOD_NAME,
)
@click.option(
"-P",
"--path",
metavar="DIR",
multiple=True,
help="Include DIR as a Python path in the environment. Implies `--venv`.",
shell_complete=_ac_dir,
)
@click.option(
"--no-reqs",
is_flag=True,
help=(
"Don't install from requirements.txt or guild.yml in environment "
"parent directory. Implies `--venv`."
),
)
@click.option(
"-l",
"--isolate-resources",
is_flag=True,
help="Use a local cache when initializing an environment.",
)
@click.option(
"-y",
"--yes",
is_flag=True,
help="Initialize a Guild environment without prompting.",
)
@click.option(
"--no-progress",
is_flag=True,
help=(
"Don't show progress when installing environment packages. Ignored "
"if a virtual environment is not created."
),
)
@click.option(
"--pre",
"pre_release",
is_flag=True,
help="Install pre-release versions of applicable packages. Implies `--venv`.",
)
@click_util.use_args
def init(args):
"""Initialize a Guild environment.
By default, creates `.guild` under `DIR`, or the current directory
if `DIR` is omitted.
    NOTE: As of 0.9, this command does NOT create a virtual environment
in `DIR`. To enable pre-0.9 behavior, specify `--venv` along with
any of the applicable virtual environment options. We recommend
creating a virtual environment using standard tools rather than
using this command. Backward compatibility will be maintained via
the `--venv` option.
Options that are associated with virtual environments are noted as
such below.
### Resource Cache
By default resources are cached and shared at the user level in
`~/.guild/cache/resources` so that resources downloaded from one
environment are available to other environments. You can modify
    this behavior to have all resources downloaded locally to the
    environment by specifying `--isolate-resources`.
### Python Interpreter (virtual environments only)
Use `--python` to specify the Python interpreter to use within the
generated virtual environment. By default, the default Python
interpreter for `virtualenv` is used unless `python` is explicitly
    listed as a requirement. If `--venv` is not specified, `--python` is
    ignored.
### Requirements (virtual environments only)
    By default, any required packages listed under packages.requires
    in `guild.yml` in the environment parent directory are installed
    into the environment, as are any packages defined in
    `requirements.txt` in that directory. Use `--no-reqs` to suppress
    both behaviors.
Note that packages defined in `guild.yml` use Guild package names
while packages defined in `requirements.txt` use PyPI package
names.
For information on requirements files, see:
https://bit.ly/guild-help-req-files
You may explicitly specify requirements file using `-r` or
`--requirement`. If `-r, --requirement` is specified, Guild will
not automatically install packages in `requirements.txt` -- that
file must be specified explicitly in the command.
### Guild AI Version (virtual environments only)
By default `init` installs the active version of Guild AI in the
initialized environment. To install a different version, or to
install a Guild wheel distribution file use the `--guild` option.
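    For example, `guild init --venv --python 3.9 my-env` creates a
    virtual environment under `my-env` using Python 3.9 (illustrative
    values).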
"""
from . import init_impl
init_impl.main(args) | null |
775 | """ testing models """
from io import BytesIO
import pathlib
import pytest
from dateutil.parser import parse
from PIL import Image
from django.core.files.base import ContentFile
from django.test import TestCase
from django.utils import timezone
from bookwyrm import models, settings
from bookwyrm.models.book import isbn_10_to_13, isbn_13_to_10
from bookwyrm.settings import ENABLE_THUMBNAIL_GENERATION
class Book(TestCase):
"""not too much going on in the books model but here we are"""
def setUp(self):
"""we'll need some books"""
self.work = models.Work.objects.create(
title="Example Work", remote_id="https://example.com/book/1"
)
self.first_edition = models.Edition.objects.create(
title="Example Edition", parent_work=self.work
)
self.second_edition = models.Edition.objects.create(
title="Another Example Edition",
parent_work=self.work,
)
def test_remote_id(self):
"""fanciness with remote/origin ids"""
remote_id = f"https://{settings.DOMAIN}/book/{self.work.id}"
self.assertEqual(self.work.get_remote_id(), remote_id)
self.assertEqual(self.work.remote_id, remote_id)
def test_generated_links(self):
"""links produced from identifiers"""
book = models.Edition.objects.create(
title="ExEd",
parent_work=self.work,
openlibrary_key="OL123M",
inventaire_id="isbn:123",
)
self.assertEqual(book.openlibrary_link, "https://openlibrary.org/books/OL123M")
self.assertEqual(book.inventaire_link, "https://inventaire.io/entity/isbn:123")
def test_create_book_invalid(self):
"""you shouldn't be able to create Books (only editions and works)"""
self.assertRaises(ValueError, models.Book.objects.create, title="Invalid Book")
def test_isbn_10_to_13(self):
"""checksums and so on"""
isbn_10 = "178816167X"
isbn_13 = isbn_10_to_13(isbn_10)
self.assertEqual(isbn_13, "9781788161671")
isbn_10 = "1-788-16167-X"
isbn_13 = isbn_10_to_13(isbn_10)
self.assertEqual(isbn_13, "9781788161671")
def test_isbn_13_to_10(self):
"""checksums and so on"""
isbn_13 = "9781788161671"
isbn_10 = isbn_13_to_10(isbn_13)
self.assertEqual(isbn_10, "178816167X")
isbn_13 = "978-1788-16167-1"
isbn_10 = isbn_13_to_10(isbn_13)
self.assertEqual(isbn_10, "178816167X")
def test_get_edition_info(self):
"""text slug about an edition"""
book = models.Edition.objects.create(title="Test Edition")
self.assertEqual(book.edition_info, "")
book.physical_format = "worm"
book.save()
self.assertEqual(book.edition_info, "worm")
book.languages = ["English"]
book.save()
self.assertEqual(book.edition_info, "worm")
book.languages = ["Glorbish", "English"]
book.save()
self.assertEqual(book.edition_info, "worm, Glorbish language")
book.published_date = timezone.make_aware(parse("2020"))
book.save()
self.assertEqual(book.edition_info, "worm, Glorbish language, 2020")
self.assertEqual(book.alt_text, "Test Edition (worm, Glorbish language, 2020)")
def test_get_rank(self):
"""sets the data quality index for the book"""
# basic rank
self.assertEqual(self.first_edition.edition_rank, 0)
self.first_edition.description = "hi"
self.first_edition.save()
self.assertEqual(self.first_edition.edition_rank, 1)
@pytest.mark.skipif(
not ENABLE_THUMBNAIL_GENERATION,
reason="Thumbnail generation disabled in settings",
)
def test_thumbnail_fields(self):
"""Just hit them"""
image_file = pathlib.Path(__file__).parent.joinpath(
"../../static/images/default_avi.jpg"
)
image = Image.open(image_file)
output = BytesIO()
image.save(output, format=image.format)
book = models.Edition.objects.create(title="hello")
book.cover.save("test.jpg", ContentFile(output.getvalue()))
self.assertIsNotNone(book.cover_bw_book_xsmall_webp.url)
self.assertIsNotNone(book.cover_bw_book_xsmall_jpg.url)
self.assertIsNotNone(book.cover_bw_book_small_webp.url)
self.assertIsNotNone(book.cover_bw_book_small_jpg.url)
self.assertIsNotNone(book.cover_bw_book_medium_webp.url)
self.assertIsNotNone(book.cover_bw_book_medium_jpg.url)
self.assertIsNotNone(book.cover_bw_book_large_webp.url)
self.assertIsNotNone(book.cover_bw_book_large_jpg.url)
self.assertIsNotNone(book.cover_bw_book_xlarge_webp.url)
self.assertIsNotNone(book.cover_bw_book_xlarge_jpg.url)
self.assertIsNotNone(book.cover_bw_book_xxlarge_webp.url)
self.assertIsNotNone(book.cover_bw_book_xxlarge_jpg.url)
def test_populate_sort_title(self):
"""The sort title should remove the initial article on save"""
books = (
models.Edition.objects.create(
title=f"{article} Test Edition", languages=[langauge]
)
            for language, articles in settings.LANGUAGE_ARTICLES.items()
for article in articles
)
self.assertTrue(all(book.sort_title == "test edition" for book in books))
def METHOD_NAME(self):
"""Fix editions with no works"""
edition = models.Edition.objects.create(title="test")
edition.authors.set([models.Author.objects.create(name="Author Name")])
self.assertIsNone(edition.parent_work)
edition.repair()
edition.refresh_from_db()
self.assertEqual(edition.parent_work.title, "test")
self.assertEqual(edition.parent_work.authors.count(), 1) | null |
776 | from django.contrib.auth import get_user_model
from django.urls import reverse
from nextcloudappstore.api.v1.tests.api import ApiTest
from nextcloudappstore.core.models import App, AppRelease
class AppTest(ApiTest):
def test_apps(self):
url = reverse("api:v1:app", kwargs={"version": "9.1.0"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
def test_delete(self):
App.objects.create(id="news", owner=self.user)
url = reverse("api:v1:app-delete", kwargs={"pk": "news"})
self._login_token()
response = self.api_client.delete(url)
self.assertEqual(204, response.status_code)
with self.assertRaises(App.DoesNotExist):
App.objects.get(id="news")
def test_delete_unauthenticated(self):
App.objects.create(id="news", owner=self.user)
url = reverse("api:v1:app-delete", kwargs={"pk": "news"})
response = self.api_client.delete(url)
self.assertEqual(401, response.status_code)
def test_delete_unauthorized(self):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
App.objects.create(id="news", owner=owner)
url = reverse("api:v1:app-delete", kwargs={"pk": "news"})
self._login()
response = self.api_client.delete(url)
self.assertEqual(403, response.status_code)
def test_delete_co_maintainer(self):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
app = App.objects.create(id="news", owner=owner)
app.co_maintainers.add(self.user)
app.save()
url = reverse("api:v1:app-delete", kwargs={"pk": "news"})
self._login_token()
response = self.api_client.delete(url)
self.assertEqual(403, response.status_code)
def test_delete_not_found(self):
self.api_client.login(username="test", password="test")
url = reverse("api:v1:app-delete", kwargs={"pk": "news"})
self._login()
response = self.api_client.delete(url)
self.assertEqual(404, response.status_code)
def test_releases_platform_min(self):
app = App.objects.create(pk="news", owner=self.user)
AppRelease.objects.create(app=app, version="10.1", platform_version_spec=">=9.1.1")
url = reverse("api:v1:app", kwargs={"version": "9.1.0"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(0, len(response.data))
def test_releases_platform_min_max(self):
app = App.objects.create(pk="news", owner=self.user)
AppRelease.objects.create(app=app, version="10.1", platform_version_spec=">=9.1.1,<9.1.2")
url = reverse("api:v1:app", kwargs={"version": "9.1.2"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(0, len(response.data))
def test_releases_platform_max(self):
app = App.objects.create(pk="news", owner=self.user)
AppRelease.objects.create(app=app, version="10.1", platform_version_spec="<9.1.2")
url = reverse("api:v1:app", kwargs={"version": "9.1.2"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(0, len(response.data))
def METHOD_NAME(self):
app = App.objects.create(pk="news", owner=self.user)
AppRelease.objects.create(app=app, version="10.1", platform_version_spec="<9.2.0")
url = reverse("api:v1:app", kwargs={"version": "9.1.2"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(response.data))
def test_releases_platform_ok(self):
app = App.objects.create(pk="news", owner=self.user)
AppRelease.objects.create(app=app, version="10.1", platform_version_spec=">=9.1.1,<9.1.2")
url = reverse("api:v1:app", kwargs={"version": "9.1.1"})
response = self.api_client.get(url)
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(response.data))
def tearDown(self):
self.user.delete() | null |
777 | # Copyright (c) 2016, 2020-2021 Arm Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2008-2009 The Hewlett-Packard Development Company
# Copyright (c) 2004-2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import sys
from functools import wraps
from . import convert
from .attrdict import attrdict, multiattrdict, optiondict
from .multidict import multidict
# panic() should be called when something happens that should never
# ever happen regardless of what the user does (i.e., an actual m5
# bug).
def panic(fmt, *args):
print("panic:", fmt % args, file=sys.stderr)
sys.exit(1)
# fatal() should be called when the simulation cannot continue due to
# some condition that is the user's fault (bad configuration, invalid
# arguments, etc.) and not a simulator bug.
def fatal(fmt, *args):
print("fatal:", fmt % args, file=sys.stderr)
sys.exit(1)
# warn() should be called when the user should be warned about some condition
# that may or may not be the user's fault, but that they should be made aware
# of as it may affect the simulation or results.
def METHOD_NAME(fmt, *args):
print("warn:", fmt % args, file=sys.stderr)
# inform() should be called when the user should be informed about some
# condition that they may be interested in.
def inform(fmt, *args):
print("info:", fmt % args, file=sys.stdout)
def callOnce(func):
"""Decorator that enables to run a given function only once. Subsequent
calls are discarded."""
@wraps(func)
def wrapper(*args, **kwargs):
if not wrapper.has_run:
wrapper.has_run = True
return func(*args, **kwargs)
wrapper.has_run = False
return wrapper
def deprecated(replacement=None, logger=METHOD_NAME):
"""This decorator warns the user about a deprecated function."""
def decorator(func):
@callOnce
def notifyDeprecation():
try:
func_name = lambda f: f.__module__ + "." + f.__qualname__
message = f"Function {func_name(func)} is deprecated."
if replacement:
message += f" Prefer {func_name(replacement)} instead."
except AttributeError:
message = f"Function {func} is deprecated."
if replacement:
message += f" Prefer {replacement} instead."
logger(message)
@wraps(func)
def wrapper(*args, **kwargs):
notifyDeprecation()
return func(*args, **kwargs)
return wrapper
return decorator
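# Illustrative usage of `deprecated` (both function names are hypothetical):
#
#   def new_func():
#       pass
#
#   @deprecated(replacement=new_func)
#   def old_func():
#       pass
#
# The first call to old_func() logs a warning pointing at new_func; later
# calls run silently thanks to @callOnce.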
class Singleton(type):
def __call__(cls, *args, **kwargs):
if hasattr(cls, "_instance"):
return cls._instance
cls._instance = super().__call__(*args, **kwargs)
return cls._instance
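# Illustrative usage (hypothetical class): every instantiation returns the
# same object.
#
#   class Config(metaclass=Singleton):
#       pass
#
#   assert Config() is Config()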
def addToPath(path):
"""Prepend given directory to system module search path. We may not
need this anymore if we can structure our config library more like a
Python package."""
# if it's a relative path and we know what directory the current
# python script is in, make the path relative to that directory.
if not os.path.isabs(path) and sys.path[0]:
path = os.path.join(sys.path[0], path)
path = os.path.realpath(path)
# sys.path[0] should always refer to the current script's directory,
# so place the new dir right after that.
sys.path.insert(1, path)
def repoPath():
"""
Return the abspath of the gem5 repository.
This is relying on the following structure:
<gem5-repo>/build/<ISA>/gem5.[opt,debug...]
"""
return os.path.dirname(os.path.dirname(os.path.dirname(sys.executable)))
# Apply method to object.
# applyMethod(obj, 'meth', <args>) is equivalent to obj.meth(<args>)
def applyMethod(obj, meth, *args, **kwargs):
return getattr(obj, meth)(*args, **kwargs)
# If the first argument is an (non-sequence) object, apply the named
# method with the given arguments. If the first argument is a
# sequence, apply the method to each element of the sequence (a la
# 'map').
def applyOrMap(objOrSeq, meth, *args, **kwargs):
if not isinstance(objOrSeq, (list, tuple)):
return applyMethod(objOrSeq, meth, *args, **kwargs)
else:
return [applyMethod(o, meth, *args, **kwargs) for o in objOrSeq]
def crossproduct(items):
if len(items) == 1:
for i in items[0]:
yield (i,)
else:
for i in items[0]:
for j in crossproduct(items[1:]):
yield (i,) + j
def flatten(items):
while items:
item = items.pop(0)
if isinstance(item, (list, tuple)):
items[0:0] = item
else:
yield item
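# Examples (illustrative):
#   list(crossproduct([[1, 2], ['a']]))   # -> [(1, 'a'), (2, 'a')]
#   list(flatten([1, [2, [3, 4]], 5]))    # -> [1, 2, 3, 4, 5]
# Note that flatten() consumes (pops from) the list it is given.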
# force scalars to one-element lists for uniformity
def makeList(objOrList):
if isinstance(objOrList, list):
return objOrList
return [objOrList]
def printList(items, indent=4):
line = " " * indent
for i, item in enumerate(items):
if len(line) + len(item) > 76:
print(line)
line = " " * indent
if i < len(items) - 1:
line += f"{item}, "
else:
line += item
print(line)
def isInteractive():
"""Check if the simulator is run interactively or in a batch environment"""
return sys.__stdin__.isatty() | null |
778 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksmartag.endpoint import endpoint_data
class ModifyQosCarRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Smartag', '2018-03-13', 'ModifyQosCar','smartag')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_MinBandwidthAbs(self): # Integer
return self.get_query_params().get('MinBandwidthAbs')
def set_MinBandwidthAbs(self, MinBandwidthAbs): # Integer
self.add_query_param('MinBandwidthAbs', MinBandwidthAbs)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_PercentSourceType(self): # String
return self.get_query_params().get('PercentSourceType')
def set_PercentSourceType(self, PercentSourceType): # String
self.add_query_param('PercentSourceType', PercentSourceType)
def get_QosId(self): # String
return self.get_query_params().get('QosId')
def set_QosId(self, QosId): # String
self.add_query_param('QosId', QosId)
def get_MaxBandwidthAbs(self): # Integer
return self.get_query_params().get('MaxBandwidthAbs')
def set_MaxBandwidthAbs(self, MaxBandwidthAbs): # Integer
self.add_query_param('MaxBandwidthAbs', MaxBandwidthAbs)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_MaxBandwidthPercent(self): # Integer
return self.get_query_params().get('MaxBandwidthPercent')
def set_MaxBandwidthPercent(self, MaxBandwidthPercent): # Integer
self.add_query_param('MaxBandwidthPercent', MaxBandwidthPercent)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def METHOD_NAME(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_QosCarId(self): # String
return self.get_query_params().get('QosCarId')
def set_QosCarId(self, QosCarId): # String
self.add_query_param('QosCarId', QosCarId)
def get_Priority(self): # Integer
return self.get_query_params().get('Priority')
def set_Priority(self, Priority): # Integer
self.add_query_param('Priority', Priority)
def get_MinBandwidthPercent(self): # Integer
return self.get_query_params().get('MinBandwidthPercent')
def set_MinBandwidthPercent(self, MinBandwidthPercent): # Integer
self.add_query_param('MinBandwidthPercent', MinBandwidthPercent)
def get_LimitType(self): # String
return self.get_query_params().get('LimitType')
def set_LimitType(self, LimitType): # String
self.add_query_param('LimitType', LimitType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |
779 | import os
import pytest
import mock
import shutil
import tempfile
import xml.etree.ElementTree
from future.moves.urllib.parse import urljoin
from scripts import generate_sitemap
from osf_tests.factories import (AuthUserFactory, ProjectFactory, RegistrationFactory, CollectionFactory,
PreprintFactory, PreprintProviderFactory, EmbargoFactory, UnconfirmedUserFactory)
from website import settings
def get_all_sitemap_urls():
    # Generate the sitemap files (written under settings.STATIC_FOLDER,
    # which callers patch to a temporary directory)
generate_sitemap.main()
# Parse the generated XML sitemap file
with open(os.path.join(settings.STATIC_FOLDER, 'sitemaps/sitemap_0.xml')) as f:
tree = xml.etree.ElementTree.parse(f)
shutil.rmtree(settings.STATIC_FOLDER)
# Get all the urls in the sitemap
# Note: namespace was defined in the XML file, therefore necessary to include in tag
namespace = '{http://www.sitemaps.org/schemas/sitemap/0.9}'
urls = [element.text for element in tree.iter(namespace + 'loc')]
return urls
@pytest.mark.django_db
class TestGenerateSitemap:
@pytest.fixture(autouse=True)
def user_admin_project_public(self):
return AuthUserFactory()
@pytest.fixture(autouse=True)
def user_unconfirmed(self):
return UnconfirmedUserFactory()
@pytest.fixture(autouse=True)
def user_admin_project_private(self):
return AuthUserFactory()
@pytest.fixture(autouse=True)
def project_registration_public(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_preprint_osf(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_preprint_other(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_public=True)
@pytest.fixture(autouse=True)
def project_private(self, user_admin_project_private):
return ProjectFactory(creator=user_admin_project_private, is_public=False)
@pytest.fixture(autouse=True)
def METHOD_NAME(self, user_admin_project_public):
return ProjectFactory(creator=user_admin_project_public, is_deleted=True)
@pytest.fixture(autouse=True)
def registration_active(self, user_admin_project_public, project_registration_public):
return RegistrationFactory(project=project_registration_public,
creator=user_admin_project_public,
is_public=True)
@pytest.fixture(autouse=True)
def registration_embargoed(self, user_admin_project_public, project_registration_public):
return RegistrationFactory(project=project_registration_public,
creator=user_admin_project_public,
embargo=EmbargoFactory(user=user_admin_project_public))
@pytest.fixture(autouse=True)
def collection(self, user_admin_project_public):
return CollectionFactory(creator=user_admin_project_public)
@pytest.fixture(autouse=True)
def provider_osf(self):
        # Note: at least one provider whose _id == 'osf' has to exist for the script to work
return PreprintProviderFactory(_id='osf', name='osfprovider')
@pytest.fixture(autouse=True)
def provider_other(self):
return PreprintProviderFactory(_id='adl', name='anotherprovider')
@pytest.fixture(autouse=True)
def preprint_osf(self, project_preprint_osf, user_admin_project_public, provider_osf):
return PreprintFactory(project=project_preprint_osf,
creator=user_admin_project_public,
provider=provider_osf)
@pytest.fixture(autouse=True)
def preprint_other(self, project_preprint_other, user_admin_project_public, provider_other):
return PreprintFactory(project=project_preprint_other,
creator=user_admin_project_public,
provider=provider_other)
@pytest.fixture(autouse=True)
def all_included_links(self, user_admin_project_public, user_admin_project_private, project_registration_public,
project_preprint_osf, project_preprint_other,
registration_active, provider_other, preprint_osf,
preprint_other):
# Return urls of all fixtures
urls_to_include = [item['loc'] for item in settings.SITEMAP_STATIC_URLS]
urls_to_include.extend([
user_admin_project_public.url,
user_admin_project_private.url,
project_registration_public.url,
project_preprint_osf.url,
project_preprint_other.url,
registration_active.url,
'/preprints/{}/'.format(preprint_osf._id),
'/preprints/{}/{}/'.format(provider_other._id, preprint_other._id),
'/{}/download/?format=pdf'.format(preprint_osf._id),
'/{}/download/?format=pdf'.format(preprint_other._id)
])
urls_to_include = [urljoin(settings.DOMAIN, item) for item in urls_to_include]
return urls_to_include
@pytest.fixture()
def create_tmp_directory(self):
return tempfile.mkdtemp()
def test_all_links_included(self, all_included_links, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
urls_to_include = all_included_links
assert len(urls_to_include) == len(urls)
assert set(urls_to_include) == set(urls)
def test_unconfirmed_user_not_included(self, create_tmp_directory, user_unconfirmed):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, user_unconfirmed.url) not in urls
def test_collection_link_not_included(self, collection, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, collection.url) not in urls
def test_private_project_link_not_included(self, project_private, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, project_private.url) not in urls
def test_embargoed_registration_link_not_included(self, registration_embargoed, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, registration_embargoed.url) not in urls
def test_deleted_project_link_not_included(self, METHOD_NAME, create_tmp_directory):
with mock.patch('website.settings.STATIC_FOLDER', create_tmp_directory):
urls = get_all_sitemap_urls()
assert urljoin(settings.DOMAIN, METHOD_NAME.url) not in urls | null |
780 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import Optional
from pydantic import Extra, BaseModel, Field, StrictStr, constr, validator
from lightly.openapi_generated.swagger_client.models.datasource_config_base import DatasourceConfigBase
from lightly.openapi_generated.swagger_client.models.s3_region import S3Region
class DatasourceConfigS3(DatasourceConfigBase):
"""
DatasourceConfigS3
"""
full_path: StrictStr = Field(..., alias="fullPath", description="path includes the bucket name and the path within the bucket where you have stored your information")
s3_region: S3Region = Field(..., alias="s3Region")
s3_access_key_id: constr(strict=True, min_length=1) = Field(..., alias="s3AccessKeyId", description="The accessKeyId of the credential you are providing Lightly to use")
s3_secret_access_key: constr(strict=True, min_length=1) = Field(..., alias="s3SecretAccessKey", description="The secretAccessKey of the credential you are providing Lightly to use")
s3_server_side_encryption_kms_key: Optional[constr(strict=True, min_length=1)] = Field(None, alias="s3ServerSideEncryptionKMSKey", description="If set, Lightly Worker will automatically set the headers to use server side encryption https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html with this value as the appropriate KMS key arn. This will encrypt the files created by Lightly (crops, frames, thumbnails) in the S3 bucket. ")
__properties = ["id", "purpose", "type", "thumbSuffix", "fullPath", "s3Region", "s3AccessKeyId", "s3SecretAccessKey", "s3ServerSideEncryptionKMSKey"]
@validator('s3_server_side_encryption_kms_key')
def METHOD_NAME(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^arn:aws:kms:[a-zA-Z0-9-]*:[0-9]{12}:key.+$", value):
raise ValueError(r"must validate the regular expression /^arn:aws:kms:[a-zA-Z0-9-]*:[0-9]{12}:key.+$/")
return value
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def to_str(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def to_json(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DatasourceConfigS3:
"""Create an instance of DatasourceConfigS3 from a JSON string"""
return cls.from_dict(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
return _dict
@classmethod
def from_dict(cls, obj: dict) -> DatasourceConfigS3:
"""Create an instance of DatasourceConfigS3 from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DatasourceConfigS3.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DatasourceConfigS3) in the input: " + str(obj))
_obj = DatasourceConfigS3.parse_obj({
"id": obj.get("id"),
"purpose": obj.get("purpose"),
"type": obj.get("type"),
"thumb_suffix": obj.get("thumbSuffix"),
"full_path": obj.get("fullPath"),
"s3_region": obj.get("s3Region"),
"s3_access_key_id": obj.get("s3AccessKeyId"),
"s3_secret_access_key": obj.get("s3SecretAccessKey"),
"s3_server_side_encryption_kms_key": obj.get("s3ServerSideEncryptionKMSKey")
})
return _obj
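# Illustrative usage sketch (not part of the generated module; the bucket
# path, credentials and KMS key ARN below are hypothetical placeholders, and
# the base fields "id"/"purpose"/"type" come from DatasourceConfigBase):
# config = DatasourceConfigS3.from_json(json.dumps({
#     "id": "646f346004d77b4e1424e67e",
#     "purpose": "INPUT_OUTPUT",
#     "type": "S3",
#     "fullPath": "my-bucket/datasets/",
#     "s3Region": "eu-central-1",
#     "s3AccessKeyId": "AKIA...",
#     "s3SecretAccessKey": "<secret>",
#     "s3ServerSideEncryptionKMSKey": "arn:aws:kms:eu-central-1:123456789012:key/<key-id>",
# }))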
| null |
781 | # -*- coding: utf-8 -*-
import jieba
import numpy as np
import re
import os
import json
import sys
class Result:
def __init__(self, cid, number, title, url, dist, output):
self.id = cid
self.number = number
self.title = title
self.url = url
self.dist = dist
self.output = output
    def __str__(self):
        return json.dumps(self.__dict__, ensure_ascii=False)
def get_word_list(src_str,paddle_shut):
cut = sorted(clean_stopword(jieba.cut(src_str,use_paddle=paddle_shut)))
return (','.join(cut)).split(',')
def METHOD_NAME(list_word1, list_word2):
    """
    :param list_word1: word list of sentence 1
    :param list_word2: word list of sentence 2
    :return: the word-frequency vectors of the two sentences
    """
    # List all words: take the union of the two word lists
key_word = list(set(list_word1 + list_word2))
    # Zero-filled arrays of the required shape to store the frequency vectors
word_vector1 = np.zeros(len(key_word))
word_vector2 = np.zeros(len(key_word))
    # Compute word frequencies
    # Fill each position of the vectors in turn
    for i in range(len(key_word)):
        # Count how often each key word occurs in each sentence
for j in range(len(list_word1)):
if key_word[i] == list_word1[j]:
word_vector1[i] += 1
for k in range(len(list_word2)):
if key_word[i] == list_word2[k]:
word_vector2[i] += 1
    # Debug output of the vectors
    #print(word_vector1)
    #print(word_vector2)
return word_vector1, word_vector2
def clean_stopword(word_cut):
cleanword = []
for word in word_cut:
word = word.lower()
if word not in stopword_list:
cleanword.append(word)
return cleanword
def cos_dist(vec1,vec2):
"""
    :param vec1: vector 1
    :param vec2: vector 2
    :return: the cosine similarity of the two vectors
"""
dist1=float(np.dot(vec1,vec2)/(np.linalg.norm(vec1)*np.linalg.norm(vec2)))
return dist1
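# Worked example of the two helpers above (values are illustrative only):
#   list_word1 = ["i", "like", "apples"]
#   list_word2 = ["i", "hate", "apples"]
# METHOD_NAME builds frequency vectors over the union of both word lists,
# e.g. key_word = ["apples", "hate", "i", "like"] gives
#   vec1 = [1, 0, 1, 1] and vec2 = [1, 1, 1, 0],
# and cos_dist(vec1, vec2) = 2 / (sqrt(3) * sqrt(3)) ≈ 0.667.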
def filter_html(html):
"""
    :param html: html string
    :return: the plain text with all HTML tags removed
"""
dr = re.compile(r'<[^>]+>',re.S)
dd = dr.sub('',html).strip()
return dd
def pick_result(results):
"""
    Pick suitable results into tiers by similarity range
"""
compareIndex = 0
pick = []
for compare_item in results:
if compareIndex == len(compareMaxValueArray):
break
if len(pick) < compareCountArray[compareIndex]:
if compareMaxValueArray[compareIndex] < compare_item.dist:
continue
            elif compareMinValueArray[compareIndex] < compare_item.dist <= compareMaxValueArray[compareIndex]:
pick.append(compare_item.__dict__)
if len(pick) >= compareCountArray[compareIndex]:
compareIndex = compareIndex + 1
else:
for i in range(compareCountArray[compareIndex] - len(pick)):
pick.append(None)
while True:
compareIndex = compareIndex + 1
if compareIndex == len(compareMaxValueArray):
break
                if compareMinValueArray[compareIndex] < compare_item.dist <= compareMaxValueArray[compareIndex]:
pick.append(compare_item.__dict__)
if len(pick) >= compareCountArray[compareIndex]:
compareIndex = compareIndex + 1
break
return pick
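# Example of the tiering above (hypothetical configuration): with
# compareCountArray = [2, 3], compareMinValueArray = [0.5, 0.2] and
# compareMaxValueArray = [0.9, 0.5], the results (sorted by descending dist)
# are scanned once; up to 2 items with 0.5 < dist <= 0.9 fill the first tier,
# then up to 3 items with 0.2 < dist <= 0.5 fill the second tier, and unfilled
# slots of a finished tier are padded with None.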
stopword_list=[]
compareCountArray=[]
compareMinValueArray=[]
compareMaxValueArray=[]
if __name__ == '__main__':
log="------start------"
try:
        json_path = os.path.join(os.getcwd(), "compare.json")
        stopwords_path = os.path.join(os.getcwd(), "stopwords.txt")
        result_path = os.path.join(os.getcwd(), "recommend.json")
        if not os.path.exists(json_path):
            log = log + " comparison file not found: " + json_path
            quit(1)
        else:
            log = log + " comparison file found!"
        if not os.path.exists(stopwords_path):
            log = log + " stopwords file not found: " + stopwords_path
            quit(1)
        else:
            log = log + " stopwords file found!"
        with open(json_path, "r") as json_file:
            obj = json.loads(json_file.read())
            log = log + " comparison file loaded!"
        with open(stopwords_path, "r") as file:
            stopword_list = file.read().splitlines()
            log = log + " stopwords file loaded!"
source = obj.get("Source")
compareCountArray = obj.get("PickCount")
compareMinValueArray = obj.get("PickMinValues")
compareMaxValueArray = obj.get("PickMaxValues")
log = log +" 源语句:" + source
compares = obj.get("Reference")
results=[]
use_paddle = False
        use_paddle_env = os.environ.get("USE_PADDLE")
        if use_paddle_env == "TRUE":
            use_paddle = True
            log = log + " Paddle mode enabled!"
        src_list = get_word_list(source, use_paddle)
        log = log + " source tokens:\t" + '/'.join(src_list)
sys.stderr.write(log)
for compare_item in compares:
compare_str = compare_item.get("Title")
compare_list = get_word_list(compare_str, use_paddle)
vec1,vec2 = METHOD_NAME(src_list,compare_list)
dist = cos_dist(vec1,vec2)
results.append(Result(compare_item.get("Id"), compare_item.get("Number"), compare_str,compare_item.get("Url"), dist, "{:.2%}".format(dist)))
log = log + " 已完成相似度计算"
results.sort(key=lambda x: x.dist, reverse=True)
pick = pick_result(results)
with open(result_path, 'w') as file_handler:
file_handler.write(json.dumps(pick))
log = log + " 已将结果写入文件"
quit(0)
except Exception as e:
sys.stderr.write(str(e) + log)
quit(1)
| null |
782 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvs.endpoint import endpoint_data
class CreateDeviceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'vs', '2018-12-12', 'CreateDevice')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_GbId(self):
return self.get_query_params().get('GbId')
def set_GbId(self,GbId):
self.add_query_param('GbId',GbId)
def get_Latitude(self):
return self.get_query_params().get('Latitude')
def set_Latitude(self,Latitude):
self.add_query_param('Latitude',Latitude)
def get_Description(self):
return self.get_query_params().get('Description')
def set_Description(self,Description):
self.add_query_param('Description',Description)
def get_Type(self):
return self.get_query_params().get('Type')
def set_Type(self,Type):
self.add_query_param('Type',Type)
def get_AutoStart(self):
return self.get_query_params().get('AutoStart')
def set_AutoStart(self,AutoStart):
self.add_query_param('AutoStart',AutoStart)
def get_ParentId(self):
return self.get_query_params().get('ParentId')
def set_ParentId(self,ParentId):
self.add_query_param('ParentId',ParentId)
def get_Password(self):
return self.get_query_params().get('Password')
def set_Password(self,Password):
self.add_query_param('Password',Password)
def get_Vendor(self):
return self.get_query_params().get('Vendor')
def set_Vendor(self,Vendor):
self.add_query_param('Vendor',Vendor)
def get_AlarmMethod(self):
return self.get_query_params().get('AlarmMethod')
def set_AlarmMethod(self,AlarmMethod):
self.add_query_param('AlarmMethod',AlarmMethod)
def get_DirectoryId(self):
return self.get_query_params().get('DirectoryId')
def set_DirectoryId(self,DirectoryId):
self.add_query_param('DirectoryId',DirectoryId)
def get_Longitude(self):
return self.get_query_params().get('Longitude')
def set_Longitude(self,Longitude):
self.add_query_param('Longitude',Longitude)
def get_GroupId(self):
return self.get_query_params().get('GroupId')
def set_GroupId(self,GroupId):
self.add_query_param('GroupId',GroupId)
def get_Ip(self):
return self.get_query_params().get('Ip')
def set_Ip(self,Ip):
self.add_query_param('Ip',Ip)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_Params(self):
return self.get_query_params().get('Params')
def set_Params(self,Params):
self.add_query_param('Params',Params)
def get_Url(self):
return self.get_query_params().get('Url')
def set_Url(self,Url):
self.add_query_param('Url',Url)
def get_Port(self):
return self.get_query_params().get('Port')
def set_Port(self,Port):
self.add_query_param('Port',Port)
def get_Name(self):
return self.get_query_params().get('Name')
def set_Name(self,Name):
self.add_query_param('Name',Name)
def get_PosInterval(self):
return self.get_query_params().get('PosInterval')
def set_PosInterval(self,PosInterval):
self.add_query_param('PosInterval',PosInterval)
def get_Dsn(self):
return self.get_query_params().get('Dsn')
def set_Dsn(self,Dsn):
self.add_query_param('Dsn',Dsn)
def get_Username(self):
return self.get_query_params().get('Username')
def set_Username(self,Username):
self.add_query_param('Username',Username)
def METHOD_NAME(self):
return self.get_query_params().get('AutoPos')
def set_AutoPos(self,AutoPos):
        self.add_query_param('AutoPos', AutoPos)
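# Hedged usage sketch (illustrative only; credentials and ids are placeholder
# values, sent through the standard aliyunsdkcore client):
# from aliyunsdkcore.client import AcsClient
# client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
# request = CreateDeviceRequest()
# request.set_Name('camera-01')
# request.set_GroupId('<group-id>')
# response = client.do_action_with_exception(request)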
783 | """EmceeSampler class."""
from __future__ import annotations
import logging
from typing import List, Union
import numpy as np
from ..problem import Problem
from ..result import McmcPtResult
from ..startpoint import UniformStartpoints, uniform
from .sampler import Sampler, SamplerImportError
logger = logging.getLogger(__name__)
class EmceeSampler(Sampler):
"""Use emcee for sampling.
Wrapper around https://emcee.readthedocs.io/en/stable/, see there for
details.
"""
def __init__(
self,
nwalkers: int = 1,
sampler_args: dict = None,
run_args: dict = None,
):
"""
Initialize sampler.
Parameters
----------
nwalkers:
The number of walkers in the ensemble.
sampler_args:
Further keyword arguments that are passed on to
``emcee.EnsembleSampler.__init__``.
run_args:
Further keyword arguments that are passed on to
``emcee.EnsembleSampler.run_mcmc``.
"""
# check dependencies
try:
import emcee
except ImportError:
raise SamplerImportError("emcee")
super().__init__()
self.nwalkers: int = nwalkers
if sampler_args is None:
sampler_args = {}
self.sampler_args: dict = sampler_args
if run_args is None:
run_args = {}
self.run_args: dict = run_args
# set in initialize
self.problem: Union[Problem, None] = None
self.sampler: Union[emcee.EnsembleSampler, None] = None
self.state: Union[emcee.State, None] = None
def get_epsilon_ball_initial_state(
self,
center: np.ndarray,
problem: Problem,
epsilon: float = 1e-3,
):
"""Get walker initial positions as samples from an epsilon ball.
The ball is scaled in each direction according to the magnitude of the
center in that direction.
It is assumed that, because vectors are generated near a good point,
all generated vectors are evaluable, so evaluability is not checked.
Points that are generated outside the problem bounds will get shifted
to lie on the edge of the problem bounds.
Parameters
----------
center:
The center of the epsilon ball. The dimension should match the full
dimension of the pyPESTO problem. This will be returned as the
first position.
problem:
The pyPESTO problem.
epsilon:
The relative radius of the ball. e.g., if `epsilon=0.5`
and the center of the first dimension is at 100, then the upper
and lower bounds of the epsilon ball in the first dimension will
be 150 and 50, respectively.
"""
# Epsilon ball
lb = center * (1 - epsilon)
ub = center * (1 + epsilon)
# Adjust bounds to satisfy problem bounds
lb[lb < problem.lb] = problem.lb[lb < problem.lb]
ub[ub > problem.ub] = problem.ub[ub > problem.ub]
# Sample initial positions
initial_state_after_first = uniform(
n_starts=self.nwalkers - 1,
lb=lb,
ub=ub,
)
# Include `center` in initial positions
initial_state = np.row_stack(
(
center,
initial_state_after_first,
)
)
return initial_state
def initialize(
self,
problem: Problem,
x0: Union[np.ndarray, List[np.ndarray]],
) -> None:
"""Initialize the sampler.
It is recommended to initialize walkers
Parameters
----------
x0:
The "a priori preferred position". e.g., an optimized parameter
vector. https://emcee.readthedocs.io/en/stable/user/faq/
The position of the first walker will be this, the remaining
walkers will be assigned positions uniformly in a smaller ball
around this vector.
Alternatively, a set of vectors can be provided, which will be used
to initialize walkers. In this case, any remaining walkers will be
initialized at points sampled uniformly within the problem bounds.
"""
import emcee
self.problem = problem
# extract for pickling efficiency
objective = self.problem.objective
lb = self.problem.lb
ub = self.problem.ub
        # parameter dimension
ndim = len(self.problem.x_free_indices)
def log_prob(x):
"""Log-probability density function."""
# check if parameter lies within bounds
if any(x < lb) or any(x > ub):
return -np.inf
# invert sign
return -1.0 * objective(x)
# initialize sampler
self.sampler = emcee.EnsembleSampler(
nwalkers=self.nwalkers,
ndim=ndim,
log_prob_fn=log_prob,
**self.sampler_args,
)
# assign startpoints
if self.state is None:
            if np.asarray(x0).ndim > 1 and np.asarray(x0).shape[0] > 1:
logger.warning(
"More than a single vector was provided to initialize the "
"walker positions. If these vectors do not exist in a "
"small ball around a high-probability position (e.g. "
"optimized vector) then sampling may be inefficient (see "
"emcee FAQ: "
"https://emcee.readthedocs.io/en/stable/user/faq/ )."
)
# extract x0
x0 = np.asarray(x0)
if x0.ndim == 1:
x0 = [x0]
x0 = np.array([problem.get_full_vector(x) for x in x0])
x_guesses_full0 = problem.x_guesses_full
# add x0 to guesses
problem.set_x_guesses(
np.row_stack(
(
x0,
problem.x_guesses_full,
)
)
)
# sample start points
initial_state = UniformStartpoints(
use_guesses=True,
check_fval=True,
check_grad=False,
)(
n_starts=self.nwalkers,
problem=problem,
)
# restore original guesses
problem.set_x_guesses(x_guesses_full0)
else:
initial_state = self.get_epsilon_ball_initial_state(
center=x0,
problem=problem,
)
self.state = initial_state
def sample(self, n_samples: int, beta: float = 1.0) -> None:
"""Return the most recent sample state."""
self.state = self.sampler.run_mcmc(
initial_state=self.state,
nsteps=n_samples,
**self.run_args,
)
def METHOD_NAME(self) -> McmcPtResult:
"""Get the samples into the fitting pypesto format."""
# all walkers are concatenated, yielding a flat array
trace_x = np.array([self.sampler.get_chain(flat=True)])
trace_neglogpost = np.array([-self.sampler.get_log_prob(flat=True)])
# the sampler does not know priors
trace_neglogprior = np.full(trace_neglogpost.shape, np.nan)
# the walkers all run on temperature 1
betas = np.array([1.0])
result = McmcPtResult(
trace_x=trace_x,
trace_neglogpost=trace_neglogpost,
trace_neglogprior=trace_neglogprior,
betas=betas,
)
return result | null |
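# Minimal usage sketch (assumes an existing pyPESTO `problem` and an optimized
# parameter vector `x_opt`; neither is defined in this module):
# sampler = EmceeSampler(nwalkers=20)
# sampler.initialize(problem=problem, x0=x_opt)
# sampler.sample(n_samples=1000)
# result = sampler.METHOD_NAME()  # McmcPtResult with a single temperature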
784 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkunimkt.endpoint import endpoint_data
class SaveCpmTradeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'UniMkt', '2018-12-12', 'SaveCpmTrade')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_TaskType(self): # String
return self.get_query_params().get('TaskType')
def set_TaskType(self, TaskType): # String
self.add_query_param('TaskType', TaskType)
def get_RealCostAmount(self): # String
return self.get_query_params().get('RealCostAmount')
def set_RealCostAmount(self, RealCostAmount): # String
self.add_query_param('RealCostAmount', RealCostAmount)
def get_Sex(self): # String
return self.get_query_params().get('Sex')
def set_Sex(self, Sex): # String
self.add_query_param('Sex', Sex)
def get_CostDetail(self): # String
return self.get_query_params().get('CostDetail')
def set_CostDetail(self, CostDetail): # String
self.add_query_param('CostDetail', CostDetail)
def get_TaskTag(self): # String
return self.get_query_params().get('TaskTag')
def set_TaskTag(self, TaskTag): # String
self.add_query_param('TaskTag', TaskTag)
def get_BizType(self): # String
return self.get_query_params().get('BizType')
def set_BizType(self, BizType): # String
self.add_query_param('BizType', BizType)
def get_DeviceCode(self): # String
return self.get_query_params().get('DeviceCode')
def set_DeviceCode(self, DeviceCode): # String
self.add_query_param('DeviceCode', DeviceCode)
def get_V(self): # String
return self.get_query_params().get('V')
def set_V(self, V): # String
self.add_query_param('V', V)
def get_ApplyPrice(self): # String
return self.get_query_params().get('ApplyPrice')
def set_ApplyPrice(self, ApplyPrice): # String
self.add_query_param('ApplyPrice', ApplyPrice)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def get_HoldId(self): # String
return self.get_query_params().get('HoldId')
def set_HoldId(self, HoldId): # String
self.add_query_param('HoldId', HoldId)
def get_ExtendString(self): # String
return self.get_query_params().get('ExtendString')
def set_ExtendString(self, ExtendString): # String
self.add_query_param('ExtendString', ExtendString)
def get_ChannelId(self): # String
return self.get_query_params().get('ChannelId')
def set_ChannelId(self, ChannelId): # String
self.add_query_param('ChannelId', ChannelId)
def get_Age(self): # Integer
return self.get_query_params().get('Age')
def set_Age(self, Age): # Integer
self.add_query_param('Age', Age)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_AdvertType(self): # String
return self.get_query_params().get('AdvertType')
def set_AdvertType(self, AdvertType): # String
self.add_query_param('AdvertType', AdvertType)
def get_OuterCode(self): # String
return self.get_query_params().get('OuterCode')
def set_OuterCode(self, OuterCode): # String
self.add_query_param('OuterCode', OuterCode)
def METHOD_NAME(self): # Long
return self.get_query_params().get('TradeTime')
def set_TradeTime(self, TradeTime): # Long
self.add_query_param('TradeTime', TradeTime) | null |
785 | # Copyright 2017-2023 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from datetime import datetime, timedelta
from mock import MagicMock, Mock
from pipeline.hpc.event import AvailableInstanceEvent, InsufficientInstanceEvent, FailingInstanceEvent
from pipeline.hpc.instance.avail import InstanceAvailabilityManager
from pipeline.hpc.instance.provider import Instance
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s [%(threadName)s] [%(levelname)s] %(message)s')
unavailability_delay = 3600
unavailability_count_insufficient = 1
unavailability_count_failure = 10
started = datetime(2018, 12, 21, 11, 00, 00)
stopped = datetime(2018, 12, 21, 11, 5, 00)
run_id = 12345
worker_name = 'pipeline-12345'
price_type = 'spot'
inner_instance_provider = Mock()
clock = Mock()
event_manager = Mock()
availability_manager = InstanceAvailabilityManager(event_manager=event_manager, clock=clock,
unavail_delay=unavailability_delay,
unavail_count_insufficient=unavailability_count_insufficient,
unavail_count_failure=unavailability_count_failure)
instance_2cpu = Instance(name='m5.large', price_type=price_type, cpu=2, mem=8, gpu=0)
instance_4cpu = Instance(name='m5.xlarge', price_type=price_type, cpu=4, mem=16, gpu=0)
instance_8cpu = Instance(name='m5.2xlarge', price_type=price_type, cpu=8, mem=32, gpu=0)
def setup_function():
inner_instance_provider.provide = MagicMock(return_value=[
instance_2cpu,
instance_4cpu,
instance_8cpu])
clock.now = MagicMock(return_value=stopped + timedelta(seconds=unavailability_delay - 1))
def test_get_unavailable_if_no_events():
event_manager.get = MagicMock(return_value=[])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_available_event():
event_manager.get = MagicMock(return_value=[
AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_insufficient_event():
event_manager.get = MagicMock(return_value=[
InsufficientInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert len(unavailable_instances) == 1
assert instance_8cpu.name in unavailable_instances
def test_get_unavailable_if_insufficient_outdated_event():
clock.now = MagicMock(return_value=stopped + timedelta(seconds=unavailability_delay + 1))
event_manager.get = MagicMock(return_value=[
InsufficientInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_available_and_insufficient_events():
event_manager.get = MagicMock(return_value=[
AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped),
InsufficientInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert len(unavailable_instances) == 1
assert instance_8cpu.name in unavailable_instances
def test_get_unavailable_if_insufficient_and_available_events():
event_manager.get = MagicMock(return_value=[
InsufficientInstanceEvent(instance_type=instance_8cpu.name, date=stopped),
AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_failing_event():
event_manager.get = MagicMock(return_value=[
FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_multiple_failing_events():
event_manager.get = MagicMock(
return_value=unavailability_count_failure *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert len(unavailable_instances) == 1
assert instance_8cpu.name in unavailable_instances
def test_get_unavailable_if_multiple_failing_outdated_events():
clock.now = MagicMock(return_value=stopped + timedelta(seconds=unavailability_delay + 1))
event_manager.get = MagicMock(
return_value=unavailability_count_failure *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_available_and_multiple_failing_events():
event_manager.get = MagicMock(
return_value=[AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped)]
+ unavailability_count_failure *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert len(unavailable_instances) == 1
assert instance_8cpu.name in unavailable_instances
def METHOD_NAME():
event_manager.get = MagicMock(
return_value=unavailability_count_failure *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)]
+ [AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_couple_failing_and_available_and_couple_failing_events():
event_manager.get = MagicMock(
return_value=(unavailability_count_failure - 1) *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)]
+ [AvailableInstanceEvent(instance_type=instance_8cpu.name, date=stopped)]
+ (unavailability_count_failure - 1) *
[FailingInstanceEvent(instance_type=instance_8cpu.name, date=stopped)])
unavailable_instances = list(availability_manager.get_unavailable())
assert not unavailable_instances
def test_get_unavailable_if_first_instance_insufficient_and_second_instance_multiple_failing_events():
event_manager.get = MagicMock(
return_value=([InsufficientInstanceEvent(instance_8cpu.name, date=stopped)]
+ unavailability_count_failure *
[FailingInstanceEvent(instance_type=instance_4cpu.name, date=stopped)]))
unavailable_instances = list(availability_manager.get_unavailable())
assert len(unavailable_instances) == 2
assert instance_8cpu.name in unavailable_instances
assert instance_4cpu.name in unavailable_instances | null |
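# Summary of the semantics exercised above: per instance type only events
# newer than `unavailability_delay` count, an AvailableInstanceEvent resets
# the failure streak, and an instance becomes unavailable after a single
# insufficient event or `unavailability_count_failure` consecutive failures.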
786 | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: [email protected]
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
import pprint
import re # noqa: F401
import json
from typing import List, Optional
from pydantic import Extra, BaseModel, Field, StrictInt, StrictStr, conint, conlist, constr, validator
from lightly.openapi_generated.swagger_client.models.dataset_type import DatasetType
from lightly.openapi_generated.swagger_client.models.image_type import ImageType
from lightly.openapi_generated.swagger_client.models.shared_access_type import SharedAccessType
class DatasetDataEnriched(BaseModel):
"""
DatasetDataEnriched
"""
id: constr(strict=True) = Field(..., description="MongoDB ObjectId")
name: constr(strict=True, min_length=3) = Field(...)
user_id: StrictStr = Field(..., alias="userId", description="The owner of the dataset")
access_type: Optional[SharedAccessType] = Field(None, alias="accessType")
type: DatasetType = Field(...)
img_type: Optional[ImageType] = Field(None, alias="imgType")
n_samples: StrictInt = Field(..., alias="nSamples")
size_in_bytes: StrictInt = Field(..., alias="sizeInBytes")
created_at: conint(strict=True, ge=0) = Field(..., alias="createdAt", description="unix timestamp in milliseconds")
last_modified_at: conint(strict=True, ge=0) = Field(..., alias="lastModifiedAt", description="unix timestamp in milliseconds")
meta_data_configuration_id: Optional[constr(strict=True)] = Field(None, alias="metaDataConfigurationId", description="MongoDB ObjectId")
access_role: Optional[constr(strict=True)] = Field(None, alias="accessRole", description="AccessRole bitmask of the one accessing the dataset")
datasources: Optional[conlist(constr(strict=True))] = None
parent_dataset_id: Optional[constr(strict=True)] = Field(None, alias="parentDatasetId", description="MongoDB ObjectId")
original_dataset_id: Optional[constr(strict=True)] = Field(None, alias="originalDatasetId", description="MongoDB ObjectId")
samples: conlist(constr(strict=True)) = Field(...)
n_tags: StrictInt = Field(..., alias="nTags")
n_embeddings: StrictInt = Field(..., alias="nEmbeddings")
__properties = ["id", "name", "userId", "accessType", "type", "imgType", "nSamples", "sizeInBytes", "createdAt", "lastModifiedAt", "metaDataConfigurationId", "accessRole", "datasources", "parentDatasetId", "originalDatasetId", "samples", "nTags", "nEmbeddings"]
@validator('id')
def id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
@validator('name')
def name_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if not re.match(r"^[a-zA-Z0-9][a-zA-Z0-9 _-]+$", value):
raise ValueError(r"must validate the regular expression /^[a-zA-Z0-9][a-zA-Z0-9 _-]+$/")
return value
@validator('meta_data_configuration_id')
def meta_data_configuration_id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
@validator('access_role')
def access_role_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^0b[01]{6}$", value):
raise ValueError(r"must validate the regular expression /^0b[01]{6}$/")
return value
@validator('parent_dataset_id')
def parent_dataset_id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
@validator('original_dataset_id')
def original_dataset_id_validate_regular_expression(cls, value):
"""Validates the regular expression"""
if value is None:
return value
if not re.match(r"^[a-f0-9]{24}$", value):
raise ValueError(r"must validate the regular expression /^[a-f0-9]{24}$/")
return value
class Config:
"""Pydantic configuration"""
allow_population_by_field_name = True
validate_assignment = True
use_enum_values = True
extra = Extra.forbid
def to_str(self, by_alias: bool = False) -> str:
"""Returns the string representation of the model"""
return pprint.pformat(self.dict(by_alias=by_alias))
def to_json(self, by_alias: bool = False) -> str:
"""Returns the JSON representation of the model"""
return json.dumps(self.to_dict(by_alias=by_alias))
@classmethod
def from_json(cls, json_str: str) -> DatasetDataEnriched:
"""Create an instance of DatasetDataEnriched from a JSON string"""
return cls.METHOD_NAME(json.loads(json_str))
def to_dict(self, by_alias: bool = False):
"""Returns the dictionary representation of the model"""
_dict = self.dict(by_alias=by_alias,
exclude={
},
exclude_none=True)
return _dict
@classmethod
def METHOD_NAME(cls, obj: dict) -> DatasetDataEnriched:
"""Create an instance of DatasetDataEnriched from a dict"""
if obj is None:
return None
if not isinstance(obj, dict):
return DatasetDataEnriched.parse_obj(obj)
# raise errors for additional fields in the input
for _key in obj.keys():
if _key not in cls.__properties:
raise ValueError("Error due to additional fields (not defined in DatasetDataEnriched) in the input: " + str(obj))
_obj = DatasetDataEnriched.parse_obj({
"id": obj.get("id"),
"name": obj.get("name"),
"user_id": obj.get("userId"),
"access_type": obj.get("accessType"),
"type": obj.get("type"),
"img_type": obj.get("imgType"),
"n_samples": obj.get("nSamples"),
"size_in_bytes": obj.get("sizeInBytes"),
"created_at": obj.get("createdAt"),
"last_modified_at": obj.get("lastModifiedAt"),
"meta_data_configuration_id": obj.get("metaDataConfigurationId"),
"access_role": obj.get("accessRole"),
"datasources": obj.get("datasources"),
"parent_dataset_id": obj.get("parentDatasetId"),
"original_dataset_id": obj.get("originalDatasetId"),
"samples": obj.get("samples"),
"n_tags": obj.get("nTags"),
"n_embeddings": obj.get("nEmbeddings")
})
return _obj
| null |
787 | # Copyright 2021 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import fields
from odoo.addons.shopinvader.tests.common import CommonCase
class TestShopinvaderPos(CommonCase):
"""
Tests for shopinvader.pos.service
"""
@classmethod
def METHOD_NAME(cls):
super().METHOD_NAME()
cls.env = cls.env(context=dict(cls.env.context, tracking_disable=True))
cls.PosOrder = cls.env["pos.order"]
cls.partner = cls.env.ref("base.res_partner_2")
cls.pricelist = cls.env.ref("product.list0")
cls.pick_type_out = cls.env["stock.picking.type"].search(
[("code", "=", "outgoing")], limit=1
)
cls.product1 = cls.env.ref("product.product_product_4")
cls.product2 = cls.env.ref("product.product_product_2")
cls.pos_config = cls.env["pos.config"].create(
{"name": "Test POS", "picking_type_id": cls.pick_type_out.id}
)
cls.pos_config.open_session_cb()
amount_base = 1 * 100 + 12 * 30
amount_tax = amount_base * 0.21
amount_total = amount_base + amount_tax
cls.pos_values = {
"partner_id": cls.partner.id,
"pricelist_id": cls.pricelist.id,
"session_id": cls.pos_config.current_session_id.id,
"amount_tax": amount_tax,
"amount_total": amount_total,
"amount_paid": 0,
"amount_return": 0,
"lines": [
(
0,
False,
{
"name": "Test line 1",
"qty": 1,
"price_unit": 100,
"product_id": cls.product1.id,
"price_subtotal": 1 * 100,
"price_subtotal_incl": 1 * 100 * 1.21,
},
),
(
0,
False,
{
"name": "Test line 2",
"qty": 12,
"price_unit": 30,
"product_id": cls.product2.id,
"price_subtotal": 12 * 30,
"price_subtotal_incl": 12 * 30 * 1.21,
},
),
],
}
cls.pos_order1 = cls.PosOrder.create(cls.pos_values)
cls.pos_order2 = cls.PosOrder.create(cls.pos_values)
cls.pos_order1.write({"state": "done"})
cls.pos_order2.write({"state": "done"})
def setUp(self):
super().setUp()
usage = "point_of_sale"
with self.work_on_services(
partner=self.partner, shopinvader_session=self.shopinvader_session
) as work:
self.service = work.component(usage=usage)
with self.work_on_services(
shopinvader_session=self.shopinvader_session
) as work:
self.service_guest = work.component(usage=usage)
def _build_json(self, pos_order):
result = {
"pos_id": pos_order.id,
"amount_untaxed": pos_order.amount_total - pos_order.amount_tax,
"name": pos_order.name,
"reference": pos_order.pos_reference or None,
"amount_tax": pos_order.amount_tax,
"date": fields.Datetime.to_string(pos_order.date_order),
"partner": {
"partner_id": pos_order.partner_id.id,
"name": pos_order.partner_id.name,
},
"amount_total": pos_order.amount_total,
}
return result
def test_search1(self):
result = self.service.dispatch("search")
result_data = result.get("data", {})
pos_orders = self.pos_order2 | self.pos_order1
expected_result = [self._build_json(pos_order) for pos_order in pos_orders]
for result, expected in zip(result_data, expected_result):
self.assertDictEqual(result, expected)
def test_get1(self):
pos_order = self.pos_order1
result = self.service.dispatch("get", pos_order.id)
result_data = result.get("data", {})
expected_result = self._build_json(pos_order)
self.assertDictEqual(result_data, expected_result) | null |
788 | # Copyright (C) 2018-2023 The NeoVintageous Team (NeoVintageous).
#
# This file is part of NeoVintageous.
#
# NeoVintageous is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# NeoVintageous is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NeoVintageous. If not, see <https://www.gnu.org/licenses/>.
from unittest.mock import call
from NeoVintageous.tests import unittest
from NeoVintageous.nv.plugin_input_method import IMSwitcher
from NeoVintageous.nv.plugin_input_method import Listener
class TestInputMethod(unittest.ViewTestCase):
def setUp(self):
super().setUp()
self.set_setting('auto_switch_input_method', False)
self.set_setting('auto_switch_input_method_default', 'en')
self.set_setting('auto_switch_input_method_get_cmd', '/path/to/im-get')
self.set_setting('auto_switch_input_method_set_cmd', '/path/to/im-set {im}')
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_can_switch_to_default(self, shell):
self.set_setting('auto_switch_input_method', True)
shell.side_effect = ['call_1_return', 'call_2_return']
switcher = IMSwitcher()
switcher.run(self.view, unittest.NORMAL)
self.assertEqual(2, shell.call_count)
self.assertEqual('call_1_return', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-get'),
call(self.view, '/path/to/im-set en'),
])
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def METHOD_NAME(self, shell):
self.set_setting('auto_switch_input_method', True)
self.set_setting('auto_switch_input_method_default', 'ie')
shell.side_effect = ['ie', 'call_2_return']
switcher = IMSwitcher()
switcher.saved_im = 'xx'
switcher.run(self.view, unittest.NORMAL)
self.assertEqual(1, shell.call_count)
self.assertEqual('ie', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-get'),
])
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_is_noop_when_disabled(self, shell):
self.set_setting('auto_switch_input_method', False)
shell.side_effect = ['call_1_return', 'call_2_return']
switcher = IMSwitcher()
switcher.run(self.view, unittest.NORMAL)
self.assertMockNotCalled(shell)
self.assertEqual('', switcher.saved_im)
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_can_resume(self, shell):
self.set_setting('auto_switch_input_method', True)
self.set_setting('auto_switch_input_method_default', 'en')
shell.side_effect = ['call_1_return', 'call_2_return']
switcher = IMSwitcher()
switcher.saved_im = 'ie'
switcher.run(self.view, unittest.INSERT)
self.assertEqual(1, shell.call_count)
self.assertEqual('ie', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-set ie'),
])
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_resume_is_noop_when_no_previously_saved_im(self, shell):
self.set_setting('auto_switch_input_method', True)
shell.side_effect = ['call_1_return', 'call_2_return']
switcher = IMSwitcher()
switcher.run(self.view, unittest.INSERT)
self.assertMockNotCalled(shell)
self.assertEqual('', switcher.saved_im)
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_resume_is_noop_if_saved_im_is_same_as_default(self, shell):
self.set_setting('auto_switch_input_method', True)
self.set_setting('auto_switch_input_method_default', 'ie')
shell.side_effect = ['call_1_return', 'call_2_return']
switcher = IMSwitcher()
switcher.saved_im = 'ie'
switcher.run(self.view, unittest.INSERT)
self.assertMockNotCalled(shell)
self.assertEqual('ie', switcher.saved_im)
@unittest.mock.patch('NeoVintageous.nv.plugin_input_method.read')
def test_listener(self, shell):
# User == ie, Default == en
self.set_setting('auto_switch_input_method', True)
self.set_setting('auto_switch_input_method_default', 'en')
shell.side_effect = ['ie', '', '', 'ie', '']
switcher = IMSwitcher()
listener = Listener(switcher)
# 1. ENTER insert stay in "ie" (Insert mode)
listener.on_insert_enter(self.view, prev_mode=unittest.NORMAL)
self.assertMockNotCalled(shell)
self.assertEqual('', switcher.saved_im)
# 2. LEAVE insert switch to "en" (Normal mode)
listener.on_insert_leave(self.view, new_mode=unittest.NORMAL)
self.assertEqual(2, shell.call_count)
self.assertEqual('ie', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-get'),
call(self.view, '/path/to/im-set en'),
])
# 3. ENTER insert switch to "ie" (Insert mode)
listener.on_insert_enter(self.view, prev_mode=unittest.NORMAL)
self.assertEqual(3, shell.call_count)
self.assertEqual('ie', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-set ie'),
])
# 4. LEAVE insert switch to "en" (Normal mode)
listener.on_insert_leave(self.view, new_mode=unittest.NORMAL)
self.assertEqual(5, shell.call_count)
self.assertEqual('ie', switcher.saved_im)
shell.assert_has_calls([
call(self.view, '/path/to/im-get'),
call(self.view, '/path/to/im-set en'),
]) | null |
789 | import json
import pprint
from pcs_test.tools.command_env.config_http_booth import BoothShortcuts
from pcs_test.tools.command_env.config_http_corosync import CorosyncShortcuts
from pcs_test.tools.command_env.config_http_files import FilesShortcuts
from pcs_test.tools.command_env.config_http_host import HostShortcuts
from pcs_test.tools.command_env.config_http_pcmk import PcmkShortcuts
from pcs_test.tools.command_env.config_http_sbd import SbdShortcuts
from pcs_test.tools.command_env.config_http_scsi import ScsiShortcuts
from pcs_test.tools.command_env.config_http_status import StatusShortcuts
from pcs_test.tools.command_env.mock_node_communicator import (
place_communication,
METHOD_NAME,
place_requests,
place_responses,
)
# pylint: disable=line-too-long
def _mutual_exclusive(param_names, **kwargs):
entered = {
key: value
for key, value in kwargs.items()
if key in param_names and value is not None
}
if len(entered) != 1:
raise AssertionError(
"Exactly one of '{0}' must be specified, \nwas specified:\n{1}".format(
"', '".join(param_names),
pprint.pformat(entered) if entered else " nothing",
)
)
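# Illustrative behaviour of the helper above (examples as comments, not executed):
# _mutual_exclusive(["output", "results"], results={"files": {}})  # passes
# _mutual_exclusive(["output", "results"])  # raises: nothing specified
# _mutual_exclusive(["output", "results"], output="{}", results={})  # raises: both specified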
class HttpConfig:
# pylint: disable=too-many-instance-attributes
def __init__(self, call_collection, wrap_helper):
self.__calls = call_collection
self.booth = wrap_helper(BoothShortcuts(self.__calls))
self.corosync = wrap_helper(CorosyncShortcuts(self.__calls))
self.files = wrap_helper(FilesShortcuts(self.__calls))
self.host = wrap_helper(HostShortcuts(self.__calls))
self.pcmk = wrap_helper(PcmkShortcuts(self.__calls))
self.sbd = wrap_helper(SbdShortcuts(self.__calls))
self.scsi = wrap_helper(ScsiShortcuts(self.__calls))
self.status = wrap_helper(StatusShortcuts(self.__calls))
def add_communication(self, name, communication_list, **kwargs):
"""
Create a generic call for network communication.
string name -- key of the call
list of dict communication_list -- see
pcs_test.tools.command_env.mock_node_communicator.create_communication
**kwargs -- see
pcs_test.tools.command_env.mock_node_communicator.create_communication
"""
place_communication(self.__calls, name, communication_list, **kwargs)
def add_requests(self, request_list, name):
place_requests(self.__calls, name, request_list)
def start_loop(self, response_list, name):
place_responses(self.__calls, name, response_list)
def put_file(
self,
communication_list,
name="http.common.put_file",
results=None,
files=None,
**kwargs,
):
"""
Example:
config.http.put_file(
communication_list=[dict(label="node")],
files={
"pacemaker_remote authkey": {
"type": "pcmk_remote_authkey",
"data": base64.b64encode(pcmk_authkey_content),
"rewrite_existing": True
}
},
results={
"pacemaker_remote authkey": {
"code": "written",
"message": "",
}
}
)
"""
_mutual_exclusive(["output", "results"], results=results, **kwargs)
_mutual_exclusive(["files", "param_list"], files=files, **kwargs)
if results:
kwargs["output"] = json.dumps({"files": results})
if files:
kwargs["param_list"] = [("data_json", json.dumps(files))]
self.METHOD_NAME(
name,
communication_list=communication_list,
action="remote/put_file",
**kwargs,
)
def remove_file(
self,
communication_list,
name="http.common.remove_file",
results=None,
files=None,
**kwargs,
):
"""
Example:
config.http.remove_file(
communication_list=[dict(label="node")],
files={
"pacemaker_remote authkey": {
"type": "pcmk_remote_authkey",
}
},
results={
"pacemaker_remote authkey": {
"code": "deleted",
"message": "",
}
}
)
"""
_mutual_exclusive(["output", "results"], results=results, **kwargs)
_mutual_exclusive(["files", "param_list"], files=files, **kwargs)
if results:
kwargs["output"] = json.dumps({"files": results})
if files:
kwargs["param_list"] = [("data_json", json.dumps(files))]
self.METHOD_NAME(
name,
communication_list=communication_list,
action="remote/remove_file",
**kwargs,
)
def manage_services(
self,
communication_list,
name="http.common.manage_services",
results=None,
action_map=None,
**kwargs,
):
"""
Example:
config.http.manage_services(
communication_list=[dict(label=label)],
action_map={
"pacemaker_remote enable": {
"type": "service_command",
"service": "pacemaker_remote",
"command": "enable",
},
"pacemaker_remote start": {
"type": "service_command",
"service": "pacemaker_remote",
"command": "start",
},
},
results={
"pacemaker_remote enable": {
"code": "success",
"message": "",
},
"pacemaker_remote start": {
"code": "success",
"message": "",
}
}
)
"""
_mutual_exclusive(["output", "results"], results=results, **kwargs)
_mutual_exclusive(
["action_map", "param_list"], action_map=action_map, **kwargs
)
if results:
kwargs["output"] = json.dumps({"actions": results})
if action_map:
kwargs["param_list"] = [("data_json", json.dumps(action_map))]
self.METHOD_NAME(
name,
communication_list=communication_list,
action="remote/manage_services",
**kwargs,
)
def METHOD_NAME(self, *args, **kwargs):
METHOD_NAME(self.__calls, *args, **kwargs) | null |
790 | import asyncio
import logging
from typing import Any, Dict, Optional
import numpy as np
from hummingbot.core.network_iterator import NetworkStatus, safe_ensure_future
from hummingbot.core.web_assistant.connections.data_types import WSJSONRequest
from hummingbot.core.web_assistant.ws_assistant import WSAssistant
from hummingbot.data_feed.candles_feed.binance_perpetual_candles import constants as CONSTANTS
from hummingbot.data_feed.candles_feed.candles_base import CandlesBase
from hummingbot.logger import HummingbotLogger
class BinancePerpetualCandles(CandlesBase):
_logger: Optional[HummingbotLogger] = None
@classmethod
def logger(cls) -> HummingbotLogger:
if cls._logger is None:
cls._logger = logging.getLogger(__name__)
return cls._logger
def __init__(self, trading_pair: str, interval: str = "1m", max_records: int = 150):
super().__init__(trading_pair, interval, max_records)
@property
def name(self):
return f"binance_perpetuals_{self._trading_pair}"
@property
def rest_url(self):
return CONSTANTS.REST_URL
@property
def wss_url(self):
return CONSTANTS.WSS_URL
@property
def health_check_url(self):
return self.rest_url + CONSTANTS.HEALTH_CHECK_ENDPOINT
@property
def candles_url(self):
return self.rest_url + CONSTANTS.CANDLES_ENDPOINT
@property
def rate_limits(self):
return CONSTANTS.RATE_LIMITS
@property
def intervals(self):
return CONSTANTS.INTERVALS
async def check_network(self) -> NetworkStatus:
rest_assistant = await self._api_factory.get_rest_assistant()
await rest_assistant.execute_request(url=self.health_check_url,
throttler_limit_id=CONSTANTS.HEALTH_CHECK_ENDPOINT)
return NetworkStatus.CONNECTED
def get_exchange_trading_pair(self, trading_pair):
return trading_pair.replace("-", "")
async def fetch_candles(self,
start_time: Optional[int] = None,
end_time: Optional[int] = None,
limit: Optional[int] = 500):
rest_assistant = await self._api_factory.get_rest_assistant()
params = {"symbol": self._ex_trading_pair, "interval": self.interval, "limit": limit}
if start_time:
params["startTime"] = start_time
if end_time:
params["endTime"] = end_time
candles = await rest_assistant.execute_request(url=self.candles_url,
throttler_limit_id=CONSTANTS.CANDLES_ENDPOINT,
params=params)
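        # keep the Binance kline columns [open time, open, high, low, close,
        # volume, quote asset volume, number of trades, taker buy base volume,
        # taker buy quote volume]; column 6 (close time) and the trailing
        # "ignore" field are dropped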
return np.array(candles)[:, [0, 1, 2, 3, 4, 5, 7, 8, 9, 10]].astype(float)
async def fill_historical_candles(self):
max_request_needed = (self._candles.maxlen // 1000) + 1
requests_executed = 0
while not self.is_ready:
missing_records = self._candles.maxlen - len(self._candles)
end_timestamp = int(self._candles[0][0])
try:
if requests_executed < max_request_needed:
                    # request one extra candle since the last row is dropped below
                    candles = await self.fetch_candles(end_time=end_timestamp, limit=min(1000, missing_records + 1))
                    # recompute the number of missing records since the websocket task
                    # may have modified the deque in the meantime; otherwise freshly
                    # appended observations would be dropped when extending
                    missing_records = self._candles.maxlen - len(self._candles)
                    self._candles.extendleft(candles[-(missing_records + 1):-1][::-1])
requests_executed += 1
else:
self.logger().error(f"There is no data available for the quantity of "
f"candles requested for {self.name}.")
raise
except asyncio.CancelledError:
raise
except Exception:
self.logger().exception(
"Unexpected error occurred when getting historical klines. Retrying in 1 seconds...",
)
await self._sleep(1.0)
async def _subscribe_channels(self, ws: WSAssistant):
"""
Subscribes to the candles events through the provided websocket connection.
:param ws: the websocket assistant used to connect to the exchange
"""
try:
candle_params = []
candle_params.append(f"{self._ex_trading_pair.lower()}@kline_{self.interval}")
payload = {
"method": "SUBSCRIBE",
"params": candle_params,
"id": 1
}
subscribe_candles_request: WSJSONRequest = WSJSONRequest(payload=payload)
await ws.send(subscribe_candles_request)
self.logger().info("Subscribed to public klines...")
except asyncio.CancelledError:
raise
except Exception:
self.logger().error(
"Unexpected error occurred subscribing to public klines...",
exc_info=True
)
raise
async def METHOD_NAME(self, websocket_assistant: WSAssistant):
async for ws_response in websocket_assistant.iter_messages():
data: Dict[str, Any] = ws_response.data
if data is not None and data.get("e") == "kline": # data will be None when the websocket is disconnected
timestamp = data["k"]["t"]
open = data["k"]["o"]
low = data["k"]["l"]
high = data["k"]["h"]
close = data["k"]["c"]
volume = data["k"]["v"]
quote_asset_volume = data["k"]["q"]
n_trades = data["k"]["n"]
taker_buy_base_volume = data["k"]["V"]
taker_buy_quote_volume = data["k"]["Q"]
if len(self._candles) == 0:
self._candles.append(np.array([timestamp, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume]))
safe_ensure_future(self.fill_historical_candles())
elif timestamp > int(self._candles[-1][0]):
# TODO: validate also that the diff of timestamp == interval (issue with 1M interval).
self._candles.append(np.array([timestamp, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume]))
elif timestamp == int(self._candles[-1][0]):
self._candles.pop()
self._candles.append(np.array([timestamp, open, high, low, close, volume,
quote_asset_volume, n_trades, taker_buy_base_volume,
taker_buy_quote_volume])) | null |
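# Hedged usage sketch (placeholder pair/interval; assumes a running asyncio
# event loop and the usual Hummingbot networking stack from NetworkBase):
# candles = BinancePerpetualCandles(trading_pair="BTC-USDT", interval="1m")
# candles.start()  # inherited network lifecycle method, starts the feed
# # ... once candles.is_ready, the deque holds max_records candle rows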
791 | # coding: utf-8
"""
openapi 3.0.3 sample spec
sample spec for testing openapi functionality, built from json schema tests for draft6 # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from unit_test_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
class FooEnums:
@schemas.classproperty
def FOO(cls) -> typing.Literal["foo"]:
return Foo.validate("foo")
@dataclasses.dataclass(frozen=True)
class Foo(
schemas.Schema
):
types: typing.FrozenSet[typing.Type] = frozenset({
str,
})
enum_value_to_name: typing.Mapping[typing.Union[int, float, str, schemas.Bool, None], str] = dataclasses.field(
default_factory=lambda: {
"foo": "FOO",
}
)
enums = FooEnums
@typing.overload
@classmethod
def validate(
cls,
arg: typing.Literal["foo"],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["foo"]: ...
@typing.overload
@classmethod
def validate(
cls,
arg: str,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["foo",]: ...
@classmethod
def validate(
cls,
arg,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal[
"foo",
]:
validated_arg = super().validate_base(
arg,
configuration=configuration,
)
return typing.cast(typing.Literal[
"foo",
],
validated_arg
)
class BarEnums:
@schemas.classproperty
def BAR(cls) -> typing.Literal["bar"]:
return Bar.validate("bar")
@dataclasses.dataclass(frozen=True)
class Bar(
schemas.Schema
):
types: typing.FrozenSet[typing.Type] = frozenset({
str,
})
enum_value_to_name: typing.Mapping[typing.Union[int, float, str, schemas.Bool, None], str] = dataclasses.field(
default_factory=lambda: {
"bar": "BAR",
}
)
enums = BarEnums
@typing.overload
@classmethod
def validate(
cls,
arg: typing.Literal["bar"],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["bar"]: ...
@typing.overload
@classmethod
def validate(
cls,
arg: str,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal["bar",]: ...
@classmethod
def validate(
cls,
arg,
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> typing.Literal[
"bar",
]:
validated_arg = super().validate_base(
arg,
configuration=configuration,
)
return typing.cast(typing.Literal[
"bar",
],
validated_arg
)
Properties = typing.TypedDict(
'Properties',
{
"foo": typing.Type[Foo],
"bar": typing.Type[Bar],
}
)
class EnumsInPropertiesDict(schemas.immutabledict[str, str]):
__required_keys__: typing.FrozenSet[str] = frozenset({
"bar",
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"foo",
})
def __new__(
cls,
*,
bar: typing.Literal[
"bar"
],
foo: typing.Union[
typing.Literal[
"foo"
],
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
**kwargs: schemas.INPUT_TYPES_ALL,
):
arg_: typing.Dict[str, typing.Any] = {
"bar": bar,
}
for key, val in (
("foo", foo),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
arg_.update(kwargs)
used_arg_ = typing.cast(EnumsInPropertiesDictInput, arg_)
return EnumsInProperties.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
EnumsInPropertiesDictInput,
EnumsInPropertiesDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> EnumsInPropertiesDict:
return EnumsInProperties.validate(arg, configuration=configuration)
@property
def bar(self) -> typing.Literal["bar"]:
return typing.cast(
typing.Literal["bar"],
self.__getitem__("bar")
)
@property
def foo(self) -> typing.Union[typing.Literal["foo"], schemas.Unset]:
val = self.get("foo", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
typing.Literal["foo"],
val
)
def METHOD_NAME(self, name: str) -> typing.Union[schemas.OUTPUT_BASE_TYPES, schemas.Unset]:
schemas.raise_if_key_known(name, self.__required_keys__, self.__optional_keys__)
return self.get(name, schemas.unset)
EnumsInPropertiesDictInput = typing.Mapping[str, schemas.INPUT_TYPES_ALL]
@dataclasses.dataclass(frozen=True)
class EnumsInProperties(
schemas.Schema[EnumsInPropertiesDict, tuple]
):
"""NOTE: This class is auto generated by OpenAPI JSON Schema Generator.
Ref: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
Do not edit the class manually.
"""
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
required: typing.FrozenSet[str] = frozenset({
"bar",
})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: EnumsInPropertiesDict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
EnumsInPropertiesDictInput,
EnumsInPropertiesDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> EnumsInPropertiesDict:
return super().validate_base(
arg,
configuration=configuration,
)
| null |
792 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for c4_wsrs_utils."""
import re
from unittest import mock
from absl.testing import absltest
import numpy as np
from tensorflow_datasets.text.c4_wsrs import c4_wsrs_utils
class C4WsrsUtilsTest(absltest.TestCase):
def test_create_word_finder_regex(self):
words = ['pt', 'ct', 's']
word_finder_re = c4_wsrs_utils.create_word_finder_regex(words)
string = "the (pt's) #ct to feel drowsy."
self.assertEqual(word_finder_re.findall(string), ['pt', 'ct'])
def test_get_abbreviation_expansion_pairs(self):
snippet = 'the patient is in the emergency room.'
abbreviations_by_expansion = {'patient': ['pt'], 'emergency room': ['er']}
expansion_re = re.compile('(patient|emergency room)')
result = c4_wsrs_utils._get_abbreviation_expansion_pairs(
snippet, abbreviations_by_expansion, expansion_re
)
self.assertEqual(
result, {4: ('pt', 'patient'), 22: ('er', 'emergency room')}
)
def test_extract_snippets(self):
doc = {
'url': b'test/url.com',
'text': b'the patient is in the emergency room. the doctor just left.',
}
abbreviations_by_expansion = {
'patient': ['pt'],
'emergency room': ['er'],
'doctor': ['dr'],
}
expansion_re = re.compile('(patient|emergency room|doctor)')
with mock.patch.object(
np.random, 'uniform', autospec=True
) as random_uniform_mock:
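      # Make the random keep/drop draw deterministic for this test.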
random_uniform_mock.return_value = 0.1
results = list(
c4_wsrs_utils.extract_snippets(
doc,
max_sentences_per_snippet=1,
abbreviations_by_expansion=abbreviations_by_expansion,
expansion_regex=expansion_re,
max_snippet_char_len=1024,
alpha_keep_no_rs=0.0,
alpha_keep_rs=0.0,
)
)
self.assertEqual(
results,
[
(
'url=test/url.com,snippet_id=0',
(
'the patient is in the emergency room.',
('pt', 'patient'),
{4: ('pt', 'patient'), 22: ('er', 'emergency room')},
),
),
(
'url=test/url.com,snippet_id=1',
(
'the doctor just left.',
('dr', 'doctor'),
{4: ('dr', 'doctor')},
),
),
],
)
def test_reverse_substitute_snippet(self):
snippet = 'the patient is in the emergency room.'
index_to_pair = {4: ('pt', 'patient'), 22: ('er', 'emergency room')}
result = c4_wsrs_utils._reverse_substitute_snippet(
snippet, index_to_pair, substitution_rate=1.0
)
self.assertEqual(result, 'the pt is in the er.')
def METHOD_NAME(self):
element = (
'url=test/url.com,snippet_id=0',
(
'the patient is in the emergency room.',
('pt', 'patient'),
{4: ('pt', 'patient'), 22: ('er', 'emergency room')},
),
)
rs_results = list(
c4_wsrs_utils.reverse_substitution(
element, substitution_rate=1.0, min_snippet_token_len=3
)
)
expected_features = c4_wsrs_utils.WSRSFeatures(
original_snippet='the patient is in the emergency room.',
abbreviated_snippet='the pt is in the er.',
)
self.assertSameElements(
rs_results,
[(
('pt', 'patient'),
('url=test/url.com,snippet_id=0', expected_features),
)],
)
if __name__ == '__main__':
absltest.main() | null |
793 | # Copyright 2018-2022 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import typing
import bpy
import mathutils
from ...com import gltf2_blender_math
class Keyframe:
def __init__(self, channels: typing.Tuple[bpy.types.FCurve], frame: float, bake_channel: typing.Union[str, None]):
self.seconds = frame / bpy.context.scene.render.fps
self.frame = frame
self.fps = bpy.context.scene.render.fps
self.__length_morph = 0
        # Note: channels may contain None entries, but only for shape keys (SK) when some SKs are not animated
if bake_channel is None:
self.target = [c for c in channels if c is not None][0].data_path.split('.')[-1]
if self.target != "value":
self.__indices = [c.array_index for c in channels]
else:
self.__indices = [i for i, c in enumerate(channels) if c is not None]
self.__length_morph = len(channels)
else:
if bake_channel == "value":
self.__length_morph = len(channels)
self.target = bake_channel
self.__indices = []
for i in range(self.get_target_len()):
self.__indices.append(i)
# Data holders for virtual properties
self.__value = None
self.__in_tangent = None
self.__out_tangent = None
def get_target_len(self):
length = {
"delta_location": 3,
"delta_rotation_euler": 3,
"delta_rotation_quaternion": 4,
"delta_scale": 3,
"location": 3,
"rotation_axis_angle": 4,
"rotation_euler": 3,
"rotation_quaternion": 4,
"scale": 3,
"value": self.__length_morph
}.get(self.target)
if length is None:
raise RuntimeError("Animations with target type '{}' are not supported.".format(self.target))
return length
def __set_indexed(self, value):
        # Blender animations sometimes reference only a subset of components of a data target.
        # A Keyframe must always contain a complete Vector/Quaternion, so use each keyframe's
        # array_index to place values into the full structure.
        # For shape keys (SK), all SK values must be present.
result = [0.0] * self.get_target_len()
for i, v in zip(self.__indices, value):
result[i] = v
return result
def METHOD_NAME(self):
return self.__indices
def set_value_index(self, idx, val):
self.__value[idx] = val
def set_value_index_in(self, idx, val):
self.__in_tangent[idx] = val
def set_value_index_out(self, idx, val):
self.__out_tangent[idx] = val
def set_first_tangent(self):
self.__in_tangent = self.__value
def set_last_tangent(self):
self.__out_tangent = self.__value
@property
def value(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.target == "value":
return self.__value
return gltf2_blender_math.list_to_mathutils(self.__value, self.target)
@value.setter
def value(self, value: typing.List[float]):
self.__value = self.__set_indexed(value)
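    # Note: reusing @value.setter on value_total defines a second property that shares
    # value's getter but stores the raw list directly, without array_index remapping.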
@value.setter
def value_total(self, value: typing.List[float]):
self.__value = value
@property
def in_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.__in_tangent is None:
return None
if self.target == "value":
return self.__in_tangent
return gltf2_blender_math.list_to_mathutils(self.__in_tangent, self.target)
@in_tangent.setter
def in_tangent(self, value: typing.List[float]):
self.__in_tangent = self.__set_indexed(value)
@property
def out_tangent(self) -> typing.Union[mathutils.Vector, mathutils.Euler, mathutils.Quaternion, typing.List[float]]:
if self.__out_tangent is None:
return None
if self.target == "value":
return self.__out_tangent
return gltf2_blender_math.list_to_mathutils(self.__out_tangent, self.target)
@out_tangent.setter
def out_tangent(self, value: typing.List[float]):
self.__out_tangent = self.__set_indexed(value) | null |
794 | """
This type stub file was generated by pyright.
"""
import threading
from collections import deque
from time import time
from typing import Any
from sentry_sdk._types import MYPY
"""
A fork of Python 3.6's stdlib queue with Lock swapped out for RLock to avoid a
deadlock while garbage collecting.
See
https://codewithoutrules.com/2017/08/16/concurrency-python/
https://bugs.python.org/issue14976
https://github.com/sqlalchemy/sqlalchemy/blob/4eb747b61f0c1b1c25bdee3856d7195d10a0c227/lib/sqlalchemy/queue.py#L1
We also vendor the code to evade eventlet's broken monkeypatching, see
https://github.com/getsentry/sentry-python/pull/484
"""
if MYPY: ...
__all__ = ["EmptyError", "FullError", "Queue"]
class EmptyError(Exception):
"Exception raised by Queue.get(block=0)/get_nowait()."
...
class FullError(Exception):
"Exception raised by Queue.put(block=0)/put_nowait()."
...
class Queue:
"""Create a queue object with a given maximum size.
If maxsize is <= 0, the queue size is infinite.
"""
def __init__(self, maxsize=...) -> None: ...
def task_done(self): # -> None:
"""Indicate that a formerly enqueued task is complete.
Used by Queue consumer threads. For each get() used to fetch a task,
a subsequent call to task_done() tells the queue that the processing
on the task is complete.
If a join() is currently blocking, it will resume when all items
have been processed (meaning that a task_done() call was received
for every item that had been put() into the queue).
Raises a ValueError if called more times than there were items
placed in the queue.
"""
...
def join(self): # -> None:
"""Blocks until all items in the Queue have been gotten and processed.
The count of unfinished tasks goes up whenever an item is added to the
queue. The count goes down whenever a consumer thread calls task_done()
to indicate the item was retrieved and all work on it is complete.
When the count of unfinished tasks drops to zero, join() unblocks.
"""
...
def qsize(self): # -> int:
"""Return the approximate size of the queue (not reliable!)."""
...
def empty(self): # -> bool:
"""Return True if the queue is empty, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() == 0
as a direct substitute, but be aware that either approach risks a race
condition where a queue can grow before the result of empty() or
qsize() can be used.
To create code that needs to wait for all queued tasks to be
completed, the preferred technique is to use the join() method.
"""
...
def full(self): # -> bool:
"""Return True if the queue is full, False otherwise (not reliable!).
This method is likely to be removed at some point. Use qsize() >= n
as a direct substitute, but be aware that either approach risks a race
condition where a queue can shrink before the result of full() or
qsize() can be used.
"""
...
def put(self, item, block=..., timeout=...): # -> None:
"""Put an item into the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until a free slot is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the FullError exception if no free slot was available within that time.
Otherwise ('block' is false), put an item on the queue if a free slot
is immediately available, else raise the FullError exception ('timeout'
is ignored in that case).
"""
...
def get(self, block=..., timeout=...): # -> Any:
"""Remove and return an item from the queue.
If optional args 'block' is true and 'timeout' is None (the default),
block if necessary until an item is available. If 'timeout' is
a non-negative number, it blocks at most 'timeout' seconds and raises
the EmptyError exception if no item was available within that time.
Otherwise ('block' is false), return an item if one is immediately
available, else raise the EmptyError exception ('timeout' is ignored
in that case).
"""
...
def put_nowait(self, item): # -> None:
"""Put an item into the queue without blocking.
Only enqueue the item if a free slot is immediately available.
Otherwise raise the FullError exception.
"""
...
def METHOD_NAME(self): # -> Any:
"""Remove and return an item from the queue without blocking.
Only get an item if one is immediately available. Otherwise
raise the EmptyError exception.
"""
... | null |
795 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class CreateRouterInterfaceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'CreateRouterInterface','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AccessPointId(self): # String
return self.get_query_params().get('AccessPointId')
def set_AccessPointId(self, AccessPointId): # String
self.add_query_param('AccessPointId', AccessPointId)
def get_OppositeRouterId(self): # String
return self.get_query_params().get('OppositeRouterId')
def set_OppositeRouterId(self, OppositeRouterId): # String
self.add_query_param('OppositeRouterId', OppositeRouterId)
def get_OppositeAccessPointId(self): # String
return self.get_query_params().get('OppositeAccessPointId')
def set_OppositeAccessPointId(self, OppositeAccessPointId): # String
self.add_query_param('OppositeAccessPointId', OppositeAccessPointId)
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Role(self): # String
return self.get_query_params().get('Role')
def set_Role(self, Role): # String
self.add_query_param('Role', Role)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckTargetIp(self): # String
return self.get_query_params().get('HealthCheckTargetIp')
def set_HealthCheckTargetIp(self, HealthCheckTargetIp): # String
self.add_query_param('HealthCheckTargetIp', HealthCheckTargetIp)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_Spec(self): # String
return self.get_query_params().get('Spec')
def set_Spec(self, Spec): # String
self.add_query_param('Spec', Spec)
def get_UserCidr(self): # String
return self.get_query_params().get('UserCidr')
def set_UserCidr(self, UserCidr): # String
self.add_query_param('UserCidr', UserCidr)
def get_OppositeInterfaceId(self): # String
return self.get_query_params().get('OppositeInterfaceId')
def set_OppositeInterfaceId(self, OppositeInterfaceId): # String
self.add_query_param('OppositeInterfaceId', OppositeInterfaceId)
def get_InstanceChargeType(self): # String
return self.get_query_params().get('InstanceChargeType')
def set_InstanceChargeType(self, InstanceChargeType): # String
self.add_query_param('InstanceChargeType', InstanceChargeType)
def get_Period(self): # Integer
return self.get_query_params().get('Period')
def set_Period(self, Period): # Integer
self.add_query_param('Period', Period)
def get_AutoPay(self): # Boolean
return self.get_query_params().get('AutoPay')
def set_AutoPay(self, AutoPay): # Boolean
self.add_query_param('AutoPay', AutoPay)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OppositeRegionId(self): # String
return self.get_query_params().get('OppositeRegionId')
def set_OppositeRegionId(self, OppositeRegionId): # String
self.add_query_param('OppositeRegionId', OppositeRegionId)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_OppositeInterfaceOwnerId(self): # String
return self.get_query_params().get('OppositeInterfaceOwnerId')
def set_OppositeInterfaceOwnerId(self, OppositeInterfaceOwnerId): # String
self.add_query_param('OppositeInterfaceOwnerId', OppositeInterfaceOwnerId)
def METHOD_NAME(self): # String
return self.get_query_params().get('RouterType')
def set_RouterType(self, RouterType): # String
self.add_query_param('RouterType', RouterType)
def get_HealthCheckSourceIp(self): # String
return self.get_query_params().get('HealthCheckSourceIp')
def set_HealthCheckSourceIp(self, HealthCheckSourceIp): # String
self.add_query_param('HealthCheckSourceIp', HealthCheckSourceIp)
def get_RouterId(self): # String
return self.get_query_params().get('RouterId')
def set_RouterId(self, RouterId): # String
self.add_query_param('RouterId', RouterId)
def get_OppositeRouterType(self): # String
return self.get_query_params().get('OppositeRouterType')
def set_OppositeRouterType(self, OppositeRouterType): # String
self.add_query_param('OppositeRouterType', OppositeRouterType)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_PricingCycle(self): # String
return self.get_query_params().get('PricingCycle')
def set_PricingCycle(self, PricingCycle): # String
self.add_query_param('PricingCycle', PricingCycle) | null |
796 | from unittest.mock import patch
from django.contrib.auth import get_user_model
from django.urls import reverse
from nextcloudappstore.api.v1.release.provider import AppReleaseProvider
from nextcloudappstore.api.v1.tests.api import ApiTest
from nextcloudappstore.core.models import App, AppRelease
class AppReleaseTest(ApiTest):
delete_url = reverse("api:v1:app-release-delete", kwargs={"app": "news0", "version": "9.0.0"})
delete_url_nightly = reverse(
"api:v1:app-release-delete", kwargs={"app": "news0", "version": "9.0.0", "nightly": "nightly"}
)
create_url = reverse("api:v1:app-release-create")
app_args = {
"app": {
"id": "news0",
"release": {
"version": "9.0.0",
"platform_min_version": "9.0.0",
"raw_platform_min_version": "9.0.0",
"platform_max_version": "*",
"raw_platform_max_version": "*",
"php_min_version": "5.6.0",
"raw_php_min_version": "5.6.0",
"php_max_version": "*",
"raw_php_max_version": "*",
},
}
}
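    # Helper: create the "news0" app owned by `owner` together with a single release.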
    def create_release(self, owner, version="9.0.0", co_maintainers=None):
        app = App.objects.create(id="news0", owner=owner)
        app.co_maintainers.set(co_maintainers or [])
app.save()
return AppRelease.objects.create(version=version, app=app)
def test_delete(self):
self.create_release(self.user)
self._login_token()
response = self.api_client.delete(self.delete_url)
self.assertEqual(204, response.status_code)
with self.assertRaises(AppRelease.DoesNotExist):
AppRelease.objects.get(version="9.0.0", app__id="news0")
def test_delete_unauthenticated(self):
self.create_release(self.user)
response = self.api_client.delete(self.delete_url)
self.assertEqual(401, response.status_code)
def test_delete_unauthorized(self):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
self.create_release(owner)
self._login_token()
response = self.api_client.delete(self.delete_url)
self.assertEqual(403, response.status_code)
def test_delete_not_found_token(self):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
self.create_release(owner)
self._login_token()
response = self.api_client.delete(self.delete_url_nightly)
self.assertEqual(404, response.status_code)
def test_delete_co_maintainer(self):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
self.create_release(owner=owner, co_maintainers=[self.user])
self._login_token()
response = self.api_client.delete(self.delete_url)
self.assertEqual(204, response.status_code)
with self.assertRaises(AppRelease.DoesNotExist):
AppRelease.objects.get(version="9.0.0", app__id="news0")
def test_delete_not_found(self):
self._login()
response = self.api_client.delete(self.delete_url)
self.assertEqual(404, response.status_code)
def test_create_unauthenticated(self):
self.create_release(self.user)
response = self.api_client.post(self.create_url, data={"download": "https://download.com"}, format="json")
self.assertEqual(401, response.status_code)
@patch.object(AppReleaseProvider, "get_release_info")
def METHOD_NAME(self, get_release_info):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
self.create_release(owner)
self._login()
get_release_info.return_value = (self.app_args, "checksum")
response = self.api_client.post(
self.create_url,
data={
"download": "https://download.com",
"signature": "sign",
},
format="json",
)
self.assertEqual(403, response.status_code)
@patch.object(AppReleaseProvider, "get_release_info")
def test_create_co_maintainer(self, get_release_info):
owner = get_user_model().objects.create_user(username="owner", password="owner", email="[email protected]")
self.create_release(owner=owner, co_maintainers=[self.user])
self._login()
get_release_info.return_value = (self.app_args, "checksum")
with self.settings(VALIDATE_CERTIFICATES=False):
response = self.api_client.post(
self.create_url,
data={
"download": "https://download.com",
"signature": "sign",
},
format="json",
)
self.assertEqual(200, response.status_code)
AppRelease.objects.get(version="9.0.0", app__id="news0")
@patch.object(AppReleaseProvider, "get_release_info")
def test_no_app(self, get_release_info):
self._login()
get_release_info.return_value = (self.app_args, "checksum")
with self.settings(VALIDATE_CERTIFICATES=False):
response = self.api_client.post(
self.create_url,
data={
"download": "https://download.com",
"signature": "sign",
},
format="json",
)
self.assertEqual(400, response.status_code)
with self.assertRaises(AppRelease.DoesNotExist):
AppRelease.objects.get(version="9.0.0", app__id="news0")
def test_create_validate_https(self):
self._login_token()
response = self.api_client.post(
self.create_url,
data={
"download": "http://download.com",
"signature": "sign",
},
format="json",
)
self.assertEqual(400, response.status_code) | null |
797 | # Copyright (C) 2012 Anaconda, Inc
# SPDX-License-Identifier: BSD-3-Clause
# Try to keep compat small because it's imported by everything
# What is compat, and what isn't?
# If a piece of code is "general" and used in multiple modules, it goes here.
# If it's only used in one module, keep it in that module, preferably near the top.
# This module should contain ONLY stdlib imports.
import sys
on_win = bool(sys.platform == "win32")
on_mac = bool(sys.platform == "darwin")
on_linux = bool(sys.platform == "linux")
FILESYSTEM_ENCODING = sys.getfilesystemencoding()
# Control some tweakables that will eventually be removed.
ENCODE_ENVIRONMENT = True
ENCODE_ARGS = False
def encode_for_env_var(value) -> str:
"""Environment names and values need to be string."""
if isinstance(value, str):
return value
elif isinstance(value, bytes):
return value.decode()
return str(value)
def encode_environment(env):
if ENCODE_ENVIRONMENT:
env = {encode_for_env_var(k): encode_for_env_var(v) for k, v in env.items()}
return env
def encode_arguments(arguments):
    if ENCODE_ARGS:
        # Use a list, not a set: argument order and duplicates must be preserved.
        arguments = [encode_for_env_var(arg) for arg in arguments]
    return arguments
from collections.abc import Iterable
def isiterable(obj):
return not isinstance(obj, str) and isinstance(obj, Iterable)
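# Examples: isiterable([1, 2]) is True; isiterable("abc") is False, because
# strings are deliberately excluded even though they are iterable.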
# #############################
# other
# #############################
from collections import OrderedDict as odict # noqa: F401
from io import open as io_open # NOQA
def open(
file, mode="r", buffering=-1, encoding=None, errors=None, newline=None, closefd=True
):
if "b" in mode:
return io_open(
file,
str(mode),
buffering=buffering,
errors=errors,
newline=newline,
closefd=closefd,
)
else:
return io_open(
file,
str(mode),
buffering=buffering,
encoding=encoding or "utf-8",
errors=errors,
newline=newline,
closefd=closefd,
)
def six_with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
class metaclass(type):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(metaclass, "temporary_class", (), {})
NoneType = type(None)
primitive_types = (str, int, float, complex, bool, NoneType)
def ensure_binary(value):
try:
return value.encode("utf-8")
except AttributeError: # pragma: no cover
# AttributeError: '<>' object has no attribute 'encode'
# In this case assume already binary type and do nothing
return value
def ensure_text_type(value) -> str:
try:
return value.decode("utf-8")
except AttributeError: # pragma: no cover
# AttributeError: '<>' object has no attribute 'decode'
# In this case assume already text_type and do nothing
return value
except UnicodeDecodeError: # pragma: no cover
try:
from chardet import detect
except ImportError:
try:
from requests.packages.chardet import detect
except ImportError: # pragma: no cover
from pip._vendor.requests.packages.chardet import detect
encoding = detect(value).get("encoding") or "utf-8"
return value.decode(encoding, errors="replace")
except UnicodeEncodeError: # pragma: no cover
# it's already str, so ignore?
# not sure, surfaced with tests/models/test_match_spec.py test_tarball_match_specs
# using py27
return value
def ensure_unicode(value):
try:
return value.decode("unicode_escape")
except AttributeError: # pragma: no cover
# AttributeError: '<>' object has no attribute 'decode'
# In this case assume already unicode and do nothing
return value
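# Best-effort encode to the filesystem encoding; non-string values and values
# that cannot be encoded are returned unchanged.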
def METHOD_NAME(value):
try:
return value.encode(FILESYSTEM_ENCODING)
except AttributeError:
return value
except UnicodeEncodeError:
return value
def ensure_utf8_encoding(value):
try:
return value.encode("utf-8")
except AttributeError:
return value
except UnicodeEncodeError:
return value | null |
798 | from __future__ import annotations
import itertools
import logging
import random
import sys
from array import array
import importlib_metadata
from dask.utils import Dispatch
sizeof = Dispatch(name="sizeof")
logger = logging.getLogger(__name__)
@sizeof.register(object)
def sizeof_default(o):
return sys.getsizeof(o)
@sizeof.register(bytes)
@sizeof.register(bytearray)
def sizeof_bytes(o):
return len(o)
@sizeof.register(memoryview)
def sizeof_memoryview(o):
return o.nbytes
@sizeof.register(array)
def sizeof_array(o):
return o.itemsize * len(o)
@sizeof.register(list)
@sizeof.register(tuple)
@sizeof.register(set)
@sizeof.register(frozenset)
def sizeof_python_collection(seq):
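    # For large collections, estimate by sampling up to 10 items and extrapolating.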
num_items = len(seq)
num_samples = 10
if num_items > num_samples:
if isinstance(seq, (set, frozenset)):
            # As of Python 3.9, calling random.sample() on a set is deprecated,
            # but since sets are unordered anyway we can simply pick the first
            # `num_samples` items.
samples = itertools.islice(seq, num_samples)
else:
samples = random.sample(seq, num_samples)
return sys.getsizeof(seq) + int(
num_items / num_samples * sum(map(sizeof, samples))
)
else:
return sys.getsizeof(seq) + sum(map(sizeof, seq))
class SimpleSizeof:
"""Sentinel class to mark a class to be skipped by the dispatcher. This only
works if this sentinel mixin is first in the mro.
Examples
--------
>>> class TheAnswer(SimpleSizeof):
... def __sizeof__(self):
    ...         # Sizeof always adds the overhead of an object for GC
... return 42 - sizeof(object())
>>> sizeof(TheAnswer())
42
"""
@sizeof.register(SimpleSizeof)
def sizeof_blocked(d):
return sys.getsizeof(d)
@sizeof.register(dict)
def sizeof_python_dict(d):
return (
sys.getsizeof(d)
+ sizeof(list(d.keys()))
+ sizeof(list(d.values()))
- 2 * sizeof(list())
)
@sizeof.register_lazy("cupy")
def register_cupy():
import cupy
@sizeof.register(cupy.ndarray)
def METHOD_NAME(x):
return int(x.nbytes)
@sizeof.register_lazy("numba")
def register_numba():
import numba.cuda
@sizeof.register(numba.cuda.cudadrv.devicearray.DeviceNDArray)
def sizeof_numba_devicendarray(x):
return int(x.nbytes)
@sizeof.register_lazy("rmm")
def register_rmm():
import rmm
# Only included in 0.11.0+
if hasattr(rmm, "DeviceBuffer"):
@sizeof.register(rmm.DeviceBuffer)
def sizeof_rmm_devicebuffer(x):
return int(x.nbytes)
@sizeof.register_lazy("numpy")
def register_numpy():
import numpy as np
@sizeof.register(np.ndarray)
def sizeof_numpy_ndarray(x):
if 0 in x.strides:
xs = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]
return xs.nbytes
return int(x.nbytes)
@sizeof.register_lazy("pandas")
def register_pandas():
import numpy as np
import pandas as pd
OBJECT_DTYPES = (object, pd.StringDtype("python"))
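    # Estimate the deep size of object-dtype data by sampling 100 references per
    # input and correcting for objects shared across the inputs.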
def object_size(*xs):
if not xs:
return 0
ncells = sum(len(x) for x in xs)
if not ncells:
return 0
# Deduplicate Series of references to the same objects,
# e.g. as produced by read_parquet
unique_samples = {}
for x in xs:
sample = np.random.choice(x, size=100, replace=True)
for i in sample.tolist():
unique_samples[id(i)] = i
nsamples = 100 * len(xs)
sample_nbytes = sum(sizeof(i) for i in unique_samples.values())
if len(unique_samples) / nsamples > 0.5:
# Less than half of the references are duplicated.
# Assume that, if we were to analyze twice the amount of random references,
# we would get twice the amount of unique objects too.
return int(sample_nbytes * ncells / nsamples)
else:
# Assume we've already found all unique objects and that all references that
# we have not yet analyzed are going to point to the same data.
return sample_nbytes
@sizeof.register(pd.DataFrame)
def sizeof_pandas_dataframe(df):
p = sizeof(df.index) + sizeof(df.columns)
object_cols = []
prev_dtype = None
# Unlike df.items(), df._series will not duplicate multiple views of the same
# column e.g. df[["x", "x", "x"]]
for col in df._series.values():
if prev_dtype is None or col.dtype != prev_dtype:
prev_dtype = col.dtype
# Contiguous columns of the same dtype share the same overhead
p += 1200
p += col.memory_usage(index=False, deep=False)
if col.dtype in OBJECT_DTYPES:
object_cols.append(col._values)
# Deduplicate references to the same objects appearing in different Series
p += object_size(*object_cols)
return max(1200, p)
@sizeof.register(pd.Series)
def sizeof_pandas_series(s):
# https://github.com/dask/dask/pull/9776#issuecomment-1359085962
p = 1200 + sizeof(s.index) + s.memory_usage(index=False, deep=False)
if s.dtype in OBJECT_DTYPES:
p += object_size(s._values)
return p
@sizeof.register(pd.Index)
def sizeof_pandas_index(i):
p = 400 + i.memory_usage(deep=False)
if i.dtype in OBJECT_DTYPES:
p += object_size(i)
return p
@sizeof.register(pd.MultiIndex)
def sizeof_pandas_multiindex(i):
p = sum(sizeof(lev) for lev in i.levels)
for c in i.codes:
p += c.nbytes
return p
@sizeof.register_lazy("scipy")
def register_spmatrix():
from scipy import sparse
@sizeof.register(sparse.dok_matrix)
def sizeof_spmatrix_dok(s):
return s.__sizeof__()
@sizeof.register(sparse.spmatrix)
def sizeof_spmatrix(s):
return sum(sizeof(v) for v in s.__dict__.values())
@sizeof.register_lazy("pyarrow")
def register_pyarrow():
import pyarrow as pa
def _get_col_size(data):
p = 0
if not isinstance(data, pa.ChunkedArray):
data = data.data # pyarrow <0.15.0
for chunk in data.iterchunks():
for buffer in chunk.buffers():
if buffer:
p += buffer.size
return p
@sizeof.register(pa.Table)
def sizeof_pyarrow_table(table):
p = sizeof(table.schema.metadata)
for col in table.itercolumns():
p += _get_col_size(col)
return int(p) + 1000
@sizeof.register(pa.ChunkedArray)
def sizeof_pyarrow_chunked_array(data):
return int(_get_col_size(data)) + 1000
def _register_entry_point_plugins():
"""Register sizeof implementations exposed by the entry_point mechanism."""
for entry_point in importlib_metadata.entry_points(group="dask.sizeof"):
registrar = entry_point.load()
try:
registrar(sizeof)
except Exception:
logger.exception(
f"Failed to register sizeof entry point {entry_point.name}"
)
_register_entry_point_plugins() | null |
799 | """Tests for anomaly detection with OTX CLI."""
# Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import os
import pytest
from otx.cli.registry import Registry
from tests.test_suite.e2e_test_system import e2e_pytest_component
from tests.test_suite.run_test_command import (
nncf_eval_openvino_testing,
nncf_eval_testing,
nncf_export_testing,
nncf_optimize_testing,
nncf_validate_fq_testing,
otx_demo_deployment_testing,
otx_demo_openvino_testing,
otx_demo_testing,
otx_deploy_openvino_testing,
otx_eval_deployment_testing,
otx_eval_openvino_testing,
otx_eval_testing,
otx_export_testing,
otx_train_testing,
ptq_eval_testing,
ptq_optimize_testing,
ptq_validate_fq_testing,
)
args = {
"--train-data-roots": "tests/assets/anomaly/hazelnut/train",
"--val-data-roots": "tests/assets/anomaly/hazelnut/test",
"--test-data-roots": "tests/assets/anomaly/hazelnut/test",
"--input": "tests/assets/anomaly/hazelnut/test/colour",
"train_params": [],
}
otx_dir = os.getcwd()
templates = Registry("src/otx/algorithms").filter(task_type="ANOMALY_DETECTION").templates
templates_ids = [template.model_template_id for template in templates]
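# Every registered ANOMALY_DETECTION template is exercised; each test below is parametrized over them.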
class TestToolsAnomalyDetection:
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_train(self, template, tmp_dir_path):
otx_train_testing(template, tmp_dir_path, otx_dir, args, deterministic=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_export(self, template, tmp_dir_path):
otx_export_testing(template, tmp_dir_path, check_ir_meta=True)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval(self, template, tmp_dir_path):
otx_eval_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_openvino(self, template, tmp_dir_path):
otx_eval_openvino_testing(template, tmp_dir_path, otx_dir, args, threshold=0.2)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo(self, template, tmp_dir_path):
otx_demo_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo_openvino(self, template, tmp_dir_path):
otx_demo_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_deploy_openvino(self, template, tmp_dir_path):
otx_deploy_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_eval_deployment(self, template, tmp_dir_path):
otx_eval_deployment_testing(template, tmp_dir_path, otx_dir, args, threshold=0.0)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_otx_demo_deployment(self, template, tmp_dir_path):
otx_demo_deployment_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_optimize(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_export(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_export_testing(template, tmp_dir_path)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_validate_fq(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_nncf_eval(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_testing(template, tmp_dir_path, otx_dir, args, threshold=0.01)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def METHOD_NAME(self, template, tmp_dir_path):
if template.entrypoints.nncf is None:
pytest.skip("nncf entrypoint is none")
nncf_eval_openvino_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_optimize(self, template, tmp_dir_path):
ptq_optimize_testing(template, tmp_dir_path, otx_dir, args)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_validate_fq(self, template, tmp_dir_path):
ptq_validate_fq_testing(template, tmp_dir_path, otx_dir, "anomaly", type(self).__name__)
@e2e_pytest_component
@pytest.mark.parametrize("template", templates, ids=templates_ids)
def test_ptq_eval(self, template, tmp_dir_path):
ptq_eval_testing(template, tmp_dir_path, otx_dir, args) | null |