label | code |
---|---|
get num slices | #!/usr/bin/python
##################
# TiffDataSource.py
#
# Copyright David Baddeley, 2009
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
from PYME.IO.FileUtils.nameUtils import getFullExistingFilename
#from PYME.IO.FileUtils import readTiff
#import Image
#from PYME.misc import TiffImagePlugin #monkey patch PIL with improved tiff support from Priithon
#import numpy as np
try:
import tifffile
local_tifffile = False
except ImportError:
local_tifffile = True
from PYME.contrib.gohlke import tifffile
from .BaseDataSource import XYZTCDataSource
import logging
logger = logging.getLogger(__name__)
class DataSource(XYZTCDataSource):
moduleName = 'TiffDataSource'
def __init__(self, filename, taskQueue=None, chanNum = 0, series=0):
self.filename = getFullExistingFilename(filename)#convert relative path to full path
self.chanNum = chanNum
self.RGB = False
#self.data = readTiff.read3DTiff(self.filename)
#self.im = Image.open(filename)
#self.im.seek(0)
#PIL's endedness support is subtly broken - try to fix it
#NB this is untested for floating point tiffs
#self.endedness = 'LE'
#if self.im.ifd.prefix =='MM':
# self.endedness = 'BE'
#to find the number of images we have to loop over them all
#this is obviously not ideal as PIL loads the image data into memory for each
#slice and this is going to represent a huge performance penalty for large stacks
#should still let them be opened without having all images in memory at once though
#self.numSlices = self.im.tell()
#try:
# while True:
# self.numSlices += 1
# self.im.seek(self.numSlices)
#except EOFError:
# pass
print(self.filename)
if local_tifffile:
logger.info('Using the old tifffile version bundled with PYME; better support for ImageJ tiffs is available with the more recent pip version (`pip install tifffile`)')
tf = tifffile.TIFFfile(self.filename)
else:
tf = tifffile.TiffFile(self.filename)
self.tf = tf # keep a reference for debugging
print(tf.series[series].shape)
self.im = tf.series[series].pages
axisOrder = 'XYZTC'
size_z = len(self.im)
size_c, size_t = 1,1
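# Newer tifffile releases (and OME metadata) describe each series with an axes string;
# the branch below uses it to derive the Z/T/C sizes and the on-disk axis order.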
if tf.is_ome or ((not local_tifffile)):
#print('Detected OME TIFF')
sh = {'Z':1, 'T': 1,'C':1}
_axes = tf.series[series].axes
if 'I' in _axes:
logger.info('Tiff file does not fully specify axes (axes=%s)' % (_axes.replace('I', '?')[::-1]))
if 'Z' not in _axes:
logger.info('Assuming unknown axis is Z') # TODO - explain how to change axes later
_axes = _axes.replace('I', 'Z')
elif 'C' not in _axes:
logger.info('Assuming unknown axis is C')
_axes = _axes.replace('I', 'C')
elif 'T' not in _axes:
logger.info('Assuming unknown axis is T')
_axes = _axes.replace('I', 'T')
else:
logger.warning('Unknown axis with all standard axes defined - data might not read correctly')
sh.update(dict(zip(_axes, tf.series[0].shape)))
logger.debug('sh = %s' % sh)
size_c = sh['C']
size_z = sh['Z']
size_t = sh['T']
axisOrder = _axes[::-1]
axisOrder = axisOrder + ''.join([a for a in ['Z', 'T', 'C'] if not a in axisOrder])
logger.debug('raw TIFF axisOrder = %s' %axisOrder)
#self.additionalDims = ''.join([a for a in axisOrder[2:] if sh[a] > 1])
elif tf.is_rgb:
print('WARNING: Detected RGB TIFF - data not likely to be suitable for quantitative analysis')
size_c = 3
self.RGB = True
if len(self.im) > 1:
# we can have multi-page RGB TIFF - why?????
print('WARNING: Multi-page RGB TIFF detected - where did this come from???')
axisOrder = 'XYCZT'
size_z = len(self.im)
size_t = 1
XYZTCDataSource.__init__(self, input_order=axisOrder, size_z=size_z, size_t=size_t, size_c=size_c)
sl0 = self.getSlice(0)
self._dtype = sl0.dtype
self._shape = [sl0.shape[0], sl0.shape[1], size_z, size_t, size_c]
def getSlice(self, ind):
#self.im.seek(ind)
#ima = np.array(im.getdata()).newbyteorder(self.endedness)
#return ima.reshape((self.im.size[1], self.im.size[0]))
#return self.data[:,:,ind]
if self.RGB:
# special case for RGB TIFF
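# Flat slice indices cover every page for channel 0 first, then channel 1, then 2:
# page = ind % n_pages, colour channel = ind // n_pages.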
ind_0 = ind%len(self.im)
ind_1 = int(ind/len(self.im))
return self.im[ind_0].asarray(squeeze=False)[0, 0, :,:,ind_1].squeeze()
if local_tifffile:
res = self.im[ind].asarray(squeeze=False, colormapped=False)
res = res[0, self.chanNum, :, :].squeeze()
else:
res = self.im[ind].asarray()
#if res.ndim == 3:
#print res.shape
#print self.chanNum
#print res.shape
return res
def getSliceShape(self):
#return (self.im.size[1], self.im.size[0])
if len(self.im[0].shape) == 2:
return self.im[0].shape
elif self.RGB:
return self.im[0].shape[:2]
else:
return self.im[0].shape[1:3] #FIXME - when is this used?
#return self.data.shape[:2]
def METHOD_NAME(self):
if self.RGB:
return len(self.im)*3
return len(self.im)
def getEvents(self):
return []
def release(self):
#self.im.close()
pass
def reloadData(self):
pass |
do the read | # Copyright (c) "Neo4j"
# Neo4j Sweden AB [https://neo4j.com]
#
# This file is part of Neo4j.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import random
from functools import wraps
import pytest
import neo4j
from neo4j import exceptions as neo4j_exceptions
from ..._async_compat import mark_async_test
from ...conftest import get_async_driver
def _get_work():
work_cancelled = False
async def work(tx, i=1):
nonlocal work_cancelled
assert not work_cancelled # no retries after cancellation!
try:
result = await tx.run(f"RETURN {i}")
try:
for _ in range(3):
await asyncio.sleep(0)
except asyncio.CancelledError as e:
e.during_sleep = True
raise
records = [record async for record in result]
summary = await result.consume()
assert isinstance(summary, neo4j.ResultSummary)
assert len(records) == 1
assert list(records[0]) == [i]
except asyncio.CancelledError:
work_cancelled = True
raise
return work
async def _do_the_read_tx_func(session_, i=1):
await session_.execute_read(_get_work(), i=i)
def _with_retry(outer):
@wraps(outer)
async def inner(*args, **kwargs):
for _ in range(15): # super simple retry-mechanism
try:
return await outer(*args, **kwargs)
except (neo4j_exceptions.DriverError,
neo4j_exceptions.Neo4jError) as e:
if not e.is_retryable():
raise
await asyncio.sleep(1.5)
return inner
@_with_retry
async def _do_the_read_tx_context(session_, i=1):
async with await session_.begin_transaction() as tx:
await _get_work()(tx, i=i)
@_with_retry
async def _do_the_read_explicit_tx(session_, i=1):
tx = await session_.begin_transaction()
try:
await _get_work()(tx, i=i)
except asyncio.CancelledError:
tx.cancel()
raise
await tx.commit()
@_with_retry
async def METHOD_NAME(session_, i=1):
try:
return await _get_work()(session_, i=i)
except asyncio.CancelledError:
session_.cancel()
raise
REPETITIONS = 250
@mark_async_test
@pytest.mark.parametrize(("i", "read_func", "waits", "cancel_count"), (
(
f"{i + 1:0{len(str(REPETITIONS))}}/{REPETITIONS}",
random.choice((
METHOD_NAME, _do_the_read_tx_context, _do_the_read_explicit_tx,
_do_the_read_tx_func
)),
random.randint(0, 1000),
random.randint(1, 20),
)
for i in range(REPETITIONS)
))
async def test_async_cancellation(
uri, auth, mocker, read_func, waits, cancel_count, i
):
async with get_async_driver(
uri, auth=auth, connection_acquisition_timeout=10
) as driver:
async with driver.session() as session:
session._handle_cancellation = mocker.Mock(
wraps=session._handle_cancellation
)
fut = asyncio.ensure_future(read_func(session))
for _ in range(waits):
await asyncio.sleep(0)
# time for crazy abuse!
was_done = fut.done() and not fut.cancelled()
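# A future that has already completed cannot be cancelled, so record whether it was
# done to know if a CancelledError should be expected below.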
for _ in range(cancel_count):
fut.cancel()
await asyncio.sleep(0)
cancelled_error = None
if not was_done:
with pytest.raises(asyncio.CancelledError) as exc:
await fut
cancelled_error = exc.value
else:
await fut
bookmarks = await session.last_bookmarks()
if not waits:
assert not bookmarks
session._handle_cancellation.assert_not_called()
elif cancelled_error is not None:
assert not bookmarks
if (
read_func is METHOD_NAME
and not getattr(cancelled_error, "during_sleep", False)
):
# manually handling the session can lead to calling
# `session.cancel` twice, but that's ok, it's a noop if
# already cancelled.
assert len(session._handle_cancellation.call_args) == 2
else:
session._handle_cancellation.assert_called_once()
else:
assert bookmarks
session._handle_cancellation.assert_not_called()
for read_func in (
METHOD_NAME, _do_the_read_tx_context,
_do_the_read_explicit_tx, _do_the_read_tx_func
):
await read_func(session, i=2)
# test driver is still working
async with driver.session() as session:
await _do_the_read_tx_func(session, i=3)
new_bookmarks = await session.last_bookmarks()
assert new_bookmarks
assert bookmarks != new_bookmarks
SESSION_REPETITIONS = 50
READS_PER_SESSION = 15
@mark_async_test
async def test_async_cancellation_does_not_leak(uri, auth):
async with get_async_driver(
uri, auth=auth,
connection_acquisition_timeout=10,
# driver needs to cope with a single connection in the pool!
max_connection_pool_size=1,
) as driver:
for session_number in range(SESSION_REPETITIONS):
async with driver.session() as session:
for read_number in range(READS_PER_SESSION):
read_func = random.choice((
METHOD_NAME, _do_the_read_tx_context,
_do_the_read_explicit_tx, _do_the_read_tx_func
))
waits = random.randint(0, 1000)
cancel_count = random.randint(1, 20)
fut = asyncio.ensure_future(read_func(session))
for _ in range(waits):
await asyncio.sleep(0)
# time for crazy abuse!
was_done = fut.done() and not fut.cancelled()
for _ in range(cancel_count):
fut.cancel()
await asyncio.sleep(0)
if not was_done:
with pytest.raises(asyncio.CancelledError):
await fut
else:
await fut
await _do_the_read_tx_func(session, i=2)
pool_connections = driver._pool.connections
for connections in pool_connections.values():
assert len(connections) <= 1 |
line to | from fontTools.pens.basePen import BasePen
from functools import partial
from itertools import count
import sympy as sp
import sys
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
t, x, y = sp.symbols("t x y", real=True)
c = sp.symbols("c", real=False) # Complex representation instead of x/y
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
# Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)]
for i in range(1, n + 1):
last = BinomialCoefficient[-1]
this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this
BernsteinPolynomial = tuple(
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
for n, coeffs in enumerate(BinomialCoefficient)
)
BezierCurve = tuple(
tuple(
sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
for j in range(2)
)
for n, bernsteins in enumerate(BernsteinPolynomial)
)
BezierCurveC = tuple(
sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
for n, bernsteins in enumerate(BernsteinPolynomial)
)
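# green(f, curveXY) applies Green's theorem: the area integral of f over the region
# enclosed by a closed contour equals the sum over its segments of the line integral
# computed here (-∫f dy substituted onto the curve and integrated for t in [0, 1]).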
def green(f, curveXY):
f = -sp.integrate(sp.sympify(f), y)
f = f.subs({x: curveXY[0], y: curveXY[1]})
f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
return f
class _BezierFuncsLazy(dict):
def __init__(self, symfunc):
self._symfunc = symfunc
self._bezfuncs = {}
def __missing__(self, i):
args = ["p%d" % d for d in range(i + 1)]
f = green(self._symfunc, BezierCurve[i])
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
return sp.lambdify(args, f)
class GreenPen(BasePen):
_BezierFuncs = {}
@classmethod
def _getGreenBezierFuncs(celf, func):
funcstr = str(func)
if funcstr not in celf._BezierFuncs:
celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
return celf._BezierFuncs[funcstr]
def __init__(self, func, glyphset=None):
BasePen.__init__(self, glyphset)
self._funcs = self._getGreenBezierFuncs(func)
self.value = 0
def _moveTo(self, p0):
self.__startPoint = p0
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self.METHOD_NAME(self.__startPoint)
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise NotImplementedError
def METHOD_NAME(self, p1):
p0 = self._getCurrentPoint()
self.value += self._funcs[1](p0, p1)
def _qCurveToOne(self, p1, p2):
p0 = self._getCurrentPoint()
self.value += self._funcs[2](p0, p1, p2)
def _curveToOne(self, p1, p2, p3):
p0 = self._getCurrentPoint()
self.value += self._funcs[3](p0, p1, p2, p3)
# Sample pens.
# Do not use this in real code.
# Use fontTools.pens.momentsPen.MomentsPen instead.
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x * x)
MomentYYPen = partial(GreenPen, func=y * y)
MomentXYPen = partial(GreenPen, func=x * y)
def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
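# Writes out the source of a specialised pen class: for each curve degree the
# Green-theorem integrals of `funcs` are evaluated symbolically, common subexpressions
# are factored out with sp.cse, and the result is emitted as plain Python with optional
# cython type annotations.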
if docstring is not None:
print('"""%s"""' % docstring, file=file)
print(
"""from fontTools.pens.basePen import BasePen, OpenContourError
try:
import cython
COMPILED = cython.compiled
except (AttributeError, ImportError):
# if cython not installed, use mock module with no-op decorators and types
from fontTools.misc import cython
COMPILED = False
__all__ = ["%s"]
class %s(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
"""
% (penName, penName),
file=file,
)
for name, f in funcs:
print(" self.%s = 0" % name, file=file)
print(
"""
def _moveTo(self, p0):
self.__startPoint = p0
def _closePath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
self._lineTo(self.__startPoint)
def _endPath(self):
p0 = self._getCurrentPoint()
if p0 != self.__startPoint:
# Green theorem is not defined on open contours.
raise OpenContourError(
"Green theorem is not defined on open contours."
)
""",
end="",
file=file,
)
for n in (1, 2, 3):
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
greens = [green(f, BezierCurve[n]) for name, f in funcs]
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
defs, exprs = sp.cse(
greens,
optimizations="basic",
symbols=(sp.Symbol("r%d" % i) for i in count()),
)
print(file=file)
for name, value in defs:
print(" @cython.locals(%s=cython.double)" % name, file=file)
if n == 1:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
def _lineTo(self, p1):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
""",
file=file,
)
elif n == 2:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
def _qCurveToOne(self, p1, p2):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
""",
file=file,
)
elif n == 3:
print(
"""\
@cython.locals(x0=cython.double, y0=cython.double)
@cython.locals(x1=cython.double, y1=cython.double)
@cython.locals(x2=cython.double, y2=cython.double)
@cython.locals(x3=cython.double, y3=cython.double)
def _curveToOne(self, p1, p2, p3):
x0,y0 = self._getCurrentPoint()
x1,y1 = p1
x2,y2 = p2
x3,y3 = p3
""",
file=file,
)
for name, value in defs:
print(" %s = %s" % (name, value), file=file)
print(file=file)
for name, value in zip([f[0] for f in funcs], exprs):
print(" self.%s += %s" % (name, value), file=file)
print(
"""
if __name__ == '__main__':
from fontTools.misc.symfont import x, y, printGreenPen
printGreenPen('%s', ["""
% penName,
file=file,
)
for name, f in funcs:
print(" ('%s', %s)," % (name, str(f)), file=file)
print(" ])", file=file)
if __name__ == "__main__":
pen = AreaPen()
pen.moveTo((100, 100))
pen.lineTo((100, 200))
pen.lineTo((200, 200))
pen.curveTo((200, 250), (300, 300), (250, 350))
pen.lineTo((200, 100))
pen.closePath()
print(pen.value) |
test two cl is in parallel test | import qt
import slicer
from slicer.ScriptedLoadableModule import *
#
# TwoCLIsInParallelTest
#
class TwoCLIsInParallelTest(ScriptedLoadableModule):
def __init__(self, parent):
parent.title = "TwoCLIsInParallelTest" # TODO make this more human readable by adding spaces
parent.categories = ["Testing.TestCases"]
parent.dependencies = ["CLI4Test"]
parent.contributors = ["Johan Andruejol (Kitware)"]
parent.helpText = """
This is a self test that tests running two CLIs in parallel through python
"""
parent.acknowledgementText = """""" # replace with organization, grant and thanks.
self.parent = parent
# Add this test to the SelfTest module's list for discovery when the module
# is created. Since this module may be discovered before SelfTests itself,
# create the list if it doesn't already exist.
try:
slicer.selfTests
except AttributeError:
slicer.selfTests = {}
slicer.selfTests['TwoCLIsInParallelTest'] = self.runTest
def runTest(self):
tester = TwoCLIsInParallelTestTest()
tester.runTest()
#
# TwoCLIsInParallelTestWidget
#
class TwoCLIsInParallelTestWidget(ScriptedLoadableModuleWidget):
def setup(self):
ScriptedLoadableModuleWidget.setup(self)
#
# TwoCLIsInParallelTestLogic
#
class TwoCLIsInParallelTestLogic(ScriptedLoadableModuleLogic):
def __init__(self):
self.Observations = []
self.StatusModifiedEvent = slicer.vtkMRMLCommandLineModuleNode().StatusModifiedEvent
self.parameters = {}
self.success = False
def runModule1(self):
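# Start the first CLI without waiting for completion (last argument False) and track
# its progress through the StatusModifiedEvent observer.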
cliModule = slicer.modules.cli4test
cliNode = slicer.cli.createNode(cliModule)
cliNode.SetName("CLIModule1")
self.addObserver(cliNode, self.StatusModifiedEvent, self.onModule1Modified)
cliNode = slicer.cli.run(cliModule, cliNode, self.parameters, False)
def onModule1Modified(self, cliNode, event):
print("--", cliNode.GetStatusString(), ":", cliNode.GetName())
if not cliNode.IsBusy():
self.removeObservers(cliNode, self.StatusModifiedEvent, self.onModule1Modified)
def runModule2(self):
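# Run the second CLI with wait-for-completion set to True, so it blocks while the
# first CLI may still be executing.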
cliModule = slicer.modules.cli4test
cliNode = slicer.cli.createNode(cliModule)
cliNode.SetName("CLIModule2")
cliNode = slicer.cli.run(cliModule, cliNode, self.parameters, True)
self.success = cliNode.GetStatusString() == 'Completed'
def addObserver(self, object, event, method, group='none'):
if self.hasObserver(object, event, method):
print(object.GetName(), 'already has observer')
return
tag = object.AddObserver(event, method)
self.Observations.append([object, event, method, group, tag])
def hasObserver(self, object, event, method):
for o, e, m, g, t in self.Observations:
if o == object and e == event and m == method:
return True
return False
def removeObservers(self, object, event, method):
for o, e, m, g, t in self.Observations:
if object == o and event == e and method == m:
o.RemoveObserver(t)
self.Observations.remove([o, e, m, g, t])
#
# TwoCLIsInParallelTestLogic
#
class TwoCLIsInParallelTestTest(ScriptedLoadableModuleTest):
def setUp(self):
""" Reset the state for testing.
"""
pass
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.METHOD_NAME()
def METHOD_NAME(self):
self.delayDisplay('Running two CLIs in parallel Test')
tempFile = qt.QTemporaryFile("TwoCLIsInParallelTest-outputFile-XXXXXX")
self.assertTrue(tempFile.open())
logic = TwoCLIsInParallelTestLogic()
logic.parameters = {}
logic.parameters["InputValue1"] = 1
logic.parameters["InputValue2"] = 2
logic.parameters["OperationType"] = 'Addition'
logic.parameters["OutputFile"] = tempFile.fileName()
logic.runModule1()
self.delayDisplay('... Waiting to start module 2 ...')
logic.runModule2()
self.assertTrue(logic.success)
self.delayDisplay('Two CLIs in parallel test passed !') |
aut o techsuppor t global | """
Auto-generated show CLI plugin.
Manually Edited to add show cli for "show auto_techsupport history"
"""
import click
import tabulate
import natsort
import utilities_common.cli as clicommon
def format_attr_value(entry, attr):
""" Helper that formats attribute to be presented in the table output.
Args:
entry (Dict[str, str]): CONFIG DB entry configuration.
attr (Dict): Attribute metadata.
Returns:
str: fomatted attribute value.
"""
if attr["is-leaf-list"]:
return "\n".join(entry.get(attr["name"], []))
return entry.get(attr["name"], "N/A")
def format_group_value(entry, attrs):
""" Helper that formats grouped attribute to be presented in the table output.
Args:
entry (Dict[str, str]): CONFIG DB entry configuration.
attrs (List[Dict]): Attributes metadata that belongs to the same group.
Returns:
str: formatted group attributes.
"""
data = []
for attr in attrs:
if entry.get(attr["name"]):
data.append((attr["name"] + ":", format_attr_value(entry, attr)))
return tabulate.tabulate(data, tablefmt="plain")
@click.group(name="auto-techsupport",
cls=clicommon.AliasedGroup)
def AUTO_TECHSUPPORT():
""" AUTO_TECHSUPPORT part of config_db.json """
pass
@AUTO_TECHSUPPORT.command(name="global")
@clicommon.pass_db
def METHOD_NAME(db):
""" """
header = [
"STATE",
"RATE LIMIT INTERVAL (sec)",
"MAX TECHSUPPORT LIMIT (%)",
"MAX CORE LIMIT (%)",
"AVAILABLE MEM THRESHOLD (%)",
"MIN AVAILABLE MEM (Kb)",
"SINCE",
]
body = []
table = db.cfgdb.get_table("AUTO_TECHSUPPORT")
entry = table.get("GLOBAL", {})
row = [
format_attr_value(
entry,
{'name': 'state', 'description': 'Knob to make techsupport invocation event-driven based on core-dump generation', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'rate_limit_interval', 'description': 'Minimum time in seconds between two successive techsupport invocations. Configure 0 to explicitly disable', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'max_techsupport_limit', 'description': 'Max Limit in percentage for the cumulative size of ts dumps. No cleanup is performed if the value isn\'t configured or is 0.0', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'max_core_limit', 'description': 'Max Limit in percentage for the cumulative size of core dumps. No cleanup is performed if the value isn\'t configured or is 0.0', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'available_mem_threshold', 'description': 'Memory threshold; 0 to disable techsupport invocation on memory usage threshold crossing.', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'min_available_mem', 'description': 'Minimum Free memory (in MB) that should be available for the techsupport execution to start', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'since', 'description': "Only collect the logs & core-dumps generated since the time provided. A default value of '2 days ago' is used if this value is not set explicitly or a non-valid string is provided", 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
]
body.append(row)
click.echo(tabulate.tabulate(body, header, numalign="left"))
@AUTO_TECHSUPPORT.command(name="history")
@clicommon.pass_db
def AUTO_TECHSUPPORT_history(db):
keys = db.db.keys("STATE_DB", "AUTO_TECHSUPPORT_DUMP_INFO|*")
header = ["TECHSUPPORT DUMP", "TRIGGERED BY", "EVENT TYPE", "CORE DUMP"]
body = []
for key in keys:
dump = key.split("|")[-1]
fv_pairs = db.db.get_all("STATE_DB", key)
core_dump = fv_pairs.get("core_dump", "")
container = fv_pairs.get("container_name", "")
event_type = fv_pairs.get("event_type", "")
body.append([dump, container, event_type, core_dump])
click.echo(tabulate.tabulate(body, header, numalign="left"))
@click.group(name="auto-techsupport-feature",
cls=clicommon.AliasedGroup,
invoke_without_command=True)
@clicommon.pass_db
def AUTO_TECHSUPPORT_FEATURE(db):
""" [Callable command group] """
header = [
"FEATURE NAME",
"STATE",
"RATE LIMIT INTERVAL (sec)",
"AVAILABLE MEM THRESHOLD (%)",
]
body = []
table = db.cfgdb.get_table("AUTO_TECHSUPPORT_FEATURE")
for key in natsort.natsorted(table):
entry = table[key]
if not isinstance(key, tuple):
key = (key,)
row = [*key] + [
format_attr_value(
entry,
{'name': 'state', 'description': 'Enable auto techsupport invocation on the processes running inside this feature', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'rate_limit_interval', 'description': 'Rate limit interval for the corresponding feature. Configure 0 to explicitly disable', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
format_attr_value(
entry,
{'name': 'available_mem_threshold', 'description': 'Memory threshold; 0 to disable techsupport invocation on memory usage threshold crossing.', 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
),
]
body.append(row)
click.echo(tabulate.tabulate(body, header, numalign="left"))
def register(cli):
cli_node = AUTO_TECHSUPPORT
if cli_node.name in cli.commands:
raise Exception(f"{cli_node.name} already exists in CLI")
cli.add_command(AUTO_TECHSUPPORT)
cli_node = AUTO_TECHSUPPORT_FEATURE
if cli_node.name in cli.commands:
raise Exception(f"{cli_node.name} already exists in CLI")
cli.add_command(AUTO_TECHSUPPORT_FEATURE)
cli_node = AUTO_TECHSUPPORT_history
if cli_node.name in cli.commands:
raise Exception(f"{cli_node.name} already exists in CLI")
cli.add_command(AUTO_TECHSUPPORT_history) |
point distances | import array
import geohash
import os
import math
import operator
import six
import ujson as json
from collections import defaultdict, OrderedDict
from leveldb import LevelDB
from geodata.distance.haversine import haversine_distance
class PointIndex(object):
include_only_properties = None
persistent_index = False
cache_size = 0
POINTS_DB_DIR = 'points'
GEOHASH_PRECISION = 7
PROPS_FILENAME = 'properties.json'
POINTS_FILENAME = 'points.json'
INDEX_FILENAME = 'index.json'
def __init__(self, index=None, save_dir=None,
points=None,
points_path=None,
points_db=None,
points_db_path=None,
index_path=None,
include_only_properties=None,
precision=GEOHASH_PRECISION):
if save_dir:
self.save_dir = save_dir
else:
self.save_dir = None
if include_only_properties and hasattr(include_only_properties, '__contains__'):
self.include_only_properties = include_only_properties
if not index_path:
index_path = os.path.join(save_dir or '.', self.INDEX_FILENAME)
self.index_path = index_path
if not index:
self.index = defaultdict(list)
else:
self.index = index
if not points_path:
points_path = os.path.join(save_dir or '.', self.POINTS_FILENAME)
self.points_path = points_path
if not points:
self.points = array.array('d')
else:
self.points = points
if not points_db_path:
points_db_path = os.path.join(save_dir or '.', self.POINTS_DB_DIR)
if not points_db:
self.points_db = LevelDB(points_db_path)
else:
self.points_db = points_db
self.precision = precision
self.i = 0
def index_point(self, lat, lon):
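# Register the point under its geohash cell and all eight neighbouring cells so that
# lookups near a cell boundary still find it.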
code = geohash.encode(lat, lon)[:self.precision]
for key in [code] + geohash.neighbors(code):
self.index[key].append(self.i)
self.points.extend([lat, lon])
def add_point(self, lat, lon, properties, cache=False, include_only_properties=None):
if include_only_properties is None and self.include_only_properties:
include_only_properties = self.include_only_properties
if include_only_properties is not None:
properties = {k: v for k, v in properties.iteritems() if k in include_only_properties}
self.index_point(lat, lon)
self.points_db.Put(self.properties_key(self.i), json.dumps(properties))
self.i += 1
def load_properties(self, filename):
properties = json.load(open(filename))
self.i = int(properties.get('num_points', self.i))
self.precision = int(properties.get('precision', self.precision))
def save_properties(self, out_filename):
out = open(out_filename, 'w')
json.dump({'num_points': str(self.i),
'precision': self.precision}, out)
def save_index(self):
if not self.index_path:
self.index_path = os.path.join(self.save_dir or '.', self.INDEX_FILENAME)
json.dump(self.index, open(self.index_path, 'w'))
@classmethod
def load_index(cls, d, index_name=None):
return json.load(open(os.path.join(d, index_name or cls.INDEX_FILENAME)))
def save_points(self):
json.dump(self.points, open(self.points_path, 'w'))
@classmethod
def load_points(cls, d):
return array.array('d', json.load(open(os.path.join(d, cls.POINTS_FILENAME))))
def properties_key(self, i):
return 'props:{}'.format(i)
def get_properties(self, i):
return json.loads(self.points_db.Get(self.properties_key(i)))
def compact_points_db(self):
self.points_db.CompactRange('\x00', '\xff')
def save(self):
self.save_index()
self.save_points()
self.compact_points_db()
self.save_properties(os.path.join(self.save_dir, self.PROPS_FILENAME))
@classmethod
def load(cls, d):
index = cls.load_index(d)
points = cls.load_points(d)
points_db = LevelDB(os.path.join(d, cls.POINTS_DB_DIR))
point_index = cls(index=index, points=points, points_db=points_db)
point_index.load_properties(os.path.join(d, cls.PROPS_FILENAME))
return point_index
def __iter__(self):
for i in xrange(self.i):
lat, lon = self.points[i * 2], self.points[i * 2 + 1]
yield self.get_properties(i), lat, lon
def __len__(self):
return self.i
def get_candidate_points(self, latitude, longitude):
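# Gather indices stored under the query point's cell and its neighbours; the
# OrderedDict keys de-duplicate candidates while preserving insertion order.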
code = geohash.encode(latitude, longitude)[:self.precision]
candidates = OrderedDict()
candidates.update([(k, None) for k in self.index.get(code, [])])
for neighbor in geohash.neighbors(code):
candidates.update([(k, None) for k in self.index.get(neighbor, [])])
return candidates.keys()
def METHOD_NAME(self, latitude, longitude):
candidates = self.get_candidate_points(latitude, longitude)
return [(i, self.points[i * 2], self.points[i * 2 + 1],
haversine_distance(latitude, longitude,
self.points[i * 2],
self.points[i * 2 + 1]))
for i in candidates]
def all_nearby_points(self, latitude, longitude):
distances = self.METHOD_NAME(latitude, longitude)
if not distances:
return []
return sorted(distances, key=operator.itemgetter(-1))
def points_with_properties(self, results):
return [(self.get_properties(i), lat, lon, distance)
for i, lat, lon, distance in results]
def nearest_points(self, latitude, longitude):
return self.points_with_properties(self.all_nearby_points(latitude, longitude))
def nearest_n_points(self, latitude, longitude, n=2):
return self.points_with_properties(self.all_nearby_points(latitude, longitude)[:n])
def nearest_point(self, latitude, longitude):
distances = self.all_nearby_points(latitude, longitude)
if not distances:
return None
return self.points_with_properties(distances[:1])[0] |
layout | from conan import ConanFile
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps
from conan.tools.files import get, copy, rmdir
from conan.tools.METHOD_NAME import basic_layout
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import check_min_vs, is_msvc
from conan.tools.scm import Version
import os
required_conan_version = ">=1.50.0"
class LogrConan(ConanFile):
name = "logr"
description = (
"Logger frontend substitution for spdlog, glog, etc "
"for server/desktop applications"
)
license = "BSD-3-Clause"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/ngrodzitski/logr"
topics = ("logger", "development", "util", "utils")
settings = "os", "arch", "compiler", "build_type"
options = {
"backend": ["spdlog", "glog", "log4cplus", "boostlog", None],
}
default_options = {
"backend": "spdlog",
}
def METHOD_NAME(self):
basic_layout(self, src_folder="src")
def requirements(self):
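# Version pins differ by backend and logr release: non-spdlog backends only need fmt,
# while the spdlog backend pairs fmt with a matching spdlog version (older pins are
# kept for logr < 0.6.0), per the checks below.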
if self.options.backend != "spdlog":
fmt_ref = "fmt/10.0.0"
elif Version(self.version) >= "0.6.0":
fmt_ref = "fmt/9.1.0"
spdlog_ref = "spdlog/1.11.0"
else:
fmt_ref = "fmt/8.1.1"
spdlog_ref = "spdlog/1.10.0"
self.requires(fmt_ref)
if self.options.backend == "spdlog":
self.requires(spdlog_ref)
elif self.options.backend == "glog":
self.requires("glog/0.6.0")
elif self.options.backend == "log4cplus":
self.requires("log4cplus/2.1.0")
elif self.options.backend == "boostlog":
self.requires("boost/1.82.0")
def package_id(self):
self.info.settings.clear()
self.info.requires.clear()
def validate(self):
minimal_cpp_standard = "17"
if self.settings.get_safe("compiler.cppstd"):
check_min_cppstd(self, minimal_cpp_standard)
minimal_version = {
"gcc": "10",
"clang": "11",
"apple-clang": "12",
}
check_min_vs(self, 192)
if not is_msvc(self):
minimum_version = minimal_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires minimum {self.settings.compiler}-{minimum_version}."
)
def generate(self):
tc = CMakeToolchain(self)
tc.variables["LOGR_WITH_SPDLOG_BACKEND"] = (
self.options.backend == "spdlog"
)
tc.variables["LOGR_WITH_GLOG_BACKEND"] = (
self.options.backend == "glog"
)
tc.variables["LOGR_WITH_LOG4CPLUS_BACKEND"] = (
self.options.backend == "log4cplus"
)
tc.variables["LOGR_WITH_BOOSTLOG_BACKEND"] = (
self.options.backend == "boostlog"
)
tc.variables["LOGR_INSTALL"] = True
tc.variables["LOGR_CONAN_PACKAGING"] = True
tc.variables["LOGR_BUILD_TESTS"] = False
tc.variables["LOGR_BUILD_EXAMPLES"] = False
tc.variables["LOGR_BUILD_BENCHMARKS"] = False
tc.generate()
deps = CMakeDeps(self)
deps.generate()
def source(self):
get(self, **self.conan_data["sources"][self.version],
destination=self.source_folder, strip_root=True)
def package(self):
copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses"))
cmake = CMake(self)
cmake.configure(build_script_folder=os.path.join(self.source_folder, "logr"))
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib"))
def package_info(self):
self.cpp_info.bindirs = []
self.cpp_info.frameworkdirs = []
self.cpp_info.libdirs = []
self.cpp_info.set_property("cmake_file_name", "logr")
self.cpp_info.names["cmake_find_package"] = "logr"
self.cpp_info.names["cmake_find_package_multi"] = "logr"
self.cpp_info.components["logr_base"].includedirs = ["include"]
self.cpp_info.components["logr_base"].requires = ["fmt::fmt"]
if self.options.backend == "spdlog":
self.cpp_info.components["logr_spdlog"].includedirs = []
self.cpp_info.components["logr_spdlog"].requires = [
"logr_base",
"spdlog::spdlog",
]
elif self.options.backend == "glog":
self.cpp_info.components["logr_glog"].includedirs = []
self.cpp_info.components["logr_glog"].requires = [
"logr_base",
"glog::glog",
]
elif self.options.backend == "log4cplus":
self.cpp_info.components["logr_log4cplus"].includedirs = []
self.cpp_info.components["logr_log4cplus"].requires = [
"logr_base",
"log4cplus::log4cplus",
]
elif self.options.backend == "boostlog":
self.cpp_info.components["logr_boostlog"].includedirs = []
self.cpp_info.components["logr_boostlog"].requires = [
"logr_base",
"boost::log",
] |
get selection | from travertino.size import at_least
from ..libs import GdkPixbuf, Gtk
from .base import Widget
from .table import TogaRow
class Tree(Widget):
def create(self):
self.store = None
# Create a tree view, and put it in a scroll view.
# The scroll view is the _impl, because it's the outer container.
self.native_tree = Gtk.TreeView(model=self.store)
self.native_tree.connect("row-activated", self.gtk_on_row_activated)
self.selection = self.native_tree.METHOD_NAME()
if self.interface.multiple_select:
self.selection.set_mode(Gtk.SelectionMode.MULTIPLE)
else:
self.selection.set_mode(Gtk.SelectionMode.SINGLE)
self.selection.connect("changed", self.gtk_on_select)
self._create_columns()
self.native = Gtk.ScrolledWindow()
self.native.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
self.native.add(self.native_tree)
self.native.set_min_content_width(200)
self.native.set_min_content_height(200)
def _create_columns(self):
if self.interface.headings:
headings = self.interface.headings
self.native_tree.set_headers_visible(True)
else:
headings = self.interface.accessors
self.native_tree.set_headers_visible(False)
for i, heading in enumerate(headings):
column = Gtk.TreeViewColumn(heading)
column.set_sizing(Gtk.TreeViewColumnSizing.AUTOSIZE)
column.set_expand(True)
column.set_resizable(True)
column.set_min_width(16)
icon = Gtk.CellRendererPixbuf()
column.pack_start(icon, False)
column.add_attribute(icon, "pixbuf", i * 2 + 1)
value = Gtk.CellRendererText()
column.pack_start(value, True)
column.add_attribute(value, "text", i * 2 + 2)
self.native_tree.append_column(column)
def gtk_on_select(self, selection):
self.interface.on_select(None)
def gtk_on_row_activated(self, widget, path, column):
node = self.store[path][0].value
self.interface.on_activate(None, node=node)
def change_source(self, source):
# Temporarily disconnecting the TreeStore improves performance for large
# updates by deferring row rendering until the update is complete.
self.native_tree.set_model(None)
for column in self.native_tree.get_columns():
self.native_tree.remove_column(column)
self._create_columns()
types = [TogaRow]
for accessor in self.interface._accessors:
types.extend([GdkPixbuf.Pixbuf, str])
self.store = Gtk.TreeStore(*types)
for i, row in enumerate(self.interface.data):
self.insert(None, i, row)
self.native_tree.set_model(self.store)
self.refresh()
def insert(self, parent, index, item):
row = TogaRow(item)
values = [row]
for accessor in self.interface.accessors:
values.extend(
[
row.icon(accessor),
row.text(accessor, self.interface.missing_value),
]
)
if parent is None:
iter = None
else:
iter = parent._impl
item._impl = self.store.insert(iter, index, values)
for i, child in enumerate(item):
self.insert(item, i, child)
def change(self, item):
row = self.store[item._impl]
for i, accessor in enumerate(self.interface.accessors):
row[i * 2 + 1] = row[0].icon(accessor)
row[i * 2 + 2] = row[0].text(accessor, self.interface.missing_value)
def remove(self, item, index, parent):
del self.store[item._impl]
item._impl = None
def clear(self):
self.store.clear()
def METHOD_NAME(self):
if self.interface.multiple_select:
store, itrs = self.selection.get_selected_rows()
return [store[itr][0].value for itr in itrs]
else:
store, iter = self.selection.get_selected()
if iter is None:
return None
return store[iter][0].value
def expand_node(self, node):
self.native_tree.expand_row(
self.native_tree.get_model().get_path(node._impl), True
)
def expand_all(self):
self.native_tree.expand_all()
def collapse_node(self, node):
self.native_tree.collapse_row(self.native_tree.get_model().get_path(node._impl))
def collapse_all(self):
self.native_tree.collapse_all()
def insert_column(self, index, heading, accessor):
# Adding/removing a column means completely rebuilding the ListStore
self.change_source(self.interface.data)
def remove_column(self, accessor):
self.change_source(self.interface.data)
def rehint(self):
self.interface.intrinsic.width = at_least(self.interface._MIN_WIDTH)
self.interface.intrinsic.height = at_least(self.interface._MIN_HEIGHT) |
extended option | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from __future__ import annotations
import functools
import logging
import os
import shutil
from dataclasses import dataclass
import click
from vdk.internal.control.configuration.defaults_config import load_default_rest_api_url
from vdk.internal.control.configuration.vdk_config import VDKConfig
from vdk.internal.control.exception.vdk_exception import VDKException
from vdk.internal.control.utils import output_printer
log = logging.getLogger(__name__)
def get_or_prompt(option_description, option_value, default_value=None):
if not option_value:
option_value = click.prompt(option_description, default=default_value)
return option_value
def METHOD_NAME(hide_if_default=False):
"""Extends click option with extra functionality:
* Records default value if coming from default_map in help (click does not)
* Flag to hide options with default value (so that administrative option can be hidden)
"""
class OptionExtended(click.Option):
def get_help_record(self, ctx):
self.show_default = True
default_value = ctx.lookup_default(self.name)
if default_value is not None:
if hide_if_default:
self.hidden = True
self.default = default_value
return super().get_help_record(ctx)
return OptionExtended
def rest_api_url_option(*names, **kwargs):
"""
A decorator that adds a `--rest-api-url, -u` option to the decorated
command.
Name can be configured through ``*names``. Keyword arguments are passed to
the underlying ``click.option`` decorator.
"""
if not names:
names = ["--rest-api-url", "-u"]
def decorator(f):
return click.option(
*names,
type=click.STRING,
cls=METHOD_NAME(hide_if_default=True),
help="The base REST API URL. It looks like http://server (without path e.g. /data-jobs)",
default=VDKConfig().control_service_rest_api_url
or load_default_rest_api_url(),
**kwargs,
)(f)
return decorator
def check_rest_api_url(rest_api_url: str):
if not rest_api_url:
raise VDKException(
what="Cannot connect to the Control Service.",
why="The following (mandatory) option is missing (--rest-api-url). Please, provide a valid value.",
consequence="Cannot manage (create, execute, delete, etc.) data jobs.",
countermeasure="Verify that the --rest-api-url is specified, either directly or via a plugin.",
)
def check_required_parameters(f):
"""
A decorator that checks whether the `--rest-api-irl` parameter is specified
before calling the wrapped function and, if not, throws an exception.
"""
@functools.wraps(f)
def check(*args, **kwargs):
log.debug(f"Passed parameters to function {f}: {args}, {kwargs}")
rest_api_url = kwargs.get("rest_api_url", "")
check_rest_api_url(rest_api_url)
return f(*args, **kwargs)
return check
def output_option(*names, **kwargs):
"""
A decorator that adds an `--output, -o` option to the decorated command.
Name can be configured through ``*names``. Keyword arguments are passed to
the underlying ``click.option`` decorator.
"""
if not names:
names = ["--output", "-o"]
def decorator(f):
return click.option(
*names,
type=click.Choice(
[e.upper() for e in output_printer._registered_printers.keys()],
case_sensitive=False,
),
default="text",
cls=METHOD_NAME(hide_if_default=True),
help="The desirable format of the result. Supported formats include text and json.",
**kwargs,
)(f)
return decorator
def copy_directory(src, dst):
if not os.path.exists(dst):
os.makedirs(dst)
for item in os.listdir(src):
source_file = os.path.join(src, item)
dest_file = os.path.join(dst, item)
if os.path.isfile(source_file):
shutil.copy(source_file, dest_file)
@dataclass
class QueryField:
name: str = ""
alias: str = ""
arguments: dict[str, str | int] = None
fields: list[QueryField] = None
def __hash__(self):
return self.name.__hash__()
def __eq__(self, other):
return self.name.__eq__(other)
def to_string(self):
field_as_string = self.name
if self.arguments:
arguments_as_strings: list[str] = []
for key, value in self.arguments.items():
arguments_as_strings.append(f"{key}: {value}")
field_as_string = (
field_as_string + "(" + ", ".join(arguments_as_strings) + ")"
)
if self.alias:
field_as_string = f"{self.alias}: {field_as_string}"
if self.fields:
field_as_string = (
field_as_string + " { " + " ".join(map(str, self.fields)) + " } "
)
return field_as_string
def __repr__(self):
return self.to_string()
def __str__(self):
return self.to_string()
def add(self, name: str, alias: str = None, arguments: dict[str, str | int] = None):
"""
Add new field as child to the current field and return current field
So you can chain multiple children of one field
Example:
root_field.add('child1').add('child2')
:param name: the name of the field
:param alias: alias name for the field if applicable, by default empty
:param arguments: arguments to pass to the field, including any filters
:return: the current field
"""
self.add_return_new(name, alias, arguments)
return self
def add_return_new(
self, name: str, alias: str = None, arguments: dict[str, str | int] = None
):
"""
Add new field as child to the current field and return newly added field
Example:
root_field.add_return_new('child').add_return_new('grand_child')
:param name: the name of the field
:param alias: alias name for the field if applicable, by default empty
:param arguments: arguments to pass to the field, including any filters
:return: the newly created field.
"""
query_field = QueryField(name=name, alias=alias, arguments=arguments)
if not self.fields:
self.fields = []
self.fields.append(query_field)
return query_field
@dataclass
class GqlQueryBuilder:
"""
Help Builds GraphQL query.
For our simple cases we do not need anything more complicated like https://pypi.org/project/gql
It covers only basic querying of fields
"""
def __init__(self):
self.__root_field = QueryField("jobs")
def start(self) -> QueryField:
"""
Start new build process. It will reset current progress
"""
self.__root_field = QueryField("")
return self.__root_field
def build(self) -> str:
"""
Return a GraphQL-like query listing all added fields
"""
return str(self.__root_field).strip() |
run vm | # TCG Plugins tests
#
# These are a little more involved than the basic tests run by check-tcg.
#
# Copyright (c) 2021 Linaro
#
# Author:
# Alex Bennée <[email protected]>
#
# SPDX-License-Identifier: GPL-2.0-or-later
import tempfile
import mmap
import re
from boot_linux_console import LinuxKernelTest
class PluginKernelBase(LinuxKernelTest):
"""
Boots a Linux kernel with a TCG plugin enabled.
"""
timeout = 120
KERNEL_COMMON_COMMAND_LINE = 'printk.time=1 panic=-1 '
def METHOD_NAME(self, kernel_path, kernel_command_line,
plugin, plugin_log, console_pattern, args=None):
vm = self.get_vm()
vm.set_console()
vm.add_args('-kernel', kernel_path,
'-append', kernel_command_line,
'-plugin', plugin,
'-d', 'plugin',
'-D', plugin_log,
'-net', 'none',
'-no-reboot')
if args:
vm.add_args(*args)
try:
vm.launch()
except:
# TODO: probably fails because plugins not enabled but we
# can't currently probe for the feature.
self.cancel("TCG Plugins not enabled?")
self.wait_for_console_pattern(console_pattern, vm)
# ensure logs are flushed
vm.shutdown()
class PluginKernelNormal(PluginKernelBase):
def _grab_aarch64_kernel(self):
kernel_url = ('http://security.debian.org/'
'debian-security/pool/updates/main/l/linux-signed-arm64/'
'linux-image-4.19.0-12-arm64_4.19.152-1_arm64.deb')
kernel_sha1 = '2036c2792f80ac9c4ccaae742b2e0a28385b6010'
kernel_deb = self.fetch_asset(kernel_url, asset_hash=kernel_sha1)
kernel_path = self.extract_from_deb(kernel_deb,
"/boot/vmlinuz-4.19.0-12-arm64")
return kernel_path
def test_aarch64_virt_insn(self):
"""
:avocado: tags=accel:tcg
:avocado: tags=arch:aarch64
:avocado: tags=machine:virt
:avocado: tags=cpu:cortex-a53
"""
kernel_path = self._grab_aarch64_kernel()
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyAMA0')
console_pattern = 'Kernel panic - not syncing: VFS:'
plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
suffix=".log")
self.METHOD_NAME(kernel_path, kernel_command_line,
"tests/plugin/libinsn.so", plugin_log.name,
console_pattern)
with plugin_log as lf, \
mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
m = re.search(br"insns: (?P<count>\d+)", s)
if "count" not in m.groupdict():
self.fail("Failed to find instruction count")
def test_aarch64_virt_insn_icount(self):
"""
:avocado: tags=accel:tcg
:avocado: tags=arch:aarch64
:avocado: tags=machine:virt
:avocado: tags=cpu:cortex-a53
"""
kernel_path = self._grab_aarch64_kernel()
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyAMA0')
console_pattern = 'Kernel panic - not syncing: VFS:'
plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
suffix=".log")
self.METHOD_NAME(kernel_path, kernel_command_line,
"tests/plugin/libinsn.so", plugin_log.name,
console_pattern,
args=('-icount', 'shift=1'))
with plugin_log as lf, \
mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
m = re.search(br"detected repeat execution @ (?P<addr>0x[0-9A-Fa-f]+)", s)
if m is not None and "addr" in m.groupdict():
self.fail("detected repeated instructions")
def test_aarch64_virt_mem_icount(self):
"""
:avocado: tags=accel:tcg
:avocado: tags=arch:aarch64
:avocado: tags=machine:virt
:avocado: tags=cpu:cortex-a53
"""
kernel_path = self._grab_aarch64_kernel()
kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE +
'console=ttyAMA0')
console_pattern = 'Kernel panic - not syncing: VFS:'
plugin_log = tempfile.NamedTemporaryFile(mode="r+t", prefix="plugin",
suffix=".log")
self.METHOD_NAME(kernel_path, kernel_command_line,
"tests/plugin/libmem.so,inline=true,callback=true", plugin_log.name,
console_pattern,
args=('-icount', 'shift=1'))
with plugin_log as lf, \
mmap.mmap(lf.fileno(), 0, access=mmap.ACCESS_READ) as s:
m = re.findall(br"mem accesses: (?P<count>\d+)", s)
if m is None or len(m) != 2:
self.fail("no memory access counts found")
else:
inline = int(m[0])
callback = int(m[1])
if inline != callback:
self.fail("mismatched access counts") |
init ndim | import numpy as np
from AnyQt.QtCore import Qt
from Orange.data import Table, ContinuousVariable
from Orange.widgets.settings import DomainContextHandler, ContextSetting
from Orange.widgets.utils.itemmodels import DomainModel
from Orange.widgets.widget import OWWidget, Input, Output, Msg
from Orange.widgets import gui, settings
from orangecontrib.spectroscopy.utils import NanInsideHypercube, InvalidAxisException
from orangecontrib.spectroscopy.utils.binning import bin_hyperspectra, InvalidBlockShape
from orangecontrib.spectroscopy.widgets.gui import lineEditIntRange
MAX_DIMENSIONS = 5
class OWBin(OWWidget):
# Widget's name as displayed in the canvas
name = "Bin"
# Short widget description
description = (
"Bins a hyperspectral dataset by a continuous variable such as coordinates.")
icon = "icons/bin.svg"
# Define inputs and outputs
class Inputs:
data = Input("Data", Table, default=True)
class Outputs:
bindata = Output("Binned Data", Table, default=True)
class Error(OWWidget.Error):
invalid_axis = Msg("Invalid axis: {}")
invalid_block = Msg("Bin block size not compatible with dataset: {}")
class Warning(OWWidget.Warning):
nan_in_image = Msg("Unknown values within images: {} unknowns")
autocommit = settings.Setting(True)
want_main_area = False
want_control_area = True
resizing_enabled = False
settingsHandler = DomainContextHandler()
attrs = ContextSetting([None, None])
bin_shape = settings.Setting((1, 1))
square_bin = settings.Setting(True)
def __init__(self):
super().__init__()
self.data = None
for i in range(MAX_DIMENSIONS):
setattr(self, f"bin_{i}", 1)
setattr(self, f"attr_{i}", None)
self._init_bins()
self.METHOD_NAME()
self._init_attrs()
box = gui.widgetBox(self.controlArea, "Parameters")
gui.checkBox(box, self, "square_bin",
label="Use square bin shape",
callback=self._bin_changed)
gui.separator(box)
gui.spin(box, self, "ndim", minv=1, maxv=MAX_DIMENSIONS,
label="Number of axes to bin:",
callback=self._dim_changed)
self.axes_box = gui.widgetBox(self.controlArea, "Axes")
self.xy_model = DomainModel(DomainModel.METAS | DomainModel.CLASSES,
valid_types=ContinuousVariable)
self.contextAboutToBeOpened.connect(self._init_interface_data)
common_options = dict(labelWidth=50, orientation=Qt.Horizontal,
sendSelectedValue=True)
for i in range(MAX_DIMENSIONS):
hbox = gui.hBox(self.axes_box)
gui.comboBox(
hbox, self, f"attr_{i}", label=f"Axis {i}:",
callback=self._attr_changed,
model=self.xy_model, **common_options)
le = lineEditIntRange(hbox, self, f"bin_{i}", bottom=1, default=1,
callback=self._bin_changed)
le.setFixedWidth(40)
gui.separator(hbox, width=40)
gui.widgetLabel(hbox, label="Bin size:", labelWidth=50)
hbox.layout().addWidget(le)
self._update_cb_attr()
box = gui.widgetBox(self.controlArea, "Info")
gui.label(box, self, "Block shape: %(bin_shape)s")
gui.rubber(self.controlArea)
gui.auto_commit(self.controlArea, self, "autocommit", "Send Data")
def _sanitize_bin_value(self):
pass #TODO make sure bin value is compatible with dataset
def _update_bins(self):
if self.square_bin:
self.bin_shape = tuple([self.bin_0] * len(self.bin_shape))
self._init_bins()
return
new_shape = []
for i, _ in enumerate(self.bin_shape):
new_shape.append(getattr(self, f"bin_{i}"))
self.bin_shape = tuple(new_shape)
def _attr_changed(self):
self._update_attrs()
self.commit.deferred()
def _bin_changed(self):
self._update_bins()
self._sanitize_bin_value()
self.commit.deferred()
def _dim_changed(self):
while len(self.bin_shape) != self.ndim:
if len(self.bin_shape) < self.ndim:
self.bin_shape += (1,)
self.attrs.append(None)
elif len(self.bin_shape) > self.ndim:
self.bin_shape = self.bin_shape[:-1]
self.attrs = self.attrs[:-1]
self._update_bins()
self._update_attrs()
self._update_cb_attr()
self.commit.deferred()
def _init_bins(self):
for i, bin in enumerate(self.bin_shape):
setattr(self, f"bin_{i}", bin)
def METHOD_NAME(self):
self.ndim = len(self.bin_shape)
def _init_attrs(self):
for i, attr in enumerate(self.attrs):
setattr(self, f"attr_{i}", attr)
def _init_attr_values(self, data):
domain = data.domain if data is not None else None
self.xy_model.set_domain(domain)
attrs = []
for i in range(self.ndim):
try:
attr = self.xy_model[i] if self.xy_model else None
except IndexError:
attr = None
attrs.append(attr)
self.attrs = attrs
def _init_interface_data(self, args):
data = args[0]
self._init_attr_values(data)
self._init_attrs()
def _update_attrs(self):
new_attrs = []
for i, _ in enumerate(self.attrs):
new_attrs.append(getattr(self, f"attr_{i}"))
self.attrs = new_attrs
def _update_cb_attr(self):
for i in range(MAX_DIMENSIONS):
w = self.axes_box.layout().itemAt(i).widget()
if i < self.ndim:
w.show()
else:
w.hide()
@Inputs.data
def set_data(self, dataset):
self.closeContext()
self.openContext(dataset)
if dataset is not None:
self.data = dataset
self._sanitize_bin_value()
else:
self.data = None
self.Warning.nan_in_image.clear()
self.Error.invalid_axis.clear()
self.Error.invalid_block.clear()
self.commit.now()
@gui.deferred
def commit(self):
bin_data = None
self.Warning.nan_in_image.clear()
self.Error.invalid_axis.clear()
self.Error.invalid_block.clear()
attrs = self.attrs
if self.data and len(self.data.domain.attributes) and len(attrs):
if np.any(np.isnan(self.data.X)):
self.Warning.nan_in_image(np.sum(np.isnan(self.data.X)))
try:
bin_data = bin_hyperspectra(self.data, attrs, self.bin_shape)
except InvalidAxisException as e:
self.Error.invalid_axis(e.args[0])
except InvalidBlockShape as e:
self.Error.invalid_block(e.args[0])
self.Outputs.bindata.send(bin_data)
if __name__ == "__main__": # pragma: no cover
from Orange.widgets.utils.widgetpreview import WidgetPreview
WidgetPreview(OWBin).run(Table("agilent/5_mosaic_agg1024.dmt")) |
grad | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Tuple
import numpy as np
import scipy as scipy
import scipy.sparse as sp
from cvxpy.atoms.atom import Atom
from cvxpy.constraints.constraint import Constraint
class quad_over_lin(Atom):
""" :math:`\left(\sum_{ij} X_{ij}^2\right)/y`
"""
_allow_complex = True
def __init__(self, x, y) -> None:
super(quad_over_lin, self).__init__(x, y)
@Atom.numpy_numeric
def numeric(self, values):
"""Returns the sum of the entries of x squared over y.
"""
if self.args[0].is_complex():
return (np.square(values[0].imag) + np.square(values[0].real)).sum()/values[1]
return np.square(values[0]).sum()/values[1]
def _domain(self) -> List[Constraint]:
"""Returns constraints describing the domain of the node.
"""
# y > 0.
return [self.args[1] >= 0]
def METHOD_NAME(self, values):
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
X = values[0]
y = values[1]
if y <= 0:
return [None, None]
else:
# DX = 2X/y, Dy = -||X||^2_2/y^2
if self.args[0].is_complex():
Dy = -(np.square(X.real) + np.square(X.imag)).sum()/np.square(y)
else:
Dy = -np.square(X).sum()/np.square(y)
Dy = sp.csc_matrix(Dy)
DX = 2.0*X/y
DX = np.reshape(DX, (self.args[0].size, 1))
DX = scipy.sparse.csc_matrix(DX)
return [DX, Dy]
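    # A quick numeric sanity check of the formulas above (illustrative values only,
    # not part of the original source): for X = [1, 2] and y = 2 the atom evaluates
    # to (1 + 4)/2 = 2.5, the gradient w.r.t. X is 2*X/y = [1, 2], and the gradient
    # w.r.t. y is -(1 + 4)/2**2 = -1.25, matching the DX and Dy computed above.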
def shape_from_args(self) -> Tuple[int, ...]:
"""Returns the (row, col) shape of the expression.
"""
return tuple()
def sign_from_args(self) -> Tuple[bool, bool]:
"""Returns sign (is positive, is negative) of the expression.
"""
# Always positive.
return (True, False)
def is_atom_convex(self) -> bool:
"""Is the atom convex?
"""
return True
def is_atom_concave(self) -> bool:
"""Is the atom concave?
"""
return False
def is_atom_log_log_convex(self) -> bool:
"""Is the atom log-log convex?
"""
return True
def is_atom_log_log_concave(self) -> bool:
"""Is the atom log-log concave?
"""
return False
def is_incr(self, idx) -> bool:
"""Is the composition non-decreasing in argument idx?
"""
return (idx == 0) and self.args[idx].is_nonneg()
def is_decr(self, idx) -> bool:
"""Is the composition non-increasing in argument idx?
"""
return ((idx == 0) and self.args[idx].is_nonpos()) or (idx == 1)
def validate_arguments(self) -> None:
"""Check dimensions of arguments.
"""
if not self.args[1].is_scalar():
raise ValueError("The second argument to quad_over_lin must be a scalar.")
if self.args[1].is_complex():
raise ValueError("The second argument to quad_over_lin cannot be complex.")
super(quad_over_lin, self).validate_arguments()
def is_quadratic(self) -> bool:
"""Quadratic if x is affine and y is constant.
"""
return self.args[0].is_affine() and self.args[1].is_constant()
def has_quadratic_term(self) -> bool:
"""A quadratic term if y is constant.
"""
return self.args[1].is_constant()
def is_qpwa(self) -> bool:
"""Quadratic of piecewise affine if x is PWL and y is constant.
"""
return self.args[0].is_pwl() and self.args[1].is_constant() |
convert flat dict to nested dict | # Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Contains helper functions for auto-logging hparams."""
from enum import Enum
from typing import Any, Dict, List, Tuple
__all__ = ['extract_hparams', 'convert_nested_dict_to_flat_dict', 'convert_flat_dict_to_nested_dict']
def extract_hparams(locals_dict: Dict[str, Any]) -> Dict[str, Any]:
"""Takes in local symbol table and recursively grabs any hyperparameter.
Args:
locals_dict (Dict[str, Any]): The local symbol table returned when calling locals(),
which maps any free local variables' names to their values.
Returns:
Dict[str, Any]: A nested dictionary with every element of locals_dict mapped to its
value or to another sub_dict.
"""
hparams = {}
for k, v in locals_dict.items():
if k.startswith('_') or k == 'self' or type(v) is type:
continue
hparams_to_add = _grab_hparams(v)
hparams[k] = hparams_to_add
return hparams
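# A minimal usage sketch (hypothetical class, not part of this module): calling
# extract_hparams(locals()) at the top of an __init__ such as
#     def __init__(self, lr=0.1, name='run', _private=None): ...
# would yield {'lr': 0.1, 'name': 'run'}; names starting with '_', 'self', and
# class objects are skipped by the loop above.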
def _grab_hparams(obj) -> Any:
"""Helper function parses objects for their hyperparameters going only one level deep."""
# If the object has already grabbed its hyperparameters (it calls extract_hparams inside __init__)
# then parse hparams attribute (which is a dict) and name those sub-hyperparameters
if hasattr(obj, 'local_hparams'):
return {obj.__class__.__name__: obj.local_hparams}
elif isinstance(obj, List) or isinstance(obj, Tuple):
return [_get_obj_repr(sub_obj) for sub_obj in obj]
elif isinstance(obj, Dict):
return {k: _get_obj_repr(sub_obj) for k, sub_obj in obj.items()}
else:
return _get_obj_repr(obj)
def _get_obj_repr(obj: Any):
"""Returns best representation of object.
Args:
obj (Any): the object.
Returns:
        obj if obj is None or it is an int, float, str, bool type.
obj.value if obj is an Enum. Otherwise returns obj.__class__.__name__.
"""
if any(isinstance(obj, type_) for type_ in [int, float, str, bool]) or obj is None:
return obj
elif isinstance(obj, Enum):
return obj.value
else:
return obj.__class__.__name__
def convert_nested_dict_to_flat_dict(nested_dict: Dict, prefix='') -> Dict:
"""Takes in a nested dict converts it to a flat dict with keys separated by slashes.
Args:
nested_dict (Dict): A dictionary containing at least one other dictionary.
        prefix (str, optional): A prefix to prepend to the keys in the dictionary.
            Defaults to ''.
Returns:
Dict: A flat dictionary representation of the nested one (contains no other
dictionaries inside of it)
"""
flat_dict = {}
for k, v in nested_dict.items():
key = prefix + '/' + k if prefix != '' else k
# Recursively crawl sub-dictionary.
if isinstance(v, dict):
sub_flat_dict = convert_nested_dict_to_flat_dict(prefix=key, nested_dict=v)
flat_dict.update(sub_flat_dict)
else:
flat_dict[key] = v
return flat_dict
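# Illustrative round trip with arbitrary values: convert_nested_dict_to_flat_dict
# turns {'a': 1, 'b': {'c': 2}} into {'a': 1, 'b/c': 2}, and the function below
# inverts that mapping back to the nested form.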
def METHOD_NAME(flat_dict: Dict) -> Dict:
"""Converts flat dictionary separated by slashes to nested dictionary.
Args:
flat_dict (Dict): flat dictionary containing no sub-dictionary with keys
separated by slashes. e.g. {'a':1, 'b/c':2}
Returns:
Dict: a nested dict.
"""
nested_dict = {}
for k, v in flat_dict.items():
# Initially sub_dict is the main nested_dict, but we will continually update it to be the
# sub-dictionary of sub_dict.
sub_dict = nested_dict
sub_keys = k.split('/')
for sub_key in sub_keys[:-1]:
if sub_key not in sub_dict:
# Create a new sub-dictionary inside of sub_dict.
sub_dict[sub_key] = {}
# Change the sub_dict reference to be the sub-dictionary of sub_dict (i.e. go one level deeper).
sub_dict = sub_dict[sub_key]
# The last key in sub_keys does not map to a dict. It just maps to v.
sub_dict[sub_keys[-1]] = v
# Changes to sub_dict will be reflected in nested_dict, so we can just return nested_dict.
return nested_dict |
get network | """
A route is a rule that specifies how certain packets should be handled by the
virtual network. Routes are associated with virtual machine instances by tag,
and the set of routes for a particular VM is called its routing table.
For each packet leaving a virtual machine, the system searches that machine's
routing table for a single best matching route.
.. versionadded:: 2018.3.0
This module will create a route to send traffic destined to the Internet
through your gateway instance.
:codeauthor: `Pratik Bandarkar <[email protected]>`
:maturity: new
:depends: google-api-python-client
:platform: Linux
"""
import logging
try:
import googleapiclient.discovery
import oauth2client.service_account
HAS_LIB = True
except ImportError:
HAS_LIB = False
log = logging.getLogger(__name__)
__virtualname__ = "gcp"
def __virtual__():
"""
Check for googleapiclient api
"""
if HAS_LIB is False:
return (
False,
"Required dependencies 'googleapiclient' and/or 'oauth2client' were not"
" found.",
)
return __virtualname__
def METHOD_NAME(project_id, network_name, service):
"""
Fetch network selfLink from network name.
"""
return service.networks().get(project=project_id, network=network_name).execute()
def _get_instance(project_id, instance_zone, name, service):
"""
Get instance details
"""
return (
service.instances()
.get(project=project_id, zone=instance_zone, instance=name)
.execute()
)
def route_create(
credential_file=None,
project_id=None,
name=None,
dest_range=None,
next_hop_instance=None,
instance_zone=None,
tags=None,
network=None,
priority=None,
):
"""
Create a route to send traffic destined to the Internet through your
gateway instance
credential_file : string
File location of application default credential. For more information,
refer: https://developers.google.com/identity/protocols/application-default-credentials
project_id : string
        Project ID where instance and network reside.
name : string
name of the route to create
next_hop_instance : string
the name of an instance that should handle traffic matching this route.
instance_zone : string
zone where instance("next_hop_instance") resides
network : string
Specifies the network to which the route will be applied.
dest_range : string
The destination range of outgoing packets that the route will apply to.
tags : list
(optional) Identifies the set of instances that this route will apply to.
priority : int
(optional) Specifies the priority of this route relative to other routes.
default=1000
CLI Example:
.. code-block:: bash
salt 'salt-master.novalocal' gcp.route_create
credential_file=/root/secret_key.json
project_id=cp100-170315
name=derby-db-route1
next_hop_instance=instance-1
instance_zone=us-central1-a
network=default
dest_range=0.0.0.0/0
tags=['no-ip']
priority=700
    In the above example, the instances which have the tag "no-ip" will route the
    packet to instance "instance-1" (if the packet is destined for another network)
"""
credentials = (
oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_name(
credential_file
)
)
service = googleapiclient.discovery.build("compute", "v1", credentials=credentials)
routes = service.routes()
routes_config = {
"name": str(name),
"network": METHOD_NAME(project_id, str(network), service=service)["selfLink"],
"destRange": str(dest_range),
"nextHopInstance": _get_instance(
project_id, instance_zone, next_hop_instance, service=service
)["selfLink"],
"tags": tags,
"priority": priority,
}
route_create_request = routes.insert(project=project_id, body=routes_config)
return route_create_request.execute() |
test datasource store get by name | from typing import Callable, Dict
from unittest import mock
import pytest
from great_expectations.core.serializer import DictConfigSerializer
from great_expectations.data_context.cloud_constants import GXCloudRESTResource
from great_expectations.data_context.store import DatasourceStore
from great_expectations.data_context.types.base import (
DatasourceConfig,
datasourceConfigSchema,
)
from great_expectations.data_context.types.resource_identifiers import GXCloudIdentifier
from great_expectations.exceptions import StoreBackendError
from tests.data_context.conftest import MockResponse
@pytest.mark.cloud
def test_datasource_store_set(
ge_cloud_base_url: str,
ge_cloud_organization_id: str,
block_config_datasource_config: DatasourceConfig,
datasource_config_with_names_and_ids: DatasourceConfig,
datasource_store_ge_cloud_backend: DatasourceStore,
mocked_datasource_post_response: Callable[[], MockResponse],
mocked_datasource_get_response: Callable[[], MockResponse],
) -> None:
"""What does this test and why?
The datasource store when used with a cloud backend should emit the correct request when creating a new datasource.
"""
# Note: id will be provided by the backend on create
key = GXCloudIdentifier(
resource_type=GXCloudRESTResource.DATASOURCE,
)
with mock.patch(
"requests.Session.post",
autospec=True,
side_effect=mocked_datasource_post_response,
) as mock_post, mock.patch(
"requests.Session.get",
autospec=True,
side_effect=mocked_datasource_get_response,
):
saved_datasource_config: DatasourceConfig = (
datasource_store_ge_cloud_backend.set(
key=key, value=block_config_datasource_config
)
)
serializer = DictConfigSerializer(schema=datasourceConfigSchema)
expected_datasource_config = serializer.serialize(block_config_datasource_config)
mock_post.assert_called_once_with(
mock.ANY, # requests.Session object
f"{ge_cloud_base_url}/organizations/{ge_cloud_organization_id}/datasources",
json={
"data": {
"type": "datasource",
"attributes": {
"datasource_config": expected_datasource_config,
"organization_id": ge_cloud_organization_id,
},
}
},
)
assert serializer.serialize(saved_datasource_config) == serializer.serialize(
datasource_config_with_names_and_ids
)
@pytest.mark.cloud
def test_datasource_store_get_by_id(
ge_cloud_base_url: str,
ge_cloud_organization_id: str,
block_config_datasource_config: DatasourceConfig,
datasource_store_ge_cloud_backend: DatasourceStore,
) -> None:
"""What does this test and why?
The datasource store when used with a cloud backend should emit the correct request when getting a datasource.
"""
id: str = "example_id_normally_uuid"
key = GXCloudIdentifier(resource_type=GXCloudRESTResource.DATASOURCE, id=id)
def mocked_response(*args, **kwargs):
return MockResponse(
{
"data": {
"id": id,
"attributes": {"datasource_config": block_config_datasource_config},
}
},
200,
)
with mock.patch(
"requests.Session.get", autospec=True, side_effect=mocked_response
) as mock_get:
datasource_store_ge_cloud_backend.get(key=key)
mock_get.assert_called_once_with(
mock.ANY, # requests.Session object
f"{ge_cloud_base_url}/organizations/{ge_cloud_organization_id}/datasources/{id}",
params=None,
)
@pytest.mark.cloud
def METHOD_NAME(
ge_cloud_base_url: str,
ge_cloud_organization_id: str,
block_config_datasource_config: DatasourceConfig,
datasource_store_ge_cloud_backend: DatasourceStore,
) -> None:
"""What does this test and why?
The datasource store when used with a cloud backend should emit the correct request when getting a datasource with a name.
"""
id: str = "example_id_normally_uuid"
datasource_name: str = "example_datasource_config_name"
def mocked_response(*args, **kwargs):
return MockResponse(
{
"data": {
"id": id,
"attributes": {"datasource_config": block_config_datasource_config},
}
},
200,
)
with mock.patch(
"requests.Session.get", autospec=True, side_effect=mocked_response
) as mock_get, mock.patch(
"great_expectations.data_context.store.DatasourceStore.has_key", autospec=True
) as mock_has_key:
# Mocking has_key so that we don't try to connect to the cloud backend to verify key existence.
mock_has_key.return_value = True
datasource_store_ge_cloud_backend.retrieve_by_name(
datasource_name=datasource_name
)
mock_get.assert_called_once_with(
mock.ANY, # requests.Session object
f"{ge_cloud_base_url}/organizations/{ge_cloud_organization_id}/datasources",
params={"name": datasource_name},
)
@pytest.mark.cloud
def test_datasource_store_delete_by_id(
ge_cloud_base_url: str,
ge_cloud_organization_id: str,
datasource_store_ge_cloud_backend: DatasourceStore,
) -> None:
"""What does this test and why?
    The datasource store when used with a cloud backend should emit the correct request when deleting a datasource.
"""
id: str = "example_id_normally_uuid"
key = GXCloudIdentifier(resource_type=GXCloudRESTResource.DATASOURCE, id=id)
with mock.patch("requests.Session.delete", autospec=True) as mock_delete:
type(mock_delete.return_value).status_code = mock.PropertyMock(return_value=200)
datasource_store_ge_cloud_backend.remove_key(key=key)
mock_delete.assert_called_once_with(
mock.ANY, # requests.Session object
f"{ge_cloud_base_url}/organizations/{ge_cloud_organization_id}/datasources/{id}",
json={
"data": {
"type": "datasource",
"id": id,
"attributes": {"deleted": True},
}
},
)
@pytest.mark.unit
@pytest.mark.parametrize(
"http_verb,method,args",
[
("get", "get", []),
("put", "set", ["foobar"]),
pytest.param(
"delete",
"delete",
[],
marks=pytest.mark.xfail(
reason="We do not raise errors on delete fail", strict=True
),
),
],
)
def test_datasource_http_error_handling(
datasource_store_ge_cloud_backend: DatasourceStore,
mock_http_unavailable: Dict[str, mock.Mock],
http_verb: str,
method: str,
args: list,
):
id: str = "example_id_normally_uuid"
key = GXCloudIdentifier(resource_type=GXCloudRESTResource.DATASOURCE, id=id)
with pytest.raises(
StoreBackendError, match=r"Unable to \w+ object in GX Cloud Store Backend: .*"
) as exc_info:
backend_method = getattr(datasource_store_ge_cloud_backend, method)
backend_method(key, *args)
print(f"Exception details:\n\t{exc_info.type}\n\t{exc_info.value}")
mock_http_unavailable[http_verb].assert_called_once() |
num pitches | # Copyright 2023 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for datasets and batches."""
import os
from magenta.models.coconet import lib_mask
from magenta.models.coconet import lib_pianoroll
from magenta.models.coconet import lib_util
import numpy as np
import tensorflow.compat.v1 as tf
class Dataset(lib_util.Factory):
"""Class for retrieving different datasets."""
def __init__(self, basepath, hparams, fold):
"""Initialize a `Dataset` instance.
Args:
basepath: path to directory containing dataset npz files.
hparams: Hyperparameters object.
fold: data subset, one of {train,valid,test}.
Raises:
      ValueError: if requested a temporal resolution shorter than that available
in the dataset.
"""
self.basepath = basepath
self.hparams = hparams
self.fold = fold
if self.shortest_duration != self.hparams.quantization_level:
raise ValueError("The data has a temporal resolution of shortest "
"duration=%r, requested=%r" %
(self.shortest_duration,
self.hparams.quantization_level))
# Update the default pitch ranges in hparams to reflect that of dataset.
hparams.pitch_ranges = [self.min_pitch, self.max_pitch] # legacy hparam
hparams.min_pitch = self.min_pitch
hparams.max_pitch = self.max_pitch
hparams.shortest_duration = self.shortest_duration
hparams.dataset = self.key
self.encoder = lib_pianoroll.get_pianoroll_encoder_decoder(hparams)
data_path = os.path.join(tf.resource_loader.get_data_files_path(),
self.basepath, "%s.npz" % self.name)
print("Loading data from", data_path)
with tf.gfile.Open(data_path, "rb") as p:
self.data = np.load(p, allow_pickle=True, encoding="latin1")[fold]
@property
def name(self):
return self.hparams.dataset
@property
def num_examples(self):
return len(self.data)
@property
def METHOD_NAME(self):
return self.max_pitch + 1 - self.min_pitch
def get_sequences(self):
"""Return the raw collection of examples."""
return self.data
def get_pianorolls(self, sequences=None):
"""Turn sequences into pianorolls.
Args:
sequences: the collection of sequences to convert. If not given, the
entire dataset is converted.
Returns:
A list of multi-instrument pianorolls, each shaped
(duration, pitches, instruments)
"""
if sequences is None:
sequences = self.get_sequences()
return list(map(self.encoder.encode, sequences))
def get_featuremaps(self, sequences=None):
"""Turn sequences into features for training/evaluation.
Encodes sequences into randomly cropped and masked pianorolls, and returns
a padded Batch containing three channels: the pianorolls, the corresponding
masks and their lengths before padding (but after cropping).
Args:
sequences: the collection of sequences to convert. If not given, the
entire dataset is converted.
Returns:
A Batch containing pianorolls, masks and piece lengths.
"""
if sequences is None:
sequences = self.get_sequences()
pianorolls = []
masks = []
for sequence in sequences:
pianoroll = self.encoder.encode(sequence)
pianoroll = lib_util.random_crop(pianoroll, self.hparams.crop_piece_len)
mask = lib_mask.get_mask(
self.hparams.maskout_method,
pianoroll.shape,
separate_instruments=self.hparams.separate_instruments,
blankout_ratio=self.hparams.corrupt_ratio)
pianorolls.append(pianoroll)
masks.append(mask)
(pianorolls, masks), lengths = lib_util.pad_and_stack(pianorolls, masks)
assert pianorolls.ndim == 4 and masks.ndim == 4
assert pianorolls.shape == masks.shape
return Batch(pianorolls=pianorolls, masks=masks, lengths=lengths)
def update_hparams(self, hparams):
"""Update subset of Hyperparameters pertaining to data."""
for key in "num_instruments min_pitch max_pitch qpm".split():
setattr(hparams, key, getattr(self, key))
def get_dataset(basepath, hparams, fold):
"""Factory for Datasets."""
return Dataset.make(hparams.dataset, basepath, hparams, fold)
class Jsb16thSeparated(Dataset):
key = "Jsb16thSeparated"
min_pitch = 36
max_pitch = 81
shortest_duration = 0.125
num_instruments = 4
qpm = 60
class TestData(Dataset):
key = "TestData"
min_pitch = 0
max_pitch = 127
shortest_duration = 0.125
num_instruments = 4
qpm = 60
class Beethoven16thSeparated(Dataset):
key = "Beethoven16thSeparated"
min_pitch = 28
max_pitch = 101
shortest_duration = 0.125
num_instruments = 4
qpm = 60
class Batch(object):
"""A Batch of training/evaluation data."""
keys = set("pianorolls masks lengths".split())
def __init__(self, **kwargs):
"""Initialize a Batch instance.
Args:
**kwargs: data dictionary. Must have three keys "pianorolls", "masks",
"lengths", each corresponding to a model placeholder. Each value
is a sequence (i.e. a batch) of examples.
"""
assert set(kwargs.keys()) == self.keys
assert all(
len(value) == len(list(kwargs.values())[0])
for value in kwargs.values())
self.features = kwargs
def get_feed_dict(self, placeholders):
"""Zip placeholders and batch data into a feed dict.
Args:
placeholders: placeholder dictionary. Must have three keys "pianorolls",
"masks" and "lengths".
Returns:
A feed dict mapping the given placeholders to the data in this batch.
"""
assert set(placeholders.keys()) == self.keys
return dict((placeholders[key], self.features[key]) for key in self.keys)
def batches(self, **batches_kwargs):
"""Iterate over sub-batches of this batch.
Args:
**batches_kwargs: kwargs passed on to lib_util.batches.
Yields:
An iterator over sub-Batches.
"""
keys, values = list(zip(*list(self.features.items())))
for batch in lib_util.batches(*values, **batches_kwargs):
yield Batch(**dict(lib_util.eqzip(keys, batch))) |
run | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: Red Hat Inc. 2013-2014
# Author: Lucas Meneghel Rodrigues <[email protected]>
"""
The core Avocado application.
"""
import os
import signal
import sys
from avocado.core import output
from avocado.core.dispatcher import CLICmdDispatcher, CLIDispatcher
from avocado.core.output import STD_OUTPUT
from avocado.core.parser import Parser
from avocado.core.settings import settings
from avocado.utils import process
class AvocadoApp:
"""
Avocado application.
"""
def __init__(self):
# Catch all libc runtime errors to STDERR
os.environ["LIBC_FATAL_STDERR_"] = "1"
self._cli_dispatcher = None
self._cli_cmd_dispatcher = None
self._setup_signals()
self.parser = Parser()
self.parser.start()
output.early_start()
show = getattr(self.parser.args, "core.show")
reconfigure_settings = {"core.paginator": False, "core.show": show}
try:
self._load_cli_plugins()
self._configure_cli_plugins()
self.parser.finish()
settings.merge_with_configs()
settings.merge_with_arguments(self.parser.config)
self.parser.config.update(settings.as_dict())
self._run_cli_plugins()
except SystemExit as detail:
# If someone tries to exit Avocado, we should first close the
# STD_OUTPUT and only then exit.
output.reconfigure(reconfigure_settings)
STD_OUTPUT.close()
sys.exit(detail.code)
except:
# For any other exception we also need to close the STD_OUTPUT.
output.reconfigure(reconfigure_settings)
STD_OUTPUT.close()
raise
else:
# In case of no exceptions, we just reconfigure the output.
output.reconfigure(self.parser.config)
def _load_cli_plugins(self):
self._cli_dispatcher = CLIDispatcher()
self._cli_cmd_dispatcher = CLICmdDispatcher()
output.log_plugin_failures(
self._cli_dispatcher.load_failures + self._cli_cmd_dispatcher.load_failures
)
def _configure_cli_plugins(self):
if self._cli_cmd_dispatcher.extensions:
self._cli_cmd_dispatcher.map_method("configure", self.parser)
if self._cli_dispatcher.extensions:
self._cli_dispatcher.map_method("configure", self.parser)
def _run_cli_plugins(self):
if self._cli_dispatcher.extensions:
self._cli_dispatcher.map_method("run", self.parser.config)
@staticmethod
def _setup_signals():
def sigterm_handler(signum, frame): # pylint: disable=W0613
children = process.get_children_pids(os.getpid())
for child in children:
process.kill_process_tree(int(child))
raise SystemExit("Terminated")
signal.signal(signal.SIGTERM, sigterm_handler)
if hasattr(signal, "SIGTSTP"):
signal.signal(signal.SIGTSTP, signal.SIG_IGN) # ignore ctrl+z
def METHOD_NAME(self):
try:
try:
subcmd = self.parser.config.get("subcommand")
extension = self._cli_cmd_dispatcher[subcmd]
except KeyError:
return
method = extension.obj.METHOD_NAME
return method(self.parser.config)
finally:
# This makes sure we cleanup the console (stty echo). The only way
# to avoid cleaning it is to kill the less (paginator) directly
STD_OUTPUT.close() |
main | #!/usr/bin/env python
# -*- coding: utf-8
import sys
import anvio
import anvio.terminal as terminal
import anvio.taxonomyops.trna as trnataxonomyops
from anvio.errors import ConfigError, FilesNPathsError
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2020, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__authors__ = ['meren']
__requires__ = ['contigs-db', 'trna-taxonomy-db']
__provides__ = ['trna-taxonomy']
__resources__ = []
__description__ = ("The purpose of this program is to affiliate tRNA gene sequences in an anvi'o contigs database with "
"taxonomic names. A properly setup local tRNA taxonomy database is required for this program to perform properly. "
"After its successful run, `anvi-estimate-trna-taxonomy` will be useful to estimate taxonomy at genome-, collection-, or metagenome-level).")
@terminal.time_program
def METHOD_NAME(args):
t = trnataxonomyops.PopulateContigsDatabaseWithTRNATaxonomy(args)
t.populate_contigs_database()
if __name__ == '__main__':
from anvio.argparse import ArgumentParser
parser = ArgumentParser(description=__description__)
    groupA = parser.add_argument_group('INPUT DATABASE', "An anvi'o contigs database to search for and store the taxonomic\
affiliations of tRNA genes.")
groupA.add_argument(*anvio.A('contigs-db'), **anvio.K('contigs-db', {'required': True}))
groupA = parser.add_argument_group("ADVANCED STUFF")
groupA.add_argument(*anvio.A('trna-taxonomy-data-dir'), **anvio.K('trna-taxonomy-data-dir'))
    groupA.add_argument(*anvio.A('min-percent-identity'), **anvio.K('min-percent-identity', {'help': "The default value for this is \
                                %(default).1f%%, and in an ideal world you shouldn't really change it. Lowering this value will probably give \
you too many hits from neighboring genomes, which may ruin your consensus taxonomy (imagine, at 90%% identity \
you may match to a single species, but at 70%% identity you may match to every species in a genus and your \
consensus assignment may be influenced by that). But once in a while you will have a genome that doesn't have any\
close match in GTDB, and you will be curious to find out what it could be. So, when you are getting no tRNA hits\
whatsoever, only then you may want to play with this value. In those cases you can run anvi-estimate-trna-taxonomy\
                                with a `--debug` flag to see what is really going on. We strongly advise you to do this only with single genomes,\
and never with metagenomes.", 'default': 90}))
groupA.add_argument(*anvio.A('max-num-target-sequences'), **anvio.K('max-num-target-sequences', {'help': "This parameter is used to determine \
how many hits from the database that has a reasonable match to the query sequence should be taken into consideration \
to make a final decision about the consensus taxonomy for each individual transfer RNA gene sequence. The default \
is %(default)d, which has been quite reasonable in our tests, however, you may need to increase this number to get more \
accurate results for your own data. In cases where you think this is what you need, the best way to test the parameter \
space for `--max-num-target-sequences` is to run the program multiple times on the same database with `--debug` \
and compare results.", 'default': 100}))
groupH = parser.add_argument_group("PERFORMANCE")
groupH.add_argument(*anvio.A('num-parallel-processes'), **anvio.K('num-parallel-processes'))
groupH.add_argument(*anvio.A('num-threads'), **anvio.K('num-threads'))
groupH.add_argument(*anvio.A('write-buffer-size'), **anvio.K('write-buffer-size'))
groupI = parser.add_argument_group("OUTPUT", "By default, this program does not generate an output and instead simply store taxonomy \
information into the contigs database. But if the user wants more, they get more.")
groupI.add_argument(*anvio.A('all-hits-output-file'), **anvio.K('all-hits-output-file'))
args = parser.get_args(parser)
try:
METHOD_NAME(args)
except ConfigError as e:
print(e)
sys.exit(-1)
except FilesNPathsError as e:
print(e)
sys.exit(-2) |
test resume must failure | import os
import pickle
import sys
import numpy as np
import pytest
import wandb
from wandb import wandb_sdk
from wandb.errors import UsageError
def test_log_code(wandb_init):
run = wandb_init(mode="offline")
with open("test.py", "w") as f:
f.write('print("test")')
with open("big_file.h5", "w") as f:
f.write("Not that big")
art = run.log_code()
assert sorted(art.manifest.entries.keys()) == ["test.py"]
run.finish()
def test_log_code_include(wandb_init):
run = wandb_init(mode="offline")
with open("test.py", "w") as f:
f.write('print("test")')
with open("test.cc", "w") as f:
f.write("Not that big")
art = run.log_code(include_fn=lambda p: p.endswith(".py") or p.endswith(".cc"))
assert sorted(art.manifest.entries.keys()) == ["test.cc", "test.py"]
run.finish()
def test_log_code_custom_root(wandb_init):
run = wandb_init(mode="offline")
with open("test.py", "w") as f:
f.write('print("test")')
os.mkdir("custom")
os.chdir("custom")
with open("test.py", "w") as f:
f.write('print("test")')
art = run.log_code(root="../")
assert sorted(art.manifest.entries.keys()) == ["custom/test.py", "test.py"]
run.finish()
@pytest.mark.parametrize("project_name", ["test:?", "test" * 33])
def test_invalid_project_name(wandb_init, project_name):
with pytest.raises(UsageError) as e:
wandb_init(project=project_name)
    assert f'Invalid project name "{project_name}"' in str(e.value)
def METHOD_NAME(wandb_init):
with pytest.raises(wandb.UsageError):
wandb_init(reinit=True, resume="must")
@pytest.mark.nexus_failure(feature="artifacts")
def test_unlogged_artifact_in_config(wandb_init, test_settings):
run = wandb_init(settings=test_settings())
artifact = wandb.Artifact("my-arti", type="dataset")
with pytest.raises(Exception) as e_info:
run.config.dataset = artifact
assert (
str(e_info.value)
== "Cannot json encode artifact before it has been logged or in offline mode."
)
run.finish()
def test_media_in_config(runner, wandb_init, test_settings):
with runner.isolated_filesystem():
run = wandb_init(settings=test_settings())
with pytest.raises(ValueError):
run.config["image"] = wandb.Image(np.random.randint(0, 255, (100, 100, 3)))
run.finish()
def test_init_with_settings(wandb_init, test_settings):
# test that when calling `wandb.init(settings=wandb.Settings(...))`,
# the settings are passed with Source.INIT as the source
test_settings = test_settings()
test_settings.update(_disable_stats=True)
run = wandb_init(settings=test_settings)
assert run.settings._disable_stats
assert (
run.settings.__dict__["_disable_stats"].source
== wandb_sdk.wandb_settings.Source.INIT
)
run.finish()
def test_attach_same_process(wandb_init, test_settings):
with pytest.raises(RuntimeError) as excinfo:
run = wandb_init(settings=test_settings())
new_run = pickle.loads(pickle.dumps(run))
new_run.log({"a": 2})
run.finish()
assert "attach in the same process is not supported" in str(excinfo.value)
def test_deprecated_feature_telemetry(wandb_init, relay_server, test_settings, user):
with relay_server() as relay:
run = wandb_init(
config_include_keys=("lol",),
settings=test_settings(),
)
# use deprecated features
_ = [
run.mode,
run.save(),
run.join(),
]
telemetry = relay.context.get_run_telemetry(run.id)
# TelemetryRecord field 10 is Deprecated,
# whose fields 2-4 correspond to deprecated wandb.run features
# fields 7 & 8 are deprecated wandb.init kwargs
telemetry_deprecated = telemetry.get("10", [])
assert (
(2 in telemetry_deprecated)
and (3 in telemetry_deprecated)
and (4 in telemetry_deprecated)
and (7 in telemetry_deprecated)
)
def test_except_hook(wandb_init, test_settings):
# Test to make sure we respect excepthooks by 3rd parties like pdb
errs = []
def hook(etype, val, tb):
return errs.append(str(val))
sys.excepthook = hook
# We cant use raise statement in pytest context
def raise_(exc):
return sys.excepthook(type(exc), exc, None)
raise_(Exception("Before wandb.init()"))
run = wandb_init(mode="offline", settings=test_settings())
old_stderr_write = sys.stderr.write
stderr = []
sys.stderr.write = stderr.append
raise_(Exception("After wandb.init()"))
assert errs == ["Before wandb.init()", "After wandb.init()"]
# make sure wandb prints the traceback
assert "".join(stderr) == "Exception: After wandb.init()\n"
sys.stderr.write = old_stderr_write
run.finish()
def assertion(run_id, found, stderr):
msg = (
"`resume` will be ignored since W&B syncing is set to `offline`. "
f"Starting a new run with run id {run_id}"
)
return msg in stderr if found else msg not in stderr
@pytest.mark.parametrize(
"resume, found",
[
("auto", True),
("allow", True),
("never", True),
("must", True),
("", False),
(True, True),
(None, False),
],
)
def test_offline_resume(wandb_init, test_settings, capsys, resume, found):
run = wandb_init(mode="offline", resume=resume, settings=test_settings())
captured = capsys.readouterr()
assert assertion(run.id, found, captured.err)
run.finish() |
send confirmation email | # Copyright 2021 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from lxml import etree
from odoo import fields, models
from odoo.osv import expression
from odoo.tools.safe_eval import safe_eval
from odoo.addons.base.models.ir_ui_view import (
transfer_modifiers_to_node,
transfer_node_to_modifiers,
)
class StockPicking(models.Model):
_inherit = "stock.picking"
delivery_notification_sent = fields.Boolean(default=False)
def METHOD_NAME(self):
for picking in self:
skip_delivery_cost = picking._handle_send_to_shipper_at_operation()
picking = picking.with_context(skip_delivery_cost=skip_delivery_cost)
super(StockPicking, picking).METHOD_NAME()
def _handle_send_to_shipper_at_operation(self):
"""Send the delivery notice to the carrier from a specific operation type.
        We are only interested in sending the delivery notice; the delivery fee
        still has to be added to the SO by the ship operation.
        Return True if the operation has sent the delivery notice.
"""
self.ensure_one()
if not self.carrier_id:
# If the current operation has no carrier defined, but a carrier
# has been found from the ship and is configured to match the
# current operation type: force the sending of the delivery notice
# to the carrier
related_ship = self.ship_picking_id
carrier = related_ship.carrier_id
if (
carrier.integration_level == "rate_and_ship"
and carrier.send_delivery_notice_on == "custom"
and self.picking_type_id
in carrier.send_delivery_notice_picking_type_ids
):
self.carrier_id = carrier
self.with_context(skip_delivery_cost=True).send_to_shipper()
# Flag the current operation and the ship one.
# Mandatory to not execute twice 'send_to_shipper' method
self.delivery_notification_sent = True
related_ship.delivery_notification_sent = True
related_ship.carrier_price = self.carrier_price
if not related_ship.carrier_tracking_ref:
related_ship.carrier_tracking_ref = self.carrier_tracking_ref
else:
related_ship.carrier_tracking_ref += "," + self.carrier_tracking_ref
return True
return False
def send_to_shipper(self):
# Do not send delivery notice to the carrier if it has already been sent
# through a previous operation (like a pack)
self.ensure_one()
if self.delivery_notification_sent:
# But we still need to add the delivery cost to the SO
self._add_delivery_cost_to_so()
return False
return super().send_to_shipper()
def _add_delivery_cost_to_so(self):
if self.env.context.get("skip_delivery_cost"):
return
return super()._add_delivery_cost_to_so()
def fields_view_get(
self, view_id=None, view_type="form", toolbar=False, submenu=False
):
# Override to hide the "Send to shipper" button if the delivery
# notification has already been sent
result = super().fields_view_get(
view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu
)
if result.get("name") == "stock.picking.form":
result["arch"] = self._fields_view_get_adapt_send_to_shipper_attrs(
result["arch"]
)
return result
def _fields_view_get_adapt_send_to_shipper_attrs(self, view_arch):
"""Hide 'Send to Shipper' button if 'delivery_notification_sent' is True."""
doc = etree.XML(view_arch)
xpath_expr = "//button[@name='send_to_shipper']"
attrs_key = "invisible"
nodes = doc.xpath(xpath_expr)
for field in nodes:
attrs = safe_eval(field.attrib.get("attrs", "{}"))
if not attrs[attrs_key]:
continue
invisible_domain = expression.OR(
[attrs[attrs_key], [("delivery_notification_sent", "=", True)]]
)
attrs[attrs_key] = invisible_domain
field.set("attrs", str(attrs))
modifiers = {}
transfer_node_to_modifiers(
field, modifiers, self.env.context, current_node_path=["tree"]
)
transfer_modifiers_to_node(modifiers, field)
return etree.tostring(doc, encoding="unicode") |
import users | import io
import os
from flask import current_app
from linotp.lib.user import User
from linotp.tests import TestController
class TestAdminUserPrivilege(TestController):
ADMIN_REALM = None
ADMIN_RESOLVER = None
def setUp(self):
self.ADMIN_REALM = current_app.config["ADMIN_REALM_NAME"].lower()
self.ADMIN_RESOLVER = current_app.config["ADMIN_RESOLVER_NAME"]
self.admin_user = User(
login="admin",
realm=self.ADMIN_REALM,
resolver_config_identifier=self.ADMIN_RESOLVER,
)
TestController.setUp(self)
# clean setup
self.delete_all_policies(auth_user=self.admin_user)
self.delete_all_token()
self.delete_all_realms()
self.delete_all_resolvers()
# create the common resolvers and realm
self.create_common_resolvers()
self.create_common_realms()
def tearDown(self):
TestController.tearDown(self)
self.delete_all_policies(auth_user=self.admin_user)
self.delete_all_token()
self.delete_all_realms()
self.delete_all_resolvers()
def METHOD_NAME(self, file_name="4user.csv", resolver_name=None):
# ------------------------------------------------------------------ --
if not resolver_name:
resolver_name = file_name.strip(".csv")
# open the csv data and import the users
user_file = os.path.join(self.fixture_path, file_name)
with io.open(user_file, "r", encoding="utf-8") as f:
content = f.read()
upload_files = [("file", "user_list", content)]
params = {
"resolver": resolver_name,
"dryrun": False,
"format": "csv",
}
response = self.make_tools_request(
action="import_users", params=params, upload_files=upload_files
)
assert response.json["result"]["status"]
return response
def test_privilege(self):
"""
        verify that the admin policies take the resolver definition into account
1. create resolvers called "admin_user1" and "admin_user2" via import users
2. add these resolvers to the admin realm
3. enroll a token
4. verify that the admin of "admin_user1" and "admin_user2" resolver can
disable and enable the token
5. define the policies which allows the admins of the "admin_user1"
resolver only to disable tokens
6. verify that the admin of "admin_user2" resolver still can disable and
enable tokens and the admin of "admin_user1" resolver only can disable
and not enable tokens
        step 6. verifies that admin policies take the resolver definition into
        account:
        a policy comparison on a simple name or realm match would not
prevent the "admin_user1" to enable tokens.
"""
admin_realm = self.ADMIN_REALM
# 1.a create the "admin_user1" resolver from the 4users.csv via import
response = self.METHOD_NAME("4users.csv", "admin_user1")
params = {"resolver": "admin_user1"}
response = self.make_system_request("getResolver", params=params)
assert response.json["result"]["status"]
_4user_resolver_spec = response.json["result"]["value"]["spec"]
# 1.b create the "admin_user2" resolver from the 4users.csv via import
response = self.METHOD_NAME("4users.csv", "admin_user2")
params = {"resolver": "admin_user2"}
response = self.make_system_request("getResolver", params=params)
assert response.json["result"]["status"]
_user4_resolver_spec = response.json["result"]["value"]["spec"]
# 2. add the resolver to admin realm
# first we need to get the admin realm with its resolvers
param = {"realm": admin_realm}
response = self.make_system_request(action="getRealms", params=params)
assert response.json["result"]["status"]
resolvers = response.json["result"]["value"][admin_realm][
"useridresolver"
]
# set the admin realm with the extended list of resolvers
resolvers.append(_4user_resolver_spec)
resolvers.append(_user4_resolver_spec)
params = {"realm": admin_realm, "resolvers": ",".join(resolvers)}
response = self.make_system_request(
action="setRealm", params=params, auth_user=self.admin_user
)
assert response.json["result"]["status"]
# 3. enroll a token
params = {
"serial": "token1",
"type": "pw",
"pin": "otppin",
"otpkey": "secret",
}
response = self.make_admin_request("init", params=params)
assert response.json["result"]["status"]
# 4. verify that the admin of "admin_user1" and "admin_user2" resolver can
# disable and enable the token
# 4.a define the users
_4user_admin = User(
login="admin",
realm=admin_realm,
resolver_config_identifier="admin_user1",
)
_user4_admin = User(
login="admin",
realm=admin_realm,
resolver_config_identifier="admin_user2",
)
# 4.b run our test vector
params = {"serial": "token1"}
test_set = [
(_4user_admin, "disable", True),
(_4user_admin, "enable", True),
(_user4_admin, "disable", True),
(_user4_admin, "enable", True),
]
for auth_user, action, expected in test_set:
            response = self.make_admin_request(action, params=params, auth_user=auth_user)
assert response.json["result"]["status"] is expected
# 5. define the policies
all_allowed = {
"action": "*",
"active": True,
"client": "*",
"realm": "*",
"scope": "admin",
"user": "admin_user2:",
"name": "admin_user2",
}
response = self.make_system_request(
"setPolicy", params=all_allowed, auth_user=self.admin_user
)
assert response.json["result"]["status"]
restricted = {
"action": "disable",
"active": "True",
"client": "*",
"realm": "*",
"scope": "admin",
"user": "*",
"name": "admin_readonly",
}
response = self.make_system_request(
"setPolicy", params=restricted, auth_user=self.admin_user
)
assert response.json["result"]["status"]
# 6. verify that the admin of "admin_user2" resolver still can disable and
# enable tokens and the admin of "admin_user1" resolver only can disable
# and not enable tokens
params = {"serial": "token1"}
test_set = [
(_4user_admin, "disable", True),
(_4user_admin, "enable", False),
(_user4_admin, "disable", True),
(_user4_admin, "enable", True),
]
for auth_user, action, expected in test_set:
response = self.make_admin_request(
action, params=params, auth_user=auth_user
)
assert response.json["result"]["status"] == expected |
aggregate labels | from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union
from haystack.schema import Label, MultiLabel
def METHOD_NAME(
labels: List[Label],
add_closed_domain_filter: bool = False,
add_meta_filters: Optional[Union[str, list]] = None,
drop_negative_labels: bool = False,
drop_no_answers: bool = False,
) -> List[MultiLabel]:
"""
Aggregates Labels into MultiLabel objects (e.g. for evaluation with `Pipeline.eval()`).
Labels are always aggregated by question and filters defined in the Label objects.
Beyond that you have options to drop certain labels or to dynamically add filters to control the aggregation process.
Closed domain aggregation:
If the questions are being asked only on the document defined within the Label (i.e. SQuAD style), set `add_closed_domain_filter=True` to aggregate by question, filters and document.
Note that Labels' filters are enriched with the document_id of the Label's document.
Note that you don't need that step
- if your labels already contain the document_id in their filters
- if you're using `Pipeline.eval()`'s `add_isolated_node_eval` feature
Dynamic metadata aggregation:
If the questions are being asked on a subslice of your document set, that is not defined with the Label's filters but with an additional meta field,
populate `add_meta_filters` with the names of Label meta fields to aggregate by question, filters and your custom meta fields.
Note that Labels' filters are enriched with the specified meta fields defined in the Label.
Remarks: `add_meta_filters` is only intended for dynamic metadata aggregation (e.g. separate evaluations per document type).
For standard questions use-cases, where a question is always asked on multiple files individually, consider setting the Label's filters instead.
For example, if you want to ask a couple of standard questions for each of your products, set filters for "product_id" to your Labels.
Thus you specify that each Label is always only valid for documents with the respective product_id.
:param labels: List of Labels to aggregate.
:param add_closed_domain_filter: When True, adds a filter for the document ID specified in the label.
Thus, labels are aggregated in a closed domain fashion based on the question text, filters,
and also the id of the document that the label is tied to. See "closed domain aggregation" section for more details.
:param add_meta_filters: The names of the Label meta fields by which to aggregate in addition to question and filters. For example: ["product_id"].
Note that Labels' filters are enriched with the specified meta fields defined in the Label.
:param drop_negative_labels: When True, labels with incorrect answers and documents are dropped.
:param drop_no_answers: When True, labels with no answers are dropped.
:return: A list of MultiLabel objects.
"""
if add_meta_filters:
if type(add_meta_filters) == str:
add_meta_filters = [add_meta_filters]
else:
add_meta_filters = []
# drop no_answers in order to not create empty MultiLabels
if drop_no_answers:
labels = [label for label in labels if label.no_answer is False]
# add filters for closed domain and dynamic metadata aggregation
for l in labels:
label_filters_to_add = {}
if add_closed_domain_filter:
label_filters_to_add["_id"] = l.document.id
for meta_key in add_meta_filters:
meta = l.meta or {}
curr_meta = meta.get(meta_key, None)
if curr_meta:
curr_meta = curr_meta if isinstance(curr_meta, list) else [curr_meta]
label_filters_to_add[meta_key] = curr_meta
if label_filters_to_add:
if l.filters is None:
l.filters = label_filters_to_add
else:
l.filters.update(label_filters_to_add)
# Filters define the scope a label is valid for the query, so we group the labels by query and filters.
grouped_labels: Dict[Tuple, List[Label]] = defaultdict(list)
for l in labels:
label_filter_keys = [f"{k}={v}" for k, v in l.filters.items()] if l.filters else []
group_keys: list = [l.query] + label_filter_keys
group_key = tuple(group_keys)
grouped_labels[group_key].append(l)
aggregated_labels = [
MultiLabel(labels=ls, drop_negative_labels=drop_negative_labels, drop_no_answers=drop_no_answers)
for ls in grouped_labels.values()
]
return aggregated_labels |
parse from output | # Copyright 2023 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run MLPerf Inference CPU benchmarks.
This benchmark measures the MLPerf inference performance of the CPU.
"""
from typing import Any, Dict, List
from absl import flags
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
FLAGS = flags.FLAGS
MLPERF_INFERENCE_VERSION = 'v3.0'
_MLPERF_SCRATCH_PATH = '/scratch'
_DLRM_DATA_MODULE = 'criteo'
_DLRM_DATA = 'day_23.gz'
_DLRM_PREPROCESSED_DATA = 'full_recalib.tar.gz'
_DLRM_MODEL = '40m_limit.tar.gz'
_DLRM_ROW_FREQ = 'tb00_40M.pt'
BENCHMARK_NAME = 'mlperf_inference_cpu'
BENCHMARK_CONFIG = """
mlperf_inference_cpu:
description: Runs MLPerf Inference Benchmark on CPU.
vm_groups:
default:
vm_spec:
GCP:
machine_type: n2-standard-16
zone: us-central1-b
boot_disk_size: 200
AWS:
machine_type: p4d.24xlarge
zone: us-west-2a
boot_disk_size: 200
Azure:
machine_type: Standard_ND96asr_v4
zone: westus2
boot_disk_size: 200
"""
_BACKEND = flags.DEFINE_enum(
'mlperf_inference_cpu_backend',
'onnxruntime',
['onnxruntime', 'tvm-onnx', 'tf'],
'backend',
)
_TVM_PIP_INSTALL = flags.DEFINE_bool(
'mlperf_inference_cpu_tvm_pip_install', False, 'TVM pip install'
)
_MODEL = flags.DEFINE_enum(
'mlperf_inference_cpu_model',
'resnet50',
[
'resnet50',
'bert-99',
'bert-99.9',
'3d-unet-99',
'3d-unet-99.9',
'retinanet',
'rnnt',
],
'model',
)
_MODE = flags.DEFINE_enum(
'mlperf_inference_cpu_mode',
'performance',
['performance', 'accuracy'],
'mode',
)
_DIVISION = flags.DEFINE_enum(
'mlperf_inference_cpu_division',
'open',
['closed', 'open'],
'division',
)
_CATEGORY = flags.DEFINE_enum(
'mlperf_inference_cpu_category',
'datacenter',
['datacenter', 'edge'],
'category',
)
_DEVICE = flags.DEFINE_enum(
'mlperf_inference_cpu_device', 'cpu', ['cpu', 'cuda', 'tensorrt'], 'device'
)
_IMPLEMENTATION = flags.DEFINE_enum(
'mlperf_inference_cpu_implementation',
'python',
['reference', 'python', 'nvidia-original'],
'implementation',
)
_TARGET_LATENCY = flags.DEFINE_integer(
'mlperf_inference_latency', None, 'target latency'
)
def GetConfig(user_config: Dict[str, Any]) -> Dict[str, Any]:
"""Loads and returns benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
return config
def Prepare(bm_spec: benchmark_spec.BenchmarkSpec) -> None:
"""Installs and sets up MLPerf Inference on the target vm.
Args:
bm_spec: The benchmark specification
Raises:
errors.Config.InvalidValue upon both GPUs and TPUs appear in the config
"""
vm = bm_spec.vms[0]
vm.Install('pip3')
vm.RemoteCommand('python3 -m pip install cmind -U')
cm = 'PATH=~/.local/bin:$PATH cm'
bm_spec.cm = cm
vm.RemoteCommand(f'{cm} pull repo mlcommons@ck --checkout=master')
vm.RemoteCommand(f'{cm} run script "get sys-utils-cm" --quiet')
vm.RemoteCommand(
f'{cm} run script "install python-venv" --version=3.10.8 --name=mlperf'
)
def METHOD_NAME(output: str) -> Dict[str, str]:
"""Creates samples containing metrics.
Args:
output: string, command output
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mlperf_inference_cpu_benchmark_test.py
Returns:
    A dict mapping metric names to their string values.
"""
result = regex_util.ExtractAllMatches(r'(.*):(.*)', output)
return {key.strip(): value.strip() for key, value in result}
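# Illustrative sketch with made-up output lines: an output such as
#   "Samples per second: 123.45\nResult is : VALID"
# is parsed by the regex above into {'Samples per second': '123.45', 'Result is': 'VALID'}.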
def MakeSamplesFromOutput(
base_metadata: Dict[str, Any], output: str
) -> sample.Sample:
"""Creates samples containing metrics.
Args:
base_metadata: dict contains all the metadata that reports.
output: string, command output
Example output:
perfkitbenchmarker/tests/linux_benchmarks/mlperf_inference_cpu_benchmark_test.py
Returns:
Sample containing training metrics.
"""
metadata = METHOD_NAME(output)
metadata.update(base_metadata)
return sample.Sample(
'throughput',
metadata['Samples per second'],
'samples per second',
metadata,
)
def _IsValid(output: str) -> bool:
  """Checks whether the benchmark result is valid.
Args:
output: string, command output
Returns:
whether the result is valid or invalid.
"""
results = regex_util.ExtractAllMatches(r'Result is : (\S+)', output)
return results[0] == 'VALID'
def _Run(
bm_spec: benchmark_spec.BenchmarkSpec, target_qps: float = 1.01
) -> str:
"""Runs MLPerf Inference on the cluster.
Args:
bm_spec: The benchmark specification. Contains all data that is required to
run the benchmark.
target_qps: float, the scheduled samples per second.
Returns:
mlperf inference output.
"""
# TODO(tohaowu) Add a full Run function test.
vm = bm_spec.vms[0]
stdout, _ = vm.RemoteCommand(
f'{bm_spec.cm} run script'
' --tags=run,mlperf,inference,generate-run-cmds,_find-performance'
' --adr.python.name=mlperf --adr.python.version_min=3.8'
' --submitter="Community" --hw_name=default --quiet --clean'
' --results_dir=$HOME/logs --execution-mode=valid --test_query_count=5'
f' --implementation={_IMPLEMENTATION.value} --model={_MODEL.value}'
f' --backend={_BACKEND.value} --device={_DEVICE.value}'
f' --scenario={FLAGS.mlperf_inference_scenarios} --mode={_MODE.value}'
f' --category={_CATEGORY.value} --target_qps={target_qps} --count=1'
f' {"--adr.tvm.tags=_pip-install" if _TVM_PIP_INSTALL.value else ""}'
)
return stdout
def _SearchQps(bm_spec: benchmark_spec.BenchmarkSpec) -> str:
"""Finds the system under test QPS.
  Uses binary search to find the max QPS while meeting the latency
  constraint. Stops searching when the absolute difference is less than 1 sample
  per second.
Args:
bm_spec: The benchmark specification. Contains all data that is required to
run the benchmark.
Returns:
The best performance test result.
"""
target_qps = float(METHOD_NAME(_Run(bm_spec))['Samples per second'])
return _Run(bm_spec, target_qps)
def Run(bm_spec: benchmark_spec.BenchmarkSpec) -> List[sample.Sample]:
"""Runs MLPerf Inference on the cluster.
Args:
bm_spec: The benchmark specification. Contains all data that is required to
run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vm = bm_spec.vms[0]
vm.RemoteCommand(f'{bm_spec.cm} rm cache -f')
stdout = _SearchQps(bm_spec)
metadata = {
'implementation': _IMPLEMENTATION.value,
'model': _MODEL.value,
'backend': _BACKEND.value,
'device': _DEVICE.value,
'scenario': FLAGS.mlperf_inference_scenarios,
'mode': _MODE.value,
'category': _CATEGORY.value,
}
return [MakeSamplesFromOutput(metadata, stdout)]
def Cleanup(unused_bm_spec: benchmark_spec.BenchmarkSpec) -> None:
"""Cleanup MLPerf Inference on the cluster."""
pass |
read | # LocalStack Resource Provider Scaffolding v2
from __future__ import annotations
import json
import random
import string
from pathlib import Path
from typing import Optional, Type, TypedDict
import localstack.services.cloudformation.provider_utils as util
from localstack.services.cloudformation.resource_provider import (
CloudFormationResourceProviderPlugin,
OperationStatus,
ProgressEvent,
ResourceProvider,
ResourceRequest,
)
class IAMPolicyProperties(TypedDict):
PolicyDocument: Optional[dict]
PolicyName: Optional[str]
Groups: Optional[list[str]]
Id: Optional[str]
Roles: Optional[list[str]]
Users: Optional[list[str]]
REPEATED_INVOCATION = "repeated_invocation"
class IAMPolicyProvider(ResourceProvider[IAMPolicyProperties]):
TYPE = "AWS::IAM::Policy" # Autogenerated. Don't change
SCHEMA = util.get_schema_path(Path(__file__)) # Autogenerated. Don't change
def create(
self,
request: ResourceRequest[IAMPolicyProperties],
) -> ProgressEvent[IAMPolicyProperties]:
"""
Create a new resource.
Primary identifier fields:
- /properties/Id
Required properties:
- PolicyDocument
- PolicyName
Read-only properties:
- /properties/Id
"""
model = request.desired_state
iam_client = request.aws_client_factory.iam
policy_doc = json.dumps(util.remove_none_values(model["PolicyDocument"]))
policy_name = model["PolicyName"]
for role in model.get("Roles", []):
iam_client.put_role_policy(
RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc
)
for user in model.get("Users", []):
iam_client.put_user_policy(
UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc
)
for group in model.get("Groups", []):
iam_client.put_group_policy(
GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc
)
# the physical resource ID here has a bit of a weird format
# e.g. 'stack-fnSe-1OKWZIBB89193' where fnSe are the first 4 characters of the LogicalResourceId (or name?)
suffix = "".join(random.choices(string.ascii_uppercase + string.digits, k=13))
model["Id"] = f"stack-{model.get('PolicyName', '')[:4]}-{suffix}"
return ProgressEvent(status=OperationStatus.SUCCESS, resource_model=model)
def METHOD_NAME(
self,
request: ResourceRequest[IAMPolicyProperties],
) -> ProgressEvent[IAMPolicyProperties]:
"""
Fetch resource information
"""
raise NotImplementedError
def delete(
self,
request: ResourceRequest[IAMPolicyProperties],
) -> ProgressEvent[IAMPolicyProperties]:
"""
Delete a resource
"""
iam = request.aws_client_factory.iam
iam.delete_policy(PolicyArn=request.desired_state["Id"])
return ProgressEvent(status=OperationStatus.SUCCESS, resource_model={})
def update(
self,
request: ResourceRequest[IAMPolicyProperties],
) -> ProgressEvent[IAMPolicyProperties]:
"""
Update a resource
"""
iam_client = request.aws_client_factory.iam
model = request.desired_state
# FIXME: this wasn't properly implemented before as well, still needs to be rewritten
policy_doc = json.dumps(util.remove_none_values(model["PolicyDocument"]))
policy_name = model["PolicyName"]
for role in model.get("Roles", []):
iam_client.put_role_policy(
RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc
)
for user in model.get("Users", []):
iam_client.put_user_policy(
UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc
)
for group in model.get("Groups", []):
iam_client.put_group_policy(
GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc
)
return ProgressEvent(
status=OperationStatus.SUCCESS,
resource_model={**request.previous_state, **request.desired_state},
)
class IAMPolicyProviderPlugin(CloudFormationResourceProviderPlugin):
name = "AWS::IAM::Policy"
def __init__(self):
self.factory: Optional[Type[ResourceProvider]] = None
def load(self):
self.factory = IAMPolicyProvider |
test fetch from container | #
# Copyright (c) 2022, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import random
import time
import uuid
import pytest
import neptune
from neptune.metadata_containers import Model
from tests.e2e.base import (
BaseE2ETest,
fake,
)
from tests.e2e.utils import a_key
class TestFetchTable(BaseE2ETest):
def test_fetch_runs_by_tag(self, environment, project):
tag1, tag2 = str(uuid.uuid4()), str(uuid.uuid4())
with neptune.init_run(project=environment.project) as run:
run_id1 = run["sys/id"].fetch()
run["sys/tags"].add(tag1)
run["sys/tags"].add(tag2)
with neptune.init_run(project=environment.project) as run:
run["sys/tags"].add(tag2)
# wait for the cache to fill
time.sleep(5)
runs_table = project.fetch_runs_table(tag=[tag1, tag2]).to_rows()
assert len(runs_table) == 1
assert runs_table[0].get_attribute_value("sys/id") == run_id1
@pytest.mark.parametrize("container", ["model"], indirect=True)
def test_fetch_model_versions_with_correct_ids(self, container: Model, environment):
model_sys_id = container["sys/id"].fetch()
versions_to_initialize = 5
for _ in range(versions_to_initialize):
with neptune.init_model_version(model=model_sys_id, project=environment.project):
pass
# wait for the elasticsearch cache to fill
time.sleep(5)
versions_table = sorted(
container.fetch_model_versions_table().to_rows(),
key=lambda r: r.get_attribute_value("sys/id"),
)
assert len(versions_table) == versions_to_initialize
for index in range(versions_to_initialize):
assert versions_table[index].get_attribute_value("sys/id") == f"{model_sys_id}-{index + 1}"
def METHOD_NAME(self, init_container, get_containers_as_rows):
container_id1, container_id2 = None, None
key1 = self.gen_key()
key2 = f"{self.gen_key()}/{self.gen_key()}"
value1 = random.randint(1, 100)
value2 = fake.name()
with init_container() as container:
container_id1 = container["sys/id"].fetch()
container[key1] = value1
container[key2] = value2
container.sync()
with init_container() as container:
container_id2 = container["sys/id"].fetch()
container[key1] = value1
container.sync()
# wait for the cache to fill
time.sleep(5)
containers_as_rows = get_containers_as_rows()
container1 = next(filter(lambda m: m.get_attribute_value("sys/id") == container_id1, containers_as_rows))
container2 = next(filter(lambda m: m.get_attribute_value("sys/id") == container_id2, containers_as_rows))
assert container1.get_attribute_value(key1) == value1
assert container1.get_attribute_value(key2) == value2
assert container2.get_attribute_value(key1) == value1
with pytest.raises(ValueError):
container2.get_attribute_value(key2)
def get_container1(**kwargs):
containers_as_rows = get_containers_as_rows(**kwargs)
return next(filter(lambda m: m.get_attribute_value("sys/id") == container_id1, containers_as_rows))
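# The checks below exercise the `columns` filter: None fetches every column,
# an empty list fetches none, and an explicit list fetches only the named
# keys; reading an attribute that was filtered out raises ValueError.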
non_filtered = get_container1()
assert non_filtered.get_attribute_value(key1) == value1
assert non_filtered.get_attribute_value(key2) == value2
columns_none = get_container1(columns=None)
assert columns_none.get_attribute_value(key1) == value1
assert columns_none.get_attribute_value(key2) == value2
columns_empty = get_container1(columns=[])
with pytest.raises(ValueError):
columns_empty.get_attribute_value(key1)
with pytest.raises(ValueError):
columns_empty.get_attribute_value(key2)
columns_with_one_key = get_container1(columns=[key1])
assert columns_with_one_key.get_attribute_value(key1) == value1
with pytest.raises(ValueError):
columns_with_one_key.get_attribute_value(key2)
columns_with_one_key = get_container1(columns=[key2])
with pytest.raises(ValueError):
columns_with_one_key.get_attribute_value(key1)
assert columns_with_one_key.get_attribute_value(key2) == value2
def test_fetch_runs_table(self, environment, project):
def init_run():
return neptune.init_run(project=environment.project)
def get_runs_as_rows(**kwargs):
return project.fetch_runs_table(**kwargs).to_rows()
self.METHOD_NAME(init_run, get_runs_as_rows)
def test_fetch_models_table(self, environment, project):
def init_run():
return neptune.init_model(project=environment.project, key=a_key())
def get_models_as_rows(**kwargs):
return project.fetch_models_table(**kwargs).to_rows()
self.METHOD_NAME(init_run, get_models_as_rows)
@pytest.mark.parametrize("container", ["model"], indirect=True)
def test_fetch_model_versions_table(self, container: Model, environment):
model_sys_id = container["sys/id"].fetch()
def init_run():
return neptune.init_model_version(model=model_sys_id, project=environment.project)
def get_model_versions_as_rows(**kwargs):
return container.fetch_model_versions_table(**kwargs).to_rows()
self.METHOD_NAME(init_run, get_model_versions_as_rows)
def test_fetch_runs_table_by_state(self, environment, project):
tag = str(uuid.uuid4())
random_val = random.random()
with neptune.init_run(project=environment.project, tags=tag) as run:
run["some_random_val"] = random_val
time.sleep(30)
runs = project.fetch_runs_table(state="active").to_pandas()
assert not runs.empty
assert tag in runs["sys/tags"].values
assert random_val in runs["some_random_val"].values
time.sleep(30)
runs = project.fetch_runs_table(state="inactive").to_pandas()
assert not runs.empty
assert tag in runs["sys/tags"].values
assert random_val in runs["some_random_val"].values |
test lambda scheduler steps with optimizer single | # Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from functools import partial
import torch
from accelerate import Accelerator, debug_launcher
from accelerate.state import AcceleratorState, GradientState
from accelerate.test_utils import require_cpu, require_huggingface_suite
from accelerate.utils import GradientAccumulationPlugin
def one_cycle_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
scheduler.step()
if step_scheduler_with_optimizer or (num_processes == 1):
assert (
scheduler.scheduler.last_epoch == num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) != Num Processes ({num_processes})"
else:
assert (
scheduler.scheduler.last_epoch != num_processes
), f"Last Epoch ({scheduler.scheduler.last_epoch}) == Num Processes ({num_processes})"
def lambda_test(num_processes=2, step_scheduler_with_optimizer=True, split_batches=False):
accelerator = Accelerator(step_scheduler_with_optimizer=step_scheduler_with_optimizer, split_batches=split_batches)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda n: 1 - n / 10)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
# Optimizer has stepped
optimizer._is_overflow = False
scheduler.step()
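# When the scheduler is stepped together with the optimizer and batches are
# not split, the prepared scheduler advances once per process, so the LambdaLR
# factor drops by num_processes / 10 after a single optimizer step; otherwise
# it drops by just 1 / 10.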
expected_lr = 1 - (num_processes if (step_scheduler_with_optimizer and not split_batches) else 1) / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at first step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
# Optimizer has not stepped
optimizer._is_overflow = True
scheduler.step()
if not step_scheduler_with_optimizer:
expected_lr = 1 - 2 / 10
assert (
scheduler.get_last_lr()[0] == expected_lr
), f"Wrong lr found at second step, expected {expected_lr}, got {scheduler.get_last_lr()[0]}"
def accumulation_test(num_processes: int = 2):
"""
With this test, an observed batch size of 64 should result in negligible
differences in the scheduler after going through the correct number of steps.
Uses single, two, and four steps to test.
"""
from transformers import get_linear_schedule_with_warmup
steps = [1, 2, 4]
for num_steps in steps:
plugin = GradientAccumulationPlugin(num_steps=num_steps, adjust_scheduler=num_steps > 1)
accelerator = Accelerator(gradient_accumulation_plugin=plugin)
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer=optimizer, num_warmup_steps=0, num_training_steps=20)
model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
for i in range(10 * num_steps):
with accelerator.accumulate(model):
optimizer.step()
scheduler.step()
if i == (10 * num_steps - 2):
assert (
scheduler.get_last_lr()[0] != 0
), f"Wrong lr found at second-to-last step, expected non-zero, got {scheduler.get_last_lr()[0]}. num_steps: {num_steps}"
assert (
scheduler.get_last_lr()[0] == 0
), f"Wrong lr found at last step, expected 0, got {scheduler.get_last_lr()[0]}"
GradientState._reset_state()
@require_cpu
class SchedulerTester(unittest.TestCase):
def METHOD_NAME(self):
debug_launcher(partial(lambda_test, num_processes=1), num_processes=1)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1), num_processes=1)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(lambda_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_one_cycle_scheduler_not_step_with_optimizer_single_process(self):
debug_launcher(partial(one_cycle_test, num_processes=1, step_scheduler_with_optimizer=False), num_processes=1)
def test_lambda_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(lambda_test)
debug_launcher(partial(lambda_test, num_processes=1, split_batches=True), num_processes=1)
def test_one_cycle_scheduler_steps_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(one_cycle_test)
debug_launcher(partial(one_cycle_test, num_processes=1, split_batches=True), num_processes=1)
def test_lambda_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(lambda_test, step_scheduler_with_optimizer=False))
def test_one_cycle_scheduler_not_step_with_optimizer_multiprocess(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(one_cycle_test, step_scheduler_with_optimizer=False))
@require_huggingface_suite
def test_accumulation(self):
AcceleratorState._reset_state(True)
debug_launcher(partial(accumulation_test, num_processes=1))
debug_launcher(accumulation_test) |
test expand metrics | import pytest
from sqlmesh.core import dialect as d
from sqlmesh.core.metric import expand_metrics, load_metric_ddl
from sqlmesh.core.metric.definition import _get_measure_and_dim_tables
from sqlmesh.utils.errors import ConfigError
def test_load_metric_ddl():
a = d.parse_one(
"""
-- description a
METRIC (
name A,
expression SUM(x),
owner b
);
"""
)
meta = load_metric_ddl(a, dialect="")
assert meta.name == "a"
assert meta.expression.sql() == "SUM(x)"
assert meta.owner == "b"
assert meta.description == "description a"
def test_load_invalid():
with pytest.raises(
ConfigError, match=r"Only METRIC\(...\) statements are allowed. Found SELECT"
):
load_metric_ddl(
d.parse_one(
"""
SELECT 1;
"""
),
dialect="",
)
with pytest.raises(ConfigError, match=r"Metric 'a' missing an aggregation or metric ref."):
load_metric_ddl(
d.parse_one(
"""
METRIC (
name a,
expression 1
)
"""
),
dialect="",
).to_metric({}, {})
def METHOD_NAME():
expressions = d.parse(
"""
-- description a
METRIC (
name a,
expression SUM(model.x),
owner b
);
-- description b
METRIC (
name b,
expression COUNT(DISTINCT model.y),
owner b
);
-- description c
METRIC (
name c,
expression a / b,
owner b
);
-- description d
METRIC (
name d,
expression c + 1,
owner b
);
"""
)
metas = {}
for expr in expressions:
meta = load_metric_ddl(expr, dialect="")
metas[meta.name] = meta
metrics = expand_metrics(metas)
metric_a = metrics["a"]
assert metric_a.name == "a"
assert metric_a.expression.sql() == "SUM(model.x)"
assert metric_a.expanded.sql() == "SUM(model.x) AS a"
assert metric_a.formula.sql() == "a AS a"
assert metric_a.owner == "b"
assert metric_a.description == "description a"
metric_b = metrics["b"]
assert metric_b.name == "b"
assert metric_b.expression.sql() == "COUNT(DISTINCT model.y)"
assert metric_b.expanded.sql() == "COUNT(DISTINCT model.y) AS b"
assert metric_b.formula.sql() == "b AS b"
metric_c = metrics["c"]
assert metric_c.name == "c"
assert metric_c.expression.sql() == "a / b"
assert metric_c.expanded.sql() == "SUM(model.x) AS a / COUNT(DISTINCT model.y) AS b"
assert metric_c.formula.sql() == "a / b AS c"
metric_d = metrics["d"]
assert metric_d.expression.sql() == "c + 1"
assert metric_d.expanded.sql() == "SUM(model.x) AS a / COUNT(DISTINCT model.y) AS b + 1"
assert metric_d.formula.sql() == "a / b + 1 AS d"
assert metric_d.aggs == {
d.parse_one("SUM(model.x) AS a"): ("model", ()),
d.parse_one("COUNT(DISTINCT model.y) AS b"): ("model", ()),
}
metas = {}
for expr in expressions:
meta = load_metric_ddl(expr, dialect="snowflake")
metas[meta.name] = meta
# Checks that metric names are not normalized according to the target dialect
snowflake_metrics = expand_metrics(metas)
assert all(metric_name.islower() for metric_name in snowflake_metrics)
metric_c = snowflake_metrics["c"]
assert metric_c.name == "c"
assert metric_c.expression.sql() == "a / b"
assert metric_c.expanded.sql() == "SUM(model.x) AS a / COUNT(DISTINCT model.y) AS b"
assert metric_c.formula.sql() == "a / b AS c"
def test_get_measure_and_dim_tables():
assert _get_measure_and_dim_tables(d.parse_one("SUM(a.x)"), "") == ("a", ())
assert _get_measure_and_dim_tables(d.parse_one("SUM(a.x + a.y)"), "") == ("a", ())
assert _get_measure_and_dim_tables(d.parse_one("SUM(a.x + b.y)"), "") == ("a", ("b",))
assert _get_measure_and_dim_tables(d.parse_one("c.z + SUM(a.x)"), "") == ("a", ("c",))
assert _get_measure_and_dim_tables(d.parse_one("SUM(IF(c.z = 'dim', a.x, 0))"), "") == (
"a",
("c",),
)
assert _get_measure_and_dim_tables(
d.parse_one("SUM(IF(c.z = 'dim' AND b.y > 0, (a.x + a.x) + 3, 0))"), ""
) == ("a", ("c", "b"))
assert _get_measure_and_dim_tables(
d.parse_one("SUM(CASE b.y WHEN 1 THEN a.x ELSE 0 END)"), ""
) == ("a", ("b",)) |
on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"dynatrace monitor sso-config list"
)
class List(AAZCommand):
"""List all dynatrace sso-config by monitor name
:example: List sso-config
az dynatrace monitor sso-config list -g rg --monitor-name monitor
"""
_aaz_info = {
"version": "2021-09-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/dynatrace.observability/monitors/{}/singlesignonconfigurations", "2021-09-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.monitor_name = AAZStrArg(
options=["--monitor-name"],
help="Monitor resource name",
required=True,
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.SingleSignOnList(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class SingleSignOnList(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.METHOD_NAME(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Dynatrace.Observability/monitors/{monitorName}/singleSignOnConfigurations",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"monitorName", self.ctx.args.monitor_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-09-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType(
flags={"required": True},
)
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"required": True, "client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.aad_domains = AAZListType(
serialized_name="aadDomains",
)
properties.enterprise_app_id = AAZStrType(
serialized_name="enterpriseAppId",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
)
properties.single_sign_on_state = AAZStrType(
serialized_name="singleSignOnState",
)
properties.single_sign_on_url = AAZStrType(
serialized_name="singleSignOnUrl",
)
aad_domains = cls._schema_on_200.value.Element.properties.aad_domains
aad_domains.Element = AAZStrType()
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
__all__ = ["List"] |
test 405 error | from django.utils.translation import gettext as _
from rest_framework.test import APIRequestFactory, APITestCase
from . import error_views as views
class ExceptionHandlerTests(APITestCase):
"""
Test the error handling responses
"""
maxDiff = None
factory = APIRequestFactory()
def assertErrorResponse(self, view, expected_data: dict):
_view = view.as_view()
# method doesn't matter since we're using `dispatch`
request = self.factory.get("/some/irrelevant/url")
response = _view(request)
expected_status = expected_data["status"]
self.assertEqual(response.status_code, expected_status)
self.assertEqual(response["Content-Type"], "application/problem+json")
# can't verify UUID...
self.assertTrue(response.data["instance"].startswith("urn:uuid:"))
del response.data["instance"]
exc_class = view.exception.__class__.__name__
expected_data["type"] = f"http://testserver/fouten/{exc_class}/"
self.assertEqual(response.data, expected_data)
def test_400_error(self):
self.assertErrorResponse(
views.ValidationErrorView,
{
"code": "invalid",
"title": _("Invalid input."),
"status": 400,
"detail": "",
"invalid_params": [
{
"name": "foo",
"code": "validation-error",
"reason": _("Invalid data."),
}
],
},
)
def test_401_error(self):
self.assertErrorResponse(
views.NotAuthenticatedView,
{
"code": "not_authenticated",
"title": _("Authentication credentials were not provided."),
"status": 401,
"detail": _("Authentication credentials were not provided."),
},
)
def test_403_error(self):
self.assertErrorResponse(
views.PermissionDeniedView,
{
"code": "permission_denied",
"title": _("You do not have permission to perform this action."),
"status": 403,
"detail": _("This action is not allowed"),
},
)
def test_404_error(self):
self.assertErrorResponse(
views.NotFoundView,
{
"code": "not_found",
"title": _("Not found."),
"status": 404,
"detail": _("Some detail message"),
},
)
def METHOD_NAME(self):
self.assertErrorResponse(
views.MethodNotAllowedView,
{
"code": "method_not_allowed",
"title": _('Method "{method}" not allowed.'),
"status": 405,
"detail": _('Method "{method}" not allowed.').format(method="GET"),
},
)
def test_406_error(self):
self.assertErrorResponse(
views.NotAcceptableView,
{
"code": "not_acceptable",
"title": _("Could not satisfy the request Accept header."),
"status": 406,
"detail": _("Content negotation failed"),
},
)
def test_409_error(self):
self.assertErrorResponse(
views.ConflictView,
{
"code": "conflict",
"title": _("A conflict occurred"),
"status": 409,
"detail": _("The resource was updated, please retrieve it again"),
},
)
def test_410_error(self):
self.assertErrorResponse(
views.GoneView,
{
"code": "gone",
"title": _("The resource is gone"),
"status": 410,
"detail": _("The resource was destroyed"),
},
)
def test_412_error(self):
self.assertErrorResponse(
views.PreconditionFailed,
{
"code": "precondition_failed",
"title": _("Precondition failed"),
"status": 412,
"detail": _("Something about CRS"),
},
)
def test_415_error(self):
self.assertErrorResponse(
views.UnsupportedMediaTypeView,
{
"code": "unsupported_media_type",
"title": _('Unsupported media type "{media_type}" in request.'),
"status": 415,
"detail": _("This media type is not supported"),
},
)
def test_429_error(self):
self.assertErrorResponse(
views.ThrottledView,
{
"code": "throttled",
"title": _("Request was throttled."),
"status": 429,
"detail": _("Too many requests"),
},
)
def test_500_error(self):
self.assertErrorResponse(
views.InternalServerErrorView,
{
"code": "error",
"title": _("A server error occurred."),
"status": 500,
"detail": _("Everything broke"),
},
) |
test unpin all general forum topic messages | from typing import Optional
from pytest import mark, param
from aiogram.enums import ChatAction
from aiogram.types import BufferedInputFile, Chat, ChatPermissions
class TestChat:
def test_ban_sender_chat(self):
chat = Chat(id=-42, type="supergroup")
method = chat.ban_sender_chat(sender_chat_id=-1337)
assert method.chat_id == chat.id
assert method.sender_chat_id == -1337
def test_unban_sender_chat(self):
chat = Chat(id=-42, type="supergroup")
method = chat.unban_sender_chat(sender_chat_id=-1337)
assert method.chat_id == chat.id
assert method.sender_chat_id == -1337
def test_get_administrators(self):
chat = Chat(id=-42, type="supergroup")
method = chat.get_administrators()
assert method.chat_id == chat.id
def test_delete_message(self):
chat = Chat(id=-42, type="supergroup")
method = chat.delete_message(message_id=1)
assert method.chat_id == chat.id
def test_revoke_invite_link(self):
chat = Chat(id=-42, type="supergroup")
method = chat.revoke_invite_link(invite_link="test")
assert method.chat_id == chat.id
def test_edit_invite_link(self):
chat = Chat(id=-42, type="supergroup")
method = chat.edit_invite_link(invite_link="test", name="test")
assert method.chat_id == chat.id
def test_create_invite_link(self):
chat = Chat(id=-42, type="supergroup")
method = chat.create_invite_link(name="test")
assert method.chat_id == chat.id
def test_export_invite_link(self):
chat = Chat(id=-42, type="supergroup")
method = chat.export_invite_link()
assert method.chat_id == chat.id
def test_do(self):
chat = Chat(id=-42, type="supergroup")
method = chat.do(ChatAction.TYPING)
assert method.chat_id == chat.id
def test_delete_sticker_set(self):
chat = Chat(id=-42, type="supergroup")
method = chat.delete_sticker_set()
assert method.chat_id == chat.id
def test_set_sticker_set(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_sticker_set(sticker_set_name="test")
assert method.chat_id == chat.id
def test_get_member(self):
chat = Chat(id=-42, type="supergroup")
method = chat.get_member(user_id=42)
assert method.chat_id == chat.id
def test_get_member_count(self):
chat = Chat(id=-42, type="supergroup")
method = chat.get_member_count()
assert method.chat_id == chat.id
def test_leave(self):
chat = Chat(id=-42, type="supergroup")
method = chat.leave()
assert method.chat_id == chat.id
def test_unpin_all_messages(self):
chat = Chat(id=-42, type="supergroup")
method = chat.unpin_all_messages()
assert method.chat_id == chat.id
def test_unpin_message(self):
chat = Chat(id=-42, type="supergroup")
method = chat.unpin_message()
assert method.chat_id == chat.id
def test_pin_message(self):
chat = Chat(id=-42, type="supergroup")
method = chat.pin_message(message_id=1)
assert method.chat_id == chat.id
def test_set_administrator_custom_title(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_administrator_custom_title(user_id=1, custom_title="test")
assert method.chat_id == chat.id
def test_set_permissions(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_permissions(
permissions=ChatPermissions(
can_send_messages=True,
)
)
assert method.chat_id == chat.id
def test_promote(self):
chat = Chat(id=-42, type="supergroup")
method = chat.promote(
user_id=42,
can_manage_chat=True,
)
assert method.chat_id == chat.id
def test_restrict(self):
chat = Chat(id=-42, type="supergroup")
method = chat.restrict(
user_id=42,
permissions=ChatPermissions(
can_send_messages=True,
),
)
assert method.chat_id == chat.id
def test_unban(self):
chat = Chat(id=-42, type="supergroup")
method = chat.unban(
user_id=42,
)
assert method.chat_id == chat.id
def test_ban(self):
chat = Chat(id=-42, type="supergroup")
method = chat.ban(
user_id=42,
)
assert method.chat_id == chat.id
def test_set_description(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_description(description="test")
assert method.chat_id == chat.id
def test_set_title(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_title(title="test")
assert method.chat_id == chat.id
def test_delete_photo(self):
chat = Chat(id=-42, type="supergroup")
method = chat.delete_photo(description="test")
assert method.chat_id == chat.id
def test_set_photo(self):
chat = Chat(id=-42, type="supergroup")
method = chat.set_photo(photo=BufferedInputFile(b"PNG", filename="photo.png"))
assert method.chat_id == chat.id
@mark.parametrize(
"first,last,title,chat_type,result",
[
param("First", None, None, "private", "First", id="private_first_only"),
param("First", "Last", None, "private", "First Last", id="private_with_last"),
param(None, None, "Title", "group", "Title", id="group_with_title"),
param(None, None, "Title", "supergroup", "Title", id="supergroup_with_title"),
param(None, None, "Title", "channel", "Title", id="channel_with_title"),
],
)
def test_full_name(
self,
first: Optional[str],
last: Optional[str],
title: Optional[str],
chat_type: str,
result: str,
):
chat = Chat(id=42, first_name=first, last_name=last, title=title, type=chat_type)
assert chat.full_name == result
def METHOD_NAME(self):
chat = Chat(id=-42, type="supergroup")
method = chat.unpin_all_general_forum_topic_messages()
assert method.chat_id == chat.id |
test get order | """
Oanda interface custom unit tests.
Copyright (C) 2021 Emerson Dove
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import time
from pathlib import Path
import pytest
import blankly
from blankly.exchanges.interfaces.oanda.oanda_interface import OandaInterface
from tests.helpers.comparisons import validate_response
from tests.testing_utils import forex_market_open
@pytest.fixture
def oanda_interface():
keys_file_path = Path("tests/config/keys.json").resolve()
settings_file_path = Path("tests/config/settings.json").resolve()
oanda = blankly.Oanda(keys_path=keys_file_path,
settings_path=settings_file_path,
portfolio_name='oanda test portfolio')
# auth_obj = OandaAuth(str(keys_file_path), "oanda test portfolio")
# _, oanda_interface = DirectCallsFactory.create("oanda", auth_obj, str(settings_file_path))
return oanda.interface
def test_get_exchange(oanda_interface: OandaInterface) -> None:
assert oanda_interface.get_exchange_type() == 'oanda'
def test_get_price(oanda_interface: OandaInterface) -> None:
assert isinstance(oanda_interface.get_price("EUR-USD"), float)
assert isinstance(oanda_interface.get_price("ZAR-JPY"), float)
def test_get_cash(oanda_interface: OandaInterface) -> None:
assert isinstance(oanda_interface.cash, float)
def test_marketorder_comprehensive(oanda_interface: OandaInterface) -> None:
# query for the unique ID of EUR_USD
if not forex_market_open():
return
products = oanda_interface.get_products()
found_eur_usd = False
for product in products:
if product['symbol'] == 'EUR-USD':
found_eur_usd = True
assert found_eur_usd
eur_usd = 'EUR-USD'
market_buy_order = oanda_interface.market_order(eur_usd, 'buy', 200)
time.sleep(2)
resp = oanda_interface.get_order(eur_usd, market_buy_order.get_id())
# verify resp is correct
validate_response(oanda_interface.needed['market_order'], resp)
# Todo: validate market sell order object can query its info correctly
oanda_interface.market_order(eur_usd, 'sell', 200)
time.sleep(2)
resp = oanda_interface.get_order(eur_usd, market_buy_order.get_id())
# verify resp is correct
validate_response(oanda_interface.needed['market_order'], resp)
def test_get_products(oanda_interface: OandaInterface) -> None:
products = oanda_interface.get_products()
for product in products:
validate_response(oanda_interface.needed['get_products'], product)
def test_get_account(oanda_interface: OandaInterface) -> None:
account = oanda_interface.get_account()
usd_found = False
for key, val in account.items():
validate_response(oanda_interface.needed['get_account'], val)
if key == "USD":
usd_found = True
assert usd_found
account = oanda_interface.get_account('USD')
assert 'available' in account
assert 'hold' in account
assert isinstance(account['available'], float)
assert isinstance(account['hold'], float)
def test_limitorder_comprehensive(oanda_interface: OandaInterface) -> None:
eur_usd = 'EUR-USD'
limit_buy_order = oanda_interface.limit_order(eur_usd, 'buy', 1, 5)
time.sleep(1)
resp = oanda_interface.get_order(eur_usd, limit_buy_order.get_id())
validate_response(oanda_interface.needed['limit_order'], resp)
# now cancel the order
resp = oanda_interface.cancel_order(eur_usd, limit_buy_order.get_id())
validate_response(oanda_interface.needed['cancel_order'], resp)
def METHOD_NAME(oanda_interface: OandaInterface) -> None:
locally_created_orders = []
for i in range(5):
locally_created_orders.append(oanda_interface.limit_order("EUR-USD", 'buy', 1, 5))
time.sleep(0.1)
orders = oanda_interface.get_open_orders()
for order in orders:
needed = oanda_interface.choose_order_specificity(order['type'])
validate_response(needed, order)
orders = oanda_interface.get_open_orders("EUR-USD")
for order in orders:
needed = oanda_interface.choose_order_specificity(order['type'])
validate_response(needed, order)
for order in locally_created_orders:
resp = oanda_interface.cancel_order("EUR-USD", order.get_id())
validate_response(oanda_interface.needed['cancel_order'], resp)
def test_get_filters(oanda_interface: OandaInterface) -> None:
products = oanda_interface.get_products()
for product in products:
resp = oanda_interface.get_order_filter(product['symbol'])
validate_response(oanda_interface.needed['get_order_filter'], resp)
def test_get_product_history(oanda_interface: OandaInterface) -> None:
pass
def test_history(oanda_interface: OandaInterface) -> None:
pass
#
# def test_get_all_open_orders(oanda_interface: OandaInterface) -> None:
# start = dateparser.parse("2021-02-04 9:30AM EST").timestamp()
# end = dateparser.parse("2021-02-04 9:35AM EST").timestamp()
#
# # bars = oanda_interface.history("EUR_USD", to=5, resolution=60)
# # bars = oanda_interface.history("EUR_USD", to=5, resolution=60, end_date=end)
# bars = oanda_interface.history("EUR_USD", to=5, resolution=60*3, end_date=end)
# assert False
# |
direction | """Metric model class."""
from __future__ import annotations
from collections.abc import Callable, Sequence
from datetime import date
from typing import TYPE_CHECKING, cast
from shared.utils.type import (
Direction,
MetricId,
Scale,
SourceId,
Status,
SubjectId,
TargetType,
)
from .source import Source
if TYPE_CHECKING:
from .measurement import Measurement
class Metric(dict):
"""Class representing a metric."""
def __init__(
self,
data_model: dict,
metric_data: dict,
metric_uuid: MetricId,
subject_uuid: SubjectId | None = None,
) -> None:
self.__data_model = data_model
self.uuid = metric_uuid
self.subject_uuid = subject_uuid or cast(SubjectId, "")
source_data = metric_data.get("sources", {})
metric_data["sources"] = {
source_uuid: Source(source_uuid, self, **source_dict) for source_uuid, source_dict in source_data.items()
}
super().__init__(metric_data)
self.sources = list(self.sources_dict.values())
self.source_uuids = list(self.sources_dict.keys())
def __eq__(self, other: object) -> bool:
"""Return whether the metrics are equal."""
return self.uuid == other.uuid if isinstance(other, self.__class__) else False # pragma: no feature-test-cover
def type(self) -> str | None: # noqa: A003
"""Return the type of the metric."""
return str(self["type"]) if "type" in self else None
@property
def sources_dict(self) -> dict[SourceId, Source]:
"""Return the dict with source_uuid as keys and source instances as values."""
return self.get("sources", {})
@property
def name(self) -> str | None:
"""Either a custom name or one from the metric type in the data model."""
if name := self.get("name"):
return str(name)
default_name = self.__data_model["metrics"].get(self.type(), {}).get("name")
return str(default_name) if default_name else None
@property
def unit(self) -> str:
"""Either a custom unit or one from the metric type in the data model."""
return cast(str, self.get("unit") or self.__data_model["metrics"].get(self.type(), {}).get("unit"))
def evaluate_targets(self) -> bool:
"""Return whether the metric is to evaluate its targets. If not, it is considered to be informative."""
return self.get("evaluate_targets", True)
def status(self, last_measurement: Measurement | None) -> Status | None:
"""Determine the metric status."""
if last_measurement and (status := last_measurement.status()):
return status
return "debt_target_met" if self.accept_debt() and not self.debt_end_date_passed() else None
def issue_statuses(self, last_measurement: Measurement | None) -> list[dict]:
"""Return the metric's issue statuses."""
last_issue_statuses = last_measurement.get("issue_status", []) if last_measurement else []
return [status for status in last_issue_statuses if status["issue_id"] in self.issue_ids()]
def issue_ids(self) -> list[str]:
"""Return the ids of this metric's issues."""
return self.get("issue_ids", [])
def addition(self) -> Callable:
"""Return the addition operator of the metric: sum, min, or max."""
addition = self.get("addition") or self.__data_model["metrics"][self.type()]["addition"]
# The cast works around https://github.com/python/mypy/issues/10740
return cast(Callable, {"max": max, "min": min, "sum": sum}[addition])
def METHOD_NAME(self) -> Direction:
"""Return the direction of the metric: < or >."""
return cast(
Direction,
self.get("direction") or self.__data_model["metrics"][self.type()]["direction"],
)
def scale(self) -> Scale:
"""Return the current metric scale."""
return cast(
Scale,
self.get("scale") or self.__data_model["metrics"][self.type()].get("default_scale", "count"),
)
def scales(self) -> Sequence[Scale]:
"""Return the scales supported by the metric."""
scales = self.__data_model.get("metrics", {}).get(self.type(), {}).get("scales", [])
return cast(Sequence[Scale], scales)
def accept_debt(self) -> bool:
"""Return whether the metric has its technical debt accepted."""
return bool(self.get("accept_debt", False))
def debt_end_date(self) -> str:
"""Return the end date of the accepted technical debt."""
return str(self.get("debt_end_date") or date.max.isoformat())
def debt_end_date_passed(self) -> bool:
"""Return whether the end date of the accepted technical debt has passed."""
# The debt end date is a date in ISO-format without timezone information; suppress the Ruff error
return date.today().isoformat() > self.debt_end_date() # noqa: DTZ011
def get_target(self, target_type: TargetType) -> str | None:
"""Return the target."""
target = self.get(target_type)
return str(target) if target else None
def get_measured_attribute(self, source: Source) -> tuple[str | None, str]:
"""Return the attribute of the source entities that is used to measure the value, and its type.
For example, when using Jira as source for user story points, the points of user stories (the source entities)
are summed to arrive at the total number of user story points.
"""
source_type = self.sources_dict[source["source_uuid"]]["type"]
entity_type = self.__data_model["sources"][source_type]["entities"].get(self.type(), {})
attribute = entity_type.get("measured_attribute")
measured_attribute = None if attribute is None else str(attribute)
attribute_type = self._get_measured_attribute_type(entity_type, measured_attribute)
return measured_attribute, attribute_type
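# Illustrative only: for a user story points metric measured with Jira, the
# measured attribute might be "points" with attribute type "integer"; the
# actual keys come from the data model passed to the constructor.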
@staticmethod
def _get_measured_attribute_type(entity: dict[str, list[dict[str, str]]], attribute_key: str | None) -> str:
"""Look up the type of an entity attribute."""
attribute = {attr["key"]: attr for attr in entity.get("attributes", [])}.get(str(attribute_key), {})
return str(attribute.get("type", "text"))
def summarize(self, measurements: list[Measurement], **kwargs) -> dict:
"""Add a summary of the metric to the report."""
latest_measurement = measurements[-1] if measurements else None
summary = dict(self)
summary["scale"] = self.scale()
summary["status"] = self.status(latest_measurement)
summary["status_start"] = latest_measurement.status_start() if latest_measurement else None
summary["latest_measurement"] = latest_measurement
summary["recent_measurements"] = [measurement.summarize() for measurement in measurements]
if latest_measurement:
summary["issue_status"] = self.issue_statuses(latest_measurement)
summary.update(kwargs)
return summary |
circle size args | import pytest
import os
from simba.plotting.directing_animals_visualizer import DirectingOtherAnimalsVisualizer
from simba.plotting.directing_animals_visualizer_mp import DirectingOtherAnimalsVisualizerMultiprocess
IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true"
class TestDirectingAnimalsVisualizer(object):
@pytest.fixture(params=['tests/data/test_projects/two_c57/project_folder/csv/outlier_corrected_movement_location/Together_1.csv'])
def data_path_args(self, request):
return request
@pytest.fixture(params=['tests/data/test_projects/two_c57/project_folder/project_config.ini'])
def config_path_args(self, request):
return request
@pytest.fixture(params=[True, False])
def show_pose_args(self, request):
return request
@pytest.fixture(params=[3])
def METHOD_NAME(self, request):
return request
@pytest.fixture(params=[5])
def core_cnt_args(self, request):
return request
@pytest.fixture(params=['Random', 'Orange'])
def direction_color_args(self, request):
return request
@pytest.fixture(params=[4])
def direction_thickness_args(self, request):
return request
@pytest.fixture(params=[True, False])
def highlight_endpoints_args(self, request):
return request
@pytest.fixture(params=[True, False])
def polyfill_args(self, request):
return request
def test_directing_animal_visualizer_single_core(self,
data_path_args,
config_path_args,
show_pose_args,
METHOD_NAME,
direction_color_args,
direction_thickness_args,
highlight_endpoints_args,
polyfill_args):
style_attr = {'Show_pose': show_pose_args.param,
'Pose_circle_size': METHOD_NAME.param,
"Direction_color": direction_color_args.param,
'Direction_thickness': direction_thickness_args.param,
'Highlight_endpoints': highlight_endpoints_args.param,
'Polyfill': polyfill_args.param}
single_core_visualizer = DirectingOtherAnimalsVisualizer(config_path=config_path_args.param,
style_attr=style_attr,
data_path=data_path_args.param)
single_core_visualizer.run()
@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="LONG RUNNING TIME.")
def test_directing_animal_visualizer_multi_core(self,
data_path_args,
core_cnt_args,
config_path_args,
show_pose_args,
METHOD_NAME,
direction_color_args,
direction_thickness_args,
highlight_endpoints_args,
polyfill_args):
style_attr = {'Show_pose': show_pose_args.param,
'Pose_circle_size': METHOD_NAME.param,
"Direction_color": direction_color_args.param,
'Direction_thickness': direction_thickness_args.param,
'Highlight_endpoints': highlight_endpoints_args.param,
'Polyfill': polyfill_args.param}
multi_core_visualizer = DirectingOtherAnimalsVisualizerMultiprocess(config_path=config_path_args.param,
style_attr=style_attr,
data_path=data_path_args.param,
core_cnt=core_cnt_args.param)
multi_core_visualizer.run() |
symbolic | # Copyright (c) OpenMMLab. All rights reserved.
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair
from ..utils import ext_loader
ext_module = ext_loader.load_ext(
'_ext', ['deform_roi_pool_forward', 'deform_roi_pool_backward'])
class DeformRoIPoolFunction(Function):
@staticmethod
def METHOD_NAME(g, input, rois, offset, output_size, spatial_scale,
sampling_ratio, gamma):
return g.op(
'mmcv::MMCVDeformRoIPool',
input,
rois,
offset,
pooled_height_i=output_size[0],
pooled_width_i=output_size[1],
spatial_scale_f=spatial_scale,
sampling_ratio_f=sampling_ratio,
gamma_f=gamma)
@staticmethod
def forward(ctx,
input,
rois,
offset,
output_size,
spatial_scale=1.0,
sampling_ratio=0,
gamma=0.1):
if offset is None:
offset = input.new_zeros(0)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = float(spatial_scale)
ctx.sampling_ratio = int(sampling_ratio)
ctx.gamma = float(gamma)
assert rois.size(1) == 5, 'RoI must be (idx, x1, y1, x2, y2)!'
output_shape = (rois.size(0), input.size(1), ctx.output_size[0],
ctx.output_size[1])
output = input.new_zeros(output_shape)
ext_module.deform_roi_pool_forward(
input,
rois,
offset,
output,
pooled_height=ctx.output_size[0],
pooled_width=ctx.output_size[1],
spatial_scale=ctx.spatial_scale,
sampling_ratio=ctx.sampling_ratio,
gamma=ctx.gamma)
ctx.save_for_backward(input, rois, offset)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
input, rois, offset = ctx.saved_tensors
grad_input = grad_output.new_zeros(input.shape)
grad_offset = grad_output.new_zeros(offset.shape)
ext_module.deform_roi_pool_backward(
grad_output,
input,
rois,
offset,
grad_input,
grad_offset,
pooled_height=ctx.output_size[0],
pooled_width=ctx.output_size[1],
spatial_scale=ctx.spatial_scale,
sampling_ratio=ctx.sampling_ratio,
gamma=ctx.gamma)
if grad_offset.numel() == 0:
grad_offset = None
return grad_input, None, grad_offset, None, None, None, None
deform_roi_pool = DeformRoIPoolFunction.apply
class DeformRoIPool(nn.Module):
def __init__(self,
output_size,
spatial_scale=1.0,
sampling_ratio=0,
gamma=0.1):
super(DeformRoIPool, self).__init__()
self.output_size = _pair(output_size)
self.spatial_scale = float(spatial_scale)
self.sampling_ratio = int(sampling_ratio)
self.gamma = float(gamma)
def forward(self, input, rois, offset=None):
return deform_roi_pool(input, rois, offset, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.gamma)
class DeformRoIPoolPack(DeformRoIPool):
def __init__(self,
output_size,
output_channels,
deform_fc_channels=1024,
spatial_scale=1.0,
sampling_ratio=0,
gamma=0.1):
super(DeformRoIPoolPack, self).__init__(output_size, spatial_scale,
sampling_ratio, gamma)
self.output_channels = output_channels
self.deform_fc_channels = deform_fc_channels
self.offset_fc = nn.Sequential(
nn.Linear(
self.output_size[0] * self.output_size[1] *
self.output_channels, self.deform_fc_channels),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_channels,
self.output_size[0] * self.output_size[1] * 2))
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
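# The forward pass below runs in three stages: pool once with no offsets to
# get per-RoI features, predict a 2-channel offset map from those features
# with offset_fc, then pool again applying the predicted offsets.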
def forward(self, input, rois):
assert input.size(1) == self.output_channels
x = deform_roi_pool(input, rois, None, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.gamma)
rois_num = rois.size(0)
offset = self.offset_fc(x.view(rois_num, -1))
offset = offset.view(rois_num, 2, self.output_size[0],
self.output_size[1])
return deform_roi_pool(input, rois, offset, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.gamma)
class ModulatedDeformRoIPoolPack(DeformRoIPool):
def __init__(self,
output_size,
output_channels,
deform_fc_channels=1024,
spatial_scale=1.0,
sampling_ratio=0,
gamma=0.1):
super(ModulatedDeformRoIPoolPack,
self).__init__(output_size, spatial_scale, sampling_ratio, gamma)
self.output_channels = output_channels
self.deform_fc_channels = deform_fc_channels
self.offset_fc = nn.Sequential(
nn.Linear(
self.output_size[0] * self.output_size[1] *
self.output_channels, self.deform_fc_channels),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_channels, self.deform_fc_channels),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_channels,
self.output_size[0] * self.output_size[1] * 2))
self.offset_fc[-1].weight.data.zero_()
self.offset_fc[-1].bias.data.zero_()
self.mask_fc = nn.Sequential(
nn.Linear(
self.output_size[0] * self.output_size[1] *
self.output_channels, self.deform_fc_channels),
nn.ReLU(inplace=True),
nn.Linear(self.deform_fc_channels,
self.output_size[0] * self.output_size[1] * 1),
nn.Sigmoid())
self.mask_fc[2].weight.data.zero_()
self.mask_fc[2].bias.data.zero_()
def forward(self, input, rois):
assert input.size(1) == self.output_channels
x = deform_roi_pool(input, rois, None, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.gamma)
rois_num = rois.size(0)
offset = self.offset_fc(x.view(rois_num, -1))
offset = offset.view(rois_num, 2, self.output_size[0],
self.output_size[1])
mask = self.mask_fc(x.view(rois_num, -1))
mask = mask.view(rois_num, 1, self.output_size[0], self.output_size[1])
d = deform_roi_pool(input, rois, offset, self.output_size,
self.spatial_scale, self.sampling_ratio,
self.gamma)
return d * mask |
test issues request params | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from unittest.mock import MagicMock
import pendulum as pdm
import pytest
from source_sentry.streams import Events, Issues, ProjectDetail, Projects, SentryStreamPagination
INIT_ARGS = {"hostname": "sentry.io", "organization": "test-org", "project": "test-project"}
@pytest.fixture
def patch_base_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(SentryStreamPagination, "path", "test_endpoint")
mocker.patch.object(SentryStreamPagination, "__abstractmethods__", set())
def test_next_page_token(patch_base_class):
stream = SentryStreamPagination(hostname="sentry.io")
resp = MagicMock()
cursor = "next_page_num"
resp.links = {"next": {"results": "true", "cursor": cursor}}
inputs = {"response": resp}
expected_token = {"cursor": cursor}
assert stream.next_page_token(**inputs) == expected_token
def test_next_page_token_is_none(patch_base_class):
stream = SentryStreamPagination(hostname="sentry.io")
resp = MagicMock()
resp.links = {"next": {"results": "false", "cursor": "no_next"}}
inputs = {"response": resp}
expected_token = None
assert stream.next_page_token(**inputs) == expected_token
def next_page_token_inputs():
links_headers = [
{},
{"next": {}},
]
responses = [MagicMock() for _ in links_headers]
for mock, header in zip(responses, links_headers):
mock.links = header
return responses
@pytest.mark.parametrize("response", next_page_token_inputs())
def test_next_page_token_raises(patch_base_class, response):
stream = SentryStreamPagination(hostname="sentry.io")
inputs = {"response": response}
with pytest.raises(KeyError):
stream.next_page_token(**inputs)
def test_events_path():
stream = Events(**INIT_ARGS)
expected = "projects/test-org/test-project/events/"
assert stream.path() == expected
def test_issues_path():
stream = Issues(**INIT_ARGS)
expected = "projects/test-org/test-project/issues/"
assert stream.path() == expected
def test_projects_path():
stream = Projects(hostname="sentry.io")
expected = "projects/"
assert stream.path() == expected
def test_project_detail_path():
stream = ProjectDetail(**INIT_ARGS)
expected = "projects/test-org/test-project/"
assert stream.path() == expected
def test_sentry_stream_pagination_request_params(patch_base_class):
stream = SentryStreamPagination(hostname="sentry.io")
expected = {"cursor": "next-page"}
assert stream.request_params(stream_state=None, next_page_token={"cursor": "next-page"}) == expected
def test_events_request_params():
stream = Events(**INIT_ARGS)
expected = {"cursor": "next-page", "full": "true"}
assert stream.request_params(stream_state=None, next_page_token={"cursor": "next-page"}) == expected
def METHOD_NAME():
stream = Issues(**INIT_ARGS)
expected = {"cursor": "next-page", "statsPeriod": "", "query": "lastSeen:>1900-01-01T00:00:00Z"}
assert stream.request_params(stream_state=None, next_page_token={"cursor": "next-page"}) == expected
def test_projects_request_params():
stream = Projects(hostname="sentry.io")
expected = {"cursor": "next-page"}
assert stream.request_params(stream_state=None, next_page_token={"cursor": "next-page"}) == expected
def test_project_detail_request_params():
stream = ProjectDetail(**INIT_ARGS)
expected = {}
assert stream.request_params(stream_state=None, next_page_token=None) == expected
@pytest.mark.parametrize(
"state, expected",
[
({}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": ""}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": "None"}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": "2023-01-01T00:00:00.0Z"}, "2023-01-01T00:00:00.0Z"),
],
ids=[
"No State",
"State is Empty String",
"State is 'None'",
"State is present",
],
)
def test_validate_state_value(state, expected):
stream = Events(**INIT_ARGS)
state_value = state.get(stream.cursor_field)
assert stream.validate_state_value(state_value) == expected
@pytest.mark.parametrize(
"state, expected",
[
({}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": ""}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": "None"}, "1900-01-01T00:00:00.0Z"),
({"dateCreated": "2023-01-01T00:00:00.0Z"}, "2023-01-01T00:00:00.0Z"),
],
ids=[
"No State",
"State is Empty String",
"State is 'None'",
"State is present",
],
)
def test_get_state_value(state, expected):
stream = Events(**INIT_ARGS)
# we expect a datetime object back from the get_state_value method.
assert stream.get_state_value(state) == pdm.parse(expected) |
write | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
import selectors
import socket
import ssl
import struct
import threading
import time
import aioquic.quic.configuration # type: ignore
import aioquic.quic.connection # type: ignore
import aioquic.quic.events # type: ignore
import dns.exception
import dns.inet
from dns.quic._common import (
QUIC_MAX_DATAGRAM,
BaseQuicConnection,
BaseQuicManager,
BaseQuicStream,
UnexpectedEOF,
)
# Avoid circularity with dns.query
if hasattr(selectors, "PollSelector"):
_selector_class = selectors.PollSelector # type: ignore
else:
_selector_class = selectors.SelectSelector # type: ignore
class SyncQuicStream(BaseQuicStream):
def __init__(self, connection, stream_id):
super().__init__(connection, stream_id)
self._wake_up = threading.Condition()
self._lock = threading.Lock()
def wait_for(self, amount, expiration):
while True:
timeout = self._timeout_from_expiration(expiration)
with self._lock:
if self._buffer.have(amount):
return
self._expecting = amount
with self._wake_up:
if not self._wake_up.wait(timeout):
raise dns.exception.Timeout
self._expecting = 0
def receive(self, timeout=None):
expiration = self._expiration_from_timeout(timeout)
self.wait_for(2, expiration)
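# DNS-over-QUIC frames each message with a 2-byte big-endian length prefix:
# read the length first, then wait for that many payload bytes.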
with self._lock:
(size,) = struct.unpack("!H", self._buffer.get(2))
self.wait_for(size, expiration)
with self._lock:
return self._buffer.get(size)
def send(self, datagram, is_end=False):
data = self._encapsulate(datagram)
self._connection.METHOD_NAME(self._stream_id, data, is_end)
def _add_input(self, data, is_end):
if self._common_add_input(data, is_end):
with self._wake_up:
self._wake_up.notify()
def close(self):
with self._lock:
self._close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
with self._wake_up:
self._wake_up.notify()
return False
class SyncQuicConnection(BaseQuicConnection):
def __init__(self, connection, address, port, source, source_port, manager):
super().__init__(connection, address, port, source, source_port, manager)
self._socket = socket.socket(self._af, socket.SOCK_DGRAM, 0)
self._socket.connect(self._peer)
(self._send_wakeup, self._receive_wakeup) = socket.socketpair()
self._receive_wakeup.setblocking(False)
self._socket.setblocking(False)
if self._source is not None:
try:
self._socket.bind(
dns.inet.low_level_address_tuple(self._source, self._af)
)
except Exception:
self._socket.close()
raise
self._handshake_complete = threading.Event()
self._worker_thread = None
self._lock = threading.Lock()
def _read(self):
count = 0
while count < 10:
count += 1
try:
datagram = self._socket.recv(QUIC_MAX_DATAGRAM)
except BlockingIOError:
return
with self._lock:
self._connection.receive_datagram(datagram, self._peer[0], time.time())
def _drain_wakeup(self):
while True:
try:
self._receive_wakeup.recv(32)
except BlockingIOError:
return
def _worker(self):
try:
sel = _selector_class()
sel.register(self._socket, selectors.EVENT_READ, self._read)
sel.register(self._receive_wakeup, selectors.EVENT_READ, self._drain_wakeup)
while not self._done:
(expiration, interval) = self._get_timer_values(False)
items = sel.select(interval)
for key, _ in items:
key.data()
with self._lock:
self._handle_timer(expiration)
datagrams = self._connection.datagrams_to_send(time.time())
for datagram, _ in datagrams:
try:
self._socket.send(datagram)
except BlockingIOError:
# we let QUIC handle any lossage
pass
self._handle_events()
finally:
with self._lock:
self._done = True
# Ensure anyone waiting for this gets woken up.
self._handshake_complete.set()
def _handle_events(self):
while True:
with self._lock:
event = self._connection.next_event()
if event is None:
return
if isinstance(event, aioquic.quic.events.StreamDataReceived):
with self._lock:
stream = self._streams.get(event.stream_id)
if stream:
stream._add_input(event.data, event.end_stream)
elif isinstance(event, aioquic.quic.events.HandshakeCompleted):
self._handshake_complete.set()
elif isinstance(
event, aioquic.quic.events.ConnectionTerminated
) or isinstance(event, aioquic.quic.events.StreamReset):
with self._lock:
self._done = True
def METHOD_NAME(self, stream, data, is_end=False):
with self._lock:
self._connection.send_stream_data(stream, data, is_end)
self._send_wakeup.send(b"\x01")
def run(self):
if self._closed:
return
self._worker_thread = threading.Thread(target=self._worker)
self._worker_thread.start()
def make_stream(self, timeout=None):
if not self._handshake_complete.wait(timeout):
raise dns.exception.Timeout
with self._lock:
if self._done:
raise UnexpectedEOF
stream_id = self._connection.get_next_available_stream_id(False)
stream = SyncQuicStream(self, stream_id)
self._streams[stream_id] = stream
return stream
def close_stream(self, stream_id):
with self._lock:
super().close_stream(stream_id)
def close(self):
with self._lock:
if self._closed:
return
self._manager.closed(self._peer[0], self._peer[1])
self._closed = True
self._connection.close()
self._send_wakeup.send(b"\x01")
self._worker_thread.join()
class SyncQuicManager(BaseQuicManager):
def __init__(self, conf=None, verify_mode=ssl.CERT_REQUIRED, server_name=None):
super().__init__(conf, verify_mode, SyncQuicConnection, server_name)
self._lock = threading.Lock()
def connect(self, address, port=853, source=None, source_port=0):
with self._lock:
(connection, start) = self._connect(address, port, source, source_port)
if start:
connection.run()
return connection
def closed(self, address, port):
with self._lock:
super().closed(address, port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
        # Copy the connections into a list, as closing them will mutate the
        # connections table.
connections = list(self._connections.values())
for connection in connections:
connection.close()
return False |
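# Illustrative usage sketch (not part of dnspython): shows how the classes above fit
# together for a single DNS-over-QUIC exchange. The address, port and wire-format query
# are hypothetical placeholders; only methods defined in this module are used.
def _example_doq_roundtrip(wire_query, address="192.0.2.1", port=853):
    with SyncQuicManager(server_name="dns.example.net") as manager:
        connection = manager.connect(address, port)
        with connection.make_stream(timeout=5) as stream:
            stream.send(wire_query, is_end=True)  # send() encapsulates the message
            return stream.receive(timeout=5)      # receive() reads the 2-byte length prefix first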
test 9 | #################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
This module contains tests for the variable replace transformation.
"""
import pytest
import pyomo.environ as pyo
import idaes.core.plugins
__author__ = "John Eslick"
@pytest.mark.unit
def test_1():
# Test scalar variables
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.x = pyo.Var(initialize=2)
m.y = pyo.Var(initialize=3)
m.z = pyo.Var(initialize=0)
m.c1 = pyo.Constraint(expr=m.z == m.x + m.y)
m.e1 = pyo.Expression(expr=m.x**m.y)
m.o1 = pyo.Objective(expr=m.y - m.x)
assert m.c1.body() == -5 # hope constraint arrangement is deterministic
assert pyo.value(m.e1) == 8
assert pyo.value(m.o1) == 1
rp.apply_to(m, substitute=[(m.y, m.x)])
assert m.c1.body() == -4
assert pyo.value(m.e1) == 4
assert pyo.value(m.o1) == 0
@pytest.mark.unit
def test_2():
# Test vector variables and sums
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.x = pyo.Var(["a", "b", "c"], initialize=2)
m.y = pyo.Var(initialize=3)
m.z = pyo.Var(initialize=0)
m.c1 = pyo.Constraint(expr=m.z == m.x["a"] + m.x["b"] + m.x["c"])
m.e1 = pyo.Expression(expr=sum(m.x[i] for i in m.x))
assert m.c1.body() == -6 # hope constraint arrangement is deterministic
assert pyo.value(m.e1) == 6
rp.apply_to(m, substitute=[(m.x["c"], m.y)])
assert m.c1.body() == -7
assert pyo.value(m.e1) == 7
@pytest.mark.unit
def test_3():
# Test expression in constraint
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.x = pyo.Var(["a", "b", "c"], initialize=2)
m.y = pyo.Var(initialize=3)
m.z = pyo.Var(initialize=0)
m.e1 = pyo.Expression(expr=sum(m.x[i] for i in m.x))
m.c1 = pyo.Constraint(expr=m.z == m.e1)
assert m.c1.body() == -6 # hope constraint arrangement is deterministic
rp.apply_to(m, substitute=[(m.x["c"], m.y)])
assert m.c1.body() == -7
@pytest.mark.unit
def test_4():
# Test expression in objective
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.x = pyo.Var(["a", "b", "c"], initialize=2)
m.y = pyo.Var(initialize=3)
m.z = pyo.Var(initialize=0)
m.e1 = pyo.Expression(expr=sum(m.x[i] for i in m.x))
m.c1 = pyo.Constraint(expr=m.z == m.e1)
m.o1 = pyo.Objective(expr=m.e1)
assert pyo.value(m.o1) == 6
rp.apply_to(m, substitute=[(m.x["c"], m.y)])
assert pyo.value(m.o1) == 7
@pytest.mark.unit
def test_4b():
# Test in a hierarchical model
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.b1 = pyo.Block()
m.b1.b2 = pyo.Block()
x = m.b1.b2.x = pyo.Var(["a", "b", "c"], initialize=2)
m.y = pyo.Var(initialize=3)
m.z = pyo.Var(initialize=0)
m.e1 = pyo.Expression(expr=sum(x[i] for i in x))
m.b1.c1 = pyo.Constraint(expr=m.z == m.e1)
m.o1 = pyo.Objective(expr=m.e1)
assert pyo.value(m.o1) == 6
assert m.b1.c1.body() == -6
assert pyo.value(m.e1) == 6
rp.apply_to(m, substitute=[(x["c"], m.y)])
assert pyo.value(m.o1) == 7
assert m.b1.c1.body() == -7
assert pyo.value(m.e1) == 7
@pytest.mark.unit
def test_5():
# Test indexed var replace
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.b1 = pyo.Block()
m.b1.b2 = pyo.Block()
x = m.b1.b2.x = pyo.Var(["a", "b", "c"], [1, 2, 3], initialize=2)
m.y = pyo.Var(["a", "b", "c", "d"], [1, 2, 3], initialize=3)
m.z = pyo.Var(initialize=0)
m.e1 = pyo.Expression(expr=sum(x[i] for i in x))
m.b1.c1 = pyo.Constraint(expr=m.z == m.e1)
m.o1 = pyo.Objective(expr=m.e1)
assert pyo.value(m.o1) == 18
assert m.b1.c1.body() == -18
assert pyo.value(m.e1) == 18
rp.apply_to(m, substitute=[(x, m.y)])
assert pyo.value(m.o1) == 27
assert m.b1.c1.body() == -27
assert pyo.value(m.e1) == 27
@pytest.mark.unit
def test_6():
# Test non-variable exception
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.b1 = pyo.Block()
m.b1.b2 = pyo.Block()
x = m.b1.b2.x = pyo.Var(["a", "b", "c"], [1, 2, 3], initialize=2)
m.y = pyo.Var(["a", "b", "c", "d"], [1, 2, 3], initialize=3)
m.z = pyo.Var(initialize=0)
with pytest.raises(TypeError):
rp.apply_to(m, substitute=[(x, m.b1)])
with pytest.raises(TypeError):
rp.apply_to(m, substitute=[(m.b1, x)])
@pytest.mark.unit
def test_7():
# Test replace indexed by non-indexed
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.b1 = pyo.Block()
m.b1.b2 = pyo.Block()
x = m.b1.b2.x = pyo.Var(["a", "b", "c"], [1, 2, 3], initialize=2)
m.y = pyo.Var(["a", "b", "c", "d"], [1, 2, 3], initialize=3)
m.z = pyo.Var(initialize=0)
assert x.is_indexed()
assert not m.z.is_indexed()
with pytest.raises(TypeError):
rp.apply_to(m, substitute=[(x, m.z)])
@pytest.mark.unit
def test_8():
# Test replace indexed by indexed var that doesn't have enough/right indexes
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.b1 = pyo.Block()
m.b1.b2 = pyo.Block()
x = m.b1.b2.x = pyo.Var(["a", "b", "c"], [1, 2, 3], initialize=2)
m.y = pyo.Var(["a", "b", "d"], [1, 2, 3], initialize=3)
with pytest.raises(ValueError):
rp.apply_to(m, substitute=[(x, m.y)])
@pytest.mark.unit
def METHOD_NAME():
# test with references the way we handle time indexing a lot in IDAES
rp = pyo.TransformationFactory("replace_variables")
block_set = {1, 2, 3}
m = pyo.ConcreteModel()
m.b1 = pyo.Block(block_set)
for i in block_set:
m.b1[i].x = pyo.Var(initialize=2)
m.y = pyo.Var([1, 2, 3], initialize=3)
m.xx = pyo.Reference(m.b1[:].x)
m.display()
m.e1 = pyo.Expression(expr=sum(m.xx[i] for i in m.xx))
m.e2 = pyo.Expression(expr=sum(m.b1[i].x for i in m.b1))
assert pyo.value(m.e1) == 6
assert pyo.value(m.e2) == 6
rp.apply_to(m, substitute=[(m.xx, m.y)])
assert pyo.value(m.e1) == 9
assert pyo.value(m.e2) == 9
@pytest.mark.unit
def test_10():
# test with more than one variable in the list
rp = pyo.TransformationFactory("replace_variables")
m = pyo.ConcreteModel()
m.x = pyo.Var(["a", "b", "c"], initialize=2)
m.a = pyo.Var(initialize=5)
m.b = pyo.Var(initialize=6)
m.c = pyo.Var(initialize=7)
m.e1 = pyo.Expression(expr=sum(m.x[i] for i in m.x))
assert pyo.value(m.e1) == 6
rp.apply_to(m, substitute=[(m.x["a"], m.a), (m.x["b"], m.b), (m.x["c"], m.c)])
assert pyo.value(m.e1) == 18 |
expected 1 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import pytest
import tvm
from tvm import relay
def get_conv_net():
"""This gets the net for:
conv2d
/ |
/ |
conv2d |
\ |
\ |
elemwise add
|
|
|
split
|
|
|
elemwise add
"""
dshape = (1, 1, 5, 1)
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
tuple_0_add = relay.add(tuple_out[0], relay.const(1, dtype="float32"))
return tvm.IRModule.from_expr(tuple_0_add)
def get_conv2d():
x = relay.var("x", shape=(1, 56, 56, 64))
weight1 = relay.var("weight1", shape=(3, 3, 64, 32))
y = relay.nn.conv2d(
x,
weight1,
channels=32,
kernel_size=(3, 3),
padding=(1, 1),
data_layout="NHWC",
kernel_layout="HWIO",
)
return tvm.IRModule.from_expr(y)
def test_extract():
dshape = (1, 1, 5, 1)
def before():
return get_conv_net()
def expected_0():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(y)
def METHOD_NAME():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
return tvm.IRModule.from_expr(x1)
def expected_2():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
return tvm.IRModule.from_expr(z)
def expected_3():
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out.astuple())
def expected_4():
# check tuple node
x = relay.var("x", shape=dshape)
y = relay.nn.conv2d(x, relay.var("w1"), kernel_size=(3, 3), padding=(1, 1), channels=1)
x1 = relay.nn.conv2d(y, relay.var("w2"), kernel_size=(3, 3), padding=(1, 1), channels=1)
z = relay.add(y, x1)
tuple_out = relay.op.split(z, indices_or_sections=1, axis=0)
return tvm.IRModule.from_expr(tuple_out[0])
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 0), expected_0()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 1), METHOD_NAME()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 2), expected_2()
)
assert tvm.ir.structural_equal(
        relay.analysis.extract_intermdeiate_expr(before(), 3), expected_3()
)
assert tvm.ir.structural_equal(
relay.analysis.extract_intermdeiate_expr(before(), 4), expected_4()
)
assert tvm.ir.structural_equal(relay.analysis.extract_intermdeiate_expr(before(), 5), before())
if __name__ == "__main__":
tvm.testing.main() |
sortnum | """Unit tests for the keyword only argument specified in PEP 3102."""
__author__ = "Jiwon Seo"
__email__ = "seojiwon at gmail dot com"
import unittest
from test.support import run_unittest
def posonly_sum(pos_arg1, *arg, **kwarg):
return pos_arg1 + sum(arg) + sum(kwarg.values())
def keywordonly_sum(*, k1=0, k2):
return k1 + k2
def keywordonly_nodefaults_sum(*, k1, k2):
return k1 + k2
def keywordonly_and_kwarg_sum(*, k1, k2, **kwarg):
return k1 + k2 + sum(kwarg.values())
def mixedargs_sum(a, b=0, *arg, k1, k2=0):
return a + b + k1 + k2 + sum(arg)
def mixedargs_sum2(a, b=0, *arg, k1, k2=0, **kwargs):
return a + b + k1 + k2 + sum(arg) + sum(kwargs.values())
def METHOD_NAME(*nums, reverse=False):
return sorted(list(nums), reverse=reverse)
def sortwords(*words, reverse=False, **kwargs):
return sorted(list(words), reverse=reverse)
class Foo:
def __init__(self, *, k1, k2=0):
self.k1 = k1
self.k2 = k2
def set(self, p1, *, k1, k2):
self.k1 = k1
self.k2 = k2
def sum(self):
return self.k1 + self.k2
class KeywordOnlyArgTestCase(unittest.TestCase):
def assertRaisesSyntaxError(self, codestr):
def shouldRaiseSyntaxError(s):
compile(s, "<test>", "single")
self.assertRaises(SyntaxError, shouldRaiseSyntaxError, codestr)
def testSyntaxErrorForFunctionDefinition(self):
self.assertRaisesSyntaxError("def f(p, *):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, p1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, k1=100):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, k1, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p1, *, None, **k1):\n pass\n")
self.assertRaisesSyntaxError("def f(p, *, (k1, k2), **kw):\n pass\n")
def testSyntaxForManyArguments(self):
fundef = "def f("
for i in range(255):
fundef += "i%d, "%i
fundef += "*, key=100):\n pass\n"
self.assertRaisesSyntaxError(fundef)
fundef2 = "def foo(i,*,"
for i in range(255):
fundef2 += "i%d, "%i
fundef2 += "lastarg):\n pass\n"
self.assertRaisesSyntaxError(fundef2)
# exactly 255 arguments, should compile ok
fundef3 = "def f(i,*,"
for i in range(253):
fundef3 += "i%d, "%i
fundef3 += "lastarg):\n pass\n"
compile(fundef3, "<test>", "single")
def testTooManyPositionalErrorMessage(self):
def f(a, b=None, *, c=None):
pass
with self.assertRaises(TypeError) as exc:
f(1, 2, 3)
expected = "f() takes from 1 to 2 positional arguments but 3 were given"
self.assertEqual(str(exc.exception), expected)
def testSyntaxErrorForFunctionCall(self):
self.assertRaisesSyntaxError("f(p, k=1, p2)")
self.assertRaisesSyntaxError("f(p, k1=50, *(1,2), k1=100)")
def testRaiseErrorFuncallWithUnexpectedKeywordArgument(self):
self.assertRaises(TypeError, keywordonly_sum, ())
self.assertRaises(TypeError, keywordonly_nodefaults_sum, ())
self.assertRaises(TypeError, Foo, ())
try:
keywordonly_sum(k2=100, non_existing_arg=200)
self.fail("should raise TypeError")
except TypeError:
pass
try:
keywordonly_nodefaults_sum(k2=2)
self.fail("should raise TypeError")
except TypeError:
pass
def testFunctionCall(self):
self.assertEqual(1, posonly_sum(1))
self.assertEqual(1+2, posonly_sum(1,**{"2":2}))
self.assertEqual(1+2+3, posonly_sum(1,*(2,3)))
self.assertEqual(1+2+3+4, posonly_sum(1,*(2,3),**{"4":4}))
self.assertEqual(1, keywordonly_sum(k2=1))
self.assertEqual(1+2, keywordonly_sum(k1=1, k2=2))
self.assertEqual(1+2, keywordonly_and_kwarg_sum(k1=1, k2=2))
self.assertEqual(1+2+3, keywordonly_and_kwarg_sum(k1=1, k2=2, k3=3))
self.assertEqual(1+2+3+4,
keywordonly_and_kwarg_sum(k1=1, k2=2,
**{"a":3,"b":4}))
self.assertEqual(1+2, mixedargs_sum(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2, mixedargs_sum2(1, k1=2))
self.assertEqual(1+2+3, mixedargs_sum2(1, 2, k1=3))
self.assertEqual(1+2+3+4, mixedargs_sum2(1, 2, k1=3, k2=4))
self.assertEqual(1+2+3+4+5, mixedargs_sum2(1, 2, 3, k1=4, k2=5))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, k2=5, k3=6))
self.assertEqual(1+2+3+4+5+6,
mixedargs_sum2(1, 2, 3, k1=4, **{'k2':5, 'k3':6}))
self.assertEqual(1, Foo(k1=1).sum())
self.assertEqual(1+2, Foo(k1=1,k2=2).sum())
self.assertEqual([1,2,3], METHOD_NAME(3,2,1))
self.assertEqual([3,2,1], METHOD_NAME(1,2,3, reverse=True))
self.assertEqual(['a','b','c'], sortwords('a','c','b'))
self.assertEqual(['c','b','a'], sortwords('a','c','b', reverse=True))
self.assertEqual(['c','b','a'],
sortwords('a','c','b', reverse=True, ignore='ignore'))
def testKwDefaults(self):
def foo(p1,p2=0, *, k1, k2=0):
return p1 + p2 + k1 + k2
self.assertEqual(2, foo.__code__.co_kwonlyargcount)
self.assertEqual({"k2":0}, foo.__kwdefaults__)
foo.__kwdefaults__ = {"k1":0}
try:
foo(1,k1=10)
self.fail("__kwdefaults__ is not properly changed")
except TypeError:
pass
def test_kwonly_methods(self):
class Example:
def f(self, *, k1=1, k2=2):
return k1, k2
self.assertEqual(Example().f(k1=1, k2=2), (1, 2))
self.assertEqual(Example.f(Example(), k1=1, k2=2), (1, 2))
self.assertRaises(TypeError, Example.f, k1=1, k2=2)
def test_issue13343(self):
# The Python compiler must scan all symbols of a function to
# determine their scope: global, local, cell...
# This was not done for the default values of keyword
# arguments in a lambda definition, and the following line
# used to fail with a SystemError.
lambda *, k1=unittest: None
def test_mangling(self):
class X:
def f(self, *, __a=42):
return __a
self.assertEqual(X().f(), 42)
def test_default_evaluation_order(self):
# See issue 16967
a = 42
with self.assertRaises(NameError) as err:
def f(v=a, x=b, *, y=c, z=d):
pass
self.assertEqual(str(err.exception), "name 'b' is not defined")
with self.assertRaises(NameError) as err:
f = lambda v=a, x=b, *, y=c, z=d: None
self.assertEqual(str(err.exception), "name 'b' is not defined")
def test_main():
run_unittest(KeywordOnlyArgTestCase)
if __name__ == "__main__":
test_main() |
update function | import logging
from io import BytesIO
from zipfile import ZipFile
import requests
from django.conf import settings
from google.cloud.functions_v1.services.cloud_functions_service import CloudFunctionsServiceClient
from google.cloud.functions_v1.types import (
CloudFunction,
EventTrigger,
GenerateUploadUrlRequest,
UpdateFunctionRequest,
)
from google.cloud.functions_v1.types.functions import DeleteFunctionRequest
from google.cloud.pubsub_v1 import PublisherClient
from google.protobuf.field_mask_pb2 import FieldMask
from sentry.utils import json
WRAPPER_JS = """
const userFunc = require('./function.js');
Object.assign(process.env, require('./env.json'));
"""
PACKAGE_JSON = {
"dependencies": {
"@sentry/node": "^6.11.0",
"@sentry/tracing": "^6.11.0",
"node-fetch": "^2.6.1",
}
}
def function_pubsub_name(funcId):
return f"projects/{settings.SENTRY_FUNCTIONS_PROJECT_NAME}/topics/fn-{funcId}"
def project_location_function_name(
include_proj=False, include_loc=False, include_func=False, funcId=None
):
return_value = ""
if include_proj:
return_value += f"projects/{settings.SENTRY_FUNCTIONS_PROJECT_NAME}/"
if include_loc:
return_value += f"locations/{settings.SENTRY_FUNCTIONS_REGION}"
if include_func:
return_value += f"/functions/fn-{funcId}"
return return_value
def create_function_pubsub_topic(funcId):
logger = logging.getLogger("sentry.functions")
logger.info(f"Created topic {function_pubsub_name(funcId)}")
publisher = PublisherClient()
publisher.create_topic(name=function_pubsub_name(funcId))
def upload_function_files(client, code, env_variables):
f = BytesIO()
with ZipFile(f, "w") as codezip:
codezip.writestr("function.js", code)
# codezip.writestr("index.js", WRAPPER_JS)
codezip.writestr("package.json", json.dumps(PACKAGE_JSON))
codezip.writestr("env.json", json.dumps(env_variables))
f.seek(0)
logger = logging.getLogger("sentry.functions")
logger.info(f"The region is {settings.SENTRY_FUNCTIONS_REGION}")
upload_url = client.generate_upload_url(
request=GenerateUploadUrlRequest(
parent=project_location_function_name(include_proj=True, include_loc=True)
)
).upload_url
requests.put(
upload_url,
data=f,
headers={"content-type": "application/zip", "x-goog-content-length-range": "0,104857600"},
)
return upload_url
def create_function(code, funcId, description, env_variables):
create_function_pubsub_topic(funcId)
client = CloudFunctionsServiceClient()
client.create_function(
function=subcreate_function(client, code, funcId, description, env_variables),
location=project_location_function_name(include_proj=True, include_loc=True),
)
def METHOD_NAME(code, funcId, description, env_variables):
client = CloudFunctionsServiceClient()
client.METHOD_NAME(
request=UpdateFunctionRequest(
function=subcreate_function(client, code, funcId, description, env_variables),
update_mask=FieldMask(paths=["source_upload_url", "environment_variables"]),
)
)
def subcreate_function(client, code, funcId, description, env_variables):
upload_url = upload_function_files(client, code, env_variables)
return CloudFunction(
name=project_location_function_name(
include_proj=True, include_loc=True, include_func=True, funcId=funcId
),
description=description,
source_upload_url=upload_url,
runtime="nodejs16",
entry_point="yourFunction",
event_trigger=EventTrigger(
event_type="providers/cloud.pubsub/eventTypes/topic.publish",
resource=function_pubsub_name(funcId),
),
environment_variables=env_variables,
)
def delete_function(funcId):
client = CloudFunctionsServiceClient()
client.delete_function(
request=DeleteFunctionRequest(
name=project_location_function_name(
include_proj=True, include_loc=True, include_func=True, funcId=funcId
)
),
)
def publish_message(funcId, message):
publisher = PublisherClient()
publisher.publish(
topic=function_pubsub_name(funcId),
data=message,
) |
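# Illustrative usage sketch (not part of this module's API): a hypothetical end-to-end
# flow using only the helpers defined above. The function id, JavaScript code and message
# payload are made-up examples; real use requires the SENTRY_FUNCTIONS_* settings and GCP
# credentials to be configured.
def _example_function_lifecycle():
    func_id = "demo-0001"  # hypothetical id
    create_function(
        code="exports.yourFunction = (message, context) => console.log(message);",
        funcId=func_id,
        description="demo function",
        env_variables={"STAGE": "dev"},
    )
    publish_message(func_id, b'{"hello": "world"}')
    METHOD_NAME(  # the update helper defined above
        code="exports.yourFunction = (message, context) => console.warn(message);",
        funcId=func_id,
        description="demo function (updated)",
        env_variables={"STAGE": "dev"},
    )
    delete_function(func_id)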
read block schemas | """
Routes for interacting with block schema objects.
"""
from typing import List, Optional
from uuid import UUID
from fastapi import Body, Depends, HTTPException, Path, Query, Response, status
from prefect.server import models, schemas
from prefect.server.api import dependencies
from prefect.server.database.dependencies import provide_database_interface
from prefect.server.database.interface import PrefectDBInterface
from prefect.server.models.block_schemas import MissingBlockTypeException
from prefect.server.utilities.server import PrefectRouter
router = PrefectRouter(prefix="/block_schemas", tags=["Block schemas"])
@router.post("/", status_code=status.HTTP_201_CREATED)
async def create_block_schema(
block_schema: schemas.actions.BlockSchemaCreate,
response: Response,
db: PrefectDBInterface = Depends(provide_database_interface),
) -> schemas.core.BlockSchema:
from prefect.blocks.core import Block
async with db.session_context(begin_transaction=True) as session:
block_type = await models.block_types.read_block_type(
session=session, block_type_id=block_schema.block_type_id
)
if block_type is None:
raise HTTPException(
status.HTTP_404_NOT_FOUND,
detail=f"Block type {block_schema.block_type_id} not found.",
)
block_schema_checksum = Block._calculate_schema_checksum(block_schema.fields)
existing_block_schema = (
await models.block_schemas.read_block_schema_by_checksum(
session=session,
checksum=block_schema_checksum,
version=block_schema.version,
)
)
if existing_block_schema:
response.status_code = status.HTTP_200_OK
return existing_block_schema
try:
model = await models.block_schemas.create_block_schema(
session=session,
block_schema=block_schema,
)
except MissingBlockTypeException as ex:
raise HTTPException(status.HTTP_409_CONFLICT, detail=str(ex))
return model
@router.delete("/{id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_block_schema(
block_schema_id: UUID = Path(..., description="The block schema id", alias="id"),
db: PrefectDBInterface = Depends(provide_database_interface),
api_version=Depends(dependencies.provide_request_api_version),
):
"""
Delete a block schema by id.
"""
async with db.session_context(begin_transaction=True) as session:
block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema_id
)
if not block_schema:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND, detail="Block schema not found"
)
if block_schema.block_type.is_protected:
raise HTTPException(
status.HTTP_403_FORBIDDEN,
detail="Block schemas for protected block types cannot be deleted.",
)
await models.block_schemas.delete_block_schema(
session=session, block_schema_id=block_schema_id
)
@router.post("/filter")
async def METHOD_NAME(
block_schemas: Optional[schemas.filters.BlockSchemaFilter] = None,
limit: int = dependencies.LimitBody(),
offset: int = Body(0, ge=0),
db: PrefectDBInterface = Depends(provide_database_interface),
) -> List[schemas.core.BlockSchema]:
"""
Read all block schemas, optionally filtered by type
"""
async with db.session_context() as session:
result = await models.block_schemas.METHOD_NAME(
session=session,
block_schema_filter=block_schemas,
limit=limit,
offset=offset,
)
return result
@router.get("/{id}")
async def read_block_schema_by_id(
block_schema_id: UUID = Path(..., description="The block schema id", alias="id"),
db: PrefectDBInterface = Depends(provide_database_interface),
) -> schemas.core.BlockSchema:
"""
Get a block schema by id.
"""
async with db.session_context() as session:
block_schema = await models.block_schemas.read_block_schema(
session=session, block_schema_id=block_schema_id
)
if not block_schema:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail="Block schema not found")
return block_schema
@router.get("/checksum/{checksum}")
async def read_block_schema_by_checksum(
block_schema_checksum: str = Path(
..., description="The block schema checksum", alias="checksum"
),
db: PrefectDBInterface = Depends(provide_database_interface),
version: Optional[str] = Query(
None,
description=(
"Version of block schema. If not provided the most recently created block"
" schema with the matching checksum will be returned."
),
),
) -> schemas.core.BlockSchema:
async with db.session_context() as session:
block_schema = await models.block_schemas.read_block_schema_by_checksum(
session=session, checksum=block_schema_checksum, version=version
)
if not block_schema:
raise HTTPException(status.HTTP_404_NOT_FOUND, detail="Block schema not found")
return block_schema |
test index objects while reindexing | from unittest import mock
from olympia.addons.indexers import AddonIndexer
from olympia.addons.models import Addon
from olympia.amo.tests import ESTestCase, TestCase, addon_factory
from olympia.search.models import Reindexing
from olympia.search.utils import get_es, index_objects, unindex_objects
class TestGetES(ESTestCase):
def test_get_es(self):
es = get_es()
assert es.transport._verified_elasticsearch
@mock.patch('olympia.search.utils.helpers')
class TestIndexObjects(TestCase):
def test_index_objects(self, helpers_mock):
addon1 = addon_factory()
addon2 = addon_factory()
fake_extract = {
addon1.pk: mock.Mock(),
addon2.pk: mock.Mock(),
}
with mock.patch.object(
AddonIndexer, 'extract_document', lambda a: fake_extract[a.pk]
):
index_objects(
queryset=Addon.objects.filter(id__in=(addon1.pk, addon2.pk)),
indexer_class=AddonIndexer,
)
bulk_mock = helpers_mock.bulk
assert bulk_mock.call_count == 1
assert bulk_mock.call_args[0][1] == [
{
'_source': fake_extract[addon1.pk],
'_id': addon1.pk,
'_index': 'test_amo_addons',
},
{
'_source': fake_extract[addon2.pk],
'_id': addon2.pk,
'_index': 'test_amo_addons',
},
]
def test_index_objects_with_index(self, helpers_mock):
target_index = 'amazing_index'
addon1 = addon_factory()
addon2 = addon_factory()
fake_extract = {
addon1.pk: mock.Mock(),
addon2.pk: mock.Mock(),
}
with mock.patch.object(
AddonIndexer, 'extract_document', lambda a: fake_extract[a.pk]
):
index_objects(
queryset=Addon.objects.filter(id__in=(addon1.pk, addon2.pk)),
indexer_class=AddonIndexer,
index=target_index,
)
bulk_mock = helpers_mock.bulk
assert bulk_mock.call_count == 1
assert bulk_mock.call_args[0][1] == [
{
'_source': fake_extract[addon1.pk],
'_id': addon1.pk,
'_index': target_index,
},
{
'_source': fake_extract[addon2.pk],
'_id': addon2.pk,
'_index': target_index,
},
]
def METHOD_NAME(self, helpers_mock):
target_index = AddonIndexer.get_index_alias() # the default index
Reindexing.objects.create(
alias=target_index, old_index='old_index', new_index='new_index'
)
addon1 = addon_factory()
addon2 = addon_factory()
fake_extract = {
addon1.pk: mock.Mock(),
addon2.pk: mock.Mock(),
}
with mock.patch.object(
AddonIndexer, 'extract_document', lambda a: fake_extract[a.pk]
):
index_objects(
queryset=Addon.objects.filter(id__in=(addon1.pk, addon2.pk)),
indexer_class=AddonIndexer,
)
bulk_mock = helpers_mock.bulk
assert bulk_mock.call_count == 1
        # We're reindexing and didn't specify an index: index_objects() is going
# to index our objects on both the old and the new indices instead of
# the alias.
assert bulk_mock.call_args[0][1] == [
{
'_source': fake_extract[addon1.pk],
'_id': addon1.pk,
'_index': 'new_index',
},
{
'_source': fake_extract[addon1.pk],
'_id': addon1.pk,
'_index': 'old_index',
},
{
'_source': fake_extract[addon2.pk],
'_id': addon2.pk,
'_index': 'new_index',
},
{
'_source': fake_extract[addon2.pk],
'_id': addon2.pk,
'_index': 'old_index',
},
]
def test_index_objects_with_index_while_reindexing(self, helpers_mock):
target_index = 'amazing_index'
Reindexing.objects.create(
alias=target_index, old_index='old_index', new_index='new_index'
)
addon1 = addon_factory()
addon2 = addon_factory()
fake_extract = {
addon1.pk: mock.Mock(),
addon2.pk: mock.Mock(),
}
with mock.patch.object(
AddonIndexer, 'extract_document', lambda a: fake_extract[a.pk]
):
index_objects(
queryset=Addon.objects.filter(id__in=(addon1.pk, addon2.pk)),
indexer_class=AddonIndexer,
index=target_index,
)
bulk_mock = helpers_mock.bulk
assert bulk_mock.call_count == 1
# We're reindexing but we specified which index to use so it doesn't
# matter.
assert bulk_mock.call_args[0][1] == [
{
'_source': fake_extract[addon1.pk],
'_id': addon1.pk,
'_index': 'amazing_index',
},
{
'_source': fake_extract[addon2.pk],
'_id': addon2.pk,
'_index': 'amazing_index',
},
]
class TestUnindexObjects(ESTestCase):
def test_unindex_objects(self):
def _es_search_ids():
return [
o['_id'] for o in es.search(query={'match_all': {}})['hits']['hits']
]
es = get_es()
addon1 = addon_factory()
addon2 = addon_factory()
addon3 = addon_factory()
assert list(Addon.objects.all().values_list('id', flat=True)) == [
addon1.pk,
addon2.pk,
addon3.pk,
]
self.reindex(Addon)
assert es.count()['count'] == 3, _es_search_ids()
unindex_objects((addon1.id,), indexer_class=AddonIndexer)
self.refresh()
assert es.count()['count'] == 2, _es_search_ids()
unindex_objects((addon1.id, addon2.id), indexer_class=AddonIndexer)
self.refresh()
assert es.count()['count'] == 1, _es_search_ids() |
is runner enabled | # Copyright (c) 2015-2016 ACSONE SA/NV (<http://acsone.eu>)
# Copyright 2016 Camptocamp SA
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html)
import logging
from threading import Thread
import time
from odoo.service import server
from odoo.tools import config
try:
from odoo.addons.server_environment import serv_config
if serv_config.has_section("queue_job"):
queue_job_config = serv_config["queue_job"]
else:
queue_job_config = {}
except ImportError:
queue_job_config = config.misc.get("queue_job", {})
from .runner import QueueJobRunner, _channels
_logger = logging.getLogger(__name__)
START_DELAY = 5
# Here we monkey patch the Odoo server to start the job runner thread
# in the main server process (and not in forked workers). This is
# very easy to deploy as we don't need another startup script.
class QueueJobRunnerThread(Thread):
def __init__(self):
Thread.__init__(self)
self.daemon = True
self.runner = QueueJobRunner.from_environ_or_config()
def run(self):
# sleep a bit to let the workers start at ease
time.sleep(START_DELAY)
self.runner.run()
def stop(self):
self.runner.stop()
class WorkerJobRunner(server.Worker):
"""Jobrunner workers"""
def __init__(self, multi):
super().__init__(multi)
self.watchdog_timeout = None
self.runner = QueueJobRunner.from_environ_or_config()
self._recover = False
def sleep(self):
pass
def signal_handler(self, sig, frame): # pylint: disable=missing-return
_logger.debug("WorkerJobRunner (%s) received signal %s", self.pid, sig)
super().signal_handler(sig, frame)
self.runner.stop()
def process_work(self):
if self._recover:
_logger.info("WorkerJobRunner (%s) runner is reinitialized", self.pid)
self.runner = QueueJobRunner.from_environ_or_config()
self._recover = False
_logger.debug("WorkerJobRunner (%s) starting up", self.pid)
time.sleep(START_DELAY)
self.runner.run()
def signal_time_expired_handler(self, n, stack):
_logger.info(
"Worker (%d) CPU time limit (%s) reached.Stop gracefully and recover",
self.pid,
config["limit_time_cpu"],
)
self._recover = True
self.runner.stop()
runner_thread = None
def METHOD_NAME():
return not _channels().strip().startswith("root:0")
def _start_runner_thread(server_type):
global runner_thread
if not config["stop_after_init"]:
if METHOD_NAME():
_logger.info("starting jobrunner thread (in %s)", server_type)
runner_thread = QueueJobRunnerThread()
runner_thread.start()
else:
_logger.info(
"jobrunner thread (in %s) NOT started, "
"because the root channel's capacity is set to 0",
server_type,
)
orig_prefork__init__ = server.PreforkServer.__init__
orig_prefork_process_spawn = server.PreforkServer.process_spawn
orig_prefork_worker_pop = server.PreforkServer.worker_pop
orig_threaded_start = server.ThreadedServer.start
orig_threaded_stop = server.ThreadedServer.stop
def prefork__init__(server, app):
res = orig_prefork__init__(server, app)
server.jobrunner = {}
return res
def prefork_process_spawn(server):
orig_prefork_process_spawn(server)
if not hasattr(server, "jobrunner"):
# if 'queue_job' is not in server wide modules, PreforkServer is
# not initialized with a 'jobrunner' attribute, skip this
return
if not server.jobrunner and METHOD_NAME():
server.worker_spawn(WorkerJobRunner, server.jobrunner)
def prefork_worker_pop(server, pid):
res = orig_prefork_worker_pop(server, pid)
if not hasattr(server, "jobrunner"):
# if 'queue_job' is not in server wide modules, PreforkServer is
# not initialized with a 'jobrunner' attribute, skip this
return res
if pid in server.jobrunner:
server.jobrunner.pop(pid)
return res
def threaded_start(server, *args, **kwargs):
res = orig_threaded_start(server, *args, **kwargs)
_start_runner_thread("threaded server")
return res
def threaded_stop(server):
global runner_thread
if runner_thread:
runner_thread.stop()
res = orig_threaded_stop(server)
if runner_thread:
runner_thread.join()
runner_thread = None
return res
server.PreforkServer.__init__ = prefork__init__
server.PreforkServer.process_spawn = prefork_process_spawn
server.PreforkServer.worker_pop = prefork_worker_pop
server.ThreadedServer.start = threaded_start
server.ThreadedServer.stop = threaded_stop |
remove tmpdir | #!/usr/bin/env python
# coding: UTF-8
# Copyright (c) 2012-2016 Seafile Ltd.
import sys
import os
import tempfile
import shutil
import subprocess
import atexit
import optparse
cwd = os.getcwd()
####################
### Common helper functions
####################
def highlight(content, is_error=False):
'''Add ANSI color to content to get it highlighted on terminal'''
if is_error:
return '\x1b[1;31m%s\x1b[m' % content
else:
return '\x1b[1;32m%s\x1b[m' % content
def info(msg):
print(highlight('[INFO] ') + msg)
def exist_in_path(prog):
'''Test whether prog exists in system path'''
dirs = os.environ['PATH'].split(':')
for d in dirs:
if d == '':
continue
path = os.path.join(d, prog)
if os.path.exists(path):
return True
return False
def error(msg=None, usage=None):
if msg:
print(highlight('[ERROR] ') + msg)
if usage:
print(usage)
sys.exit(1)
def run(cmdline, cwd=None, env=None, suppress_stdout=False, suppress_stderr=False):
'''Like run_argv but specify a command line string instead of argv'''
info('running: %s' % cmdline)
with open(os.devnull, 'w') as devnull:
if suppress_stdout:
stdout = devnull
else:
stdout = sys.stdout
if suppress_stderr:
stderr = devnull
else:
stderr = sys.stderr
proc = subprocess.Popen(cmdline,
cwd=cwd,
stdout=stdout,
stderr=stderr,
env=env,
shell=True)
return proc.wait()
def must_mkdir(path):
'''Create a directory, exit on failure'''
try:
os.mkdir(path)
except OSError:
error('failed to create directory %s' % path)
def must_copy(src, dst):
'''Copy src to dst, exit on failure'''
try:
shutil.copy(src, dst)
except Exception as e:
error('failed to copy %s to %s: %s' % (src, dst, e))
def must_move(src, dst):
    '''Move src to dst, exit on failure'''
try:
shutil.move(src, dst)
except Exception as e:
error('failed to move %s to %s: %s' % (src, dst, e))
usage = '''\
Usage: gen-tarball.py <version>
version must be a local branch name and must be like "1.3" or "1.3.0".
'''
def parse_args():
parser = optparse.OptionParser()
parser.add_option('--version',
dest='version',
nargs=1,
help='version of the tarball')
parser.add_option('--branch',
dest='branch',
nargs=1,
help='which branch to generate the tarball')
usage = parser.format_help()
options, remain = parser.parse_args()
if remain or options.version == None or options.branch == None:
print(usage)
sys.exit(1)
return options.version, options.branch
def main():
    version, branch = parse_args()
if not exist_in_path('django-admin') and not exist_in_path('django-admin.py'):
error('django-admin scripts not found in PATH')
# Note: we double % to escape it in a format string
latest_commit_info = subprocess.getoutput('git log --format="%%H" %s -1' % branch)
# begin
tmpdir = tempfile.mkdtemp()
info('tmpdir is %s' % tmpdir)
def METHOD_NAME():
try:
shutil.rmtree(tmpdir)
except:
pass
atexit.register(METHOD_NAME)
os.chdir(tmpdir)
tarball_name = 'seahub-%s.tar.gz' % version
tmp_tarball = os.path.join(tmpdir, tarball_name)
cmd = 'git archive --prefix=seahub-%(version)s/ -o %(tarball)s %(branch)s' \
% dict(version=version, tarball=tmp_tarball, branch=branch)
if run(cmd, cwd=cwd) != 0:
error('failed to "git archive"')
# uncompress the tarball
if run('tar xf %s' % tmp_tarball) != 0:
error('failed to uncompress the tarball')
seahub_dir = os.path.join(tmpdir, 'seahub-%s' % version)
if run('./i18n.sh compile-all', cwd=seahub_dir) != 0:
error('failed to compile messages')
with open(os.path.join(seahub_dir, 'latest_commit'), 'w') as fp:
fp.write(latest_commit_info)
fp.write('\n')
if run('tar czvf %s seahub-%s' % (tarball_name, version)) != 0:
error('failed to generate tarball')
dst_tarball = os.path.join(cwd, tarball_name)
must_move(tmp_tarball, dst_tarball)
info('output is:\t%s' % dst_tarball)
if __name__ == '__main__':
main() |
default handler | # Copyright 2021-2023 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import logging
import os
import sys
from dataclasses import dataclass
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
import click
from vdk.api.plugin.hook_markers import hookimpl
from vdk.internal.builtin_plugins.run.job_context import JobContext
from vdk.internal.core.context import CoreContext
from vdk.internal.core.statestore import StoreKey
from vdk.internal.plugin.plugin import PluginRegistry
log = logging.getLogger(__name__)
@dataclass
class ParsedCall:
"""
Parsed Hook Call.
"""
def __init__(self, name: str, kwargs: Dict[str, Any]) -> None:
self.__dict__.update(kwargs)
self._name = name
def __repr__(self) -> str:
"""
:return: The hook call invocation as string
"""
d = self.__dict__.copy()
del d["_name"]
return f"<ParsedCall {self._name!r}(**{d!r})>"
def pluggy_result_to_string(outcome: Any) -> str:
try:
result = outcome.get_result()
except BaseException as e:
outcome.force_exception(e)
return ""
result_string = ""
for item in result:
result_string = f"\nJSON config override is {item}"
return result_string
class HookRecorder:
"""Record all hooks invocations.
This wraps all the hook calls/invocations, recording each call
before propagating the normal calls.
"""
def __init__(self, plugin_registry: PluginRegistry) -> None:
self._plugin_registry = plugin_registry
self.calls: List[ParsedCall] = []
def before(hook_name: str, hook_impls: Any, kwargs: Any) -> None:
print(
f"------>> About to call hook {hook_name}\n"
f" with args:\n"
f" {kwargs}.\n"
f" Hook Impl: {hook_impls}",
file=sys.stderr,
)
self.calls.append(ParsedCall(hook_name, kwargs))
def after(outcome: Any, hook_name: str, hook_impls: Any, kwargs: Any) -> None:
print(
f"<<------ Finished call hook {hook_name}\n"
f" with args:\n"
f" {kwargs}.\n"
f" Hook Impl: {hook_impls}\n",
f" Outcome: {pluggy_result_to_string(outcome)}",
file=sys.stderr,
)
self._undo_wrapping = (
self._plugin_registry.plugin_manager().add_hookcall_monitoring(
before, after
)
)
def finish_recording(self) -> None:
"""
Do not record hook calls anymore.
"""
self._undo_wrapping()
def get_calls(self) -> List[ParsedCall]:
"""
Get a list of currently record calls
"""
return [call for call in self.calls]
@click.command(help="Print hello and the CLI context.")
@click.pass_context
def hello(ctx: click.Context) -> None:
"""
Just prints hello for testing purposes.
"""
click.echo(f"Hello! Nice to meet you. Here are some nerdy details about me:")
import json
def METHOD_NAME(o: Any) -> Any:
from vdk.internal.core.context import CoreContext
from vdk.internal.core.config import Configuration
if (
isinstance(o, click.core.Context)
or isinstance(o, JobContext)
or isinstance(o, CoreContext)
or isinstance(o, Configuration)
):
return vars(o)
return str(o)
click.echo(
f"My context is {json.dumps(vars(ctx), default=METHOD_NAME, indent=4, sort_keys=True)}"
)
class DebugPlugins:
"""
Plugin which adds some debug functionalities
"""
def __init__(self) -> None:
self.when = 1
self.hook_recorder: Optional[HookRecorder] = None
@hookimpl
def vdk_start(
self, plugin_registry: PluginRegistry, command_line_args: List
) -> None:
"""
        Check if debug is needed and activate it if yes.
        We also check an environment variable.
        We do not use the vdk_configure hook, so that the recorder is triggered
        at the earliest possible moment.
"""
if "--debug-hooks" in command_line_args:
# a bit hacky but works for hidden options
command_line_args.remove("--debug-hooks")
self.hook_recorder = HookRecorder(plugin_registry)
if os.getenv("VDK_DEBUG_HOOKS_ENABLED", "no") == "yes":
self.hook_recorder = HookRecorder(plugin_registry)
def set_debug_hooks(self, ctx: click.Context, param: Any, value: Any):
self.hook_recorder = HookRecorder(ctx.obj.plugin_registry)
@hookimpl
def vdk_initialize(self, context: CoreContext) -> None:
plugin_registry = context.plugin_registry
log.debug(f"Plugins loaded\n: {plugin_registry}")
@hookimpl
def vdk_command_line(self, root_command: click.Group) -> None:
"""
        Modify command line arguments to add the debug option with a callback (instead of the approach above)
"""
root_command.add_command(hello)
self.add_debug_flag(root_command)
@staticmethod
def add_debug_flag(root_command):
def set_debug(ctx: click.Context, param: Any, value: Any) -> None:
if value and not ctx.resilient_parsing:
log.debug("Enabling debug")
core_context = cast(CoreContext, ctx.obj)
core_context.state.set(StoreKey[bool]("vdk.debug"), True)
debug_option = click.option(
"--debug",
"-D",
type=click.BOOL,
is_eager=True,
expose_value=False,
default=False,
is_flag=True,
callback=set_debug,
help="Run the command in debug mode",
)
run_cmd = cast(click.Command, root_command.get_command(None, "run"))
if run_cmd:
root_command.add_command(debug_option(run_cmd)) |
refresh | #
# Copyright 2018-2022 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import math
from fractions import Fraction
from PySide6 import QtCore, QtGui, QtWidgets
class Seekbar(QtWidgets.QWidget):
seek = QtCore.Signal(float)
step = QtCore.Signal(int)
SLIDER_TIMEBASE = 1000
SLIDER_TIMESCALE = 1.0 / SLIDER_TIMEBASE
def __init__(self, config):
super().__init__()
self._slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self._time_lbl = QtWidgets.QLabel()
self._time_lbl.setFont(QtGui.QFontDatabase.systemFont(QtGui.QFontDatabase.FixedFont))
fw_btn = QtWidgets.QToolButton()
fw_btn.setText(">")
bw_btn = QtWidgets.QToolButton()
bw_btn.setText("<")
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(bw_btn)
layout.addWidget(fw_btn)
layout.addWidget(self._slider)
layout.addWidget(self._time_lbl)
self._frame_index = 0
self._scene_duration = 0
self._framerate = Fraction(*config.get("framerate"))
self._slider.sliderMoved.connect(self._slider_moved)
self._slider.sliderPressed.connect(self._slider_pressed)
self._slider.sliderReleased.connect(self._slider_released)
self._slider_dragged = False
fw_btn.clicked.connect(self._step_fw)
bw_btn.clicked.connect(self._step_bw)
@QtCore.Slot(int)
def _slider_moved(self, value): # only user move
if not self._scene_duration:
return
self.seek.emit(value * self.SLIDER_TIMESCALE)
@QtCore.Slot()
def _slider_pressed(self):
self._slider_dragged = True
@QtCore.Slot()
def _slider_released(self):
self._slider_dragged = False
self.METHOD_NAME()
@QtCore.Slot()
def _step_fw(self):
self.step.emit(1)
@QtCore.Slot()
def _step_bw(self):
self.step.emit(-1)
def _get_time_lbl_text(self, frame_index, frame_time):
cur_time = "%02d:%02d" % divmod(frame_time, 60)
duration = "%02d:%02d" % divmod(self._scene_duration, 60)
return "%s / %s (%d @ %.4gHz)" % (cur_time, duration, frame_index, self._framerate)
def _adjust_time_label_size(self):
# Make the time label flexible again
self._time_lbl.setMinimumSize(0, 0)
self._time_lbl.setMaximumSize(0xFFFFFF, 0xFFFFFF)
# Set the label to its largest possible content (last frame)
last_frame_index = int(math.ceil(self._scene_duration * self._framerate))
text = self._get_time_lbl_text(last_frame_index, self._scene_duration)
self._time_lbl.setText(text)
# Probe the occupied size and make it fixed for the current scene
hint = self._time_lbl.sizeHint()
self._time_lbl.setFixedSize(hint)
@QtCore.Slot(dict)
def set_scene_metadata(self, cfg):
self._scene_duration = cfg["duration"]
self._framerate = Fraction(*cfg["framerate"])
self._slider.setRange(0, self._scene_duration * self.SLIDER_TIMEBASE)
self._adjust_time_label_size()
self.METHOD_NAME()
@QtCore.Slot(int, float)
def set_frame_time(self, frame_index, frame_time):
self._frame_index = frame_index
self.METHOD_NAME()
def METHOD_NAME(self):
t = self._frame_index / self._framerate
text = self._get_time_lbl_text(self._frame_index, t)
self._time_lbl.setText(text)
if not self._slider_dragged:
self._slider.setValue(int(t * self.SLIDER_TIMEBASE)) |
set callback | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Configurations for TensorFlow Debugger (TFDBG) command-line interfaces."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.platform import gfile
RL = debugger_cli_common.RichLine
class CLIConfig(object):
"""Client-facing configurations for TFDBG command-line interfaces."""
_CONFIG_FILE_NAME = ".tfdbg_config"
_DEFAULT_CONFIG = [
("graph_recursion_depth", 20),
("mouse_mode", True),
]
def __init__(self, config_file_path=None):
self._config_file_path = (config_file_path or
self._default_config_file_path())
self._config = collections.OrderedDict(self._DEFAULT_CONFIG)
if gfile.Exists(self._config_file_path):
config = self._load_from_file()
for key, value in config.items():
self._config[key] = value
self._save_to_file()
self._set_callbacks = {}
def get(self, property_name):
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
return self._config[property_name]
def set(self, property_name, property_val):
"""Set the value of a property.
    Supports limited property value types: `bool`, `int` and `str`.
Args:
property_name: Name of the property.
property_val: Value of the property. If the property has `bool` type and
        this argument has `str` type, the `str` value will be parsed as a `bool`.
Raises:
ValueError: if a `str` property_value fails to be parsed as a `bool`.
KeyError: if `property_name` is an invalid property name.
"""
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
orig_val = self._config[property_name]
if isinstance(orig_val, bool):
if isinstance(property_val, str):
if property_val.lower() in ("1", "true", "t", "yes", "y", "on"):
property_val = True
elif property_val.lower() in ("0", "false", "f", "no", "n", "off"):
property_val = False
else:
raise ValueError(
"Invalid string value for bool type: %s" % property_val)
else:
property_val = bool(property_val)
elif isinstance(orig_val, int):
property_val = int(property_val)
elif isinstance(orig_val, str):
property_val = str(property_val)
else:
raise TypeError("Unsupported property type: %s" % type(orig_val))
self._config[property_name] = property_val
self._save_to_file()
# Invoke set-callback.
if property_name in self._set_callbacks:
self._set_callbacks[property_name](self._config)
def METHOD_NAME(self, property_name, callback):
"""Set a set-callback for given property.
Args:
property_name: Name of the property.
callback: The callback as a `callable` of signature:
def cbk(config):
where config is the config after it is set to the new value.
The callback is invoked each time the set() method is called with the
matching property_name.
Raises:
KeyError: If property_name does not exist.
TypeError: If `callback` is not callable.
"""
if property_name not in self._config:
raise KeyError("%s is not a valid property name." % property_name)
if not callable(callback):
raise TypeError("The callback object provided is not callable.")
self._set_callbacks[property_name] = callback
def _default_config_file_path(self):
return os.path.join(os.path.expanduser("~"), self._CONFIG_FILE_NAME)
def _save_to_file(self):
try:
with gfile.Open(self._config_file_path, "w") as config_file:
json.dump(self._config, config_file)
except IOError:
pass
def summarize(self, highlight=None):
"""Get a text summary of the config.
Args:
highlight: A property name to highlight in the output.
Returns:
A `RichTextLines` output.
"""
lines = [RL("Command-line configuration:", "bold"), RL("")]
for name, val in self._config.items():
highlight_attr = "bold" if name == highlight else None
line = RL(" ")
line += RL(name, ["underline", highlight_attr])
line += RL(": ")
line += RL(str(val), font_attr=highlight_attr)
lines.append(line)
return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def _load_from_file(self):
try:
with gfile.Open(self._config_file_path, "r") as config_file:
config_dict = json.load(config_file)
config = collections.OrderedDict()
for key in sorted(config_dict.keys()):
config[key] = config_dict[key]
return config
except (IOError, ValueError):
# The reading of the config file may fail due to IO issues or file
# corruption. We do not want tfdbg to error out just because of that.
return dict() |
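# Illustrative usage sketch (not part of the class above): exercises get/set, the
# set-callback registration method defined above (METHOD_NAME) and summarize(), using
# the default property names from _DEFAULT_CONFIG. The config file path is a made-up
# example.
def _example_cli_config_usage(path="/tmp/.tfdbg_config_demo"):
  config = CLIConfig(config_file_path=path)
  config.METHOD_NAME("mouse_mode", lambda cfg: print("mouse_mode is now", cfg["mouse_mode"]))
  config.set("mouse_mode", "off")  # string values are parsed for bool properties
  print(config.get("graph_recursion_depth"))
  summary = config.summarize(highlight="mouse_mode")  # RichTextLines summary of the config
  return summary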
write | import logging
from odoo import _, api, fields, models
from odoo.exceptions import UserError
from odoo.addons.base_vat.models.res_partner import _eu_country_vat
CODE_SPAIN = "ES"
_logger = logging.getLogger(__name__)
class ResPartner(models.Model):
_inherit = "res.partner"
ine_code = fields.Char(
string="INE State Code",
compute="_compute_ine_code",
store=True,
)
@api.depends("nationality_id", "residence_state_id")
def _compute_ine_code(self):
for record in self:
if not record.nationality_id:
record.ine_code = False
elif record.nationality_id.code != CODE_SPAIN:
record.ine_code = record.nationality_id.code_alpha3
else:
if not record.residence_state_id:
record.ine_code = False
record.ine_code = record.residence_state_id.ine_code
def _check_enought_invoice_data(self):
self.ensure_one()
res = super(ResPartner, self)._check_enought_invoice_data()
if not res:
return res
if not self.country_id or not self.city or not (self.street or self.street2):
return False
if not self.vat:
if self.country_id.code == "ES":
return False
elif not self.aeat_identification:
return False
return True
def METHOD_NAME(self, vals):
res = super(ResPartner, self).METHOD_NAME(vals)
# REVIEW: Force Contrain vat
# https://github.com/odoo/odoo/issues/23242
for partner in self:
if vals.get("vat") or vals.get("country_id"):
country = (
self.env["res.country"].browse(vals.get("country_id"))
or partner.country_id
)
if country.code == "ES":
self.check_vat()
self._pms_check_unique_vat()
return res
@api.model
def create(self, vals):
records = super(ResPartner, self).create(vals)
# REVIEW: Force Contrain vat
# https://github.com/odoo/odoo/issues/23242
if vals.get("vat") and vals.get("country_id"):
country = self.env["res.country"].browse(vals.get("country_id"))
if country.code == "ES":
self.check_vat()
records._pms_check_unique_vat()
return records
# This function is a candidate to be moved to the module
# partner_vat_unique
def _pms_check_unique_vat(self):
for partner in self.filtered(lambda p: p.vat and p.country_id):
repeat_partner = self._get_repeat_partner(partner)
if bool(partner.vat) and not partner.parent_id and repeat_partner:
raise UserError(
_("The VAT number %s already exists in other contacts: %s")
% (
repeat_partner.vat,
repeat_partner.name,
)
)
def _get_repeat_partner(self, partner):
europe = self.env.ref("base.europe")
if not europe:
europe = self.env["res.country.group"].search(
[("name", "=", "Europe")], limit=1
)
partner_country_code = partner.commercial_partner_id.country_id.code
vat_country, vat_number = self._split_vat(partner.vat)
if europe and partner.country_id.id in europe.country_ids.ids:
vat_country = _eu_country_vat.get(vat_country, vat_country).upper()
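        # The stored VAT may or may not carry the ISO country prefix, so build
        # both representations and let the duplicate search match either one.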
vat_with_code = (
partner.vat
if partner_country_code.upper() == vat_country.upper()
else partner_country_code.upper() + partner.vat
)
vat_without_code = (
partner.vat
if partner_country_code.upper() != vat_country.upper()
else vat_number
)
domain = [
("company_id", "in", [False, partner.company_id.id]),
"|",
("vat", "=", vat_with_code),
("vat", "=", vat_without_code),
]
domain += [("id", "!=", partner.id), "!", ("id", "child_of", partner.id)]
return self.with_context(active_test=False).search(domain, limit=1)
def _missing_document(self, vals, partners=False):
res = super(ResPartner, self)._missing_document(vals)
if not res:
return res
if (
vals.get("aeat_identification") is False
or vals.get("aeat_identification") == ""
or (
"aeat_identification" not in vals
and (
any([not partner.aeat_identification for partner in partners])
if partners
else True
)
)
):
return True
return False
@api.constrains("country_id", "vat")
def update_vat_code_country(self):
if self.env.context.get("ignore_vat_update"):
return
for record in self:
country_id = record.country_id.id
vat = record.vat
if vat and country_id:
vat_with_code = record.fix_eu_vat_number(country_id, vat)
if country_id and vat != vat_with_code:
record.with_context({"ignore_vat_update": True}).METHOD_NAME(
{"vat": vat_with_code}
) |
test xcdecorator | # coding: utf-8
import sys
import abipy.data as abidata
import abipy.abilab as abilab
import abipy.abio.decorators as ideco
from abipy.core.testing import AbipyTest
from abipy.abio.factories import *
class DecoratorTest(AbipyTest):
def setUp(self):
# Si ebands
si_structure = abilab.Structure.from_file(abidata.cif_file("si.cif"))
self.si_ebands = ebands_input(si_structure, abidata.pseudos("14si.pspnc"), ecut=2, kppa=10)
# Reference input string. Used to test if decorators do not change the initial Input.
self.si_ebands_inpstr = str(self.si_ebands)
# NiO bands with PAW
nio_structure = abidata.structure_from_ucell("NiO")
self.nio_ebands = ebands_input(nio_structure, abidata.pseudos("28ni.paw", "8o.2.paw"),
ecut=2, pawecutdg=4, kppa=10)
self.nio_ebands_inpstr = str(self.nio_ebands)
def tearDown(self):
"""Testing if initial inputs are unchanged."""
assert all(not inp.decorators for inp in self.si_ebands)
assert self.si_ebands_inpstr == str(self.si_ebands)
assert all(not inp.decorators for inp in self.nio_ebands)
assert self.nio_ebands_inpstr == str(self.nio_ebands)
def validate_inp(self, inp, ndec=1):
# Hack needed because ecut is not in the pseudos.
inp.set_vars(ecut=3)
#v = inp.validate()
#if v.retcode != 0:
# raise RuntimeError(v.err)
#else:
# print("Valid input!")
# Test validity of individual datasets.
for dtset in inp.split_datasets():
v = dtset.abivalidate()
#assert dtset.decorators == inp.decorators
#assert len(dtset.decorators) == ndec
if v.retcode != 0:
raise RuntimeError("Wrong input. See {0}".format(v))
else:
print("Valid input!")
def test_spin_decorator(self):
"""Testing spin decorator."""
spinor_deco = ideco.SpinDecorator("spinor")
self.assert_msonable(spinor_deco)
print(spinor_deco)
new_inp = spinor_deco(self.si_ebands)
print(new_inp)
# kptopt is set to 4 if non-collinear magnetism and kptopt == 3 is not specified.
for dt in new_inp:
assert dt["nsppol"] == 1 and dt["nspinor"] == 2 and dt["kptopt"] == 4
#self.validate_inp(new_inp)
        # kptopt should not be changed if it's set to 3 and non-collinear magnetism
inp_with_kpt3 = self.si_ebands.deepcopy()
inp_with_kpt3.kptopt = 3
# FIXME: Here there's a bug because get should check the global variables!
#for dt in spinor_deco(inp_with_kpt3):
# assert dt["nsppol"] == 1 and dt["nspinor"] == 2 and dt["kptopt"] == 3
def test_smearing_decorator(self):
"""Testing electronic smearing decorator."""
smearing_deco = ideco.SmearingDecorator("fermi_dirac:0.1 eV")
self.assert_msonable(smearing_deco)
new_inp = smearing_deco(self.si_ebands)
self.validate_inp(new_inp)
def METHOD_NAME(self):
"""Testing XCdecorator."""
xc_deco = ideco.XcDecorator(17)
self.assert_msonable(xc_deco)
new_inp = xc_deco(self.si_ebands)
self.validate_inp(new_inp)
def test_ldau_decorators(self):
"""Testing LdaUDecorator."""
symbols_luj = dict(Ni=dict(l=2, u=5.0, j=0.5))
ldau_deco = ideco.LdaUDecorator(symbols_luj, usepawu=1, unit="eV")
self.assert_msonable(ldau_deco)
new_inp = ldau_deco(self.nio_ebands)
new_inp.set_vars(chkprim=0, ecut=3, pawecutdg=3)
print(new_inp)
self.validate_inp(new_inp)
#assert 0
# LDA+U only if PAW
with self.assertRaises(ldau_deco.Error):
ldau_deco(self.si_ebands)
def test_lexx_decorators(self):
"""Testing LexxDecorator."""
lexx_deco = ideco.LexxDecorator({"Ni": 2})
self.assert_msonable(lexx_deco)
new_inp = lexx_deco(self.nio_ebands)
new_inp.set_vars(chkprim=0, ecut=3, pawecutdg=3)
print(new_inp)
self.validate_inp(new_inp)
#assert 0
def test_new_with_decorators(self):
"""Testing AbinitInput.new_with_decorators."""
spinor_deco = ideco.SpinDecorator("spinor")
smearing_deco = ideco.SmearingDecorator("nosmearing")
new_inp = self.si_ebands.new_with_decorators(spinor_deco)
new_inp = self.si_ebands.new_with_decorators([spinor_deco, smearing_deco])
if __name__ == '__main__':
import unittest
unittest.main() |
main | #! /usr/bin/env python
########################################################################
# File : dirac-stager-monitor-file
# Author : Daniela Remenska
########################################################################
"""
Give monitoring information regarding a staging file uniquely identified with (LFN,SE)
- status
- last update
- jobs requesting this file to be staged
- SRM requestID
- pin expiry time
- pin length
Example:
$ dirac-stager-monitor-file.py /lhcb/LHCb/Collision12/FULL.DST/00020846/0005/0_1.full.dst GRIDKA-RDST
--------------------
LFN : /lhcb/LHCb/Collision12/FULL.DST/00020846/0005/0_1.full.dst
SE : GRIDKA-RDST
PFN : srm://gridka-dCache.fzk.de/pnfs/gridka.de/lhcb/LHCb/Collision12/FULL.DST/00020846/0005/0_1.full.dst
Status : StageSubmitted
LastUpdate: 2013-06-11 18:13:40
Reason : None
Jobs requesting this file to be staged: 48518896
------SRM staging request info--------------
SRM RequestID: -1768636375
SRM StageStatus: StageSubmitted
SRM StageRequestSubmitTime: 2013-06-11 18:13:38
SRM StageRequestCompletedTime: None
SRM PinExpiryTime: None
SRM PinLength: 43200
"""
from DIRAC.Core.Base.Script import Script
@Script()
def METHOD_NAME():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("LFN: LFN of the staging file")
Script.registerArgument("SE: Storage Element for the staging file")
Script.parseCommandLine(ignoreErrors=True)
from DIRAC import exit as DIRACExit, gLogger
lfn, se = Script.getPositionalArgs(group=True)
from DIRAC.StorageManagementSystem.Client.StorageManagerClient import StorageManagerClient
client = StorageManagerClient()
res = client.getCacheReplicas({"LFN": lfn, "SE": se})
if not res["OK"]:
gLogger.error(res["Message"])
cacheReplicaInfo = res["Value"]
if cacheReplicaInfo:
replicaID = list(cacheReplicaInfo)[0]
outStr = "\n--------------------"
outStr += f"\n{'LFN'.ljust(8)}: {cacheReplicaInfo[replicaID]['LFN'].ljust(100)}"
outStr += f"\n{'SE'.ljust(8)}: {cacheReplicaInfo[replicaID]['SE'].ljust(100)}"
outStr += f"\n{'PFN'.ljust(8)}: {cacheReplicaInfo[replicaID]['PFN'].ljust(100)}"
outStr += f"\n{'Status'.ljust(8)}: {cacheReplicaInfo[replicaID]['Status'].ljust(100)}"
outStr += f"\n{'LastUpdate'.ljust(8)}: {str(cacheReplicaInfo[replicaID]['LastUpdate']).ljust(100)}"
outStr += f"\n{'Reason'.ljust(8)}: {str(cacheReplicaInfo[replicaID]['Reason']).ljust(100)}"
resTasks = client.getTasks({"ReplicaID": replicaID})
if resTasks["OK"]:
# print resTasks['Message']
outStr += "\nJob IDs requesting this file to be staged:".ljust(8)
tasks = resTasks["Value"]
for tid in tasks.keys():
outStr += f" {tasks[tid]['SourceTaskID']} "
resStageRequests = client.getStageRequests({"ReplicaID": replicaID})
if not resStageRequests["OK"]:
gLogger.error(resStageRequests["Message"])
if resStageRequests["Records"]:
stageRequests = resStageRequests["Value"]
outStr += "\n------SRM staging request info--------------"
for info in stageRequests.values():
outStr += f"\n{'SRM RequestID'.ljust(8)}: {info['RequestID'].ljust(100)}"
outStr += f"\n{'SRM StageStatus'.ljust(8)}: {info['StageStatus'].ljust(100)}"
outStr += "\n{}: {}".format(
"SRM StageRequestSubmitTime".ljust(8),
str(info["StageRequestSubmitTime"]).ljust(100),
)
outStr += "\n{}: {}".format(
"SRM StageRequestCompletedTime".ljust(8),
str(info["StageRequestCompletedTime"]).ljust(100),
)
outStr += f"\n{'SRM PinExpiryTime'.ljust(8)}: {str(info['PinExpiryTime']).ljust(100)}"
outStr += f"\n{'SRM PinLength'.ljust(8)}: {str(info['PinLength']).ljust(100)} sec"
else:
outStr += "\nThere are no staging requests submitted to the site yet.".ljust(8)
else:
outStr = "\nThere is no such file requested for staging. Check for typo's!"
# Script.showHelp()
gLogger.notice(outStr)
DIRACExit(0)
if __name__ == "__main__":
METHOD_NAME() |
check uri | from __future__ import annotations
import urllib.parse
from collections.abc import Iterable, Mapping
from typing import Any, Literal, Optional, TypeVar, Union, get_args
from mopidy import exceptions
from mopidy.audio.constants import PlaybackState
from mopidy.types import (
DistinctField,
Query,
QueryValue,
SearchField,
TracklistField,
)
def get_literals(literal_type: Any) -> set[str]:
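    # Example (sketch): get_literals(Literal["a", "b"]) returns {"a", "b"};
    # a Union of Literal types is unwrapped recursively by the branches below.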
# Check if it's a union
if hasattr(literal_type, "__origin__") and literal_type.__origin__ is Union:
literals = set()
for arg in get_args(literal_type):
literals.update(get_literals(arg))
return literals
# Check if it's a literal
if hasattr(literal_type, "__origin__") and literal_type.__origin__ is Literal:
return set(get_args(literal_type))
raise ValueError("Provided type is neither a Union nor a Literal type.")
T = TypeVar("T")
PLAYBACK_STATES: set[str] = {ps.value for ps in PlaybackState}
FIELD_TYPES: dict[str, type] = {
"album": str,
"albumartist": str,
"any": Union[int, str],
"artist": str,
"comment": str,
"composer": str,
"date": str,
"disc_no": int,
"genre": str,
"musicbrainz_id": str,
"musicbrainz_albumid": str,
"musicbrainz_artistid": str,
"musicbrainz_trackid": str,
"name": str,
"performer": str,
"tlid": int,
"track_name": str,
"track_no": int,
"uri": str,
}
DISTINCT_FIELDS: dict[str, type] = {
x: FIELD_TYPES[x] for x in get_literals(DistinctField)
}
SEARCH_FIELDS: dict[str, type] = {x: FIELD_TYPES[x] for x in get_literals(SearchField)}
TRACKLIST_FIELDS: dict[str, type] = {
x: FIELD_TYPES[x] for x in get_literals(TracklistField) - {"tlid"}
}
# TODO: _check_iterable(check, msg, **kwargs) + [check(a) for a in arg]?
def _check_iterable(
arg,
msg,
**kwargs: Any,
) -> None:
"""Ensure we have an iterable which is not a string or an iterator"""
if isinstance(arg, str):
raise exceptions.ValidationError(msg.format(arg=arg, **kwargs))
if not isinstance(arg, Iterable):
raise exceptions.ValidationError(msg.format(arg=arg, **kwargs))
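    # For an iterator, iter() returns the object itself, so this rejects
    # single-pass iterables (e.g. generators) that cannot be re-iterated.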
if iter(arg) is iter(arg):
raise exceptions.ValidationError(msg.format(arg=arg, **kwargs))
def check_choice(
arg: T,
choices: Iterable[T],
msg: str = "Expected one of {choices}, not {arg!r}",
) -> None:
if arg not in choices:
raise exceptions.ValidationError(msg.format(arg=arg, choices=tuple(choices)))
def check_boolean(
arg: bool,
msg: str = "Expected a boolean, not {arg!r}",
) -> None:
check_instance(arg, bool, msg=msg)
def check_instance(
arg: T,
cls: type[T],
msg: str = "Expected a {name} instance, not {arg!r}",
) -> None:
if not isinstance(arg, cls):
raise exceptions.ValidationError(msg.format(arg=arg, name=cls.__name__))
def check_instances(
arg: Iterable[Any],
cls: type,
msg: str = "Expected a list of {name}, not {arg!r}",
) -> None:
_check_iterable(arg, msg, name=cls.__name__)
if not all(isinstance(instance, cls) for instance in arg):
raise exceptions.ValidationError(msg.format(arg=arg, name=cls.__name__))
def check_integer(
arg: int,
min: Optional[int] = None,
max: Optional[int] = None,
) -> None:
if not isinstance(arg, int):
raise exceptions.ValidationError(f"Expected an integer, not {arg!r}")
if min is not None and arg < min:
raise exceptions.ValidationError(
f"Expected number larger or equal to {min}, not {arg!r}"
)
if max is not None and arg > max:
raise exceptions.ValidationError(
f"Expected number smaller or equal to {max}, not {arg!r}"
)
def check_query(
arg: Union[Query[SearchField], Query[TracklistField]],
fields: Optional[Iterable[str]] = None,
) -> None:
if fields is None:
fields = SEARCH_FIELDS.keys()
# TODO: normalize name -> track_name
# TODO: normalize value -> [value]
# TODO: normalize blank -> [] or just remove field?
if not isinstance(arg, Mapping):
raise exceptions.ValidationError(f"Expected a query dictionary, not {arg!r}")
for key, value in arg.items():
check_choice(
key,
fields,
msg="Expected query field to be one of {choices}, not {arg!r}",
)
msg = 'Expected "{key}" to be list of strings, not {arg!r}'
_check_iterable(value, msg, key=key)
[_check_query_value(key, v, msg) for v in value]
def _check_query_value(
key: Union[DistinctField, SearchField, TracklistField],
arg: QueryValue,
msg: str,
) -> None:
if not isinstance(arg, str) or not arg.strip():
raise exceptions.ValidationError(msg.format(arg=arg, key=key))
def METHOD_NAME(
arg: str,
msg="Expected a valid URI, not {arg!r}",
) -> None:
if not isinstance(arg, str):
raise exceptions.ValidationError(msg.format(arg=arg))
if urllib.parse.urlparse(arg).scheme == "":
raise exceptions.ValidationError(msg.format(arg=arg))
def check_uris(
arg: Iterable[str],
msg="Expected a list of URIs, not {arg!r}",
) -> None:
_check_iterable(arg, msg)
[METHOD_NAME(a, msg) for a in arg] |
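# Example usage (sketch): each helper raises exceptions.ValidationError on bad input.
#   check_integer(5, min=0, max=10)
#   check_query({"artist": ["Beatles"]})
#   check_uris(["spotify:track:xyz"])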
get top cryptocurrencies | from bs4 import BeautifulSoup
import requests
class CoinMarketCap:
"""
Create an instance of `CoinMarketCap` class
```python
crypto = CoinMarketCap()
```
| Method | Details |
| ---------------------------- | -------------------------------------------------------- |
| `get_top_cryptocurrencies()` | Fetches and returns data about the top cryptocurrencies. |
"""
def __init__(self):
"""
Initialize the CoinMarketCap class by fetching data from the CoinMarketCap website.
"""
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win 64 ; x64) Apple WeKit /537.36(KHTML , like Gecko) Chrome/80.0.3987.162 Safari/537.36"
}
url = "https://coinmarketcap.com/"
html_text = requests.get(url, headers=headers).text
self.soup = BeautifulSoup(html_text, "lxml")
def METHOD_NAME(self):
"""
A list of dictionaries containing details of the top cryptocurrencies.\n
```python
crypto = CoinMarketCap()
```
Example output:
```python
[
{
"Name": "Bitcoin",
"Symbol": "BTC",
"Link": "https://coinmarketcap.com/...",
"Price": "$65,432.10",
"1h%": "-1.23% (Down)",
"24h%": "+0.45% (Up)",
"7d%": "-2.15% (Down)",
"MarketCap": "$1.23T",
"Volume(24h)": "$12.5B",
"Circulating Supply": "18.7M BTC"
},
...
]
"""
try:
cryptocurrency = []
container = self.soup.find("div", {"class": "sc-4c520df-2 kGWYlx"})
i = 0
tbody = container.find("tbody")
for items in tbody.find_all("tr"):
i += 1
if i == 11:
break
j = 0
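                # Column index j maps onto the table columns read below:
                # 3=name/symbol/link, 4=price, 5=1h %, 6=24h %, 7=7d %,
                # 8=market cap, 9=24h volume, 10=circulating supply.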
for item in items.find_all("td"):
j += 1
if j == 1 or j == 2:
continue
elif j == 3:
name = item.find("p", {"class": "sc-4984dd93-0 kKpPOn"}).text
symbol = item.find(
"p", {"class": "sc-4984dd93-0 iqdbQL coin-item-symbol"}
).text
link = (
"https://coinmarketcap.com/"
+ item.find("a", href=True)["href"]
)
elif j == 4:
price = item.text
elif j == 5:
if item.find("span", {"class": "icon-Caret-down"}) is not None:
market = "Down"
else:
market = "Up"
hour = item.text + f" ({market})"
elif j == 6:
if item.find("span", {"class": "icon-Caret-down"}) is not None:
market = "Down"
else:
market = "Up"
hour_24 = item.text + f" ({market})"
elif j == 7:
if item.find("span", {"class": "icon-Caret-down"}) is not None:
market = "Down"
else:
market = "Up"
day = item.text + f" ({market})"
elif j == 8:
marketcap = item.find(
"span", {"class": "sc-f8982b1f-1 bOsKfy"}
).text
elif j == 9:
volume = item.find(
"p", {"class": "sc-4984dd93-0 jZrMxO font_weight_500"}
).text
elif j == 10:
supply = item.find("p", {"class": "sc-4984dd93-0 WfVLk"}).text
data = {
"Name": name,
"Symbol": symbol,
"Link": link,
"Price": price,
"1h%": hour,
"24h%": hour_24,
"7d%": day,
"MarketCap": marketcap,
"Volume(24h)": volume,
"Circulating Supply": supply,
}
cryptocurrency.append(data)
return cryptocurrency
        except Exception:
            return None
populate symbol vector | # IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: src/build/tools/ebmc/symbols.py $
#
# OpenPOWER HostBoot Project
#
# Contributors Listed Below - COPYRIGHT 2020
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
from enum import Enum
"""This file contains classes to read HB symbols file and provide a lookup
mechanism from "src/usr/errl/plugins/symbols.H"
These classes are used by the parser ErrlUserDetailsParserBackTrace in b0100.py
"""
"""Enum used to verify if hbSymbols are built correclty.
"""
class validation(Enum):
TYPE = 0x8
NAME = 0x4
LENGTH = 0x2
ADDRESS = 0x1
"""This class contains the data from a line of the HB syms file.
"""
class hbSymbol:
# Data from a line in hbicore.syms
address = 0
length = 0
name = ""
validationBits = 0x0
# Char in column 1 of hbicore.syms
type = ""
"""Checks to see if all four variables have been set.
If so, symbol is considered valid
@returns: true if symbol is valid, else false
"""
def isValid(self):
valid = validation.TYPE.value|validation.ADDRESS.value|validation.LENGTH.value|validation.NAME.value
return valid == self.validationBits
"""Sets address value for symbol and updates validationBits
"""
def setAddress(self, pszAddress):
self.address = int(pszAddress, 16)
self.validationBits |= validation.ADDRESS.value
"""Sets length value for symbol and updates validationBits
"""
def setLength(self, pszLength):
self.length = int(pszLength, 16)
self.validationBits |= validation.LENGTH.value
"""Sets type value for symbol and updates validationBits
"""
def setType(self, type):
self.type = int(type, 16)
self.validationBits |= validation.TYPE.value
"""Sets name for symbol and updates validationBits
"""
def setName(self, pszName):
# remove trailing whitespace
self.name = pszName.rstrip()
if self.name != "":
self.validationBits |= validation.NAME.value
"""Container for hbSymbols with methods to initialize and access.
"""
class hbSymbolTable:
fPopulated = False
pFileName = ""
vecSymbols = []
"""Read the symbol file, return zero for success
@param filename: file to read from
@returns: an int, 0 for success
"""
def readSymbols(self, filename):
self.vecSymbols.clear()
self.fPopulated = False
self.pFileName = ""
try:
# if file is opened correctly, set file name
with open(filename) as f:
self.pFileName = filename
except IOError:
return 2
# populate symbol vector with data from file
self.METHOD_NAME()
return 0
"""Read the file to populate the symbol vector
@returns: an int, 0 for success
"""
def METHOD_NAME(self):
#return int
if self.pFileName == "":
return 2
try:
# if file is opened correctly, populate symbol table
with open(self.pFileName) as f:
# read each line of file
for x in f.readlines():
# only use function symbols
if x[0] == 'F':
pSymbol = hbSymbol()
pch = x.split(",", 4)
k = 0
for y in pch:
if k == 0:
pSymbol.setType(y)
elif k == 1:
pSymbol.setAddress(y)
elif k == 3:
pSymbol.setLength(y)
elif k == 4:
pSymbol.setName(y)
k += 1
# add symbol to vector if all variables were set
if pSymbol.isValid():
self.vecSymbols.append(pSymbol)
except IOError:
return 2
# Ensure vector is sorted
self.vecSymbols.sort(key=lambda x: x.address, reverse=False)
self.fPopulated = True
return 0
"""Given the address, find the nearest symbol name
@param address: Address to find the nearest match for
@returns: a string of the symbol name
"""
def nearestSymbol(self, address):
rc = self.locateSymbol(int(address, 16))
# match not found
if rc < 0:
return None
return self.vecSymbols[rc].name
"""Given the address, use binary search to find the index of nearest or exact match
@param addr: Address to find match for
@returns: index of nearest or exact match, -1 if nothing found
"""
def locateSymbol(self, addr):
low = 0
high = len(self.vecSymbols) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
# Address is greater than mid point
if self.vecSymbols[mid].address < addr:
# check if it is nearest match
if (self.vecSymbols[mid].address + self.vecSymbols[mid].length) > addr:
return mid
# not a match, ignore right half
else:
low = mid + 1
# Address is less than mid point, ignore left half
elif self.vecSymbols[mid].address > addr:
high = mid - 1
# Found exact match
else:
return mid
# No matches were found
return -1
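# Example usage (sketch; assumes an hbicore.syms file is available):
#   table = hbSymbolTable()
#   if table.readSymbols("hbicore.syms") == 0:
#       print(table.nearestSymbol("40000"))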
|
test delete customer gateway | from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, CustomerGateway
class TestDescribeCustomerGateways(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGatewaySet>
<item>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>available</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</item>
</customerGatewaySet>
</DescribeCustomerGatewaysResponse>
"""
def test_get_all_customer_gateways(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_customer_gateways(
'cgw-b4dc3961',
filters=OrderedDict([('state', ['pending', 'available']),
('ip-address', '12.1.2.3')]))
self.assert_request_parameters({
'Action': 'DescribeCustomerGateways',
'CustomerGatewayId.1': 'cgw-b4dc3961',
'Filter.1.Name': 'state',
'Filter.1.Value.1': 'pending',
'Filter.1.Value.2': 'available',
'Filter.2.Name': 'ip-address',
'Filter.2.Value.1': '12.1.2.3'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(len(api_response), 1)
self.assertIsInstance(api_response[0], CustomerGateway)
self.assertEqual(api_response[0].id, 'cgw-b4dc3961')
class TestCreateCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<customerGateway>
<customerGatewayId>cgw-b4dc3961</customerGatewayId>
<state>pending</state>
<type>ipsec.1</type>
<ipAddress>12.1.2.3</ipAddress>
<bgpAsn>65534</bgpAsn>
<tagSet/>
</customerGateway>
</CreateCustomerGatewayResponse>
"""
def test_create_customer_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_customer_gateway(
'ipsec.1', '12.1.2.3', 65534)
self.assert_request_parameters({
'Action': 'CreateCustomerGateway',
'Type': 'ipsec.1',
'IpAddress': '12.1.2.3',
'BgpAsn': 65534},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, CustomerGateway)
self.assertEquals(api_response.id, 'cgw-b4dc3961')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.type, 'ipsec.1')
self.assertEquals(api_response.ip_address, '12.1.2.3')
self.assertEquals(api_response.bgp_asn, 65534)
class TestDeleteCustomerGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteCustomerGatewayResponse>
"""
def METHOD_NAME(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961')
self.assert_request_parameters({
'Action': 'DeleteCustomerGateway',
'CustomerGatewayId': 'cgw-b4dc3961'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
if __name__ == '__main__':
unittest.main() |
add to queue | from decouple import config
from confluent_kafka import Consumer
from datetime import datetime
import os as _os
import queue
import requests
import json
from time import time, sleep
#decryption = config('encrypted', cast=bool)
decryption = False
MessageCodec = None
max_retry=3
Fetch, FetchEvent, PageEvent, GraphQL = None, None, None, None
if decryption:
from msgcodec.msgcodec import MessageCodec
from msgcodec.messages import Fetch, FetchEvent, PageEvent, GraphQL
print("Enabled decryption mode")
def _quickwit_ingest(index, data_list, retry=0):
try:
res = requests.post(f'http://localhost:7280/api/v1/{index}/ingest', data=__jsonify_data(data_list, index))
except requests.exceptions.ConnectionError as e:
retry += 1
assert retry <= max_retry, f'[ENDPOINT CONNECTION FAIL] Failed to connect to endpoint http://localhost:7280/api/v1/{index}/ingest\n{e}\n'
sleep(5*retry)
print(f"[ENDPOINT ERROR] Failed to connect to endpoint http://localhost:7280/api/v1/{index}/ingest, retrying in {5*retry} seconds..\n")
return _quickwit_ingest(index, data_list, retry=retry)
return res
def __jsonify_data(data_list, msg_type):
res = list()
i = 0
for data in data_list:
if msg_type == 'fetchevent':
try:
_tmp = data['request']
if _tmp != '':
data['request'] = json.loads(_tmp)
else:
data['request'] = {}
_tmp = data['response']
if _tmp != '':
data['response'] = json.loads(_tmp)
if data['response']['body'][:1] == '{' or data['response']['body'][:2] == '[{':
data['response']['body'] = json.loads(data['response']['body'])
else:
data['response'] = {}
except Exception as e:
print(f'Error {e}\tWhile decoding fetchevent\nEvent: {data}\n')
elif msg_type == 'graphql':
try:
_tmp = data['variables']
if _tmp != '':
data['variables'] = json.loads(_tmp)
else:
data['variables'] = {}
_tmp = data['response']
if _tmp != '':
data['response'] = json.loads(_tmp)
else:
data['response'] = {}
except Exception as e:
print(f'Error {e}\tWhile decoding graphql\nEvent: {data}\n')
i += 1
res.append(json.dumps(data))
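    # Quickwit's ingest endpoint expects newline-delimited JSON, one document per line.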
return '\n'.join(res)
def message_type(message):
if decryption:
        if isinstance(message, FetchEvent) or isinstance(message, Fetch):
return 'fetchevent'
elif isinstance(message, PageEvent):
return 'pageevent'
elif isinstance(message, GraphQL):
return 'graphql'
else:
return 'default'
else:
if 'loaded' in message.keys():
return 'pageevent'
elif 'variables' in message.keys():
return 'graphql'
elif 'status' in message.keys():
return 'fetchevent'
else:
return 'default'
class KafkaFilter():
def __init__(self):
kafka_sources = config('KAFKA_SERVER')
topic = config('QUICKWIT_TOPIC')
fetchevent_maxsize = config('fetch_maxsize', default=100, cast=int)
graphql_maxsize = config('graphql_maxsize', default=100, cast=int)
pageevent_maxsize = config('pageevent_maxsize', default=100, cast=int)
if decryption:
self.codec = MessageCodec()
self.consumer = Consumer({
"security.protocol": "SSL",
"bootstrap.servers": kafka_sources,
"group.id": config("group_id"),
"auto.offset.reset": "earliest",
"enable.auto.commit":False
})
else:
self.consumer = Consumer({
"security.protocol": "SSL",
"bootstrap.servers": kafka_sources,
"group.id": config("group_id"),
"auto.offset.reset": "earliest",
#value_deserializer=lambda m: json.loads(m.decode('utf-8')),
"enable.auto.commit": False
})
self.consumer.subscribe([topic])
self.queues = {'fetchevent': queue.Queue(fetchevent_maxsize),
'graphql': queue.Queue(graphql_maxsize),
'pageevent': queue.Queue(pageevent_maxsize)
}
def METHOD_NAME(self, message):
associated_queue = message_type(message)
if associated_queue == 'default':
return
if self.queues[associated_queue].full():
self.flush_to_quickwit()
self.queues[associated_queue].put(message)
def flush_to_quickwit(self):
for queue_name, _queue in self.queues.items():
_list = list()
unix_timestamp = int(datetime.now().timestamp())
while not _queue.empty():
msg = _queue.get()
if decryption:
value = msg.__dict__
else:
value = dict(msg)
value['insertion_timestamp'] = unix_timestamp
if queue_name == 'fetchevent' and 'message_id' not in value.keys():
value['message_id'] = 0
_list.append(value)
if len(_list) > 0:
_quickwit_ingest(queue_name, _list)
self.consumer.commit()
def run(self):
_tmp_previous = None
repeated = False
while True:
msg = self.consumer.poll(1.0)
if msg is None:
continue
if msg.error():
print(f'[Consumer error] {msg.error()}')
continue
value = json.loads(msg.value().decode('utf-8'))
if decryption:
messages = self.codec.decode_detailed(value)
else:
messages = [value]
if _tmp_previous is None:
_tmp_previous = messages
if type(messages)==list:
for message in messages:
self.METHOD_NAME(message)
else:
self.METHOD_NAME(messages)
elif _tmp_previous != messages:
if type(messages)==list:
for message in messages:
self.METHOD_NAME(message)
else:
self.METHOD_NAME(messages)
_tmp_previous = messages
repeated = False
elif not repeated:
repeated = True
if __name__ == '__main__':
layer = KafkaFilter()
layer.run() |
test cityscapes stdc seg75 train dataloader | import unittest
from typing import Type
import pkg_resources
import yaml
from torch.utils.data import DataLoader, Dataset
from super_gradients.training.dataloaders.dataloaders import (
cityscapes_train,
cityscapes_val,
cityscapes_stdc_seg50_train,
cityscapes_stdc_seg50_val,
cityscapes_stdc_seg75_val,
cityscapes_ddrnet_train,
cityscapes_regseg48_val,
cityscapes_regseg48_train,
cityscapes_ddrnet_val,
cityscapes_stdc_seg75_train,
get,
)
from super_gradients.training.datasets.segmentation_datasets.cityscape_segmentation import CityscapesDataset, CityscapesConcatDataset
class CityscapesDatasetTest(unittest.TestCase):
def _cityscapes_dataset_params(self):
default_config_path = pkg_resources.resource_filename("super_gradients.recipes", "dataset_params/cityscapes_dataset_params.yaml")
with open(default_config_path, "r") as file:
dataset_params = yaml.safe_load(file)
return dataset_params
def _cityscapes_al_dataset_params(self):
default_config_path = pkg_resources.resource_filename("super_gradients.recipes", "dataset_params/cityscapes_al_dataset_params.yaml")
with open(default_config_path, "r") as file:
dataset_params = yaml.safe_load(file)
return dataset_params
def dataloader_tester(self, dl: DataLoader, dataset_cls: Type[Dataset] = CityscapesDataset):
self.assertTrue(isinstance(dl, DataLoader))
self.assertTrue(isinstance(dl.dataset, dataset_cls))
it = iter(dl)
for _ in range(10):
next(it)
def test_train_dataset_creation(self):
dataset_params = self._cityscapes_dataset_params()
train_dataset = CityscapesDataset(**dataset_params["train_dataset_params"])
for i in range(10):
image, mask = train_dataset[i]
def test_al_train_dataset_creation(self):
dataset_params = self._cityscapes_al_dataset_params()
train_dataset = CityscapesConcatDataset(**dataset_params["train_dataset_params"])
for i in range(10):
image, mask = train_dataset[i]
def test_val_dataset_creation(self):
dataset_params = self._cityscapes_dataset_params()
val_dataset = CityscapesDataset(**dataset_params["val_dataset_params"])
for i in range(10):
image, mask = val_dataset[i]
def test_cityscapes_train_dataloader(self):
dl_train = cityscapes_train()
self.dataloader_tester(dl_train)
def test_cityscapes_al_train_dataloader(self):
dataset_params = self._cityscapes_al_dataset_params()
# Same dataloader creation as in `train_from_recipe`
dl_train = get(
name=None,
dataset_params=dataset_params["train_dataset_params"],
dataloader_params=dataset_params["train_dataloader_params"],
)
self.dataloader_tester(dl_train, dataset_cls=CityscapesConcatDataset)
def test_cityscapes_val_dataloader(self):
dl_val = cityscapes_val()
self.dataloader_tester(dl_val)
def test_cityscapes_stdc_seg50_train_dataloader(self):
dl_train = cityscapes_stdc_seg50_train()
self.dataloader_tester(dl_train)
def test_cityscapes_stdc_seg50_val_dataloader(self):
dl_val = cityscapes_stdc_seg50_val()
self.dataloader_tester(dl_val)
def METHOD_NAME(self):
dl_train = cityscapes_stdc_seg75_train()
self.dataloader_tester(dl_train)
def test_cityscapes_stdc_seg75_val_dataloader(self):
dl_val = cityscapes_stdc_seg75_val()
self.dataloader_tester(dl_val)
def test_cityscapes_regseg48_train_dataloader(self):
dl_train = cityscapes_regseg48_train()
self.dataloader_tester(dl_train)
def test_cityscapes_regseg48_val_dataloader(self):
dl_val = cityscapes_regseg48_val()
self.dataloader_tester(dl_val)
def test_cityscapes_ddrnet_train_dataloader(self):
dl_train = cityscapes_ddrnet_train()
self.dataloader_tester(dl_train)
def test_cityscapes_ddrnet_val_dataloader(self):
dl_val = cityscapes_ddrnet_val()
self.dataloader_tester(dl_val)
if __name__ == "__main__":
unittest.main() |
download failed | from Components.Task import PythonTask, Task, Job, job_manager as JobManager, Condition
from Tools.Directories import fileExists
from enigma import eTimer
from os import path
from shutil import rmtree, copy2, move
class DeleteFolderTask(PythonTask):
def openFiles(self, fileList):
self.fileList = fileList
def work(self):
print("[DeleteFolderTask] files ", self.fileList)
errors = []
try:
rmtree(self.fileList)
except Exception as e:
errors.append(e)
if errors:
raise errors[0]
class CopyFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Copying files"))
cmdline = 'cp -Rf "%s" "%s"' % (srcfile, destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class MoveFileJob(Job):
def __init__(self, srcfile, destfile, name):
Job.__init__(self, _("Moving files"))
cmdline = 'mv -f "%s" "%s"' % (srcfile, destfile)
AddFileProcessTask(self, cmdline, srcfile, destfile, name)
class AddFileProcessTask(Task):
def __init__(self, job, cmdline, srcfile, destfile, name):
Task.__init__(self, job, name)
self.setCmdline(cmdline)
self.srcfile = srcfile
self.destfile = destfile
self.ProgressTimer = eTimer()
self.ProgressTimer.callback.append(self.ProgressUpdate)
def ProgressUpdate(self):
if self.srcsize <= 0 or not fileExists(self.destfile, 'r'):
return
self.setProgress(int((path.getsize(self.destfile) / float(self.srcsize)) * 100))
self.ProgressTimer.start(5000, True)
def prepare(self):
if fileExists(self.srcfile, 'r'):
self.srcsize = path.getsize(self.srcfile)
self.ProgressTimer.start(5000, True)
def afterRun(self):
self.setProgress(100)
self.ProgressTimer.stop()
class DownloadProcessTask(Job):
def __init__(self, url, filename, file, **kwargs):
Job.__init__(self, _("%s") % file)
DownloadTask(self, url, filename, **kwargs)
class DownloaderPostcondition(Condition):
def check(self, task):
return task.returncode == 0
def getErrorMessage(self, task):
return self.error_message
class DownloadTask(Task):
def __init__(self, job, url, path, **kwargs):
self.kwargs = kwargs
Task.__init__(self, job, _("Downloading"))
self.postconditions.append(DownloaderPostcondition())
self.job = job
self.url = url.decode() if isinstance(url, bytes) else url
self.path = path
self.error_message = ""
self.last_recvbytes = 0
self.error_message = None
self.download = None
self.aborted = False
def run(self, callback):
from Tools.Downloader import DownloadWithProgress
self.callback = callback
self.download = DownloadWithProgress(self.url, self.path, **self.kwargs)
self.download.addProgress(self.download_progress)
self.download.addEnd(self.download_finished)
self.download.addError(self.METHOD_NAME)
self.download.start()
print("[DownloadTask] downloading", self.url, "to", self.path)
def abort(self):
print("[DownloadTask] aborting", self.url)
if self.download:
self.download.stop()
self.aborted = True
def download_progress(self, recvbytes, totalbytes):
if (recvbytes - self.last_recvbytes) > 100000: # anti-flicker
self.progress = int(100 * (float(recvbytes) / float(totalbytes)))
if (((float(totalbytes) / 1024) / 1024) / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%s of %s GB") % (str(round((((float(recvbytes) / 1024) / 1024) / 1024), 2)), str(round((((float(totalbytes) / 1024) / 1024) / 1024), 2)))
elif ((float(totalbytes) / 1024) / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%s of %s MB") % (str(round(((float(recvbytes) / 1024) / 1024), 2)), str(round(((float(totalbytes) / 1024) / 1024), 2)))
elif (totalbytes / 1024) >= 1:
self.name = _("Downloading") + ' ' + _("%d of %d KB") % (recvbytes / 1024, totalbytes / 1024)
else:
self.name = _("Downloading") + ' ' + _("%d of %d Bytes") % (recvbytes, totalbytes)
self.last_recvbytes = recvbytes
def METHOD_NAME(self, failure_instance=None, error_message=""):
self.error_message = error_message
if error_message == "" and failure_instance is not None:
self.error_message = failure_instance.getErrorMessage()
Task.processFinished(self, 1)
def download_finished(self, string=""):
if self.aborted:
self.finish(aborted=True)
else:
Task.processFinished(self, 0)
def copyFiles(fileList, name):
for src, dst in fileList:
if path.isdir(src) or int(path.getsize(src)) / 1000 / 1000 > 100:
JobManager.AddJob(CopyFileJob(src, dst, name))
else:
copy2(src, dst)
def moveFiles(fileList, name):
for src, dst in fileList:
if path.isdir(src) or int(path.getsize(src)) / 1000 / 1000 > 100:
JobManager.AddJob(MoveFileJob(src, dst, name))
else:
move(src, dst)
def deleteFiles(fileList, name):
job = Job(_("Deleting files"))
task = DeleteFolderTask(job, name)
task.openFiles(fileList)
JobManager.AddJob(job)
def downloadFile(url, file_name, sel, **kwargs):
JobManager.AddJob(DownloadProcessTask(url, file_name, sel, **kwargs)) |
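# Example (sketch; hypothetical URL and paths): queue a background download job.
#   downloadFile("http://example.com/pkg.ipk", "/tmp/pkg.ipk", "pkg.ipk")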
method | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vpn-connection shared-key show",
)
class Show(AAZCommand):
"""Retrieve a VPN connection shared key.
:example: View the shared key of a connection.
az network vpn-connection shared-key show -g MyResourceGroup --connection-name MyConnection
:example: Retrieve a VPN connection shared key.
az network vpn-connection shared-key show --connection-name MyConnection --resource-group MyResourceGroup --subscription MySubscription
"""
_aaz_info = {
"version": "2018-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/connections/{}/sharedkey", "2018-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.connection_name = AAZStrArg(
options=["--connection-name"],
help="Connection name.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.VirtualNetworkGatewayConnectionsGetSharedKey(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class VirtualNetworkGatewayConnectionsGetSharedKey(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/sharedkey",
**self.url_parameters
)
@property
def METHOD_NAME(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkGatewayConnectionName", self.ctx.args.connection_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2018-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType()
_schema_on_200.value = AAZStrType(
flags={"required": True},
)
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"] |
step delete | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# --------------------------------------------------------------------------------------------
# pylint: disable=too-few-public-methods,unnecessary-pass,unused-argument
"""
TrunkedNetwork tests scenarios
"""
from azure.cli.testsdk import ResourceGroupPreparer, ScenarioTest
from .config import CONFIG
def setup_scenario1(test):
"""Env setup_scenario1"""
pass
def cleanup_scenario1(test):
"""Env cleanup_scenario1"""
pass
def call_scenario1(test):
"""# Testcase: scenario1"""
setup_scenario1(test)
step_create(
test,
checks=[
test.check("name", "{name}"),
test.check("provisioningState", "Succeeded"),
],
)
step_update(
test,
checks=[
test.check("tags", "{tagsUpdate}"),
test.check("provisioningState", "Succeeded"),
],
)
step_show(test, checks=[])
step_list_subscription(test, checks=[])
step_list_resource_group(test, checks=[])
METHOD_NAME(test, checks=[])
cleanup_scenario1(test)
def step_create(test, checks=None):
"""TrunkedNetwork create operation"""
if checks is None:
checks = []
test.cmd(
"az networkcloud trunkednetwork create --name {name} --extended-location "
' name={extendedLocation} type="CustomLocation" --location {location} '
'--interface-name "{interfaceName}" '
"--isolation-domain-ids {isolationDomainIds} --vlans {vlans} "
"--tags {tags} --resource-group {rg} --debug",
checks=checks,
)
def step_show(test, checks=None):
"""TrunkedNetwork show operation"""
if checks is None:
checks = []
test.cmd("az networkcloud trunkednetwork show --name {name} --resource-group {rg}")
def METHOD_NAME(test, checks=None):
"""TrunkedNetwork delete operation"""
if checks is None:
checks = []
test.cmd(
"az networkcloud trunkednetwork delete --name {name} --resource-group {rg} -y"
)
def step_list_resource_group(test, checks=None):
"""TrunkedNetwork list by resource group operation"""
if checks is None:
checks = []
test.cmd("az networkcloud trunkednetwork list --resource-group {rg}")
def step_list_subscription(test, checks=None):
"""TrunkedNetwork list by subscription operation"""
if checks is None:
checks = []
test.cmd("az networkcloud trunkednetwork list")
def step_update(test, checks=None):
"""TrunkedNetwork update operation"""
if checks is None:
checks = []
test.cmd(
"az networkcloud trunkednetwork update --name {name} --tags {tagsUpdate} --resource-group {rg}"
)
class TrunkedNetworkScenarioTest(ScenarioTest):
"""TrunkedNetwork scenario test"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.kwargs.update(
{
"name": self.create_random_name(
prefix="cli-test-trunkednw-", length=24
),
"location": CONFIG.get("TRUNKED_NETWORK", "location"),
"extendedLocation": CONFIG.get("TRUNKED_NETWORK", "extended_location"),
"tags": CONFIG.get("TRUNKED_NETWORK", "tags"),
"tagsUpdate": CONFIG.get("TRUNKED_NETWORK", "tags_update"),
"type": CONFIG.get("TRUNKED_NETWORK", "type"),
"vlans": CONFIG.get("TRUNKED_NETWORK", "vlans"),
"interfaceName": CONFIG.get("TRUNKED_NETWORK", "interface_name"),
"isolationDomainIds": CONFIG.get(
"TRUNKED_NETWORK", "isolation_domain_ids"
),
}
)
@ResourceGroupPreparer(name_prefix="clitest_rg"[:7], key="rg", parameter_name="rg")
def test_trunkednetwork_scenario1(self):
"""test scenario for TrunkedNetwork CRUD operations"""
call_scenario1(self) |
unix getpass | """Utilities to get a password and/or the current user name.
getpass(prompt[, stream]) - Prompt for a password, with echo turned off.
getuser() - Get the user name from the environment or password database.
GetPassWarning - This UserWarning is issued when getpass() cannot prevent
echoing of the password contents while reading.
On Windows, the msvcrt module will be used.
On the Mac EasyDialogs.AskPassword is used, if available.
"""
# Authors: Piers Lauder (original)
# Guido van Rossum (Windows support and cleanup)
# Gregory P. Smith (tty support & GetPassWarning)
import contextlib
import io
import os
import sys
import warnings
__all__ = ["getpass","getuser","GetPassWarning"]
class GetPassWarning(UserWarning): pass
def METHOD_NAME(prompt='Password: ', stream=None):
"""Prompt for a password, with echo turned off.
Args:
prompt: Written on stream to ask for the input. Default: 'Password: '
stream: A writable file object to display the prompt. Defaults to
the tty. If no tty is available defaults to sys.stderr.
Returns:
The seKr3t input.
Raises:
EOFError: If our input tty or stdin was closed.
GetPassWarning: When we were unable to turn echo off on the input.
Always restores terminal settings before returning.
"""
passwd = None
with contextlib.ExitStack() as stack:
try:
# Always try reading and writing directly on the tty first.
fd = os.open('/dev/tty', os.O_RDWR|os.O_NOCTTY)
tty = io.FileIO(fd, 'w+')
stack.enter_context(tty)
input = io.TextIOWrapper(tty)
stack.enter_context(input)
if not stream:
stream = input
except OSError as e:
# If that fails, see if stdin can be controlled.
stack.close()
try:
fd = sys.stdin.fileno()
except (AttributeError, ValueError):
fd = None
passwd = fallback_getpass(prompt, stream)
input = sys.stdin
if not stream:
stream = sys.stderr
if fd is not None:
try:
old = termios.tcgetattr(fd) # a copy to save
new = old[:]
new[3] &= ~termios.ECHO # 3 == 'lflags'
tcsetattr_flags = termios.TCSAFLUSH
if hasattr(termios, 'TCSASOFT'):
tcsetattr_flags |= termios.TCSASOFT
try:
termios.tcsetattr(fd, tcsetattr_flags, new)
passwd = _raw_input(prompt, stream, input=input)
finally:
termios.tcsetattr(fd, tcsetattr_flags, old)
stream.flush() # issue7208
except termios.error:
if passwd is not None:
# _raw_input succeeded. The final tcsetattr failed. Reraise
# instead of leaving the terminal in an unknown state.
raise
# We can't control the tty or stdin. Give up and use normal IO.
# fallback_getpass() raises an appropriate warning.
if stream is not input:
# clean up unused file objects before blocking
stack.close()
passwd = fallback_getpass(prompt, stream)
stream.write('\n')
return passwd
def win_getpass(prompt='Password: ', stream=None):
"""Prompt for password with echo off, using Windows getch()."""
if sys.stdin is not sys.__stdin__:
return fallback_getpass(prompt, stream)
import msvcrt
for c in prompt:
msvcrt.putwch(c)
pw = ""
while 1:
c = msvcrt.getwch()
if c == '\r' or c == '\n':
break
if c == '\003':
raise KeyboardInterrupt
if c == '\b':
pw = pw[:-1]
else:
pw = pw + c
msvcrt.putwch('\r')
msvcrt.putwch('\n')
return pw
def fallback_getpass(prompt='Password: ', stream=None):
warnings.warn("Can not control echo on the terminal.", GetPassWarning,
stacklevel=2)
if not stream:
stream = sys.stderr
print("Warning: Password input may be echoed.", file=stream)
return _raw_input(prompt, stream)
def _raw_input(prompt="", stream=None, input=None):
# This doesn't save the string in the GNU readline history.
if not stream:
stream = sys.stderr
if not input:
input = sys.stdin
prompt = str(prompt)
if prompt:
try:
stream.write(prompt)
except UnicodeEncodeError:
# Use replace error handler to get as much as possible printed.
prompt = prompt.encode(stream.encoding, 'replace')
prompt = prompt.decode(stream.encoding)
stream.write(prompt)
stream.flush()
# NOTE: The Python C API calls flockfile() (and unlock) during readline.
line = input.readline()
if not line:
raise EOFError
if line[-1] == '\n':
line = line[:-1]
return line
def getuser():
"""Get the username from the environment or password database.
First try various environment variables, then the password
database. This works on Windows as long as USERNAME is set.
"""
for name in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.environ.get(name)
if user:
return user
# If this fails, the exception will "explain" why
import pwd
return pwd.getpwuid(os.getuid())[0]
# Bind the name getpass to the appropriate function
try:
import termios
# it's possible there is an incompatible termios from the
# McMillan Installer, make sure we have a UNIX-compatible termios
termios.tcgetattr, termios.tcsetattr
except (ImportError, AttributeError):
try:
import msvcrt
except ImportError:
getpass = fallback_getpass
else:
getpass = win_getpass
else:
getpass = METHOD_NAME |
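# Example (interactive sketch): echo is suppressed when a controllable tty is available.
#   pw = getpass("Enter passphrase: ")
#   user = getuser()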
generate unsub topic action | import logging
import grpc
from couchers import errors, urls
from couchers.constants import DATETIME_INFINITY
from couchers.crypto import UNSUBSCRIBE_KEY_NAME, b64encode, generate_hash_signature, get_secret, verify_hash_signature
from couchers.db import session_scope
from couchers.models import GroupChatSubscription, NotificationDeliveryType, User
from couchers.notifications import settings
from couchers.notifications.utils import enum_from_topic_action
from couchers.sql import couchers_select as select
from proto.internal import unsubscribe_pb2
logger = logging.getLogger(__name__)
def _generate_unsubscribe_link(payload):
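    # Serialize and sign the payload so unsubscribe() below can verify that the
    # link was generated by the server before acting on it.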
msg = payload.SerializeToString()
sig = generate_hash_signature(message=msg, key=get_secret(UNSUBSCRIBE_KEY_NAME))
return urls.unsubscribe_link(payload=b64encode(msg), sig=b64encode(sig))
def generate_mute_all(user_id):
return _generate_unsubscribe_link(
unsubscribe_pb2.UnsubscribePayload(
user_id=user_id,
all=unsubscribe_pb2.MuteAll(),
)
)
def generate_unsub_topic_key(notification):
return _generate_unsubscribe_link(
unsubscribe_pb2.UnsubscribePayload(
user_id=notification.user_id,
topic_key=unsubscribe_pb2.UnsubscribeTopicKey(
topic=notification.topic,
key=notification.key,
),
)
)
def METHOD_NAME(notification):
return _generate_unsubscribe_link(
unsubscribe_pb2.UnsubscribePayload(
user_id=notification.user_id,
topic_action=unsubscribe_pb2.UnsubscribeTopicAction(
topic=notification.topic,
action=notification.action,
),
)
)
def unsubscribe(request, context):
"""
Returns a response string or uses context.abort upon error
"""
if not verify_hash_signature(message=request.payload, key=get_secret(UNSUBSCRIBE_KEY_NAME), sig=request.sig):
context.abort(grpc.StatusCode.PERMISSION_DENIED, errors.WRONG_SIGNATURE)
payload = unsubscribe_pb2.UnsubscribePayload.FromString(request.payload)
with session_scope() as session:
user = session.execute(select(User).where(User.id == payload.user_id)).scalar_one()
if payload.HasField("all"):
logger.info(f"User {user.name} unsubscribing from all")
# todo: some other system when out of preview
user.new_notifications_enabled = False
return "You've been unsubscribed from all non-security notifications"
if payload.HasField("topic_action"):
logger.info(f"User {user.name} unsubscribing from topic_action")
topic = payload.topic_action.topic
action = payload.topic_action.action
topic_action = enum_from_topic_action[topic, action]
# disable emails for this type
settings.set_preference(session, user.id, topic_action, NotificationDeliveryType.email, False)
return "You've been unsubscribed from all email notifications of that type"
if payload.HasField("topic_key"):
logger.info(f"User {user.name} unsubscribing from topic_key")
topic = payload.topic_key.topic
key = payload.topic_key.key
# a bunch of manual stuff
if topic == "chat":
group_chat_id = int(key)
subscription = session.execute(
select(GroupChatSubscription)
.where(GroupChatSubscription.group_chat_id == group_chat_id)
.where(GroupChatSubscription.user_id == user.id)
.where(GroupChatSubscription.left == None)
).scalar_one_or_none()
if not subscription:
context.abort(grpc.StatusCode.NOT_FOUND, errors.CHAT_NOT_FOUND)
subscription.muted_until = DATETIME_INFINITY
return "That group chat has been muted."
else:
context.abort(grpc.StatusCode.UNIMPLEMENTED, errors.CANT_UNSUB_TOPIC) |
old skyline | # -*- coding: utf-8 -*-
"""Tests for dataset analysis utilities."""
import itertools
import unittest
from typing import Iterable, Mapping
import numpy as np
import pandas
from pykeen.datasets import Dataset, Nations
from pykeen.datasets import analysis as dataset_analysis
from pykeen.triples import analysis as triple_analysis
from pykeen.typing import LABEL_HEAD, LABEL_TAIL
def METHOD_NAME(xs):
    # naive implementation, O(n^2)
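    # A pair is kept iff no other pair is >= in both coordinates (nothing dominates it).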
return {(s, c) for s, c in xs if not any(s2 >= s and c2 >= c for s2, c2 in xs if (s, c) != (s2, c2))}
class TestUtils(unittest.TestCase):
"""Test skyline."""
def test_skyline(self):
"""Test the skyline function."""
n = 500
pairs = list(
zip(
np.random.randint(low=0, high=200, size=n, dtype=int),
np.random.uniform(0, 6, size=n),
)
)
self.assertEqual(set(METHOD_NAME(pairs)), set(triple_analysis._get_skyline(pairs)))
def _test_count_dataframe(
dataset: Dataset,
df: pandas.DataFrame,
labels: bool = True,
merge_subsets: bool = True,
merge_sides: bool = True,
):
"""Check the general structure of a count dataframe."""
# check correct output type
assert isinstance(df, pandas.DataFrame)
expected_columns = {triple_analysis.COUNT_COLUMN_NAME}
expected_columns.update(
_check_labels(
df=df,
labels=labels,
id_column_name=triple_analysis.ENTITY_ID_COLUMN_NAME,
label_column_name=triple_analysis.ENTITY_LABEL_COLUMN_NAME,
label_to_id=dataset.entity_to_id,
)
)
expected_columns.update(
_check_labels(
df=df,
labels=labels,
id_column_name=triple_analysis.RELATION_ID_COLUMN_NAME,
label_column_name=triple_analysis.RELATION_LABEL_COLUMN_NAME,
label_to_id=dataset.relation_to_id,
)
)
if not merge_subsets:
expected_columns.add(dataset_analysis.SUBSET_COLUMN_NAME)
# check value range subset
assert df[dataset_analysis.SUBSET_COLUMN_NAME].isin(dataset.factory_dict.keys()).all()
if not merge_sides:
expected_columns.add(triple_analysis.ENTITY_POSITION_COLUMN_NAME)
# check value range side
assert (
df[triple_analysis.ENTITY_POSITION_COLUMN_NAME]
.isin(
{
LABEL_HEAD,
LABEL_TAIL,
}
)
.all()
)
# check columns
assert expected_columns == set(df.columns)
# check value range and type
assert (df[triple_analysis.COUNT_COLUMN_NAME] >= 0).all()
assert df[triple_analysis.COUNT_COLUMN_NAME].dtype == np.int64
def _check_labels(
df: pandas.DataFrame,
labels: bool,
id_column_name: str,
label_column_name: str,
label_to_id: Mapping[str, int],
) -> Iterable[str]:
if id_column_name in df.columns:
yield id_column_name
# check value range entity IDs
assert df[id_column_name].isin(label_to_id.values()).all()
if labels:
yield label_column_name
# check value range entity labels
assert df[label_column_name].isin(label_to_id.keys()).all()
class DatasetAnalysisTests(unittest.TestCase):
"""Tests for dataset analysis utilities."""
def setUp(self) -> None:
"""Initialize the unittest."""
self.dataset = Nations()
def test_relation_count_dataframe(self):
"""Test relation count dataframe."""
for labels, merge_subsets in itertools.product((False, True), repeat=2):
_test_count_dataframe(
dataset=self.dataset,
df=dataset_analysis.get_relation_count_df(
dataset=self.dataset,
add_labels=labels,
merge_subsets=merge_subsets,
),
labels=labels,
merge_subsets=merge_subsets,
)
def test_entity_count_dataframe(self):
"""Test entity count dataframe."""
for labels, merge_subsets, merge_sides in itertools.product((False, True), repeat=3):
_test_count_dataframe(
dataset=self.dataset,
df=dataset_analysis.get_entity_count_df(
dataset=self.dataset,
add_labels=labels,
merge_subsets=merge_subsets,
merge_sides=merge_sides,
),
labels=labels,
merge_subsets=merge_subsets,
merge_sides=merge_sides,
)
def test_entity_relation_co_occurrence_dataframe(self):
"""Test entity-relation co-occurrence dataframe."""
for labels, merge_sides, merge_subsets in itertools.product((False, True), repeat=3):
_test_count_dataframe(
dataset=self.dataset,
df=dataset_analysis.get_entity_relation_co_occurrence_df(
dataset=self.dataset,
merge_sides=merge_sides,
merge_subsets=merge_subsets,
add_labels=labels,
),
labels=labels,
merge_subsets=merge_subsets,
merge_sides=merge_sides,
)
def test_relation_pattern_types(self):
"""Helper method for relation pattern classification."""
df = dataset_analysis.get_relation_pattern_types_df(
dataset=self.dataset,
drop_confidence=False,
)
# check correct type
assert isinstance(df, pandas.DataFrame)
# check relation_id value range
assert df[triple_analysis.RELATION_ID_COLUMN_NAME].isin(self.dataset.relation_to_id.values()).all()
# check pattern value range
assert df[triple_analysis.PATTERN_TYPE_COLUMN_NAME].isin(triple_analysis.RELATION_PATTERN_TYPES).all()
# check confidence value range
x = df[triple_analysis.CONFIDENCE_COLUMN_NAME].values
assert (0 <= x).all()
assert (x <= 1).all()
# check support value range
x = df[triple_analysis.SUPPORT_COLUMN_NAME].values
assert (1 <= x).all()
def test_relation_cardinality_types(self):
"""Tests for relation cardinality type classification."""
df = dataset_analysis.get_relation_cardinality_types_df(
dataset=self.dataset,
)
# check correct type
assert isinstance(df, pandas.DataFrame)
# check relation_id value range
assert df[triple_analysis.RELATION_ID_COLUMN_NAME].isin(self.dataset.relation_to_id.values()).all()
# check pattern value range
assert df[triple_analysis.CARDINALITY_TYPE_COLUMN_NAME].isin(triple_analysis.RELATION_CARDINALITY_TYPES).all()
def test_calculate_relation_functionality(self):
"""Tests calculate_relation_functionality."""
df = dataset_analysis.get_relation_functionality_df(
dataset=self.dataset,
)
# check correct type
assert isinstance(df, pandas.DataFrame)
assert {
triple_analysis.RELATION_ID_COLUMN_NAME,
triple_analysis.FUNCTIONALITY_COLUMN_NAME,
triple_analysis.INVERSE_FUNCTIONALITY_COLUMN_NAME,
}.issubset(df.columns)
# check relation_id value range
assert df[triple_analysis.RELATION_ID_COLUMN_NAME].isin(self.dataset.relation_to_id.values()).all() |
last modified by type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'SystemDataResponse',
]
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
METHOD_NAME: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
        :param str last_modified_at: The timestamp of resource last modification (UTC).
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if METHOD_NAME is not None:
pulumi.set(__self__, "last_modified_by_type", METHOD_NAME)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
        The timestamp of resource last modification (UTC).
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def METHOD_NAME(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
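# Editor's note (illustrative, not part of the generated SDK): indexing an instance with a
# wire-format key, e.g. system_data["lastModifiedByType"], routes through __key_warning and
# logs a suggestion to use the snake_case property getter (system_data.last_modified_by_type)
# instead; the property getters are the intended access path. "system_data" is a hypothetical
# variable name used only for this note.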
|
test finance denied | # Copyright (c) 2015 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import pytest
from jinja2.runtime import Context
from sqlalchemy.orm import Session
from pycroft.model.config import Config
from pycroft.model.user import User
from tests.factories.property import AdminPropertyGroupFactory
from tests.factories.user import UserFactory
from web import PycroftFlask
from web.template_filters import require
from .assertions import TestClient
from .fixture_helpers import login_context, BlueprintUrls
@pytest.fixture(scope="module")
def admin_group(module_session: Session):
return AdminPropertyGroupFactory.create()
@pytest.fixture(scope="module")
def client(module_test_client: TestClient) -> TestClient:
return module_test_client
class TestAnonymous:
"""First test as anonymous user.
Anonymous users should be able to access the login page and the /static/
content, nothing else.
"""
@pytest.fixture(scope='class')
def jinja_context(self, app: PycroftFlask) -> Context:
return Context(app.jinja_env, parent=None, name="pseudo", blocks={})
def test_login_page_visible(self, client: TestClient):
client.assert_ok("login.login")
def test_static_content_can_be_fetched(
self, client: TestClient, jinja_context: Context
):
client.assert_url_ok(require(jinja_context, "main.css"))
def METHOD_NAME(self, client: TestClient):
client.assert_response_code("finance.bank_accounts_list", 302)
def test_infrastructure_denied(self, client: TestClient):
client.assert_response_code("infrastructure.switches", 302)
def test_user_denied(self, client: TestClient):
client.assert_response_code("user.overview", 302)
class TestPermissionsAdmin:
"""Test permissions for admin usergroup.
"""
@pytest.fixture(scope="class", autouse=True)
def admin_logged_in(self, admin: User, client: TestClient):
with login_context(client, admin.login, "password"):
yield
def test_access_buildings(self, client: TestClient):
client.assert_ok("facilities.overview")
def test_access_finance(self, client: TestClient):
client.assert_forbidden("finance.bank_accounts_list")
class TestPermissionsFinance:
"""Test permissions for finance usergroup (advanced).
"""
@pytest.fixture(scope="class", autouse=True)
def treasurer_logged_in(
self,
treasurer: User,
client: TestClient,
) -> None:
with login_context(client, treasurer.login, "password"):
yield
def test_access_buildings(self, client: TestClient):
client.assert_ok("facilities.overview")
def test_access_finance(self, client: TestClient):
client.assert_ok("finance.bank_accounts_list")
class TestPermissionsUser:
"""Test permissions as a user without any membership
"""
@pytest.fixture(scope="class", autouse=True)
def member_logged_in(
self, class_session: Session, config: Config, client: TestClient
):
UserFactory.create(
login="member",
with_membership=True,
membership__group=config.member_group,
membership__includes_today=True,
)
class_session.flush()
with login_context(client, "member", "password"):
yield
def test_access_user(self, client: TestClient, blueprint_urls: BlueprintUrls):
for url in blueprint_urls("user"):
client.assert_url_forbidden(url)
def test_access_finance(self, client: TestClient, blueprint_urls: BlueprintUrls):
for url in blueprint_urls("finance"):
client.assert_url_forbidden(url)
def test_access_buildings(self, client: TestClient, blueprint_urls: BlueprintUrls):
for url in blueprint_urls("facilities"):
client.assert_url_forbidden(url)
def test_access_infrastructure(
self, client: TestClient, blueprint_urls: BlueprintUrls
):
for url in blueprint_urls("infrastructure"):
client.assert_url_forbidden(url)
def test_access_properties(self, client: TestClient, blueprint_urls: BlueprintUrls):
for url in blueprint_urls("properties"):
client.assert_url_forbidden(url)
def test_access_login(self, client: TestClient, blueprint_urls: BlueprintUrls):
# Login see Test_010_Anonymous
        # TODO: assert on the client response text (or something more specific), not just the status code
client.assert_response_code("login.logout", 302) |
partial constant body request | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Customize generated code here.
Follow our quickstart for examples: https://aka.ms/azsdk/python/dpcodegen/python/customize
"""
from typing import Any, Dict, List, cast
from azure.core.exceptions import (
ClientAuthenticationError,
ResourceExistsError,
ResourceNotFoundError,
map_error,
HttpResponseError,
)
from azure.core.rest import HttpRequest
from azure.core.utils import case_insensitive_dict
from azure.core.tracing.decorator import distributed_trace
from azure.core.pipeline import PipelineResponse
from ._operations import FormdataurlencodedOperations as _FormdataurlencodedOperations
class Helpers:
@staticmethod
def _update_pet_with_form_request(pet_id: int, *, data: Dict[str, Any], **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/x-www-form-urlencoded"))
# Construct URL
_url = "/formsdataurlencoded/pet/add/{petId}"
path_format_arguments = {
"petId": pet_id,
}
_url = _url.format(**path_format_arguments)
if content_type is not None:
_headers["Content-Type"] = content_type
return HttpRequest(
method="POST", url=cast(str, _url), headers=_headers, data=data, params=kwargs.pop("params", {})
)
@staticmethod
def _update_pet_with_form_deserialize( # pylint: disable=inconsistent-return-statements
pipeline_response: PipelineResponse, **kwargs: Any
) -> None:
cls = kwargs.pop("cls", None)
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
response = pipeline_response.http_response
if response.status_code not in [200, 405]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
@staticmethod
def METHOD_NAME(*, data: Dict[str, Any], **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = kwargs.pop("params", {}) or {}
content_type = kwargs.pop("content_type", _headers.pop("Content-Type", "application/x-www-form-urlencoded"))
# Construct URL
_url = "/formsdataurlencoded/partialConstantBody"
# Construct headers
if content_type is not None:
_headers["Content-Type"] = content_type
return HttpRequest(method="POST", url=_url, headers=_headers, data=data, params=_params)
@staticmethod
def _partial_constant_body_deserialize( # pylint: disable=inconsistent-return-statements
pipeline_response: PipelineResponse, **kwargs: Any
) -> None:
error_map = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop("error_map", {}) or {})
cls = kwargs.pop("cls", None)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response)
if cls:
return cls(pipeline_response, None, {})
class FormdataurlencodedOperations(_FormdataurlencodedOperations, Helpers):
def _send_request(self, request: HttpRequest, *, stream: bool = False, **kwargs: Any) -> PipelineResponse:
return self._client._pipeline.run(request, stream=stream, **kwargs) # pylint: disable=protected-access
@distributed_trace
def update_pet_with_form(
self, pet_id: int, data: Dict[str, Any], **kwargs: Any
) -> None: # pylint: disable=inconsistent-return-statements
"""Updates a pet in the store with form data.
Updates a pet in the store with form data.
:param pet_id: ID of pet that needs to be updated.
:type pet_id: int
:param data: Form-encoded input for data. See the template in our example to find the input
shape.
:type data: dict[str, any]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# form-encoded input template you can fill out and use as your `data` input.
data = {
"name": "str", # Optional. Updated name of the pet. Default value is None.
"pet_age": 0, # How many years is it old?.
"pet_food": "str", # Can take a value of meat, or fish, or plant. Known
values are: "meat", "fish", and "plant".
"pet_type": "str", # Can take a value of dog, or cat, or fish. Known values
are: "dog", "cat", and "fish".
"status": "str" # Optional. Updated status of the pet. Default value is
None.
}
"""
request = self._update_pet_with_form_request(pet_id=pet_id, data=data, **kwargs)
request.url = self._client.format_url(request.url)
return self._update_pet_with_form_deserialize(self._send_request(request, **kwargs))
@distributed_trace
def partial_constant_body(
self, data: Dict[str, Any], **kwargs: Any
) -> None: # pylint: disable=inconsistent-return-statements
"""Test a partially constant formdata body. Pass in { grant_type: 'access_token', access_token:
'foo', service: 'bar' } to pass the test.
:param data: Form-encoded input for data. See the template in our example to find the input
shape.
:type data: dict[str, any]
:return: None
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
Example:
.. code-block:: python
# form-encoded input template you can fill out and use as your `data` input.
data = {
"access_token": "str", # AAD access token, mandatory when grant_type is
access_token_refresh_token or access_token.
"grant_type": "access_token", # Default value is "access_token". Constant
part of a formdata body. Default value is "access_token". Note that overriding
this default value may result in unsupported behavior.
"service": "str" # Indicates the name of your Azure container registry.
}
"""
request = self.METHOD_NAME(data=data, **kwargs)
request.url = self._client.format_url(request.url)
return self._partial_constant_body_deserialize(self._send_request(request, **kwargs))
__all__: List[str] = [
"FormdataurlencodedOperations"
] # Add all objects you want publicly available to users at this package level
def patch_sdk():
"""Do not remove from this file.
`patch_sdk` is a last resort escape hatch that allows you to do customizations
you can't accomplish using the techniques described in
https://aka.ms/azsdk/python/dpcodegen/python/customize
""" |
pillar cache dir | """
unit tests for the pillar runner
"""
import logging
import os
import shutil
import pytest
import salt.runners.pillar as pillar_runner
import salt.utils.files
import salt.utils.gitfs
import salt.utils.msgpack
from tests.support.mock import MagicMock, mock_open, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {
pillar_runner: {
"__opts__": {
"pillar_cache": True,
"pillar_cache_backend": "disk",
"pillar_cache_ttl": 30,
}
}
}
@pytest.fixture(scope="module")
def cachedir_tree(tmp_path_factory):
_cachedir_tree = tmp_path_factory.mktemp("cachedir")
try:
yield _cachedir_tree
finally:
shutil.rmtree(str(_cachedir_tree), ignore_errors=True)
@pytest.fixture(scope="module")
def METHOD_NAME(cachedir_tree):
METHOD_NAME = cachedir_tree / "pillar_cache"
METHOD_NAME.mkdir()
return METHOD_NAME
@pytest.fixture(scope="function")
def pillar_cache_files(METHOD_NAME):
MINION_ID = "test-host"
cache = {
"CacheDisk_data": {
MINION_ID: {
None: {
"this": "one",
"that": "two",
"those": ["three", "four", "five"],
}
}
},
"CacheDisk_cachetime": {MINION_ID: 1612302460.146923},
}
packer = salt.utils.msgpack.Packer()
cache_contents = packer.pack(cache)
with salt.utils.files.fopen(
os.path.join(str(METHOD_NAME), MINION_ID), "wb+"
) as fp:
fp.write(cache_contents)
MINION_ID = "another-host"
cache = {
"CacheDisk_data": {
MINION_ID: {
None: {
"this": "six",
"that": "seven",
"those": ["eight", "nine", "ten"],
}
}
},
"CacheDisk_cachetime": {MINION_ID: 1612302460.146923},
}
packer = salt.utils.msgpack.Packer()
cache_contents = packer.pack(cache)
with salt.utils.files.fopen(
os.path.join(str(METHOD_NAME), MINION_ID), "wb+"
) as fp:
fp.write(cache_contents)
def test_clear_pillar_cache(cachedir_tree, METHOD_NAME, pillar_cache_files):
"""
test pillar.clear_pillar_cache
"""
MINION_IDS = [
["test-host", "another-host"],
["test-host"],
["test-host", "another-host"],
["test-host", "another-host"],
]
_CHECK_MINIONS_RETURN = []
for entry in MINION_IDS:
_CHECK_MINIONS_RETURN.append({"minions": entry, "missing": []})
with patch.dict(pillar_runner.__opts__, {"cachedir": str(cachedir_tree)}):
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(side_effect=_CHECK_MINIONS_RETURN),
):
expected = {
"test-host": {
"those": ["three", "four", "five"],
"that": "two",
"this": "one",
},
"another-host": {
"those": ["eight", "nine", "ten"],
"that": "seven",
"this": "six",
},
}
ret = pillar_runner.show_pillar_cache()
assert ret == expected
ret = pillar_runner.clear_pillar_cache("test-host")
assert ret == {}
expected = {
"another-host": {
"those": ["eight", "nine", "ten"],
"that": "seven",
"this": "six",
}
}
ret = pillar_runner.show_pillar_cache()
assert ret == expected
ret = pillar_runner.clear_pillar_cache()
assert ret == {}
def test_show_pillar_cache(cachedir_tree, METHOD_NAME, pillar_cache_files):
"""
test pillar.clear_pillar_cache
"""
MINION_IDS = [["test-host", "another-host"], ["test-host"]]
_CHECK_MINIONS_RETURN = []
for entry in MINION_IDS:
_CHECK_MINIONS_RETURN.append({"minions": entry, "missing": []})
with patch.dict(pillar_runner.__opts__, {"cachedir": str(cachedir_tree)}):
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(side_effect=_CHECK_MINIONS_RETURN),
):
expected = {
"test-host": {
"those": ["three", "four", "five"],
"that": "two",
"this": "one",
},
"another-host": {
"those": ["eight", "nine", "ten"],
"that": "seven",
"this": "six",
},
}
ret = pillar_runner.show_pillar_cache()
assert ret == expected
expected = {
"test-host": {
"this": "one",
"that": "two",
"those": ["three", "four", "five"],
}
}
ret = pillar_runner.show_pillar_cache("test-host")
assert ret == expected
_EMPTY_CHECK_MINIONS_RETURN = {"minions": [], "missing": []}
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(return_value=_EMPTY_CHECK_MINIONS_RETURN),
), patch("salt.utils.atomicfile.atomic_open", mock_open()) as atomic_open_mock:
ret = pillar_runner.show_pillar_cache("fake-host")
assert ret == {} |
test isnt match | # -*- coding: utf-8 -*-
import os
from subprocess import PIPE, STDOUT
from mock import Mock
import pytest
from tests.utils import CorrectedCommand, Rule
from thefuck import const
from thefuck.exceptions import EmptyCommand
from thefuck.system import Path
from thefuck.types import Command
class TestCorrectedCommand(object):
def test_equality(self):
assert (CorrectedCommand('ls', None, 100) ==
CorrectedCommand('ls', None, 200))
assert (CorrectedCommand('ls', None, 100) !=
CorrectedCommand('ls', lambda *_: _, 100))
def test_hashable(self):
assert {CorrectedCommand('ls', None, 100),
CorrectedCommand('ls', None, 200)} == {CorrectedCommand('ls')}
def test_representable(self):
assert '{}'.format(CorrectedCommand('ls', None, 100)) == \
'CorrectedCommand(script=ls, side_effect=None, priority=100)'
assert u'{}'.format(CorrectedCommand(u'echo café', None, 100)) == \
u'CorrectedCommand(script=echo café, side_effect=None, priority=100)'
@pytest.mark.parametrize('script, printed, override_settings', [
('git branch', 'git branch', {'repeat': False, 'debug': False}),
('git brunch',
"git brunch || fuck --repeat --force-command 'git brunch'",
{'repeat': True, 'debug': False}),
('git brunch',
"git brunch || fuck --repeat --debug --force-command 'git brunch'",
{'repeat': True, 'debug': True})])
def test_run(self, capsys, settings, script, printed, override_settings):
settings.update(override_settings)
CorrectedCommand(script, None, 1000).run(Command(script, ''))
out, _ = capsys.readouterr()
assert out == printed
class TestRule(object):
def test_from_path_rule_exception(self, mocker):
load_source = mocker.patch('thefuck.types.load_source',
side_effect=ImportError("No module named foo..."))
assert Rule.from_path(Path('git.py')) is None
load_source.assert_called_once_with('git', 'git.py')
def test_from_path(self, mocker):
match = object()
get_new_command = object()
load_source = mocker.patch(
'thefuck.types.load_source',
return_value=Mock(match=match,
get_new_command=get_new_command,
enabled_by_default=True,
priority=900,
requires_output=True))
rule_path = os.path.join(os.sep, 'rules', 'bash.py')
assert (Rule.from_path(Path(rule_path))
== Rule('bash', match, get_new_command, priority=900))
load_source.assert_called_once_with('bash', rule_path)
def test_from_path_excluded_rule(self, mocker, settings):
load_source = mocker.patch('thefuck.types.load_source')
settings.update(exclude_rules=['git'])
rule_path = os.path.join(os.sep, 'rules', 'git.py')
assert Rule.from_path(Path(rule_path)) is None
assert not load_source.called
@pytest.mark.parametrize('rules, rule, is_enabled', [
(const.DEFAULT_RULES, Rule('git', enabled_by_default=True), True),
(const.DEFAULT_RULES, Rule('git', enabled_by_default=False), False),
([], Rule('git', enabled_by_default=False), False),
([], Rule('git', enabled_by_default=True), False),
(const.DEFAULT_RULES + ['git'], Rule('git', enabled_by_default=False), True),
(['git'], Rule('git', enabled_by_default=False), True)])
def test_is_enabled(self, settings, rules, rule, is_enabled):
settings.update(rules=rules)
assert rule.is_enabled == is_enabled
def METHOD_NAME(self):
assert not Rule('', lambda _: False).is_match(
Command('ls', ''))
def test_is_match(self):
rule = Rule('', lambda x: x.script == 'cd ..')
assert rule.is_match(Command('cd ..', ''))
@pytest.mark.usefixtures('no_colors')
def test_isnt_match_when_rule_failed(self, capsys):
rule = Rule('test', Mock(side_effect=OSError('Denied')),
requires_output=False)
assert not rule.is_match(Command('ls', ''))
assert capsys.readouterr()[1].split('\n')[0] == '[WARN] Rule test:'
def test_get_corrected_commands_with_rule_returns_list(self):
rule = Rule(get_new_command=lambda x: [x.script + '!', x.script + '@'],
priority=100)
assert (list(rule.get_corrected_commands(Command('test', '')))
== [CorrectedCommand(script='test!', priority=100),
CorrectedCommand(script='test@', priority=200)])
def test_get_corrected_commands_with_rule_returns_command(self):
rule = Rule(get_new_command=lambda x: x.script + '!',
priority=100)
assert (list(rule.get_corrected_commands(Command('test', '')))
== [CorrectedCommand(script='test!', priority=100)])
class TestCommand(object):
@pytest.fixture(autouse=True)
def Popen(self, monkeypatch):
Popen = Mock()
Popen.return_value.stdout.read.return_value = b'output'
monkeypatch.setattr('thefuck.output_readers.rerun.Popen', Popen)
return Popen
@pytest.fixture(autouse=True)
def prepare(self, monkeypatch):
monkeypatch.setattr('thefuck.output_readers.rerun._wait_output',
lambda *_: True)
def test_from_script_calls(self, Popen, settings, os_environ):
settings.env = {}
assert Command.from_raw_script(
['apt-get', 'search', 'vim']) == Command(
'apt-get search vim', 'output')
Popen.assert_called_once_with('apt-get search vim',
shell=True,
stdin=PIPE,
stdout=PIPE,
stderr=STDOUT,
env=os_environ)
@pytest.mark.parametrize('script, result', [
([], None),
([''], None),
(['', ''], None),
(['ls', '-la'], 'ls -la'),
(['ls'], 'ls'),
(['echo \\ '], 'echo \\ '),
(['echo \\\n'], 'echo \\\n')])
def test_from_script(self, script, result):
if result:
assert Command.from_raw_script(script).script == result
else:
with pytest.raises(EmptyCommand):
Command.from_raw_script(script) |
dataset | #########################################################################
#
# Copyright (C) 2018 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from django.db import connection
from django.db.models import Max, Min, Count
from django.conf import settings
from pycsw.core.repository import Repository, query_spatial, get_geometry_area
from geonode.base.models import ResourceBase
from pycsw.core import util
LOGGER = logging.getLogger(__name__)
GEONODE_SERVICE_TYPES = {
# 'GeoNode enum': 'CSW enum'
"http://www.opengis.net/cat/csw/2.0.2": "OGC:CSW",
"http://www.opengis.net/wms": "OGC:WMS",
"http://www.opengis.net/wmts/1.0": "OGC:WMTS",
"https://wiki.osgeo.org/wiki/TMS": "OSGeo:TMS",
"urn:x-esri:serviceType:ArcGIS:MapServer": "ESRI:ArcGIS:MapServer",
"urn:x-esri:serviceType:ArcGIS:ImageServer": "ESRI:ArcGIS:ImageServer",
}
class GeoNodeRepository(Repository):
"""
Class to interact with underlying repository
"""
def __init__(self, context, repo_filter=None):
"""
Initialize repository
"""
self.context = context
self.filter = repo_filter
self.fts = False
self.label = "GeoNode"
self.local_ingest = True
self.dbtype = settings.DATABASES["default"]["ENGINE"].split(".")[-1]
# GeoNode PostgreSQL installs are PostGIS enabled
if self.dbtype == "postgis":
self.dbtype = "postgresql+postgis+wkt"
if self.dbtype in {"sqlite", "sqlite3"}: # load SQLite query bindings
connection.connection.create_function("query_spatial", 4, query_spatial)
connection.connection.create_function("get_anytext", 1, util.get_anytext)
connection.connection.create_function("get_geometry_area", 1, get_geometry_area)
# generate core queryables db and obj bindings
self.queryables = {}
for tname in self.context.model["typenames"]:
for qname in self.context.model["typenames"][tname]["queryables"]:
self.queryables[qname] = {}
items = list(self.context.model["typenames"][tname]["queryables"][qname].items())
for qkey, qvalue in items:
self.queryables[qname][qkey] = qvalue
# flatten all queryables
# TODO smarter way of doing this
self.queryables["_all"] = {}
for qbl in self.queryables:
self.queryables["_all"].update(self.queryables[qbl])
self.queryables["_all"].update(self.context.md_core_model["mappings"])
if "Harvest" in self.context.model["operations"] and "Transaction" in self.context.model["operations"]:
self.context.model["operations"]["Harvest"]["parameters"]["ResourceType"]["values"] = list(
GEONODE_SERVICE_TYPES.keys()
) # noqa
self.context.model["operations"]["Transaction"]["parameters"]["TransactionSchemas"]["values"] = list(
GEONODE_SERVICE_TYPES.keys()
) # noqa
def METHOD_NAME(self):
"""
Stub to mock a pycsw dataset object for Transactions
"""
return type("ResourceBase", (object,), {})
def query_ids(self, ids):
"""
Query by list of identifiers
"""
results = self._get_repo_filter(ResourceBase.objects).filter(uuid__in=ids).all()
return results
def query_domain(self, domain, typenames, domainquerytype="list", count=False):
"""
Query by property domain values
"""
objects = self._get_repo_filter(ResourceBase.objects)
if domainquerytype == "range":
return [tuple(objects.aggregate(Min(domain), Max(domain)).values())]
else:
if count:
return [(d[domain], d[f"{domain}__count"]) for d in objects.values(domain).annotate(Count(domain))]
else:
return objects.values_list(domain).distinct()
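    # Editor's note (illustrative, not in the original repository code): with
    # domainquerytype="range" the result is a single [(min, max)] pair for the property;
    # otherwise it is the distinct values, optionally paired with their counts when
    # count=True, e.g. [("vector", 10), ("raster", 3)] (example values are hypothetical).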
def query_insert(self, direction="max"):
"""
Query to get latest (default) or earliest update to repository
"""
if direction == "min":
return ResourceBase.objects.aggregate(Min("last_updated"))["last_updated__min"].strftime(
"%Y-%m-%dT%H:%M:%SZ"
)
return (
self._get_repo_filter(ResourceBase.objects)
.aggregate(Max("last_updated"))["last_updated__max"]
.strftime("%Y-%m-%dT%H:%M:%SZ")
)
def query_source(self, source):
"""
Query by source
"""
return self._get_repo_filter(ResourceBase.objects).filter(url=source)
def query(self, constraint, sortby=None, typenames=None, maxrecords=10, startposition=0):
"""
Query records from underlying repository
"""
# run the raw query and get total
        # we want to exclude layers which are not valid, as is done in the
# search engine
pycsw_filters = settings.PYCSW.get("FILTER", {"resource_type__in": ["dataset"]})
if "where" in constraint: # GetRecords with constraint
query = self._get_repo_filter(ResourceBase.objects.filter(**pycsw_filters)).extra(
where=[constraint["where"]], params=constraint["values"]
)
else: # GetRecords sans constraint
query = self._get_repo_filter(ResourceBase.objects.filter(**pycsw_filters))
total = query.count()
# apply sorting, limit and offset
if sortby is not None:
if "spatial" in sortby and sortby["spatial"]: # spatial sort
desc = False
if sortby["order"] == "DESC":
desc = True
query = query.all()
return [
str(total),
sorted(
query,
key=lambda x: float(util.get_geometry_area(getattr(x, sortby["propertyname"]))),
reverse=desc,
)[startposition : startposition + int(maxrecords)],
]
else:
if sortby["order"] == "DESC":
pname = f"-{sortby['propertyname']}"
else:
pname = sortby["propertyname"]
return [str(total), query.order_by(pname)[startposition : startposition + int(maxrecords)]]
else: # no sort
return [str(total), query.all()[startposition : startposition + int(maxrecords)]]
def delete(self, constraint):
"""
Delete a record from the repository
"""
results = (
self._get_repo_filter(ResourceBase.objects)
.extra(where=[constraint["where"]], params=constraint["values"])
.all()
)
deleted = len(results)
results.delete()
return deleted
def _get_repo_filter(self, query):
"""
Apply repository wide side filter / mask query
"""
if self.filter is not None:
return query.extra(where=[self.filter])
return query |
test disk quota default | '''
copyright: Copyright (C) 2015-2022, Wazuh Inc.
Created by Wazuh, Inc. <[email protected]>.
This program is free software; you can redistribute it and/or modify it under the terms of GPLv2
type: integration
brief: The File Integrity Monitoring (FIM) system watches selected files and triggers alerts when
these files are modified. Specifically, these tests will check if FIM limits the size of
the 'queue/diff/local' folder, where Wazuh stores the compressed files used to perform
the 'diff' operation, to the default value when the 'report_changes' option is enabled.
The FIM capability is managed by the 'wazuh-syscheckd' daemon, which checks configured
files for changes to the checksums, permissions, and ownership.
components:
- fim
suite: files_report_changes
targets:
- agent
- manager
daemons:
- wazuh-syscheckd
os_platform:
- linux
- windows
os_version:
- Arch Linux
- Amazon Linux 2
- Amazon Linux 1
- CentOS 8
- CentOS 7
- Debian Buster
- Red Hat 8
- Ubuntu Focal
- Ubuntu Bionic
- Windows 10
- Windows Server 2019
- Windows Server 2016
references:
- https://documentation.wazuh.com/current/user-manual/capabilities/file-integrity/index.html
- https://documentation.wazuh.com/current/user-manual/reference/ossec-conf/syscheck.html#disk-quota
pytest_args:
- fim_mode:
realtime: Enable real-time monitoring on Linux (using the 'inotify' system calls) and Windows systems.
whodata: Implies real-time monitoring but adding the 'who-data' information.
- tier:
0: Only level 0 tests are performed, they check basic functionalities and are quick to perform.
1: Only level 1 tests are performed, they check functionalities of medium complexity.
2: Only level 2 tests are performed, they check advanced functionalities and are slow to perform.
tags:
- fim_report_changes
'''
import os
import pytest
from wazuh_testing import global_parameters, LOG_FILE_PATH
from wazuh_testing.tools import PREFIX
from wazuh_testing.tools.configuration import load_wazuh_configurations
from wazuh_testing.tools.monitoring import FileMonitor, generate_monitoring_callback
from wazuh_testing.modules.fim import FIM_DEFAULT_LOCAL_INTERNAL_OPTIONS as local_internal_options
from wazuh_testing.modules.fim.event_monitor import CB_DISK_QUOTA_LIMIT_CONFIGURED_VALUE, ERR_MSG_DISK_QUOTA_LIMIT
from wazuh_testing.modules.fim.utils import generate_params
# Marks
pytestmark = [pytest.mark.tier(level=1)]
# Variables
wazuh_log_monitor = FileMonitor(LOG_FILE_PATH)
test_directories = [os.path.join(PREFIX, 'testdir1')]
directory_str = ','.join(test_directories)
test_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
configurations_path = os.path.join(test_data_path, 'wazuh_conf.yaml')
testdir1 = test_directories[0]
DEFAULT_SIZE = 1 * 1024 * 1024
# Configurations
conf_params, conf_metadata = generate_params(extra_params={'REPORT_CHANGES': {'report_changes': 'yes'},
'TEST_DIRECTORIES': directory_str})
configurations = load_wazuh_configurations(configurations_path, __name__, params=conf_params, metadata=conf_metadata)
# Fixtures
@pytest.fixture(scope='module', params=configurations)
def get_configuration(request):
"""Get configurations from the module."""
return request.param
# Tests
def METHOD_NAME(get_configuration, configure_environment,
configure_local_internal_options_module, restart_syscheckd):
'''
description: Check if the 'wazuh-syscheckd' daemon limits the size of the folder where the data used to perform
the 'diff' operations is stored to the default value. For this purpose, the test will monitor
a directory and, once the FIM is started, it will wait for the FIM event related to the maximum
disk quota to store 'diff' information. Finally, the test will verify that the value gotten from
that FIM event corresponds with the default value of the 'disk_quota' tag (1GB).
wazuh_min_version: 4.6.0
tier: 1
parameters:
- get_configuration:
type: fixture
brief: Get configurations from the module.
- configure_environment:
type: fixture
brief: Configure a custom environment for testing.
- configure_local_internal_options_module:
type: fixture
brief: Configure the local internal options file.
- restart_syscheckd:
type: fixture
brief: Clear the 'ossec.log' file and start a new monitor.
assertions:
- Verify that an FIM event is generated indicating the size limit of the folder
to store 'diff' information to the default limit of the 'disk_quota' tag (1GB).
input_description: A test case (ossec_conf_diff_default) is contained in external YAML
file (wazuh_conf.yaml) which includes configuration settings for
the 'wazuh-syscheckd' daemon and, these are combined with the
testing directory to be monitored defined in the module.
expected_output:
- r'.*Maximum disk quota size limit configured to'
tags:
- disk_quota
- scheduled
'''
disk_quota_value = wazuh_log_monitor.start(
timeout=global_parameters.default_timeout,
callback=generate_monitoring_callback(CB_DISK_QUOTA_LIMIT_CONFIGURED_VALUE),
error_message=ERR_MSG_DISK_QUOTA_LIMIT).result()
if disk_quota_value:
assert disk_quota_value == str(DEFAULT_SIZE), 'Wrong value for disk_quota'
else:
raise AssertionError('Wrong value for disk_quota') |
set rpm | from __future__ import annotations
from enum import Enum
import asyncio
from typing import Optional, Dict
from opentrons.drivers import utils
from opentrons.drivers.command_builder import CommandBuilder
from opentrons.drivers.asyncio.communication import AsyncResponseSerialConnection
from opentrons.drivers.heater_shaker.abstract import AbstractHeaterShakerDriver
from opentrons.drivers.types import Temperature, RPM, HeaterShakerLabwareLatchStatus
class GCODE(str, Enum):
SET_RPM = "M3"
GET_RPM = "M123"
SET_TEMPERATURE = "M104"
GET_TEMPERATURE = "M105"
HOME = "G28"
ENTER_BOOTLOADER = "dfu"
GET_VERSION = "M115"
OPEN_LABWARE_LATCH = "M242"
CLOSE_LABWARE_LATCH = "M243"
GET_LABWARE_LATCH_STATE = "M241"
DEACTIVATE_HEATER = "M106"
HS_BAUDRATE = 115200
DEFAULT_HS_TIMEOUT = 40
HS_COMMAND_TERMINATOR = "\n"
HS_ACK = "OK" + HS_COMMAND_TERMINATOR
HS_ERROR_KEYWORD = "err"
HS_ASYNC_ERROR_ACK = "async"
DEFAULT_COMMAND_RETRIES = 0
class HeaterShakerDriver(AbstractHeaterShakerDriver):
@classmethod
async def create(
cls, port: str, loop: Optional[asyncio.AbstractEventLoop]
) -> HeaterShakerDriver:
"""
Create a heater-shaker driver.
Args:
port: port or url of heater shaker
loop: optional event loop
Returns: driver
"""
connection = await AsyncResponseSerialConnection.create(
port=port,
baud_rate=HS_BAUDRATE,
timeout=DEFAULT_HS_TIMEOUT,
ack=HS_ACK,
loop=loop,
error_keyword=HS_ERROR_KEYWORD,
async_error_ack=HS_ASYNC_ERROR_ACK,
)
return cls(connection=connection)
def __init__(self, connection: AsyncResponseSerialConnection) -> None:
"""
Constructor
Args:
connection: SerialConnection to the heater-shaker
"""
self._connection = connection
async def connect(self) -> None:
"""Connect to heater-shaker"""
await self._connection.open()
async def disconnect(self) -> None:
"""Disconnect from heater-shaker"""
await self._connection.close()
async def is_connected(self) -> bool:
"""Check connection"""
return await self._connection.is_open()
async def open_labware_latch(self) -> None:
"""Send open-plate-lock command.
Note: Labware latch is referred to as 'plate lock' in firmware.
"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.OPEN_LABWARE_LATCH
)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES)
async def close_labware_latch(self) -> None:
"""Send close-plate-lock command.
Note: Labware latch is referred to as 'plate lock' in firmware.
"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.CLOSE_LABWARE_LATCH
)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES)
async def set_temperature(self, temperature: float) -> None:
"""Set temperature"""
c = (
CommandBuilder(terminator=HS_COMMAND_TERMINATOR)
.add_gcode(gcode=GCODE.SET_TEMPERATURE)
.add_float(
prefix="S",
value=temperature,
precision=utils.HS_GCODE_ROUNDING_PRECISION,
)
)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES)
async def get_temperature(self) -> Temperature:
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.GET_TEMPERATURE
)
response = await self._connection.send_command(
command=c, retries=DEFAULT_COMMAND_RETRIES
)
return utils.parse_temperature_response(
temperature_string=response, rounding_val=utils.HS_GCODE_ROUNDING_PRECISION
)
async def METHOD_NAME(self, rpm: int) -> None:
"""Set RPM"""
c = (
CommandBuilder(terminator=HS_COMMAND_TERMINATOR)
.add_gcode(gcode=GCODE.SET_RPM)
.add_int(prefix="S", value=int(rpm))
)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES)
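    # Editor's note (illustrative, not in the original driver): calling this setter with
    # rpm=500 issues the SET_RPM gcode ("M3") with an "S500" parameter and the "\n"
    # terminator, so the serial line carries roughly "M3 S500\n" (exact spacing is
    # determined by CommandBuilder).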
async def get_rpm(self) -> RPM:
"""Get RPM"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.GET_RPM
)
response = await self._connection.send_command(
command=c, retries=DEFAULT_COMMAND_RETRIES
)
return utils.parse_rpm_response(rpm_string=response)
async def get_labware_latch_status(self) -> HeaterShakerLabwareLatchStatus:
"""Send get-labware-latch-status command.
Note: Labware latch is referred to as 'plate lock' in firmware.
"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.GET_LABWARE_LATCH_STATE
)
response = await self._connection.send_command(
command=c, retries=DEFAULT_COMMAND_RETRIES
)
return utils.parse_labware_latch_status_response(status_string=response)
async def home(self) -> None:
"""Send home command.
Note: Homing also stops the shaking motion if applicable.
"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(gcode=GCODE.HOME)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES)
async def get_device_info(self) -> Dict[str, str]:
"""Send get-device-info command"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.GET_VERSION
)
response = await self._connection.send_command(
command=c, retries=DEFAULT_COMMAND_RETRIES
)
return utils.parse_hs_device_information(device_info_string=response)
async def enter_programming_mode(self) -> None:
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.ENTER_BOOTLOADER
)
await self._connection.send_dfu_command(command=c)
await self._connection.close()
async def deactivate_heater(self) -> None:
"""Send deactivate-heater command"""
c = CommandBuilder(terminator=HS_COMMAND_TERMINATOR).add_gcode(
gcode=GCODE.DEACTIVATE_HEATER
)
await self._connection.send_command(command=c, retries=DEFAULT_COMMAND_RETRIES) |
save plot | #!/usr/bin/env python
"""Make graphics and example image for coordinate tutorial
Expects MNI nonlinear template t1 and t2 images in the directory of this script -
specifically these files:
* mni_icbm152_t1_tal_nlin_asym_09a.nii
* mni_icbm152_t2_tal_nlin_asym_09a.nii
Requires nipy and matplotlib.
Executing this script generates the following files in the current directory:
* localizer.png (pretend localizer sagittal image)
* someones_epi.nii.gz (pretend single EPI volume)
* someones_anatomy.nii.gz (pretend single subject structural)
"""
import math
import matplotlib.pyplot as plt
import nipy
import nipy.algorithms.resample as rsm
import nipy.core.api as nca
import numpy as np
import numpy.linalg as npl
import nibabel.eulerangles as euler
T1_IMG = 'mni_icbm152_t1_tal_nlin_asym_09a.nii'
T2_IMG = 'mni_icbm152_t2_tal_nlin_asym_09a.nii'
imgs = []
for img_fname in (T1_IMG, T2_IMG):
img = nipy.load_image(img_fname)
# Set affine as for FOV, not AC
RZS = img.affine[:3, :3]
vox_fov_center = -(np.array(img.shape) - 1) / 2.0
T = RZS.dot(vox_fov_center)
img.affine[:3, 3] = T
# Take stuff off the top of the full image, to emphasize FOV
img_z_shave = 10
# Take stuff off left and right to save disk space
img_x_shave = 20
img = img[img_x_shave:-img_x_shave, :, :-img_z_shave]
imgs.append(img)
t1_img, t2_img = imgs
# Make fake localizer
data = t1_img.get_fdata()
n_x, n_y, n_z = img.shape
mid_x = round(n_x / 2)
sagittal = data[mid_x, :, :].T
# EPI bounding box
# 3 points on a not-completely-rectangular box. The box is to give a by-eye
# estimate, then we work out the box side lengths and make a rectangular box
# from those, using the origin point
epi_bl = np.array((20, 15)) * 2
epi_br = np.array((92, 70)) * 2
epi_tl = np.array((7, 63)) * 2
# Find lengths of sides
epi_y_len = np.sqrt((np.subtract(epi_bl, epi_tl) ** 2).sum())
epi_x_len = np.sqrt((np.subtract(epi_bl, epi_br) ** 2).sum())
x, y = 0, 1
# Make a rectangular box with these sides
def make_ortho_box(bl, x_len, y_len):
"""Make a box with sides parallel to the axes"""
return np.array(
(bl, [bl[x] + x_len, bl[y]], [bl[x], bl[y] + y_len], [bl[x] + x_len, bl[y] + y_len])
)
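# Editor's illustrative example (not in the original script): make_ortho_box((0, 0), 2, 1)
# returns the corners in (bottom-left, bottom-right, top-left, top-right) order:
# [[0, 0], [2, 0], [0, 1], [2, 1]].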
orth_epi_box = make_ortho_box(epi_bl, epi_x_len, epi_y_len)
# Structural bounding box
anat_bl = (25, 3)
anat_x_len = 185
anat_y_len = 155
anat_box = make_ortho_box(anat_bl, anat_x_len, anat_y_len)
def plot_line(pt1, pt2, fmt='r-', label=None):
plt.plot([pt1[0], pt2[0]], [pt1[1], pt2[1]], fmt, label=label)
def plot_box(box_def, fmt='r-', label=None):
bl, br, tl, tr = box_def
plot_line(bl, br, fmt, label=label)
plot_line(bl, tl, fmt)
plot_line(br, tr, fmt)
plot_line(tl, tr, fmt)
def rotate_box(box_def, angle, origin):
origin = np.atleast_2d(origin)
box_def_zeroed = box_def - origin
cost = math.cos(angle)
sint = math.sin(angle)
rot_array = np.array([[cost, -sint], [sint, cost]])
box_def_zeroed = np.dot(rot_array, box_def_zeroed.T).T
return box_def_zeroed + origin
def labeled_point(pt, marker, text, markersize=10, color='k'):
plt.plot(pt[0], pt[1], marker, markersize=markersize)
plt.text(pt[0] + markersize / 2, pt[1] - markersize / 2, text, color=color)
def plot_localizer():
plt.imshow(sagittal, cmap='gray', origin='lower', extent=sag_extents)
plt.xlabel('mm from isocenter')
plt.ylabel('mm from isocenter')
def METHOD_NAME():
# Plot using global variables
plot_localizer()
def vx2mm(pts):
return pts - iso_center
plot_box(vx2mm(rot_box), label='EPI bounding box')
plot_box(vx2mm(anat_box), 'b-', label='Structural bounding box')
labeled_point(vx2mm(epi_center), 'ro', 'EPI FOV center')
labeled_point(vx2mm(anat_center), 'bo', 'Structural FOV center')
labeled_point(vx2mm(iso_center), 'g^', 'Magnet isocenter')
plt.axis('tight')
plt.legend(loc='lower right')
plt.title('Scanner localizer image')
plt.savefig('localizer.png')
angle = 0.3
rot_box = rotate_box(orth_epi_box, angle, orth_epi_box[0])
epi_center = np.mean(rot_box, axis=0)
anat_center = np.mean(anat_box, axis=0)
# y axis on the plot is first axis of image
sag_y, sag_x = sagittal.shape
iso_center = (np.array([sag_x, sag_y]) - 1) / 2.0
sag_extents = [-iso_center[0], iso_center[0], -iso_center[1], iso_center[1]]
# Back to image coordinates
br_img = np.array([0, rot_box[0, 0], rot_box[0, 1]])
epi_trans = np.eye(4)
epi_trans[:3, 3] = -br_img
rot = np.eye(4)
rot[:3, :3] = euler.euler2mat(0, 0, -angle)
# downsample to make smaller output image
downsamp = 1 / 3
epi_scale = np.diag([downsamp, downsamp, downsamp, 1])
# template voxels to epi box image voxels
vox2epi_vox = epi_scale.dot(rot.dot(epi_trans))
# epi image voxels to mm
epi_vox2mm = t2_img.affine.dot(npl.inv(vox2epi_vox))
# downsampled image shape
epi_vox_shape = np.array([data.shape[0], epi_x_len, epi_y_len]) * downsamp
# Make sure dimensions are odd by rounding up or down
# This makes the voxel center an integer index, which is convenient
epi_vox_shape = [np.floor(d) if np.floor(d) % 2 else np.ceil(d) for d in epi_vox_shape]
# resample, preserving affine
epi_cmap = nca.vox2mni(epi_vox2mm)
epi = rsm.resample(t2_img, epi_cmap, np.eye(4), epi_vox_shape)
epi_data = epi.get_fdata()
# Do the same kind of thing for the anatomical scan
anat_vox_sizes = [2.75, 2.75, 2.75]
anat_scale = npl.inv(np.diag(anat_vox_sizes + [1]))
anat_trans = np.eye(4)
anat_trans[:3, 3] = -np.array([0, anat_box[0, 0], anat_box[0, 1]])
vox2anat_vox = anat_scale.dot(anat_trans)
anat_vox2mm = t1_img.affine.dot(npl.inv(vox2anat_vox))
anat_vox_shape = np.round(np.divide([data.shape[0], anat_x_len, anat_y_len], anat_vox_sizes))
anat_cmap = nca.vox2mni(anat_vox2mm)
anat = rsm.resample(t1_img, anat_cmap, np.eye(4), anat_vox_shape)
anat_data = anat.get_fdata()
METHOD_NAME()
nipy.save_image(epi, 'someones_epi.nii.gz', dtype_from='uint8')
nipy.save_image(anat, 'someones_anatomy.nii.gz', dtype_from='uint8')
# Do progressive transforms
epi2_vox = make_ortho_box((0, 0), epi_vox_shape[1], epi_vox_shape[2])
epi_vox_sizes = np.sqrt(np.sum(epi_vox2mm[:3, :3] ** 2, axis=0))
epi2_scaled = np.diag(epi_vox_sizes[1:]).dot(epi2_vox.T).T
epi2_rotted = rotate_box(epi2_scaled, angle, (0, 0))
epi2_pulled = epi2_rotted + epi_vox2mm[1:3, 3]
plt.figure()
plot_localizer()
plot_box(epi2_vox, 'k', label='voxels')
plot_box(epi2_scaled, 'g', label='scaled')
plot_box(epi2_rotted, 'y', label='scaled, rotated')
plot_box(epi2_pulled, 'r', label='scaled, rotated, translated')
plt.legend(loc='upper left')
plt.title('Anatomy of an affine transform')
plt.savefig('illustrating_affine.png') |
date to date number | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for API compatibility between TensorFlow release versions.
See [Version
Compatibility](https://tensorflow.org/guide/version_compat#backward_forward)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from tensorflow.python.util import tf_contextlib
from tensorflow.python.util.tf_export import tf_export
# This value changes every day with an automatic CL. It can be modified in code
# via `forward_compatibility_horizon()` or with the environment variable
# TF_FORWARD_COMPATIBILITY_DELTA_DAYS, which is added to the compatibility date.
_FORWARD_COMPATIBILITY_HORIZON = datetime.date(2019, 8, 21)
_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME = "TF_FORWARD_COMPATIBILITY_DELTA_DAYS"
_FORWARD_COMPATIBILITY_DATE_NUMBER = None
def METHOD_NAME(year, month, day):
return (year << 9) | (month << 5) | day
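# Editor's illustrative example (not part of the original module): day occupies the low
# 5 bits (< 32) and month the next 4 (< 16), so the packed number preserves chronological
# order, e.g. METHOD_NAME(2019, 8, 21) == (2019 << 9) | (8 << 5) | 21 == 1034005.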
def _update_forward_compatibility_date_number(date_to_override=None):
"""Update the base date to compare in forward_compatible function."""
global _FORWARD_COMPATIBILITY_DATE_NUMBER
if date_to_override:
date = date_to_override
else:
date = _FORWARD_COMPATIBILITY_HORIZON
delta_days = os.getenv(_FORWARD_COMPATIBILITY_DELTA_DAYS_VAR_NAME)
if delta_days:
date += datetime.timedelta(days=int(delta_days))
_FORWARD_COMPATIBILITY_DATE_NUMBER = METHOD_NAME(
date.year, date.month, date.day)
_update_forward_compatibility_date_number()
@tf_export("compat.forward_compatible")
def forward_compatible(year, month, day):
"""Return true if the forward compatibility window has expired.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
Forward-compatibility refers to scenarios where the producer of a TensorFlow
model (a GraphDef or SavedModel) is compiled against a version of the
TensorFlow library newer than what the consumer was compiled against. The
"producer" is typically a Python program that constructs and trains a model
while the "consumer" is typically another program that loads and serves the
model.
TensorFlow has been supporting a 3 week forward-compatibility window for
programs compiled from source at HEAD.
For example, consider the case where a new operation `MyNewAwesomeAdd` is
created with the intent of replacing the implementation of an existing Python
wrapper - `tf.add`. The Python wrapper implementation should change from
something like:
```python
def add(inputs, name=None):
return gen_math_ops.add(inputs, name)
```
to:
```python
from tensorflow.python.compat import compat
def add(inputs, name=None):
if compat.forward_compatible(year, month, day):
# Can use the awesome new implementation.
return gen_math_ops.my_new_awesome_add(inputs, name)
    # To maintain forward compatibility, use the old implementation.
return gen_math_ops.add(inputs, name)
```
Where `year`, `month`, and `day` specify the date beyond which binaries
that consume a model are expected to have been updated to include the
new operations. This date is typically at least 3 weeks beyond the date
the code that adds the new operation is committed.
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Returns:
True if the caller can expect that serialized TensorFlow graphs produced
can be consumed by programs that are compiled with the TensorFlow library
source code after (year, month, day).
"""
return _FORWARD_COMPATIBILITY_DATE_NUMBER > METHOD_NAME(
year, month, day)
@tf_export("compat.forward_compatibility_horizon")
@tf_contextlib.contextmanager
def forward_compatibility_horizon(year, month, day):
"""Context manager for testing forward compatibility of generated graphs.
See [Version
compatibility](https://tensorflow.org/guide/version_compat#backward_forward).
To ensure forward compatibility of generated graphs (see `forward_compatible`)
with older binaries, new features can be gated with:
```python
  if compat.forward_compatible(year=2018, month=8, day=1):
generate_graph_with_new_features()
else:
generate_graph_so_older_binaries_can_consume_it()
```
However, when adding new features, one may want to unittest it before
the forward compatibility window expires. This context manager enables
such tests. For example:
```python
from tensorflow.python.compat import compat
def testMyNewFeature(self):
    with compat.forward_compatibility_horizon(2018, 8, 2):
# Test that generate_graph_with_new_features() has an effect
```
Args:
year: A year (e.g., 2018). Must be an `int`.
month: A month (1 <= month <= 12) in year. Must be an `int`.
day: A day (1 <= day <= 31, or 30, or 29, or 28) in month. Must be an
`int`.
Yields:
Nothing.
"""
try:
_update_forward_compatibility_date_number(datetime.date(year, month, day))
yield
finally:
_update_forward_compatibility_date_number() |
dump | # cython: auto_cpdef=True
"""
Python Lexical Analyser
Converting NFA to DFA
"""
from __future__ import absolute_import
from . import Machines
from .Machines import LOWEST_PRIORITY
from .Transitions import TransitionMap
def nfa_to_dfa(old_machine, debug=None):
"""
Given a nondeterministic Machine, return a new equivalent
Machine which is deterministic.
"""
# We build a new machine whose states correspond to sets of states
# in the old machine. Initially we add a new state corresponding to
# the epsilon-closure of each initial old state. Then we give transitions
# to each new state which are the union of all transitions out of any
# of the corresponding old states. The new state reached on a given
# character is the one corresponding to the set of states reachable
# on that character from any of the old states. As new combinations of
# old states are created, new states are added as needed until closure
# is reached.
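    # Worked micro-example (editor's illustration, not in the original source): for an NFA
    # with states {0, 1, 2}, an epsilon move 0 -> 1 and a transition 1 --'a'--> 2, the
    # initial DFA state corresponds to the closure {0, 1}; on 'a' it moves to the DFA state
    # for {2}. Newly created state sets are appended to new_machine.states, so the loop
    # below keeps running until no unseen combination of old states appears.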
new_machine = Machines.FastMachine()
state_map = StateMap(new_machine)
# Seed the process using the initial states of the old machine.
# Make the corresponding new states into initial states of the new
# machine with the same names.
for (key, old_state) in old_machine.initial_states.items():
new_state = state_map.old_to_new(epsilon_closure(old_state))
new_machine.make_initial_state(key, new_state)
# Tricky bit here: we add things to the end of this list while we're
# iterating over it. The iteration stops when closure is achieved.
for new_state in new_machine.states:
transitions = TransitionMap()
for old_state in state_map.new_to_old(new_state):
for event, old_target_states in old_state.transitions.items():
if event and old_target_states:
transitions.add_set(event, set_epsilon_closure(old_target_states))
for event, old_states in transitions.items():
new_machine.add_transitions(new_state, event, state_map.old_to_new(old_states))
if debug:
debug.write("\n===== State Mapping =====\n")
state_map.METHOD_NAME(debug)
return new_machine
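# Usage sketch (hedged): `nfa` below stands for any nondeterministic
# Machines.Machine built elsewhere by the Plex lexer machinery; the name itself
# is illustrative and not defined in this module.
#
#   dfa = nfa_to_dfa(nfa)                    # deterministic FastMachine
#   dfa = nfa_to_dfa(nfa, debug=log_file)    # also dumps the state mapping
#
# This is the classic subset construction: each new state represents the
# epsilon-closure of a set of old states, so in the worst case the number of
# new states grows exponentially with the number of old states.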
def set_epsilon_closure(state_set):
"""
Given a set of states, return the union of the epsilon
closures of its member states.
"""
result = {}
for state1 in state_set:
for state2 in epsilon_closure(state1):
result[state2] = 1
return result
def epsilon_closure(state):
"""
Return the set of states reachable from the given state
by epsilon moves.
"""
# Cache the result
result = state.epsilon_closure
if result is None:
result = {}
state.epsilon_closure = result
add_to_epsilon_closure(result, state)
return result
def add_to_epsilon_closure(state_set, state):
"""
Recursively add to |state_set| states reachable from the given state
by epsilon moves.
"""
if not state_set.get(state, 0):
state_set[state] = 1
state_set_2 = state.transitions.get_epsilon()
if state_set_2:
for state2 in state_set_2:
add_to_epsilon_closure(state_set, state2)
class StateMap(object):
"""
Helper class used by nfa_to_dfa() to map back and forth between
sets of states from the old machine and states of the new machine.
"""
def __init__(self, new_machine):
self.new_machine = new_machine # Machine
self.old_to_new_dict = {} # {(old_state,...) : new_state}
self.new_to_old_dict = {} # {id(new_state) : old_state_set}
def old_to_new(self, old_state_set):
"""
Return the state of the new machine corresponding to the
set of old machine states represented by |state_set|. A new
state will be created if necessary. If any of the old states
are accepting states, the new state will be an accepting state
with the highest priority action from the old states.
"""
key = self.make_key(old_state_set)
new_state = self.old_to_new_dict.get(key, None)
if not new_state:
action = self.highest_priority_action(old_state_set)
new_state = self.new_machine.new_state(action)
self.old_to_new_dict[key] = new_state
self.new_to_old_dict[id(new_state)] = old_state_set
return new_state
def highest_priority_action(self, state_set):
best_action = None
best_priority = LOWEST_PRIORITY
for state in state_set:
priority = state.action_priority
if priority > best_priority:
best_action = state.action
best_priority = priority
return best_action
def new_to_old(self, new_state):
"""Given a new state, return a set of corresponding old states."""
return self.new_to_old_dict[id(new_state)]
def make_key(self, state_set):
"""
Convert a set of states into a uniquified
sorted tuple suitable for use as a dictionary key.
"""
return tuple(sorted(state_set))
def METHOD_NAME(self, file):
from .Transitions import state_set_str
for new_state in self.new_machine.states:
old_state_set = self.new_to_old_dict[id(new_state)]
file.write(" State %s <-- %s\n" % (
new_state['number'], state_set_str(old_state_set))) |
get notifyicondataw | ''' Defines ctypes windows api.
'''
__all__ = ('GUID', 'get_DLLVERSIONINFO', 'MAKEDLLVERULL',
'get_NOTIFYICONDATAW', 'CreateWindowExW', 'WindowProc',
'DefWindowProcW', 'get_WNDCLASSEXW', 'GetModuleHandleW',
'RegisterClassExW', 'UpdateWindow', 'LoadImageW',
'Shell_NotifyIconW', 'DestroyIcon', 'UnregisterClassW',
'DestroyWindow', 'LoadIconW', 'get_PATH')
import ctypes
from ctypes import Structure, windll, sizeof, POINTER, WINFUNCTYPE
from ctypes.wintypes import (
DWORD, HICON, HWND, UINT, WCHAR, WORD, BYTE,
LPCWSTR, INT, LPVOID, HINSTANCE, HMENU, LPARAM,
WPARAM, HBRUSH, HMODULE, ATOM, BOOL, HANDLE
)
LRESULT = LPARAM
HRESULT = HANDLE
HCURSOR = HICON
class GUID(Structure):
_fields_ = [
('Data1', DWORD),
('Data2', WORD),
('Data3', WORD),
('Data4', BYTE * 8)
]
class DLLVERSIONINFO(Structure):
_fields_ = [
('cbSize', DWORD),
('dwMajorVersion', DWORD),
('dwMinorVersion', DWORD),
('dwBuildNumber', DWORD),
('dwPlatformID', DWORD),
]
def get_DLLVERSIONINFO(*largs):
version_info = DLLVERSIONINFO(*largs)
version_info.cbSize = sizeof(DLLVERSIONINFO)
return version_info
def MAKEDLLVERULL(major, minor, build, sp):
return (major << 48) | (minor << 32) | (build << 16) | sp
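# Worked example (sketch): MAKEDLLVERULL(6, 0, 6, 0) packs shell32 version
# 6.0.6.0 into a single 64-bit integer,
#   (6 << 48) | (0 << 32) | (6 << 16) | 0 == 0x0006000000060000
# so DLL versions can be compared with plain integer ordering further below.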
NOTIFYICONDATAW_fields = [
("cbSize", DWORD),
("hWnd", HWND),
("uID", UINT),
("uFlags", UINT),
("uCallbackMessage", UINT),
("hIcon", HICON),
("szTip", WCHAR * 128),
("dwState", DWORD),
("dwStateMask", DWORD),
("szInfo", WCHAR * 256),
("uVersion", UINT),
("szInfoTitle", WCHAR * 64),
("dwInfoFlags", DWORD),
("guidItem", GUID),
("hBalloonIcon", HICON),
]
class NOTIFYICONDATAW(Structure):
_fields_ = NOTIFYICONDATAW_fields[:]
class NOTIFYICONDATAW_V3(Structure):
_fields_ = NOTIFYICONDATAW_fields[:-1]
class NOTIFYICONDATAW_V2(Structure):
_fields_ = NOTIFYICONDATAW_fields[:-2]
class NOTIFYICONDATAW_V1(Structure):
_fields_ = NOTIFYICONDATAW_fields[:6]
NOTIFYICONDATA_V3_SIZE = sizeof(NOTIFYICONDATAW_V3)
NOTIFYICONDATA_V2_SIZE = sizeof(NOTIFYICONDATAW_V2)
NOTIFYICONDATA_V1_SIZE = sizeof(NOTIFYICONDATAW_V1)
def METHOD_NAME(*largs):
notify_data = NOTIFYICONDATAW(*largs)
# get shell32 version to find correct NOTIFYICONDATAW size
DllGetVersion = windll.Shell32.DllGetVersion
DllGetVersion.argtypes = [POINTER(DLLVERSIONINFO)]
DllGetVersion.restype = HRESULT
version = get_DLLVERSIONINFO()
if DllGetVersion(version):
raise Exception('Cannot get Windows version numbers.')
v = MAKEDLLVERULL(version.dwMajorVersion, version.dwMinorVersion,
version.dwBuildNumber, version.dwPlatformID)
# from the version info find the NOTIFYICONDATA size
if v >= MAKEDLLVERULL(6, 0, 6, 0):
notify_data.cbSize = sizeof(NOTIFYICONDATAW)
elif v >= MAKEDLLVERULL(6, 0, 0, 0):
notify_data.cbSize = NOTIFYICONDATA_V3_SIZE
elif v >= MAKEDLLVERULL(5, 0, 0, 0):
notify_data.cbSize = NOTIFYICONDATA_V2_SIZE
else:
notify_data.cbSize = NOTIFYICONDATA_V1_SIZE
return notify_data
CreateWindowExW = windll.User32.CreateWindowExW
CreateWindowExW.argtypes = [DWORD, ATOM, LPCWSTR, DWORD, INT, INT, INT, INT,
HWND, HMENU, HINSTANCE, LPVOID]
CreateWindowExW.restype = HWND
GetModuleHandleW = windll.Kernel32.GetModuleHandleW
GetModuleHandleW.argtypes = [LPCWSTR]
GetModuleHandleW.restype = HMODULE
WindowProc = WINFUNCTYPE(LRESULT, HWND, UINT, WPARAM, LPARAM)
DefWindowProcW = windll.User32.DefWindowProcW
DefWindowProcW.argtypes = [HWND, UINT, WPARAM, LPARAM]
DefWindowProcW.restype = LRESULT
class WNDCLASSEXW(Structure):
_fields_ = [
('cbSize', UINT),
('style', UINT),
('lpfnWndProc', WindowProc),
('cbClsExtra', INT),
('cbWndExtra', INT),
('hInstance', HINSTANCE),
('hIcon', HICON),
('hCursor', HCURSOR),
('hbrBackground', HBRUSH),
('lpszMenuName', LPCWSTR),
('lpszClassName', LPCWSTR),
('hIconSm', HICON),
]
def get_WNDCLASSEXW(*largs):
wnd_class = WNDCLASSEXW(*largs)
wnd_class.cbSize = sizeof(WNDCLASSEXW)
return wnd_class
RegisterClassExW = windll.User32.RegisterClassExW
RegisterClassExW.argtypes = [POINTER(WNDCLASSEXW)]
RegisterClassExW.restype = ATOM
UpdateWindow = windll.User32.UpdateWindow
UpdateWindow.argtypes = [HWND]
UpdateWindow.restype = BOOL
LoadImageW = windll.User32.LoadImageW
LoadImageW.argtypes = [HINSTANCE, LPCWSTR, UINT, INT, INT, UINT]
LoadImageW.restype = HANDLE
Shell_NotifyIconW = windll.Shell32.Shell_NotifyIconW
Shell_NotifyIconW.argtypes = [DWORD, POINTER(NOTIFYICONDATAW)]
Shell_NotifyIconW.restype = BOOL
DestroyIcon = windll.User32.DestroyIcon
DestroyIcon.argtypes = [HICON]
DestroyIcon.restype = BOOL
UnregisterClassW = windll.User32.UnregisterClassW
UnregisterClassW.argtypes = [ATOM, HINSTANCE]
UnregisterClassW.restype = BOOL
DestroyWindow = windll.User32.DestroyWindow
DestroyWindow.argtypes = [HWND]
DestroyWindow.restype = BOOL
LoadIconW = windll.User32.LoadIconW
LoadIconW.argtypes = [HINSTANCE, LPCWSTR]
LoadIconW.restype = HICON
class SYSTEM_POWER_STATUS(Structure):
_fields_ = [
('ACLineStatus', BYTE),
('BatteryFlag', BYTE),
('BatteryLifePercent', BYTE),
('Reserved1', BYTE),
('BatteryLifeTime', DWORD),
('BatteryFullLifeTime', DWORD),
]
SystemPowerStatusP = POINTER(SYSTEM_POWER_STATUS)
GetSystemPowerStatus = windll.kernel32.GetSystemPowerStatus
GetSystemPowerStatus.argtypes = [SystemPowerStatusP]
GetSystemPowerStatus.restype = BOOL
class GUID_(Structure):
_fields_ = [
('Data1', DWORD),
('Data2', WORD),
('Data3', WORD),
('Data4', BYTE * 8)
]
def __init__(self, uuid_):
Structure.__init__(self)
self.Data1, self.Data2, self.Data3, self.Data4[0], self.Data4[1], rest\
= uuid_.fields
for i in range(2, 8):
self.Data4[i] = rest >> (8 - i - 1) * 8 & 0xff
_CoTaskMemFree = windll.ole32.CoTaskMemFree
_CoTaskMemFree.restype = None
_CoTaskMemFree.argtypes = [ctypes.c_void_p]
_SHGetKnownFolderPath = windll.shell32.SHGetKnownFolderPath
_SHGetKnownFolderPath.argtypes = [
POINTER(GUID_),
DWORD,
HANDLE,
POINTER(ctypes.c_wchar_p)
]
class PathNotFoundException(Exception):
pass
def get_PATH(folderid):
fid = GUID_(folderid)
pPath = ctypes.c_wchar_p()
S_OK = 0
Result = _SHGetKnownFolderPath(ctypes.byref(fid),
0, None, ctypes.byref(pPath))
if Result != S_OK:
raise PathNotFoundException()
path = pPath.value
_CoTaskMemFree(pPath)
return path |
update label | #################################################################
# seSceneGraphExplorer.py
# Originally from SceneGraphExplorer.py
# Altered by Yi-Hong Lin, [email protected], 2004
#
# we need a customized SceneGraphExplorer.
#
# Don't forget to check the seTree.
#
#################################################################
from direct.showbase.DirectObject import DirectObject
from seTree import TreeNode, TreeItem
import Pmw
from tkinter import IntVar, Frame, Label, Toplevel
import tkinter
# changing these strings requires changing sceneEditor.py SGE_ strs too!
# This list of items will be shown in the pop-up window when the user
# right-clicks on any node in the graph. It is also the main reason we decided
# to copy the original implementation rather than inherit from it:
# apart from the drawing code, we have changed a lot of things...
DEFAULT_MENU_ITEMS = [
'Update Explorer',
'Separator',
'Properties',
'Separator',
'Duplicate',
'Remove',
'Add Dummy',
'Add Collision Object',
'Metadata',
'Separator',
'Set as Reparent Target',
'Reparent to Target',
'Separator',
'Animation Panel',
'Blend Animation Panel',
'MoPath Panel',
'Align Tool',
'Separator']
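# Note (added for clarity): every non-'Separator' entry above becomes a
# right-click menu command on a tree node. SceneGraphExplorerItem.MenuCommand()
# below converts the chosen item text into a messenger event named
# 'SGE_' + <item text> (e.g. 'SGE_Remove') with the node path as argument,
# which sceneEditor.py is expected to accept() -- hence the warning above about
# keeping these strings in sync.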
class seSceneGraphExplorer(Pmw.MegaWidget, DirectObject):
"Graphical display of a scene graph"
def __init__(self, parent = None, nodePath = render, **kw):
# Define the megawidget options.
optiondefs = (
('menuItems', [], Pmw.INITOPT),
)
self.defineoptions(kw, optiondefs)
# Initialise superclass
Pmw.MegaWidget.__init__(self, parent)
# Initialize some class variables
self.nodePath = nodePath
# Create the components.
# Setup up container
interior = self.interior()
interior.configure(relief = tkinter.GROOVE, borderwidth = 2)
# Create a label and an entry
self._scrolledCanvas = self.createcomponent(
'scrolledCanvas',
(), None,
Pmw.ScrolledCanvas, (interior,),
hull_width = 200, hull_height = 300,
usehullsize = 1)
self._canvas = self._scrolledCanvas.component('canvas')
self._canvas['scrollregion'] = ('0i', '0i', '2i', '4i')
self._scrolledCanvas.resizescrollregion()
self._scrolledCanvas.pack(padx = 3, pady = 3, expand=1, fill = tkinter.BOTH)
self._canvas.bind('<ButtonPress-2>', self.mouse2Down)
self._canvas.bind('<B2-Motion>', self.mouse2Motion)
self._canvas.bind('<Configure>',
lambda e, sc = self._scrolledCanvas:
sc.resizescrollregion())
self.interior().bind('<Destroy>', self.onDestroy)
# Create the contents
self._treeItem = SceneGraphExplorerItem(self.nodePath)
self._node = TreeNode(self._canvas, None, self._treeItem,
DEFAULT_MENU_ITEMS + self['menuItems'])
self._node.expand()
self._parentFrame = Frame(interior)
self._label = self.createcomponent(
'parentLabel',
(), None,
Label, (interior,),
text = 'Active Reparent Target: ',
anchor = tkinter.W, justify = tkinter.LEFT)
self._label.pack(fill = tkinter.X)
# Add update parent label
def METHOD_NAME(nodePath = None, s = self):
s._label['text'] = 'Active Reparent Target: ' + nodePath.getName()
self.accept('DIRECT_activeParent', METHOD_NAME)
# Add update hook
self.accept('SGE_Update Explorer',
lambda np, s = self: s.update())
# Check keywords and initialise options based on input values.
self.initialiseoptions(seSceneGraphExplorer)
def update(self):
""" Refresh scene graph explorer """
self._node.update()
def mouse2Down(self, event):
self._width = 1.0 * self._canvas.winfo_width()
self._height = 1.0 * self._canvas.winfo_height()
xview = self._canvas.xview()
yview = self._canvas.yview()
self._left = xview[0]
self._top = yview[0]
self._dxview = xview[1] - xview[0]
self._dyview = yview[1] - yview[0]
self._2lx = event.x
self._2ly = event.y
def mouse2Motion(self,event):
newx = self._left - ((event.x - self._2lx)/self._width) * self._dxview
self._canvas.xview_moveto(newx)
newy = self._top - ((event.y - self._2ly)/self._height) * self._dyview
self._canvas.yview_moveto(newy)
self._2lx = event.x
self._2ly = event.y
self._left = self._canvas.xview()[0]
self._top = self._canvas.yview()[0]
def onDestroy(self, event):
# Remove hooks
self.ignore('DIRECT_activeParent')
self.ignore('SGE_Update Explorer')
def deSelectTree(self):
self._node.deselecttree()
def selectNodePath(self,nodePath, callBack=True):
item = self._node.find(nodePath.get_key())
        if item is not None:
item.select(callBack)
else:
print('----SGE: Error Selection')
class SceneGraphExplorerItem(TreeItem):
"""Example TreeItem subclass -- browse the file system."""
def __init__(self, nodePath):
self.nodePath = nodePath
def GetText(self):
type = self.nodePath.node().getType().getName()
name = self.nodePath.getName()
return type + " " + name
def GetTextForEdit(self):
name = self.nodePath.getName()
return name
def GetKey(self):
return self.nodePath.get_key()
def IsEditable(self):
# All nodes' names can be edited nowadays.
return 1
#return issubclass(self.nodePath.node().__class__, NamedNode)
def SetText(self, text):
try:
messenger.send('SGE_changeName', [self.nodePath, text])
except AttributeError:
pass
def GetIconName(self):
return "sphere2" # XXX wish there was a "file" icon
def IsExpandable(self):
return self.nodePath.getNumChildren() != 0
def GetSubList(self):
sublist = []
for nodePath in self.nodePath.getChildren():
item = SceneGraphExplorerItem(nodePath)
sublist.append(item)
return sublist
def OnSelect(self, callback):
messenger.send('SGE_Flash', [self.nodePath])
if not callback:
messenger.send('SGE_madeSelection', [self.nodePath, callback])
else:
messenger.send('SGE_madeSelection', [self.nodePath])
def MenuCommand(self, command):
messenger.send('SGE_' + command, [self.nodePath])
def explore(nodePath = render):
tl = Toplevel()
tl.title('Explore: ' + nodePath.getName())
sge = seSceneGraphExplorer(parent = tl, nodePath = nodePath)
sge.pack(expand = 1, fill = 'both')
return sge |
test42 | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2021 sliptonic <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Part
import Path
import Path.Base.Generator.drill as generator
import PathTests.PathTestUtils as PathTestUtils
Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
Path.Log.trackModule(Path.Log.thisModule())
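# Quick reference, inferred from the tests below rather than from the
# generator's own documentation: generator.generate(edge) emits a plain G81
# drilling cycle; dwelltime=<float> switches it to G82, peckdepth=<float> to
# G83, and peckdepth combined with chipBreak=True to G73. The retract word 'R'
# defaults to the Z coordinate of the edge's start point unless retractheight
# is given, and combining dwelltime with peckdepth raises ValueError.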
class TestPathDrillGenerator(PathTestUtils.PathTestBase):
def test00(self):
"""Test Basic Drill Generator Return"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
result = generator.generate(e)
self.assertTrue(type(result) is list)
self.assertTrue(type(result[0]) is Path.Command)
command = result[0]
self.assertTrue(command.Name == "G81")
self.assertTrue(command.Parameters["R"] == 10)
self.assertTrue(command.Parameters["X"] == 0)
self.assertTrue(command.Parameters["Y"] == 0)
self.assertTrue(command.Parameters["Z"] == 0)
# repeat must be > 0
args = {"edge": e, "repeat": 0}
self.assertRaises(ValueError, generator.generate, **args)
# repeat must be integer
args = {"edge": e, "repeat": 1.5}
self.assertRaises(ValueError, generator.generate, **args)
def test10(self):
"""Test edge alignment check"""
v1 = FreeCAD.Vector(0, 10, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
self.assertRaises(ValueError, generator.generate, e)
v1 = FreeCAD.Vector(0, 0, 0)
v2 = FreeCAD.Vector(0, 0, 10)
e = Part.makeLine(v1, v2)
self.assertRaises(ValueError, generator.generate, e)
def test20(self):
"""Test Basic Peck Drill Generator Return"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
result = generator.generate(e, peckdepth=1.2)
self.assertTrue(type(result) is list)
self.assertTrue(type(result[0]) is Path.Command)
command = result[0]
self.assertTrue(command.Name == "G83")
self.assertTrue(command.Parameters["Q"] == 1.2)
# peckdepth must be a float
args = {"edge": e, "peckdepth": 1}
self.assertRaises(ValueError, generator.generate, **args)
def test30(self):
"""Test Basic Dwell Drill Generator Return"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
result = generator.generate(e, dwelltime=0.5)
self.assertTrue(type(result) is list)
self.assertTrue(type(result[0]) is Path.Command)
command = result[0]
self.assertTrue(command.Name == "G82")
self.assertTrue(command.Parameters["P"] == 0.5)
# dwelltime should be a float
args = {"edge": e, "dwelltime": 1}
self.assertRaises(ValueError, generator.generate, **args)
def test40(self):
"""Specifying retract height should set R parameter to specified value"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
result = generator.generate(e, retractheight=20.0)
command = result[0]
self.assertTrue(command.Parameters["R"] == 20.0)
def test41(self):
"""Not specifying retract height should set R parameter to Z position of start point"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
result = generator.generate(e)
command = result[0]
self.assertTrue(command.Parameters["R"] == 10.0)
def METHOD_NAME(self):
"""Non-float retract height should raise ValueError"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
args = {"edge": e, "retractheight": 1}
self.assertRaises(ValueError, generator.generate, **args)
args = {"edge": e, "retractheight": "1"}
self.assertRaises(ValueError, generator.generate, **args)
def test50(self):
"""Test Error if dwell and peck"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
        # dwelltime and peckdepth cannot be combined
args = {"edge": e, "dwelltime": 1.0, "peckdepth": 1.0}
self.assertRaises(ValueError, generator.generate, **args)
def test60(self):
"""Test chipBreak"""
v1 = FreeCAD.Vector(0, 0, 10)
v2 = FreeCAD.Vector(0, 0, 0)
e = Part.makeLine(v1, v2)
args = {"edge": e, "peckdepth": 1.0, "chipBreak": True}
result = generator.generate(**args)
command = result[0]
self.assertTrue(command.Name == "G73") |
tag frequency | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from datetime import datetime
from distutils.util import strtobool
from types import SimpleNamespace
from typing import Dict
import numpy as np
from toolz import compose_left
from gluonts import json
from gluonts.exceptions import GluonTSDataError
parse_bool = compose_left(strtobool, bool)
def parse_attribute(ty, value: str):
if ty == "numeric":
return int(value)
if ty == "string":
return value
if ty == "date":
return datetime.strptime(value, "%Y-%m-%d %H-%M-%S")
raise AttributeError(ty)
def frequency_converter(freq: str):
parts = freq.split("_")
if len(parts) == 1:
return convert_base(parts[0])
if len(parts) == 2:
return convert_multiple(parts[0]) + convert_base(parts[1])
raise ValueError(f"Invalid frequency string {freq}.")
BASE_FREQ_TO_PANDAS_OFFSET: Dict[str, str] = {
"seconds": "S",
"minutely": "T",
"minutes": "T",
"hourly": "H",
"hours": "H",
"daily": "D",
"days": "D",
"weekly": "W",
"weeks": "W",
"monthly": "M",
"months": "M",
"quarterly": "Q",
"quarters": "Q",
"yearly": "Y",
"years": "Y",
}
def convert_base(text: str) -> str:
try:
return BASE_FREQ_TO_PANDAS_OFFSET[text]
except KeyError:
raise GluonTSDataError(
f'"{text}" is not recognized as a frequency string'
)
def convert_multiple(text: str) -> str:
if text.isnumeric():
return text
if text == "half":
return "0.5"
raise ValueError(f"Unknown frequency multiple {text}.")
class TSFReader:
def __init__(
self,
path,
target_name="target",
):
self.path = path
self.target_name = target_name
self.meta = SimpleNamespace(columns={})
def read(self):
with open(self.path, encoding="latin1") as in_file:
# strip whitespace
lines = map(str.strip, in_file)
# ignore all lines starting with #
lines = filter(lambda line: not line.startswith("#"), lines)
data_tag_found = self._read_header(lines)
assert data_tag_found, "Missing @data tag."
assert self.meta.columns, (
"Missing attribute section. Attribute section must come before"
" data."
)
assert self.target_name not in self.meta.columns
self.meta.columns[self.target_name] = None
data = list(map(self._read_data, lines))
return self.meta, data
def _read_header(self, lines):
for line in lines:
assert line.startswith("@")
stop = self._tag(line[1:])
if stop:
return True
return False
def _read_data(self, line):
parts = line.split(":")
assert len(parts) == len(
self.meta.columns
), "Missing attributes/values in series."
*attributes, target = parts
record = {}
record[self.target_name] = self._data_target(target)
for (column, ty), attr in zip(self.meta.columns.items(), attributes):
record[column] = parse_attribute(ty, attr)
return record
def _data_target(self, s):
s = s.replace("?", '"nan"')
values = json.loads(f"[{s}]")
assert values, (
"A given series should contains a set of comma separated numeric"
" values. At least one numeric value should be there in a series."
" Missing values should be indicated with ? symbol"
)
return np.array(values, dtype=float)
def _tag(self, line):
fn_by_tag = {
"attribute": self._tag_attribute,
"frequency": self.METHOD_NAME,
"horizon": self._tag_horizon,
"missing": self._tag_missing,
"equallength": self._tag_equallength,
"data": self._tag_data,
}
tag, *args = line.split(" ")
if tag not in fn_by_tag:
return
return fn_by_tag[tag](*args)
def _tag_attribute(self, name, ty):
self.meta.columns[name] = ty
def METHOD_NAME(self, frequency):
self.meta.frequency = frequency
def _tag_horizon(self, horizon):
self.meta.forecast_horizon = horizon
def _tag_missing(self, missing):
self.meta.has_missing_values = parse_bool(missing)
def _tag_equallength(self, equallength):
self.meta.has_equal_length = parse_bool(equallength)
def _tag_data(self):
return True |
grav pot | __author__ = "sibirrer"
import numpy as np
import scipy.special as special
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
__all__ = ["SPP"]
class SPP(LensProfileBase):
"""Class for circular power-law mass distribution."""
param_names = ["theta_E", "gamma", "center_x", "center_y"]
lower_limit_default = {
"theta_E": 0,
"gamma": 1.5,
"center_x": -100,
"center_y": -100,
}
upper_limit_default = {
"theta_E": 100,
"gamma": 2.5,
"center_x": 100,
"center_y": 100,
}
def function(self, x, y, theta_E, gamma, center_x=0, center_y=0):
"""
:param x: set of x-coordinates
:type x: array of size (n)
:param y: set of y-coordinates
:type y: array of size (n)
:param theta_E: Einstein radius of lens
:type theta_E: float.
:param gamma: power law slope of mass profile
:type gamma: <2 float
:returns: function
:raises: AttributeError, KeyError
"""
gamma = self._gamma_limit(gamma)
x_ = x - center_x
y_ = y - center_y
E = theta_E / ((3.0 - gamma) / 2.0) ** (1.0 / (1.0 - gamma))
# E = phi_E_spp
eta = -gamma + 3
p2 = x_**2 + y_**2
s2 = 0.0 # softening
return 2 * E**2 / eta**2 * ((p2 + s2) / E**2) ** (eta / 2)
def derivatives(self, x, y, theta_E, gamma, center_x=0.0, center_y=0.0):
gamma = self._gamma_limit(gamma)
xt1 = x - center_x
xt2 = y - center_y
r2 = xt1 * xt1 + xt2 * xt2
a = np.maximum(r2, 0.000001)
r = np.sqrt(a)
alpha = theta_E * (r2 / theta_E**2) ** (1 - gamma / 2.0)
fac = alpha / r
f_x = fac * xt1
f_y = fac * xt2
return f_x, f_y
def hessian(self, x, y, theta_E, gamma, center_x=0.0, center_y=0.0):
gamma = self._gamma_limit(gamma)
xt1 = x - center_x
xt2 = y - center_y
E = theta_E / ((3.0 - gamma) / 2.0) ** (1.0 / (1.0 - gamma))
# E = phi_E_spp
eta = -gamma + 3.0
P2 = xt1**2 + xt2**2
if isinstance(P2, int) or isinstance(P2, float):
a = max(0.000001, P2)
else:
a = np.empty_like(P2)
p2 = P2[P2 > 0] # in the SIS regime
a[P2 == 0] = 0.000001
a[P2 > 0] = p2
kappa = (
1.0
/ eta
* (a / E**2) ** (eta / 2 - 1)
* ((eta - 2) * (xt1**2 + xt2**2) / a + (1 + 1))
)
gamma1 = (
1.0
/ eta
* (a / E**2) ** (eta / 2 - 1)
* ((eta / 2 - 1) * (2 * xt1**2 - 2 * xt2**2) / a)
)
gamma2 = (
4 * xt1 * xt2 * (1.0 / 2 - 1 / eta) * (a / E**2) ** (eta / 2 - 2) / E**2
)
f_xx = kappa + gamma1
f_yy = kappa - gamma1
f_xy = gamma2
return f_xx, f_xy, f_xy, f_yy
@staticmethod
def rho2theta(rho0, gamma):
"""Converts 3d density into 2d projected density parameter.
:param rho0:
:param gamma:
:return:
"""
fac = (
np.sqrt(np.pi)
* special.gamma(1.0 / 2 * (-1 + gamma))
/ special.gamma(gamma / 2.0)
* 2
/ (3 - gamma)
* rho0
)
# fac = theta_E**(gamma - 1)
theta_E = fac ** (1.0 / (gamma - 1))
return theta_E
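    # In formula form (a sketch of what the code above computes): rho2theta
    # inverts
    #   theta_E**(gamma - 1) = 2 * sqrt(pi) / (3 - gamma)
    #                          * Gamma((gamma - 1) / 2) / Gamma(gamma / 2) * rho0
    # and theta2rho below is its exact inverse, so
    # rho2theta(theta2rho(theta_E, gamma), gamma) recovers theta_E.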
@staticmethod
def theta2rho(theta_E, gamma):
"""Converts projected density parameter (in units of deflection) into 3d density
parameter.
:param theta_E:
:param gamma:
:return:
"""
fac1 = (
np.sqrt(np.pi)
* special.gamma(1.0 / 2 * (-1 + gamma))
/ special.gamma(gamma / 2.0)
* 2
/ (3 - gamma)
)
fac2 = theta_E ** (gamma - 1)
rho0 = fac2 / fac1
return rho0
@staticmethod
def mass_3d(r, rho0, gamma):
"""Mass enclosed a 3d sphere or radius r.
:param r:
:param rho0:
:param gamma:
:return:
"""
mass_3d = 4 * np.pi * rho0 / (-gamma + 3) * r ** (-gamma + 3)
return mass_3d
def mass_3d_lens(self, r, theta_E, gamma):
"""
:param r:
:param theta_E:
:param gamma:
:return:
"""
rho0 = self.theta2rho(theta_E, gamma)
return self.mass_3d(r, rho0, gamma)
def mass_2d(self, r, rho0, gamma):
"""Mass enclosed projected 2d sphere of radius r.
:param r:
:param rho0:
:param gamma:
:return:
"""
alpha = (
np.sqrt(np.pi)
* special.gamma(1.0 / 2 * (-1 + gamma))
/ special.gamma(gamma / 2.0)
* r ** (2 - gamma)
/ (3 - gamma)
* 2
* rho0
)
mass_2d = alpha * r * np.pi
return mass_2d
def mass_2d_lens(self, r, theta_E, gamma):
"""
:param r: projected radius
:param theta_E: Einstein radius
:param gamma: power-law slope
        :return: 2d projected mass enclosed within radius r
"""
rho0 = self.theta2rho(theta_E, gamma)
return self.mass_2d(r, rho0, gamma)
def METHOD_NAME(self, x, y, rho0, gamma, center_x=0, center_y=0):
"""Gravitational potential (modulo 4 pi G and rho0 in appropriate units)
:param x:
:param y:
:param rho0:
:param gamma:
:param center_x:
:param center_y:
:return:
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
mass_3d = self.mass_3d(r, rho0, gamma)
pot = mass_3d / r
return pot
@staticmethod
def density(r, rho0, gamma):
"""Computes the density.
:param r:
:param rho0:
:param gamma:
:return:
"""
rho = rho0 / r**gamma
return rho
def density_lens(self, r, theta_E, gamma):
"""Computes the density at 3d radius r given lens model parameterization.
The integral in projected in units of angles (i.e. arc seconds) results in the
convergence quantity.
"""
rho0 = self.theta2rho(theta_E, gamma)
return self.density(r, rho0, gamma)
@staticmethod
def density_2d(x, y, rho0, gamma, center_x=0, center_y=0):
"""Projected density.
:param x:
:param y:
:param rho0:
:param gamma:
:param center_x:
:param center_y:
:return:
"""
x_ = x - center_x
y_ = y - center_y
r = np.sqrt(x_**2 + y_**2)
sigma = (
np.sqrt(np.pi)
* special.gamma(1.0 / 2 * (-1 + gamma))
/ special.gamma(gamma / 2.0)
* r ** (1 - gamma)
* rho0
)
return sigma
@staticmethod
def _gamma_limit(gamma):
"""Limits the power-law slope to certain bounds.
:param gamma: power-law slope
        :return: bounded power-law slope
"""
return gamma |
send error and close | import time
import threading
from DIRAC import gLogger, S_ERROR
from DIRAC.Core.Utilities.ThreadScheduler import gThreadScheduler
class TransportPool:
def __init__(self, logger=False):
if logger:
self.log = logger
else:
self.log = gLogger
self.__modLock = threading.Lock()
self.__transports = {}
self.__listenPersistConn = False
self.__msgCounter = 0
result = gThreadScheduler.addPeriodicTask(5, self.__sendKeepAlives)
if not result["OK"]:
self.log.fatal("Cannot add task to thread scheduler", result["Message"])
self.__keepAlivesTask = result["Value"]
#
# Send keep alives
#
def __sendKeepAlives(self, retries=5):
if retries == 0:
return
tridList = []
try:
tridList = [trid for trid in self.__transports]
except RuntimeError:
self.__sendKeepAlives(retries - 1)
for trid in tridList:
try:
tr = self.__transports[trid][0]
except KeyError:
continue
if not tr.getKeepAliveLapse():
continue
try:
tr.sendKeepAlive(now=time.time())
except KeyError:
continue
except Exception:
gLogger.exception("Cannot send keep alive")
# exists
def exists(self, trid):
return trid in self.__transports
# Add
def add(self, transport):
remoteAddr = transport.getRemoteAddress()
localAddr = transport.getLocalAddress()
self.log.debug(f"New connection -> {remoteAddr[0]}:{remoteAddr[1]}")
trid = f"{localAddr[0]}:{localAddr[1]}->{remoteAddr[0]}:{remoteAddr[1]}"
return self.__add(trid, transport)
def __add(self, trid, transport):
self.__modLock.acquire()
try:
if not self.exists(trid):
self.__transports[trid] = (transport, {})
finally:
self.__modLock.release()
return trid
# Data association
def associateData(self, trid, kw, value):
self.__modLock.acquire()
try:
if trid in self.__transports:
self.__transports[trid][1][kw] = value
finally:
self.__modLock.release()
def getAssociatedData(self, trid, kw):
try:
return self.__transports[trid][1][kw]
except KeyError:
return None
# Get transport
def get(self, trid):
try:
return self.__transports[trid][0]
except KeyError:
return None
# Receive
def receive(self, trid, maxBufferSize=0, blockAfterKeepAlive=True, idleReceive=False):
try:
received = self.__transports[trid][0].receiveData(maxBufferSize, blockAfterKeepAlive, idleReceive)
return received
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
# Send
def send(self, trid, msg):
try:
transport = self.__transports[trid][0]
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
return transport.sendData(msg)
# Send And Close
def METHOD_NAME(self, trid, msg):
try:
result = self.__transports[trid][0].sendData(S_ERROR(msg))
if not result["OK"]:
return result
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
finally:
self.close(trid)
def sendAndClose(self, trid, msg):
try:
result = self.__transports[trid][0].sendData(msg)
if not result["OK"]:
return result
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
finally:
self.close(trid)
def sendKeepAlive(self, trid, responseId=None):
try:
return self.__transports[trid][0].sendKeepAlive(responseId)
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
# Close
def close(self, trid):
try:
self.__transports[trid][0].close()
except KeyError:
return S_ERROR(f"No transport with id {trid} defined")
self.remove(trid)
def remove(self, trid):
self.__modLock.acquire()
try:
if trid in self.__transports:
del self.__transports[trid]
finally:
self.__modLock.release()
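# Typical lifecycle sketch (hedged): the transport object itself comes from
# DIRAC's network layer, so `transport` and `message` below are placeholders;
# the pool is normally obtained through the module-level helper defined just
# below.
#
#   pool = getGlobalTransportPool()
#   trid = pool.add(transport)   # register an open connection
#   pool.send(trid, message)     # push data to the peer
#   pool.close(trid)             # close the socket and drop the bookkeeping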
gTransportPool = None
def getGlobalTransportPool():
global gTransportPool
if not gTransportPool:
gTransportPool = TransportPool()
return gTransportPool |
controller | #!/usr/bin/env python
#
# Copyright (C) 2016 GNS3 Technologies Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import uuid
from unittest.mock import MagicMock
from gns3.link import Link
from gns3.ports.ethernet_port import EthernetPort
from gns3.modules.vpcs.vpcs_node import VPCSNode
from gns3.modules.vpcs import VPCS
from gns3.METHOD_NAME import Controller
@pytest.fixture
def devices(local_server, project):
"""
Create two VPCS for test
"""
device1 = VPCSNode(VPCS(), local_server, project)
device1._vpcs_id = str(uuid.uuid4())
device1._settings = {"name": "VPCS 1", "script_file": "", "console": None, "startup_script": None}
device1.setInitialized(True)
port = EthernetPort("E1")
port.setAdapterNumber(0)
port.setPortNumber(0)
device1._ports.append(port)
device2 = VPCSNode(VPCS(), local_server, project)
device2._vpcs_id = str(uuid.uuid4())
device2._settings = {"name": "VPCS 2", "script_file": "", "console": None, "startup_script": None}
device2.setInitialized(True)
port = EthernetPort("E2")
port.setAdapterNumber(0)
port.setPortNumber(0)
device2._ports.append(port)
return (device1, device2)
@pytest.fixture
def link(devices, METHOD_NAME, project):
link = Link(devices[0], devices[0].ports()[0], devices[1], devices[1].ports()[0])
data = {
"suspend": False,
"nodes": [
{"node_id": devices[0].node_id(), "adapter_number": 0, "port_number": 0},
{"node_id": devices[1].node_id(), "adapter_number": 0, "port_number": 0}
],
"link_style": {},
"filters": {},
}
METHOD_NAME.post.assert_called_with("/projects/{}/links".format(project.id()), link._linkCreatedCallback, body=data)
link._linkCreatedCallback({"link_id": str(uuid.uuid4())})
return link
@pytest.fixture
def METHOD_NAME():
Controller._instance = MagicMock()
return Controller._instance
def test_create_link(devices, project, METHOD_NAME):
link = Link(devices[0], devices[0].ports()[0], devices[1], devices[1].ports()[0])
data = {
"suspend": False,
"nodes": [
{"node_id": devices[0].node_id(), "adapter_number": 0, "port_number": 0},
{"node_id": devices[1].node_id(), "adapter_number": 0, "port_number": 0},
],
"link_style": {},
"filters": {},
}
METHOD_NAME.post.assert_called_with("/projects/{}/links".format(project.id()), link._linkCreatedCallback, body=data)
mock_signal = MagicMock()
link.add_link_signal.connect(mock_signal)
link._linkCreatedCallback({"link_id": str(uuid.uuid4())})
mock_signal.assert_called_with(link._id)
assert link._link_id is not None
assert not devices[0].ports()[0].isFree()
assert link.getNodePort(devices[0]) == devices[0].ports()[0]
assert link.getNodePort(devices[1]) == devices[1].ports()[0]
def test_delete_link(devices, project, METHOD_NAME):
link = Link(devices[0], devices[0].ports()[0], devices[1], devices[1].ports()[0])
link._link_id = str(uuid.uuid4())
link.deleteLink()
METHOD_NAME.delete.assert_called_with("/projects/{}/links/{}".format(project.id(), link._link_id), link._linkDeletedCallback)
mock_signal = MagicMock()
link.delete_link_signal.connect(mock_signal)
link._linkDeletedCallback({})
mock_signal.assert_called_with(link._id)
assert devices[0].ports()[0].isFree()
assert link not in devices[0].links()
assert link not in devices[1].links()
def test_start_capture_link(link, METHOD_NAME, project):
link.startCapture("DLT_EN10MB", "test.pcap")
METHOD_NAME.post.assert_called_with("/projects/{}/links/{}/start_capture".format(project.id(), link._link_id), link._startCaptureCallback, body={'capture_file_name': 'test.pcap', 'data_link_type': 'DLT_EN10MB'})
def test_stop_capture_link(link, METHOD_NAME, project):
link.stopCapture()
METHOD_NAME.post.assert_called_with("/projects/{}/links/{}/stop_capture".format(project.id(), link._link_id), link._stopCaptureCallback) |
visit individual node | from typing import Mapping, MutableMapping, Optional, Set
from snuba.clickhouse.query_dsl.accessors import get_time_range_estimate
from snuba.query import ProcessableQuery
from snuba.query.composite import CompositeQuery
from snuba.query.conditions import BooleanFunctions
from snuba.query.data_source.join import IndividualNode, JoinClause, JoinVisitor
from snuba.query.data_source.simple import Table
from snuba.query.data_source.visitor import DataSourceVisitor
from snuba.query.expressions import Column as ColumnExpr
from snuba.query.expressions import Expression
from snuba.query.expressions import FunctionCall as FunctionCallExpr
def _get_date_range(query: ProcessableQuery[Table]) -> Optional[int]:
from_date, to_date = get_time_range_estimate(query)
if from_date is None or to_date is None:
return None
else:
return (to_date - from_date).days
class TablesCollector(DataSourceVisitor[None, Table], JoinVisitor[None, Table]):
"""
Traverses the data source of a composite query and collects
all the referenced table names, final state and sampling rate
to fill stats.
"""
def __init__(self) -> None:
self.__tables: Set[str] = set()
self.__max_time_range: Optional[int] = None
self.__has_complex_conditions: bool = False
self.__final: bool = False
self.__sample_rate: Optional[float] = None
self.__all_raw_columns: MutableMapping[str, Set[ColumnExpr]] = {}
self.__all_conditions: MutableMapping[str, Expression] = {}
self.__all_groupby: MutableMapping[str, Set[Expression]] = {}
self.__all_array_joins: MutableMapping[str, Set[Expression]] = {}
def get_tables(self) -> Set[str]:
return self.__tables
def get_max_time_range(self) -> Optional[int]:
return self.__max_time_range
def has_complex_condition(self) -> bool:
return self.__has_complex_conditions
def any_final(self) -> bool:
return self.__final
def get_sample_rate(self) -> Optional[float]:
return self.__sample_rate
def get_all_raw_columns(self) -> Mapping[str, Set[ColumnExpr]]:
return self.__all_raw_columns
def get_all_conditions(self) -> Mapping[str, Expression]:
return self.__all_conditions
def get_all_groupby(self) -> Mapping[str, Set[Expression]]:
return self.__all_groupby
def get_all_arrayjoin(self) -> Mapping[str, Set[Expression]]:
return self.__all_array_joins
def __find_complex_conditions(self, query: ProcessableQuery[Table]) -> bool:
condition = query.get_condition()
if condition is None:
return False
for c in condition:
if (
isinstance(c, FunctionCallExpr)
and c.function_name == BooleanFunctions.OR
):
return True
return False
def _visit_simple_source(self, data_source: Table) -> None:
self.__tables.add(data_source.table_name)
self.__sample_rate = data_source.sampling_rate
if data_source.final:
self.__final = True
def _visit_join(self, data_source: JoinClause[Table]) -> None:
self.visit_join_clause(data_source)
def _list_array_join(self, query: ProcessableQuery[Table]) -> Set[Expression]:
ret: Set[Expression] = set()
query_arrayjoin = query.get_arrayjoin()
if query_arrayjoin is not None:
ret.update(query_arrayjoin)
for e in query.get_all_expressions():
if isinstance(e, FunctionCallExpr) and e.function_name == "arrayJoin":
ret.add(e)
return ret
def _visit_simple_query(self, data_source: ProcessableQuery[Table]) -> None:
time_range = _get_date_range(data_source)
if time_range and (
self.__max_time_range is None or time_range > self.__max_time_range
):
self.__max_time_range = time_range
self.__has_complex_conditions = (
self.__has_complex_conditions | self.__find_complex_conditions(data_source)
)
table_name = data_source.get_from_clause().table_name
self.__all_raw_columns[table_name] = {
c for c in data_source.get_all_ast_referenced_columns()
}
condition = data_source.get_condition()
if condition is not None:
self.__all_conditions[table_name] = condition
self.__all_groupby[table_name] = set(data_source.get_groupby())
self.__all_array_joins[table_name] = self._list_array_join(data_source)
self.visit(data_source.get_from_clause())
def _visit_composite_query(self, data_source: CompositeQuery[Table]) -> None:
self.visit(data_source.get_from_clause())
# stats do not yet support sampling rate (there is only one field)
# so if we have a composite query we set it to None.
self.__sample_rate = None
def METHOD_NAME(self, node: IndividualNode[Table]) -> None:
self.visit(node.data_source)
def visit_join_clause(self, node: JoinClause[Table]) -> None:
node.left_node.accept(self)
node.right_node.accept(self) |
find longest common time window from list | #!/usr/bin/env python
"""
This module contains helper functions for miscellaneous stuff.
Calculations, optimisations, ....
@UofA, 2013
(LK)
"""
import sys
import os
import numpy as np
import ctypes
def METHOD_NAME(lo_time_windows, sampling_rate):
"""
Determine the longest consecutive time window, in which all given input traces are present.
input:
list of lists - 4(5) lists, each containing 2-tuples with start and end values of time series.
output:
3-tuple (start time, end time, n_samples) - end time refers to the start time of the last sample
"""
# find absolute min and max:
mins = []
maxs = []
for ch_list in lo_time_windows:
for minmaxtuple in ch_list:
mins.append(minmaxtuple[0])
maxs.append(minmaxtuple[1])
totalmin = np.min(mins)
totalmax = np.max(maxs)
# do not correct for last sample, since 'end' in the MTpy handling is
# defined as the start time of the last sample already
totallength = int((totalmax - totalmin) * sampling_rate + 1)
# define time axis:
ta = np.arange(totallength) / sampling_rate + totalmin
# set up array for 4/5 components
d = np.zeros((totallength, len(lo_time_windows)))
for idx_ch, ch_list in enumerate(lo_time_windows):
for minmaxtuple in ch_list:
s = np.argmin(np.abs(ta - minmaxtuple[0]))
e = np.argmin(np.abs(ta - minmaxtuple[1])) + 1
# fill array with value 1 where data are available
d[s:e, idx_ch] = 1
start_idx = None
end_idx = None
t1 = 0
ts_tmp = None
te_tmp = None
longest_window = 0
window_idx = 0
print '\t\tMaximum time window covered by data files:', totalmax - totalmin
    print '\t\tCheck data availability - while-loop until "maximum time window" is reached...'
while t1 < totallength:
if np.prod(d[t1, :]) == 0:
# check, if it's been a data window before
if ts_tmp is not None:
# define sample before as end sample
te_tmp = t1 - 1
# get window length
window_length = te_tmp - ts_tmp
# check if it's been the longest window so far
if window_length > longest_window:
longest_window = window_length
start_idx = ts_tmp
end_idx = te_tmp
# re-initialise temporary variables
ts_tmp = None
te_tmp = None
# otherwise just continue to next step in outer loop
t1 += 1
continue
# check if it's the first sample of a data window:
if ts_tmp is None:
# if yes, initialise temporary variable
ts_tmp = t1
window_idx += 1
t1 += 1
if t1 % (int(totallength / 100.)) == 0:
sys.stdout.write('\t\t{0:3} %\r'.format(
int(np.round(t1 / float(totallength) * 100))))
sys.stdout.flush()
# ' \n\t\tChecking for last sample (include/exclude)...'
print '\n\t\t...Done!'
# after the loop, check, if last sample belogs to a data window:
if ts_tmp is not None:
te_tmp = t1 - 1
window_length = te_tmp - ts_tmp
if window_length > longest_window:
longest_window = window_length
start_idx = ts_tmp
end_idx = te_tmp
# rounding limits of the time window to precision defined by the sampling
# rate
precision = -int(np.log10(1. / sampling_rate))
# print 'return time window parameters:'
# print ta[start_idx],ta[end_idx] ,window_length, len(ta)
# print '\t\tStart time, end time, samples: ',round(ta[start_idx], precision),\
# round(ta[end_idx], precision), window_length#, len(ta))
window_length = (round(ta[end_idx], precision) -
round(ta[start_idx], precision)) * sampling_rate
return (round(ta[start_idx], precision), round(
ta[end_idx], precision), window_length)
def add_birrp_simple_parameters_to_dictionary(birrp_dictionary):
birrp_dictionary['ilev'] = 0
birrp_dictionary['ninp'] = 2
birrp_dictionary['tbw'] = 2
birrp_dictionary['uin'] = 0
birrp_dictionary['ainuin'] = 0.999
birrp_dictionary['nlev'] = 0
birrp_dictionary['npcs'] = 1
birrp_dictionary['nar'] = 5
birrp_dictionary['imode'] = 2
birrp_dictionary['jmode'] = 0
birrp_dictionary['nfil'] = 0
birrp_dictionary['nskip'] = 0
birrp_dictionary['theta1'] = 0
birrp_dictionary['theta2'] = 90
birrp_dictionary['phi'] = 0
return birrp_dictionary
class MemoryCheck():
"""Checks memory of a given system"""
def __init__(self):
if os.name == "posix":
self.value = self.linuxRam()
elif os.name == "nt":
self.value = self.windowsRam()
else:
print "I only work with Win or Linux "
def windowsRam(self):
"""Uses Windows API to check RAM in this OS"""
kernel32 = ctypes.windll.kernel32
c_ulong = ctypes.c_ulong
class MEMORYSTATUS(ctypes.Structure):
_fields_ = [
("dwLength", c_ulong),
("dwMemoryLoad", c_ulong),
("dwTotalPhys", c_ulong),
("dwAvailPhys", c_ulong),
("dwTotalPageFile", c_ulong),
("dwAvailPageFile", c_ulong),
("dwTotalVirtual", c_ulong),
("dwAvailVirtual", c_ulong)
]
memoryStatus = MEMORYSTATUS()
memoryStatus.dwLength = ctypes.sizeof(MEMORYSTATUS)
kernel32.GlobalMemoryStatus(ctypes.byref(memoryStatus))
return int(memoryStatus.dwTotalPhys / 1024 **
2), int(memoryStatus.dwAvailPhys / 1024**2)
def linuxRam(self):
"""Returns the RAM of a linux system"""
totalmemory = os.popen("free -m").readlines()[1].split()[1]
freememory = os.popen("free -m").readlines()[1].split()[3]
return int(totalmemory), int(freememory)
def show_memory():
M = MemoryCheck()
return M.value |
mock jwt decode | # --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import jwt
from unittest import mock
import os
import azure.cli.command_modules.role.custom
from azure.cli.testsdk import ScenarioTest
from azure.cli.testsdk import ResourceGroupPreparer
from .example_steps import step_terms_list
from .example_steps import step_organization_show
from .example_steps import step_organization_list
from .example_steps import step_organization_list2
from .example_steps import step_organization_update
from .example_steps import step_organization_delete
from azure.cli.testsdk.scenario_tests import AllowLargeResponse
from .. import (
try_manual,
raise_if,
calc_coverage
)
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
def step_offer_detail_show(test, rg, checks=None):
if checks is None:
checks = []
result = test.cmd('az confluent offer-detail show '
'--publisher-id confluentinc '
'--offer-id confluent-cloud-azure-stag',
checks=checks).get_output_in_json()
# check
for plan in result:
assert plan.get('offerId', None) is not None
assert plan.get('publisherId', None) is not None
for term_unit in plan['termUnits']:
if term_unit.get('termUnits', None):
assert term_unit['termUnits'] in ['P1M', 'P1Y']
assert term_unit.get('price', None) is not None
assert term_unit['price'].get('isPIRequired', None) is None
assert term_unit['price'].get('msrp', None) is None
assert term_unit.get('termDescription', None) is not None
orig_decode = jwt.decode
def METHOD_NAME(jwt_str, **kwargs):
if jwt_str == 'top-secret-token-for-you':
return {
'given_name': 'contoso',
'family_name': 'zhou',
'email': '[email protected]',
'oid': '00000000-0000-0000-0000-000000000000'
}
else:
return orig_decode(jwt_str, **kwargs)
orig_list_role = azure.cli.command_modules.role.custom.list_role_assignments
def mock_list_role_assignments(cmd, **kwargs):
if kwargs['assignee'] == '00000000-0000-0000-0000-000000000000':
return [{}] # mock it to pass non-empty check
else:
return orig_list_role(cmd, **kwargs)
def step_organization_create(test, rg, checks=None):
if checks is None:
checks = []
with mock.patch('jwt.decode', METHOD_NAME):
with mock.patch('azure.cli.command_modules.role.custom.list_role_assignments', mock_list_role_assignments):
test.cmd('az confluent organization create '
'--location "eastus2euap" '
'--offer-id "confluent-cloud-azure-stag" '
'--plan-id "confluent-cloud-azure-payg-stag" '
'--plan-name "Confluent Cloud - Pay as you Go" '
'--publisher-id "confluentinc" '
'--term-unit "P1M" '
'--tags environment="Dev" '
'--name "{myOrganization}" '
'--resource-group "{rg}"',
checks=checks)
test.cmd('az confluent organization wait --created '
'--name "{myOrganization}" '
'--resource-group "{rg}"',
checks=[])
# Env setup_scenario
@try_manual
def setup_scenario(test, rg):
pass
# Env cleanup_scenario
@try_manual
def cleanup_scenario(test, rg):
pass
# Testcase: Scenario
@try_manual
def call_scenario(test, rg):
setup_scenario(test, rg)
step_terms_list(test, rg, checks=[
test.greater_than('length(@)', 1)
])
step_offer_detail_show(test, rg, checks=[
test.greater_than('length(@)', 0)
])
step_organization_create(test, rg, checks=[
test.check("location", "eastus2euap", case_sensitive=False),
# change to real values for userDetail in live tests
test.check("userDetail.emailAddress", "[email protected]", case_sensitive=False),
test.check("userDetail.firstName", "contoso", case_sensitive=False),
test.check("userDetail.lastName", "zhou", case_sensitive=False),
test.check("tags.environment", "Dev", case_sensitive=False),
test.check("name", "{myOrganization}", case_sensitive=False),
])
step_organization_show(test, rg, checks=[
test.check("location", "eastus2euap", case_sensitive=False),
# change to real values for userDetail in live tests
test.check("userDetail.emailAddress", "[email protected]", case_sensitive=False),
test.check("userDetail.firstName", "contoso", case_sensitive=False),
test.check("userDetail.lastName", "zhou", case_sensitive=False),
test.check("tags.environment", "Dev", case_sensitive=False),
test.check("name", "{myOrganization}", case_sensitive=False),
])
step_organization_list(test, rg, checks=[
test.greater_than('length(@)', 0),
])
step_organization_list2(test, "", checks=[
test.greater_than('length(@)', 0),
])
step_organization_update(test, rg, checks=[
test.check("location", "eastus2euap", case_sensitive=False),
test.check("userDetail.emailAddress", "[email protected]", case_sensitive=False),
test.check("userDetail.firstName", "contoso", case_sensitive=False),
test.check("userDetail.lastName", "zhou", case_sensitive=False),
test.check("name", "{myOrganization}", case_sensitive=False),
test.check("tags.client", "dev-client", case_sensitive=False),
])
step_organization_delete(test, rg, checks=[])
cleanup_scenario(test, rg)
# Test class for Scenario
@try_manual
class ConfluentScenarioTest(ScenarioTest):
def __init__(self, *args, **kwargs):
super(ConfluentScenarioTest, self).__init__(*args, **kwargs)
self.kwargs.update({
'myOrganization': 'cliTestOrg',
})
@ResourceGroupPreparer(name_prefix='clitestconfluent_myResourceGroup'[:7], key='rg', parameter_name='rg')
@AllowLargeResponse()
def test_confluent_Scenario(self, rg):
call_scenario(self, rg)
calc_coverage(__file__)
raise_if() |
get imported symbol | import types
import sys
from pyrevit import EXEC_PARAMS, HOST_APP, DOCS
from pyrevit import PyRevitException
from pyrevit import framework
from pyrevit.coreutils.logger import get_logger
from pyrevit import DB, UI
#pylint: disable=W0401
from pyrevit.revit.db import *
from pyrevit.revit.db import query
from pyrevit.revit.db import select
from pyrevit.revit.db import create
from pyrevit.revit.db import update
from pyrevit.revit.db import ensure
from pyrevit.revit.db import delete
from pyrevit.revit.db.transaction import *
from pyrevit.revit.db import failure
from pyrevit.revit.db.pickling import *
from pyrevit.revit.journals import *
from pyrevit.revit.selection import *
from pyrevit.revit import ui
from pyrevit.revit import tabs
from pyrevit.revit import events
from pyrevit.revit import report
from pyrevit.revit import files
from pyrevit.revit import serverutils
from pyrevit.revit import geom
from pyrevit.revit import units
from pyrevit.revit import features
from pyrevit.revit import bim360
#pylint: disable=W0703,C0302,C0103
mlogger = get_logger(__name__)
def METHOD_NAME(symbol_name):
return globals().get(symbol_name, None)
class RevitWrapper(types.ModuleType):
def __init__(self):
pass
def __getattribute__(self, attr_name):
attr = METHOD_NAME(attr_name)
return attr or object.__getattribute__(self, attr_name)
def __getattr__(self, attr_name):
attr = METHOD_NAME(attr_name)
if not attr:
raise AttributeError('\'module\' object has no attribute \'{}\''
.format(attr_name))
return attr
@property
def uidoc(self):
return HOST_APP.uidoc
@property
def doc(self):
return DOCS.doc
@property
def docs(self):
return DOCS.docs
@property
def active_view(self):
return HOST_APP.active_view
@active_view.setter
def active_view(self, value):
HOST_APP.active_view = value
@property
def active_ui_view(self):
if isinstance(self.active_view, DB.View):
for uiview in self.uidoc.GetOpenUIViews():
if uiview.ViewId == self.active_view.Id:
return uiview
@property
def servers(self):
return HOST_APP.available_servers
@staticmethod
def open_doc(doc_path):
"""Open document at given path.
Args:
doc_path (str): document file path
Returns:
DB.Document: opened document
"""
return HOST_APP.app.OpenDocumentFile(doc_path)
@staticmethod
def close_doc(doc):
"""Close given document.
Args:
doc (DB.Document): document
"""
return doc.Close()
@staticmethod
def post_command(command_id):
"""Request Revit to run a command
Args:
command_id (str): command identifier e.g. ID_REVIT_SAVE_AS_TEMPLATE
"""
HOST_APP.post_command(command_id)
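# Note (added for clarity): because an instance of this wrapper replaces the
# module in sys.modules at the bottom of this file, attribute access on the
# imported `revit` module goes through __getattribute__/__getattr__ above:
# names such as `query` resolve from the module globals populated by the
# imports at the top, while `doc`, `uidoc` and `active_view` fall through to
# the wrapper's own properties.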
class ErrorSwallower():
"""Suppresses warnings during script execution
Example:
>>> with ErrorSwallower() as swallower:
>>> for fam in families:
>>> revit.doc.EditFamily(fam)
>>> if swallower.get_swallowed():
>>> logger.warn("Warnings swallowed")
"""
def __init__(self, log_errors=True):
self._fswallower = failure.FailureSwallower()
self._logerror = log_errors
def on_failure_processing(self, _, event_args):
"""Failure processing event handler"""
try:
failure_accesssor = event_args.GetFailuresAccessor()
mlogger.debug('request for failure processing...')
result = event_args.GetProcessingResult()
mlogger.debug('current failure processing result: %s', result)
result = self._fswallower.preprocess_failures(failure_accesssor)
mlogger.debug('setting failure processing results to: %s', result)
event_args.SetProcessingResult(result)
except Exception as fpex:
            mlogger.error('Error occurred while processing failures. | %s', fpex)
def get_swallowed_errors(self):
"""Return swallowed errors"""
return self._fswallower.get_swallowed_failures()
def reset(self):
"""Reset swallowed errors"""
self._fswallower.reset()
def __enter__(self):
"""Start listening to failure processing events"""
HOST_APP.app.FailuresProcessing += self.on_failure_processing
return self
def __exit__(self, exception, exception_value, traceback):
"""Stop listening to failure processing events"""
HOST_APP.app.FailuresProcessing -= self.on_failure_processing
if exception and self._logerror:
mlogger.error('Error in ErrorSwallower Context. | %s:%s',
exception, exception_value)
if not EXEC_PARAMS.doc_mode:
sys.modules[__name__] = RevitWrapper() |
write | #
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
Test cases for the logging framework
============================================================================
"""
import sys
import logging
import logging.handlers
import unittest
import dragonfly.log as log
#===========================================================================
class OutputCapturer(object):
def __init__(self):
self.blocks = []
def METHOD_NAME(self, data):
self.blocks.append(data)
def flush(self):
pass
def clear(self):
self.blocks = []
@property
def lines(self, prefix=""):
if not self.blocks:
return ()
else:
text = "".join(self.blocks).splitlines()
text = prefix + ("\n" + prefix).join(text)
return text.splitlines()
#---------------------------------------------------------------------------
class LogTestCase(unittest.TestCase):
""" Test behavior of logging system. """
def setUp(self):
self._original_stdout = sys.stdout
self._output = OutputCapturer()
sys.stdout = self._output
self._original_stderr = sys.stderr
self._error = OutputCapturer()
sys.stderr = self._error
def tearDown(self):
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
# if self._output.blocks:
# prefix = "Output: "
# output = "".join(self._output.blocks).splitlines()
# output = prefix + ("\n" + prefix).join(output)
# print output
# if self._error.blocks:
# prefix = "Error: "
# text = "".join(self._error.blocks).splitlines()
# text = prefix + ("\n" + prefix).join(text)
# print text
self._output = None
self._error = None
def test_filtering(self):
""" Verify that log messages are filtered according to level. """
log.setup_log()
logger = logging.getLogger("grammar")
logger.debug("test_filtering - debug")
logger.info("test_filtering - info")
logger.warning("test_filtering - warning")
logger.error("test_filtering - error")
expected = ["grammar (WARNING): test_filtering - warning",
"grammar (ERROR): test_filtering - error"]
self.assertEqual(self._error.lines, expected)
self._error.clear()
logger = logging.getLogger("grammar.begin")
logger.debug("test_filtering - debug")
logger.info("test_filtering - info")
logger.warning("test_filtering - warning")
logger.error("test_filtering - error")
expected = ["grammar.begin (INFO): test_filtering - info",
"grammar.begin (WARNING): test_filtering - warning",
"grammar.begin (ERROR): test_filtering - error"]
self.assertEqual(self._error.lines, expected)
self._error.clear()
logger = logging.getLogger("grammar.load")
logger.debug("test_filtering - debug")
logger.info("test_filtering - info")
logger.warning("test_filtering - warning")
logger.error("test_filtering - error")
expected = ["grammar.load (WARNING): test_filtering - warning",
"grammar.load (ERROR): test_filtering - error"]
self.assertEqual(self._error.lines, expected)
def _new_lines(self):
filename = None
if not hasattr(self, "_previous_line_count"):
self._previous_line_count = 0
lines = open(filename).readlines()
new_lines = lines[self._previous_line_count:]
self._previous_line_count = len(lines)
return new_lines
#===========================================================================
if __name__ == "__main__":
unittest.main() |
global function hook | import pytest
import spytest.framework as stf
from utilities.common import get_proc_name
def trace(fmt, *args):
if args:
stf.dtrace(fmt % args)
else:
stf.dtrace(fmt)
def unused_pytest_collect_file(parent, path):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(parent, path))
trace("%s: end\n", get_proc_name())
def pytest_itemcollected(item):
trace("\n%s: start", get_proc_name())
trace("{} {} {}".format(item.name, item.fspath, item.nodeid))
stf.collect_test(item)
trace("%s: end\n", get_proc_name())
@pytest.hookimpl(trylast=True)
def pytest_collection_modifyitems(session, config, items):
trace("\n%s: start", get_proc_name())
trace("{}".format(items))
stf.modify_tests(config, items)
trace("%s: end\n", get_proc_name())
@pytest.hookimpl(trylast=True)
def pytest_generate_tests(metafunc):
trace("\n%s: start", get_proc_name())
trace("{}".format(metafunc))
stf.generate_tests(metafunc.config, metafunc)
trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_logstart(nodeid, location):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(nodeid, location))
trace("%s: end\n", get_proc_name())
# this gets called in xdist for every test completion
def pytest_runtest_logreport(report):
trace("\n%s: start", get_proc_name())
trace("{}".format(report))
stf.log_report(report)
trace("%s: end\n", get_proc_name())
def pytest_runtest_makereport(item, call):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(item, call))
stf.make_report(item, call)
trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_setup(item):
trace("\n%s: start", get_proc_name())
trace("{}".format(item))
trace("%s: end\n", get_proc_name())
def unused_pytest_runtest_call(item):
trace("\n%s: start", get_proc_name())
trace("{}".format(item))
trace("%s: end\n", get_proc_name())
@pytest.hookspec(firstresult=True)
def unused_pytest_runtest_protocol(item, nextitem):
print("\n%s: start", get_proc_name())
print("{}".format(item))
print("{}".format(nextitem))
print("%s: end\n", get_proc_name())
def pytest_addoption(parser):
trace("\n%s: start", get_proc_name())
stf.add_options(parser)
trace("%s: end\n", get_proc_name())
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
trace("\n%s: start", get_proc_name())
trace("{}".format(config))
stf.configure(config)
trace("%s: end\n", get_proc_name())
def pytest_unconfigure(config):
trace("\n%s: start", get_proc_name())
trace("{}".format(config))
stf.unconfigure(config)
trace("%s: end\n", get_proc_name())
@pytest.hookimpl(tryfirst=True)
def pytest_xdist_setupnodes(config, specs):
trace("\n%s: start", get_proc_name())
trace("{}".format(config))
stf.configure_nodes(config, specs)
trace("%s: end\n", get_proc_name())
def pytest_configure_node(node):
trace("\n%s: start", get_proc_name())
trace("{}".format(node))
stf.configure_node(node)
trace("%s: end\n", get_proc_name())
def pytest_xdist_newgateway(gateway):
trace("\n%s: start", get_proc_name())
trace("{}".format(gateway))
stf.begin_node(gateway)
trace("%s: end\n", get_proc_name())
def pytest_testnodedown(node, error):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(node, error))
stf.finish_node(node, error)
trace("%s: end\n", get_proc_name())
def pytest_exception_interact(node, call, report):
trace("\n%s: start", get_proc_name())
if report.failed:
stf.log_test_exception(call.excinfo)
trace("%s: end\n", get_proc_name())
def pytest_xdist_make_scheduler(config, log):
trace("\n%s: start", get_proc_name())
trace("{}".format(config))
rv = stf.make_scheduler(config, log)
trace("%s: end\n", get_proc_name())
return rv
@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef, request):
trace("\n%s: start", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))
stf.fixture_setup(fixturedef, request)
yield
stf.fixture_setup_finish(fixturedef, request)
trace("\n%s: end", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))
@pytest.hookimpl(tryfirst=True)
@pytest.hookspec(firstresult=True)
def unused_pytest_fixture_setup(fixturedef, request):
trace("\n%s: start", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))
rv = stf.fixture_setup(fixturedef, request)
return rv
def pytest_fixture_post_finalizer(fixturedef, request):
trace("\n%s: start", get_proc_name())
trace("{}".format(fixturedef))
trace("{}".format(request))
stf.fixture_post_finalizer(fixturedef, request)
trace("%s: end\n", get_proc_name())
def pytest_sessionstart(session):
trace("\n%s: start", get_proc_name())
trace("{}".format(session))
stf.session_start(session)
trace("%s: end\n", get_proc_name())
def pytest_sessionfinish(session, exitstatus):
trace("\n%s: start", get_proc_name())
trace("{}".format(session))
trace("{}".format(exitstatus))
stf.session_finish(session, exitstatus)
trace("%s: end\n", get_proc_name())
def unused_pytest_keyboard_interrupt(excinfo):
trace("\n%s: start", get_proc_name())
trace("{}".format(excinfo))
trace("%s: end\n", get_proc_name())
@pytest.hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem):
trace("\n%s: prolog", get_proc_name())
stf.pyfunc_call(pyfuncitem, False)
yield
stf.pyfunc_call(pyfuncitem, True)
trace("\n%s: epilog", get_proc_name())
@pytest.fixture(autouse=True)
def global_repeat_request(request):
""" repeat hook """
trace("\n----------global repeat start------------\n")
rv = stf.global_repeat_request(request)
trace("\n----------global repeat end------------\n")
return rv
@pytest.fixture(scope="session", autouse=True)
def global_session_request(request):
""" session hook """
trace("\n----------global session start------------\n")
stf.fixture_callback(request, "session", False)
yield
stf.fixture_callback(request, "session", True)
trace("\n----------global session end------------\n")
@pytest.fixture(scope="module", autouse=True)
def global_module_hook(request):
""" common module hook """
trace("\n----------global module start------------\n")
rv = stf.fixture_callback(request, "module", False)
if rv:
return rv
def fin():
rv = stf.fixture_callback(request, "module", True)
trace("\n----------global module end------------\n")
return rv
request.addfinalizer(fin)
@pytest.fixture(scope="module", autouse=True)
def global_module_hook_addl(request):
""" additional module hook """
trace("\n----------global module addl start------------\n")
yield
trace("\n----------global module addl end------------\n")
@pytest.fixture(scope="function", autouse=True)
def METHOD_NAME(request):
""" common function hook """
trace("\n----------global test start------------\n")
stf.fixture_callback(request, "function", False)
yield
stf.fixture_callback(request, "function", True)
trace("\n----------global test end------------\n")
def pytest_internalerror(excrepr, excinfo):
trace("\n%s: start", get_proc_name())
trace("{} {}".format(excrepr, excinfo))
trace("%s: end\n", get_proc_name())
collect_fails = []
def pytest_collectreport(report):
if report.failed:
collect_fails.append(report.nodeid)
def pytest_report_collectionfinish(config, startdir, items):
if collect_fails:
for fail in collect_fails:
stf.collect_fail(fail)
# raise pytest.UsageError("Errors during collection, aborting") |
parameterize | import os
import unittest
import collections
import email
from email.message import Message
from email._policybase import compat32
from test.support import load_package_tests
from test.test_email import __file__ as landmark
# Load all tests in package
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)
# helper code used by a number of test modules.
def openfile(filename, *args, **kws):
path = os.path.join(os.path.dirname(landmark), 'data', filename)
return open(path, *args, **kws)
# Base test class
class TestEmailBase(unittest.TestCase):
maxDiff = None
# Currently the default policy is compat32. By setting that as the default
# here we make minimal changes in the test_email tests compared to their
# pre-3.3 state.
policy = compat32
# Likewise, the default message object is Message.
message = Message
def __init__(self, *args, **kw):
super().__init__(*args, **kw)
self.addTypeEqualityFunc(bytes, self.assertBytesEqual)
# Backward compatibility to minimize test_email test changes.
ndiffAssertEqual = unittest.TestCase.assertEqual
def _msgobj(self, filename):
with openfile(filename) as fp:
return email.message_from_file(fp, policy=self.policy)
def _str_msg(self, string, message=None, policy=None):
if policy is None:
policy = self.policy
if message is None:
message = self.message
return email.message_from_string(string, message, policy=policy)
def _bytes_msg(self, bytestring, message=None, policy=None):
if policy is None:
policy = self.policy
if message is None:
message = self.message
return email.message_from_bytes(bytestring, message, policy=policy)
def _make_message(self):
return self.message(policy=self.policy)
def _bytes_repr(self, b):
return [repr(x) for x in b.splitlines(keepends=True)]
def assertBytesEqual(self, first, second, msg):
"""Our byte strings are really encoded strings; improve diff output"""
self.assertEqual(self._bytes_repr(first), self._bytes_repr(second))
def assertDefectsEqual(self, actual, expected):
self.assertEqual(len(actual), len(expected), actual)
for i in range(len(actual)):
self.assertIsInstance(actual[i], expected[i],
'item {}'.format(i))
def METHOD_NAME(cls):
"""A test method parameterization class decorator.
Parameters are specified as the value of a class attribute that ends with
the string '_params'. Call the portion before '_params' the prefix. Then
a method to be parameterized must have the same prefix, the string
'_as_', and an arbitrary suffix.
The value of the _params attribute may be either a dictionary or a list.
The values in the dictionary and the elements of the list may either be
single values, or a list. If single values, they are turned into single
element tuples. However derived, the resulting sequence is passed via
*args to the parameterized test function.
In a _params dictionary, the keys become part of the name of the generated
tests. In a _params list, the values in the list are converted into a
string by joining the string values of the elements of the tuple by '_' and
    converting any blanks into '_'s, and this becomes part of the name.
The full name of a generated test is a 'test_' prefix, the portion of the
test function name after the '_as_' separator, plus an '_', plus the name
derived as explained above.
For example, if we have:
count_params = range(2)
def count_as_foo_arg(self, foo):
self.assertEqual(foo+1, myfunc(foo))
we will get parameterized test methods named:
test_foo_arg_0
test_foo_arg_1
Or we could have:
example_params = {'foo': ('bar', 1), 'bing': ('bang', 2)}
def example_as_myfunc_input(self, name, count):
self.assertEqual(name+str(count), myfunc(name, count))
and get:
test_myfunc_input_foo
test_myfunc_input_bing
Note: if and only if the generated test name is a valid identifier can it
be used to select the test individually from the unittest command line.
The values in the params dict can be a single value, a tuple, or a
    dict. If a single value or a tuple, it is passed to the test function
    as positional arguments. If a dict, it is passed via **kw.
"""
paramdicts = {}
testers = collections.defaultdict(list)
for name, attr in cls.__dict__.items():
if name.endswith('_params'):
if not hasattr(attr, 'keys'):
d = {}
for x in attr:
if not hasattr(x, '__iter__'):
x = (x,)
n = '_'.join(str(v) for v in x).replace(' ', '_')
d[n] = x
attr = d
paramdicts[name[:-7] + '_as_'] = attr
if '_as_' in name:
testers[name.split('_as_')[0] + '_as_'].append(name)
testfuncs = {}
for name in paramdicts:
if name not in testers:
raise ValueError("No tester found for {}".format(name))
for name in testers:
if name not in paramdicts:
raise ValueError("No params found for {}".format(name))
for name, attr in cls.__dict__.items():
for paramsname, paramsdict in paramdicts.items():
if name.startswith(paramsname):
testnameroot = 'test_' + name[len(paramsname):]
for paramname, params in paramsdict.items():
if hasattr(params, 'keys'):
test = (lambda self, name=name, params=params:
getattr(self, name)(**params))
else:
test = (lambda self, name=name, params=params:
getattr(self, name)(*params))
testname = testnameroot + '_' + paramname
test.__name__ = testname
testfuncs[testname] = test
for key, value in testfuncs.items():
setattr(cls, key, value)
return cls |
checkpoint | import json
from typing import List
import pandas as pd
import pytest
from great_expectations import DataContext
from great_expectations.METHOD_NAME import Checkpoint
from great_expectations.core.util import convert_to_json_serializable
from great_expectations.data_context.types.base import CheckpointConfig
from great_expectations.util import deep_filter_properties_iterable
DATA_CONTEXT_ID = "00000000-0000-0000-0000-000000000001"
@pytest.fixture
def METHOD_NAME(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
return Checkpoint(
data_context=context,
**{
"name": "my_checkpoint",
"config_version": 1.0,
"template_name": None,
"run_name_template": None,
"expectation_suite_name": None,
"batch_request": None,
"action_list": [
{
"name": "store_validation_result",
"action": {"class_name": "StoreValidationResultAction"},
},
{
"name": "store_evaluation_params",
"action": {"class_name": "StoreEvaluationParametersAction"},
},
{
"name": "update_data_docs",
"action": {"class_name": "UpdateDataDocsAction", "site_names": []},
},
],
"evaluation_parameters": {},
"runtime_configuration": {},
"validations": [
{
"batch_request": {
"datasource_name": "example_datasource",
"data_connector_name": "default_runtime_data_connector_name",
"data_asset_name": "my_data_asset",
},
"expectation_suite_name": "test_suite",
}
],
"ge_cloud_id": None,
"expectation_suite_ge_cloud_id": None,
},
)
@pytest.mark.filesystem
def test_checkpoint_config_repr(METHOD_NAME):
checkpoint_config_repr: str = str(METHOD_NAME)
for key in (
"action_list",
"batch_request",
"class_name",
"config_version",
"evaluation_parameters",
"module_name",
"name",
"profilers",
"runtime_configuration",
"validations",
):
assert key in checkpoint_config_repr
@pytest.mark.filesystem
def test_checkpoint_config_repr_after_substitution(METHOD_NAME):
df: pd.DataFrame = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
batch_request_param: dict = {
"runtime_parameters": {"batch_data": df},
"batch_identifiers": {"default_identifier_name": "my_simple_df"},
}
result_format_param: dict = {"result_format": "SUMMARY"}
kwargs: dict = {
"batch_request": batch_request_param,
"result_format": result_format_param,
}
# Matching how this is called in usage_statistics.py (parameter style)
resolved_runtime_kwargs: dict = (
CheckpointConfig.resolve_config_using_acceptable_arguments(
*(METHOD_NAME,), **kwargs
)
)
json_dict: dict = convert_to_json_serializable(data=resolved_runtime_kwargs)
deep_filter_properties_iterable(
properties=json_dict,
inplace=True,
)
keys: List[str] = sorted(list(json_dict.keys()))
key: str
sorted_json_dict: dict = {key: json_dict[key] for key in keys}
checkpoint_config_repr: str = json.dumps(sorted_json_dict, indent=2)
for key in (
"action_list",
"batch_request",
"class_name",
"config_version",
"evaluation_parameters",
"module_name",
"name",
"profilers",
"runtime_configuration",
"validations",
):
assert key in checkpoint_config_repr |
ping hail backend | from collections import defaultdict
from django.db.models import F, Min
import requests
from reference_data.models import Omim, GeneConstraint, GENOME_VERSION_LOOKUP
from seqr.models import Sample, PhenotypePrioritization
from seqr.utils.search.constants import PRIORITIZED_GENE_SORT
from seqr.utils.xpos_utils import MIN_POS, MAX_POS
from settings import HAIL_BACKEND_SERVICE_HOSTNAME, HAIL_BACKEND_SERVICE_PORT
def _hail_backend_url(path):
return f'{HAIL_BACKEND_SERVICE_HOSTNAME}:{HAIL_BACKEND_SERVICE_PORT}/{path}'
def _execute_search(search_body, user, path='search'):
response = requests.post(_hail_backend_url(path), json=search_body, headers={'From': user.email}, timeout=300)
response.raise_for_status()
return response.json()
def METHOD_NAME():
requests.get(_hail_backend_url('status'), timeout=5).raise_for_status()
def get_hail_variants(samples, search, user, previous_search_results, genome_version, sort=None, page=1, num_results=100,
gene_agg=False, **kwargs):
end_offset = num_results * page
search_body = _format_search_body(samples, genome_version, end_offset, search)
frequencies = search_body.pop('freqs', None)
if frequencies and frequencies.get('callset'):
frequencies['seqr'] = frequencies.pop('callset')
search_body.update({
'sort': sort,
'sort_metadata': _get_sort_metadata(sort, samples),
'frequencies': frequencies,
'quality_filter': search_body.pop('qualityFilter', None),
'custom_query': search_body.pop('customQuery', None),
})
search_body.pop('skipped_samples', None)
_parse_location_search(search_body)
path = 'gene_counts' if gene_agg else 'search'
response_json = _execute_search(search_body, user, path)
if gene_agg:
previous_search_results['gene_aggs'] = response_json
return response_json
previous_search_results['total_results'] = response_json['total']
previous_search_results['all_results'] = response_json['results']
return response_json['results'][end_offset - num_results:end_offset]
def get_hail_variants_for_variant_ids(samples, genome_version, parsed_variant_ids, user, return_all_queried_families=False):
search = {
'variant_ids': [parsed_id for parsed_id in parsed_variant_ids.values() if parsed_id],
'variant_keys': [variant_id for variant_id, parsed_id in parsed_variant_ids.items() if not parsed_id],
}
search_body = _format_search_body(samples, genome_version, len(parsed_variant_ids), search)
response_json = _execute_search(search_body, user)
if return_all_queried_families:
expected_family_guids = set(samples.values_list('individual__family__guid', flat=True))
_validate_expected_families(response_json['results'], expected_family_guids)
return response_json['results']
def _format_search_body(samples, genome_version, num_results, search):
search_body = {
'genome_version': GENOME_VERSION_LOOKUP[genome_version],
'num_results': num_results,
}
search_body.update(search)
search_body['sample_data'] = _get_sample_data(samples, search_body.get('inheritance_filter'))
return search_body
def _get_sample_data(samples, inheritance_filter):
sample_data = samples.order_by('id').values(
'sample_id', 'dataset_type', 'sample_type',
individual_guid=F('individual__guid'),
family_guid=F('individual__family__guid'),
project_guid=F('individual__family__project__guid'),
affected=F('individual__affected'),
sex=F('individual__sex'),
)
custom_affected = (inheritance_filter or {}).pop('affected', None)
if custom_affected:
for s in sample_data:
s['affected'] = custom_affected.get(s['individual_guid']) or s['affected']
sample_data_by_data_type = defaultdict(list)
for s in sample_data:
dataset_type = s.pop('dataset_type')
sample_type = s.pop('sample_type')
data_type_key = f'{dataset_type}_{sample_type}' if dataset_type == Sample.DATASET_TYPE_SV_CALLS else dataset_type
sample_data_by_data_type[data_type_key].append(s)
return sample_data_by_data_type
def _get_sort_metadata(sort, samples):
sort_metadata = None
if sort == 'in_omim':
sort_metadata = list(Omim.objects.filter(phenotype_mim_number__isnull=False).values_list('gene__gene_id', flat=True))
elif sort == 'constraint':
sort_metadata = {
agg['gene__gene_id']: agg['mis_z_rank'] + agg['pLI_rank'] for agg in
GeneConstraint.objects.values('gene__gene_id', 'mis_z_rank', 'pLI_rank')
}
elif sort == PRIORITIZED_GENE_SORT:
sort_metadata = {
agg['gene_id']: agg['min_rank'] for agg in PhenotypePrioritization.objects.filter(
individual__family_id=samples[0].individual.family_id, rank__lte=100,
).values('gene_id').annotate(min_rank=Min('rank'))
}
return sort_metadata
def _parse_location_search(search):
locus = search.pop('locus', None) or {}
parsed_locus = search.pop('parsedLocus')
genes = parsed_locus.get('genes') or {}
intervals = parsed_locus.get('intervals')
parsed_intervals = None
if genes or intervals:
gene_coords = [
{field: gene[f'{field}{search["genome_version"].title()}'] for field in ['chrom', 'start', 'end']}
for gene in genes.values()
]
parsed_intervals = [_format_interval(**interval) for interval in intervals or []] + [
'{chrom}:{start}-{end}'.format(**gene) for gene in gene_coords]
exclude_locations = locus.get('excludeLocations')
search.update({
'intervals': parsed_intervals,
'exclude_intervals': exclude_locations,
'gene_ids': None if (exclude_locations or not genes) else list(genes.keys()),
'variant_ids': parsed_locus.get('parsed_variant_ids'),
'rs_ids': parsed_locus.get('rs_ids'),
})
def _format_interval(chrom=None, start=None, end=None, offset=None, **kwargs):
if offset:
offset_pos = int((end - start) * offset)
start = max(start - offset_pos, MIN_POS)
end = min(end + offset_pos, MAX_POS)
return f'{chrom}:{start}-{end}'
def _validate_expected_families(results, expected_families):
    # In the ES backend we could force-return variants even if all families are hom ref
# This is not possible in the hail backend as those rows are removed at loading, so fail if missing
invalid_family_variants = []
for result in results:
missing_families = expected_families - set(result['familyGuids'])
if missing_families:
invalid_family_variants.append((result['variantId'], missing_families))
if invalid_family_variants:
from seqr.utils.search.utils import InvalidSearchException
missing = ', '.join([
f'{variant_id} ({"; ".join(sorted(families))})' for variant_id, families in invalid_family_variants
])
raise InvalidSearchException(f'Unable to return all families for the following variants: {missing}') |
runner | import math
import textwrap
import sys
import pytest
import threading
import traceback
import time
import numpy as np
from numpy.testing import IS_PYPY
from . import util
class TestF77Callback(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "foo.f")]
@pytest.mark.parametrize("name", "t,t2".split(","))
def test_all(self, name):
self.check_function(name)
@pytest.mark.xfail(IS_PYPY,
reason="PyPy cannot modify tp_doc after PyType_Ready")
def test_docstring(self):
expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
Parameters
----------
fun : call-back function
Other Parameters
----------------
fun_extra_args : input tuple, optional
Default: ()
Returns
-------
a : int
Notes
-----
Call-back functions::
def fun(): return a
Return objects:
a : int
""")
assert self.module.t.__doc__ == expected
def check_function(self, name):
t = getattr(self.module, name)
r = t(lambda: 4)
assert r == 4
r = t(lambda a: 5, fun_extra_args=(6, ))
assert r == 5
r = t(lambda a: a, fun_extra_args=(6, ))
assert r == 6
r = t(lambda a: 5 + a, fun_extra_args=(7, ))
assert r == 12
r = t(lambda a: math.degrees(a), fun_extra_args=(math.pi, ))
assert r == 180
r = t(math.degrees, fun_extra_args=(math.pi, ))
assert r == 180
r = t(self.module.func, fun_extra_args=(6, ))
assert r == 17
r = t(self.module.func0)
assert r == 11
r = t(self.module.func0._cpointer)
assert r == 11
class A:
def __call__(self):
return 7
def mth(self):
return 9
a = A()
r = t(a)
assert r == 7
r = t(a.mth)
assert r == 9
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback(self):
def callback(code):
if code == "r":
return 0
else:
return 1
f = getattr(self.module, "string_callback")
r = f(callback)
assert r == 0
@pytest.mark.skipif(sys.platform == 'win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
def test_string_callback_array(self):
# See gh-10027
cu1 = np.zeros((1, ), "S8")
cu2 = np.zeros((1, 8), "c")
cu3 = np.array([""], "S8")
def callback(cu, lencu):
if cu.shape != (lencu,):
return 1
if cu.dtype != "S8":
return 2
if not np.all(cu == b""):
return 3
return 0
f = getattr(self.module, "string_callback_array")
for cu in [cu1, cu2, cu3]:
res = f(callback, cu, cu.size)
assert res == 0
def test_threadsafety(self):
# Segfaults if the callback handling is not threadsafe
errors = []
def cb():
# Sleep here to make it more likely for another thread
# to call their callback at the same time.
time.sleep(1e-3)
# Check reentrancy
r = self.module.t(lambda: 123)
assert r == 123
return 42
def METHOD_NAME(name):
try:
for j in range(50):
r = self.module.t(cb)
assert r == 42
self.check_function(name)
except Exception:
errors.append(traceback.format_exc())
threads = [
threading.Thread(target=METHOD_NAME, args=(arg, ))
for arg in ("t", "t2") for n in range(20)
]
for t in threads:
t.start()
for t in threads:
t.join()
errors = "\n\n".join(errors)
if errors:
raise AssertionError(errors)
def test_hidden_callback(self):
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
try:
self.module.hidden_callback2(2)
except Exception as msg:
assert str(msg).startswith("cb: Callback global_f not defined")
self.module.global_f = lambda x: x + 1
r = self.module.hidden_callback(2)
assert r == 3
self.module.global_f = lambda x: x + 2
r = self.module.hidden_callback(2)
assert r == 4
del self.module.global_f
try:
self.module.hidden_callback(2)
except Exception as msg:
assert str(msg).startswith("Callback global_f not defined")
self.module.global_f = lambda x=0: x + 3
r = self.module.hidden_callback(2)
assert r == 5
# reproducer of gh18341
r = self.module.hidden_callback2(2)
assert r == 3
class TestF77CallbackPythonTLS(TestF77Callback):
"""
Callback tests using Python thread-local storage instead of
compiler-provided
"""
options = ["-DF2PY_USE_PYTHON_TLS"]
class TestF90Callback(util.F2PyTest):
sources = [util.getpath("tests", "src", "callback", "gh17797.f90")]
def test_gh17797(self):
def incr(x):
return x + 123
y = np.array([1, 2, 3], dtype=np.int64)
r = self.module.gh17797(incr, y)
assert r == 123 + 1 + 2 + 3
class TestGH18335(util.F2PyTest):
"""The reproduction of the reported issue requires specific input that
extensions may break the issue conditions, so the reproducer is
implemented as a separate test class. Do not extend this test with
other tests!
"""
sources = [util.getpath("tests", "src", "callback", "gh18335.f90")]
def test_gh18335(self):
def foo(x):
x[0] += 1
r = self.module.gh18335(foo)
assert r == 123 + 1 |
system data | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetManagedClusterSnapshotResult',
'AwaitableGetManagedClusterSnapshotResult',
'get_managed_cluster_snapshot',
'get_managed_cluster_snapshot_output',
]
@pulumi.output_type
class GetManagedClusterSnapshotResult:
"""
A managed cluster snapshot resource.
"""
def __init__(__self__, creation_data=None, id=None, location=None, managed_cluster_properties_read_only=None, name=None, snapshot_type=None, METHOD_NAME=None, tags=None, type=None):
if creation_data and not isinstance(creation_data, dict):
raise TypeError("Expected argument 'creation_data' to be a dict")
pulumi.set(__self__, "creation_data", creation_data)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if managed_cluster_properties_read_only and not isinstance(managed_cluster_properties_read_only, dict):
raise TypeError("Expected argument 'managed_cluster_properties_read_only' to be a dict")
pulumi.set(__self__, "managed_cluster_properties_read_only", managed_cluster_properties_read_only)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if snapshot_type and not isinstance(snapshot_type, str):
raise TypeError("Expected argument 'snapshot_type' to be a str")
pulumi.set(__self__, "snapshot_type", snapshot_type)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", METHOD_NAME)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="creationData")
def creation_data(self) -> Optional['outputs.CreationDataResponse']:
"""
CreationData to be used to specify the source resource ID to create this snapshot.
"""
return pulumi.get(self, "creation_data")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedClusterPropertiesReadOnly")
def managed_cluster_properties_read_only(self) -> 'outputs.ManagedClusterPropertiesForSnapshotResponse':
"""
What the properties will be showed when getting managed cluster snapshot. Those properties are read-only.
"""
return pulumi.get(self, "managed_cluster_properties_read_only")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="snapshotType")
def snapshot_type(self) -> Optional[str]:
"""
The type of a snapshot. The default is NodePool.
"""
return pulumi.get(self, "snapshot_type")
@property
@pulumi.getter(name="systemData")
def METHOD_NAME(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetManagedClusterSnapshotResult(GetManagedClusterSnapshotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagedClusterSnapshotResult(
creation_data=self.creation_data,
id=self.id,
location=self.location,
managed_cluster_properties_read_only=self.managed_cluster_properties_read_only,
name=self.name,
snapshot_type=self.snapshot_type,
METHOD_NAME=self.METHOD_NAME,
tags=self.tags,
type=self.type)
def get_managed_cluster_snapshot(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedClusterSnapshotResult:
"""
A managed cluster snapshot resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20230602preview:getManagedClusterSnapshot', __args__, opts=opts, typ=GetManagedClusterSnapshotResult).value
return AwaitableGetManagedClusterSnapshotResult(
creation_data=pulumi.get(__ret__, 'creation_data'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
managed_cluster_properties_read_only=pulumi.get(__ret__, 'managed_cluster_properties_read_only'),
name=pulumi.get(__ret__, 'name'),
snapshot_type=pulumi.get(__ret__, 'snapshot_type'),
METHOD_NAME=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_managed_cluster_snapshot)
def get_managed_cluster_snapshot_output(resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedClusterSnapshotResult]:
"""
A managed cluster snapshot resource.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
... |
test xml case d full | """
tests.unit.xmlutil_test
~~~~~~~~~~~~~~~~~~~~
"""
import xml.etree.ElementTree as ET
import salt.utils.xmlutil as xml
from tests.support.unit import TestCase
class XMLUtilTestCase(TestCase):
"""
    Tests that salt.utils.xmlutil properly parses XML data and returns it as a properly formatted
dictionary. The default method of parsing will ignore attributes and return only the child
items. The full method will include parsing attributes.
"""
def setUp(self):
# Populate our use cases for specific XML formats.
self.cases = {
"a": {
"xml": "<parent>data</parent>",
"legacy": {"parent": "data"},
"full": "data",
},
"b": {
"xml": '<parent value="data">data</parent>',
"legacy": {"parent": "data"},
"full": {"parent": "data", "value": "data"},
},
"c": {
"xml": (
'<parent><child>data</child><child value="data">data</child>'
'<child value="data"/><child/></parent>'
),
"legacy": {
"child": [
"data",
{"child": "data"},
{"child": None},
{"child": None},
]
},
"full": {
"child": [
"data",
{"child": "data", "value": "data"},
{"value": "data"},
None,
]
},
},
"d": {
"xml": (
'<parent value="data" another="data"><child>data</child></parent>'
),
"legacy": {"child": "data"},
"full": {"child": "data", "another": "data", "value": "data"},
},
"e": {
"xml": (
'<parent value="data" another="data"><child'
' value="data">data</child></parent>'
),
"legacy": {"child": "data"},
"full": {
"child": {"child": "data", "value": "data"},
"another": "data",
"value": "data",
},
},
"f": {
"xml": (
'<parent><child><sub-child value="data">data</sub-child></child>'
"<child>data</child></parent>"
),
"legacy": {"child": [{"sub-child": "data"}, {"child": "data"}]},
"full": {
"child": [
{"sub-child": {"value": "data", "sub-child": "data"}},
"data",
]
},
},
}
def test_xml_case_a(self):
xmldata = ET.fromstring(self.cases["a"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["a"]["legacy"])
def test_xml_case_a_legacy(self):
xmldata = ET.fromstring(self.cases["a"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["a"]["legacy"])
def test_xml_case_a_full(self):
xmldata = ET.fromstring(self.cases["a"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["a"]["full"])
def test_xml_case_b(self):
xmldata = ET.fromstring(self.cases["b"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["b"]["legacy"])
def test_xml_case_b_legacy(self):
xmldata = ET.fromstring(self.cases["b"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["b"]["legacy"])
def test_xml_case_b_full(self):
xmldata = ET.fromstring(self.cases["b"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["b"]["full"])
def test_xml_case_c(self):
xmldata = ET.fromstring(self.cases["c"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["c"]["legacy"])
def test_xml_case_c_legacy(self):
xmldata = ET.fromstring(self.cases["c"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["c"]["legacy"])
def test_xml_case_c_full(self):
xmldata = ET.fromstring(self.cases["c"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["c"]["full"])
def test_xml_case_d(self):
xmldata = ET.fromstring(self.cases["d"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["d"]["legacy"])
def test_xml_case_d_legacy(self):
xmldata = ET.fromstring(self.cases["d"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["d"]["legacy"])
def METHOD_NAME(self):
xmldata = ET.fromstring(self.cases["d"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["d"]["full"])
def test_xml_case_e(self):
xmldata = ET.fromstring(self.cases["e"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["e"]["legacy"])
def test_xml_case_e_legacy(self):
xmldata = ET.fromstring(self.cases["e"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["e"]["legacy"])
def test_xml_case_e_full(self):
xmldata = ET.fromstring(self.cases["e"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["e"]["full"])
def test_xml_case_f(self):
xmldata = ET.fromstring(self.cases["f"]["xml"])
defaultdict = xml.to_dict(xmldata)
self.assertEqual(defaultdict, self.cases["f"]["legacy"])
def test_xml_case_f_legacy(self):
xmldata = ET.fromstring(self.cases["f"]["xml"])
defaultdict = xml.to_dict(xmldata, False)
self.assertEqual(defaultdict, self.cases["f"]["legacy"])
def test_xml_case_f_full(self):
xmldata = ET.fromstring(self.cases["f"]["xml"])
defaultdict = xml.to_dict(xmldata, True)
self.assertEqual(defaultdict, self.cases["f"]["full"]) |
pre change open basedir | from .signals import *
from plogical.pluginManagerGlobal import pluginManagerGlobal
class pluginManager:
@staticmethod
def preWebsiteCreation(request):
return pluginManagerGlobal.globalPlug(request, preWebsiteCreation)
@staticmethod
def postWebsiteCreation(request, response):
return pluginManagerGlobal.globalPlug(request, postWebsiteCreation, response)
@staticmethod
def preDomainCreation(request):
return pluginManagerGlobal.globalPlug(request, preDomainCreation)
@staticmethod
def postDomainCreation(request, response):
return pluginManagerGlobal.globalPlug(request, postDomainCreation, response)
@staticmethod
def preWebsiteDeletion(request):
return pluginManagerGlobal.globalPlug(request, preWebsiteDeletion)
@staticmethod
def postWebsiteDeletion(request, response):
return pluginManagerGlobal.globalPlug(request, postWebsiteDeletion, response)
@staticmethod
def preDomainDeletion(request):
return pluginManagerGlobal.globalPlug(request, preDomainDeletion)
@staticmethod
def postDomainDeletion(request, response):
return pluginManagerGlobal.globalPlug(request, postDomainDeletion, response)
@staticmethod
def preWebsiteSuspension(request):
return pluginManagerGlobal.globalPlug(request, preWebsiteSuspension)
@staticmethod
def postWebsiteSuspension(request, response):
return pluginManagerGlobal.globalPlug(request, postWebsiteSuspension, response)
@staticmethod
def preWebsiteModification(request):
return pluginManagerGlobal.globalPlug(request, preWebsiteModification)
@staticmethod
def postWebsiteModification(request, response):
return pluginManagerGlobal.globalPlug(request, postWebsiteModification, response)
@staticmethod
def preDomain(request):
return pluginManagerGlobal.globalPlug(request, preDomain)
@staticmethod
def postDomain(request, response):
return pluginManagerGlobal.globalPlug(request, postDomain, response)
@staticmethod
def preSaveConfigsToFile(request):
return pluginManagerGlobal.globalPlug(request, preSaveConfigsToFile)
@staticmethod
def postSaveConfigsToFile(request, response):
return pluginManagerGlobal.globalPlug(request, postSaveConfigsToFile, response)
@staticmethod
def preSaveRewriteRules(request):
return pluginManagerGlobal.globalPlug(request, preSaveRewriteRules)
@staticmethod
def postSaveRewriteRules(request, response):
return pluginManagerGlobal.globalPlug(request, postSaveRewriteRules, response)
@staticmethod
def preSaveSSL(request):
return pluginManagerGlobal.globalPlug(request, preSaveSSL)
@staticmethod
def postSaveSSL(request, response):
return pluginManagerGlobal.globalPlug(request, postSaveSSL, response)
@staticmethod
def preChangePHP(request):
return pluginManagerGlobal.globalPlug(request, preChangePHP)
@staticmethod
def postChangePHP(request, response):
return pluginManagerGlobal.globalPlug(request, postChangePHP, response)
@staticmethod
def METHOD_NAME(request):
return pluginManagerGlobal.globalPlug(request, METHOD_NAME)
@staticmethod
def postChangeOpenBasedir(request, response):
return pluginManagerGlobal.globalPlug(request, postChangeOpenBasedir, response)
@staticmethod
def preAddNewCron(request):
return pluginManagerGlobal.globalPlug(request, preAddNewCron)
@staticmethod
def postAddNewCron(request, response):
return pluginManagerGlobal.globalPlug(request, postAddNewCron, response)
@staticmethod
def preRemCronbyLine(request):
return pluginManagerGlobal.globalPlug(request, preRemCronbyLine)
@staticmethod
def postRemCronbyLine(request, response):
return pluginManagerGlobal.globalPlug(request, postRemCronbyLine, response)
@staticmethod
def preSubmitAliasCreation(request):
return pluginManagerGlobal.globalPlug(request, preSubmitAliasCreation)
@staticmethod
def postSubmitAliasCreation(request, response):
return pluginManagerGlobal.globalPlug(request, postSubmitAliasCreation, response)
@staticmethod
def preDelateAlias(request):
return pluginManagerGlobal.globalPlug(request, preDelateAlias)
@staticmethod
def postDelateAlias(request, response):
        return pluginManagerGlobal.globalPlug(request, postDelateAlias, response) |
test import relative delta all | import sys
import pytest
HOST_IS_WINDOWS = sys.platform.startswith('win')
def test_import_version_str():
""" Test that dateutil.__version__ can be imported"""
from dateutil import __version__
def test_import_version_root():
import dateutil
assert hasattr(dateutil, '__version__')
# Test that dateutil.easter-related imports work properly
def test_import_easter_direct():
import dateutil.easter
def test_import_easter_from():
from dateutil import easter
def test_import_easter_start():
from dateutil.easter import easter
# Test that dateutil.parser-related imports work properly
def test_import_parser_direct():
import dateutil.parser
def test_import_parser_from():
from dateutil import parser
def test_import_parser_all():
# All interface
from dateutil.parser import parse
from dateutil.parser import parserinfo
# Other public classes
from dateutil.parser import parser
for var in (parse, parserinfo, parser):
assert var is not None
# Test that dateutil.relativedelta-related imports work properly
def test_import_relative_delta_direct():
import dateutil.relativedelta
def test_import_relative_delta_from():
from dateutil import relativedelta
def METHOD_NAME():
from dateutil.relativedelta import relativedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
for var in (relativedelta, MO, TU, WE, TH, FR, SA, SU):
assert var is not None
# In the public interface but not in all
from dateutil.relativedelta import weekday
assert weekday is not None
# Test that dateutil.rrule related imports work properly
def test_import_rrule_direct():
import dateutil.rrule
def test_import_rrule_from():
from dateutil import rrule
def test_import_rrule_all():
from dateutil.rrule import rrule
from dateutil.rrule import rruleset
from dateutil.rrule import rrulestr
from dateutil.rrule import YEARLY, MONTHLY, WEEKLY, DAILY
from dateutil.rrule import HOURLY, MINUTELY, SECONDLY
from dateutil.rrule import MO, TU, WE, TH, FR, SA, SU
rr_all = (rrule, rruleset, rrulestr,
YEARLY, MONTHLY, WEEKLY, DAILY,
HOURLY, MINUTELY, SECONDLY,
MO, TU, WE, TH, FR, SA, SU)
for var in rr_all:
assert var is not None
# In the public interface but not in all
from dateutil.rrule import weekday
assert weekday is not None
# Test that dateutil.tz related imports work properly
def test_import_tztest_direct():
import dateutil.tz
def test_import_tz_from():
from dateutil import tz
def test_import_tz_all():
from dateutil.tz import tzutc
from dateutil.tz import tzoffset
from dateutil.tz import tzlocal
from dateutil.tz import tzfile
from dateutil.tz import tzrange
from dateutil.tz import tzstr
from dateutil.tz import tzical
from dateutil.tz import gettz
from dateutil.tz import tzwin
from dateutil.tz import tzwinlocal
from dateutil.tz import UTC
from dateutil.tz import datetime_ambiguous
from dateutil.tz import datetime_exists
from dateutil.tz import resolve_imaginary
tz_all = ["tzutc", "tzoffset", "tzlocal", "tzfile", "tzrange",
"tzstr", "tzical", "gettz", "datetime_ambiguous",
"datetime_exists", "resolve_imaginary", "UTC"]
tz_all += ["tzwin", "tzwinlocal"] if sys.platform.startswith("win") else []
lvars = locals()
for var in tz_all:
assert lvars[var] is not None
# Test that dateutil.tzwin related imports work properly
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_direct():
import dateutil.tzwin
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_from():
from dateutil import tzwin
@pytest.mark.skipif(not HOST_IS_WINDOWS, reason="Requires Windows")
def test_import_tz_windows_star():
from dateutil.tzwin import tzwin
from dateutil.tzwin import tzwinlocal
tzwin_all = [tzwin, tzwinlocal]
for var in tzwin_all:
assert var is not None
# Test imports of Zone Info
def test_import_zone_info_direct():
import dateutil.zoneinfo
def test_import_zone_info_from():
from dateutil import zoneinfo
def test_import_zone_info_star():
from dateutil.zoneinfo import gettz
from dateutil.zoneinfo import gettz_db_metadata
from dateutil.zoneinfo import rebuild
zi_all = (gettz, gettz_db_metadata, rebuild)
for var in zi_all:
assert var is not None |
test get backend aliases | import threading
from contextlib import contextmanager
from unittest.mock import patch
import pytest
import celery.contrib.testing.worker as contrib_embed_worker
from celery.app import backends
from celery.backends.cache import CacheBackend
from celery.exceptions import ImproperlyConfigured
from celery.utils.nodenames import anon_nodename
class CachedBackendWithTreadTrucking(CacheBackend):
test_instance_count = 0
test_call_stats = {}
def _track_attribute_access(self, method_name):
cls = type(self)
instance_no = getattr(self, '_instance_no', None)
if instance_no is None:
instance_no = self._instance_no = cls.test_instance_count
cls.test_instance_count += 1
cls.test_call_stats[instance_no] = []
cls.test_call_stats[instance_no].append({
'thread_id': threading.get_ident(),
'method_name': method_name
})
def __getattribute__(self, name):
if name == '_instance_no' or name == '_track_attribute_access':
return super().__getattribute__(name)
if name.startswith('__') and name != '__init__':
return super().__getattribute__(name)
self._track_attribute_access(name)
return super().__getattribute__(name)
@contextmanager
def embed_worker(app,
concurrency=1,
pool='threading', **kwargs):
"""
Helper embedded worker for testing.
    It's based on :func:`celery.contrib.testing.worker.start_worker`,
    but doesn't modify logging settings and additionally shuts down the
    worker pool.
"""
# prepare application for worker
app.finalize()
app.set_current()
worker = contrib_embed_worker.TestWorkController(
app=app,
concurrency=concurrency,
hostname=anon_nodename(),
pool=pool,
# not allowed to override TestWorkController.on_consumer_ready
ready_callback=None,
without_heartbeat=kwargs.pop("without_heartbeat", True),
without_mingle=True,
without_gossip=True,
**kwargs
)
t = threading.Thread(target=worker.start, daemon=True)
t.start()
worker.ensure_started()
yield worker
worker.stop()
t.join(10.0)
if t.is_alive():
raise RuntimeError(
"Worker thread failed to exit within the allocated timeout. "
"Consider raising `shutdown_timeout` if your tasks take longer "
"to execute."
)
class test_backends:
@pytest.mark.parametrize('url,expect_cls', [
('cache+memory://', CacheBackend),
])
def METHOD_NAME(self, url, expect_cls, app):
backend, url = backends.by_url(url, app.loader)
assert isinstance(backend(app=app, url=url), expect_cls)
def test_unknown_backend(self, app):
with pytest.raises(ImportError):
backends.by_name('fasodaopjeqijwqe', app.loader)
def test_backend_by_url(self, app, url='redis://localhost/1'):
from celery.backends.redis import RedisBackend
backend, url_ = backends.by_url(url, app.loader)
assert backend is RedisBackend
assert url_ == url
def test_sym_raises_ValuError(self, app):
with patch('celery.app.backends.symbol_by_name') as sbn:
sbn.side_effect = ValueError()
with pytest.raises(ImproperlyConfigured):
backends.by_name('xxx.xxx:foo', app.loader)
def test_backend_can_not_be_module(self, app):
with pytest.raises(ImproperlyConfigured):
backends.by_name(pytest, app.loader)
@pytest.mark.celery(
result_backend=f'{CachedBackendWithTreadTrucking.__module__}.'
f'{CachedBackendWithTreadTrucking.__qualname__}'
f'+memory://')
def test_backend_thread_safety(self):
@self.app.task
def dummy_add_task(x, y):
return x + y
with embed_worker(app=self.app, pool='threads'):
result = dummy_add_task.delay(6, 9)
assert result.get(timeout=10) == 15
call_stats = CachedBackendWithTreadTrucking.test_call_stats
# check that backend instance is used without same thread
for backend_call_stats in call_stats.values():
thread_ids = set()
for call_stat in backend_call_stats:
thread_ids.add(call_stat['thread_id'])
assert len(thread_ids) <= 1, \
"The same celery backend instance is used by multiple threads" |
plan | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSolutionResult',
'AwaitableGetSolutionResult',
'get_solution',
'get_solution_output',
]
@pulumi.output_type
class GetSolutionResult:
"""
The container for solution.
"""
def __init__(__self__, id=None, location=None, name=None, METHOD_NAME=None, properties=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", METHOD_NAME)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional['outputs.SolutionPlanResponse']:
"""
Plan for solution object supported by the OperationsManagement resource provider.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter
def properties(self) -> 'outputs.SolutionPropertiesResponse':
"""
Properties for solution object supported by the OperationsManagement resource provider.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetSolutionResult(GetSolutionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSolutionResult(
id=self.id,
location=self.location,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
properties=self.properties,
tags=self.tags,
type=self.type)
def get_solution(resource_group_name: Optional[str] = None,
solution_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSolutionResult:
"""
Retrieves the user solution.
Azure REST API version: 2015-11-01-preview.
:param str resource_group_name: The name of the resource group to get. The name is case insensitive.
:param str solution_name: User Solution Name.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['solutionName'] = solution_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:operationsmanagement:getSolution', __args__, opts=opts, typ=GetSolutionResult).value
return AwaitableGetSolutionResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'plan'),
properties=pulumi.get(__ret__, 'properties'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_solution)
def get_solution_output(resource_group_name: Optional[pulumi.Input[str]] = None,
solution_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSolutionResult]:
"""
Retrieves the user solution.
Azure REST API version: 2015-11-01-preview.
:param str resource_group_name: The name of the resource group to get. The name is case insensitive.
:param str solution_name: User Solution Name.
"""
... |
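# Usage sketch (assumption: this runs inside a Pulumi program with the azure-native
# provider configured; the resource group and solution names below are placeholders):
#
#     result = get_solution(resource_group_name="example-rg",
#                           solution_name="example-solution")
#     pulumi.export("solutionProperties", result.properties)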
inputhook wx2 | """Enable wxPython to be used interactively in prompt_toolkit
"""
import sys
import signal
import time
from timeit import default_timer as clock
import wx
def ignore_keyboardinterrupts(func):
"""Decorator which causes KeyboardInterrupt exceptions to be ignored during
execution of the decorated function.
This is used by the inputhook functions to handle the event where the user
presses CTRL+C while IPython is idle, and the inputhook loop is running. In
this case, we want to ignore interrupts.
"""
def wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
except KeyboardInterrupt:
pass
return wrapper
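# A minimal usage sketch (not part of the original module): a callable wrapped with
# ignore_keyboardinterrupts swallows CTRL+C instead of letting it propagate. The
# helper below is hypothetical and exists only to illustrate the decorator's
# contract; nothing in this module calls it.
def _example_ignored_interrupt():
    @ignore_keyboardinterrupts
    def idle_step():
        # Stand-in for one iteration of an idle loop; an interrupt raised here
        # is silently discarded by the wrapper.
        raise KeyboardInterrupt
    idle_step()  # returns None rather than raising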
@ignore_keyboardinterrupts
def inputhook_wx1(context):
"""Run the wx event loop by processing pending events only.
This approach seems to work, but its performance is not great as it
relies on having PyOS_InputHook called regularly.
"""
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# Make a temporary event loop and process system events until
# there are no more waiting, then allow idle events (which
# will also deal with pending or posted wx events.)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
del ea
return 0
class EventLoopTimer(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
class EventLoopRunner(object):
def Run(self, time, input_is_ready):
self.input_is_ready = input_is_ready
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if self.input_is_ready():
self.timer.Stop()
self.evtloop.Exit()
@ignore_keyboardinterrupts
def METHOD_NAME(context):
"""Run the wx event loop, polling for stdin.
This version runs the wx eventloop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10, # CHANGE time here to control polling interval
input_is_ready=context.input_is_ready)
return 0
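# Illustrative only (not part of the original module): the hooks above expect a
# prompt_toolkit-style context object exposing an input_is_ready() callable. The
# stand-in below is hypothetical and simply reports "ready" once a short delay has
# elapsed, which lets the hook return; a wx.App must already exist for the hook to
# have any events to process.
class _FakeInputContext:
    def __init__(self, delay=0.5):
        self._deadline = clock() + delay
    def input_is_ready(self):
        return clock() >= self._deadline
# Example (sketch): app = wx.App(False); METHOD_NAME(_FakeInputContext())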
@ignore_keyboardinterrupts
def inputhook_wx3(context):
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
    time.sleep is inserted; otherwise, CPU usage sits at 100%. The sleep time
    should still be tuned for best performance.
"""
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not context.input_is_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
        # low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
return 0
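# The adaptive sleep used above, restated as a standalone sketch (not called by
# this module). The thresholds mirror the comments in inputhook_wx3: sleep longer
# the longer the GUI has been idle, so CPU load stays low without hurting
# responsiveness when events are flowing.
def _idle_sleep_time(seconds_since_last_event):
    if seconds_since_last_event > 10.0:
        return 1.0    # GUI idle for a long time: a full second is fine
    elif seconds_since_last_event > 0.1:
        return 0.05   # few GUI events arriving: medium sleep
    return 0.001      # many GUI events arriving: keep latency minimal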
@ignore_keyboardinterrupts
def inputhook_wxphoenix(context):
"""Run the wx event loop until the user provides more input.
This input hook is suitable for use with wxPython >= 4 (a.k.a. Phoenix).
    It uses the same approach as that used in
ipykernel.eventloops.loop_wx. The wx.MainLoop is executed, and a wx.Timer
is used to periodically poll the context for input. As soon as input is
ready, the wx.MainLoop is stopped.
"""
app = wx.GetApp()
if app is None:
return
if context.input_is_ready():
return
assert wx.IsMainThread()
# Wx uses milliseconds
poll_interval = 100
# Use a wx.Timer to periodically check whether input is ready - as soon as
# it is, we exit the main loop
timer = wx.Timer()
def poll(ev):
if context.input_is_ready():
timer.Stop()
app.ExitMainLoop()
timer.Start(poll_interval)
timer.Bind(wx.EVT_TIMER, poll)
# The import of wx on Linux sets the handler for signal.SIGINT to 0. This
# is a bug in wx or gtk. We fix by just setting it back to the Python
# default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
# The SetExitOnFrameDelete call allows us to run the wx mainloop without
# having a frame open.
app.SetExitOnFrameDelete(False)
app.MainLoop()
# Get the major wx version number to figure out what input hook we should use.
major_version = 3
try:
major_version = int(wx.__version__[0])
except Exception:
pass
# Use the phoenix hook on all platforms for wxpython >= 4
if major_version >= 4:
inputhook = inputhook_wxphoenix
# On OSX, evtloop.Pending() always returns True, regardless of there being
# any events pending. As such we can't use implementations 1 or 3 of the
# inputhook as those depend on a pending/dispatch loop.
elif sys.platform == 'darwin':
inputhook = METHOD_NAME
else:
inputhook = inputhook_wx3 |
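# Sketch of exercising whichever hook was selected above (assumption: a wx.App has
# been created and `context` provides input_is_ready(), as every hook in this
# module expects; IPython normally supplies that context itself):
#
#     app = wx.App(False)
#     inputhook(_FakeInputContext(delay=0.2))  # returns once input is "ready"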
set up | # This code is part of Qiskit.
#
# (C) Copyright IBM 2020, 2023.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test of scikit-quant optimizers."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
from ddt import ddt, data, unpack
import numpy
from qiskit import BasicAer
from qiskit.circuit.library import RealAmplitudes
from qiskit.utils import QuantumInstance, algorithm_globals
from qiskit.exceptions import MissingOptionalLibraryError
from qiskit.opflow import PauliSumOp
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import BOBYQA, SNOBFIT, IMFIL
@ddt
class TestOptimizers(QiskitAlgorithmsTestCase):
"""Test scikit-quant optimizers."""
def METHOD_NAME(self):
"""Set the problem."""
super().METHOD_NAME()
algorithm_globals.random_seed = 50
with self.assertWarns(DeprecationWarning):
self.qubit_op = PauliSumOp.from_list(
[
("II", -1.052373245772859),
("IZ", 0.39793742484318045),
("ZI", -0.39793742484318045),
("ZZ", -0.01128010425623538),
("XX", 0.18093119978423156),
]
)
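        # Not part of the original test, just a sketch of where the reference energy
        # asserted in _optimize comes from: diagonalizing this 4x4 two-qubit
        # Hamiltonian directly gives a ground state of roughly -1.857.
        #
        #   import numpy as np
        #   p = {"I": np.eye(2), "X": np.array([[0., 1.], [1., 0.]]),
        #        "Z": np.array([[1., 0.], [0., -1.]])}
        #   terms = [("II", -1.052373245772859), ("IZ", 0.39793742484318045),
        #            ("ZI", -0.39793742484318045), ("ZZ", -0.01128010425623538),
        #            ("XX", 0.18093119978423156)]
        #   h = sum(c * np.kron(p[a], p[b]) for (a, b), c in terms)
        #   print(min(np.linalg.eigvalsh(h)))  # ~ -1.857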
def _optimize(self, optimizer):
"""launch vqe"""
with self.assertWarns(DeprecationWarning):
qe = QuantumInstance(
BasicAer.get_backend("statevector_simulator"),
seed_simulator=algorithm_globals.random_seed,
seed_transpiler=algorithm_globals.random_seed,
)
with self.assertWarns(DeprecationWarning):
vqe = VQE(ansatz=RealAmplitudes(), optimizer=optimizer, quantum_instance=qe)
result = vqe.compute_minimum_eigenvalue(operator=self.qubit_op)
self.assertAlmostEqual(result.eigenvalue.real, -1.857, places=1)
def test_bobyqa(self):
"""BOBYQA optimizer test."""
try:
optimizer = BOBYQA(maxiter=150)
self._optimize(optimizer)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
@unittest.skipIf(
# NB: numpy.__version__ may contain letters, e.g. "1.26.0b1"
tuple(map(int, numpy.__version__.split(".")[:2])) >= (1, 24),
"scikit's SnobFit currently incompatible with NumPy 1.24.0.",
)
def test_snobfit(self):
"""SNOBFIT optimizer test."""
try:
optimizer = SNOBFIT(maxiter=100, maxfail=100, maxmp=20)
self._optimize(optimizer)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
@unittest.skipIf(
# NB: numpy.__version__ may contain letters, e.g. "1.26.0b1"
tuple(map(int, numpy.__version__.split(".")[:2])) >= (1, 24),
"scikit's SnobFit currently incompatible with NumPy 1.24.0.",
)
@data((None,), ([(-1, 1), (None, None)],))
@unpack
def test_snobfit_missing_bounds(self, bounds):
"""SNOBFIT optimizer test with missing bounds."""
try:
optimizer = SNOBFIT()
with self.assertRaises(ValueError):
optimizer.minimize(
fun=lambda _: 1, # using dummy function (never called)
x0=[0.1, 0.1], # dummy initial point
bounds=bounds,
)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
def test_imfil(self):
"""IMFIL test."""
try:
optimizer = IMFIL(maxiter=100)
self._optimize(optimizer)
except MissingOptionalLibraryError as ex:
self.skipTest(str(ex))
if __name__ == "__main__":
unittest.main() |
test track mouse position | """Test render window interactor"""
import time
import pytest
import pyvista as pv
from pyvista import _vtk
def empty_callback():
return
@pytest.mark.needs_vtk_version(9, 1)
def test_observers():
pl = pv.Plotter()
# Key events
with pytest.raises(TypeError):
pl.add_key_event('w', 1)
    # Key event callbacks must be callable without arguments (no required parameters).
def callback(a, b, *, c, d=1.0):
pass
with pytest.raises(TypeError):
pl.add_key_event('w', callback)
key = 'w'
pl.add_key_event(key, empty_callback)
assert key in pl.iren._key_press_event_callbacks
pl.clear_events_for_key(key)
assert key not in pl.iren._key_press_event_callbacks
# attempting to clear non-existing events doesn't raise by default
pl.clear_events_for_key(key)
with pytest.raises(ValueError, match='No events found for key'):
pl.clear_events_for_key(key, raise_on_missing=True)
# Custom events
assert not pl.iren.interactor.HasObserver(
"PickEvent"
), "Subsequent PickEvent HasObserver tests are wrong if this fails."
# Add different observers
obs_move = pl.iren.add_observer(_vtk.vtkCommand.MouseMoveEvent, empty_callback)
obs_double1 = pl.iren.add_observer(_vtk.vtkCommand.LeftButtonDoubleClickEvent, empty_callback)
obs_double2 = pl.iren.add_observer("LeftButtonDoubleClickEvent", empty_callback)
obs_picks = tuple(pl.iren.add_observer("PickEvent", empty_callback) for _ in range(5))
pl.iren.add_observer("SelectionChangedEvent", empty_callback)
assert pl.iren._observers[obs_move] == "MouseMoveEvent"
assert pl.iren.interactor.HasObserver("MouseMoveEvent")
assert pl.iren._observers[obs_double1] == "LeftButtonDoubleClickEvent"
assert pl.iren._observers[obs_double2] == "LeftButtonDoubleClickEvent"
assert pl.iren.interactor.HasObserver("LeftButtonDoubleClickEvent")
assert all(pl.iren._observers[obs_pick] == "PickEvent" for obs_pick in obs_picks)
assert pl.iren.interactor.HasObserver("SelectionChangedEvent")
# Remove a specific observer
pl.iren.remove_observer(obs_move)
assert obs_move not in pl.iren._observers
# Remove all observers of a specific event
pl.iren.remove_observers(_vtk.vtkCommand.LeftButtonDoubleClickEvent)
assert obs_double1 not in pl.iren._observers and obs_double2 not in pl.iren._observers
# Remove all (remaining) observers
pl.iren.remove_observers()
assert len(pl.iren._observers) == 0
assert not pl.iren.interactor.HasObserver("PickEvent")
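# Condensed sketch of the observer API exercised above (illustrative only; it
# assumes a Plotter can be created off screen and is not run by the test suite):
#
#     pl = pv.Plotter(off_screen=True)
#     obs = pl.iren.add_observer("MouseMoveEvent", lambda *args: None)
#     pl.iren.remove_observer(obs)    # detach one observer by its identifier
#     pl.iren.remove_observers()      # or drop every registered observer at once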
def test_clear_key_event_callbacks():
pl = pv.Plotter()
pl.reset_key_events()
@pytest.mark.skip_plotting
def METHOD_NAME():
pl = pv.Plotter()
pl.track_mouse_position()
pl.show(auto_close=False)
assert pl.mouse_position is None
x, y = 10, 20
pl.iren._mouse_move(x, y)
assert pl.mouse_position == (x, y)
pl.iren.untrack_mouse_position()
assert "MouseMoveEvent" not in pl.iren._observers.values()
@pytest.mark.skip_plotting
def test_track_click_position_multi_render():
points = []
def callback(mouse_point):
points.append(mouse_point)
pl = pv.Plotter()
with pytest.raises(TypeError):
pl.track_click_position(side='dark')
pl.track_click_position(callback=callback, side='left', viewport=True)
pl.show(auto_close=False)
x, y = 10, 20
pl.iren._mouse_right_button_click(2 * x, 2 * y)
pl.iren._mouse_left_button_click(x, y)
assert points[0] == (x, y)
# disable and ensure that clicking is no longer being tracked
pl.untrack_click_position(side='left')
pl.iren._mouse_left_button_click(50, 50)
assert len(points) == 1
@pytest.mark.skip_plotting
def test_track_click_position():
events = []
def single_click_callback(mouse_position):
events.append("single")
def double_click_callback(mouse_position):
events.append("double")
pl = pv.Plotter()
pl.track_click_position(callback=single_click_callback, side='left', double=False)
pl.track_click_position(callback=double_click_callback, side='left', double=True)
pl.show(auto_close=False)
# Test single and double clicks:
pl.iren._mouse_left_button_click(10, 10)
assert len(events) == 1 and events.pop(0) == "single"
pl.iren._mouse_left_button_click(50, 50, count=2)
assert len(events) == 2 and events.pop(1) == "double" and events.pop(0) == "single"
# Test triple click behaviour:
pl.iren._mouse_left_button_click(10, 10, count=3)
assert len(events) == 3
assert events.pop(2) == "single" and events.pop(1) == "double" and events.pop(0) == "single"
@pytest.mark.skipif(
type(_vtk.vtkRenderWindowInteractor()).__name__
not in ("vtkWin32RenderWindowInteractor", "vtkXRenderWindowInteractor"),
reason='Other RenderWindowInteractors do not invoke TimerEvents during ProcessEvents.',
)
@pytest.mark.needs_vtk_version(
(9, 2),
reason='vtkXRenderWindowInteractor (Linux) does not invoke TimerEvents during ProcessEvents until VTK9.2.',
)
def test_timer():
# Create a normal interactor from the offscreen plotter (not generic,
# which is the default for offscreen rendering)
pl = pv.Plotter()
iren = pv.plotting.render_window_interactor.RenderWindowInteractor(pl)
iren.set_render_window(pl.render_window)
duration = 50 # Duration of created timers
delay = 5 * duration # Extra time we wait for the timers to fire at least once
events = []
def on_timer(obj, event):
# TimerEvent callback
events.append(event)
def process_events(iren, duration):
# Helper function to call process_events for the given duration (in milliseconds).
t = 1000 * time.time()
while 1000 * time.time() - t < duration:
iren.process_events()
# Setup interactor
iren.add_observer("TimerEvent", on_timer)
iren.initialize()
# Test one-shot timer (only fired once for the extended duration)
iren.create_timer(duration, repeating=False)
process_events(iren, delay)
assert len(events) == 1
# Test repeating timer (fired multiple times for extended duration)
repeating_timer = iren.create_timer(duration, repeating=True)
process_events(iren, 2 * delay)
assert len(events) >= 3
E = len(events)
# Test timer destruction (no more events fired)
iren.destroy_timer(repeating_timer)
process_events(iren, delay)
assert len(events) == E
@pytest.mark.skip_plotting
def test_poked_subplot_loc():
pl = pv.Plotter(shape=(2, 2), window_size=(800, 800))
pl.iren._mouse_left_button_press(200, 600)
assert tuple(pl.iren.get_event_subplot_loc()) == (0, 0)
pl.iren._mouse_left_button_press(200, 200)
assert tuple(pl.iren.get_event_subplot_loc()) == (1, 0)
pl.iren._mouse_left_button_press(600, 600)
assert tuple(pl.iren.get_event_subplot_loc()) == (0, 1)
pl.iren._mouse_left_button_press(600, 200)
assert tuple(pl.iren.get_event_subplot_loc()) == (1, 1)
pl.close()
@pytest.mark.skip_plotting
def test_poked_subplot_context(verify_image_cache):
pl = pv.Plotter(shape=(2, 2), window_size=(800, 800))
pl.iren._mouse_left_button_press(200, 600)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Cone(), color=True)
pl.iren._mouse_left_button_press(200, 200)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Cube(), color=True)
pl.iren._mouse_left_button_press(600, 600)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Sphere(), color=True)
pl.iren._mouse_left_button_press(600, 200)
with pl.iren.poked_subplot():
pl.add_mesh(pv.Arrow(), color=True)
pl.show() |
re map severity | """
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
"""
import logging
import re
import xml.etree.ElementTree as ET
from urllib.parse import urlparse
from faraday_plugins.plugins.plugin import PluginXMLFormat
logger = logging.getLogger(__name__)
__author__ = "Francisco Amato"
__copyright__ = "Copyright (c) 2013, Infobyte LLC"
__credits__ = ["Francisco Amato"]
__license__ = ""
__version__ = "1.0.0"
__maintainer__ = "Francisco Amato"
__email__ = "[email protected]"
__status__ = "Development"
def get_urls(string):
if isinstance(string, bytes):
string_decode = string.decode("utf-8")
urls = re.findall(r'(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+', string_decode)
else:
urls = re.findall(r'(?:(?:https?|ftp):\/\/)?[\w/\-?=%.]+\.[\w/\-?=%.]+', string)
return urls
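# Illustrative helper (not part of the original plugin): get_urls pulls anything
# URL-shaped out of free-form reference text. The sample string is hypothetical;
# both addresses are returned, including any trailing characters that match the
# pattern's character class.
def _example_get_urls():
    sample = "See https://owasp.org/www-project-top-ten/ and http://cwe.mitre.org/data/definitions/79.html"
    return get_urls(sample)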
class NetsparkerCloudXmlParser:
"""
The objective of this class is to parse an xml file generated by the netsparkercloud tool.
TODO: Handle errors.
TODO: Test netsparkercloud output version. Handle what happens if the parser doesn't support it.
TODO: Test cases.
@param netsparkercloud_xml_filepath A proper xml generated by netsparkercloud
"""
def __init__(self, xml_output):
self.filepath = xml_output
tree = self.parse_xml(xml_output)
if tree:
self.items = self.get_items(tree)
else:
self.items = []
def parse_xml(self, xml_output):
"""
Open and parse an xml file.
TODO: Write custom parser to just read the nodes that we need instead of
reading the whole file.
@return xml_tree An xml tree instance. None if error.
"""
try:
tree = ET.fromstring(xml_output)
except SyntaxError as err:
            logger.error(f"SyntaxError: {err}. {xml_output}")
return None
return tree
def get_items(self, tree):
"""
@return items A list of Host instances
"""
for node in tree.findall("vulnerabilities/vulnerability"):
yield Item(node)
class Item:
"""
An abstract representation of a Item
@param item_node A item_node taken from an netsparkercloud xml tree
"""
def METHOD_NAME(self, severity):
if severity == "Important":
return "high"
return severity
def __init__(self, item_node, encoding="ascii"):
self.node = item_node
self.url = urlparse(self.get_text_from_subnode("url"))
self.protocol = self.url.scheme
self.hostname = self.url.netloc
self.port = self.url.port
if self.port is None:
self.port = '80'
self.type = self.get_text_from_subnode("type")
self.name = self.get_text_from_subnode("name")
self.severity = self.METHOD_NAME(self.get_text_from_subnode("severity"))
self.certainty = self.get_text_from_subnode("certainty")
self.node = item_node.find("http-request")
self.method = self.get_text_from_subnode("method")
self.request = self.get_text_from_subnode("content")
self.param = ""
self.paramval = ""
for p in self.node.findall("parameters/parameter"):
self.param = p.get('name')
self.paramval = p.get('value')
self.node = item_node.find("http-response")
self.response = self.get_text_from_subnode("content")
self.extra = []
for v in item_node.findall("extra-information/info"):
self.extra.append(v.get('name') + ":" + v.get('value'))
self.node = item_node.find("classification")
self.owasp = self.get_text_from_subnode("owasp")
self.wasc = self.get_text_from_subnode("wasc")
self.cwe = self.get_text_from_subnode("cwe")
self.capec = self.get_text_from_subnode("capec")
self.pci = self.get_text_from_subnode("pci31")
self.pci2 = self.get_text_from_subnode("pci32")
self.hipaa = self.get_text_from_subnode("hipaa")
self.ref = []
if self.cwe:
self.cwe = [f"CWE-{self.cwe}"]
if self.owasp:
self.ref.append(f"OWASP-{self.owasp}")
self.node = item_node
self.remedyreferences = self.get_text_from_subnode("remedy-references")
self.externalreferences = self.get_text_from_subnode("external-references")
if self.remedyreferences:
for u in get_urls(self.remedyreferences):
self.ref.append(u)
if self.externalreferences:
for u in get_urls(self.externalreferences):
self.ref.append(u)
self.impact = self.get_text_from_subnode("impact")
self.remedialprocedure = self.get_text_from_subnode("remedial-procedure")
self.remedialactions = self.get_text_from_subnode("remedial-actions")
self.exploitationskills = self.get_text_from_subnode("exploitation-skills")
self.proofofconcept = self.get_text_from_subnode("proof-of-concept")
self.resolution = "Remerdial Procedure: {} \nRemedial Actions: {}".format(self.remedialprocedure,
self.remedialactions)
self.desc = self.get_text_from_subnode("description")
self.desc = "\nImpact: {} \nExploitation Skills: {} \nProof of concept: {} \nWASC: {} \nPCI31: {} \nPCI32: {}" \
" \nCAPEC: {} \nHIPA: {} \nExtra: {}".format(self.impact, self.exploitationskills,
self.proofofconcept, self.wasc, self.pci, self.pci2,
self.capec, self.hipaa, self.extra)
def get_text_from_subnode(self, subnode_xpath_expr):
"""
Finds a subnode in the host node and the retrieves a value from it.
@return An attribute value
"""
if self.node:
sub_node = self.node.find(subnode_xpath_expr)
if sub_node is not None:
return sub_node.text
return None
class NetsparkerCloudPlugin(PluginXMLFormat):
"""
Example plugin to parse netsparkercloud output.
"""
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self.identifier_tag = "netsparker-cloud"
self.id = "NetsparkerCloud"
self.name = "NetsparkerCloud XML Output Plugin"
self.plugin_version = "0.0.1"
self.version = "NetsparkerCloud"
self.framework_version = "1.0.0"
self.options = None
def parseOutputString(self, output):
parser = NetsparkerCloudXmlParser(output)
first = True
for i in parser.items:
if first:
ip = self.resolve_hostname(i.hostname)
h_id = self.createAndAddHost(ip, hostnames=[i.hostname])
s_id = self.createAndAddServiceToHost(h_id, i.protocol, ports=[i.port], status="open")
first = False
v_id = self.createAndAddVulnWebToService(h_id, s_id, i.name, ref=i.ref, website=i.hostname,
severity=i.severity, desc=i.desc, path=i.url.path, method=i.method,
request=i.request, response=i.response, resolution=i.resolution,
pname=i.param, cwe=i.cwe)
del parser
def createPlugin(*args, **kwargs):
return NetsparkerCloudPlugin(*args, **kwargs) |
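# Sketch of driving the plugin outside Faraday (the report path is hypothetical;
# in normal use the framework instantiates the plugin and feeds parseOutputString
# the report contents itself, so treat this only as an illustration):
#
#     plugin = createPlugin()
#     with open("netsparker-cloud-report.xml") as f:
#         plugin.parseOutputString(f.read())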