label | code |
---|---|
test spectral model | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_allclose
import astropy.units as u
from gammapy.catalog import SourceCatalog1LHAASO
from gammapy.modeling.models import (
GaussianSpatialModel,
PointSpatialModel,
PowerLawNormSpectralModel,
PowerLawSpectralModel,
TemplateSpatialModel,
)
from gammapy.utils.testing import requires_data
@pytest.fixture(scope="session")
def lhaaso1():
return SourceCatalog1LHAASO()
@requires_data()
class TestSourceCatalog1LHAASO:
@staticmethod
def test_source_table(lhaaso1):
assert lhaaso1.tag == "1LHAASO"
assert len(lhaaso1.table) == 90
@staticmethod
def test_positions(lhaaso1):
assert len(lhaaso1.positions) == 90
@staticmethod
def test_to_models(lhaaso1):
models = lhaaso1.to_models(which="both")
assert len(models) == 90
models = lhaaso1.to_models(which="KM2A")
assert np.all(
[m.spectral_model.reference.quantity == 50 * u.TeV for m in models]
)
assert len(models) == 75
models = lhaaso1.to_models(which="WCDA")
assert np.all(
[m.spectral_model.reference.quantity == 3 * u.TeV for m in models]
)
assert len(models) == 69
@requires_data()
class TestSourceCatalogObject1LHAASO:
@staticmethod
def test_data(lhaaso1):
assert lhaaso1[0].data["Source_Name"] == "1LHAASO J0007+5659u"
assert "KM2A" in lhaaso1[0].data["Model_a"]
assert_allclose(lhaaso1[0].data["r39_ul"].value, 0.18)
assert lhaaso1[0].data["r39_ul"].unit == u.deg
assert_allclose(lhaaso1[0].data["N0"].value, 0.33e-16)
assert lhaaso1[0].data["N0"].unit == u.Unit("cm−2 s−1 TeV−1")
assert_allclose(lhaaso1[0].data["N0_err"].value, 0.05e-16)
assert lhaaso1[0].data["N0_err"].unit == u.Unit("cm−2 s−1 TeV−1")
assert_allclose(lhaaso1[0].data["N0_ul_b"].value, 0.27e-13)
assert lhaaso1[0].data["N0_ul_b"].unit == u.Unit("cm−2 s−1 TeV−1")
assert lhaaso1[1].data["ASSO_Name"] == "CTA 1"
assert_allclose(lhaaso1[1].data["ASSO_Sep"].value, 0.12)
assert lhaaso1[0].data["ASSO_Sep"].unit == u.deg
assert lhaaso1[10].data["Source_Name"] == "1LHAASO J0428+5531*"
assert "WCDA" in lhaaso1[10].data["Model_a"]
assert_allclose(lhaaso1[10].data["RAJ2000"].value, 67.23)
assert_allclose(lhaaso1[10].data["DECJ2000"].value, 55.53)
assert_allclose(lhaaso1[10].data["pos_err"].value, 0.36)
assert lhaaso1[10].data["RAJ2000"].unit == u.deg
assert lhaaso1[10].data["DECJ2000"].unit == u.deg
assert lhaaso1[10].data["pos_err"].unit == u.deg
assert_allclose(lhaaso1[10].data["r39"].value, 1.18)
assert_allclose(lhaaso1[10].data["r39_b"].value, 0.32)
assert lhaaso1[10].data["r39_b"].unit == u.deg
assert_allclose(lhaaso1[10].data["r39_err"].value, 0.12)
assert_allclose(lhaaso1[10].data["r39_err_b"].value, 0.06)
assert lhaaso1[10].data["r39_err_b"].unit == u.deg
@staticmethod
def test_position(lhaaso1):
position = lhaaso1[0].position
assert_allclose(position.ra.deg, 1.86, atol=1e-3)
assert_allclose(position.dec.deg, 57.00, atol=1e-3)
@staticmethod
def test_sky_model(lhaaso1):
model = lhaaso1[0].sky_model("both")
assert model.name == "1LHAASO J0007+5659u"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, PointSpatialModel)
assert lhaaso1[0].sky_model("WCDA") is None
model = lhaaso1[1].sky_model("both")
assert model.name == "1LHAASO J0007+7303u"
assert isinstance(model.spectral_model, PowerLawNormSpectralModel)
assert isinstance(model.spatial_model, TemplateSpatialModel)
model = lhaaso1[1].sky_model("KM2A")
assert model.name == "1LHAASO J0007+7303u"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, GaussianSpatialModel)
model = lhaaso1[1].sky_model("WCDA")
assert model.name == "1LHAASO J0007+7303u"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, PointSpatialModel)
model = lhaaso1[11].sky_model("both")
assert model.name == "1LHAASO J0500+4454"
assert isinstance(model.spectral_model, PowerLawSpectralModel)
assert isinstance(model.spatial_model, GaussianSpatialModel)
@staticmethod
def METHOD_NAME(lhaaso1):
m = lhaaso1[0].spectral_model("KM2A")
dnde, dnde_err = m.evaluate_error(50 * u.TeV)
assert dnde.unit == "cm-2 s-1 TeV-1"
assert_allclose(dnde.value, 0.33e-16, rtol=1e-3)
assert_allclose(dnde_err.value, 0.05e-16, rtol=1e-3)
m = lhaaso1[11].spectral_model("WCDA")
dnde, dnde_err = m.evaluate_error(3 * u.TeV)
assert dnde.unit == "cm-2 s-1 TeV-1"
assert_allclose(dnde.value, 0.69e-13, rtol=1e-3)
assert_allclose(dnde_err.value, 0.16e-13, rtol=1e-3)
@staticmethod
def test_spatial_model(lhaaso1):
m = lhaaso1[0].spatial_model("KM2A")
assert isinstance(m, PointSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 1.86, atol=1e-2)
assert_allclose(m.lon_0.error, 0.09, atol=1e-2)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 57.00, atol=1e-2)
assert_allclose(m.lat_0.error, 0.049, atol=1e-2)
assert m.frame == "fk5"
m = lhaaso1[11].spatial_model("WCDA")
assert isinstance(m, GaussianSpatialModel)
assert m.lon_0.unit == "deg"
assert_allclose(m.lon_0.value, 75.01, atol=1e-10)
assert m.lat_0.unit == "deg"
assert_allclose(m.lat_0.value, 44.92, atol=1e-10)
assert m.frame == "fk5"
assert m.sigma.unit == "deg"
assert_allclose(m.sigma.value, 0.41, atol=1e-3)
model = lhaaso1["1LHAASO J0007+5659u"].spatial_model("KM2A")
pos_err = model.position_error
assert_allclose(pos_err.height.value, 2 * 0.12, rtol=1e-4)
assert_allclose(pos_err.width.value, 2 * 0.12, rtol=1e-4)
assert_allclose(model.position.ra.value, pos_err.center.ra.value)
assert_allclose(model.position.dec.value, pos_err.center.dec.value) |
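A minimal usage sketch of the catalog API these tests exercise; it assumes the 1LHAASO catalog file is reachable through `$GAMMAPY_DATA`, which is exactly what the `@requires_data` decorator guards.
from gammapy.catalog import SourceCatalog1LHAASO

catalog = SourceCatalog1LHAASO()                      # needs $GAMMAPY_DATA
source = catalog["1LHAASO J0007+5659u"]               # lookup by name or index
km2a_model = source.sky_model("KM2A")                 # spectral + spatial model
all_models = catalog.to_models(which="both")          # one model per source
print(len(catalog.table), km2a_model.spectral_model.tag)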
update dynamic services | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import time
from typing import List, Optional
import grpc
from magma.common.job import Job
from magma.common.rpc_utils import (
grpc_async_wrapper,
indicates_connection_error,
)
from magma.common.sentry import EXCLUDE_FROM_ERROR_MONITORING
from magma.common.service_registry import ServiceRegistry
from magma.magmad.metrics import UNEXPECTED_SERVICE_RESTARTS
from orc8r.protos.common_pb2 import Void
from orc8r.protos.service303_pb2_grpc import Service303Stub
class ServiceInfo(object):
"""
Stores info about the individual services
"""
# Time buffer for services to restart, in seconds
SERVICE_RESTART_BUFFER_TIME = 30
def __init__(self, service_name):
self.continuous_timeouts = 0
self._service_name = service_name
self._expected_start_time = time.time()
self._status = None
self._linked_services = []
# Initialize the counter for each service
UNEXPECTED_SERVICE_RESTARTS.labels(
service_name=self._service_name,
).inc(0)
@property
def status(self):
return self._status
@property
def linked_services(self):
return self._linked_services
def add_linked_services(self, service_list):
for service in service_list:
if service != self._service_name and \
service not in self._linked_services:
self._linked_services.append(service)
def update(self, start_time, status):
self._status = status
self.continuous_timeouts = 0
if start_time <= self._expected_start_time:
# Probably a race in service starts, or magmad restarted
return
if (
start_time - self._expected_start_time
> self.SERVICE_RESTART_BUFFER_TIME
):
UNEXPECTED_SERVICE_RESTARTS.labels(
service_name=self._service_name,
).inc()
self._expected_start_time = start_time
def process_service_restart(self):
self._expected_start_time = time.time()
class ServicePoller(Job):
"""
Periodically query the services' Service303 interface
"""
# Periodicity for getting status from other services, in seconds
GET_STATUS_INTERVAL = 10
# Timeout when getting status from other local services, in seconds
GET_STATUS_TIMEOUT = 8
def __init__(self, loop, config, dynamic_services: Optional[List[str]] = None):
"""
Initialize the ServicePoller
Args:
loop: loop
config: configuration
dynamic_services: list of dynamic services
"""
super().__init__(
interval=self.GET_STATUS_INTERVAL,
loop=loop,
)
self._config = config
# Holds a map of service name -> ServiceInfo
self._service_info = {}
for service in config['magma_services']:
self._service_info[service] = ServiceInfo(service)
if dynamic_services is not None:
for service in dynamic_services:
self._service_info[service] = ServiceInfo(service)
for service_list in config.get('linked_services', []):
for service in service_list:
self._service_info[service].add_linked_services(service_list)
def METHOD_NAME(
self,
new_services: List[str],
stopped_services: List[str],
):
"""
Update the service poller when dynamic services are enabled or disabled
Args:
new_services: New services which were enabled
stopped_services: Old services which were disabled
"""
for service in new_services:
self._service_info[service] = ServiceInfo(service)
for service in stopped_services:
self._service_info.pop(service)
def get_service_timeouts(self):
ret = {}
for service_name, service in self._service_info.items():
ret[service_name] = service.continuous_timeouts
return ret
def reset_timeout_counter(self, service):
self._service_info[service].continuous_timeouts = 0
@property
def service_info(self):
return self._service_info
def process_service_restart(self, service_name):
self._service_info[service_name].process_service_restart()
for linked_service in self._service_info[service_name].linked_services:
self._service_info[linked_service].process_service_restart()
async def _run(self):
await self._get_service_info()
async def _get_service_info(self):
"""
Make RPC calls to 'GetServiceInfo' functions of other services, to
get current status.
"""
for service in list(self._service_info):
# Check whether service provides service303 interface
if service in self._config['non_service303_services']:
continue
try:
chan = ServiceRegistry.get_rpc_channel(
service, ServiceRegistry.LOCAL,
)
except ValueError:
# Service can't be contacted
logging.error("Can't get RPC channel to %s", service)
continue
client = Service303Stub(chan)
try:
future = client.GetServiceInfo.future(
Void(),
self.GET_STATUS_TIMEOUT,
)
info = await grpc_async_wrapper(future, self._loop)
self._service_info[service].update(
info.start_time_secs,
info.status,
)
self._service_info[service].continuous_timeouts = 0
except grpc.RpcError as err:
logging.error(
"GetServiceInfo Error for %s! [%s] %s",
service,
err.code(),
err.details(),
extra=EXCLUDE_FROM_ERROR_MONITORING if indicates_connection_error(err) else None,
)
self._service_info[service].continuous_timeouts += 1 |
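For orientation, a hedged sketch of how the poller above might be wired up; the service names and the asyncio loop are illustrative assumptions, while the config keys mirror the ones the class reads.
import asyncio

config = {
    'magma_services': ['mobilityd', 'sessiond'],       # assumed service names
    'non_service303_services': [],
    'linked_services': [['mobilityd', 'sessiond']],
}
loop = asyncio.new_event_loop()
poller = ServicePoller(loop, config, dynamic_services=['monitord'])
# When dynamic services are toggled, the masked update method above would be
# called along the lines of:
#   poller.METHOD_NAME(new_services=['eventd'], stopped_services=['monitord'])
print(poller.get_service_timeouts())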
libs | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import sys
from spack.package import *
class Mesa18(AutotoolsPackage):
"""Mesa is an open-source implementation of the OpenGL specification
- a system for rendering interactive 3D graphics."""
homepage = "https://www.mesa3d.org"
maintainers("v-dobrev", "ChristianTackeGSI")
# Note that we always want to build from the git repo instead of a
# tarball since the tarball has pre-generated files for certain versions
# of LLVM while the git repo doesn't so it can adapt at build time to
# whatever version of LLVM you're using.
git = "https://gitlab.freedesktop.org/mesa/mesa.git"
version("18.3.6", tag="mesa-18.3.6", commit="11049bcff86da8013a4f63bd68daf637e3af22f3")
depends_on("autoconf", type="build")
depends_on("automake", type="build")
depends_on("libtool", type="build")
depends_on("m4", type="build")
depends_on("pkgconfig", type="build")
depends_on("binutils+plugins", when=(sys.platform != "darwin"), type="build")
depends_on("bison", type="build")
depends_on("flex", type="build")
depends_on("gettext", type="build")
depends_on("pkgconfig", type="build")
depends_on("python@:3.8", type="build") # https://github.com/spack/spack/issues/28219
depends_on("[email protected]:", type="build")
depends_on("libxml2")
depends_on("zlib-api")
depends_on("expat")
depends_on("ncurses+termlib")
# Internal options
variant("llvm", default=True, description="Enable LLVM.")
variant(
"swr",
values=spack.variant.DisjointSetsOfValues(
("none",), ("auto",), ("avx", "avx2", "knl", "skx")
)
.with_non_feature_values("auto")
.with_non_feature_values("none")
.with_default("auto"),
when="+llvm",
description="Enable the SWR driver.",
)
# Front ends
variant("osmesa", default=True, description="Enable the OSMesa frontend.")
is_linux = sys.platform.startswith("linux")
variant("glx", default=is_linux, description="Enable the GLX frontend.")
# Additional backends
variant("opengles", default=False, description="Enable OpenGL ES support.")
# Provides
provides("libglx", when="+glx")
provides("libosmesa", when="+osmesa")
# Variant dependencies
depends_on("libllvm@6:10", when="+llvm")
depends_on("libx11", when="+glx")
depends_on("libxcb", when="+glx")
depends_on("libxext", when="+glx")
depends_on("[email protected]:", when="+glx")
# Require at least 1 front-end
conflicts("~osmesa ~glx")
# Prevent an unnecessary xcb-dri dependency
patch("autotools-x11-nodri.patch")
# Backport Mesa MR#6053 to prevent multiply-defined symbols
patch("multiple-symbols_hash.patch", when="@:20.1.4%gcc@10:")
def setup_build_environment(self, env):
env.set("PYTHON", self.spec["python"].command.path)
def autoreconf(self, spec, prefix):
which("autoreconf")("--force", "--verbose", "--install")
def configure_args(self):
spec = self.spec
args = [
"LDFLAGS={0}".format(self.spec["ncurses"].METHOD_NAME.search_flags),
"--enable-shared",
"--disable-static",
"--disable-libglvnd",
"--disable-nine",
"--disable-omx-bellagio",
"--disable-omx-tizonia",
"--disable-opencl",
"--disable-opencl-icd",
"--disable-va",
"--disable-vdpau",
"--disable-xa",
"--disable-xvmc",
"--disable-osmesa",
"--with-vulkan-drivers=",
"--disable-egl",
"--disable-gbm",
"--disable-dri",
"--enable-opengl",
]
args_platforms = []
args_gallium_drivers = ["swrast"]
args_dri_drivers = []
if spec.target.family == "arm" or spec.target.family == "aarch64":
args.append("--disable-libunwind")
num_frontends = 0
if "+osmesa" in spec:
num_frontends += 1
args.append("--enable-gallium-osmesa")
else:
args.append("--disable-gallium-osmesa")
if "+glx" in spec:
num_frontends += 1
args.append("--enable-glx=gallium-xlib")
args_platforms.append("x11")
else:
args.append("--disable-glx")
if "+opengles" in spec:
args.extend(["--enable-gles1", "--enable-gles2"])
else:
args.extend(["--disable-gles1", "--disable-gles2"])
if num_frontends > 1:
args.append("--enable-shared-glapi")
else:
args.append("--disable-shared-glapi")
if "+llvm" in spec:
args.append("--enable-llvm")
args.append("--with-llvm-prefix=%s" % spec["libllvm"].prefix)
if "+llvm_dylib" in spec["libllvm"]:
args.append("--enable-llvm-shared-libs")
else:
args.append("--disable-llvm-shared-libs")
else:
args.append("--disable-llvm")
args_swr_arches = []
if "swr=auto" in spec:
if "avx" in spec.target:
args_swr_arches.append("avx")
if "avx2" in spec.target:
args_swr_arches.append("avx2")
if "avx512f" in spec.target:
if "avx512er" in spec.target:
args_swr_arches.append("knl")
if "avx512bw" in spec.target:
args_swr_arches.append("skx")
else:
if "swr=avx" in spec:
args_swr_arches.append("avx")
if "swr=avx2" in spec:
args_swr_arches.append("avx2")
if "swr=knl" in spec:
args_swr_arches.append("knl")
if "swr=skx" in spec:
args_swr_arches.append("skx")
if args_swr_arches:
args_gallium_drivers.append("swr")
args.append("--with-swr-archs=" + ",".join(args_swr_arches))
# Add the remaining list args
args.append("--with-platforms=" + ",".join(args_platforms))
args.append("--with-gallium-drivers=" + ",".join(args_gallium_drivers))
args.append("--with-dri-drivers=" + ",".join(args_dri_drivers))
return args
@property
def METHOD_NAME(self):
spec = self.spec
libs_to_seek = set()
if "platform=windows" in spec:
libs_to_seek.add("opengl32")
if "+osmesa" in spec:
libs_to_seek.add("osmesa")
else:
libs_to_seek.add("libGL")
if "+osmesa" in spec:
libs_to_seek.add("libOSMesa")
if "+glx" in spec:
libs_to_seek.add("libGL")
if "+opengles" in spec:
libs_to_seek.add("libGLESv1_CM")
libs_to_seek.add("libGLESv2")
return find_libraries(
list(libs_to_seek), root=self.spec.prefix, shared=True, recursive=True
)
@property
def libglx_headers(self):
return find_headers("GL/glx", root=self.spec.prefix.include, recursive=False)
@property
def libglx_libs(self):
return find_libraries("libGL", root=self.spec.prefix, recursive=True)
@property
def libosmesa_headers(self):
return find_headers("GL/osmesa", root=self.spec.prefix.include, recursive=False)
@property
def libosmesa_libs(self):
if "platform=windows" in self.spec:
lib_name = "osmesa"
else:
lib_name = "libOSMesa"
return find_libraries(lib_name, root=self.spec.prefix, recursive=True) |
all interfaces | #!/usr/bin/env python3
# Copyright (c) 2014-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Linux network utilities.
Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
"""
import sys
import socket
import struct
import array
import os
# STATE_ESTABLISHED = '01'
# STATE_SYN_SENT = '02'
# STATE_SYN_RECV = '03'
# STATE_FIN_WAIT1 = '04'
# STATE_FIN_WAIT2 = '05'
# STATE_TIME_WAIT = '06'
# STATE_CLOSE = '07'
# STATE_CLOSE_WAIT = '08'
# STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
# STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = bytes.fromhex(host)
host_out = ''
for x in range(0, len(host) // 4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
def netstat(typ='tcp'):
'''
Function to return a list with the status of TCP connections on Linux systems.
To get the PID of all network processes running on the system, you must run this script
as superuser
'''
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: https://code.activestate.com/recipes/439093/
def METHOD_NAME():
'''
Return all interfaces that are up
'''
import fcntl # Linux only, so only import when required
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', b'\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tobytes()
return [(namestr[i:i+16].split(b'\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert x < 2
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert (x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0)
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return bytearray(addr).hex()
def test_ipv6_local():
'''
Check for (local) IPv6 support.
'''
# By using SOCK_DGRAM this will not actually make a connection, but it will
# fail if there is no route to IPv6 localhost.
have_ipv6 = True
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(('::1', 1))
except socket.error:
have_ipv6 = False
return have_ipv6 |
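A small, Linux-only sketch of how these helpers fit together; inspecting the current process avoids the superuser requirement mentioned in the netstat docstring.
import os

if __name__ == '__main__':
    print("IPv6 available locally:", test_ipv6_local())
    print("Interfaces up:", METHOD_NAME())             # the masked function above
    for host, port in get_bind_addrs(os.getpid()):     # our own listening sockets
        print("listening on", host, port)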
test args | import contextlib
import sys
import os
import unittest
from test import support
import time
resource = support.import_module('resource')
# This test is checking a few specific problem spots with the resource module.
class ResourceTest(unittest.TestCase):
def METHOD_NAME(self):
self.assertRaises(TypeError, resource.getrlimit)
self.assertRaises(TypeError, resource.getrlimit, 42, 42)
self.assertRaises(TypeError, resource.setrlimit)
self.assertRaises(TypeError, resource.setrlimit, 42, 42, 42)
def test_fsize_ismax(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# RLIMIT_FSIZE should be RLIM_INFINITY, which will be a really big
# number on a platform with large file support. On these platforms,
# we need to test that the get/setrlimit functions properly convert
# the number to a C long long and that the conversion doesn't raise
# an error.
self.assertEqual(resource.RLIM_INFINITY, max)
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
def test_fsize_enforced(self):
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
# Check to see what happens when the RLIMIT_FSIZE is small. Some
# versions of Python were terminated by an uncaught SIGXFSZ, but
# pythonrun.c has been fixed to ignore that exception. If so, the
# write() should return EFBIG when the limit is exceeded.
# At least one platform has an unlimited RLIMIT_FSIZE and attempts
# to change it raise ValueError instead.
try:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (1024, max))
limit_set = True
except ValueError:
limit_set = False
f = open(support.TESTFN, "wb")
try:
f.write(b"X" * 1024)
try:
f.write(b"Y")
f.flush()
# On some systems (e.g., Ubuntu on hppa) the flush()
# doesn't always cause the exception, but the close()
# does eventually. Try flushing several times in
# an attempt to ensure the file is really synced and
# the exception raised.
for i in range(5):
time.sleep(.1)
f.flush()
except OSError:
if not limit_set:
raise
if limit_set:
# Close will attempt to flush the byte we wrote
# Restore limit first to avoid getting a spurious error
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
finally:
f.close()
finally:
if limit_set:
resource.setrlimit(resource.RLIMIT_FSIZE, (cur, max))
support.unlink(support.TESTFN)
def test_fsize_toobig(self):
# Be sure that setrlimit is checking for really large values
too_big = 10**50
try:
(cur, max) = resource.getrlimit(resource.RLIMIT_FSIZE)
except AttributeError:
pass
else:
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (too_big, max))
except (OverflowError, ValueError):
pass
try:
resource.setrlimit(resource.RLIMIT_FSIZE, (max, too_big))
except (OverflowError, ValueError):
pass
def test_getrusage(self):
self.assertRaises(TypeError, resource.getrusage)
self.assertRaises(TypeError, resource.getrusage, 42, 42)
usageself = resource.getrusage(resource.RUSAGE_SELF)
usagechildren = resource.getrusage(resource.RUSAGE_CHILDREN)
# May not be available on all systems.
try:
usageboth = resource.getrusage(resource.RUSAGE_BOTH)
except (ValueError, AttributeError):
pass
try:
usage_thread = resource.getrusage(resource.RUSAGE_THREAD)
except (ValueError, AttributeError):
pass
# Issue 6083: Reference counting bug
def test_setrusage_refcount(self):
try:
limits = resource.getrlimit(resource.RLIMIT_CPU)
except AttributeError:
pass
else:
class BadSequence:
def __len__(self):
return 2
def __getitem__(self, key):
if key in (0, 1):
return len(tuple(range(1000000)))
raise IndexError
resource.setrlimit(resource.RLIMIT_CPU, BadSequence())
def test_pagesize(self):
pagesize = resource.getpagesize()
self.assertIsInstance(pagesize, int)
self.assertGreaterEqual(pagesize, 0)
@unittest.skipUnless(sys.platform == 'linux', 'test requires Linux')
def test_linux_constants(self):
for attr in ['MSGQUEUE', 'NICE', 'RTPRIO', 'RTTIME', 'SIGPENDING']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@support.requires_freebsd_version(9)
def test_freebsd_constants(self):
for attr in ['SWAP', 'SBSIZE', 'NPTS']:
with contextlib.suppress(AttributeError):
self.assertIsInstance(getattr(resource, 'RLIMIT_' + attr), int)
@unittest.skipUnless(hasattr(resource, 'prlimit'), 'no prlimit')
@support.requires_linux_version(2, 6, 36)
def test_prlimit(self):
self.assertRaises(TypeError, resource.prlimit)
if os.geteuid() != 0:
self.assertRaises(PermissionError, resource.prlimit,
1, resource.RLIMIT_AS)
self.assertRaises(ProcessLookupError, resource.prlimit,
-1, resource.RLIMIT_AS)
limit = resource.getrlimit(resource.RLIMIT_AS)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS), limit)
self.assertEqual(resource.prlimit(0, resource.RLIMIT_AS, limit),
limit)
def test_main(verbose=None):
support.run_unittest(ResourceTest)
if __name__ == "__main__":
test_main() |
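Outside the test suite, the calls under test reduce to a couple of lines; a minimal illustration (RLIMIT_NOFILE is just one commonly available limit).
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)   # current / maximum
print("open-file limit:", soft, hard)
usage = resource.getrusage(resource.RUSAGE_SELF)
print("user CPU seconds so far:", usage.ru_utime)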
process classlist | from docutils.parsers.rst import Directive
from docutils import nodes
import pdb
from collections import defaultdict
from sphinx import addnodes
from pathlib import Path
class classlist(nodes.General, nodes.Element):
pass
class ClasslistDirective(Directive):
def run(self):
return [classlist('')]
def generate_classlist(app, fromdocname, subtree, class_list, prefix, level=2):
for e in class_list:
ref = nodes.reference('', '')
ref['refdocname'] = e[3]
ref['refuri'] = app.builder.get_relative_uri(fromdocname, e[3])
ref['refuri'] += '#' + e[4]
ref.append(nodes.Text(prefix + e[0].split(".")[-1]))
class_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=["toctree-l" + str(level)])
#print(fromdocname, e[3])
if fromdocname.startswith(e[3].replace(".api.", ".")):
#print("current")
class_item['classes'].append('current')
subtree += class_item
def generate_collapsible_classlist(app, fromdocname, classes, container, caption, module_index, label_prefix):
toc = nodes.bullet_list()
toc += nodes.caption(caption, '', *[nodes.Text(caption)])
if module_index is not None:
entries = defaultdict(list)
#print("test", classes, fromdocname, caption)
prefix = ".".join(classes[0][0].split(".")[:module_index]) + "."
for e in classes:
module = e[0].split(".")[module_index]
entries[module].append(e)
#print("t", fromdocname)
for module, class_list in entries.items():
#print("t2", "src." + prefix + module)
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, prefix + module)
ref.append(nodes.Text((label_prefix + module) if label_prefix != "" else module.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=["toctree-l1"])
if fromdocname.startswith(prefix + module):
module_item["classes"].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
generate_classlist(app, fromdocname, subtree, class_list, "")
else:
generate_classlist(app, fromdocname, toc, classes, label_prefix, level=1)
container += toc
def generate_tutorials_sidebar(app, fromdocname, container):
tutorials_dir = Path(__file__).absolute().parent.parent / "docs" / "tutorials"
tutorials = [
("Loading and manipulating objects", "loader"),
("Configuring the camera", "camera"),
("Rendering the scene", "renderer"),
("Writing the results to file", "writer"),
("How key frames work", "key_frames"),
("Positioning objects via the physics simulator", "physics"),
]
container += nodes.caption("Tutorials", '', *[nodes.Text("Tutorials")])
for tutorial in tutorials:
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, "docs/tutorials/" + tutorial[1])
ref.append(nodes.Text(tutorial[0]))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=["toctree-l1"])
if fromdocname.startswith("docs/tutorials/" + tutorial[1]):
module_item["classes"].append('current')
toc += module_item
container += toc
def generate_examples_sidebar(app, fromdocname, container):
examples = Path(__file__).absolute().parent.parent / "examples"
container += nodes.caption("Examples", '', *[nodes.Text("Examples")])
for example_groups in [examples / group for group in ["basics", "advanced", "datasets"]]:
if example_groups.is_dir():
toc = nodes.bullet_list()
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, "examples/" + example_groups.name + "/README")
ref.append(nodes.Text(example_groups.name.capitalize()))
module_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=["toctree-l1"])
if fromdocname.startswith("examples/" + example_groups.name):
module_item["classes"].append('current')
toc += module_item
subtree = nodes.bullet_list()
module_item += subtree
for example in sorted(example_groups.rglob("*/README.md"), key=lambda x: x.parent.name):
ref = nodes.reference('', '')
ref['refuri'] = app.builder.get_relative_uri(fromdocname, str(example).replace(str(examples), "examples").replace("README.md", "README"))
ref.append(nodes.Text(example.parent.name))
class_item = nodes.list_item('', addnodes.compact_paragraph('', '', ref), classes=["toctree-l2"])
if fromdocname == ref['refuri'].replace(".html", ""):
class_item['classes'].append('current')
subtree += class_item
container += toc
def generate_sidebar(app, fromdocname):
env = app.builder.env
container = nodes.compound(classes=['toctree-wrapper'])#addnodes.compact_paragraph('', '', classes=['toctree-wrapper'])
py = env.get_domain('py')
classes = py.get_objects()
#print("classes", classes, [_[2] for _ in py.get_objects()])
classes_per_group = {"api": ([], None, "bproc."), "internal": ([], 2, "bproc.python."), "modules (deprecated)": ([], 3, "")}#"modules": ([], 1), "provider": ([], 2),
for e in classes:
if e[2] == 'module' and e[3].startswith("blenderproc.api.") or e[2] == 'class' and not e[3].startswith("blenderproc.api."):
#print(e)
if e[3].startswith("blenderproc.api."):
group = "api"
elif e[0].startswith("blenderproc.python.modules."):
group = "modules (deprecated)"
else:
group = "internal"
#print(group, e)
classes_per_group[group][0].append(e)
generate_tutorials_sidebar(app, fromdocname, container)
generate_examples_sidebar(app, fromdocname, container)
for key, items in classes_per_group.items():
generate_collapsible_classlist(app, fromdocname, items[0], container, key.capitalize(), items[1], items[2])
return container
def METHOD_NAME(app, doctree, fromdocname):
container = generate_sidebar(app, fromdocname)
ctx = app.env.config['html_context']
ctx['classlist'] = container
for node in doctree.traverse(classlist):
node.replace_self([container])
continue
def add_classlist_handler(app):
def _print_classlist(**kwargs):
ctx = app.env.config['html_context']
return app.builder.render_partial(ctx['classlist'])['fragment']
ctx = app.env.config['html_context']
if 'print_classlist' not in ctx:
ctx['print_classlist'] = _print_classlist
def html_page_context(app, pagename, templatename, context, doctree):
def make_toctree(collapse=True, maxdepth=-1, includehidden=True, titles_only=False):
if "page_source_suffix" in context and context["page_source_suffix"] == ".md":
fromdocname = context["current_page_name"]
else:
fromdocname = "" if "title" not in context else context["title"]
fulltoc = generate_sidebar(app, fromdocname)
rendered_toc = app.builder.render_partial(fulltoc)['fragment']
return rendered_toc
context['toctree'] = make_toctree
def setup(app):
app.add_node(classlist)
app.add_directive('classlist', ClasslistDirective)
app.connect('doctree-resolved', METHOD_NAME)
app.connect('builder-inited', add_classlist_handler)
app.connect('html-page-context', html_page_context) |
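A conf.py sketch of how an extension like this is typically enabled; the module name below is a placeholder, since the file's real location is not given here.
# conf.py (sketch) -- "ext.sidebar_classlist" is a hypothetical module name for
# the file above; Sphinx only needs to import it and call its setup().
extensions = [
    "sphinx.ext.autodoc",
    "ext.sidebar_classlist",
]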
fake authz key file |
from Utils.Utilities import encodeUnicodeToBytes
from future.utils import viewitems, viewvalues, listitems
import os, hmac, hashlib, cherrypy
from tempfile import NamedTemporaryFile
from Utils.PythonVersion import PY3
from WMCore.REST.Main import RESTMain
from WMCore.REST.Auth import authz_canonical
from WMCore.Configuration import Configuration
def fake_authz_headers(hmac_key, method = 'HNLogin',
login='testuser', name='Test User',
dn="/test/dn", roles={}, format="list"):
"""Create fake authentication and authorisation headers compatible
with the CMSWEB front-ends. Assumes you have the HMAC signing key
the back-end will use to validate the headers.
:arg str hmac_key: binary key data for signing headers.
:arg str method: authentication method, one of X509Cert, X509Proxy,
HNLogin, HostIP, AUCookie or None.
:arg str login: account login name.
:arg str name: account user name.
:arg str dn: account X509 subject.
:arg dict roles: role dictionary, each role with 'site' and 'group' lists.
:returns: list of header name, value tuples to add to a HTTP request."""
headers = { 'cms-auth-status': 'OK', 'cms-authn-method': method }
if login:
headers['cms-authn-login'] = login
if name:
headers['cms-authn-name'] = name
if dn:
headers['cms-authn-dn'] = dn
for name, role in viewitems(roles):
name = 'cms-authz-' + authz_canonical(name)
headers[name] = []
for r in 'site', 'group':
if r in role:
headers[name].extend(["%s:%s" % (r, authz_canonical(v)) for v in role[r]])
headers[name] = " ".join(headers[name])
prefix = suffix = ""
hkeys = list(headers)
for hk in sorted(hkeys):
if hk != 'cms-auth-status':
prefix += "h%xv%x" % (len(hk), len(headers[hk]))
suffix += "%s%s" % (hk, headers[hk])
msg = prefix + "#" + suffix
if PY3:
hmac_key = encodeUnicodeToBytes(hmac_key)
msg = encodeUnicodeToBytes(msg)
cksum = hmac.new(hmac_key, msg, hashlib.sha1).hexdigest()
headers['cms-authn-hmac'] = cksum
if format == "list":
return listitems(headers)
else:
return headers
def METHOD_NAME(delete=True):
"""Create temporary file for fake authorisation hmac signing key.
:returns: Instance of :class:`~.NamedTemporaryFile`, whose *data*
attribute contains the HMAC signing binary key."""
t = NamedTemporaryFile(delete=delete)
with open("/dev/urandom", "rb") as fd:
t.data = fd.read(20)
t.write(t.data)
t.seek(0)
return t
def setup_dummy_server(module_name, class_name, app_name = None, authz_key_file=None, port=8888):
"""Helper function to set up a :class:`~.RESTMain` server from given
module and class. Creates a fake server configuration and instantiates
the server application from it.
:arg str module_name: module from which to import test class.
:arg str class_name: name of the server test class.
:arg str app_name: optional test application name, 'test' by default.
:returns: tuple with the server object and authz hmac signing key."""
if authz_key_file:
test_authz_key = authz_key_file
else:
test_authz_key = METHOD_NAME()
cfg = Configuration()
main = cfg.section_('main')
main.application = app_name or 'test'
main.silent = True
main.index = 'top'
main.authz_defaults = { 'role': None, 'group': None, 'site': None }
main.section_('tools').section_('cms_auth').key_file = test_authz_key.name
app = cfg.section_(app_name or 'test')
app.admin = '[email protected]'
app.description = app.title = 'Test'
views = cfg.section_('views')
top = views.section_('top')
top.object = module_name + "." + class_name
server = RESTMain(cfg, os.getcwd())
server.validate_config()
server.setup_server()
server.install_application()
cherrypy.config.update({'server.socket_port': port})
cherrypy.config.update({'server.socket_host': '127.0.0.1'})
cherrypy.config.update({'request.show_tracebacks': True})
cherrypy.config.update({'environment': 'test_suite'})
for app in viewvalues(cherrypy.tree.apps):
if '/' in app.config:
app.config["/"]["request.show_tracebacks"] = True
return server, test_authz_key |
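A hedged example of exercising the helpers above; the role and group values are illustrative, and the import path depends on where this module sits in the WMCore test tree.
key_file = METHOD_NAME()                                # the masked key-file factory
headers = fake_authz_headers(
    key_file.data,
    roles={"admin": {"group": ["reqmgr"]}},             # assumed role/group names
    format="dict",
)
print(headers["cms-authn-login"], headers["cms-authn-hmac"])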
run | import os
import shutil
import attr
import plistlib
from packaging import version
import tempfile
from drawBot.drawBotSettings import __version__
from drawBot.scriptTools import ScriptRunner
from drawBot.drawBotDrawingTools import _drawBotDrawingTool
"""
DrawBot support for .drawbot packages.
* Read and build packages.
* Run packages
MyTool.drawbot (package)
info.plist
lib/
main.py
anotherScript.py
anImage.jpg
info.plist
name: optional, default to <name>.drawBot
version: optional, default to 0.0, but advised to set
developer: optional, default to None
developerURL: optional, default to None
requiresVersion: optional, default to all versions
mainScript: optional, default to 'lib/main.py' if this isn't specified?)
"""
drawBotVersion = version.Version(__version__)
@attr.s(slots=True)
class DrawBotPackageInfo(object):
name = attr.ib(default="", init=False, type=str, validator=attr.validators.instance_of(str))
version = attr.ib(default="0.0", init=False, type=str, validator=attr.validators.instance_of(str))
developer = attr.ib(default="", init=False, type=str, validator=attr.validators.instance_of(str))
developerURL = attr.ib(default="", init=False, type=str, validator=attr.validators.instance_of(str))
requiresVersion = attr.ib(default="0.0", init=False, type=str, validator=attr.validators.instance_of(str))
mainScript = attr.ib(default="main.py", init=False, type=str, validator=attr.validators.instance_of(str))
def asDict(self):
"""
Return only keys which are different from the defaults.
"""
defaults = attr.asdict(self.__class__())
data = attr.asdict(self)
for key, value in defaults.items():
if data[key] == value:
del data[key]
return data
def fromDict(self, data):
"""
Set data from dict.
"""
for key, value in data.items():
setattr(self, key, value)
class DrawBotPackage(object):
def __init__(self, path=None):
self.path = path
self.info = DrawBotPackageInfo()
self._readInfo()
def _readInfo(self):
"""
Read the info.plist if the file is available.
"""
# get the info.plist path
infoPath = self.infoPath()
if infoPath and os.path.exists(infoPath):
with open(infoPath, "rb") as f:
info = plistlib.load(f)
self.info.fromDict(info)
# validate incoming info
attr.validate(self.info)
def infoPath(self):
"""
Return the info.plist path.
Return None if there is no root path.
"""
if self.path:
return os.path.join(self.path, "info.plist")
return None
def mainScriptPath(self):
"""
Return the main script's Python file path.
Return None if there is no root path.
"""
if self.path:
return os.path.join(self.path, "lib", self.info.mainScript)
return None
def METHOD_NAME(self):
"""
Execute the .drawBot package.
Return whether execution was successful, together with a report on failure.
"""
# check if the package can run in this version of DrawBot
if version.Version(self.info.requiresVersion) > drawBotVersion:
return False, "Requires a newer version of DrawBot (%s)." % self.info.requiresVersion
# get the main script path
path = self.mainScriptPath()
if path is None:
return False, "Cannot execute an empty package."
if not os.path.exists(path):
return False, "Cannot find '%s'." % path
# create a namespace
namespace = {}
# add the tool callbacks in the name space
_drawBotDrawingTool._addToNamespace(namespace)
# run the script
ScriptRunner(path=path, namespace=namespace)
return True, ""
def buildPackage(self, destinationPath, scriptRoot):
"""
Build a .drawbot package
"""
if not destinationPath.endswith(".drawbot"):
return False, "The path to save the package must have a .drawbot file extension."
# check if the script root exists
if not os.path.exists(scriptRoot):
return False, "Cannot find the script root '%s'" % scriptRoot
# check if the main script path exists
mainScriptPath = os.path.join(scriptRoot, self.info.mainScript)
if not os.path.exists(mainScriptPath):
return False, "Main script path '%s' does not exists in '%s'." % (self.info.mainScript, mainScriptPath)
# build packages in temp folder
tempDir = tempfile.mkdtemp()
# set the temp folder
self.path = tempDir
# validate info
attr.validate(self.info)
# write the plist
infoData = self.info.asDict()
# only write info that is different from the defaults
if infoData:
with open(self.infoPath(), "wb") as f:
plistlib.dump(infoData, f)
# build lib root path
libRoot = os.path.join(self.path, "lib")
# copy the script root
shutil.copytree(scriptRoot, libRoot)
# remove existing destination paths
if os.path.exists(destinationPath):
shutil.rmtree(destinationPath)
# copy the temp to the destination path
shutil.copytree(tempDir, destinationPath)
# remove the temp
shutil.rmtree(tempDir)
# set the destination path
self.path = destinationPath
return True, ""
# ####
# # an empty package
# p = DrawBotPackage()
# print(p.path)
# print(p.info.name)
# print(p.infoPath())
# print(p.info)
# # create on the deskop such a file
# tempScriptDir = tempfile.mkdtemp()
# with open(os.path.join(tempScriptDir, "main.py"), "w") as f:
# f.write("print('hello world! Im running from a .drawbot package!!')")
# packagePath = os.path.expanduser(os.path.join("~", "Desktop", "simple.drawbot"))
# package = DrawBotPackage()
# package.buildPackage(packagePath, tempScriptDir)
# shutil.rmtree(tempScriptDir)
# p = DrawBotPackage(packagePath)
# print(p.path)
# print(p.info.name)
# print(p.infoPath())
# print(p.info)
# print(p.run()) |
get info | """This is the main component of the game, the core of the game that squishes
everything together and makes the game work. Usually the user can just import this
module and use the beat() function and everything will be fine, but should there
be anything that cannot be accomplished that way, the user may also
freely import other modules.
"""
import re
from zulip_bots.game_handler import BadMoveException
from . import database, mechanics
COMMAND_PATTERN = re.compile("^(\\w*).*(\\d,\\d).*(\\d,\\d)|^(\\w+).*(\\d,\\d)")
def METHOD_NAME():
"""Gets the info on starting the game
:return: Info on how to start the game
"""
return "To start a game, mention me and add `create`. A game will start " "in that topic. "
def getHelp():
"""Gets the help message
:return: Help message
"""
return """Commands:
put (v,h): Put a man into the grid in phase 1
move (v,h) -> (v,h): Moves a man from one point to -> another point
take (v,h): Take an opponent's man from the grid in phase 2/3
v: vertical position of grid
h: horizontal position of grid"""
def unknown_command():
"""Returns an unknown command info
:return: A string containing info about available commands
"""
message = "Unknown command. Available commands: put (v,h), take (v,h), move (v,h) -> (v,h)"
raise BadMoveException(message)
def beat(message, topic_name, merels_storage):
"""This gets triggered every time a user send a message in any topic
:param message: User's message
:param topic_name: User's current topic
:param merels_storage: Merels' storage
:return: a tuple of the response string and a message: a non-empty string
if we want to keep the turn of the same player,
an empty string otherwise.
"""
database.MerelsStorage(topic_name, merels_storage)
match = COMMAND_PATTERN.match(message)
same_player_move = "" # message indicating move of the same player
if match is None:
return unknown_command()
if match.group(1) is not None and match.group(2) is not None and match.group(3) is not None:
responses = ""
command = match.group(1)
if command.lower() == "move":
p1 = [int(x) for x in match.group(2).split(",")]
p2 = [int(x) for x in match.group(3).split(",")]
if mechanics.get_take_status(topic_name, merels_storage) == 1:
raise BadMoveException("Take is required to proceed." " Please try again.\n")
responses += mechanics.move_man(topic_name, p1, p2, merels_storage) + "\n"
no_moves = after_event_checkup(responses, topic_name, merels_storage)
mechanics.update_hill_uid(topic_name, merels_storage)
responses += mechanics.display_game(topic_name, merels_storage) + "\n"
if no_moves != "":
same_player_move = no_moves
else:
return unknown_command()
if mechanics.get_take_status(topic_name, merels_storage) == 1:
same_player_move = "Take is required to proceed.\n"
return responses, same_player_move
elif match.group(4) is not None and match.group(5) is not None:
command = match.group(4)
p1 = [int(x) for x in match.group(5).split(",")]
# put 1,2
if command == "put":
responses = ""
if mechanics.get_take_status(topic_name, merels_storage) == 1:
raise BadMoveException("Take is required to proceed." " Please try again.\n")
responses += mechanics.put_man(topic_name, p1[0], p1[1], merels_storage) + "\n"
no_moves = after_event_checkup(responses, topic_name, merels_storage)
mechanics.update_hill_uid(topic_name, merels_storage)
responses += mechanics.display_game(topic_name, merels_storage) + "\n"
if no_moves != "":
same_player_move = no_moves
if mechanics.get_take_status(topic_name, merels_storage) == 1:
same_player_move = "Take is required to proceed.\n"
return responses, same_player_move
# take 5,3
elif command == "take":
responses = ""
if mechanics.get_take_status(topic_name, merels_storage) == 1:
responses += mechanics.take_man(topic_name, p1[0], p1[1], merels_storage) + "\n"
if "Failed" in responses:
raise BadMoveException(responses)
mechanics.update_toggle_take_mode(topic_name, merels_storage)
no_moves = after_event_checkup(responses, topic_name, merels_storage)
mechanics.update_hill_uid(topic_name, merels_storage)
responses += mechanics.display_game(topic_name, merels_storage) + "\n"
responses += check_win(topic_name, merels_storage)
if no_moves != "":
same_player_move = no_moves
return responses, same_player_move
else:
raise BadMoveException("Taking is not possible.")
else:
return unknown_command()
def check_take_mode(response, topic_name, merels_storage):
"""This checks whether the previous action can result in a take mode for
the current player. It assumes that the previous action was successful.
:param response: A response string
:param topic_name: Topic name
:param merels_storage: Merels' storage
:return: None
"""
if not ("Failed" in response):
if mechanics.can_take_mode(topic_name, merels_storage):
mechanics.update_toggle_take_mode(topic_name, merels_storage)
else:
mechanics.update_change_turn(topic_name, merels_storage)
def check_any_moves(topic_name, merels_storage):
"""Check whether the player can make any moves, if can't switch to another
player
:param topic_name: Topic name
:param merels_storage: MerelsDatabase object
:return: A response string
"""
if not mechanics.can_make_any_move(topic_name, merels_storage):
mechanics.update_change_turn(topic_name, merels_storage)
return "Cannot make any move on the grid. Switching to " "previous player.\n"
return ""
def after_event_checkup(response, topic_name, merels_storage):
"""After doing certain moves in the game, it will check for take mode
availability and check for any possible moves
:param response: Current response string. This is useful for checking
any failed previous commands
:param topic_name: Topic name
:param merels_storage: Merels' storage
:return: A response string
"""
check_take_mode(response, topic_name, merels_storage)
return check_any_moves(topic_name, merels_storage)
def check_win(topic_name, merels_storage):
"""Checks whether the current grid has a winner, if it does, finish the
game and remove it from the database
:param topic_name: Topic name
:param merels_storage: Merels' storage
:return:
"""
merels = database.MerelsStorage(topic_name, merels_storage)
win = mechanics.who_won(topic_name, merels_storage)
if win != "None":
merels.remove_game(topic_name)
return f"{win} wins the game!"
return "" |
run | import logging
import os
from avocado.utils import process
from virttest import libvirt_version
from virttest import virt_vm, utils_misc
from virttest.libvirt_xml import vm_xml, xcepts
from virttest.utils_test import libvirt
from virttest.utils_libvirt import libvirt_disk
LOG = logging.getLogger('avocado.' + __name__)
cleanup_files = []
def create_customized_disk(params, device_target, source_file_path):
"""
Create one customized disk with related attributes
:param params: dict wrapped with params
:param device_target: device target
:param source_file_path: source file path
"""
type_name = params.get("type_name")
disk_device = params.get("device_type")
device_bus = params.get("target_bus")
device_format = params.get("target_format")
source_dict = {}
if source_file_path:
if 'block' in type_name:
source_dict.update({"dev": source_file_path})
else:
source_dict.update({"file": source_file_path})
disk_src_dict = {"attrs": source_dict}
addr_str = params.get("addr_attrs")
customized_disk = libvirt_disk.create_primitive_disk_xml(
type_name, disk_device,
device_target, device_bus,
device_format, disk_src_dict, None)
if addr_str:
addr_dict = eval(addr_str)
customized_disk.address = customized_disk.new_disk_address(
**{"attrs": addr_dict})
LOG.debug("create customized xml: %s", customized_disk)
return customized_disk
def create_multiple_file_source_disks(params):
"""
Create multiple file source disks
:param params: dict wrapped with params
"""
device_format = params.get("target_format")
source_file_path = params.get("virt_disk_device_source")
source_file_list = ["%s.1" % source_file_path, "%s.2" % source_file_path, "%s.3" % source_file_path]
device_target_list = ['vdb', 'vdc', 'vdd']
created_file_source_disks = []
for device_target, source_file in zip(device_target_list, source_file_list):
libvirt.create_local_disk("file", source_file, 1, device_format)
cleanup_files.append(source_file)
source_disk = create_customized_disk(params, device_target, source_file)
created_file_source_disks.append(source_disk)
return created_file_source_disks
def check_multiple_file_source_disks(params, log_config_path, test):
"""
Check related information in libvirtd log
:param params: wrapped parameters in dictionary
:param log_config_path: log config path
:param test: test assert object
"""
msg1 = params.get('message_1', 'Setting up disks')
msg2 = params.get('message_2', 'Setup all disks')
for message in [msg1, msg2]:
result = utils_misc.wait_for(lambda: libvirt.check_logfile(message, log_config_path), timeout=20)
if not result:
test.fail("Failed to get expected messages: %s from log file: %s."
% (message, log_config_path))
def METHOD_NAME(test, params, env):
"""
Test starting a VM with disk devices when the device control file is absent.
1. Prepare the test environment, destroy or suspend a VM.
2. Prepare the test XML.
3. Perform the test operation.
4. Recover the test environment.
5. Confirm the test result.
"""
vm_name = params.get("main_vm")
vm = env.get_vm(vm_name)
virsh_dargs = {'debug': True, 'ignore_status': True}
# Disk specific attributes.
backend_device = params.get("backend_device", "disk")
hotplug = "yes" == params.get("virt_device_hotplug")
status_error = "yes" == params.get("status_error")
define_error = "yes" == params.get("define_error", "no")
log_config_path = os.path.join(test.debugdir, "libvirtd.log")
control_path = '/dev/mapper/control'
disk_objects = None
# Back up xml file.
if vm.is_alive():
vm.destroy(gracefully=False)
vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
# Skip this case if libvirt version doesn't support this feature
libvirt_version.is_libvirt_feature_supported(params)
try:
vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
if backend_device == "multiple_disks":
disk_objects = create_multiple_file_source_disks(params)
if os.path.exists(control_path):
process.METHOD_NAME('rm -rf /dev/mapper/control', ignore_status=True, shell=True)
if not hotplug:
# Sync VM xml.
for disk_xml in disk_objects:
vmxml.add_device(disk_xml)
vmxml.sync()
vm.start()
vm.wait_for_login().close()
except virt_vm.VMStartError as e:
if status_error:
if hotplug:
test.fail("In hotplug scenario, VM should "
"start successfully but not."
"Error: %s" % str(e))
else:
LOG.debug("VM failed to start as expected."
"Error: %s", str(e))
else:
test.fail("VM failed to start."
"Error: %s" % str(e))
except xcepts.LibvirtXMLError as xml_error:
if not define_error:
test.fail("Failed to define VM:\n%s" % xml_error)
else:
LOG.info("As expected, failed to define VM")
except Exception as ex:
test.error("unexpected exception happen: %s" % str(ex))
else:
if backend_device == "multiple_disks":
check_multiple_file_source_disks(params, log_config_path, test)
finally:
# Recover VM.
if vm.is_alive():
vm.destroy(gracefully=False)
LOG.info("Restoring vm...")
vmxml_backup.sync()
# Clean up images
for file_path in cleanup_files:
if os.path.exists(file_path):
os.remove(file_path)
if not os.path.exists(control_path):
process.METHOD_NAME('mknod /dev/mapper/control c 10 236', ignore_status=True, shell=True) |
get selected filter options | import platform
from os.path import expanduser
from PySide6 import QtWidgets, QtGui
from src.Controller.PathHandler import resource_path
import logging
class SelectSubgroupOptions(QtWidgets.QWidget):
"""
Select subgroup options for batch processing.
"""
def __init__(self):
"""
Initialise the class.
"""
QtWidgets.QWidget.__init__(self)
# Create the main layout
self.main_layout = QtWidgets.QVBoxLayout()
# Get the stylesheet
if platform.system() == 'Darwin':
self.stylesheet_path = "res/stylesheet.qss"
else:
self.stylesheet_path = "res/stylesheet-win-linux.qss"
self.stylesheet = open(resource_path(self.stylesheet_path)).read()
# Info messages
self.message = QtWidgets.QLabel(
"No Clinical-data-SR files located in current selected directory"
)
# Filter options table
self.filter_table = QtWidgets.QTableWidget(0, 0)
self.filter_table.setStyleSheet(self.stylesheet)
# Set up layout
self.main_layout.addWidget(self.message)
self.main_layout.addWidget(self.filter_table)
self.setLayout(self.main_layout)
# storing the currently selected filter options
self._selected_filters = {}
self.filter_table.cellClicked.connect(self.select_filter_option_cell)
logging.debug("SelectSubgroupOptions successfully initialised.")
def METHOD_NAME(self):
"""
Getter for the selected filters
"""
return self._selected_filters
def display_no_data(self):
"""
Utility method to display an empty table and appropriate message
"""
self.message.setText(
"No Clinical-data-SR files located in current selected directory"
)
self.filter_table.setRowCount(0)
self.filter_table.setColumnCount(0)
self.filter_table.setHorizontalHeaderLabels([])
def show_filtering_options_in_table(self, options_data_dict):
"""
Displays the data in the table
:param options_data_dict: dictionary of clinical
data attributes and values
"""
if not options_data_dict:
self.display_no_data()
return
self.message.setText("Select values to filter by:")
self.filter_table.setRowCount(0)
self.filter_table.setColumnCount(0)
# removes the Patient Identifier (assumed to be the first column
# in the dataset)
# As the column name may change, we cannot hard-code it here.
# It is not a necessary filter option, as specified in the requirements.
options_data_dict.pop(list(options_data_dict)[0])
columns_to_remove = []
# remove the keys with an empty list of options
for title, values_list in options_data_dict.items():
filtered_values = [x for x in values_list if x != ""]
if len(filtered_values) == 0:
columns_to_remove.append(title)
for title in columns_to_remove:
options_data_dict.pop(title)
for title in options_data_dict.keys():
col = self.filter_table.columnCount()
self.filter_table.insertColumn(col)
for row in range(0, len(options_data_dict[title])):
str_value = str(options_data_dict[title][row])
# filters out blank options
if str_value == "":
continue
filter_value = QtWidgets.QTableWidgetItem(str_value)
if row >= self.filter_table.rowCount():
self.filter_table.insertRow(row)
self.filter_table.setItem(row, col, filter_value)
self.filter_table.setHorizontalHeaderLabels(options_data_dict.keys())
def select_filter_option_cell(self, row, column):
"""
Toggles the selected options green and stores value
:param row: row index that was clicked
:param column: column index that was clicked
"""
item = self.filter_table.item(row, column)
# in the case they select empty cell
if not item:
return
header = self.filter_table.horizontalHeaderItem(column).text()
text_filter = item.text()
logging.debug(f"Cell selected: ({row}, {column}). " \
"Column header: '{header}'. String-value: '{text_filter}'")
if header in self._selected_filters.keys():
current_filters = self._selected_filters[header]
if text_filter in current_filters:
item.setBackground(QtGui.QColor(255, 255, 255))
# and remove item
self.remove_selected_filters(header, text_filter)
logging.debug(f"Value: '{text_filter}' deselected")
return
item.setBackground(QtGui.QColor(144, 238, 144))
# store in model for reference at output
self.set_selected_filters(header, text_filter)
logging.debug(f"Value: '{text_filter}' selected")
def set_selected_filters(self, filter_type, value):
"""
Setter for the selected filters
:param filter_type: the column name
:param value: the actual value within that column
"""
if filter_type not in self._selected_filters.keys():
self._selected_filters[filter_type] = [value]
else:
self._selected_filters[filter_type].append(value)
def remove_selected_filters(self, filter_type, value):
"""
Remove filter from the selected_filters store
:param filter_type: the column name
:param value: the actual value within that column
"""
if filter_type not in self._selected_filters.keys():
return
else:
self._selected_filters[filter_type].remove(value) |
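# Hedged usage sketch (illustrative only, not part of the original module):
# a parent view could populate the table from a dict of clinical-data columns
# and later read the user's selections back through the masked getter above.
# The column names and values below are invented for the example; the first
# key is dropped by the widget as the assumed Patient Identifier column.
app = QtWidgets.QApplication([])
options_widget = SelectSubgroupOptions()
options_widget.show_filtering_options_in_table({"PatientID": ["P1", "P2"], "Gender": ["F", "M", ""], "Site": ["Lung", "Breast"]})
options_widget.show()
selected = options_widget.METHOD_NAME()  # e.g. {"Gender": ["F"]} once the user clicks a cell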
format plotly | import pandas as pd
import streamlit as st
import yfinance
from openbb_terminal.core.plots.plotly_helper import OpenBBFigure
from openbb_terminal.core.session.current_system import set_system_variable
from openbb_terminal.dashboards.stream import streamlit_helpers as st_helpers
pd.options.plotting.backend = "plotly"
# Suppressing sdk logs
set_system_variable("LOGGING_SUPPRESS", True)
st.set_page_config(
layout="wide",
page_title="Chains",
initial_sidebar_state="expanded",
)
st_helpers.set_current_page("Chains")
st_helpers.set_css()
st.sidebar.markdown(
"<h2 style='text-align: center;'>Option Chain Dashboard</h2>",
unsafe_allow_html=True,
)
st.sidebar.write("Select a ticker and expiry to view the option chain")
TICKER_WIDGET = st.sidebar.empty()
EXPIRY_WIDGET = st.sidebar.empty()
INST_WIDGET = st.sidebar.empty()
X_WIDGET = st.sidebar.empty()
Y_WIDGET = st.sidebar.empty()
OPTS = [
"lastTradeDate",
"strike",
"lastPrice",
"bid",
"ask",
"change",
"percentChange",
"volume",
"openInterest",
"impliedVolatility",
]
def clean_str(string):
new_str = ""
for letter in string:
if letter.isupper():
new_str += " "
new_str += letter
return new_str.title()
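# Quick illustration of what clean_str produces for the camelCase column
# names in OPTS below (inputs chosen from that list):
# clean_str("impliedVolatility") -> "Implied Volatility"
# clean_str("lastTradeDate")     -> "Last Trade Date"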
def METHOD_NAME(fig: OpenBBFigure, x, y, ticker, expiry, inst):
fig.update_yaxes(title=clean_str(y))
fig.update_xaxes(title=clean_str(x))
expires = ", ".join(expiry)
title = (
f"{clean_str(y)} vs. {clean_str(x)} for {ticker.upper()} {inst}s on {expires}"
)
fig.update_layout(
margin=dict(t=40),
autosize=False,
width=1000,
height=500,
title=dict(
text=title,
y=0.98,
x=0.5,
xanchor="center",
yanchor="top",
),
)
class Chart:
def __init__(self):
self.stock = st_helpers.load_state("stock", {})
self.last_ticker = st_helpers.load_state("last_ticker", "")
self.expiry = st_helpers.load_state("expiry", [])
self.dfs = st_helpers.load_state("dfs", {})
self.options = st_helpers.load_state("options", [])
def update(self, ticker, inst, x, y):
if self.expiry:
fig = OpenBBFigure()
for expire in self.expiry:
if expire not in self.dfs:
self.dfs[expire] = self.stock.option_chain(expire)
group = self.dfs[expire]
df = group.calls if inst == "Call" else group.puts
fig.add_scatter(
x=df[x],
y=df[y],
mode="lines",
connectgaps=True,
name=expire,
)
METHOD_NAME(fig, x, y, ticker, self.expiry, inst)
fig.update_layout(margin=dict(l=40))
st.plotly_chart(fig, use_container_width=True)
def on_change(self):
ticker = st_helpers.load_state("ticker", "")
if ticker and ticker != self.last_ticker:
stock = yfinance.Ticker(ticker)
self.options = list(stock.options)
st_helpers.save_state("options", self.options)
self.last_ticker = ticker
st_helpers.save_state("last_ticker", self.last_ticker)
self.stock = stock
st_helpers.save_state("stock", self.stock)
def run(self):
with TICKER_WIDGET.container():
ticker = TICKER_WIDGET.text_input(
"Ticker", key="ticker", on_change=self.on_change
)
if self.options:
with EXPIRY_WIDGET.container():
exp = EXPIRY_WIDGET.multiselect(
"Expiry",
st_helpers.load_state("options", []),
key="exp",
default=self.options[0] if self.options else None,
)
self.expiry = exp
with INST_WIDGET.container():
inst = INST_WIDGET.selectbox("Type", ["Put", "Call"])
with X_WIDGET.container():
x = X_WIDGET.selectbox("X", OPTS, index=1)
with Y_WIDGET.container():
y = Y_WIDGET.selectbox("Y", OPTS, index=3)
self.update(ticker, inst, x, y)
if __name__ == "__main__":
Chart().run() |
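# Hedged note: as a Streamlit page this module is normally launched through
# the Streamlit CLI rather than imported directly, e.g. (path hypothetical):
# streamlit run <path-to-this-file>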
resolve available attributes | from typing import List
import graphene
from ...attribute import models as attribute_models
from ...page import models
from ...permission.enums import PagePermissions, PageTypePermissions
from ..attribute.filters import AttributeFilterInput, AttributeWhereInput
from ..attribute.types import Attribute, AttributeCountableConnection, SelectedAttribute
from ..core import ResolveInfo
from ..core.connection import (
CountableConnection,
create_connection_slice,
filter_connection_queryset,
)
from ..core.descriptions import ADDED_IN_33, DEPRECATED_IN_3X_FIELD, RICH_CONTENT
from ..core.doc_category import DOC_CATEGORY_PAGES
from ..core.federation import federated_entity, resolve_federation_references
from ..core.fields import FilterConnectionField, JSONString, PermissionsField
from ..core.scalars import Date
from ..core.types import ModelObjectType, NonNullList
from ..meta.types import ObjectWithMetadata
from ..translations.fields import TranslationField
from ..translations.types import PageTranslation
from .dataloaders import (
PageAttributesByPageTypeIdLoader,
PagesByPageTypeIdLoader,
PageTypeByIdLoader,
SelectedAttributesByPageIdLoader,
)
@federated_entity("id")
class PageType(ModelObjectType[models.PageType]):
id = graphene.GlobalID(required=True, description="ID of the page type.")
name = graphene.String(required=True, description="Name of the page type.")
slug = graphene.String(required=True, description="Slug of the page type.")
attributes = NonNullList(
Attribute, description="Page attributes of that page type."
)
available_attributes = FilterConnectionField(
AttributeCountableConnection,
filter=AttributeFilterInput(),
where=AttributeWhereInput(),
description="Attributes that can be assigned to the page type.",
permissions=[
PagePermissions.MANAGE_PAGES,
PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,
],
)
has_pages = PermissionsField(
graphene.Boolean,
description="Whether page type has pages assigned.",
permissions=[
PagePermissions.MANAGE_PAGES,
PageTypePermissions.MANAGE_PAGE_TYPES_AND_ATTRIBUTES,
],
)
class Meta:
description = (
"Represents a type of page. It defines what attributes are available to "
"pages of this type."
)
interfaces = [graphene.relay.Node, ObjectWithMetadata]
model = models.PageType
@staticmethod
def get_model():
return models.PageType
@staticmethod
def resolve_attributes(root: models.PageType, info: ResolveInfo):
return PageAttributesByPageTypeIdLoader(info.context).load(root.pk)
@staticmethod
def METHOD_NAME(
root: models.PageType, info: ResolveInfo, **kwargs
):
qs = attribute_models.Attribute.objects.get_unassigned_page_type_attributes(
root.pk
)
qs = filter_connection_queryset(qs, kwargs, info.context)
return create_connection_slice(qs, info, kwargs, AttributeCountableConnection)
@staticmethod
def resolve_has_pages(root: models.PageType, info: ResolveInfo):
return (
PagesByPageTypeIdLoader(info.context)
.load(root.pk)
.then(lambda pages: bool(pages))
)
@staticmethod
def __resolve_references(roots: List["PageType"], _info: ResolveInfo):
return resolve_federation_references(PageType, roots, models.PageType.objects)
class PageTypeCountableConnection(CountableConnection):
class Meta:
doc_category = DOC_CATEGORY_PAGES
node = PageType
class Page(ModelObjectType[models.Page]):
id = graphene.GlobalID(required=True, description="ID of the page.")
seo_title = graphene.String(description="Title of the page for SEO.")
seo_description = graphene.String(description="Description of the page for SEO.")
title = graphene.String(required=True, description="Title of the page.")
content = JSONString(description="Content of the page." + RICH_CONTENT)
publication_date = Date(
deprecation_reason=(
f"{DEPRECATED_IN_3X_FIELD} "
"Use the `publishedAt` field to fetch the publication date."
),
)
published_at = graphene.DateTime(
description="The page publication date." + ADDED_IN_33
)
is_published = graphene.Boolean(
required=True, description="Determines if the page is published."
)
slug = graphene.String(required=True, description="Slug of the page.")
page_type = graphene.Field(
PageType, required=True, description="Determines the type of page."
)
created = graphene.DateTime(
required=True, description="Date and time at which page was created."
)
content_json = JSONString(
description="Content of the page." + RICH_CONTENT,
deprecation_reason=f"{DEPRECATED_IN_3X_FIELD} Use the `content` field instead.",
required=True,
)
translation = TranslationField(PageTranslation, type_name="page")
attributes = NonNullList(
SelectedAttribute,
required=True,
description="List of attributes assigned to this product.",
)
class Meta:
description = (
"A static page that can be manually added by a shop operator through the "
"dashboard."
)
interfaces = [graphene.relay.Node, ObjectWithMetadata]
model = models.Page
@staticmethod
def resolve_publication_date(root: models.Page, _info: ResolveInfo):
return root.published_at
@staticmethod
def resolve_created(root: models.Page, _info: ResolveInfo):
return root.created_at
@staticmethod
def resolve_page_type(root: models.Page, info: ResolveInfo):
return PageTypeByIdLoader(info.context).load(root.page_type_id)
@staticmethod
def resolve_content_json(root: models.Page, _info: ResolveInfo):
content = root.content
return content if content is not None else {}
@staticmethod
def resolve_attributes(root: models.Page, info: ResolveInfo):
return SelectedAttributesByPageIdLoader(info.context).load(root.id)
class PageCountableConnection(CountableConnection):
class Meta:
doc_category = DOC_CATEGORY_PAGES
node = Page |
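# Hedged illustration (not part of the original module): once these types are
# wired into a graphene schema, a client query for a page type's assignable
# attributes might look like the string below. Field names follow graphene's
# automatic camelCase conversion; the exact root field and the encoded ID are
# assumptions about the surrounding project.
EXAMPLE_PAGE_TYPE_QUERY = """
query {
pageType(id: "UGFnZVR5cGU6MQ==") {
name
hasPages
availableAttributes(first: 5) { edges { node { name slug } } }
}
}
"""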
profile gen | import os, sys
import time
import random
import string
from .astf_general_test import CASTFGeneral_Test, CTRexScenario
from nose.tools import assert_raises
from trex.astf.api import *
from trex.stl.trex_stl_packet_builder_scapy import ip2int, int2ip
class ASTFResilience_Test(CASTFGeneral_Test):
"""Checking stability of ASTF in non-usual conditions """
def setUp(self):
CASTFGeneral_Test.setUp(self)
self.weak = self.is_VM
setup = CTRexScenario.setup_name
if 'no_resilience' in CTRexScenario.modes:
self.skip('not enough memory for this test')
if setup in ['trex12']:
self.weak = True
self.low_memory = self.weak
if setup in ['trex41']:
self.low_memory = True  # trex-41 uses the memory for the driver and crashes
def ip_gen(self, client_base, server_base, client_ips, server_ips):
assert client_ips>0
assert server_ips>0
ip_gen_c = ASTFIPGenDist(ip_range = [client_base, int2ip(ip2int(client_base) + client_ips - 1)])
ip_gen_s = ASTFIPGenDist(ip_range = [server_base, int2ip(ip2int(server_base) + server_ips - 1)])
return ASTFIPGen(dist_client = ip_gen_c,
dist_server = ip_gen_s)
def progs_gen(self, msg_len = 16):
msg = 'x' * msg_len
prog_c = ASTFProgram(side = 'c')
prog_c.send(msg)
prog_c.recv(len(msg))
prog_s = ASTFProgram(side = 's')
prog_s.recv(len(msg))
#prog_s.delay(15000000)
prog_s.send(msg)
return prog_c, prog_s
def METHOD_NAME(self, client_ips, server_ips, templates):
ip_gen = self.ip_gen('16.0.0.1', '48.0.0.1', client_ips, server_ips)
prog_c, prog_s = self.progs_gen()
templates_arr = []
for i in range(templates):
temp_c = ASTFTCPClientTemplate(program = prog_c, ip_gen = ip_gen, cps = i + 1)
temp_s = ASTFTCPServerTemplate(program = prog_s, assoc = ASTFAssociationRule(port = 80 + i))
template = ASTFTemplate(client_template = temp_c, server_template = temp_s)
templates_arr.append(template)
return ASTFProfile(default_ip_gen = ip_gen, templates = templates_arr)
def test_astf_params(self):
print('')
for client_ips in (1<<8, 1<<16):
for server_ips in (1<<8, 1<<16):
for templates in (1, 1<<8, 1<<12):
if self.weak and templates > 1<<8:
continue
if self.weak:
if (client_ips > (1 << 8)) and (server_ips > (1 << 8)):
continue
params = {
'client_ips': client_ips,
'server_ips': server_ips,
'templates': templates,
}
print('Creating profile with params: %s' % params)
profile = self.METHOD_NAME(**params)
profile_str = profile.to_json_str()
print('Profile size: %s' % len(profile_str))
start_time = time.time()
self.astf_trex.load_profile(profile)
print('Load took: %g' % round(time.time() - start_time, 3))
start_time = time.time()
self.astf_trex.start(duration = 1, nc = True)
print('Start took: %g' % round(time.time() - start_time, 3))
self.astf_trex.stop()
def randomString(self, stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
def test_astf_params_dynamic_profile(self):
print('')
for client_ips in (1<<8, 1<<16):
for server_ips in (1<<8, 1<<16):
for templates in (1, 1<<8, 1<<12):
if self.weak:
if ((templates > 1 << 8) or
(server_ips > 1 << 8) or
(client_ips > 1 << 8)):
continue
params = {
'client_ips': client_ips,
'server_ips': server_ips,
'templates': templates,
}
print('Creating profile with params: %s' % params)
profile = self.METHOD_NAME(**params)
profile_str = profile.to_json_str()
print('Profile size: %s' % len(profile_str))
start_time = time.time()
print('Creating random name for the dynamic profile')
random_profile = self.randomString()
print('Dynamic profile name : %s' % str(random_profile))
self.astf_trex.load_profile(profile, pid_input=str(random_profile))
print('Load took: %g' % round(time.time() - start_time, 3))
start_time = time.time()
self.astf_trex.start(duration = 1, nc = True, pid_input=str(random_profile))
print('Start took: %g' % round(time.time() - start_time, 3))
self.astf_trex.stop(pid_input=str(random_profile))
self.astf_trex.reset()
def test_double_start_stop(self):
print('')
c = self.astf_trex
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'))
c.start(duration = 20)
with assert_raises(TRexError):
c.start()
c.stop()
c.stop()
def test_double_start_stop_dynamic_profile(self):
print('')
c = self.astf_trex
random_profile_1 = self.randomString()
print('Dynamic profile(1) : %s' % str(random_profile_1))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'), pid_input=str(random_profile_1))
c.start(duration = 20, pid_input=str(random_profile_1))
with assert_raises(TRexError):
random_profile_2 = self.randomString()
print('Dynamic profile(2) : %s' % str(random_profile_2))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'), pid_input=str(random_profile_2))
c.start(pid_input=str(random_profile_2))
c.stop(pid_input=str(random_profile_1))
c.stop(pid_input=str(random_profile_2))
def test_stress_start_stop(self):
print('')
c = self.astf_trex
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'udp1.py'))
for _ in range(99):
c.start()
c.stop()
def test_stress_start_stop_dynamic_profile(self):
print('')
c = self.astf_trex
profiles = 1000
if self.low_memory:
profiles = 100
for n in range(profiles):
profile_n = self.randomString()
port_n = 9000 + n
tunables = {'port': port_n}
print('Dynamic profile added : %s' % str(profile_n))
print('Port added : %s' % str(port_n))
c.load_profile(os.path.join(CTRexScenario.scripts_path, 'astf', 'http_simple_port_tunable.py'),
tunables=tunables,
pid_input=str(profile_n))
c.start(duration = 20, pid_input=str(profile_n))
c.wait_on_traffic()
c.stop(pid_input = "*")
c.reset()
|
test dict | # Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import unittest
import numpy as np
import torch
from monai.data import MetaTensor
from monai.transforms import EnsureTyped
from tests.utils import assert_allclose
class TestEnsureTyped(unittest.TestCase):
def test_array_input(self):
test_datas = [np.array([[1, 2], [3, 4]]), torch.as_tensor([[1, 2], [3, 4]])]
if torch.cuda.is_available():
test_datas.append(test_datas[-1].cuda())
for test_data in test_datas:
for dtype in ("tensor", "NUMPY"):
result = EnsureTyped(
keys="data", data_type=dtype, dtype=np.float32 if dtype == "NUMPY" else None, device="cpu"
)({"data": test_data})["data"]
if dtype == "NUMPY":
self.assertTrue(result.dtype == np.float32)
self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray))
assert_allclose(result, test_data, type_test=False)
self.assertTupleEqual(result.shape, (2, 2))
def test_single_input(self):
test_datas = [5, 5.0, False, np.asarray(5), torch.tensor(5)]
if torch.cuda.is_available():
test_datas.append(test_datas[-1].cuda())
for test_data in test_datas:
for dtype in ("tensor", "numpy"):
result = EnsureTyped(keys="data", data_type=dtype)({"data": test_data})["data"]
self.assertTrue(isinstance(result, torch.Tensor if dtype == "tensor" else np.ndarray))
if isinstance(test_data, bool):
self.assertFalse(result)
else:
assert_allclose(result, test_data, type_test=False)
self.assertEqual(result.ndim, 0)
def test_string(self):
for dtype in ("tensor", "numpy"):
# string input
result = EnsureTyped(keys="data", data_type=dtype)({"data": "test_string"})["data"]
self.assertTrue(isinstance(result, str))
self.assertEqual(result, "test_string")
# numpy array of string
result = EnsureTyped(keys="data", data_type=dtype)({"data": np.array(["test_string"])})["data"]
self.assertTrue(isinstance(result, np.ndarray))
self.assertEqual(result[0], "test_string")
def test_list_tuple(self):
for dtype in ("tensor", "numpy"):
result = EnsureTyped(keys="data", data_type=dtype, wrap_sequence=False, track_meta=True)(
{"data": [[1, 2], [3, 4]]}
)["data"]
self.assertTrue(isinstance(result, list))
self.assertTrue(isinstance(result[0][1], MetaTensor if dtype == "tensor" else np.ndarray))
assert_allclose(result[1][0], torch.as_tensor(3), type_test=False)
# tuple of numpy arrays
result = EnsureTyped(keys="data", data_type=dtype, wrap_sequence=False)(
{"data": (np.array([1, 2]), np.array([3, 4]))}
)["data"]
self.assertTrue(isinstance(result, tuple))
self.assertTrue(isinstance(result[0], torch.Tensor if dtype == "tensor" else np.ndarray))
assert_allclose(result[1], torch.as_tensor([3, 4]), type_test=False)
def METHOD_NAME(self):
# simulate complicated input data
test_data = {
"img": np.array([1.0, 2.0], dtype=np.float32),
"meta": {"dims": 3, "size": np.array([1, 2, 3]), "path": "temp/test"},
"extra": None,
}
for dtype in ("tensor", "numpy"):
result = EnsureTyped(keys="data", data_type=dtype, device="cpu")({"data": test_data})["data"]
self.assertTrue(isinstance(result, dict))
self.assertTrue(isinstance(result["img"], torch.Tensor if dtype == "tensor" else np.ndarray))
assert_allclose(result["img"], torch.as_tensor([1.0, 2.0]), type_test=False)
self.assertTrue(isinstance(result["meta"]["size"], torch.Tensor if dtype == "tensor" else np.ndarray))
assert_allclose(result["meta"]["size"], torch.as_tensor([1, 2, 3]), type_test=False)
self.assertEqual(result["meta"]["path"], "temp/test")
self.assertEqual(result["extra"], None)
if __name__ == "__main__":
unittest.main() |
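# Hedged sketch (assumed usage, not part of the test file): outside of tests,
# EnsureTyped is typically placed in a MONAI Compose pipeline so every entry
# of a data dict comes out as the requested container type. The "img"/"seg"
# keys below are invented for the example.
# from monai.transforms import Compose
# pipeline = Compose([EnsureTyped(keys=["img", "seg"], data_type="tensor", device="cpu")])
# sample = pipeline({"img": np.ones((1, 8, 8), dtype=np.float32), "seg": np.zeros((1, 8, 8))})
# sample["img"] and sample["seg"] are then tensors on the CPU.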
write file | from __future__ import annotations
import logging
import os
import pathlib
import shutil
import struct
import tempfile
import zipfile
from typing import cast
from comicapi.archivers import Archiver
logger = logging.getLogger(__name__)
class ZipArchiver(Archiver):
"""ZIP implementation"""
def __init__(self) -> None:
super().__init__()
def get_comment(self) -> str:
with zipfile.ZipFile(self.path, "r") as zf:
comment = zf.comment.decode("utf-8")
return comment
def set_comment(self, comment: str) -> bool:
with zipfile.ZipFile(self.path, mode="a") as zf:
zf.comment = bytes(comment, "utf-8")
return True
def read_file(self, archive_file: str) -> bytes:
with zipfile.ZipFile(self.path, mode="r") as zf:
try:
data = zf.read(archive_file)
except (zipfile.BadZipfile, OSError) as e:
logger.error("Error reading zip archive [%s]: %s :: %s", e, self.path, archive_file)
raise
return data
def remove_file(self, archive_file: str) -> bool:
return self.rebuild([archive_file])
def METHOD_NAME(self, archive_file: str, data: bytes) -> bool:
# At the moment there is no option but to rebuild the whole
# zip archive without the indicated file. Not ideal, but perhaps
# another solution can be found
files = self.get_filename_list()
if archive_file in files:
if not self.rebuild([archive_file]):
return False
try:
# now just add the archive file as a new one
with zipfile.ZipFile(self.path, mode="a", allowZip64=True, compression=zipfile.ZIP_DEFLATED) as zf:
zf.writestr(archive_file, data)
return True
except (zipfile.BadZipfile, OSError) as e:
logger.error("Error writing zip archive [%s]: %s :: %s", e, self.path, archive_file)
return False
def get_filename_list(self) -> list[str]:
try:
with zipfile.ZipFile(self.path, mode="r") as zf:
namelist = [file.filename for file in zf.infolist() if not file.is_dir()]
return namelist
except (zipfile.BadZipfile, OSError) as e:
logger.error("Error listing files in zip archive [%s]: %s", e, self.path)
return []
def rebuild(self, exclude_list: list[str]) -> bool:
"""Zip helper func
This recompresses the zip archive, without the files in the exclude_list
"""
try:
with zipfile.ZipFile(
tempfile.NamedTemporaryFile(dir=os.path.dirname(self.path), delete=False), "w", allowZip64=True
) as zout:
with zipfile.ZipFile(self.path, mode="r") as zin:
for item in zin.infolist():
buffer = zin.read(item.filename)
if item.filename not in exclude_list:
zout.writestr(item, buffer)
# preserve the old comment
zout.comment = zin.comment
# replace with the new file
self.path.unlink(missing_ok=True)
zout.close() # Required on windows
shutil.move(cast(str, zout.filename), self.path)
except (zipfile.BadZipfile, OSError) as e:
logger.error("Error rebuilding zip file [%s]: %s", e, self.path)
return False
return True
def copy_from_archive(self, other_archive: Archiver) -> bool:
"""Replace the current zip with one copied from another archive"""
try:
with zipfile.ZipFile(self.path, mode="w", allowZip64=True) as zout:
for filename in other_archive.get_filename_list():
data = other_archive.read_file(filename)
if data is not None:
zout.writestr(filename, data)
# preserve the old comment
comment = other_archive.get_comment()
if comment is not None:
if not self.write_zip_comment(self.path, comment):
return False
except Exception as e:
logger.error("Error while copying to zip archive [%s]: from %s to %s", e, other_archive.path, self.path)
return False
else:
return True
def is_writable(self) -> bool:
return True
def extension(self) -> str:
return ".cbz"
def name(self) -> str:
return "ZIP"
@classmethod
def is_valid(cls, path: pathlib.Path) -> bool:
return zipfile.is_zipfile(path)
def write_zip_comment(self, filename: pathlib.Path | str, comment: str) -> bool:
"""
This is a custom function for writing a comment to a zip file,
since the built-in one doesn't seem to work on Windows and Mac OS X.
Fortunately, the zip comment is at the end of the file, and it's
easy to manipulate. See this page for more info:
http://en.wikipedia.org/wiki/Zip_(file_format)#Structure
"""
# get file size
statinfo = os.stat(filename)
file_length = statinfo.st_size
try:
with open(filename, mode="r+b") as file:
# the starting position, relative to EOF
pos = -4
found = False
# walk backwards to find the "End of Central Directory" record
while (not found) and (-pos != file_length):
# seek, relative to EOF
file.seek(pos, 2)
value = file.read(4)
# look for the end of central directory signature
if bytearray(value) == bytearray([0x50, 0x4B, 0x05, 0x06]):
found = True
else:
# not found, step back another byte
pos = pos - 1
if found:
# now skip forward 20 bytes to the comment length word
pos += 20
file.seek(pos, 2)
# Pack the length of the comment string
fmt = "H" # one 2-byte integer
comment_length = struct.pack(fmt, len(comment)) # pack integer in a binary string
# write out the length
file.write(comment_length)
file.seek(pos + 2, 2)
# write out the comment itself
file.write(comment.encode("utf-8"))
file.truncate()
else:
raise Exception("Could not find the End of Central Directory record!")
except Exception as e:
logger.error("Error writing comment to zip archive [%s]: %s", e, self.path)
return False
else:
return True |
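# Hedged usage sketch (illustrative only, not part of comicapi; the file path
# and the way the path is attached to the archiver are assumptions):
# archiver = ZipArchiver()
# archiver.path = pathlib.Path("example.cbz")
# with zipfile.ZipFile(archiver.path, "w") as zf:
#     zf.writestr("page001.jpg", b"...")
# archiver.METHOD_NAME("ComicInfo.xml", b"<ComicInfo/>")  # masked write-file method above
# archiver.set_comment("tagged by example")
# print(archiver.get_filename_list())  # ['page001.jpg', 'ComicInfo.xml']
# print(archiver.get_comment())        # 'tagged by example'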
test get usage for trial services | from datetime import date, datetime
import pytest
from freezegun import freeze_time
from app.errors import InvalidRequest
from app.models import EMAIL_TYPE, SMS_TYPE
from app.platform_stats.rest import validate_date_range_is_within_a_financial_year
from tests.app.db import (
create_ft_notification_status,
create_notification,
create_service,
create_template,
save_notification,
set_up_usage_data,
)
@freeze_time("2018-06-01")
def test_get_platform_stats_uses_todays_date_if_no_start_or_end_date_is_provided(admin_request, mocker):
today = datetime.now().date()
dao_mock = mocker.patch("app.platform_stats.rest.fetch_notification_status_totals_for_all_services")
mocker.patch("app.service.rest.statistics.format_statistics")
admin_request.get("platform_stats.get_platform_stats")
dao_mock.assert_called_once_with(start_date=today, end_date=today)
def test_get_platform_stats_can_filter_by_date(admin_request, mocker):
start_date = date(2017, 1, 1)
end_date = date(2018, 1, 1)
dao_mock = mocker.patch("app.platform_stats.rest.fetch_notification_status_totals_for_all_services")
mocker.patch("app.service.rest.statistics.format_statistics")
admin_request.get("platform_stats.get_platform_stats", start_date=start_date, end_date=end_date)
dao_mock.assert_called_once_with(start_date=start_date, end_date=end_date)
def test_get_platform_stats_validates_the_date(admin_request):
start_date = "1234-56-78"
response = admin_request.get("platform_stats.get_platform_stats", start_date=start_date, _expected_status=400)
assert response["errors"][0]["message"] == "start_date time data {} does not match format %Y-%m-%d".format(start_date)
@freeze_time("2018-10-31 14:00")
def test_get_platform_stats_with_real_query(admin_request, notify_db_session):
service_1 = create_service(service_name="service_1")
sms_template = create_template(service=service_1, template_type=SMS_TYPE)
email_template = create_template(service=service_1, template_type=EMAIL_TYPE)
create_ft_notification_status(date(2018, 10, 29), "sms", service_1, count=10)
create_ft_notification_status(date(2018, 10, 29), "email", service_1, count=3)
save_notification(create_notification(sms_template, created_at=datetime(2018, 10, 31, 11, 0, 0), key_type="test"))
save_notification(create_notification(sms_template, created_at=datetime(2018, 10, 31, 12, 0, 0), status="delivered"))
save_notification(create_notification(email_template, created_at=datetime(2018, 10, 31, 13, 0, 0), status="delivered"))
response = admin_request.get(
"platform_stats.get_platform_stats",
start_date=date(2018, 10, 29),
)
assert response == {
"email": {
"failures": {
"virus-scan-failed": 0,
"temporary-failure": 0,
"permanent-failure": 0,
"technical-failure": 0,
},
"total": 4,
"test-key": 0,
},
"letter": {
"failures": {
"virus-scan-failed": 0,
"temporary-failure": 0,
"permanent-failure": 0,
"technical-failure": 0,
},
"total": 0,
"test-key": 0,
},
"sms": {
"failures": {
"virus-scan-failed": 0,
"temporary-failure": 0,
"permanent-failure": 0,
"technical-failure": 0,
},
"total": 11,
"test-key": 1,
},
}
@pytest.mark.parametrize(
"start_date, end_date",
[
("2019-04-01", "2019-06-30"),
("2019-08-01", "2019-09-30"),
("2019-01-01", "2019-03-31"),
("2019-12-01", "2020-02-28"),
],
)
def test_validate_date_range_is_within_a_financial_year(start_date, end_date):
validate_date_range_is_within_a_financial_year(start_date, end_date)
@pytest.mark.parametrize(
"start_date, end_date",
[
("2019-04-01", "2020-06-30"),
("2019-01-01", "2019-04-30"),
("2019-12-01", "2020-04-30"),
("2019-03-31", "2019-04-01"),
],
)
def test_validate_date_range_is_within_a_financial_year_raises(start_date, end_date):
with pytest.raises(expected_exception=InvalidRequest) as e:
validate_date_range_is_within_a_financial_year(start_date, end_date)
assert e.value.message == "Date must be in a single financial year."
assert e.value.status_code == 400
def test_validate_date_is_within_a_financial_year_raises_validation_error():
start_date = "2019-08-01"
end_date = "2019-06-01"
with pytest.raises(expected_exception=InvalidRequest) as e:
validate_date_range_is_within_a_financial_year(start_date, end_date)
assert e.value.message == "Start date must be before end date"
assert e.value.status_code == 400
@pytest.mark.parametrize("start_date, end_date", [("22-01-2019", "2019-08-01"), ("2019-07-01", "not-date")])
def test_validate_date_is_within_a_financial_year_when_input_is_not_a_date(start_date, end_date):
with pytest.raises(expected_exception=InvalidRequest) as e:
validate_date_range_is_within_a_financial_year(start_date, end_date)
assert e.value.message == "Input must be a date in the format: YYYY-MM-DD"
assert e.value.status_code == 400
def test_get_usage_for_all_services(notify_db_session, admin_request):
org, org_2, service, service_2, service_3, service_sms_only = set_up_usage_data(datetime(2019, 5, 1))
response = admin_request.get(
"platform_stats.get_usage_for_all_services",
start_date="2019-05-01",
end_date="2019-06-30",
)
assert len(response) == 4
assert response[0]["organisation_id"] == str(org.id)
assert response[0]["service_id"] == str(service.id)
assert response[0]["sms_cost"] == 0
assert response[0]["sms_fragments"] == 0
assert response[0]["letter_cost"] == 3.40
assert response[0]["letter_breakdown"] == "6 second class letters at 45p\n2 first class letters at 35p\n"
assert response[1]["organisation_id"] == str(org_2.id)
assert response[1]["service_id"] == str(service_2.id)
assert response[1]["sms_cost"] == 0
assert response[1]["sms_fragments"] == 0
assert response[1]["letter_cost"] == 14
assert response[1]["letter_breakdown"] == "20 second class letters at 65p\n2 first class letters at 50p\n"
assert response[2]["organisation_id"] == ""
assert response[2]["service_id"] == str(service_sms_only.id)
assert response[2]["sms_cost"] == 0.33
assert response[2]["sms_fragments"] == 3
assert response[2]["letter_cost"] == 0
assert response[2]["letter_breakdown"] == ""
assert response[3]["organisation_id"] == ""
assert response[3]["service_id"] == str(service_3.id)
assert response[3]["sms_cost"] == 0
assert response[3]["sms_fragments"] == 0
assert response[3]["letter_cost"] == 8.25
assert response[3]["letter_breakdown"] == "15 second class letters at 55p\n"
def METHOD_NAME(mocker, admin_request):
# The DAO method is already covered by tests
mock = mocker.patch(
"app.platform_stats.rest.fetch_notification_stats_for_trial_services",
return_value=[],
)
response = admin_request.get("platform_stats.get_usage_for_trial_services")
assert len(response) == 0
mock.assert_called_once()
def test_get_send_methods_stats_by_service(mocker, admin_request):
# The DAO method is already covered by tests
mock = mocker.patch("app.platform_stats.rest.send_method_stats_by_service", return_value=[])
response = admin_request.get(
"platform_stats.get_send_methods_stats_by_service",
start_date="2020-12-01",
end_date="2020-12-07",
)
assert len(response) == 0
mock.assert_called_once_with(
date(2020, 12, 1),
date(2020, 12, 7),
) |
ok | # Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import shutil
import sys
from tempfile import mkdtemp, mkstemp
import mock
import unittest
from cement.core import backend, foundation
from cement.utils.misc import rando
from ebcli.core import ebcore, ebrun, fileoperations
from ebcli.lib import aws
from tests.unit.integration import mockservice
ebrun.fix_path()
class TestApp(foundation.CementApp):
"""
Basic CementApp for generic testing.
"""
class Meta:
label = "app-%s" % rando()[:12]
config_files = []
argv = []
base_controller = None
arguments = []
exit_on_close = False
class CementTestCase(unittest.TestCase):
"""
A copy of cement.utils.test.CementTestCase, in order to avoid an unnecessary
external dependency on `nose`.
"""
app_class = TestApp
def __init__(self, *args, **kw):
super(CementTestCase, self).__init__(*args, **kw)
def setUp(self):
self.app = self.make_app()
_, self.tmp_file = mkstemp()
self.tmp_dir = mkdtemp()
def make_app(self, *args, **kw):
self.reset_backend()
return self.app_class(*args, **kw)
def reset_backend(self):
for _handler in backend.__handlers__.copy():
del backend.__handlers__[_handler]
for _hook in backend.__hooks__.copy():
del backend.__hooks__[_hook]
def METHOD_NAME(self, expr, msg=None):
if not expr:
raise AssertionError(msg)
def eq(self, a, b, msg=None):
if not a == b:
raise AssertionError(msg or "%r != %r" % (a, b))
class BaseIntegrationTest(CementTestCase):
app_class = ebcore.EB
def setUp(self):
super(BaseIntegrationTest, self).setUp()
aws._flush()
aws.set_region('us-east-1')
self.reset_backend()
self.patcher_input = mock.patch('ebcli.core.io.get_input')
self.patcher_output = mock.patch('ebcli.core.io.echo')
self.patcher_warning = mock.patch('ebcli.core.io.log_warning')
self.mock_input = self.patcher_input.start()
self.mock_output = self.patcher_output.start()
self.mock_warning = self.patcher_warning.start()
self.patcher_endpoint = mock.patch('botocore.endpoint.Endpoint')
self.mock_endpoint = self.patcher_endpoint.start()
instance = self.mock_endpoint.return_value
instance.make_request = mockservice.handle_response
instance.host = 'http://someurl.test/something'
if not os.path.exists('testDir/'):
os.makedirs('testDir/')
os.chdir('testDir')
if not os.path.exists(fileoperations.beanstalk_directory):
os.makedirs(fileoperations.beanstalk_directory)
fileoperations.default_section = 'ebcli_test_default'
if not os.path.exists('home'):
os.makedirs('home')
fileoperations.aws_config_folder = 'home' + os.path.sep
fileoperations.aws_config_location \
= fileoperations.aws_config_folder + 'config'
def run_command(self, *args):
self.app = ebcore.EB(argv=list(args))
self.app.setup()
self.app.run()
self.app.close()
def tearDown(self):
self.patcher_input.stop()
self.patcher_output.stop()
self.patcher_endpoint.stop()
self.patcher_warning.stop()
os.chdir(os.path.pardir)
if os.path.exists('testDir'):
if sys.platform.startswith('win'):
os.system('rmdir /S /Q testDir')
else:
shutil.rmtree('testDir')
mockservice.reset()
class BaseOperationsTest(CementTestCase):
module_name = 'base'
def setUp(self):
super(BaseOperationsTest, self).setUp()
self.reset_backend()
self.patcher_input = mock.patch('ebcli.core.io.get_input')
self.patcher_eb = mock.patch('ebcli.operations.' + self.module_name + '.elasticbeanstalk')
self.patcher_output = mock.patch('ebcli.core.io.echo')
self.patcher_file = mock.patch('ebcli.operations.' + self.module_name + '.fileoperations')
self.mock_input = self.patcher_input.start()
self.mock_elasticbeanstalk = self.patcher_eb.start()
self.mock_output = self.patcher_output.start()
def tearDown(self):
self.patcher_eb.stop()
self.patcher_input.stop()
self.patcher_output.stop() |
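# Hedged sketch (assumed usage): a concrete test module would typically
# subclass BaseIntegrationTest and drive the CLI through run_command, with
# the mocked botocore endpoint answering the AWS calls. The command and
# assertion below are illustrative only.
# class PlatformListTest(BaseIntegrationTest):
#     def test_platform_list(self):
#         self.run_command('platform', 'list')
#         self.mock_output.assert_called()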
test image | from recipe_scrapers.saveur import Saveur
from tests import ScraperTest
class TestSaveurScraper(ScraperTest):
scraper_class = Saveur
def test_host(self):
self.assertEqual("saveur.com", self.harvester_class.host())
def test_author(self):
self.assertEqual(None, self.harvester_class.author())
def test_title(self):
self.assertEqual(
"Baked Saffron Yogurt Rice with Chicken (Tahcheen-e Morgh)",
self.harvester_class.title(),
)
def test_total_time(self):
self.assertEqual(285, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("8 servings", self.harvester_class.yields())
def METHOD_NAME(self):
self.assertEqual(
"https://www.saveur.com/uploads/2019/03/07/httpsrecipes.saveur.comuploadsimagesabb46018d2ab45231975b33416325e96ca3afeab.jpeg?auto=webp",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"3 cups extra-long white basmati rice (1 lb. 4 oz.)",
"1⁄4 cup plus 2 Tbsp. kosher salt, divided, plus more as needed",
"1⁄4 tsp. saffron, lightly ground between your fingers, plus more for sprinkling",
"3 tbsp. extra-virgin olive oil",
"1 large yellow onion, halved and sliced ¼-inch-thick",
"4 medium garlic cloves, finely chopped (1 Tbsp.)",
"10 boneless, skinless chicken thighs (2 lb. 3 oz.), halved",
"1⁄4 tsp. freshly ground black pepper",
"1⁄4 cup fresh lemon juice",
"1 cup plain whole-milk Greek yogurt",
"1 large egg",
"5 tbsp. unsalted butter, divided",
"1 tbsp. unsalted butter",
"1⁄2 cup dried barberries, picked, soaked in cold water for 15 minutes, then drained (or substitute finely chopped dried cranberries)",
"1 tsp. sugar (omit if using cranberries)",
"1⁄3 cup raw pistachios, coarsely chopped",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"""instruction 0\nParboil the rice by following steps 1 and 2 of the Steamed Saffron Rice with Tahdig recipe (p. 36). Set the rice aside to drain as you prepare the dish’s other components.\ninstruction 1\nIn a small bowl, add the saffron and 2 tablespoons hot water. (Be sure to pre-soak your barberries now, too. See toppings list.)\ninstruction 2\nIn a large, deep skillet, add the olive oil and set over medium heat. Once hot, add the onion and ¼ teaspoon salt, and cook, stirring occasionally, until soft and golden, about 20 minutes. Add the garlic and cook until fragrant, 2 minutes. Add the chicken thighs, 2¾ teaspoons salt, and the pepper. Cook, turning the chicken as needed, until the pieces begin to brown slightly, about 6 minutes total. Add the lemon juice and saffron water, and turn the chicken to coat well. Lower the heat to medium-low, partially cover the pot, and let simmer, stirring occasionally, until the chicken is tender and just cooked through, 20–25 minutes. Remove from the heat and let cool slightly. Cut the chicken into ½-inch-long strips, return them to the pan, and toss to coat in the onions and pan juices. Remove the pan from the heat and set aside while you finish the rice.\ninstruction 3\nPreheat the oven to 400°F and set a rack in the lowest position.\ninstruction 4\nIn a large bowl, combine the yogurt, egg, and a tiny pinch of ground saffron. Add half of the rice and stir well to combine. Set the mixture aside.\ninstruction 5\nIn a 9-by-13-inch glass baking dish (preferably clear), add 3 tablespoons of butter; set the dish in the oven to melt the butter, about 3 minutes. Remove the dish, swirling the melted butter all over the bottom and sides. Spread the yogurt-rice mixture evenly along the bottom of the dish, packing it down firmly. Add the chicken pieces evenly over the rice, then sprinkle the remaining plain rice over the chicken in an even layer. Drizzle with 2 tablespoons of chicken juices and dot with the remaining 2 tablespoons of butter. Cover tightly with aluminum foil, transfer to the oven, and bake until the bottom of the rice is crispy and golden, 80–90 minutes. (If using a clear glass dish, carefully lift it to check the bottom.) Remove the baking dish and let the rice rest at room temperature for 5 minutes.\ninstruction 6\nWhile the tahcheen rests, prepare the topping: In a small pot, melt the butter over medium heat. Stir in the drained barberries, pistachios, and sugar (if using), and cook until the berries are just plumped, about 2 minutes. Remove from the heat.\ninstruction 7\nRun a knife along the sides of the baking dish to help release the rice. Place a large rectangular serving platter, baking tray, or cutting board over the tahcheen, take a deep breath, and quickly and confidently flip the baking dish over to unmold. You should hear a swish when the tahdig releases. Remove the baking dish and sprinkle the crispy surface of the rice with the barberry topping. Cut into pieces and serve.""",
self.harvester_class.instructions(),
) |
get metadata | import json
import sys
if sys.version_info >= (3,0):
import urllib.parse
dku_quote_fn = urllib.parse.quote
else:
import urllib
dku_quote_fn = urllib.quote
class DSSObjectDiscussions(object):
"""
A handle to manage discussions on a DSS object.
.. important::
Do not create this class directly, instead use :meth:`dataikuapi.DSSClient.get_object_discussions` on any commentable DSS object.
"""
def __init__(self, client, project_key, object_type, object_id):
self.client = client
self.project_key = project_key
self.object_type = object_type
self.object_id = object_id
# encode in UTF-8 if it's Python 2 and the value is unicode
if sys.version_info < (3,0) and isinstance(self.object_id, unicode):
self.object_id = self.object_id.encode('utf-8')
def list_discussions(self):
"""
Gets the list of discussions on the object.
:returns: list of discussions on the object
:rtype: list of :class:`dataikuapi.dss.discussion.DSSDiscussion`
"""
data = self.client._perform_json("GET", "/projects/%s/discussions/%s/%s/" % (self.project_key, self.object_type, self.object_id))
return [DSSDiscussion(self.client, self.project_key, self.object_type, self.object_id, discu_data['id'], discu_data, False) for discu_data in data]
def create_discussion(self, topic, message):
"""
Creates a new discussion with one message.
:param str topic: the discussion topic
:param str message: the markdown formatted first message
:returns: the newly created discussion
:rtype: :class:`dataikuapi.dss.discussion.DSSDiscussion`
"""
creation_data = {
"topic" : topic,
"reply" : message
}
discu_data = self.client._perform_json("POST", "/projects/%s/discussions/%s/%s/" % (self.project_key, self.object_type, self.object_id), body=creation_data)
return DSSDiscussion(self.client, self.project_key, self.object_type, self.object_id, discu_data['id'], discu_data, True)
def get_discussion(self, discussion_id):
"""
Gets a specific discussion.
:param str discussion_id: the discussion ID
:returns: the discussion
:rtype: :class:`dataikuapi.dss.discussion.DSSDiscussion`
"""
discu_data = self.client._perform_json("GET", "/projects/%s/discussions/%s/%s/%s" % (self.project_key, self.object_type, self.object_id, discussion_id))
return DSSDiscussion(self.client, self.project_key, self.object_type, self.object_id, discussion_id, discu_data, True)
class DSSDiscussion(object):
"""
A handle to interact with a discussion.
.. important::
Do not call directly, use :meth:`dataikuapi.dss.discussion.DSSObjectDiscussions.get_discussion`,
:meth:`dataikuapi.dss.discussion.DSSObjectDiscussions.create_discussion`
or :meth:`dataikuapi.dss.discussion.DSSObjectDiscussions.list_discussions`.
"""
def __init__(self, client, project_key, object_type, object_id, discussion_id, discussion_data, discussion_data_has_replies):
"""
:param :class:`dataikuapi.dssclient.DSSClient` client: an api client to connect to the DSS backend
:param str project_key: identifier of the project to access
:param str object_type: DSS object type
:param str object_id: DSS object ID
:param str discussion_id: identifier of the discussion
:param dict discussion_data: the discussion data
:param bool discussion_data_has_replies: a flag that indicates if the replies has been loaded
"""
self.client = client
self.project_key = project_key
self.object_type = object_type
self.object_id = object_id
self.discussion_id = discussion_id
self.discussion_data = discussion_data
self.discussion_data_has_replies = discussion_data_has_replies
def _get_with_replies(self):
"""
Reloads the discussion data from the backend including the replies.
"""
self.discussion_data = self.client._perform_json("GET", "/projects/%s/discussions/%s/%s/%s" % (self.project_key, self.object_type, self.object_id, self.discussion_id))
self.discussion_data_has_replies = True
def METHOD_NAME(self):
"""
Gets the discussion metadata.
:returns: the discussion metadata
:rtype: dict
"""
metadata = dict(self.discussion_data)
if "replies" in metadata:
del metadata["replies"]
return metadata
def set_metadata(self, discussion_metadata):
"""
Updates the discussion metadata.
:param dict discussion_metadata: the discussion metadata
"""
if not self.discussion_data_has_replies:
self._get_with_replies()
edited_metadata = dict(discussion_metadata)
edited_metadata["replies"] = self.discussion_data["replies"]
self.discussion_data = self.client._perform_json("PUT", "/projects/%s/discussions/%s/%s/%s" % (self.project_key, self.object_type, self.object_id, self.discussion_id), body=edited_metadata)
self.discussion_data_has_replies = True
def get_replies(self):
"""
Gets the list of replies in this discussion.
:returns: a list of replies
:rtype: list of :class:`dataikuapi.dss.discussion.DSSDiscussionReply`
"""
if not self.discussion_data_has_replies:
self._get_with_replies()
return [DSSDiscussionReply(reply_data) for reply_data in self.discussion_data["replies"]]
def add_reply(self, text):
"""
Adds a reply to a discussion.
:param str text: the markdown formatted text to reply
"""
reply_data = {
"reply": text
}
self.discussion_data = self.client._perform_json("POST", "/projects/%s/discussions/%s/%s/%s/replies/" % (self.project_key, self.object_type, self.object_id, self.discussion_id), body=reply_data)
self.discussion_data_has_replies = True
class DSSDiscussionReply(object):
"""
A read-only handle to access a discussion reply.
.. important::
Do not create this class directly, use :meth:`dataikuapi.dss.discussion.DSSDiscussion.get_replies`
"""
def __init__(self, reply_data):
self.reply_data = reply_data
def get_raw_data(self):
"""
Gets the reply raw data.
:returns: the reply data
:rtype: dict
"""
return self.reply_data
def get_text(self):
"""
Gets the reply text.
:returns: the reply text
:rtype: str
"""
return self.reply_data["text"]
def get_author(self):
"""
Gets the reply author.
:returns: the author ID
:rtype: str
"""
return self.reply_data["author"]
def get_timestamp(self):
"""
Gets the reply timestamp.
:returns: the reply timestamp
:rtype: long
"""
return self.reply_data["time"]
def get_edited_timestamp(self):
"""
Gets the last edition timestamp.
:returns: the last edition timestamp
:rtype: long
"""
return self.reply_data["editedOn"] |
test turbomole num hessian | import numpy as np
import pytest
import qcelemental
from qcelemental.testing import compare_values
import qcengine as qcng
from qcengine.programs.turbomole.harvester import parse_hessian
from qcengine.testing import using
@pytest.fixture
def h2o():
mol = qcelemental.models.Molecule.from_data(
"""
O 0.000000000000 0.000000000000 -0.068516245955
H 0.000000000000 -0.790689888800 0.543701278274
H 0.000000000000 0.790689888800 0.543701278274
"""
)
return mol
@pytest.fixture
def h2o_ricc2_def2svp():
"""NumForce calls only make sense for stationary points. So this
geometry was optimized at the ricc2/def2-svp level of theory and
can be used to run NumForce with ricc2."""
mol = qcelemental.models.Molecule.from_data(
"""
O 0.0000000 0.0000000 -0.0835835
H 0.7501772 0.0000000 0.5210589
H -0.7501772 0.0000000 0.5210589
"""
)
return mol
@pytest.mark.parametrize(
"method, keywords, ref_energy",
[
pytest.param("hf", {}, -75.95536954370, marks=using("turbomole")),
pytest.param("pbe0", {"grid": "m5"}, -76.27371135900, marks=using("turbomole")),
pytest.param("ricc2", {}, -76.1603807755, marks=using("turbomole")),
pytest.param("rimp2", {}, -76.1593614075, marks=using("turbomole")),
pytest.param(
"hf",
{"scf_conv": 4, "scf_iters": 1},
-75.95536954370,
marks=[using("turbomole"), pytest.mark.xfail(raises=AssertionError, strict=True)],
),
],
)
def test_turbomole_energy(method, keywords, ref_energy, h2o):
resi = {"molecule": h2o, "driver": "energy", "model": {"method": method, "basis": "def2-SVP"}, "keywords": keywords}
res = qcng.compute(resi, "turbomole", raise_error=True, return_dict=True)
assert res["driver"] == "energy"
assert res["success"] is True
assert compare_values(ref_energy, res["return_result"])
@pytest.mark.parametrize(
"method, keywords, ref_norm",
[
pytest.param("hf", {}, 0.099340, marks=using("turbomole")),
pytest.param("pbe0", {"grid": "m5"}, 0.0606266, marks=using("turbomole")),
pytest.param("ricc2", {}, 0.059378, marks=using("turbomole")),
pytest.param("rimp2", {}, 0.061576, marks=using("turbomole")),
],
)
def test_turbomole_gradient(method, keywords, ref_norm, h2o):
resi = {
"molecule": h2o,
"driver": "gradient",
"model": {"method": method, "basis": "def2-SVP"},
"keywords": keywords,
}
res = qcng.compute(resi, "turbomole", raise_error=True)
assert res.driver == "gradient"
assert res.success is True
assert res.properties.return_energy
grad = res.return_result
grad_norm = np.linalg.norm(grad)
assert compare_values(ref_norm, grad_norm)
@using("turbomole")
def test_turbomole_ri_dsp(h2o):
resi = {
"molecule": h2o,
"driver": "energy",
"model": {"method": "b-p", "basis": "def2-SVP"},
"keywords": {"ri": True, "d3bj": True},
}
res = qcng.compute(resi, "turbomole", raise_error=True)
assert res.driver == "energy"
assert res.success is True
energy = res.return_result
ref_energy = -76.36275642866
assert compare_values(ref_energy, energy)
def assert_hessian(H, ref_eigvals, ref_size):
w, v = np.linalg.eigh(H)
last_eigvals = w[-3:]
# Hessian must be symmetric
np.testing.assert_allclose(H, H.T)
# Check eigenvalues
np.testing.assert_allclose(last_eigvals, ref_eigvals)
# Hessian must be of shape (3N x 3N)
assert H.shape == (ref_size, ref_size)
@using("turbomole")
@pytest.mark.parametrize(
"method, keywords, ref_eigvals",
[
("hf", {}, (2.00771683e-01, 7.77977644e-01, 9.91091318e-01)),
("pbe0", {"grid": "m5"}, (1.72092719e-01, 7.38603449e-01, 9.73783598e-01)),
("b-p", {"grid": "m5", "ri": True}, (1.59729409e-01, 7.21364827e-01, 9.63399519e-01)),
],
)
def test_turbomole_hessian(method, keywords, ref_eigvals, h2o):
resi = {
"molecule": h2o,
"driver": "hessian",
"model": {
"method": method,
"basis": "def2-SVP",
},
"keywords": keywords,
}
res = qcng.compute(resi, "turbomole", raise_error=True)
H = res.return_result
size = h2o.geometry.size
assert res.driver == "hessian"
assert res.success is True
assert res.properties.return_energy
assert_hessian(H, ref_eigvals, size)
@using("turbomole")
@pytest.mark.parametrize(
"method, keywords, ref_eigvals",
[
("ricc2", {}, (1.65405531e-01, 9.63690706e-01, 1.24676634e00)),
],
)
def METHOD_NAME(method, keywords, ref_eigvals, h2o_ricc2_def2svp):
resi = {
"molecule": h2o_ricc2_def2svp,
"driver": "hessian",
"model": {
"method": method,
"basis": "def2-SVP",
},
"keywords": keywords,
}
res = qcng.compute(resi, "turbomole", raise_error=True)
H = res.return_result
size = h2o_ricc2_def2svp.geometry.size
assert res.driver == "hessian"
assert res.success is True
assert res.properties.return_energy
assert_hessian(H, ref_eigvals, size)
@pytest.fixture
def h2o_nprhessian():
return """$nprhessian
1 1 0.6142699252 -0.0000000000 0.0000000000 -0.3071349626 -0.2479448514
1 2 -0.0000000000 -0.3071349626 0.2479448514 -0.0000000000
2 1 -0.0000000000 0.4365036678 0.0000000000 -0.1885017686 -0.2182518339
2 2 -0.0000000000 0.1885017686 -0.2182518339 0.0000000000
3 1 0.0000000000 0.0000000000 -0.0000524175 -0.0000000000 -0.0000000000
3 2 0.0000262088 -0.0000000000 0.0000000000 0.0000262088
4 1 -0.3071349626 -0.1885017686 -0.0000000000 0.3389423895 0.2182233100
4 2 0.0000000000 -0.0318074269 -0.0297215414 -0.0000000000
5 1 -0.2479448514 -0.2182518339 -0.0000000000 0.2182233100 0.2092172237
5 2 0.0000000000 0.0297215414 0.0090346102 0.0000000000
6 1 -0.0000000000 -0.0000000000 0.0000262088 0.0000000000 0.0000000000
6 2 -0.0000125560 -0.0000000000 0.0000000000 -0.0000136528
7 1 -0.3071349626 0.1885017686 -0.0000000000 -0.0318074269 0.0297215414
7 2 -0.0000000000 0.3389423895 -0.2182233100 0.0000000000
8 1 0.2479448514 -0.2182518339 0.0000000000 -0.0297215414 0.0090346102
8 2 0.0000000000 -0.2182233100 0.2092172237 -0.0000000000
9 1 -0.0000000000 0.0000000000 0.0000262088 -0.0000000000 0.0000000000
9 2 -0.0000136528 0.0000000000 -0.0000000000 -0.0000125560
$end"""
def test_turbomole_parse_hessian(h2o_nprhessian):
"""Test parsing of unproject Turbomole Hessian for water."""
hessian = parse_hessian(h2o_nprhessian)
assert hessian.shape == (9, 9)
eigvals, _ = np.linalg.eigh(hessian)
assert eigvals[-1] == pytest.approx(1.12157030e00) |
on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vhub routing-intent list",
is_preview=True,
)
class List(AAZCommand):
"""Retrieve the details of all routing intent resources of the virtual hub.
:example: Retrieve the details of all routing intent resources of the virtual hub.
az network vhub routing-intent list -g MyResourceGroup --vhub MyVirtualHub
"""
_aaz_info = {
"version": "2021-05-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualhubs/{}/routingintent", "2021-05-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
return self.build_paging(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.vhub = AAZStrArg(
options=["--vhub"],
help="Name of the virtual hub.",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.RoutingIntentList(ctx=self.ctx)()
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
next_link = self.deserialize_output(self.ctx.vars.instance.next_link)
return result, next_link
class RoutingIntentList(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.METHOD_NAME(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualHubs/{virtualHubName}/routingIntent",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualHubName", self.ctx.args.vhub,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-05-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def METHOD_NAME(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.next_link = AAZStrType(
serialized_name="nextLink",
)
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.etag = AAZStrType(
flags={"read_only": True},
)
_element.id = AAZStrType()
_element.name = AAZStrType()
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.routing_policies = AAZListType(
serialized_name="routingPolicies",
)
routing_policies = cls._schema_on_200.value.Element.properties.routing_policies
routing_policies.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element.properties.routing_policies.Element
_element.destinations = AAZListType(
flags={"required": True},
)
_element.name = AAZStrType(
flags={"required": True},
)
_element.next_hop = AAZStrType(
serialized_name="nextHop",
flags={"required": True},
)
destinations = cls._schema_on_200.value.Element.properties.routing_policies.Element.destinations
destinations.Element = AAZStrType()
return cls._schema_on_200
__all__ = ["List"] |
target selectors | import datetime
import random
import uuid
import factory
from sqlalchemy import orm
from h import models
from h.db.types import URLSafeUUID
from h.models.document import update_document_metadata
from .base import FAKER, ModelFactory
from .document import Document, DocumentMeta, DocumentURI
class Annotation(ModelFactory):
class Meta:
model = models.Annotation
sqlalchemy_session_persistence = (
"flush" # Always flush the db to generate annotation.id.
)
tags = factory.LazyFunction(
lambda: list(FAKER.words(nb=random.randint(0, 5))) # pylint:disable=no-member
)
target_uri = factory.Faker("uri")
text = factory.Faker("paragraph")
userid = factory.LazyFunction(
lambda: f"acct:{FAKER.user_name()}@localhost" # pylint:disable=no-member
)
document = factory.SubFactory(Document)
groupid = "__world__"
@factory.lazy_attribute
def METHOD_NAME(self):
return [
{
"endContainer": "/div[1]/article[1]/section[1]/div[1]/div[2]/div[1]",
"endOffset": 76,
"startContainer": "/div[1]/article[1]/section[1]/div[1]/div[2]/div[1]",
"startOffset": 0,
"type": "RangeSelector",
},
{"end": 362, "start": 286, "type": "TextPositionSelector"},
{
# pylint: disable=line-too-long
"exact": "If you wish to install Hypothesis on your own site then head over to GitHub.",
"prefix": " browser extension.\n ",
"suffix": "\n \n \n \n ",
"type": "TextQuoteSelector",
},
]
@factory.post_generation
def make_metadata( # pylint:disable=unused-argument
self, create, extracted, **kwargs
):
"""Create associated document metadata for the annotation."""
# The metadata objects are going to be added to the db, so if we're not
# using the create strategy then simply don't make any.
if not create:
return
def document_uri_dict():
"""
Return a randomly generated DocumentURI dict for this annotation.
This doesn't add anything to the database session yet.
"""
document_uri = DocumentURI.build(
document=None, claimant=self.target_uri, uri=self.target_uri
)
return {
"claimant": document_uri.claimant,
"uri": document_uri.uri,
"type": document_uri.type,
"content_type": document_uri.content_type,
}
document_uri_dicts = [document_uri_dict() for _ in range(random.randint(1, 3))]
def document_meta_dict(type_=None):
"""
Return a randomly generated DocumentMeta dict for this annotation.
This doesn't add anything to the database session yet.
"""
kwargs = {"document": None, "claimant": self.target_uri}
if type_ is not None:
kwargs["type"] = type_
document_meta = DocumentMeta.build(**kwargs)
return {
"claimant": document_meta.claimant,
"type": document_meta.type,
"value": document_meta.value,
}
document_meta_dicts = [
document_meta_dict() for _ in range(random.randint(1, 3))
]
# Make sure that there's always at least one DocumentMeta with
# type='title', so that we never get annotation.document.title is None:
if "title" not in [m["type"] for m in document_meta_dicts]:
document_meta_dicts.append(document_meta_dict(type_="title"))
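        # update_document_metadata() looks up or creates the Document (plus its
        # DocumentURI and DocumentMeta rows) for target_uri and returns it, so
        # the annotation ends up attached to a fully populated document.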
self.document = update_document_metadata(
orm.object_session(self),
self.target_uri,
document_meta_dicts=document_meta_dicts,
document_uri_dicts=document_uri_dicts,
created=self.created,
updated=self.updated,
)
@factory.post_generation
def make_id(self, create, extracted, **kwargs): # pylint:disable=unused-argument
"""Add a randomly ID if the annotation doesn't have one yet."""
# If using the create strategy don't generate an id.
# models.Annotation.id's server_default function will generate one
# when the annotation is saved to the DB.
if create:
return
# Don't generate an id if the user passed in one of their own.
if getattr(self, "id", None):
return
# Ids in the DB are in hex, but in the code they should be URL safe
self.id = URLSafeUUID().process_result_value( # pylint:disable=attribute-defined-outside-init,invalid-name
uuid.uuid4().hex, None
)
@factory.post_generation
def timestamps(self, create, extracted, **kwargs): # pylint:disable=unused-argument
# If using the create strategy let sqlalchemy set the created and
# updated times when saving to the DB.
if create:
return
# When using the build or stub strategy sqlalchemy won't set created or updated
# times for us, so do it ourselves instead.
#
# We're generating created and updated separately (calling now() twice
# instead of just once) so created and updated won't be exactly the
# same. This is consistent with how models.Annotation does it when
# saving to the DB.
# pylint:disable=attribute-defined-outside-init
self.created = self.created or datetime.datetime.now()
self.updated = self.updated or datetime.datetime.now() |
test set log level error | # Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logger Unittest."""
import logging
import unittest
from collections import OrderedDict
from unittest.mock import patch
import pytest
from parameterized import parameterized
from streamlit import config, logger
DUMMY_CONFIG_OPTIONS = OrderedDict()
class LoggerTest(unittest.TestCase):
"""Logger Unittest class."""
# Need to fix this test:
# https://trello.com/c/ZwNR7fWI
# def test_set_log_level_by_name(self):
# """Test streamlit.logger.set_log_level."""
# data = {
# 'critical': logging.CRITICAL,
# 'error': logging.ERROR,
# 'warning': logging.WARNING,
# 'info': logging.INFO,
# 'debug': logging.DEBUG,
# }
# for k, v in data.items():
# streamlit.logger.set_log_level(k)
# self.assertEqual(v, logging.getLogger().getEffectiveLevel())
def test_set_log_level_by_constant(self):
"""Test streamlit.logger.set_log_level."""
data = [
logging.CRITICAL,
logging.ERROR,
logging.WARNING,
logging.INFO,
logging.DEBUG,
]
for k in data:
logger.set_log_level(k)
self.assertEqual(k, logging.getLogger("streamlit").getEffectiveLevel())
def METHOD_NAME(self):
"""Test streamlit.logger.set_log_level."""
with pytest.raises(SystemExit) as e:
logger.set_log_level(90)
self.assertEqual(e.type, SystemExit)
self.assertEqual(e.value.code, 1)
# Need to fix this test:
# https://trello.com/c/ZwNR7fWI
# def test_set_log_level_resets(self):
# """Test streamlit.logger.set_log_level."""
# streamlit.logger.set_log_level('debug')
# test1 = streamlit.logger.get_logger('test1')
# self.assertEqual(logging.DEBUG, test1.getEffectiveLevel())
#
# streamlit.logger.set_log_level('warning')
# self.assertEqual(logging.WARNING, test1.getEffectiveLevel())
#
# streamlit.logger.set_log_level('critical')
# test2 = streamlit.logger.get_logger('test2')
# self.assertEqual(logging.CRITICAL, test2.getEffectiveLevel())
@parameterized.expand(
[
("%(asctime)s.%(msecs)03d %(name)s: %(message)s", None),
("%(asctime)s.%(msecs)03d %(name)s: %(message)s", DUMMY_CONFIG_OPTIONS),
(None, None),
(None, DUMMY_CONFIG_OPTIONS),
]
)
def test_setup_log_formatter(self, messageFormat, config_options):
"""Test streamlit.logger.setup_log_formatter."""
LOGGER = logger.get_logger("test")
config._set_option("logger.messageFormat", messageFormat, "test")
config._set_option("logger.level", logging.DEBUG, "test")
with patch.object(config, "_config_options", new=config_options):
logger.setup_formatter(LOGGER)
self.assertEqual(len(LOGGER.handlers), 1)
if config_options:
self.assertEqual(
LOGGER.handlers[0].formatter._fmt, messageFormat or "%(message)s"
)
else:
self.assertEqual(
LOGGER.handlers[0].formatter._fmt, logger.DEFAULT_LOG_MESSAGE
)
def test_init_tornado_logs(self):
"""Test streamlit.logger.init_tornado_logs."""
logger.init_tornado_logs()
loggers = [x for x in logger._loggers.keys() if "tornado." in x]
truth = ["tornado.access", "tornado.application", "tornado.general"]
self.assertEqual(sorted(truth), sorted(loggers))
# Need to fix this test:
# https://trello.com/c/ZwNR7fWI
# def test_get_logger(self):
# """Test streamlit.logger.get_logger."""
# # Test that get_logger with no args, figures out its caller
# logger = streamlit.logger.get_logger()
# self.assertTrue('.logger_test' in streamlit.logger.LOGGERS.keys()) |
get mro | """
Patch recently added ABCs into the standard lib module
``collections.abc`` (Py3) or ``collections`` (Py2).
Usage::
import backports_abc
backports_abc.patch()
or::
try:
from collections.abc import Generator
except ImportError:
from backports_abc import Generator
"""
try:
import collections.abc as _collections_abc
except ImportError:
import collections as _collections_abc
def METHOD_NAME(cls):
try:
return cls.__mro__
except AttributeError:
return old_style_mro(cls)
def old_style_mro(cls):
yield cls
for base in cls.__bases__:
for c in old_style_mro(base):
yield c
def mk_gen():
from abc import abstractmethod
required_methods = (
'__iter__', '__next__' if hasattr(iter(()), '__next__') else 'next',
'send', 'throw', 'close')
class Generator(_collections_abc.Iterator):
__slots__ = ()
if '__next__' in required_methods:
def __next__(self):
return self.send(None)
else:
def next(self):
return self.send(None)
@abstractmethod
def send(self, value):
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError('generator ignored GeneratorExit')
@classmethod
def __subclasshook__(cls, C):
if cls is Generator:
mro = METHOD_NAME(C)
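                # for/else: the inner else only runs when no class in the MRO
                # defines `method`, in which case C does not implement the full
                # generator protocol.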
for method in required_methods:
for base in mro:
if method in base.__dict__:
break
else:
return NotImplemented
return True
return NotImplemented
generator = type((lambda: (yield))())
Generator.register(generator)
return Generator
def mk_awaitable():
from abc import abstractmethod, ABCMeta
@abstractmethod
def __await__(self):
yield
@classmethod
def __subclasshook__(cls, C):
if cls is Awaitable:
for B in METHOD_NAME(C):
if '__await__' in B.__dict__:
if B.__dict__['__await__']:
return True
break
return NotImplemented
# calling metaclass directly as syntax differs in Py2/Py3
Awaitable = ABCMeta('Awaitable', (), {
'__slots__': (),
'__await__': __await__,
'__subclasshook__': __subclasshook__,
})
return Awaitable
def mk_coroutine():
from abc import abstractmethod
class Coroutine(Awaitable):
__slots__ = ()
@abstractmethod
def send(self, value):
"""Send a value into the coroutine.
Return next yielded value or raise StopIteration.
"""
raise StopIteration
@abstractmethod
def throw(self, typ, val=None, tb=None):
"""Raise an exception in the coroutine.
Return next yielded value or raise StopIteration.
"""
if val is None:
if tb is None:
raise typ
val = typ()
if tb is not None:
val = val.with_traceback(tb)
raise val
def close(self):
"""Raise GeneratorExit inside coroutine.
"""
try:
self.throw(GeneratorExit)
except (GeneratorExit, StopIteration):
pass
else:
raise RuntimeError('coroutine ignored GeneratorExit')
@classmethod
def __subclasshook__(cls, C):
if cls is Coroutine:
mro = METHOD_NAME(C)
for method in ('__await__', 'send', 'throw', 'close'):
for base in mro:
if method in base.__dict__:
break
else:
return NotImplemented
return True
return NotImplemented
return Coroutine
###
# make all ABCs available in this module
try:
Generator = _collections_abc.Generator
except AttributeError:
Generator = mk_gen()
try:
Awaitable = _collections_abc.Awaitable
except AttributeError:
Awaitable = mk_awaitable()
try:
Coroutine = _collections_abc.Coroutine
except AttributeError:
Coroutine = mk_coroutine()
try:
from inspect import isawaitable
except ImportError:
def isawaitable(obj):
return isinstance(obj, Awaitable)
###
# allow patching the stdlib
PATCHED = {}
def patch(patch_inspect=True):
"""
Main entry point for patching the ``collections.abc`` and ``inspect``
standard library modules.
"""
PATCHED['collections.abc.Generator'] = _collections_abc.Generator = Generator
PATCHED['collections.abc.Coroutine'] = _collections_abc.Coroutine = Coroutine
PATCHED['collections.abc.Awaitable'] = _collections_abc.Awaitable = Awaitable
if patch_inspect:
import inspect
PATCHED['inspect.isawaitable'] = inspect.isawaitable = isawaitable |
operation group two | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from azure.profiles import KnownProfiles, ProfileDefinition
from azure.profiles.multiapiclient import MultiApiClientMixin
from ._configuration import MultiapiServiceClientConfiguration
from ._operations_mixin import MultiapiServiceClientOperationsMixin
from ._serialization import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class _SDKClient(object):
def __init__(self, *args, **kwargs):
"""This is a fake class to support current implemetation of MultiApiClientMixin."
Will be removed in final version of multiapi azure-core based client
"""
pass
class MultiapiServiceClient(MultiapiServiceClientOperationsMixin, MultiApiClientMixin, _SDKClient):
"""Service client for multiapi client testing.
    This client contains multiple API versions, to help you deal with all of the Azure clouds
(Azure Stack, Azure Government, Azure China, etc.).
By default, it uses the latest API version available on public Azure.
For production, you should stick to a particular api-version and/or profile.
The profile sets a mapping between an operation group and its API version.
The api-version parameter sets the default API version if the operation
group is not described in the profile.
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param api_version: API version to use if no profile is provided, or if missing in profile.
:type api_version: str
:param base_url: Service URL
:type base_url: str
:param profile: A profile definition, from KnownProfiles to dict.
:type profile: azure.profiles.KnownProfiles
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
DEFAULT_API_VERSION = '3.0.0'
_PROFILE_TAG = "multiapinoasync.MultiapiServiceClient"
LATEST_PROFILE = ProfileDefinition({
_PROFILE_TAG: {
None: DEFAULT_API_VERSION,
'begin_test_lro': '1.0.0',
'begin_test_lro_and_paging': '1.0.0',
'test_one': '2.0.0',
}},
_PROFILE_TAG + " latest"
)
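    # A minimal usage sketch (credential and version values are illustrative,
    # not part of this module):
    #
    #     client = MultiapiServiceClient(credential, api_version="2.0.0")
    #     client.operation_group_one  # resolved from the v2 operations module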
def __init__(
self,
credential: "TokenCredential",
api_version: Optional[str]=None,
base_url: str = "http://localhost:3000",
profile: KnownProfiles=KnownProfiles.default,
**kwargs: Any
):
if api_version:
kwargs.setdefault('api_version', api_version)
self._config = MultiapiServiceClientConfiguration(credential, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
super(MultiapiServiceClient, self).__init__(
api_version=api_version,
profile=profile
)
@classmethod
def _models_dict(cls, api_version):
return {k: v for k, v in cls.models(api_version).__dict__.items() if isinstance(v, type)}
@classmethod
def models(cls, api_version=DEFAULT_API_VERSION):
"""Module depends on the API version:
* 1.0.0: :mod:`v1.models<multiapinoasync.v1.models>`
* 2.0.0: :mod:`v2.models<multiapinoasync.v2.models>`
* 3.0.0: :mod:`v3.models<multiapinoasync.v3.models>`
"""
if api_version == '1.0.0':
from .v1 import models
return models
elif api_version == '2.0.0':
from .v2 import models
return models
elif api_version == '3.0.0':
from .v3 import models
return models
raise ValueError("API version {} is not available".format(api_version))
@property
def operation_group_one(self):
"""Instance depends on the API version:
* 1.0.0: :class:`OperationGroupOneOperations<multiapinoasync.v1.operations.OperationGroupOneOperations>`
* 2.0.0: :class:`OperationGroupOneOperations<multiapinoasync.v2.operations.OperationGroupOneOperations>`
* 3.0.0: :class:`OperationGroupOneOperations<multiapinoasync.v3.operations.OperationGroupOneOperations>`
"""
api_version = self._get_api_version('operation_group_one')
if api_version == '1.0.0':
from .v1.operations import OperationGroupOneOperations as OperationClass
elif api_version == '2.0.0':
from .v2.operations import OperationGroupOneOperations as OperationClass
elif api_version == '3.0.0':
from .v3.operations import OperationGroupOneOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operation_group_one'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version)
@property
def METHOD_NAME(self):
"""Instance depends on the API version:
* 2.0.0: :class:`OperationGroupTwoOperations<multiapinoasync.v2.operations.OperationGroupTwoOperations>`
* 3.0.0: :class:`OperationGroupTwoOperations<multiapinoasync.v3.operations.OperationGroupTwoOperations>`
"""
api_version = self._get_api_version('operation_group_two')
if api_version == '2.0.0':
from .v2.operations import OperationGroupTwoOperations as OperationClass
elif api_version == '3.0.0':
from .v3.operations import OperationGroupTwoOperations as OperationClass
else:
raise ValueError("API version {} does not have operation group 'operation_group_two'".format(api_version))
self._config.api_version = api_version
return OperationClass(self._client, self._config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)), api_version)
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details) |
flush | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import errors
from perfkitbenchmarker import provider_info
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import memcached_server
from perfkitbenchmarker.memcache_service import MemcacheService
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
ELASTICACHE_PORT = 11211
class ElastiCacheMemcacheService(MemcacheService):
"""Class for AWS elasticache memcache service."""
CLOUD = provider_info.AWS
def __init__(self, network, cluster_id, region, node_type, num_servers=1):
self.cluster_id = cluster_id
self.region = region
self.node_type = node_type
self.num_servers = num_servers
self.hosts = [] # [(ip, port)]
self.vpc_id = network.subnet.vpc_id
self.security_group_id = (
network.regional_network.vpc.default_security_group_id)
self.subnet_id = network.subnet.id
self.subnet_group_name = '%ssubnet' % cluster_id
def Create(self):
# Open the port memcached needs
aws_network.AwsFirewall.GetFirewall().AllowPortInSecurityGroup(
self.region, self.security_group_id, ELASTICACHE_PORT)
# Create a cache subnet group
cmd = ['aws', 'elasticache', 'create-cache-subnet-group',
'--region=%s' % self.region,
'--cache-subnet-group-name=%s' % self.subnet_group_name,
'--cache-subnet-group-description="PKB memcached_ycsb benchmark"',
'--subnet-ids=%s' % self.subnet_id]
vm_util.IssueCommand(cmd)
# Create the cluster
cmd = ['aws', 'elasticache', 'create-cache-cluster',
'--engine=memcached',
'--cache-subnet-group-name=%s' % self.subnet_group_name,
'--cache-cluster-id=%s' % self.cluster_id,
'--num-cache-nodes=%s' % self.num_servers,
'--region=%s' % self.region,
'--cache-node-type=%s' % self.node_type,
'--tags'] + util.MakeFormattedDefaultTags()
vm_util.IssueCommand(cmd)
# Wait for the cluster to come up
cluster_info = self._WaitForClusterUp()
# Parse out the hosts
self.hosts = [(node['Endpoint']['Address'], node['Endpoint']['Port'])
for node in cluster_info['CacheNodes']]
assert len(self.hosts) == self.num_servers
def Destroy(self):
# Delete the ElastiCache cluster
cmd = ['aws', 'elasticache', 'delete-cache-cluster',
'--cache-cluster-id=%s' % self.cluster_id,
'--region=%s' % self.region]
vm_util.IssueCommand(cmd, raise_on_failure=False)
# Don't have to delete the subnet group. It will be deleted with the subnet.
def METHOD_NAME(self):
background_tasks.RunThreaded(
memcached_server.FlushMemcachedServer, self.hosts
)
def GetHosts(self):
return ['%s:%s' % (ip, port) for ip, port in self.hosts]
def GetMetadata(self):
return {'num_servers': self.num_servers,
'elasticache_region': self.region,
'elasticache_node_type': self.node_type}
def _GetClusterInfo(self):
cmd = ['aws', 'elasticache', 'describe-cache-clusters']
cmd += ['--cache-cluster-id=%s' % self.cluster_id]
cmd += ['--region=%s' % self.region]
cmd += ['--show-cache-node-info']
out, _, _ = vm_util.IssueCommand(cmd)
return json.loads(out)['CacheClusters'][0]
@vm_util.Retry(poll_interval=15, timeout=300,
retryable_exceptions=(errors.Resource.RetryableCreationError))
def _WaitForClusterUp(self):
"""Block until the ElastiCache memcached cluster is up.
Will timeout after 5 minutes, and raise an exception. Before the timeout
expires any exceptions are caught and the status check is retried.
We check the status of the cluster using the AWS CLI.
Returns:
The cluster info json as a dict
Raises:
errors.Resource.RetryableCreationError when response is not as expected or
if there is an error connecting to the port or otherwise running the
remote check command.
"""
logging.info('Trying to get ElastiCache cluster info for %s',
self.cluster_id)
cluster_status = None
try:
cluster_info = self._GetClusterInfo()
cluster_status = cluster_info['CacheClusterStatus']
if cluster_status == 'available':
logging.info('ElastiCache memcached cluster is up and running.')
return cluster_info
except errors.VirtualMachine.RemoteCommandError as e:
raise errors.Resource.RetryableCreationError(
'ElastiCache memcached cluster not up yet: %s.' % str(e))
else:
raise errors.Resource.RetryableCreationError(
'ElastiCache memcached cluster not up yet. Status: %s' %
cluster_status) |
get class | import numpy as np
import pytest
from aspire.image import Image
from aspire.utils import gaussian_2d, utest_tolerance
from aspire.utils.coor_trans import grid_2d
from aspire.utils.random import randn
from aspire.volume import Volume
# Parameter combinations for testing 2D bases
# Each tuple represents (resolution in pixels, datatype of basis)
basis_params_2d = [
(8, np.float32),
(8, np.float64),
(16, np.float32),
(16, np.float64),
(32, np.float32),
(32, np.float64),
]
basis_params_3d = [
(8, np.float32),
(8, np.float64),
]
pswf_params_2d = [
(8, np.float64),
]
def show_basis_params(basis):
# print descriptive test name for parametrized test
# run pytest with option -rA to see explicitly
return f"{basis.nres}-{basis.dtype}"
class Steerable2DMixin:
"""
    Test classes inheriting this mixin are expected to pass a Basis object to
    each Steerable2DMixin method.
"""
def testIndices(self, basis):
ell_max = basis.ell_max
k_max = basis.k_max
indices = basis.indices()
i = 0
for ell in range(ell_max + 1):
if ell == 0:
sgns = [1]
else:
sgns = [1, -1]
for sgn in sgns:
for k in range(k_max[ell]):
assert indices["ells"][i] == ell
assert indices["sgns"][i] == sgn
assert indices["ks"][i] == k
i += 1
def testGaussianExpand(self, basis):
# Offset slightly
x0 = 0.50
y0 = 0.75
L = basis.nres
# Want sigma to be as large as possible without the Gaussian
# spilling too much outside the central disk.
sigma = L / 8
im1 = gaussian_2d(L, mu=(x0, y0), sigma=sigma, dtype=basis.dtype)
coef = basis.expand(im1)
im2 = basis.evaluate(coef)
if isinstance(im2, Image):
im2 = im2.asnumpy()
im2 = im2[0]
# For small L there's too much clipping at high freqs to get 1e-3
# accuracy.
if L < 32:
atol = 1e-2
else:
atol = 1e-3
assert im1.shape == im2.shape
assert np.allclose(im1, im2, atol=atol)
def testIsotropic(self, basis):
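        # A centered isotropic Gaussian should be represented almost entirely
        # by the ell = 0 (rotationally invariant) coefficients, so less than 1%
        # of the energy may leak into other angular orders.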
L = basis.nres
sigma = L / 8
im = gaussian_2d(L, sigma=sigma, dtype=basis.dtype)
coef = basis.expand(im)
ells = basis.indices()["ells"]
energy_outside = np.sum(np.abs(coef[ells != 0]) ** 2)
energy_total = np.sum(np.abs(coef) ** 2)
energy_ratio = energy_outside / energy_total
assert energy_ratio < 0.01
def testModulated(self, basis):
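        # Modulating a radial Gaussian by cos/sin(ell * phi) should concentrate
        # the expansion energy in coefficients of angular order ell; at most 10%
        # may spill into other orders.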
L = basis.nres
if L < 32:
pytest.skip()
ell = 1
sigma = L / 8
im = gaussian_2d(L, sigma=sigma, dtype=basis.dtype)
g2d = grid_2d(L)
for trig_fun in (np.sin, np.cos):
im1 = im * trig_fun(ell * g2d["phi"])
coef = basis.expand(im1)
ells = basis.indices()["ells"]
energy_outside = np.sum(np.abs(coef[ells != ell]) ** 2)
energy_total = np.sum(np.abs(coef) ** 2)
energy_ratio = energy_outside / energy_total
assert energy_ratio < 0.10
def testEvaluateExpand(self, basis):
coef1 = randn(basis.count, seed=self.seed)
coef1 = coef1.astype(basis.dtype)
im = basis.evaluate(coef1)
if isinstance(im, Image):
im = im.asnumpy()
coef2 = basis.expand(im)[0]
assert coef1.shape == coef2.shape
assert np.allclose(coef1, coef2, atol=utest_tolerance(basis.dtype))
def testAdjoint(self, basis):
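        # Check the adjoint relation <A u, x> = <u, A^T x> between evaluate (A)
        # and evaluate_t (A^T) on random coefficient and image vectors.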
u = randn(basis.count, seed=self.seed)
u = u.astype(basis.dtype)
Au = basis.evaluate(u)
if isinstance(Au, Image):
Au = Au.asnumpy()
x = Image(randn(*basis.sz, seed=self.seed), dtype=basis.dtype)
ATx = basis.evaluate_t(x)
Au_dot_x = np.sum(Au * x.asnumpy())
u_dot_ATx = np.sum(u * ATx)
assert Au_dot_x.shape == u_dot_ATx.shape
assert np.isclose(Au_dot_x, u_dot_ATx)
class UniversalBasisMixin:
"""
    Test classes inheriting this mixin are expected to pass a Basis object to
    each UniversalBasisMixin method.
"""
def METHOD_NAME(self, basis):
if basis.ndim == 2:
return Image
elif basis.ndim == 3:
return Volume
def testEvaluate(self, basis):
# evaluate should take a NumPy array of type basis.coefficient_dtype
# and return an Image/Volume
_class = self.METHOD_NAME(basis)
result = basis.evaluate(np.zeros((basis.count), dtype=basis.coefficient_dtype))
assert isinstance(result, _class)
def testEvaluate_t(self, basis):
# evaluate_t should take an Image/Volume and return a NumPy array of type
# basis.coefficient_dtype
_class = self.METHOD_NAME(basis)
result = basis.evaluate_t(
_class(np.zeros((basis.nres,) * basis.ndim, dtype=basis.dtype))
)
assert isinstance(result, np.ndarray)
assert result.dtype == basis.coefficient_dtype
def testExpand(self, basis):
_class = self.METHOD_NAME(basis)
# expand should take an Image/Volume and return a NumPy array of type
# basis.coefficient_dtype
result = basis.expand(
_class(np.zeros((basis.nres,) * basis.ndim, dtype=basis.dtype))
)
assert isinstance(result, np.ndarray)
assert result.dtype == basis.coefficient_dtype
def testInitWithIntSize(self, basis):
# make sure we can instantiate with just an int as a shortcut
assert (basis.nres,) * basis.ndim == basis.__class__(basis.nres).sz |
test branch change already on branch | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe and Contributors
# See license.txt
import unittest
from unittest.mock import patch
import frappe
from frappe.core.utils import find
from press.press.doctype.app.app import App
from press.press.doctype.app.test_app import create_test_app
from press.press.doctype.app_release.test_app_release import create_test_app_release
from press.press.doctype.app_source.app_source import AppSource
from press.press.doctype.app_source.test_app_source import create_test_app_source
from press.press.doctype.release_group.release_group import (
ReleaseGroup,
new_release_group,
)
from press.press.doctype.team.test_team import create_test_team
def create_test_release_group(
apps: list[App], user: str = None, public=False, frappe_version="Version 14"
) -> ReleaseGroup:
"""
Create Release Group doc.
Also creates app source
"""
user = user or frappe.session.user
release_group = frappe.get_doc(
{
"doctype": "Release Group",
"version": frappe_version,
"enabled": True,
"title": f"Test ReleaseGroup {frappe.mock('name')}",
"team": frappe.get_value("Team", {"user": user}, "name"),
"public": public,
}
)
for app in apps:
app_source = create_test_app_source(release_group.version, app)
release_group.append("apps", {"app": app.name, "source": app_source.name})
release_group.insert(ignore_if_duplicate=True)
release_group.reload()
return release_group
@patch.object(AppSource, "create_release", create_test_app_release)
class TestReleaseGroup(unittest.TestCase):
def setUp(self):
self.team = create_test_team().name
def tearDown(self):
frappe.db.rollback()
def test_create_release_group(self):
app = create_test_app("frappe", "Frappe Framework")
source = app.add_source(
"Version 12", "https://github.com/frappe/frappe", "version-12", team=self.team
)
group = new_release_group(
"Test Group",
"Version 12",
[{"app": source.app, "source": source.name}],
team=self.team,
)
self.assertEqual(group.title, "Test Group")
def test_create_release_group_set_app_from_source(self):
app1 = create_test_app("frappe", "Frappe Framework")
source1 = app1.add_source(
"Version 12", "https://github.com/frappe/frappe", "version-12", team=self.team
)
app2 = create_test_app("erpnext", "ERPNext")
source2 = app2.add_source(
"Version 12", "https://github.com/frappe/erpnext", "version-12", team=self.team
)
group = new_release_group(
"Test Group",
"Version 12",
[{"app": source2.app, "source": source1.name}],
team=self.team,
)
self.assertEqual(group.apps[0].app, source1.app)
def test_create_release_group_fail_when_first_app_is_not_frappe(self):
app = create_test_app("erpnext", "ERPNext")
source = app.add_source(
"Version 12", "https://github.com/frappe/erpnext", "version-12", team=self.team
)
self.assertRaises(
frappe.ValidationError,
new_release_group,
"Test Group",
"Version 12",
[{"app": source.app, "source": source.name}],
team=self.team,
)
def test_create_release_group_fail_when_duplicate_apps(self):
app = create_test_app("frappe", "Frappe Framework")
source = app.add_source(
"Version 12", "https://github.com/frappe/frappe", "version-12", team=self.team
)
self.assertRaises(
frappe.ValidationError,
new_release_group,
"Test Group",
"Version 12",
[
{"app": source.app, "source": source.name},
{"app": source.app, "source": source.name},
],
team=self.team,
)
def test_create_release_group_fail_when_version_mismatch(self):
app = create_test_app("frappe", "Frappe Framework")
source = app.add_source(
"Version 12", "https://github.com/frappe/frappe", "version-12", team=self.team
)
self.assertRaises(
frappe.ValidationError,
new_release_group,
"Test Group",
"Version 13",
[{"app": source.app, "source": source.name}],
team=self.team,
)
def test_create_release_group_fail_with_duplicate_titles(self):
app = create_test_app("frappe", "Frappe Framework")
source = app.add_source(
"Version 12", "https://github.com/frappe/frappe", "version-12", team=self.team
)
new_release_group(
"Test Group",
"Version 12",
[{"app": source.app, "source": source.name}],
team=self.team,
)
self.assertRaises(
frappe.ValidationError,
new_release_group,
"Test Group",
"Version 12",
[{"app": source.app, "source": source.name}],
team=self.team,
)
def METHOD_NAME(self):
app = create_test_app()
rg = create_test_release_group([app])
with self.assertRaises(frappe.ValidationError):
rg.change_app_branch("frappe", "master")
def test_branch_change_app_source_exists(self):
app = create_test_app()
rg = create_test_release_group([app])
current_app_source = frappe.get_doc("App Source", rg.apps[0].source)
app_source = create_test_app_source(
current_app_source.versions[0].version,
app,
current_app_source.repository_url,
"develop",
)
rg.change_app_branch(app.name, "develop")
rg.reload()
# Source must be set to the available `app_source` for `app`
self.assertEqual(rg.apps[0].source, app_source.name)
def test_branch_change_app_source_does_not_exist(self):
app = create_test_app()
rg = create_test_release_group([app])
previous_app_source = frappe.get_doc("App Source", rg.apps[0].source)
rg.change_app_branch(app.name, "develop")
rg.reload()
new_app_source = frappe.get_doc("App Source", rg.apps[0].source)
self.assertEqual(new_app_source.branch, "develop")
self.assertEqual(
new_app_source.versions[0].version, previous_app_source.versions[0].version
)
self.assertEqual(new_app_source.repository_url, previous_app_source.repository_url)
self.assertEqual(new_app_source.app, app.name)
def test_new_release_group_loaded_with_correct_dependencies(self):
app = create_test_app("frappe", "Frappe Framework")
frappe_version = frappe.get_doc("Frappe Version", "Version 14")
group = frappe.get_doc(
{
"doctype": "Release Group",
"title": "Test Group",
"version": "Version 14",
"apps": [
{"app": app.name, "source": create_test_app_source("Version 14", app).name}
],
"team": self.team,
}
).insert()
self.assertEqual(
find(group.dependencies, lambda d: d.dependency == "PYTHON_VERSION").version,
find(
frappe_version.dependencies, lambda x: x.dependency == "PYTHON_VERSION"
).version,
) |
test user revoke invalid certified | # Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
from __future__ import annotations
import pytest
import trio
from quart.testing.connections import WebsocketDisconnectError
from parsec._parsec import (
DateTime,
UserProfile,
)
from parsec.api.data import RevokedUserCertificate
from parsec.api.protocol import (
HandshakeRevokedDevice,
UserRevokeRepAlreadyRevoked,
UserRevokeRepInvalidCertification,
UserRevokeRepNotAllowed,
UserRevokeRepNotFound,
UserRevokeRepOk,
)
from parsec.backend.user import INVITATION_VALIDITY
from tests.backend.common import authenticated_ping, user_revoke
from tests.common import freeze_time
@pytest.mark.trio
async def test_backend_close_on_user_revoke(
backend_asgi_app, alice_ws, backend_authenticated_ws_factory, bob, alice
):
now = DateTime.now()
bob_revocation = RevokedUserCertificate(
author=alice.device_id, timestamp=now, user_id=bob.user_id
).dump_and_sign(alice.signing_key)
async with backend_authenticated_ws_factory(backend_asgi_app, bob) as bob_ws:
with backend_asgi_app.backend.event_bus.listen():
rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation)
assert isinstance(rep, UserRevokeRepOk)
# `user.revoked` event schedules connection cancellation, so wait
# for things to settle down to make sure the cancellation is done
await trio.testing.wait_all_tasks_blocked()
# Bob cannot send new command
with pytest.raises(WebsocketDisconnectError):
await authenticated_ping(bob_ws)
@pytest.mark.trio
async def test_user_revoke_ok(
backend_asgi_app, backend_authenticated_ws_factory, adam_ws, alice, adam
):
now = DateTime.now()
alice_revocation = RevokedUserCertificate(
author=adam.device_id, timestamp=now, user_id=alice.user_id
).dump_and_sign(adam.signing_key)
with backend_asgi_app.backend.event_bus.listen():
rep = await user_revoke(adam_ws, revoked_user_certificate=alice_revocation)
assert isinstance(rep, UserRevokeRepOk)
# Alice cannot connect from now on...
with pytest.raises(HandshakeRevokedDevice):
async with backend_authenticated_ws_factory(backend_asgi_app, alice):
pass
@pytest.mark.trio
async def test_user_revoke_not_admin(
backend_asgi_app, backend_authenticated_ws_factory, bob_ws, alice, bob
):
now = DateTime.now()
alice_revocation = RevokedUserCertificate(
author=bob.device_id, timestamp=now, user_id=alice.user_id
).dump_and_sign(bob.signing_key)
rep = await user_revoke(bob_ws, revoked_user_certificate=alice_revocation)
assert isinstance(rep, UserRevokeRepNotAllowed)
@pytest.mark.trio
async def test_cannot_self_revoke(
backend_asgi_app, backend_authenticated_ws_factory, alice_ws, alice
):
now = DateTime.now()
alice_revocation = RevokedUserCertificate(
author=alice.device_id, timestamp=now, user_id=alice.user_id
).dump_and_sign(alice.signing_key)
rep = await user_revoke(alice_ws, revoked_user_certificate=alice_revocation)
assert isinstance(rep, UserRevokeRepNotAllowed)
@pytest.mark.trio
async def test_user_revoke_unknown(backend_asgi_app, alice_ws, alice, mallory):
revoked_user_certificate = RevokedUserCertificate(
author=alice.device_id, timestamp=DateTime.now(), user_id=mallory.user_id
).dump_and_sign(alice.signing_key)
rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate)
assert isinstance(rep, UserRevokeRepNotFound)
@pytest.mark.trio
async def test_user_revoke_already_revoked(backend_asgi_app, alice_ws, bob, alice):
now = DateTime.now()
bob_revocation = RevokedUserCertificate(
author=alice.device_id, timestamp=now, user_id=bob.user_id
).dump_and_sign(alice.signing_key)
rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation)
assert isinstance(rep, UserRevokeRepOk)
rep = await user_revoke(alice_ws, revoked_user_certificate=bob_revocation)
assert isinstance(rep, UserRevokeRepAlreadyRevoked)
@pytest.mark.trio
async def METHOD_NAME(backend_asgi_app, alice_ws, alice2, bob):
revoked_user_certificate = RevokedUserCertificate(
author=alice2.device_id, timestamp=DateTime.now(), user_id=bob.user_id
).dump_and_sign(alice2.signing_key)
rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate)
assert isinstance(rep, UserRevokeRepInvalidCertification)
@pytest.mark.trio
async def test_user_revoke_certify_too_old(backend_asgi_app, alice_ws, alice, bob):
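    # The certificate is timestamped in 2000 but submitted after
    # INVITATION_VALIDITY seconds have elapsed (via freeze_time), so the
    # backend must reject it as an invalid certification.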
now = DateTime(2000, 1, 1)
revoked_user_certificate = RevokedUserCertificate(
author=alice.device_id, timestamp=now, user_id=bob.user_id
).dump_and_sign(alice.signing_key)
with freeze_time(now.add(seconds=INVITATION_VALIDITY + 1)):
rep = await user_revoke(alice_ws, revoked_user_certificate=revoked_user_certificate)
assert isinstance(rep, UserRevokeRepInvalidCertification)
@pytest.mark.trio
async def test_user_revoke_other_organization(
ws_from_other_organization_factory,
backend_authenticated_ws_factory,
backend_asgi_app,
alice,
bob,
):
# Organizations should be isolated even for organization admins
async with ws_from_other_organization_factory(
backend_asgi_app, mimic=alice.device_id, profile=UserProfile.ADMIN
) as sock:
revocation = RevokedUserCertificate(
author=sock.device.device_id, timestamp=DateTime.now(), user_id=bob.user_id
).dump_and_sign(sock.device.signing_key)
rep = await user_revoke(sock, revoked_user_certificate=revocation)
assert isinstance(rep, UserRevokeRepNotFound)
# Make sure bob still works
async with backend_authenticated_ws_factory(backend_asgi_app, bob):
pass |
test boltzmann typo deprecation | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from MDAnalysis import units
class TestUnitEncoding(object):
def test_unicode(self):
try:
assert_equal(units.lengthUnit_factor[u"\u212b"], 1.0)
except KeyError:
raise AssertionError("Unicode symbol for Angtrom not supported")
def test_unicode_encoding_with_symbol(self):
try:
assert_equal(units.lengthUnit_factor[u"Å"], 1.0)
except KeyError:
raise AssertionError("UTF-8-encoded symbol for Angtrom not supported")
class TestConstants(object):
# CODATA 2010 (NIST): http://physics.nist.gov/cuu/Constants/
# (accessed 2015-02-15)
# Add a reference value to this dict for every entry in
# units.constants
constants_reference = (
('N_Avogadro', 6.02214129e+23), # mol**-1
('elementary_charge', 1.602176565e-19), # As
('calorie', 4.184), # J
        ('Boltzmann_constant', 8.314462159e-3), # kJ (mol K)**-1
('Boltzman_constant', 8.314462159e-3), # remove in 2.8.0
('electric_constant', 5.526350e-3), # As (Angstroms Volts)**-1
)
@pytest.mark.parametrize('name, value', constants_reference)
def test_constant(self, name, value):
assert_almost_equal(units.constants[name], value)
def METHOD_NAME(self):
wmsg = ("Please use 'Boltzmann_constant' henceforth. The key "
"'Boltzman_constant' was a typo and will be removed "
"in MDAnalysis 2.8.0.")
with pytest.warns(DeprecationWarning, match=wmsg):
units.constants['Boltzman_constant']
class TestConversion(object):
@staticmethod
def _assert_almost_equal_convert(value, u1, u2, ref):
val = units.convert(value, u1, u2)
assert_almost_equal(val, ref,
err_msg="Conversion {0} --> {1} failed".format(u1, u2))
nm = 12.34567
A = nm * 10.
@pytest.mark.parametrize('quantity, unit1, unit2, ref', (
(nm, 'nm', 'A', A),
(A, 'Angstrom', 'nm', nm),
))
def test_length(self, quantity, unit1, unit2, ref):
self._assert_almost_equal_convert(quantity, unit1, unit2, ref)
@pytest.mark.parametrize('quantity, unit1, unit2, ref', (
(1, 'ps', 'AKMA', 20.45482949774598),
(1, 'AKMA', 'ps', 0.04888821),
(1, 'ps', 'ms', 1e-9),
(1, 'ms', 'ps', 1e9),
(1, 'ps', 'us', 1e-6),
(1, 'us', 'ps', 1e6),
))
def test_time(self, quantity, unit1, unit2, ref):
self._assert_almost_equal_convert(quantity, unit1, unit2, ref)
@pytest.mark.parametrize('quantity, unit1, unit2, ref', (
(1, 'kcal/mol', 'kJ/mol', 4.184),
(1, 'kcal/mol', 'eV', 0.0433641),
))
def test_energy(self, quantity, unit1, unit2, ref):
self._assert_almost_equal_convert(quantity, unit1, unit2, ref)
@pytest.mark.parametrize('quantity, unit1, unit2, ref', (
(1, 'kJ/(mol*A)', 'J/m', 1.66053892103219e-11),
(2.5, 'kJ/(mol*nm)', 'kJ/(mol*A)', 0.25),
(1, 'kcal/(mol*Angstrom)', 'kJ/(mol*Angstrom)', 4.184),
))
def test_force(self, quantity, unit1, unit2, ref):
self._assert_almost_equal_convert(quantity, unit1, unit2, ref)
@pytest.mark.parametrize('quantity, unit1, unit2, ref', (
(1, 'A/ps', 'm/s', 1e-10/1e-12),
(1, 'A/ps', 'nm/ps', 0.1),
(1, 'A/ps', 'pm/ps', 1e2),
(1, 'A/ms', 'A/ps', 1e9),
(1, 'A/us', 'A/ps', 1e6),
(1, 'A/fs', 'A/ps', 1e-3),
(1, 'A/AKMA', 'A/ps', 1/4.888821e-2),
))
def test_speed(self, quantity, unit1, unit2, ref):
self._assert_almost_equal_convert(quantity, unit1, unit2, ref)
@pytest.mark.parametrize('quantity, unit1, unit2', ((nm, 'Stone', 'nm'),
(nm, 'nm', 'Stone')))
def test_unit_unknown(self, quantity, unit1, unit2):
with pytest.raises(ValueError):
units.convert(quantity, unit1, unit2)
def test_unit_unconvertable(self):
nm = 12.34567
A = nm * 10.
with pytest.raises(ValueError):
units.convert(A, 'A', 'ps')
class TestBaseUnits:
@staticmethod
@pytest.fixture
def ref():
# This is a copy of the dictionary we expect.
# We want to know if base units are added or altered.
ref = {"length": "A",
"time": "ps",
"energy": "kJ/mol",
"charge": "e",
"force": "kJ/(mol*A)",
"speed": "A/ps"}
return ref
def test_MDANALYSIS_BASE_UNITS_correct(self, ref):
assert ref == units.MDANALYSIS_BASE_UNITS |
register all arguments | import argparse
import os
import sys
from collections.abc import Callable
from typing import IO
from xdsl.backend.riscv import riscv_scf_to_asm
from xdsl.backend.riscv.lowering import (
convert_arith_to_riscv,
convert_func_to_riscv_func,
convert_memref_to_riscv,
convert_scf_to_riscv_scf,
reduce_register_pressure,
)
from xdsl.dialects.affine import Affine
from xdsl.dialects.arith import Arith
from xdsl.dialects.builtin import Builtin, ModuleOp
from xdsl.dialects.cf import Cf
from xdsl.dialects.cmath import CMath
from xdsl.dialects.experimental.dmp import DMP
from xdsl.dialects.experimental.fir import FIR
from xdsl.dialects.experimental.math import Math
from xdsl.dialects.func import Func
from xdsl.dialects.gpu import GPU
from xdsl.dialects.irdl.irdl import IRDL
from xdsl.dialects.linalg import Linalg
from xdsl.dialects.llvm import LLVM
from xdsl.dialects.memref import MemRef
from xdsl.dialects.mpi import MPI
from xdsl.dialects.pdl import PDL
from xdsl.dialects.printf import Printf
from xdsl.dialects.riscv import RISCV
from xdsl.dialects.riscv_func import RISCV_Func
from xdsl.dialects.riscv_scf import RISCV_Scf
from xdsl.dialects.scf import Scf
from xdsl.dialects.snitch import Snitch
from xdsl.dialects.snitch_runtime import SnitchRuntime
from xdsl.dialects.stencil import Stencil
from xdsl.dialects.test import Test
from xdsl.dialects.vector import Vector
from xdsl.frontend.passes.desymref import DesymrefyPass
from xdsl.frontend.symref import Symref
from xdsl.ir import Dialect, MLContext
from xdsl.parser import Parser
from xdsl.passes import ModulePass
from xdsl.transforms import (
canonicalize,
canonicalize_dmp,
dead_code_elimination,
lower_affine,
lower_mpi,
lower_riscv_func,
lower_snitch,
lower_snitch_runtime,
mlir_opt,
printf_to_llvm,
printf_to_putchar,
reconcile_unrealized_casts,
riscv_register_allocation,
)
from xdsl.transforms.experimental import (
convert_stencil_to_ll_mlir,
stencil_shape_inference,
stencil_storage_materialization,
)
from xdsl.transforms.experimental.dmp import stencil_global_to_local
from xdsl.utils.exceptions import ParseError
def get_all_dialects() -> list[Dialect]:
"""Return the list of all available dialects."""
return [
Affine,
Arith,
Builtin,
Cf,
CMath,
DMP,
FIR,
Func,
GPU,
Linalg,
IRDL,
LLVM,
Math,
MemRef,
MPI,
PDL,
Printf,
RISCV,
RISCV_Func,
RISCV_Scf,
Scf,
Snitch,
SnitchRuntime,
Stencil,
Symref,
Test,
Vector,
]
def get_all_passes() -> list[type[ModulePass]]:
"""Return the list of all available passes."""
return [
canonicalize.CanonicalizePass,
canonicalize_dmp.CanonicalizeDmpPass,
convert_stencil_to_ll_mlir.ConvertStencilToLLMLIRPass,
dead_code_elimination.DeadCodeElimination,
DesymrefyPass,
stencil_global_to_local.GlobalStencilToLocalStencil2DHorizontal,
stencil_global_to_local.LowerHaloToMPI,
lower_affine.LowerAffinePass,
lower_mpi.LowerMPIPass,
lower_riscv_func.LowerRISCVFunc,
lower_snitch.LowerSnitchPass,
lower_snitch_runtime.LowerSnitchRuntimePass,
mlir_opt.MLIROptPass,
printf_to_llvm.PrintfToLLVM,
printf_to_putchar.PrintfToPutcharPass,
reduce_register_pressure.RiscvReduceRegisterPressurePass,
riscv_register_allocation.RISCVRegisterAllocation,
convert_arith_to_riscv.ConvertArithToRiscvPass,
convert_func_to_riscv_func.ConvertFuncToRiscvFuncPass,
convert_memref_to_riscv.ConvertMemrefToRiscvPass,
convert_scf_to_riscv_scf.ConvertScfToRiscvPass,
riscv_scf_to_asm.LowerScfForToLabels,
stencil_shape_inference.StencilShapeInferencePass,
stencil_storage_materialization.StencilStorageMaterializationPass,
reconcile_unrealized_casts.ReconcileUnrealizedCastsPass,
]
class CommandLineTool:
ctx: MLContext
args: argparse.Namespace
"""
The argument parsers namespace which holds the parsed commandline
attributes.
"""
available_frontends: dict[str, Callable[[IO[str]], ModuleOp]]
"""
A mapping from file extension to a frontend that can handle this
file type.
"""
def METHOD_NAME(self, arg_parser: argparse.ArgumentParser):
arg_parser.add_argument(
"input_file", type=str, nargs="?", help="path to input file"
)
frontends = [name for name in self.available_frontends]
arg_parser.add_argument(
"-f",
"--frontend",
type=str,
required=False,
choices=frontends,
help="Frontend to be used for the input. If not set, "
"the xdsl frontend or the one for the file extension "
"is used.",
)
arg_parser.add_argument("--disable-verify", default=False, action="store_true")
arg_parser.add_argument(
"--allow-unregistered-dialect",
default=False,
action="store_true",
help="Allow the parsing of unregistered dialects.",
)
arg_parser.add_argument(
"--no-implicit-module",
default=False,
action="store_true",
help="Disable implicit addition of a top-level module op during parsing.",
)
def get_input_stream(self) -> tuple[IO[str], str]:
"""
Get the input stream to parse from, along with the file extension.
"""
if self.args.input_file is None:
f = sys.stdin
file_extension = "mlir"
else:
f = open(self.args.input_file)
_, file_extension = os.path.splitext(self.args.input_file)
file_extension = file_extension.replace(".", "")
return f, file_extension
def get_input_name(self):
return self.args.input_file or "stdin"
def register_all_dialects(self):
"""
Register all dialects that can be used.
Add other/additional dialects by overloading this function.
"""
for dialect in get_all_dialects():
self.ctx.register_dialect(dialect)
def register_all_frontends(self):
"""
Register all frontends that can be used.
Add other/additional frontends by overloading this function.
"""
def parse_mlir(io: IO[str]):
return Parser(
self.ctx,
io.read(),
self.get_input_name(),
).parse_module(not self.args.no_implicit_module)
self.available_frontends["mlir"] = parse_mlir
def parse_chunk(self, chunk: IO[str], file_extension: str) -> ModuleOp | None:
"""
Parse the input file by invoking the parser specified by the `parser`
argument. If not set, the parser registered for this file extension
is used.
"""
try:
return self.available_frontends[file_extension](chunk)
except ParseError as e:
if "parsing_diagnostics" in self.args and self.args.parsing_diagnostics:
print(e.with_context())
else:
raise Exception("Failed to parse:\n" + e.with_context()) from e
finally:
chunk.close() |
make app | import os
import uuid
from unittest import mock
from pyramid import testing
from kinto.core import initialization
from kinto.core.events import ACTIONS, ResourceChanged, ResourceRead
from kinto.core.listeners import ListenerBase
from kinto.core.testing import unittest
UID = str(uuid.uuid4())
class ViewSet:
def get_name(*args, **kw):
return "mushrooms"
class Service:
viewset = ViewSet()
class Match:
cornice_services = {"watev": Service()}
pattern = "watev"
class Request:
path = "/1/school/students/"
prefixed_userid = "tarek"
matchdict = {"id": UID}
registry = matched_route = Match()
current_resource_name = "school"
class ListenerSetupTest(unittest.TestCase):
def setUp(self):
demo_patch = mock.patch("tests.core.listeners.load_from_config")
self.addCleanup(demo_patch.stop)
self.demo_mocked = demo_patch.start()
def METHOD_NAME(self, extra_settings={}):
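        # Build a minimal Pyramid app configured with the given settings and
        # run kinto.core's listener setup on it, so each test can check whether
        # the mocked listener was instantiated and invoked.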
settings = {"event_listeners": "tests.core.listeners"}
settings.update(**extra_settings)
config = testing.setUp(settings=settings)
config.commit()
initialization.setup_listeners(config)
return config
def test_listener_module_is_specified_via_settings(self):
self.METHOD_NAME(
{"event_listeners": "demo", "event_listeners.demo.use": "tests.core.listeners"}
)
self.assertTrue(self.demo_mocked.called)
def test_listener_module_can_be_specified_via_listeners_list(self):
self.METHOD_NAME(
{"event_listeners": "demo", "event_listeners.demo.use": "tests.core.listeners"}
)
self.assertTrue(self.demo_mocked.called)
def test_callback_called_when_action_is_not_filtered(self):
config = self.METHOD_NAME(
{"event_listeners": "demo", "event_listeners.demo.use": "tests.core.listeners"}
)
ev = ResourceChanged({"action": ACTIONS.CREATE.value}, [], Request())
config.registry.notify(ev)
self.assertTrue(self.demo_mocked.return_value.called)
def test_callback_is_not_called_when_action_is_filtered(self):
config = self.METHOD_NAME(
{
"event_listeners": "demo",
"event_listeners.demo.use": "tests.core.listeners",
"event_listeners.demo.actions": "delete",
}
)
ev = ResourceChanged({"action": ACTIONS.CREATE.value}, [], Request())
config.registry.notify(ev)
self.assertFalse(self.demo_mocked.return_value.called)
def test_callback_called_when_resource_is_not_filtered(self):
config = self.METHOD_NAME(
{"event_listeners": "demo", "event_listeners.demo.use": "tests.core.listeners"}
)
event = ResourceChanged(
{"action": ACTIONS.CREATE.value, "resource_name": "mushroom"}, [], Request()
)
config.registry.notify(event)
self.assertTrue(self.demo_mocked.return_value.called)
def test_callback_is_not_called_when_resource_is_filtered(self):
config = self.METHOD_NAME(
{
"event_listeners": "demo",
"event_listeners.demo.use": "tests.core.listeners",
"event_listeners.demo.resources": "toad",
}
)
event = ResourceChanged(
{"action": ACTIONS.CREATE.value, "resource_name": "mushroom"}, [], Request()
)
config.registry.notify(event)
self.assertFalse(self.demo_mocked.return_value.called)
def test_callback_is_not_called_on_read_by_default(self):
config = self.METHOD_NAME(
{"event_listeners": "demo", "event_listeners.demo.use": "tests.core.listeners"}
)
event = ResourceRead({"action": ACTIONS.READ.value}, [], Request())
config.registry.notify(event)
self.assertFalse(self.demo_mocked.return_value.called)
def test_callback_is_called_on_read_if_specified(self):
config = self.METHOD_NAME(
{
"event_listeners": "demo",
"event_listeners.demo.use": "tests.core.listeners",
"event_listeners.demo.actions": "read",
}
)
event = ResourceRead({"action": ACTIONS.READ.value}, [], Request())
config.registry.notify(event)
self.assertTrue(self.demo_mocked.return_value.called)
def test_same_callback_is_called_for_read_and_write_specified(self):
config = self.METHOD_NAME(
{
"event_listeners": "demo",
"event_listeners.demo.use": "tests.core.listeners",
"event_listeners.demo.actions": "read create delete",
}
)
ev = ResourceRead({"action": ACTIONS.READ.value}, [], Request())
config.registry.notify(ev)
ev = ResourceChanged({"action": ACTIONS.CREATE.value}, [], Request())
config.registry.notify(ev)
self.assertEqual(self.demo_mocked.return_value.call_count, 2)
def test_loading_can_read_configuration_from_environment(self):
environ = {
"KINTO_EVENT_LISTENERS": "kvstore",
"KINTO_EVENT_LISTENERS_KVSTORE_USE": "tests.core.listeners",
"KINTO_EVENT_LISTENERS_KVSTORE_URL": "demo://demo:6379/0",
"KINTO_EVENT_LISTENERS_KVSTORE_POOL_SIZE": "5",
"KINTO_EVENT_LISTENERS_KVSTORE_LISTNAME": "queue",
"KINTO_EVENT_LISTENERS_KVSTORE_ACTIONS": "delete",
"KINTO_EVENT_LISTENERS_KVSTORE_RESOURCES": "toad",
}
os.environ.update(**environ)
config = self.METHOD_NAME(
{
# With real/full initialization, these should not be necessary:
"settings_prefix": "kinto",
"event_listeners": "kvstore",
}
)
# Listener is instantiated.
self.assertTrue(self.demo_mocked.called)
# Action filtering is read from ENV.
event = ResourceChanged(
{"action": ACTIONS.DELETE.value, "resource_name": "toad"}, [], Request()
)
config.registry.notify(event)
self.assertTrue(self.demo_mocked.return_value.called)
self.demo_mocked.reset_mock()
# Action filtering is read from ENV.
event = ResourceChanged({"action": ACTIONS.CREATE.value}, [], Request())
config.registry.notify(event)
self.assertFalse(self.demo_mocked.return_value.called)
# Resource filtering is read from ENV.
event = ResourceChanged(
{"action": ACTIONS.CREATE.value, "resource_name": "mushroom"}, [], Request()
)
config.registry.notify(event)
self.assertFalse(self.demo_mocked.return_value.called)
# Clean-up.
for k in environ.keys():
os.environ.pop(k)
class ListenerBaseTest(unittest.TestCase):
def test_not_implemented(self):
# make sure we can't use the base listener
listener = ListenerBase()
self.assertRaises(NotImplementedError, listener, object()) |
restore backup from xml file | # coding: utf-8
from datetime import datetime
import errno
import os
import shutil
import sys
import tempfile
import zipfile
from time import sleep
from onadata.apps.logger.import_tools import django_file
from onadata.apps.logger.models import Instance
from onadata.libs.utils.logger_tools import create_instance
from onadata.libs.utils.model_tools import queryset_iterator
DATE_FORMAT = "%Y-%m-%d-%H-%M-%S"
def _date_created_from_filename(filename):
base_name, ext = os.path.splitext(filename)
parts = base_name.split("-")
if len(parts) < 6:
raise ValueError(
"Inavlid filename - it must be in the form"
" 'YYYY-MM-DD-HH-MM-SS[-i].xml'")
parts_dict = dict(
zip(["year", "month", "day", "hour", "min", "sec"], parts))
return datetime.strptime(
"%(year)s-%(month)s-%(day)s-%(hour)s-%(min)s-%(sec)s" %
parts_dict, DATE_FORMAT)
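# Illustrative example (filename is hypothetical): only the first six
# dash-separated fields are used, so an optional "-<i>" duplicate suffix is ignored:
#   _date_created_from_filename("2016-09-01-08-25-38-1.xml")
#   -> datetime(2016, 9, 1, 8, 25, 38)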
def create_zip_backup(zip_output_file, user, xform=None):
# create a temp dir that we'll create our structure within and zip it
# when we are done
tmp_dir_path = tempfile.mkdtemp()
instances_path = os.path.join(tmp_dir_path, "instances")
# get the xls file from storage
# for each submission in the database - create an xml file in this
# form
# /<id_string>/YYYY/MM/DD/YYYY-MM-DD-HH-MM-SS.xml
qs = Instance.objects.filter(xform__user=user)
if xform:
qs = qs.filter(xform=xform)
num_instances = qs.count()
done = 0
sys.stdout.write("Creating XML Instances\n")
for instance in queryset_iterator(qs, 100):
# get submission time
date_time_str = instance.date_created.strftime(DATE_FORMAT)
date_parts = date_time_str.split("-")
sub_dirs = os.path.join(*date_parts[:3])
# create the directories
full_path = os.path.join(instances_path, sub_dirs)
if not os.path.exists(full_path):
try:
os.makedirs(full_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
full_xml_path = os.path.join(full_path, date_time_str + ".xml")
# check for duplicate file names
file_index = 1
while os.path.exists(full_xml_path):
full_xml_path = os.path.join(
full_path, "%s-%d.xml" % (date_time_str, file_index))
file_index += 1
# create the instance xml
with open(full_xml_path, 'wb') as f:
f.write(instance.xml.encode())
done += 1
sys.stdout.write("\r%.2f %% done" % (
float(done)/float(num_instances) * 100))
sys.stdout.flush()
sleep(0)
# write zip file
sys.stdout.write("\nWriting to ZIP arhive.\n")
zf = zipfile.ZipFile(zip_output_file, "w")
done = 0
for dir_path, dir_names, file_names in os.walk(tmp_dir_path):
for file_name in file_names:
archive_path = dir_path.replace(tmp_dir_path + os.path.sep,
"", 1)
zf.write(os.path.join(dir_path, file_name),
os.path.join(archive_path, file_name))
done += 1
sys.stdout.write("\r%.2f %% done" % (
float(done)/float(num_instances) * 100))
sys.stdout.flush()
sleep(0)
zf.close()
    # remove the temporary dir tree
shutil.rmtree(tmp_dir_path)
sys.stdout.write("\nBackup saved to %s\n" % zip_output_file)
def restore_backup_from_zip(zip_file_path, username):
try:
temp_directory = tempfile.mkdtemp()
zf = zipfile.ZipFile(zip_file_path)
zf.extractall(temp_directory)
except zipfile.BadZipfile:
sys.stderr.write("Bad zip arhcive.")
else:
return restore_backup_from_path(temp_directory, username, "backup")
finally:
shutil.rmtree(temp_directory)
def METHOD_NAME(xml_instance_path, username):
# check if its a valid xml instance
file_name = os.path.basename(xml_instance_path)
xml_file = django_file(
xml_instance_path,
field_name="xml_file",
content_type="text/xml")
media_files = []
try:
date_created = _date_created_from_filename(file_name)
except ValueError as e:
sys.stderr.write(
"Couldn't determine date created from filename: '%s'\n" %
file_name)
date_created = datetime.now()
sys.stdout.write("Creating instance from '%s'\n" % file_name)
try:
create_instance(
username, xml_file, media_files,
date_created_override=date_created)
return 1
except Exception as e:
sys.stderr.write(
"Could not restore %s, create instance said: %s\n" %
(file_name, e))
return 0
def restore_backup_from_path(dir_path, username, status):
"""
Only restores xml submissions, media files are assumed to still be in
storage and will be retrieved by the filename stored within the submission
"""
num_instances = 0
num_restored = 0
for dir_path, dir_names, file_names in os.walk(dir_path):
for file_name in file_names:
# check if its a valid xml instance
xml_instance_path = os.path.join(dir_path, file_name)
num_instances += 1
num_restored += METHOD_NAME(
xml_instance_path,
username)
return num_instances, num_restored |
always iterable | import sys, os
from glob import glob
from shutil import copyfile
import fnmatch
import yaml, pprint
import subprocess
import jinja2
from itertools import *
import concurrent.futures
def ParseArguments() -> (dict, dict):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('configPath')
parser.add_argument('-n', '--dryRun', action='store_true')
parser.add_argument('-j', '--jobs', type=int, default=1)
parser.add_argument('-o', '--outDir')
parser.add_argument('-t', '--testName', help="Run specific test from config.yaml. By default will run all tests.")
args = parser.parse_args()
with open(args.configPath, "r") as f:
environment = jinja2.Environment()
template = environment.from_string(f.read())
config, tests = yaml.safe_load_all(template.render(glob=glob))
config['dryRun'] = args.dryRun
config['jobs'] = args.jobs
if args.outDir: config['outDir'] = args.outDir
config['onlyRunTestName'] = args.testName
config.setdefault('resolution', [600, 480])
return config, tests
def mkdir(path):
os.makedirs(path, exist_ok=True)
def extract(d:dict, k, default=None):
v = d.get(k, default)
if k in d: del d[k]
return v
def alwaysList(x):
if x is None: return []
if isinstance(x, list): return x
if isinstance(x, dict): return list(x.items())
return [x]
def alwaysDict(x):
if x is None: return {}
if isinstance(x, dict): return x
if isinstance(x, list):
assert len(set(x)) == len(x)
return {k:None for k in x}
return {x: None}
def METHOD_NAME(x):
if x is None: return iter(())
if isinstance(x, str): return iter((x,))
try: return iter(x)
except TypeError: return iter((x,))
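# Hedged usage sketch (values are illustrative, not from any config file):
#   alwaysList(None) -> [], alwaysList("x") -> ["x"], alwaysList({"a": 1}) -> [("a", 1)]
#   alwaysDict(["a", "b"]) -> {"a": None, "b": None}
#   METHOD_NAME(None) -> empty iterator, METHOD_NAME("abc") -> iter(("abc",))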
def locateRelPath(path, root):
if os.path.isabs(path):
return path
return os.path.join(root, path)
def GetConfigForCompletedRun(newConfig):
if not os.path.exists(newConfig['outputLog']): return None
with open(newConfig['outputLog'], "r") as log:
lines = log.readlines()
oldConfigYaml = ''.join(lines[lines.index("=========== CONFIG ===========\n")+1:lines.index("==============================\n")])
return yaml.safe_load(oldConfigYaml)
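# Hedged sketch of the log layout assumed above (config values are illustrative):
#   ...renderer output...
#   =========== CONFIG ===========
#   name: isosurface        <- yaml dump of the test config dict
#   renderer: volume
#   ==============================
#   ...
#   =========== TEST COMPLETE ===========
# Only the region between the CONFIG marker and the following line of '='
# characters is re-parsed with yaml.safe_load and compared to the current config.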
import signal
import psutil
gotKeyboardInterrupt = False
def signal_handler(sig, frame):
global gotKeyboardInterrupt
gotKeyboardInterrupt = True
current_process = psutil.Process()
children = current_process.children(recursive=True)
for child in children:
signal.pthread_kill(child.pid, signal.SIGKILL)
print('******* Jobs cancelled *******')
signal.signal(signal.SIGINT, signal_handler)
def RunTest(config):
if os.path.exists(config['output']) and os.path.exists(config['outputLog']):
with open(config['outputLog'], "r") as log:
if "=========== TEST COMPLETE ===========" in log.read():
if config == GetConfigForCompletedRun(config):
# print("Skipping test", config['name'], config['renderer'])
return
else:
print("Config changed for", config['name'])
print("Run test", config['name'], config['renderer'])
cmd = ['python', '-u', 'run_test.py', yaml.dump(config)]
with open(config['outputLog'], "w", 1) as log:
try:
subprocess.run(cmd, stdout=log, stderr=subprocess.STDOUT, text=True)
except Exception:
pass
if gotKeyboardInterrupt:
print("Stopped", config['name'], config['renderer'])
return
print("\n=========== TEST COMPLETE ===========\n", file=log)
if not os.path.exists(config['output']):
copyfile("missing.png", config['output'])
def TestCanBeRunConcurrently(test:dict) -> bool:
return test.get('runConcurrently', True)
def GenerateTests(config, tests):
for name, test in tests.items():
files = [locateRelPath(f, config['dataRootDir']) for f in alwaysList(extract(test, 'files'))]
files = sorted(chain.from_iterable(map(glob, files)))
dataType = extract(test, 'type')
if extract(test, 'skip'):
continue
renderers:dict = alwaysDict(extract(test, 'renderers', config['rendererSets']['default']))
for k in renderers.copy():
if k in config['rendererSets']:
del renderers[k]
for r, v in alwaysDict(config['rendererSets'][k]).items():
renderers.setdefault(r, v)
for ren, renConfig in renderers.items():
if isinstance(renConfig, str):
renConfig = {'variable': renConfig}
renConfig = alwaysDict(renConfig)
variableKeys = [k for k in renConfig if 'variable' in k.lower()]
variables = [i for i in renConfig.items() if i[0] in variableKeys]
for k,_ in variables:
del renConfig[k]
varCombos = [*product(*[[*zip(repeat(k), alwaysList(v))] for k,v in variables])]
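            # For example (hypothetical values): variables = [("variable", ["a", "b"])]
            # yields varCombos = [(("variable", "a"),), (("variable", "b"),)],
            # i.e. one tuple of (key, value) pairs per generated test.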
for combo in varCombos:
fullName = f"{name}-{ren}"
if len(varCombos) > 1:
fullName += '-'.join(['', *chain.from_iterable(combo)])
fName = fullName + ".png"
fName = fName.replace('/', '-')
yield {
'name': name,
'outputDir': config['outDir'],
'outputName': fName,
'output': f"{config['outDir']}/{fName}",
'outputSession': f"{config['outDir']}/meta/{fName}.vs3",
'outputLog': f"{config['outDir']}/meta/{fName}.log",
'files': files,
'type': dataType,
'renderer': ren,
'dryRun': config['dryRun'],
'resolution': config['resolution'],
**dict(combo),
**renConfig,
**test,
}
if __name__ == "__main__":
    config, testConfigs = ParseArguments()
# print("config = ", end='')
# pprint.PrettyPrinter(indent=4).pprint(config)
mkdir(config['outDir'])
mkdir(f"{config['outDir']}/meta")
tests = GenerateTests(config, testConfigs)
if config['onlyRunTestName']:
tests = filter(lambda t: fnmatch.fnmatch(t['name'], config['onlyRunTestName']), tests)
tests = list(tests)
nJobs = config.get('jobs', 1)
if nJobs > 1:
concurrentTests = filter(TestCanBeRunConcurrently, tests)
singleTests = filter(lambda t: not TestCanBeRunConcurrently(t), tests)
else:
singleTests = tests
concurrentTests = []
if concurrentTests:
print(f"Running concurrent tests with {nJobs} jobs")
with concurrent.futures.ThreadPoolExecutor(max_workers=nJobs) as executor:
executor.map(RunTest, concurrentTests)
if singleTests:
print(f"Running single-threaded tests")
for test in singleTests:
RunTest(test) |
eval func | import os
import time
import argparse
from datasets import load_dataset
from transformers import WhisperForConditionalGeneration, WhisperProcessor
import torch
from evaluate import load
parser = argparse.ArgumentParser()
parser.add_argument('--int8', dest='int8', action='store_true')
parser.add_argument('--tune', dest='tune', action='store_true',
help='tune best int8 model with Neural Compressor on calibration dataset')
parser.add_argument('--accuracy_only', dest='accuracy_only', action='store_true',
help='run accuracy_only')
parser.add_argument('--benchmark', dest='benchmark', action='store_true',
help='run benchmark')
parser.add_argument('--batch_size', default=1, type=int,
                    help='For benchmark measurement only.')
parser.add_argument('--iters', default=0, type=int,
                    help='For benchmark measurement only.')
parser.add_argument('--warmup_iters', default=5, type=int,
help='For benchmark measurement only.')
parser.add_argument('--output_dir', default="saved_results", type=str,
help='the folder path to save the results.')
parser.add_argument('--cache_dir', default=None, type=str,
                    help='the folder path to cache the dataset.')
args = parser.parse_args()
model_name = 'openai/whisper-large'
processor = WhisperProcessor.from_pretrained(model_name)
model = WhisperForConditionalGeneration.from_pretrained(model_name)
# dataset
librispeech_test_clean = load_dataset("librispeech_asr", "clean", split="test", cache_dir=args.cache_dir)
# metric
wer = load("wer")
def METHOD_NAME(model):
predictions = []
references = []
for batch in librispeech_test_clean:
audio = batch["audio"]
input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features
reference = processor.tokenizer._normalize(batch['text'])
references.append(reference)
with torch.no_grad():
predicted_ids = model.generate(input_features)[0]
transcription = processor.decode(predicted_ids)
prediction = processor.tokenizer._normalize(transcription)
predictions.append(prediction)
wer_result = wer.compute(references=references, predictions=predictions)
print(f"Result wer: {wer_result * 100}")
accuracy = 1 - wer_result
print("Accuracy: %.5f" % accuracy)
return accuracy
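# Illustrative example of the metric used above (made-up strings, not dataset
# output): wer.compute(references=["hello world"], predictions=["hello word"])
# returns 0.5 (one substitution over two reference words), so the reported
# accuracy would be 1 - 0.5 = 0.5.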
if args.tune:
from neural_compressor import PostTrainingQuantConfig, quantization
op_type_dict = {
"Embedding": {"weight": {"dtype": ["fp32"]}, "activation": {"dtype": ["fp32"]}}
}
conf = PostTrainingQuantConfig(approach="dynamic", op_type_dict=op_type_dict)
q_model = quantization.fit(model,
conf=conf,
METHOD_NAME=METHOD_NAME)
q_model.save(args.output_dir)
exit(0)
#benchmark
if args.int8:
from neural_compressor.utils.pytorch import load
model = load(
os.path.abspath(os.path.expanduser(args.output_dir)), model)
if args.accuracy_only:
METHOD_NAME(model)
exit(0)
if args.benchmark:
from neural_compressor.config import BenchmarkConfig
from neural_compressor import benchmark
def b_func(model):
total_time = 0
for i, batch in enumerate(librispeech_test_clean):
if i > args.iters:
break
audio = batch["audio"]
input_features = processor(audio["array"], sampling_rate=audio["sampling_rate"], return_tensors="pt").input_features
tic = time.time()
with torch.no_grad():
predicted_ids = model.generate(input_features)[0]
toc = time.time()
if i >= args.warmup_iters:
total_time += (toc - tic)
latency = total_time / (args.iters - args.warmup_iters)
print('Latency: %.3f ms' % (latency * 1000))
        print('Throughput: %.3f samples/sec' % (args.batch_size / latency))
print('Batch size = %d' % args.batch_size)
b_conf = BenchmarkConfig(
cores_per_instance=4,
num_of_instance=1
)
benchmark.fit(model, b_conf, b_func=b_func)
exit(0) |
test json gather array | import csv
import json
from io import StringIO
from urllib import parse
import numpy as np
from webtest import TestApp
import tangos
import tangos.testing.simulation_generator
import tangos.web
from tangos import testing
def setup_module():
testing.init_blank_db_for_testing()
creator = tangos.testing.simulation_generator.SimulationGeneratorForTests()
halo_offset = 0
for ts in range(1,4):
creator.add_timestep()
creator.add_objects_to_timestep(4)
creator.link_last_halos()
tangos.get_default_session().commit()
tangos.get_item("sim/ts1/halo_1")['test_image'] = np.zeros((500,500,3))
for ts in tangos.get_simulation(1).timesteps:
for h in ts.halos:
h['test_value'] = 1.0
tangos.get_default_session().commit()
creator.add_timestep() # add ts 4
creator.add_objects_to_timestep(3)
tangos.get_default_session().commit()
# now make the object have halo numbers which have a different ordering to the database IDs
tangos.get_item("sim/ts4/1")['test_value'] = 1.0
tangos.get_item("sim/ts4/2")['test_value'] = 2.0
tangos.get_item("sim/ts4/3")['test_value'] = 3.0
tangos.get_item("sim/ts4/1").halo_number = 10
tangos.get_default_session().commit()
creator = tangos.testing.simulation_generator.SimulationGeneratorForTests("simname/has/slashes")
creator.add_timestep()
creator.add_objects_to_timestep(1)
creator.add_timestep()
creator.add_objects_to_timestep(1)
creator.link_last_halos()
tangos.get_simulation(2).timesteps[0].halos[0]['test_value'] = 2.0
tangos.get_default_session().commit()
global app
app = TestApp(tangos.web.main({}))
def teardown_module():
tangos.core.close_db()
def test_root_page():
response = app.get("/")
assert response.status_int==200
assert "table" in response
def test_simulation_page():
response = app.get("/sim")
assert response.status_int==200
assert "table" in response
def test_timestep_page():
response = app.get("/sim/ts1")
assert response.status_int==200
assert "table" in response
def test_halo_page():
response = app.get("/sim/ts1/halo_1")
assert response.status_int == 200
assert "table" in response
def test_plot():
response = app.get("/sim/ts1/halo_1/t()/vs/z().png")
assert response.status_int == 200
assert response.content_type == 'image/png'
def test_plot_as_csv_timeseries():
response = app.get("/sim/ts3/halo_1/test_value/vs/z().csv")
assert response.status_int == 200
assert response.content_type == 'text/csv'
assert "filename=timeseries_test_value_vs_z().csv" in response.content_disposition
csv_reader = csv.reader(StringIO(response.body.decode('utf-8')))
csv_rows = list(csv_reader)
assert csv_rows[0]==['test_value', 'z()']
assert csv_rows[1]==['1.0','6.0']
assert csv_rows[2]==['1.0', '7.0']
assert csv_rows[3]==['1.0', '8.0']
def test_plot_as_csv_timestep():
response = app.get("/sim/ts3/test_value/vs/halo_number().csv")
assert response.status_int == 200
assert response.content_type == 'text/csv'
assert "filename=timestep_test_value_vs_halo_number().csv" in response.content_disposition
csv_reader = csv.reader(StringIO(response.body.decode('utf-8')))
csv_rows = list(csv_reader)
assert csv_rows[0]==['test_value', 'halo_number()']
assert csv_rows[1]==['1.0','1.0']
assert csv_rows[2] == ['1.0', '2.0']
assert csv_rows[3] == ['1.0', '3.0']
assert csv_rows[4] == ['1.0', '4.0']
def test_image_plot():
response = app.get("/sim/ts1/halo_1/test_image.png")
assert response.status_int == 200
assert response.content_type == 'image/png'
def test_json_gather_float():
response = app.get("/sim/ts1/gather/halo/test_value.json")
assert response.content_type == 'application/json'
assert response.status_int == 200
result = json.loads(response.body.decode('utf-8'))
assert result['timestep']=='ts1'
assert result['data_formatted']==["1.00", "1.00", "1.00", "1.00"]
assert result['is_number'] is True
assert result['is_boolean'] is False
assert result['is_array'] is False
def METHOD_NAME():
response = app.get("/sim/ts1/gather/halo/test_image.json")
assert response.content_type == 'application/json'
assert response.status_int == 200
result = json.loads(response.body.decode('utf-8'))
assert result['timestep']=='ts1'
assert result['data_formatted'][0]=="Array"
assert result['is_number'] is False
assert result['is_boolean'] is False
assert result['is_array'] is True
def test_json_gather_bool():
response = app.get("/sim/ts1/gather/halo/has_property(test_image).json")
assert response.content_type == 'application/json'
assert response.status_int == 200
result = json.loads(response.body.decode('utf-8'))
assert result['timestep'] == 'ts1'
assert result['data_formatted'] == ["True", "False", "False", "False"]
assert result['is_number'] is False
assert result['is_boolean'] is True
assert result['is_array'] is False
def test_simulation_with_slash():
response = app.get("/")
assert "simname/has/slashes" in response
simpage_response = response.click("simname/has/slashes")
assert "Simulation: simname/has/slashes" in simpage_response
ts_response = simpage_response.click("Go", index=1)
assert "Timestep: ts1" in ts_response
# Unfortunately timestep page is now generated in javascript, so we can
# no longer test ts_response.click("Go")
halo_response = app.get("/simname_has_slashes/ts1/1")
assert "halo 1 of ts1" in halo_response
calculate_url = halo_response.pyquery("#calculate_url").text()
calculate_url = parse.unquote(calculate_url)
assert "simname_has_slashes" in calculate_url
halo_next_step_response = halo_response.click(r"\+1$").follow()
assert "halo 1 of ts2" in halo_next_step_response
def test_ordering_as_expected():
"""Tests for an issue where data returned to the web interface was in a different order to the initial table,
causing results to be displayed out of order"""
assert (tangos.get_item("sim/ts4").calculate_all("halo_number()") == np.array([10,2,3])).all()
assert (tangos.get_item("sim/ts4").calculate_all("halo_number()",order_by_halo_number=True) == np.array([2,3,10])).all()
response = app.get("/sim/ts4/gather/halo/test_value.json")
assert response.content_type == 'application/json'
assert response.status_int == 200
result = json.loads(response.body.decode('utf-8'))
assert result['timestep'] == 'ts4'
assert result['data_formatted'] == ["2.00", "3.00", "1.00"] |
test duplicated params | # Owner(s): ["module: inductor"]
import copy
import functools
import unittest
import torch
import torch._export
import torch._inductor
import torch.fx._pytree as fx_pytree
from torch._dynamo.testing import same
from torch.testing._internal.common_utils import IS_FBCODE, TEST_WITH_ROCM, TestCase
from torch.testing._internal.inductor_utils import HAS_CUDA
from torch.utils import _pytree as pytree
aten = torch.ops.aten
requires_cuda = functools.partial(unittest.skipIf, not HAS_CUDA, "requires cuda")
class AOTInductorModelRunner:
@classmethod
def load(cls, model, example_inputs, example_outputs, options=None):
# AOTInductorModel relies on the caller to pass in output_tensors,
# so we need to explicitly allocate output tensors here.
output_tensors = []
example_outputs, output_spec = pytree.tree_flatten(example_outputs)
for output in example_outputs:
output_tensors.append(torch.empty_like(output))
# The exact API is subject to change
so_path, exported = torch._export.aot_compile(
model,
example_inputs,
options=options,
)
# Use a utility function for easier testing
source = """
#include <torch/csrc/inductor/aot_runtime/model.h>
torch::aot_inductor::AOTInductorModel model;
void run(
const std::vector<at::Tensor>& input_tensors,
std::vector<at::Tensor>& output_tensors) {
model.run(input_tensors, output_tensors, at::cuda::getCurrentCUDAStream());
}
"""
optimized = torch.utils.cpp_extension.load_inline(
name="aot_inductor",
cpp_sources=[source],
functions=["run"],
extra_ldflags=[so_path],
with_cuda=True,
).run
return optimized, exported, output_tensors, output_spec
@classmethod
def run(cls, model, example_inputs, example_outputs, options=None):
example_outputs = copy.deepcopy(example_outputs)
optimized, exported, output_tensors, output_spec = AOTInductorModelRunner.load(
model, example_inputs, example_outputs, options
)
param_buffer_values = list(exported.state_dict.values())
flat_example_inputs = fx_pytree.tree_flatten_spec(
example_inputs, exported.call_spec.in_spec
)
all_args = (*param_buffer_values, *flat_example_inputs)
optimized(all_args, output_tensors)
return pytree.tree_unflatten(output_tensors, output_spec)
class AotInductorTests(TestCase):
def test_simple(self):
class Repro(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.randn(10, 10, device="cuda")
def forward(self, x, y):
return x + torch.nn.functional.linear(y, self.weight)
model = Repro()
example_inputs = (
torch.randn(10, 10, device="cuda"),
torch.randn(10, 10, device="cuda"),
)
expected = model(*example_inputs)
actual = AOTInductorModelRunner.run(model, example_inputs, expected)
self.assertTrue(same(actual, expected))
def test_missing_output(self):
class Repro(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
a = torch.sin(x)
b = torch.mm(a, y)
c = torch.cos(b)
return c
model = Repro()
example_inputs = (
torch.randn(10, 10, device="cuda"),
torch.randn(10, 10, device="cuda"),
)
expected = model(*example_inputs)
actual = AOTInductorModelRunner.run(model, example_inputs, expected)
self.assertTrue(same(actual, expected))
def test_output_misaligned(self):
class Repro(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
x_unsqueeze = torch.unsqueeze(x, dim=0)
y_unsqueeze = torch.unsqueeze(y, dim=0)
cat = torch.cat([x_unsqueeze, y_unsqueeze], dim=0)
x_getitem = cat[0]
y_getitem = cat[1]
x_sigmoid = torch.sigmoid(x_getitem)
return x_sigmoid, y_getitem
model = Repro()
example_inputs = (
torch.randn(10, 10, device="cuda"),
torch.randn(10, 10, device="cuda"),
)
expected = model(*example_inputs)
actual = AOTInductorModelRunner.run(model, example_inputs, expected)
self.assertTrue(same(actual, expected))
def test_dynamic_smem_above_default_limit(self):
class Repro(torch.nn.Module):
def forward(self, x, y):
return x @ y
model = Repro()
# on A100, the generated Triton kernel for this MM
# requires 55296 bytes of dynamic SMEM which is above
# the A100's default dynamic SMEM limit of 49152 bytes.
example_inputs = (
torch.randn(10285, 96, device="cuda"),
torch.randn(96, 1, device="cuda"),
)
expected = model(*example_inputs)
actual = AOTInductorModelRunner.run(
model,
example_inputs,
expected,
options={
"max_autotune": True,
"max_autotune_gemm_backends": "TRITON",
},
)
self.assertTrue(same(actual, expected))
def test_addmm(self):
class Model(torch.nn.Module):
def __init__(self, n, k):
super().__init__()
self.weight = torch.randn(n, k, device="cuda")
self.bias = torch.randn(n, device="cuda")
def forward(self, a):
return torch.nn.functional.linear(a, self.weight, self.bias)
M = 8
N = 6
K = 16
model = Model(N, K)
batch = 2
a = torch.randn(batch, M, K, device="cuda")
example_inputs = (a,)
expected = model(*example_inputs)
actual = AOTInductorModelRunner.run(model, example_inputs, expected)
self.assertTrue(same(actual, expected))
def METHOD_NAME(self):
class Model(torch.nn.Module):
def __init__(self):
super().__init__()
self.p = torch.nn.Parameter(torch.rand(6))
self.q = self.p
def forward(self, x):
return self.p * x + self.q
model = Model()
example_inputs = (torch.rand(6),)
expected = model(*example_inputs)
actual = torch._export.export(model, example_inputs)(*example_inputs)
self.assertTrue(same(actual, expected))
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
# cpp_extension N/A in fbcode
if HAS_CUDA and not TEST_WITH_ROCM and not IS_FBCODE:
run_tests(needs="filelock") |
test link only | import os
import pandas as pd
import pyspark.sql.functions as f
import pytest
from pyspark.sql.types import StringType, StructField, StructType
import splink.spark.comparison_level_library as cll
import splink.spark.comparison_library as cl
from splink.spark.linker import SparkLinker
from .basic_settings import get_settings_dict, name_comparison
from .decorator import mark_with_dialects_including
from .linker_utils import (
_test_write_functionality,
register_roc_data,
)
def test_full_example_spark(df_spark, tmp_path):
# Annoyingly, this needs an independent linker as csv doesn't
# accept arrays as inputs, which we are adding to df_spark below
linker = SparkLinker(df_spark, get_settings_dict())
# Test that writing to files works as expected
def spark_csv_read(x):
return linker.spark.read.csv(x, header=True).toPandas()
_test_write_functionality(linker, spark_csv_read)
# Convert a column to an array to enable testing intersection
df_spark = df_spark.withColumn("email", f.array("email"))
settings_dict = get_settings_dict()
# Only needed because the value can be overwritten by other tests
settings_dict["comparisons"][1] = cl.exact_match("surname")
settings_dict["comparisons"].append(name_comparison(cll, "surname"))
settings = {
"probability_two_random_records_match": 0.01,
"link_type": "dedupe_only",
"blocking_rules_to_generate_predictions": [
{"blocking_rule": "l.surname = r.surname", "salting_partitions": 3},
],
"comparisons": [
cl.jaro_winkler_at_thresholds("first_name", 0.9),
cl.jaro_at_thresholds("surname", 0.9),
cl.damerau_levenshtein_at_thresholds("dob", 2),
{
"comparison_levels": [
cll.array_intersect_level("email"),
cll.else_level(),
]
},
cl.jaccard_at_thresholds("city", [0.9]),
],
"retain_matching_columns": True,
"retain_intermediate_calculation_columns": True,
"additional_columns_to_retain": ["group"],
"em_convergence": 0.01,
"max_iterations": 2,
}
linker = SparkLinker(
df_spark,
settings,
break_lineage_method="checkpoint",
num_partitions_on_repartition=2,
)
linker.profile_columns(
["first_name", "surname", "first_name || surname", "concat(city, first_name)"]
)
linker.compute_tf_table("city")
linker.compute_tf_table("first_name")
linker.estimate_probability_two_random_records_match(
["l.email = r.email"], recall=0.3
)
linker.estimate_u_using_random_sampling(max_pairs=1e5, seed=1)
blocking_rule = "l.first_name = r.first_name and l.surname = r.surname"
linker.estimate_parameters_using_expectation_maximisation(blocking_rule)
blocking_rule = "l.dob = r.dob"
linker.estimate_parameters_using_expectation_maximisation(blocking_rule)
df_predict = linker.predict()
linker.comparison_viewer_dashboard(
df_predict, os.path.join(tmp_path, "test_scv_spark.html"), True, 2
)
df_clusters = linker.cluster_pairwise_predictions_at_threshold(df_predict, 0.2)
linker.cluster_studio_dashboard(
df_predict,
df_clusters,
cluster_ids=[0, 4],
cluster_names=["cluster_0", "cluster_4"],
out_path=os.path.join(tmp_path, "test_cluster_studio.html"),
)
linker.unlinkables_chart(source_dataset="Testing")
# Test that writing to files works as expected
# spark_csv_read = lambda x: linker.spark.read.csv(x, header=True).toPandas()
# _test_write_functionality(linker, spark_csv_read)
# Check spark tables are being registered correctly
StructType(
[
StructField("firstname", StringType(), True),
StructField("lastname", StringType(), True),
]
)
register_roc_data(linker)
linker.roc_chart_from_labels_table("labels")
linker.accuracy_chart_from_labels_table("labels")
linker.confusion_matrix_from_labels_table("labels")
record = {
"unique_id": 1,
"first_name": "John",
"surname": "Smith",
"dob": "1971-05-24",
"city": "London",
"email": ["[email protected]"],
"group": 10000,
}
linker.find_matches_to_new_records(
[record], blocking_rules=[], match_weight_threshold=-10000
)
# Test differing inputs are accepted
settings["link_type"] = "link_only"
linker = SparkLinker(
[df_spark, df_spark.toPandas()],
settings,
break_lineage_method="checkpoint",
num_partitions_on_repartition=2,
)
# Test saving and loading
path = os.path.join(tmp_path, "model.json")
linker.save_model_to_json(path)
linker_2 = SparkLinker(df_spark)
linker_2.load_model(path)
linker_2.load_settings(path)
linker_2.load_settings_from_json(path)
SparkLinker(df_spark, settings_dict=path)
def METHOD_NAME(df_spark):
settings = get_settings_dict()
settings["link_type"] = "link_only"
settings["source_dataset_column_name"] = "source_dataset"
df_spark_a = df_spark.withColumn("source_dataset", f.lit("my_left_ds"))
df_spark_b = df_spark.withColumn("source_dataset", f.lit("my_right_ds"))
linker = SparkLinker(
[df_spark_a, df_spark_b],
settings,
break_lineage_method="checkpoint",
num_partitions_on_repartition=2,
)
df_predict = linker.predict().as_pandas_dataframe()
assert len(df_predict) == 7257
assert set(df_predict.source_dataset_l.values) == {"my_left_ds"}
assert set(df_predict.source_dataset_r.values) == {"my_right_ds"}
@pytest.mark.parametrize(
("df"),
[
pytest.param(
pd.read_csv("./tests/datasets/fake_1000_from_splink_demos.csv"),
id="Spark load from pandas df",
)
],
)
@mark_with_dialects_including("spark")
def test_spark_load_from_file(df, spark):
settings = get_settings_dict()
linker = SparkLinker(
df,
settings,
spark=spark,
)
assert len(linker.predict().as_pandas_dataframe()) == 3167 |
test storage mock api | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import tempfile
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
from .... import oscar as mo
from .... import tensor as mt
from ....core import tile
from ....serialization import AioDeserializer, AioSerializer
from ....storage import StorageLevel
from ....tests.core import require_ray
from ....utils import get_next_port, lazy_import
from ...cluster import MockClusterAPI
from ...meta import MockMetaAPI
from ...session import MockSessionAPI
from ...web import WebActor
from ..api import MockStorageAPI, WebStorageAPI
ray = lazy_import("ray")
vineyard = lazy_import("vineyard")
require_lib = lambda x: x
storage_configs = []
# plasma backend
plasma_storage_size = 10 * 1024 * 1024
if sys.platform == "darwin":
plasma_dir = "/tmp"
else:
plasma_dir = "/dev/shm"
plasma_setup_params = dict(
store_memory=plasma_storage_size, plasma_directory=plasma_dir, check_dir_size=False
)
if not (sys.platform.lower().startswith("win") or pa.__version__ >= "12.0.0"):
storage_configs.append({"plasma": plasma_setup_params})
# ray backend
if ray is not None:
require_lib = require_ray
storage_configs.append({"ray": dict()})
# vineyard
if vineyard is not None:
storage_configs.append({"vineyard": dict(vineyard_size="256M")})
# shared_memory
storage_configs.append({"shared_memory": dict()})
@pytest.mark.asyncio
@pytest.mark.parametrize("storage_configs", storage_configs)
@pytest.mark.parametrize(
"ray_start_regular", [{"enable": ray is not None}], indirect=True
)
@require_lib
async def METHOD_NAME(ray_start_regular, storage_configs):
start_method = "fork" if sys.platform != "win32" else None
pool = await mo.create_actor_pool(
"127.0.0.1",
2,
labels=["main", "numa-0", "io"],
subprocess_start_method=start_method,
)
async with pool:
session_id = "mock_session_id"
storage_api = await MockStorageAPI.create(
address=pool.external_address,
session_id=session_id,
storage_configs=storage_configs,
)
# test put and get
value1 = np.random.rand(10, 10)
await storage_api.put("data1", value1)
get_value1 = await storage_api.get("data1")
np.testing.assert_array_equal(value1, get_value1)
value2 = pd.DataFrame(
{
"col1": [str(i) for i in range(10)],
"col2": np.random.randint(0, 100, (10,)),
}
)
await storage_api.put("data2", value2)
get_value2 = await storage_api.get("data2")
pd.testing.assert_frame_equal(value2, get_value2)
sliced_value = await storage_api.get(
"data2", conditions=[slice(3, 5), slice(None, None)]
)
pd.testing.assert_frame_equal(value2.iloc[3:5, :], sliced_value)
infos = await storage_api.get_infos("data2")
assert infos[0].store_size > 0
await storage_api.delete("data2")
buffers = await AioSerializer(value2).run()
size = sum(getattr(buf, "nbytes", len(buf)) for buf in buffers)
# test open_reader and open_writer
writer = await storage_api.open_writer("write_key", size, StorageLevel.MEMORY)
async with writer:
for buf in buffers:
await writer.write(buf)
reader = await storage_api.open_reader("write_key")
async with reader:
read_value = await AioDeserializer(reader).run()
pd.testing.assert_frame_equal(value2, read_value)
await MockStorageAPI.cleanup(pool.external_address)
@pytest.mark.asyncio
async def test_web_storage_api():
from ..api.web import StorageWebAPIHandler
tempdir = tempfile.mkdtemp()
start_method = "fork" if sys.platform != "win32" else None
pool = await mo.create_actor_pool(
"127.0.0.1", 1, subprocess_start_method=start_method
)
async with pool:
session_id = "mock_session_id"
await MockClusterAPI.create(address=pool.external_address)
await MockSessionAPI.create(
session_id=session_id, address=pool.external_address
)
meta_api = await MockMetaAPI.create(
session_id=session_id, address=pool.external_address
)
await MockStorageAPI.create(
address=pool.external_address,
session_id=session_id,
storage_configs={
"shared_memory": dict(),
"disk": dict(root_dirs=[tempdir]),
},
)
web_config = {
"port": get_next_port(),
"web_handlers": {
StorageWebAPIHandler.get_root_pattern(): StorageWebAPIHandler
},
}
await mo.create_actor(WebActor, web_config, address=pool.external_address)
web_storage_api = WebStorageAPI(
session_id, f'http://127.0.0.1:{web_config["port"]}', "numa-0"
)
value = np.random.rand(10, 10)
t = mt.random.rand(10, 10)
t = tile(t)
await meta_api.set_chunk_meta(
t.chunks[0], bands=[(pool.external_address, "numa-0")]
)
await web_storage_api.put(t.chunks[0].key, value)
ret_value = await web_storage_api.get(t.chunks[0].key)
np.testing.assert_array_equal(value, ret_value)
sliced_value = await web_storage_api.get(
t.chunks[0].key, conditions=[slice(3, 5), slice(None, None)]
)
np.testing.assert_array_equal(value[3:5, :], sliced_value)
infos = await web_storage_api.get_infos(t.chunks[0].key)
assert len(infos) == 1
assert infos[0].level == StorageLevel.MEMORY
assert infos[0].memory_size == t.chunks[0].nbytes
await MockStorageAPI.cleanup(pool.external_address)
await MockClusterAPI.cleanup(pool.external_address) |
state bar | from logging import DEBUG
import pytest
from mitmproxy.proxy import commands
from mitmproxy.proxy import events
from mitmproxy.proxy import layer
from mitmproxy.proxy.context import Context
from test.mitmproxy.proxy import tutils
class TestLayer:
def test_continue(self, tctx: Context):
class TLayer(layer.Layer):
def _handle_event(
self, event: events.Event
) -> layer.CommandGenerator[None]:
yield commands.OpenConnection(self.context.server)
yield commands.OpenConnection(self.context.server)
assert (
tutils.Playbook(TLayer(tctx))
<< commands.OpenConnection(tctx.server)
>> tutils.reply(None)
<< commands.OpenConnection(tctx.server)
>> tutils.reply(None)
)
def test_debug_messages(self, tctx: Context):
tctx.server.id = "serverid"
class TLayer(layer.Layer):
debug = " "
def _handle_event(
self, event: events.Event
) -> layer.CommandGenerator[None]:
yield from self.state(event)
def state_foo(self, event: events.Event) -> layer.CommandGenerator[None]:
assert isinstance(event, events.Start)
yield commands.OpenConnection(self.context.server)
self.state = self.METHOD_NAME
state = state_foo
def METHOD_NAME(self, event: events.Event) -> layer.CommandGenerator[None]:
assert isinstance(event, events.DataReceived)
yield commands.Log("baz")
tlayer = TLayer(tctx)
assert (
tutils.Playbook(tlayer, hooks=True, logs=True)
<< commands.Log(" >> Start({})", DEBUG)
<< commands.Log(
" << OpenConnection({'connection': Server({'id': '…rverid', 'address': None})})",
DEBUG,
)
<< commands.OpenConnection(tctx.server)
>> events.DataReceived(tctx.client, b"foo")
<< commands.Log(" >! DataReceived(client, b'foo')", DEBUG)
>> tutils.reply(None, to=-3)
<< commands.Log(
" >> Reply(OpenConnection({'connection': Server({'id': '…rverid', 'address': None, "
"'state': <ConnectionState.OPEN: 3>, 'timestamp_start': 1624544785})}), None)",
DEBUG,
)
<< commands.Log(" !> DataReceived(client, b'foo')", DEBUG)
<< commands.Log("baz")
)
assert repr(tlayer) == "TLayer(state: bar)"
def test_debug_shorten(self, tctx):
t = layer.Layer(tctx)
t.debug = " "
assert t._Layer__debug("x" * 600).message == " " + "x" * 512 + "…"
assert t._Layer__debug("x" * 600).message == " " + "x" * 256 + "…"
assert t._Layer__debug("foo").message == " foo"
class TestNextLayer:
def test_simple(self, tctx: Context):
nl = layer.NextLayer(tctx, ask_on_start=True)
nl.debug = " "
playbook = tutils.Playbook(nl, hooks=True)
assert (
playbook
<< layer.NextLayerHook(nl)
>> tutils.reply()
>> events.DataReceived(tctx.client, b"foo")
<< layer.NextLayerHook(nl)
>> tutils.reply()
>> events.DataReceived(tctx.client, b"bar")
<< layer.NextLayerHook(nl)
)
assert nl.data_client() == b"foobar"
assert nl.data_server() == b""
nl.layer = tutils.EchoLayer(tctx)
assert (
playbook
>> tutils.reply()
<< commands.SendData(tctx.client, b"foo")
<< commands.SendData(tctx.client, b"bar")
)
def test_late_hook_reply(self, tctx: Context):
"""
        Properly handle the case where we receive an additional event while we are
a reply from the proxy core.
"""
nl = layer.NextLayer(tctx)
playbook = tutils.Playbook(nl)
assert (
playbook
>> events.DataReceived(tctx.client, b"foo")
<< layer.NextLayerHook(nl)
>> events.DataReceived(tctx.client, b"bar")
)
assert nl.data_client() == b"foo" # "bar" is paused.
nl.layer = tutils.EchoLayer(tctx)
assert (
playbook
>> tutils.reply(to=-2)
<< commands.SendData(tctx.client, b"foo")
<< commands.SendData(tctx.client, b"bar")
)
@pytest.mark.parametrize("layer_found", [True, False])
def test_receive_close(self, tctx: Context, layer_found: bool):
"""Test that we abort a client connection which has disconnected without any layer being found."""
nl = layer.NextLayer(tctx)
playbook = tutils.Playbook(nl)
assert (
playbook
>> events.DataReceived(tctx.client, b"foo")
<< layer.NextLayerHook(nl)
>> events.ConnectionClosed(tctx.client)
)
if layer_found:
nl.layer = tutils.RecordLayer(tctx)
assert playbook >> tutils.reply(to=-2)
assert isinstance(nl.layer.event_log[-1], events.ConnectionClosed)
else:
assert (
playbook
>> tutils.reply(to=-2)
<< commands.CloseConnection(tctx.client)
<< None
)
def test_func_references(self, tctx: Context):
nl = layer.NextLayer(tctx)
playbook = tutils.Playbook(nl)
assert (
playbook
>> events.DataReceived(tctx.client, b"foo")
<< layer.NextLayerHook(nl)
)
nl.layer = tutils.EchoLayer(tctx)
handle = nl.handle_event
playbook >> tutils.reply()
playbook << commands.SendData(tctx.client, b"foo")
assert playbook
(sd,) = handle(events.DataReceived(tctx.client, b"bar"))
assert isinstance(sd, commands.SendData)
def test_repr(self, tctx: Context):
nl = layer.NextLayer(tctx)
nl.layer = tutils.EchoLayer(tctx)
assert repr(nl)
assert nl.stack_pos
assert nl.layer.stack_pos |
seeds | import pytest
from dbt.tests.util import run_dbt
models__sample_model_sql = """
select * from {{ source("raw", "sample_seed") }}
"""
models__schema_yml = """
version: 2
sources:
- name: raw
database: "{{ target.database }}"
schema: "{{ target.schema }}"
tables:
- name: sample_seed
columns:
- name: email
tests:
- not_null:
severity: "{{ 'error' if var('strict', false) else 'warn' }}"
models:
- name: sample_model
columns:
- name: email
tests:
- not_null:
severity: "{{ 'error' if var('strict', false) else 'warn' }}"
"""
seeds__sample_seed_csv = """id,first_name,last_name,email,gender,ip_address,updated_at
1,Judith,Kennedy,[email protected],Female,54.60.24.128,2015-12-24 12:19:28
2,Arthur,Kelly,[email protected],Male,62.56.24.215,2015-10-28 16:22:15
3,Rachel,Moreno,[email protected],Female,31.222.249.23,2016-04-05 02:05:30
4,Ralph,Turner,[email protected],Male,157.83.76.114,2016-08-08 00:06:51
5,Laura,Gonzales,[email protected],Female,30.54.105.168,2016-09-01 08:25:38
6,Katherine,Lopez,null,Female,169.138.46.89,2016-08-30 18:52:11
7,Jeremy,Hamilton,[email protected],Male,231.189.13.133,2016-07-17 02:09:46
8,Heather,Rose,[email protected],Female,87.165.201.65,2015-12-29 22:03:56
9,Gregory,Kelly,[email protected],Male,154.209.99.7,2016-03-24 21:18:16
10,Rachel,Lopez,[email protected],Female,237.165.82.71,2016-08-20 15:44:49
11,Donna,Welch,[email protected],Female,103.33.110.138,2016-02-27 01:41:48
12,Russell,Lawrence,[email protected],Male,189.115.73.4,2016-06-11 03:07:09
13,Michelle,Montgomery,[email protected],Female,243.220.95.82,2016-06-18 16:27:19
14,Walter,Castillo,null,Male,71.159.238.196,2016-10-06 01:55:44
15,Robin,Mills,[email protected],Female,172.190.5.50,2016-10-31 11:41:21
16,Raymond,Holmes,[email protected],Male,148.153.166.95,2016-10-03 08:16:38
17,Gary,Bishop,[email protected],Male,161.108.182.13,2016-08-29 19:35:20
18,Anna,Riley,[email protected],Female,253.31.108.22,2015-12-11 04:34:27
19,Sarah,Knight,[email protected],Female,222.220.3.177,2016-09-26 00:49:06
20,Phyllis,Fox,[email protected],Female,163.191.232.95,2016-08-21 10:35:19
"""
tests__sample_test_sql = """
{{ config(severity='error' if var('strict', false) else 'warn') }}
select * from {{ ref("sample_model") }} where email is null
"""
@pytest.fixture(scope="class")
def models():
return {"sample_model.sql": models__sample_model_sql, "schema.yml": models__schema_yml}
@pytest.fixture(scope="class")
def METHOD_NAME():
return {"sample_seed.csv": seeds__sample_seed_csv}
@pytest.fixture(scope="class")
def tests():
return {"null_email.sql": tests__sample_test_sql}
@pytest.fixture(scope="class")
def project_config_update():
return {
"config-version": 2,
"seed-paths": ["seeds"],
"test-paths": ["tests"],
"seeds": {
"quote_columns": False,
},
}
class TestSeverity:
@pytest.fixture(scope="class", autouse=True)
def seed_and_run(self, project):
run_dbt(["seed"])
run_dbt(["run"])
def test_generic_default(self, project):
results = run_dbt(["test", "--select", "test_type:generic"])
assert len(results) == 2
assert all([r.status == "warn" for r in results])
assert all([r.failures == 2 for r in results])
def test_generic_strict(self, project):
results = run_dbt(
["test", "--select", "test_type:generic", "--vars", '{"strict": True}'],
expect_pass=False,
)
assert len(results) == 2
assert all([r.status == "fail" for r in results])
assert all([r.failures == 2 for r in results])
def test_singular_default(self, project):
results = run_dbt(["test", "--select", "test_type:singular"])
assert len(results) == 1
assert all([r.status == "warn" for r in results])
assert all([r.failures == 2 for r in results])
def test_singular_strict(self, project):
results = run_dbt(
["test", "--select", "test_type:singular", "--vars", '{"strict": True}'],
expect_pass=False,
)
assert len(results) == 1
assert all([r.status == "fail" for r in results])
assert all([r.failures == 2 for r in results]) |
id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetGuestConfigurationAssignmentResult',
'AwaitableGetGuestConfigurationAssignmentResult',
'get_guest_configuration_assignment',
'get_guest_configuration_assignment_output',
]
@pulumi.output_type
class GetGuestConfigurationAssignmentResult:
"""
    Guest configuration assignment is an association between a machine and a guest configuration.
"""
def __init__(__self__, METHOD_NAME=None, location=None, name=None, properties=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
ARM resource id of the guest configuration assignment.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Region where the VM is located.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Name of the guest configuration assignment.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.GuestConfigurationAssignmentPropertiesResponse':
"""
Properties of the Guest configuration assignment.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetGuestConfigurationAssignmentResult(GetGuestConfigurationAssignmentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGuestConfigurationAssignmentResult(
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
properties=self.properties,
system_data=self.system_data,
type=self.type)
def get_guest_configuration_assignment(guest_configuration_assignment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
vm_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGuestConfigurationAssignmentResult:
"""
Get information about a guest configuration assignment
:param str guest_configuration_assignment_name: The guest configuration assignment name.
:param str resource_group_name: The resource group name.
:param str vm_name: The name of the virtual machine.
"""
__args__ = dict()
__args__['guestConfigurationAssignmentName'] = guest_configuration_assignment_name
__args__['resourceGroupName'] = resource_group_name
__args__['vmName'] = vm_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:guestconfiguration/v20220125:getGuestConfigurationAssignment', __args__, opts=opts, typ=GetGuestConfigurationAssignmentResult).value
return AwaitableGetGuestConfigurationAssignmentResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_guest_configuration_assignment)
def get_guest_configuration_assignment_output(guest_configuration_assignment_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
vm_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGuestConfigurationAssignmentResult]:
"""
Get information about a guest configuration assignment
:param str guest_configuration_assignment_name: The guest configuration assignment name.
:param str resource_group_name: The resource group name.
:param str vm_name: The name of the virtual machine.
"""
... |
clear markers | """
interface interactor.
"""
from .config import interface_color, pick_radius
from .interactor import BaseInteractor
from .util import clip, setpar
MAX_ROUGH=1
class InterfaceInteractor(BaseInteractor):
"""
Control the roughness of the layers.
"""
def __init__(self, profile):
BaseInteractor.__init__(self, profile)
ax = profile.axes
# markers for roughness
style = dict(linestyle = '',
transform = profile.xcoords,
marker = 's', #square
markersize = 7,
color = interface_color,
alpha = 0.6,
pickradius = pick_radius,
zorder = 8, #Prefer this to other lines
visible = False,
)
self.markers=[ax.plot([0],[0.05], label=label, **style)[0]
for label in ('interface marker L','interface marker R')]
# lines for roughness
style = dict(linestyle = '-',
transform = profile.xcoords,
marker = '',
color = interface_color,
visible = False)
self.lines=[ax.plot([0,0],[0.05,0.05], label=label, **style)[0]
for label in ('interface line L','interface line R')]
self.connect_markers(self.markers)
self._left = self._right = None
def set_layer(self):
"""
Move markers to the new layer
"""
n = self.profile.layer_num
z = self.profile.boundary[1:-1]
show_left = n is not None and n>0
show_right = n is not None and n < len(z)
if show_left:
self._left = self.profile.sample_layer(n-1).interface
else:
self._left = None
if show_right:
self._right = self.profile.sample_layer(n).interface
else:
self._right = None
#self.update_markers()
def update_markers(self):
"""
Draw the new roughness on the graph.
"""
n = self.profile.layer_num
z = self.profile.boundary[1:-1]
show_left = self._left is not None
show_right = self._right is not None
self.markers[0].set(visible=show_left)
self.lines[0].set(visible=show_left)
if show_left:
self.markers[0].set_xdata([z[n-1]+self._left.value])
self.lines[0].set_xdata([z[n-1], z[n-1]+self._left.value])
self.markers[1].set(visible=show_right)
self.lines[1].set(visible=show_right)
if show_right:
self.markers[1].set_xdata([z[n]-self._right.value])
self.lines[1].set_xdata([z[n],z[n]-self._right.value])
def METHOD_NAME(self):
"""
Remove interface markers from the graph.
"""
BaseInteractor.METHOD_NAME(self)
for h in self.lines:
h.remove()
self.lines = []
def save(self, event):
"""
Remember the interface for this layer and the next so that we
can restore on Esc.
"""
if self._left is not None:
self._left_value = self._left.value
if self._right is not None:
self._right_value = self._right.value
def restore(self, event):
"""
Restore the roughness for this layer.
"""
if self._left is not None:
self._left.value = self._left_value
if self._right is not None:
self._right.value = self._right_value
def drag(self, event):
"""
Process move to a new position, making sure that the move is allowed.
"""
z = self.profile.boundary
n = self.profile.layer_num
side = self.markers.index(event.artist)
if side == 0: # Left
limit = min(z[n]-z[n-1], z[n+1]-z[n])
v = clip(event.xdata-z[n], 0, limit/MAX_ROUGH)
setpar(self._left, v)
else: # Right
limit = min(z[n+1]-z[n], z[n+2]-z[n+1])
v = clip(z[n+1] - event.xdata, 0, limit/MAX_ROUGH)
setpar(self._right, v)
#self.update_markers() |
test longer | # (C) Copyright 2005-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
import unittest
from pyparsing import ParseException
from enable.savage.svg.pathdata import (
Sequence, closePath, coordinatePair, curve, ellipticalArc, horizontalLine,
lineTo, moveTo, number, quadraticBezierCurveto,
smoothQuadraticBezierCurveto, svg, verticalLine, CaselessLiteral,
)
class TestCaselessLiteral(unittest.TestCase):
def test_instantiation(self):
# regression test for https://github.com/enthought/enable/issues/887
# observed with pyparsing v >= 3
# we just test that instantiating the class doesnt raise exceptions
CaselessLiteral("test")
class TestNumber(unittest.TestCase):
parser = number
valid = ["1.e10", "1e2", "1e+4", "1e-10", "1.", "1.0", "0.1", ".2"]
invalid = ["e10", ".", "f", ""]
def testValid(self):
for num in self.valid:
self.assertEqual(float(num), self.parser.parseString(num)[0])
def testInvalid(self):
for num in self.invalid:
self.assertRaises(
ParseException, lambda: self.parser.parseString(num)
)
class TestNumberSequence(unittest.TestCase):
def testFloatsWithNoSpacing(self):
self.assertEqual(
[0.4, 0.4], list(Sequence(number).parseString("0.4.4"))
)
class TestCoords(unittest.TestCase):
def testCoordPair(self):
self.assertEqual(
coordinatePair.parseString("100 100")[0], (100.0, 100.0)
)
self.assertEqual(coordinatePair.parseString("100,2E7")[0], (100, 2e7))
def testCoordPairWithMinus(self):
self.assertEqual(
coordinatePair.parseString("100-100")[0], (100.0, -100.0)
)
def testCoordPairWithPlus(self):
self.assertEqual(
coordinatePair.parseString("100+100")[0], (100.0, 100.0)
)
def testCoordPairWithPlusAndExponent(self):
self.assertEqual(
coordinatePair.parseString("100+1e+2")[0], (100.0, 100.0)
)
def testNotAPair(self):
self.assertRaises(ParseException, coordinatePair.parseString, "100")
def testNoSpacing(self):
self.assertEqual(coordinatePair.parseString("-1.1.1")[0], (-1.1, 0.1))
class TestMoveTo(unittest.TestCase):
def testSimple(self):
self.assertEqual(
moveTo.parseString("M 100 100").asList()[0],
["M", [(100.0, 100.0)]],
)
def METHOD_NAME(self):
self.assertEqual(
moveTo.parseString("m 100 100 94 1e7").asList()[0],
["m", [(100.0, 100.0), (94, 1e7)]],
)
def testLine(self):
self.assertEqual(
lineTo.parseString("l 300 100").asList()[0],
["l", [(300.0, 100.0)]],
)
def testHorizonal(self):
self.assertEqual(
horizontalLine.parseString("H 100 5 20").asList()[0],
["H", [100.0, 5.0, 20.0]],
)
def testVertical(self):
self.assertEqual(
verticalLine.parseString("V 100 5 20").asList()[0],
["V", [100.0, 5.0, 20.0]],
)
class TestEllipticalArc(unittest.TestCase):
def testParse(self):
self.assertEqual(
ellipticalArc.parseString("a25,25 -30 0,1 50,-25").asList()[0],
["a", [[(25.0, 25.0), -30.0, (False, True), (50.0, -25.0)]]],
)
def testExtraArgs(self):
self.assertEqual(
ellipticalArc.parseString(
"a25,25 -30 0,1 50,-25, 10, 10"
).asList()[0],
["a", [[(25.0, 25.0), -30.0, (False, True), (50.0, -25.0)]]],
)
class TestSmoothQuadraticBezierCurveto(unittest.TestCase):
def testParse(self):
self.assertEqual(
smoothQuadraticBezierCurveto.parseString("t1000,300").asList()[0],
["t", [(1000.0, 300.0)]],
)
class TestQuadraticBezierCurveto(unittest.TestCase):
def testParse(self):
self.assertEqual(
quadraticBezierCurveto.parseString("Q1000,300 200 5").asList()[0],
["Q", [[(1000.0, 300.0), (200.0, 5.0)]]],
)
class TestCurve(unittest.TestCase):
def testParse(self):
self.assertEqual(
curve.parseString(
"C 100 200 300 400 500 600 100 200 300 400 500 600"
).asList()[0],
[
"C",
[
[(100.0, 200.0), (300.0, 400.0), (500.0, 600.0)],
[(100.0, 200.0), (300.0, 400.0), (500.0, 600.0)],
],
],
)
class TestClosePath(unittest.TestCase):
def testParse(self):
self.assertEqual(
closePath.parseString("Z").asList()[0], ("Z", (None,))
)
class TestSVG(unittest.TestCase):
def testParse(self):
path = ("M 100 100 L 300 100 L 200 300 z a 100,100 -4 0,1 25 25 "
"z T300 1000 t40, 50 h4 42 2 2,1v1,1,1 Z Q 34,10 1 1")
r = svg.parseString(path).asList()
expected = [
["M", [(100, 100)]],
["L", [(300, 100)]],
["L", [(200, 300)]],
("Z", (None,)),
["a", [[(100, 100), -4, (False, True), (25, 25)]]],
("Z", (None,)),
["T", [(300, 1000)]],
["t", [(40, 50)]],
["h", [4, 42, 2, 2, 1]],
["v", [1, 1, 1]],
("Z", (None,)),
["Q", [[(34, 10), (1, 1)]]],
]
self.assertEqual(len(r), len(expected))
for a, b in zip(expected, r):
self.assertEqual(a, b) |
print dict | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2019, Myrtle Software Limited. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
from metrics import word_error_rate
class Optimization(Enum):
"""Various levels of Optimization.
    WARNING: This might have an effect on model accuracy."""
nothing = 0
mxprO0 = 1
mxprO1 = 2
mxprO2 = 3
mxprO3 = 4
AmpOptimizations = {Optimization.mxprO0: "O0",
Optimization.mxprO1: "O1",
Optimization.mxprO2: "O2",
Optimization.mxprO3: "O3"}
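# The mapping above mirrors the opt_level strings understood by NVIDIA Apex AMP;
# a typical (illustrative, not part of this module) use would be:
#     if optim_level in AmpOptimizations:
#         model, optimizer = amp.initialize(
#             model, optimizer, opt_level=AmpOptimizations[optim_level])
# Optimization.nothing has no entry, so lookups should be guarded as above.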
def add_blank_label(labels):
if not isinstance(labels, list):
raise ValueError("labels must be a list of symbols")
labels.append("<BLANK>")
return labels
def __rnnt_decoder_predictions_tensor(tensor, labels):
"""
    Takes the output of the greedy RNNT decoder and converts it to strings.
    Args:
        tensor: model output tensor
        labels: A list of labels
    Returns:
        hypotheses: list of decoded strings, one per batch element
"""
hypotheses = []
labels_map = dict([(i, labels[i]) for i in range(len(labels))])
# iterate over batch
for ind in range(len(tensor)):
hypothesis = ''.join([labels_map[c] for c in tensor[ind]])
hypotheses.append(hypothesis)
return hypotheses
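# Minimal illustration with made-up values: given labels ['a', 'b', '<BLANK>']
# and tensor [[0, 1, 0]], the function returns ['aba']: each integer in a batch
# entry is mapped through labels and the resulting characters are joined.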
def __gather_predictions(predictions_list: list, labels: list) -> list:
results = []
for prediction in predictions_list:
results += __rnnt_decoder_predictions_tensor(prediction, labels=labels)
return results
def __gather_transcripts(transcript_list: list, transcript_len_list: list,
labels: list) -> list:
results = []
labels_map = dict([(i, labels[i]) for i in range(len(labels))])
for i, t in enumerate(transcript_list):
target = t.numpy().tolist()
reference = ''.join([labels_map[c] for c in target])
results.append(reference)
return results
def process_evaluation_batch(tensors: dict, global_vars: dict, labels: list):
"""
    Processes the results of an iteration and saves them in global_vars
Args:
tensors: dictionary with results of an evaluation iteration, e.g. loss, predictions, transcript, and output
        global_vars: dictionary where the processed results of the iteration are saved
labels: A list of labels
"""
for kv, v in tensors.items():
if kv.startswith('predictions'):
global_vars['predictions'] += __gather_predictions(
v, labels=labels)
elif kv.startswith('transcript_length'):
transcript_len_list = v
elif kv.startswith('transcript'):
transcript_list = v
global_vars['transcripts'] += __gather_transcripts(transcript_list,
transcript_len_list,
labels=labels)
def process_evaluation_epoch(global_vars: dict, tag=None):
"""
    Processes results from each worker at the end of evaluation and combines them into the final result
Args:
global_vars: dictionary containing information of entire evaluation
    Returns:
        wer: final word error rate
"""
hypotheses = global_vars['predictions']
references = global_vars['transcripts']
wer, scores, num_words = word_error_rate(
hypotheses=hypotheses, references=references)
return wer
def METHOD_NAME(d):
maxLen = max([len(ii) for ii in d.keys()])
fmtString = '\t%' + str(maxLen) + 's : %s'
print('Arguments:')
for keyPair in sorted(d.items()):
print(fmtString % keyPair) |
cleanup | #!/usr/bin/env python
import sys
from direct.showbase.ShowBase import ShowBase
from direct.showbase.InputStateGlobal import inputState
from panda3d.core import AmbientLight
from panda3d.core import DirectionalLight
from panda3d.core import LPoint3
from panda3d.core import TransformState
from panda3d.core import BitMask32
from panda3d.bullet import BulletWorld
from panda3d.bullet import BulletBoxShape
from panda3d.bullet import BulletSphereShape
from panda3d.bullet import BulletRigidBodyNode
from panda3d.bullet import BulletConeTwistConstraint
from panda3d.bullet import BulletDebugNode
class Game(ShowBase):
def __init__(self):
ShowBase.__init__(self)
base.set_background_color(0.1, 0.1, 0.8, 1)
base.set_frame_rate_meter(True)
base.cam.set_pos(0, -20, 5)
base.cam.look_at(0, 0, 0)
# Light
alight = AmbientLight('ambientLight')
alight.set_color((0.5, 0.5, 0.5, 1))
alightNP = render.attach_new_node(alight)
dlight = DirectionalLight('directionalLight')
dlight.set_direction((1, 1, -1))
dlight.set_color((0.7, 0.7, 0.7, 1))
dlightNP = render.attach_new_node(dlight)
render.clear_light()
render.set_light(alightNP)
render.set_light(dlightNP)
# Input
self.accept('escape', self.do_exit)
self.accept('r', self.do_reset)
self.accept('f1', base.toggle_wireframe)
self.accept('f2', base.toggle_texture)
self.accept('f3', self.toggle_debug)
self.accept('f5', self.do_screenshot)
self.accept('enter', self.do_shoot)
# Task
taskMgr.add(self.update, 'updateWorld')
# Physics
self.setup()
def do_exit(self):
self.METHOD_NAME()
sys.exit(1)
def do_reset(self):
self.METHOD_NAME()
self.setup()
def toggle_debug(self):
if self.debugNP.is_hidden():
self.debugNP.show()
else:
self.debugNP.hide()
def do_screenshot(self):
base.screenshot('Bullet')
def do_shoot(self):
# Get from/to points from mouse click
pMouse = base.mouseWatcherNode.get_mouse()
pFrom = LPoint3()
pTo = LPoint3()
base.camLens.extrude(pMouse, pFrom, pTo)
pFrom = render.get_relative_point(base.cam, pFrom)
pTo = render.get_relative_point(base.cam, pTo)
# Calculate initial velocity
v = pTo - pFrom
v.normalize()
v *= 100.0
# Create bullet
shape = BulletSphereShape(0.3)
body = BulletRigidBodyNode('Bullet')
bodyNP = self.worldNP.attach_new_node(body)
bodyNP.node().add_shape(shape)
bodyNP.node().set_mass(1.0)
bodyNP.node().set_linear_velocity(v)
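        # Continuous collision detection: with a motion threshold this small the
        # sphere is swept between frames, so a fast-moving bullet cannot tunnel
        # through other bodies within a single physics step.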
        bodyNP.node().set_ccd_motion_threshold(1e-7)
        bodyNP.node().set_ccd_swept_sphere_radius(0.50)
bodyNP.set_collide_mask(BitMask32.all_on())
bodyNP.set_pos(pFrom)
visNP = loader.load_model('models/ball.egg')
visNP.set_scale(0.8)
visNP.reparent_to(bodyNP)
self.world.attach(bodyNP.node())
# Remove the bullet again after 2 seconds
taskMgr.do_method_later(2, self.do_remove, 'doRemove',
extraArgs=[bodyNP], appendTask=True)
def do_remove(self, bodyNP, task):
self.world.remove(bodyNP.node())
bodyNP.remove_node()
return task.done
def update(self, task):
dt = globalClock.get_dt()
self.world.do_physics(dt, 20, 1.0/180.0)
return task.cont
def METHOD_NAME(self):
self.worldNP.remove_node()
self.worldNP = None
self.world = None
def setup(self):
self.worldNP = render.attach_new_node('World')
# World
self.debugNP = self.worldNP.attach_new_node(BulletDebugNode('Debug'))
self.debugNP.show()
self.debugNP.node().show_wireframe(True)
self.debugNP.node().show_constraints(True)
self.debugNP.node().show_bounding_boxes(False)
self.debugNP.node().show_normals(False)
self.world = BulletWorld()
self.world.set_gravity((0, 0, -9.81))
self.world.set_debug_node(self.debugNP.node())
# Box A
shape = BulletBoxShape((0.5, 0.5, 0.5))
bodyA = BulletRigidBodyNode('Box A')
bodyNP = self.worldNP.attach_new_node(bodyA)
bodyNP.node().add_shape(shape)
bodyNP.set_collide_mask(BitMask32.all_on())
bodyNP.set_pos(-2, 0, 4)
visNP = loader.load_model('models/box.egg')
visNP.clear_model_nodes()
visNP.reparent_to(bodyNP)
self.world.attach(bodyA)
# Box B
shape = BulletBoxShape((0.5, 0.5, 0.5))
bodyB = BulletRigidBodyNode('Box B')
bodyNP = self.worldNP.attach_new_node(bodyB)
bodyNP.node().add_shape(shape)
bodyNP.node().set_mass(1.0)
bodyNP.node().set_deactivation_enabled(False)
bodyNP.set_collide_mask(BitMask32.all_on())
bodyNP.set_pos(0, 0, 0)
visNP = loader.load_model('models/box.egg')
visNP.clear_model_nodes()
visNP.reparent_to(bodyNP)
self.world.attach(bodyB)
# Cone
frameA = TransformState.make_pos_hpr((0, 0, -2), (0, 0, 90))
frameB = TransformState.make_pos_hpr((-5, 0, 0), (0, 0, 0))
cone = BulletConeTwistConstraint(bodyA, bodyB, frameA, frameB)
cone.set_debug_draw_size(2.0)
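        # set_limit takes the two swing spans and the twist span in degrees
        # (here 30, 45 and 170), followed by softness, bias and relaxation
        # tuning factors for the constraint solver.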
cone.set_limit(30, 45, 170, softness=1.0, bias=0.3, relaxation=8.0)
self.world.attach(cone)
game = Game()
game.run() |
start django | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import pytest
from tests import code, debug, log, net, test_data
from tests.debug import runners, targets
from tests.patterns import some
pytestmark = pytest.mark.timeout(60)
django_server = net.WebServer(net.get_test_server_port(8000, 8100))
class paths:
django1 = test_data / "django1"
app_py = django1 / "app.py"
hello_html = django1 / "templates" / "hello.html"
bad_html = django1 / "templates" / "bad.html"
class lines:
app_py = code.get_marked_line_numbers(paths.app_py)
@pytest.fixture
@pytest.mark.parametrize("run", [runners.launch, runners.attach_connect["cli"]])
def METHOD_NAME(run):
def start(session, multiprocess=False):
# No clean way to kill Django server, expect non-zero exit code
session.expected_exit_code = some.int
session.config.update({"django": True, "subProcess": bool(multiprocess)})
args = ["runserver"]
if not multiprocess:
args += ["--noreload"]
args += ["--", str(django_server.port)]
return run(session, targets.Program(paths.app_py, args=args), cwd=paths.django1)
return start
@pytest.mark.parametrize("bp_target", ["code", "template"])
def test_django_breakpoint_no_multiproc(METHOD_NAME, bp_target):
bp_file, bp_line, bp_name = {
"code": (paths.app_py, lines.app_py["bphome"], "home"),
"template": (paths.hello_html, 8, "Django Template"),
}[bp_target]
bp_var_content = "Django-Django-Test"
with debug.Session() as session:
with METHOD_NAME(session):
breakpoints = session.set_breakpoints(bp_file, [bp_line])
for bp in breakpoints:
# They'll be verified later on for templates.
assert bp["verified"] == (bp_target == "code")
with django_server:
home_request = django_server.get("/home")
if bp_target == "template":
breakpoint_body = session.wait_for_next_event("breakpoint")
assert breakpoint_body["reason"] == "changed"
assert breakpoint_body["breakpoint"]["verified"]
session.wait_for_stop(
"breakpoint",
expected_frames=[
some.dap.frame(some.dap.source(bp_file), line=bp_line, name=bp_name)
],
)
var_content = session.get_variable("content")
assert var_content == some.dict.containing(
{
"name": "content",
"type": "str",
"value": repr(bp_var_content),
"presentationHint": {"attributes": ["rawString"]},
"evaluateName": "content",
"variablesReference": 0,
}
)
session.request_continue()
assert bp_var_content in home_request.response_text()
def test_django_template_exception_no_multiproc(METHOD_NAME):
with debug.Session() as session:
with METHOD_NAME(session):
session.request(
"setExceptionBreakpoints", {"filters": ["raised", "uncaught"]}
)
with django_server:
django_server.get("/badtemplate", log_errors=False)
stop = session.wait_for_stop(
"exception",
expected_frames=[
some.dap.frame(
some.dap.source(paths.bad_html),
line=8,
name="Django TemplateSyntaxError",
)
],
)
# Will stop once in the plugin
exception_info = session.request(
"exceptionInfo", {"threadId": stop.thread_id}
)
assert exception_info == some.dict.containing(
{
"exceptionId": some.str.ending_with("TemplateSyntaxError"),
"breakMode": "always",
"description": some.str.containing("doesnotexist"),
"details": some.dict.containing(
{
"message": some.str.containing("doesnotexist"),
"typeName": some.str.ending_with("TemplateSyntaxError"),
}
),
}
)
session.request_continue()
log.info("Exception will be reported again in {0}", paths.app_py)
session.wait_for_stop("exception")
session.request_continue()
@pytest.mark.parametrize("exc_type", ["handled", "unhandled"])
def test_django_exception_no_multiproc(METHOD_NAME, exc_type):
exc_line = lines.app_py["exc_" + exc_type]
with debug.Session() as session:
with METHOD_NAME(session):
session.request(
"setExceptionBreakpoints", {"filters": ["raised", "uncaught"]}
)
with django_server:
django_server.get("/" + exc_type)
stopped = session.wait_for_stop(
"exception",
expected_frames=[
some.dap.frame(
some.dap.source(paths.app_py),
line=exc_line,
name="bad_route_" + exc_type,
)
],
).body
assert stopped == some.dict.containing(
{
"reason": "exception",
"text": some.str.ending_with("ArithmeticError"),
"description": "Hello",
}
)
exception_info = session.request(
"exceptionInfo", {"threadId": stopped["threadId"]}
)
assert exception_info == {
"exceptionId": some.str.ending_with("ArithmeticError"),
"breakMode": "always",
"description": "Hello",
"details": {
"message": "Hello",
"typeName": some.str.ending_with("ArithmeticError"),
"source": some.path(paths.app_py),
"stackTrace": some.str,
},
}
session.request_continue()
def test_django_breakpoint_multiproc(METHOD_NAME):
bp_line = lines.app_py["bphome"]
bp_var_content = "Django-Django-Test"
with debug.Session() as parent_session:
with METHOD_NAME(parent_session, multiprocess=True):
parent_session.set_breakpoints(paths.app_py, [bp_line])
with parent_session.wait_for_next_subprocess() as child_session:
with child_session.start():
child_session.set_breakpoints(paths.app_py, [bp_line])
with django_server:
home_request = django_server.get("/home")
child_session.wait_for_stop(
"breakpoint",
expected_frames=[
some.dap.frame(
some.dap.source(paths.app_py), line=bp_line, name="home"
)
],
)
var_content = child_session.get_variable("content")
assert var_content == some.dict.containing(
{
"name": "content",
"type": "str",
"value": repr(bp_var_content),
"presentationHint": {"attributes": ["rawString"]},
"evaluateName": "content",
}
)
child_session.request_continue()
assert bp_var_content in home_request.response_text() |
test mcol gga ab ks | #!/usr/bin/env python
# Copyright 2022 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
import unittest
import tempfile
import numpy
import copy
from pyscf import lib, gto, scf, dft
from pyscf import tdscf
try:
import mcfun
except ImportError:
mcfun = None
def setUpModule():
global mol, mf_lda
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = '''
H 0. 0. 0.
H 0. -0.7 0.7
H 0. 0.7 0.7'''
mol.basis = 'uncsto3g'
mol.spin = 1
mol.build()
mf_lda = mol.DKS().set(xc='lda,', conv_tol=1e-12,
chkfile=tempfile.NamedTemporaryFile().name).newton().run()
def tearDownModule():
global mol, mf_lda
mol.stdout.close()
del mol, mf_lda
def diagonalize(a, b, nroots=4):
nocc, nvir = a.shape[:2]
nov = nocc * nvir
a = a.reshape(nov, nov)
b = b.reshape(nov, nov)
h = numpy.block([[a , b ],
[-b.conj(),-a.conj()]])
e = numpy.linalg.eig(numpy.asarray(h))[0]
lowest_e = numpy.sort(e[e.real > 0].real)[:nroots]
lowest_e = lowest_e[lowest_e > 1e-3]
return lowest_e
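# For reference: diagonalize() builds the full TDDFT/RPA response matrix
#
#     [[ A    B  ]
#      [-B*  -A* ]]
#
# whose positive eigenvalues are the excitation energies; the iterative
# td.kernel() results below are checked against this explicit construction.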
class KnownValues(unittest.TestCase):
def test_tddft_lda(self):
td = mf_lda.TDDFT()
es = td.kernel(nstates=4)[0]
a,b = td.get_ab()
e_ref = diagonalize(a, b, 8)
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 5)
self.assertAlmostEqual(lib.fp(es[:3] * 27.2114), 3.157449136045103, 5)
def test_tda_lda(self):
td = mf_lda.TDA()
es = td.kernel(nstates=5)[0]
a,b = td.get_ab()
nocc, nvir = a.shape[:2]
nov = nocc * nvir
e_ref = numpy.linalg.eigh(a.reshape(nov,nov))[0]
self.assertAlmostEqual(abs(es[:3]-e_ref[:3]).max(), 0, 5)
self.assertAlmostEqual(lib.fp(es[:3] * 27.2114), 3.220469947746697, 5)
def test_ab_hf(self):
mf = scf.DHF(mol).run()
self._check_against_ab_ks(mf.TDHF(), 3.162381949829394, 0.11755137875158098)
def test_col_lda_ab_ks(self):
self._check_against_ab_ks(mf_lda.TDDFT(), 2.6168030250127075, 0.10466808380307921)
def test_col_gga_ab_ks(self):
mf_b3lyp = dft.DKS(mol).set(xc='b3lyp5')
mf_b3lyp.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mf_b3lyp.TDDFT(), 2.568259113326634, 0.1531691970098629)
def test_col_mgga_ab_ks(self):
mf_m06l = dft.DKS(mol).set(xc='m06l')
mf_m06l.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mf_m06l.TDDFT(), 3.382341929143924, 0.6409502830461241)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def test_mcol_lda_ab_ks(self):
mcol_lda = dft.UDKS(mol).set(xc='lda,', collinear='mcol')
mcol_lda._numint.spin_samples = 6
mcol_lda.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_lda.TDDFT(), 2.880950836922392, 0.45543578075726204)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def METHOD_NAME(self):
mcol_b3lyp = dft.UDKS(mol).set(xc='b3lyp5', collinear='mcol')
mcol_b3lyp._numint.spin_samples = 6
mcol_b3lyp.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_b3lyp.TDDFT(), 2.753430274014454, 0.607433969753113)
@unittest.skipIf(mcfun is None, "mcfun library not found.")
def test_mcol_mgga_ab_ks(self):
mcol_m06l = dft.UDKS(mol).set(xc='m06l', collinear='mcol')
mcol_m06l._numint.spin_samples = 6
mcol_m06l.__dict__.update(scf.chkfile.load(mf_lda.chkfile, 'scf'))
self._check_against_ab_ks(mcol_m06l.TDDFT(), 14.934345395514491, 9.539340104227188)
def _check_against_ab_ks(self, td, refa, refb):
mf = td._scf
a, b = td.get_ab()
self.assertAlmostEqual(lib.fp(abs(a)), refa, 4)
self.assertAlmostEqual(lib.fp(abs(b)), refb, 4)
ftda = mf.TDA().gen_vind()[0]
ftdhf = td.gen_vind()[0]
n2c = mf.mo_occ.size // 2
nocc = numpy.count_nonzero(mf.mo_occ == 1)
nvir = numpy.count_nonzero(mf.mo_occ == 0) - n2c
numpy.random.seed(2)
x, y = xy = (numpy.random.random((2,nocc,nvir)) +
numpy.random.random((2,nocc,nvir)) * 1j)
ax = numpy.einsum('iajb,jb->ia', a, x)
self.assertAlmostEqual(abs(ax - ftda([x]).reshape(nocc,nvir)).max(), 0, 12)
ab1 = ax + numpy.einsum('iajb,jb->ia', b, y)
ab2 =-numpy.einsum('iajb,jb->ia', b.conj(), x)
ab2-= numpy.einsum('iajb,jb->ia', a.conj(), y)
abxy_ref = ftdhf([xy]).reshape(2,nocc,nvir)
self.assertAlmostEqual(abs(ab1 - abxy_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ab2 - abxy_ref[1]).max(), 0, 12)
if __name__ == "__main__":
print("Full Tests for TD-DKS")
unittest.main() |
delete | # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import uuid
class OriginAccessIdentity(object):
def __init__(self, connection=None, config=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.config = config
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
if name == 'CloudFrontOriginAccessIdentityConfig':
self.config = OriginAccessIdentityConfig()
return self.config
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def update(self, comment=None):
new_config = OriginAccessIdentityConfig(self.connection,
self.config.caller_reference,
self.config.comment)
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_origin_identity_config(self.id, self.etag, new_config)
self.config = new_config
def METHOD_NAME(self):
return self.connection.delete_origin_access_identity(self.id, self.etag)
def uri(self):
return 'origin-access-identity/cloudfront/%s' % self.id
class OriginAccessIdentityConfig(object):
def __init__(self, connection=None, caller_reference='', comment=''):
self.connection = connection
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.comment = comment
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">\n'
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += '</CloudFrontOriginAccessIdentityConfig>\n'
return s
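    # to_xml() produces a payload along these lines (the caller reference is a
    # random UUID unless one was supplied; values shown here are illustrative):
    #   <?xml version="1.0" encoding="UTF-8"?>
    #   <CloudFrontOriginAccessIdentityConfig xmlns="http://cloudfront.amazonaws.com/doc/2009-09-09/">
    #    <CallerReference>8e6b2a7e-...</CallerReference>
    #    <Comment>my comment</Comment>
    #   </CloudFrontOriginAccessIdentityConfig>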
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Comment':
self.comment = value
elif name == 'CallerReference':
self.caller_reference = value
else:
setattr(self, name, value)
class OriginAccessIdentitySummary(object):
def __init__(self, connection=None, id='',
s3_user_id='', comment=''):
self.connection = connection
self.id = id
self.s3_user_id = s3_user_id
self.comment = comment
self.etag = None
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'S3CanonicalUserId':
self.s3_user_id = value
elif name == 'Comment':
self.comment = value
else:
setattr(self, name, value)
def get_origin_access_identity(self):
return self.connection.get_origin_access_identity_info(self.id)
|
test run builder | import shutil
from datetime import datetime
from pathlib import Path
import pytest
from click.testing import CliRunner
from maggma.builders import CopyBuilder
from maggma.cli import run
from maggma.stores import MemoryStore, MongoStore
from monty.serialization import dumpfn
@pytest.fixture()
def mongostore():
store = MongoStore("maggma_test", "test")
store.connect()
store.remove_docs({})
yield store
store.remove_docs({})
store._collection.drop()
@pytest.fixture()
def reporting_store():
store = MongoStore("maggma_test", "reporting")
store.connect()
store.remove_docs({})
yield store
store.remove_docs({})
store._collection.drop()
def test_basic_run():
runner = CliRunner()
result = runner.invoke(run, ["--help"])
assert result.exit_code == 0
# Ensure running without any builders fail
result = runner.invoke(run)
assert result.exit_code != 0
def METHOD_NAME(mongostore):
memorystore = MemoryStore("temp")
builder = CopyBuilder(mongostore, memorystore)
mongostore.update([{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()} for i in range(10)])
runner = CliRunner()
with runner.isolated_filesystem():
dumpfn(builder, "test_builder.json")
result = runner.invoke(run, ["-v", "test_builder.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "SerialProcessor" in result.output
result = runner.invoke(run, ["-vvv", "--no_bars", "test_builder.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
result = runner.invoke(run, ["-v", "-n", "2", "test_builder.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "MultiProcessor" in result.output
result = runner.invoke(run, ["-vvv", "-n", "2", "--no_bars", "test_builder.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
def test_run_builder_chain(mongostore):
memorystore = MemoryStore("temp")
builder1 = CopyBuilder(mongostore, memorystore)
builder2 = CopyBuilder(mongostore, memorystore)
mongostore.update([{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()} for i in range(10)])
runner = CliRunner()
with runner.isolated_filesystem():
dumpfn([builder1, builder2], "test_builders.json")
result = runner.invoke(run, ["-v", "test_builders.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "SerialProcessor" in result.output
result = runner.invoke(run, ["-vvv", "--no_bars", "test_builders.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
result = runner.invoke(run, ["-v", "-n", "2", "test_builders.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "MultiProcessor" in result.output
result = runner.invoke(run, ["-vvv", "-n", "2", "--no_bars", "test_builders.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
def test_reporting(mongostore, reporting_store):
memorystore = MemoryStore("temp")
builder = CopyBuilder(mongostore, memorystore)
mongostore.update([{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()} for i in range(10)])
runner = CliRunner()
with runner.isolated_filesystem():
dumpfn(builder, "test_builder.json")
dumpfn(reporting_store, "test_reporting_store.json")
result = runner.invoke(run, ["-v", "test_builder.json", "-r", "test_reporting_store.json"])
assert result.exit_code == 0
report_docs = list(reporting_store.query())
assert len(report_docs) == 3
start_doc = next(d for d in report_docs if d["event"] == "BUILD_STARTED")
assert "sources" in start_doc
assert "targets" in start_doc
end_doc = next(d for d in report_docs if d["event"] == "BUILD_ENDED")
assert "errors" in end_doc
assert "warnings" in end_doc
update_doc = next(d for d in report_docs if d["event"] == "UPDATE")
assert "items" in update_doc
def test_python_source():
runner = CliRunner()
with runner.isolated_filesystem():
shutil.copy2(src=Path(__file__).parent / "builder_for_test.py", dst=Path(".").resolve())
result = runner.invoke(run, ["-v", "-n", "2", "builder_for_test.py"])
assert result.exit_code == 0
assert "Ended multiprocessing: DummyBuilder" in result.output
def test_python_notebook_source():
runner = CliRunner()
with runner.isolated_filesystem():
shutil.copy2(
src=Path(__file__).parent / "builder_notebook_for_test.ipynb",
dst=Path(".").resolve(),
)
result = runner.invoke(run, ["-v", "-n", "2", "builder_notebook_for_test.ipynb"])
assert result.exit_code == 0
assert "Ended multiprocessing: DummyBuilder" in result.output
def test_memray_run_builder(mongostore):
memorystore = MemoryStore("temp")
builder = CopyBuilder(mongostore, memorystore)
mongostore.update([{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()} for i in range(10)])
runner = CliRunner()
with runner.isolated_filesystem():
dumpfn(builder, "test_builder.json")
result = runner.invoke(run, ["-v", "--memray", "on", "test_builder.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "SerialProcessor" in result.output
result = runner.invoke(run, ["-vvv", "--no_bars", "--memray", "on", "test_builder.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
result = runner.invoke(run, ["-v", "-n", "2", "--memray", "on", "test_builder.json"])
assert result.exit_code == 0
assert "CopyBuilder" in result.output
assert "MultiProcessor" in result.output
result = runner.invoke(run, ["-vvv", "-n", "2", "--no_bars", "--memray", "on", "test_builder.json"])
assert result.exit_code == 0
assert "Get" not in result.output
assert "Update" not in result.output
def test_memray_user_output_dir(mongostore):
memorystore = MemoryStore("temp")
builder = CopyBuilder(mongostore, memorystore)
mongostore.update([{mongostore.key: i, mongostore.last_updated_field: datetime.utcnow()} for i in range(10)])
runner = CliRunner()
with runner.isolated_filesystem():
dumpfn(builder, "test_builder.json")
result = runner.invoke(run, ["--memray", "on", "-md", "memray_output_dir/", "test_builder.json"])
assert result.exit_code == 0
assert (Path.cwd() / "memray_output_dir").exists() is True |
hull convex | #!BPY
"""
Name: 'Hull'
Blender: 244
Group: 'MESH'
Submenu: 'Box' box
Submenu: 'Sphere' sphere
Submenu: 'Convex' convex
Tooltip: 'Hull Selected Objects'
"""
#Submenu: 'Cylinder' cylinder
# --------------------------------------------------------------------------
# Hull 1.1 by Amorilia
# --------------------------------------------------------------------------
# ***** BEGIN LICENSE BLOCK *****
#
# BSD License
#
# Copyright (c) 2005-2011, NIF File Format Library and Tools
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the NIF File Format Library and Tools project may not be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENCE BLOCK *****
# --------------------------------------------------------------------------
import bpy
import pyffi.utils.quickhull
def hull_box(ob, me, selected_only):
"""Hull mesh in a box."""
# find box hull
# todo: improve algorithm
minx = min(v.co[0] for v in me.vertices if v.sel or not selected_only)
miny = min(v.co[1] for v in me.vertices if v.sel or not selected_only)
minz = min(v.co[2] for v in me.vertices if v.sel or not selected_only)
maxx = max(v.co[0] for v in me.vertices if v.sel or not selected_only)
maxy = max(v.co[1] for v in me.vertices if v.sel or not selected_only)
maxz = max(v.co[2] for v in me.vertices if v.sel or not selected_only)
# create box
box = Blender.Mesh.New('box')
for x in [minx, maxx]:
for y in [miny, maxy]:
for z in [minz, maxz]:
box.vertices.extend(x,y,z)
box.faces.extend(
[[0,1,3,2],[6,7,5,4],[0,2,6,4],[3,1,5,7],[4,5,1,0],[7,6,2,3]])
# link box to scene and set transform
scn = Blender.Scene.GetCurrent()
boxob = scn.objects.new(box, 'box')
boxob.setMatrix(ob.getMatrix('worldspace'))
# set bounds type
boxob.display_type = 'BOUNDS'
boxob.display_bounds_type = 'BOX'
def hull_sphere(ob, me, selected_only):
"""Hull mesh in a sphere."""
# find square box hull
minx = min(v.co[0] for v in me.vertices if v.sel or not selected_only)
miny = min(v.co[1] for v in me.vertices if v.sel or not selected_only)
minz = min(v.co[2] for v in me.vertices if v.sel or not selected_only)
maxx = max(v.co[0] for v in me.vertices if v.sel or not selected_only)
maxy = max(v.co[1] for v in me.vertices if v.sel or not selected_only)
maxz = max(v.co[2] for v in me.vertices if v.sel or not selected_only)
cx = (minx+maxx)*0.5
cy = (miny+maxy)*0.5
cz = (minz+maxz)*0.5
lx = maxx-minx
ly = maxy-miny
lz = maxz-minz
l = max([lx,ly,lz])*0.5
minx = cx-l
miny = cy-l
minz = cz-l
maxx = cx+l
maxy = cy+l
maxz = cz+l
# create sphere
box = Blender.Mesh.New('sphere')
for x in [minx, maxx]:
for y in [miny, maxy]:
for z in [minz, maxz]:
box.vertices.extend(x,y,z)
box.faces.extend(
[[0,1,3,2],[6,7,5,4],[0,2,6,4],[3,1,5,7],[4,5,1,0],[7,6,2,3]])
# link box to scene and set transform
scn = Blender.Scene.GetCurrent()
boxob = scn.objects.new(box, 'sphere')
boxob.setMatrix(ob.getMatrix('worldspace'))
# set bounds type
boxob.display_type = 'BOUNDS'
boxob.display_bounds_type = 'SPHERE'
def METHOD_NAME(ob, me, selected_only, precision = 0.1):
"""Hull mesh in a convex shape."""
# find convex hull
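    # qhull3d returns the hull vertices and the triangles indexing them; the
    # precision argument is the tolerance used when deciding whether a point
    # lies on the hull, so larger values give coarser (simpler) hulls.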
vertices, triangles = pyffi.utils.quickhull.qhull3d(
[tuple(v.co) for v in me.vertices if v.sel or not selected_only],
precision = precision)
# create convex mesh
box = Blender.Mesh.New('convexpoly')
for vert in vertices:
box.vertices.extend(*vert)
for triangle in triangles:
box.faces.extend(triangle)
# link mesh to scene and set transform
scn = Blender.Scene.GetCurrent()
boxob = scn.objects.new(box, 'convexpoly')
boxob.setMatrix(ob.getMatrix('worldspace'))
# set bounds type
boxob.display_type = 'BOUNDS'
boxob.display_bounds_type = 'POLYHEDRON' # FIXME 'CONVEX' not in blender Python API
boxob.show_wire = True
def main(arg):
# get selected meshes
    obs = [ob for ob in bpy.context.selected_objects if ob.type == 'MESH']
# saves editmode state and exit editmode if it is enabled
# (cannot make changes mesh data in editmode)
is_editmode = Window.EditMode()
Window.EditMode(0)
Window.WaitCursor(1)
t = sys.time()
# run script
num_affected = 0
for ob in obs:
me = ob.getData(mesh=1) # get Mesh, not NMesh
# are any vertices selected?
selected_only = is_editmode and (1 in ( vert.sel for vert in me.vertices ))
# create mesh by requested type
if arg == 'box': hull_box(ob, me, selected_only)
elif arg == 'sphere': hull_sphere(ob, me, selected_only)
elif arg == 'convex':
PREF_PRECISION = Blender.Draw.Create(0.1)
pup_block = [
('Precision', PREF_PRECISION, 0.001, 2.0, 'Maximum distance by which a vertex may fall outside the hull: larger values yield simpler hulls at the expense of missing more vertices.') ]
if not Blender.Draw.PupBlock('Convex Hull', pup_block):
return
METHOD_NAME(ob, me, selected_only, precision = PREF_PRECISION.val)
print(f"Hull finished in {(sys.time()-t):.2f} seconds")
Window.WaitCursor(0)
if is_editmode: Window.EditMode(1)
if __name__ == '__main__':
main(__script__['arg']) |
teardown | # Copyright 2019 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from wa import Parameter, ApkWorkload, PackageHandler, TestPackageHandler, ConfigError
from wa.utils.types import list_or_string
from wa.framework.exception import WorkloadError
class Uibenchjanktests(ApkWorkload):
name = 'uibenchjanktests'
description = """
Runs a particular test (or list of tests) of the UIBench JankTests_
test suite. The suite is provided by Google as an automated version
of the UIBench testbench for the Android UI.
The workload supports running the default set of tests without
restarting the app or running an arbitrary set of tests with
restarting the app in between each test.
.. _JankTests: https://android.googlesource.com/platform/platform_testing/+/master/tests/jank/uibench/src/com/android/uibench/janktests
"""
package_names = ['com.android.uibench.janktests']
_DUT_PACKAGE = 'com.android.test.uibench'
_DEFAULT_CLASS = 'UiBenchJankTests'
_OUTPUT_SECTION_REGEX = re.compile(
r'(\s*INSTRUMENTATION_STATUS: gfx-[\w-]+=[-+\d.]+\n)+'
r'\s*INSTRUMENTATION_STATUS_CODE: (?P<code>[-+\d]+)\n?', re.M)
_OUTPUT_GFXINFO_REGEX = re.compile(
r'INSTRUMENTATION_STATUS: (?P<name>[\w-]+)=(?P<value>[-+\d.]+)')
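    # The two regexes above pick apart the raw "am instrument" output, which
    # reports one INSTRUMENTATION_STATUS block of gfx-* metrics per subtest
    # followed by an INSTRUMENTATION_STATUS_CODE line (-1 indicating success,
    # as checked in update_output below).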
parameters = [
Parameter('tests', kind=list_or_string,
description="""
Tests to be run. Defaults to running every available
subtest in alphabetical order. The app will be restarted
for each subtest, unlike when using full=True.
""", default=None, aliases=['test']),
Parameter('full', kind=bool, default=False,
description="""
Runs the full suite of tests that the app defaults to
when no subtests are specified. The actual tests and their
order might depend on the version of the app. The subtests
will be run back to back without restarting the app in between.
"""),
Parameter('wait', kind=bool, default=True,
description='Forces am instrument to wait until the '
'instrumentation terminates before terminating itself. The '
'net effect is to keep the shell open until the tests have '
'finished. This flag is not required, but if you do not use '
'it, you will not see the results of your tests.'),
Parameter('raw', kind=bool, default=True,
description='Outputs results in raw format. Use this flag '
'when you want to collect performance measurements, so that '
'they are not formatted as test results. This flag is '
'designed for use with the flag -e perf true.'),
Parameter('instrument_args', kind=dict, default={},
description='Extra arguments for am instrument.'),
Parameter('no_hidden_api_checks', kind=bool, default=False,
description='Disables restrictions on the use of hidden '
'APIs.'),
]
def __init__(self, target, **kwargs):
super(Uibenchjanktests, self).__init__(target, **kwargs)
if 'iterations' not in self.instrument_args:
self.instrument_args['iterations'] = 1
self.dut_apk = PackageHandler(
self,
package_name=self._DUT_PACKAGE,
variant=self.variant,
strict=self.strict,
version=self.version,
force_install=self.force_install,
install_timeout=self.install_timeout,
uninstall=self.uninstall,
exact_abi=self.exact_abi,
prefer_host_package=self.prefer_host_package,
clear_data_on_reset=self.clear_data_on_reset)
self.apk = TestPackageHandler(
self,
package_name=self.package_name,
variant=self.variant,
strict=self.strict,
version=self.version,
force_install=self.force_install,
install_timeout=self.install_timeout,
uninstall=self.uninstall,
exact_abi=self.exact_abi,
prefer_host_package=self.prefer_host_package,
clear_data_on_reset=self.clear_data_on_reset,
instrument_args=self.instrument_args,
raw_output=self.raw,
instrument_wait=self.wait,
no_hidden_api_checks=self.no_hidden_api_checks)
def validate(self):
if self.full and self.tests is not None:
raise ConfigError("Can't select subtests while 'full' is True")
def initialize(self, context):
super(Uibenchjanktests, self).initialize(context)
self.dut_apk.initialize(context)
self.dut_apk.initialize_package(context)
self.output = {}
# Full run specified, don't select subtests
if self.full:
self.apk.args['class'] = '{}.{}'.format(
self.package_names[0], self._DEFAULT_CLASS
)
return
self.available_tests = {
test: cl for test, cl in self.apk.apk_info.methods
if test.startswith('test')
}
# default to running all tests in alphabetical order
# pylint: disable=access-member-before-definition
if not self.tests:
self.tests = sorted(self.available_tests.keys())
# raise error if any of the tests are not available
elif any([t not in self.available_tests for t in self.tests]):
msg = 'Unknown test(s) specified. Known tests: {}'
known_tests = '\n'.join(self.available_tests.keys())
raise ValueError(msg.format(known_tests))
def run(self, context):
# Full run, just run the activity directly
if self.full:
self.apk.start_activity()
self.apk.wait_instrument_over()
self.output['full'] = self.apk.instrument_output
return
for test in self.tests:
self.apk.args['class'] = '{}.{}#{}'.format(
self.package_names[0],
self.available_tests[test], test
)
self.apk.setup(context)
self.apk.start_activity()
try:
self.apk.wait_instrument_over()
except WorkloadError as e:
self.logger.warning(str(e))
self.output[test] = self.apk.instrument_output
def update_output(self, context):
super(Uibenchjanktests, self).update_output(context)
for test, test_output in self.output.items():
for section in self._OUTPUT_SECTION_REGEX.finditer(test_output):
if int(section.group('code')) != -1:
msg = 'Run failed (INSTRUMENTATION_STATUS_CODE: {}). See log.'
raise RuntimeError(msg.format(section.group('code')))
for metric in self._OUTPUT_GFXINFO_REGEX.finditer(section.group()):
context.add_metric(metric.group('name'), metric.group('value'),
classifiers={'test_name': test})
def METHOD_NAME(self, context):
super(Uibenchjanktests, self).METHOD_NAME(context)
self.dut_apk.METHOD_NAME() |
on touch move | """
Drag Behavior
=============
The :class:`~kivy.uix.behaviors.drag.DragBehavior`
`mixin <https://en.wikipedia.org/wiki/Mixin>`_ class provides Drag behavior.
When combined with a widget, dragging in the rectangle defined by the
:attr:`~kivy.uix.behaviors.drag.DragBehavior.drag_rectangle` will drag the
widget.
Example
-------
The following example creates a draggable label::
from kivy.uix.label import Label
from kivy.app import App
from kivy.uix.behaviors import DragBehavior
from kivy.lang import Builder
# You could also put the following in your kv file...
kv = '''
<DragLabel>:
# Define the properties for the DragLabel
drag_rectangle: self.x, self.y, self.width, self.height
drag_timeout: 10000000
drag_distance: 0
FloatLayout:
# Define the root widget
DragLabel:
size_hint: 0.25, 0.2
text: 'Drag me'
'''
class DragLabel(DragBehavior, Label):
pass
class TestApp(App):
def build(self):
return Builder.load_string(kv)
TestApp().run()
"""
__all__ = ('DragBehavior', )
from kivy.clock import Clock
from kivy.properties import NumericProperty, ReferenceListProperty
from kivy.config import Config
from kivy.metrics import sp
from functools import partial
# When we are generating documentation, Config doesn't exist
_scroll_timeout = _scroll_distance = 0
if Config:
_scroll_timeout = Config.getint('widgets', 'scroll_timeout')
_scroll_distance = Config.getint('widgets', 'scroll_distance')
class DragBehavior(object):
'''
The DragBehavior `mixin <https://en.wikipedia.org/wiki/Mixin>`_ provides
Drag behavior. When combined with a widget, dragging in the rectangle
defined by :attr:`drag_rectangle` will drag the widget. Please see
the :mod:`drag behaviors module <kivy.uix.behaviors.drag>` documentation
for more information.
.. versionadded:: 1.8.0
'''
drag_distance = NumericProperty(_scroll_distance)
'''Distance to move before dragging the :class:`DragBehavior`, in pixels.
As soon as the distance has been traveled, the :class:`DragBehavior` will
start to drag, and no touch event will be dispatched to the children.
It is advisable that you base this value on the dpi of your target device's
screen.
:attr:`drag_distance` is a :class:`~kivy.properties.NumericProperty` and
defaults to the `scroll_distance` as defined in the user
:class:`~kivy.config.Config` (20 pixels by default).
'''
drag_timeout = NumericProperty(_scroll_timeout)
'''Timeout allowed to trigger the :attr:`drag_distance`, in milliseconds.
If the user has not moved :attr:`drag_distance` within the timeout,
dragging will be disabled, and the touch event will be dispatched to the
children.
:attr:`drag_timeout` is a :class:`~kivy.properties.NumericProperty` and
defaults to the `scroll_timeout` as defined in the user
:class:`~kivy.config.Config` (55 milliseconds by default).
'''
drag_rect_x = NumericProperty(0)
'''X position of the axis aligned bounding rectangle where dragging
is allowed (in window coordinates).
:attr:`drag_rect_x` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
drag_rect_y = NumericProperty(0)
'''Y position of the axis aligned bounding rectangle where dragging
is allowed (in window coordinates).
    :attr:`drag_rect_y` is a :class:`~kivy.properties.NumericProperty` and
defaults to 0.
'''
drag_rect_width = NumericProperty(100)
'''Width of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_width` is a :class:`~kivy.properties.NumericProperty` and
defaults to 100.
'''
drag_rect_height = NumericProperty(100)
'''Height of the axis aligned bounding rectangle where dragging is allowed.
:attr:`drag_rect_height` is a :class:`~kivy.properties.NumericProperty` and
defaults to 100.
'''
drag_rectangle = ReferenceListProperty(drag_rect_x, drag_rect_y,
drag_rect_width, drag_rect_height)
'''Position and size of the axis aligned bounding rectangle where dragging
is allowed.
:attr:`drag_rectangle` is a :class:`~kivy.properties.ReferenceListProperty`
of (:attr:`drag_rect_x`, :attr:`drag_rect_y`, :attr:`drag_rect_width`,
:attr:`drag_rect_height`) properties.
'''
def __init__(self, **kwargs):
self._drag_touch = None
super(DragBehavior, self).__init__(**kwargs)
def _get_uid(self, prefix='sv'):
return '{0}.{1}'.format(prefix, self.uid)
def on_touch_down(self, touch):
xx, yy, w, h = self.drag_rectangle
x, y = touch.pos
if not self.collide_point(x, y):
touch.ud[self._get_uid('svavoid')] = True
return super(DragBehavior, self).on_touch_down(touch)
if self._drag_touch or ('button' in touch.profile and
touch.button.startswith('scroll')) or\
not ((xx < x <= xx + w) and (yy < y <= yy + h)):
return super(DragBehavior, self).on_touch_down(touch)
# no mouse scrolling, so the user is going to drag with this touch.
self._drag_touch = touch
uid = self._get_uid()
touch.grab(self)
touch.ud[uid] = {
'mode': 'unknown',
'dx': 0,
'dy': 0}
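        # drag_timeout is expressed in milliseconds while Clock expects seconds;
        # if the touch has not travelled drag_distance by the time the callback
        # fires, _change_touch_mode hands the touch back to the children.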
Clock.schedule_once(self._change_touch_mode,
self.drag_timeout / 1000.)
return True
def METHOD_NAME(self, touch):
if self._get_uid('svavoid') in touch.ud or\
self._drag_touch is not touch:
return super(DragBehavior, self).METHOD_NAME(touch) or\
self._get_uid() in touch.ud
if touch.grab_current is not self:
return True
uid = self._get_uid()
ud = touch.ud[uid]
mode = ud['mode']
if mode == 'unknown':
ud['dx'] += abs(touch.dx)
ud['dy'] += abs(touch.dy)
if ud['dx'] > sp(self.drag_distance):
mode = 'drag'
if ud['dy'] > sp(self.drag_distance):
mode = 'drag'
ud['mode'] = mode
if mode == 'drag':
self.x += touch.dx
self.y += touch.dy
return True
def on_touch_up(self, touch):
if self._get_uid('svavoid') in touch.ud:
return super(DragBehavior, self).on_touch_up(touch)
if self._drag_touch and self in [x() for x in touch.grab_list]:
touch.ungrab(self)
self._drag_touch = None
ud = touch.ud[self._get_uid()]
if ud['mode'] == 'unknown':
super(DragBehavior, self).on_touch_down(touch)
Clock.schedule_once(partial(self._do_touch_up, touch), .1)
else:
if self._drag_touch is not touch:
super(DragBehavior, self).on_touch_up(touch)
return self._get_uid() in touch.ud
def _do_touch_up(self, touch, *largs):
super(DragBehavior, self).on_touch_up(touch)
# don't forget about grab event!
for x in touch.grab_list[:]:
touch.grab_list.remove(x)
x = x()
if not x:
continue
touch.grab_current = x
super(DragBehavior, self).on_touch_up(touch)
touch.grab_current = None
def _change_touch_mode(self, *largs):
if not self._drag_touch:
return
uid = self._get_uid()
touch = self._drag_touch
ud = touch.ud[uid]
if ud['mode'] != 'unknown':
return
touch.ungrab(self)
self._drag_touch = None
touch.push()
touch.apply_transform_2d(self.parent.to_widget)
super(DragBehavior, self).on_touch_down(touch)
touch.pop()
return |
clear reactions | import logging
import re
import sys
import os
from difflib import get_close_matches
from enum import IntEnum
from logging.handlers import RotatingFileHandler
from string import Formatter
import discord
from discord.ext import commands
import _string
try:
from colorama import Fore, Style
except ImportError:
Fore = Style = type("Dummy", (object,), {"__getattr__": lambda self, item: ""})()
if ".heroku" in os.environ.get("PYTHONHOME", ""):
# heroku
Fore = Style = type("Dummy", (object,), {"__getattr__": lambda self, item: ""})()
class PermissionLevel(IntEnum):
OWNER = 5
ADMINISTRATOR = 4
ADMIN = 4
MODERATOR = 3
MOD = 3
SUPPORTER = 2
RESPONDER = 2
REGULAR = 1
INVALID = -1
class InvalidConfigError(commands.BadArgument):
def __init__(self, msg, *args):
super().__init__(msg, *args)
self.msg = msg
@property
def embed(self):
# Single reference of Color.red()
return discord.Embed(title="Error", description=self.msg, color=discord.Color.red())
class ModmailLogger(logging.Logger):
@staticmethod
def _debug_(*msgs):
return f'{Fore.CYAN}{" ".join(msgs)}{Style.RESET_ALL}'
@staticmethod
def _info_(*msgs):
return f'{Fore.LIGHTMAGENTA_EX}{" ".join(msgs)}{Style.RESET_ALL}'
@staticmethod
def _error_(*msgs):
return f'{Fore.RED}{" ".join(msgs)}{Style.RESET_ALL}'
def debug(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.DEBUG):
self._log(logging.DEBUG, self._debug_(msg), args, **kwargs)
def info(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.INFO):
self._log(logging.INFO, self._info_(msg), args, **kwargs)
def warning(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.WARNING):
self._log(logging.WARNING, self._error_(msg), args, **kwargs)
def error(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.ERROR):
self._log(logging.ERROR, self._error_(msg), args, **kwargs)
def critical(self, msg, *args, **kwargs):
if self.isEnabledFor(logging.CRITICAL):
self._log(logging.CRITICAL, self._error_(msg), args, **kwargs)
def line(self, level="info"):
if level == "info":
level = logging.INFO
elif level == "debug":
level = logging.DEBUG
else:
level = logging.INFO
if self.isEnabledFor(level):
self._log(
level,
Fore.BLACK + Style.BRIGHT + "-------------------------" + Style.RESET_ALL,
[],
)
logging.setLoggerClass(ModmailLogger)
log_level = logging.INFO
loggers = set()
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(log_level)
formatter = logging.Formatter(
"%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s", datefmt="%m/%d/%y %H:%M:%S"
)
ch.setFormatter(formatter)
ch_debug = None
def getLogger(name=None) -> ModmailLogger:
logger = logging.getLogger(name)
logger.setLevel(log_level)
logger.addHandler(ch)
if ch_debug is not None:
logger.addHandler(ch_debug)
loggers.add(logger)
return logger
class FileFormatter(logging.Formatter):
ansi_escape = re.compile(r"\x1B\[[0-?]*[ -/]*[@-~]")
def format(self, record):
record.msg = self.ansi_escape.sub("", record.msg)
return super().format(record)
def configure_logging(name, level=None):
global ch_debug, log_level
ch_debug = RotatingFileHandler(name, mode="a+", maxBytes=48000, backupCount=1, encoding="utf-8")
formatter_debug = FileFormatter(
"%(asctime)s %(name)s[%(lineno)d] - %(levelname)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
ch_debug.setFormatter(formatter_debug)
ch_debug.setLevel(logging.DEBUG)
if level is not None:
log_level = level
ch.setLevel(log_level)
for logger in loggers:
logger.setLevel(log_level)
logger.addHandler(ch_debug)
class _Default:
pass
Default = _Default()
class SafeFormatter(Formatter):
def get_field(self, field_name, args, kwargs):
first, rest = _string.formatter_field_name_split(field_name)
try:
obj = self.get_value(first, args, kwargs)
except (IndexError, KeyError):
return "<Invalid>", first
# loop through the rest of the field_name, doing
# getattr or getitem as needed
        # stops when it reaches a depth of 2 or when an attribute name starts with _.
try:
for n, (is_attr, i) in enumerate(rest):
if n >= 2:
break
if is_attr:
if str(i).startswith("_"):
break
obj = getattr(obj, i)
else:
obj = obj[i]
else:
return obj, first
except (IndexError, KeyError):
pass
return "<Invalid>", first
class UnseenFormatter(Formatter):
def get_value(self, key, args, kwds):
if isinstance(key, str):
try:
return kwds[key]
except KeyError:
return "{" + key + "}"
else:
return super().get_value(key, args, kwds)
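# UnseenFormatter, by contrast, leaves unknown placeholders intact; for example,
# UnseenFormatter().format("Hello {name}, you have {count} mail", name="Bob")
# gives "Hello Bob, you have {count} mail".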
class SimilarCategoryConverter(commands.CategoryChannelConverter):
async def convert(self, ctx, argument):
bot = ctx.bot
guild = ctx.guild
try:
return await super().convert(ctx, argument)
except commands.ChannelNotFound:
if guild:
categories = {c.name.casefold(): c for c in guild.categories}
else:
categories = {
c.name.casefold(): c
for c in bot.get_all_channels()
if isinstance(c, discord.CategoryChannel)
}
result = get_close_matches(argument.casefold(), categories.keys(), n=1, cutoff=0.75)
if result:
result = categories[result[0]]
if not isinstance(result, discord.CategoryChannel):
raise commands.ChannelNotFound(argument)
return result
class DummyMessage:
"""
A class mimicking the original :class:discord.Message
where all functions that require an actual message to exist
is replaced with a dummy function
"""
def __init__(self, message):
if message:
message.attachments = []
self._message = message
def __getattr__(self, name: str):
return getattr(self._message, name)
def __bool__(self):
return bool(self._message)
async def delete(self, *, delay=None):
return
async def edit(self, **fields):
return
async def add_reaction(self, emoji):
return
async def remove_reaction(self, emoji):
return
async def clear_reaction(self, emoji):
return
async def METHOD_NAME(self):
return
async def pin(self, *, reason=None):
return
async def unpin(self, *, reason=None):
return
async def publish(self):
return
async def ack(self):
return
class DMDisabled(IntEnum):
NONE = 0
NEW_THREADS = 1
ALL_THREADS = 2
class HostingMethod(IntEnum):
HEROKU = 0
PM2 = 1
SYSTEMD = 2
SCREEN = 3
DOCKER = 4
OTHER = 5 |
wrap py func | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Pyfunc creation utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import script_ops
class MatchDType(namedtuple('MatchDType', ('arg_number',))):
"""Allows matching the dtype of an argument.
Used in conjunction with function calls. For example, MatchDType(0) will
match the DType of the first argument.
"""
pass
def METHOD_NAME(f, return_dtypes, args, kwargs=None, use_dummy_return=False):
"""Helper that wraps a callable to py_func.
The helper passes tensor arguments through the py_func interface. Non-tensor
arguments are allowed, and will be passed to f directly. Note that non-tensor
  arguments captured by f will not be updated every time the wrapper is
called (this is consistent with its argument list, which only includes
the tensor arguments). In general, it's safest not to reuse this wrapper.
Args:
f: Callable
    return_dtypes: None, an individual DType or MatchDType, or a tuple/list of them; the
data type for each of f's return value(s). Set to None if f has no
return values or use_dummy_return is True. Use MatchDType to define a
dtype identical to that of `i`th argument (argument 0 is the first);
        an argument must be of Tensor type if it is to be used with MatchDType.
args: Positional arguments for f, as list or tuple.
kwargs: Keyword arguments for f, as dict with string keys. May be None.
use_dummy_return: If True, the function will return a dummy value of 1
and discard its actual return value.
Returns:
The return values of f converted to tensor.
Raises:
ValueError: if any of the arguments are incorrect.
"""
if return_dtypes and use_dummy_return:
raise ValueError('if use_dummy_return is True, return_dtypes must be empty')
tensor_args = []
tensor_args_idx = {}
# Of the positional arguments, only grab the tensor ones to be passed through
# the py_func.
n_args = len(args)
arg_is_tensor = tuple(map(tensor_util.is_tensor, args))
for i in range(n_args):
if arg_is_tensor[i]:
tensor_args_idx[i] = len(tensor_args)
tensor_args.append(args[i])
# We essentially take the tensor kwargs, if any, and add them to the list of
# positional arguments. The kwargs are then reconstructed inside the py_func.
#
# For example, if
#
# args = [Tensor(1), 'foo']
# kwargs = {'a': Tensor(2), 'b': 'bar'}
#
# Then
#
# tensor_args = (Tensor(1), Tensor(2))
# kwarg_keys = ('a', 'b')
if kwargs:
kwarg_keys = tuple(kwargs.keys())
kwarg_is_tensor = {k: tensor_util.is_tensor(kwargs[k]) for k in kwarg_keys}
for k in kwarg_keys:
if kwarg_is_tensor[k]:
tensor_args_idx[k] = len(tensor_args)
tensor_args.append(kwargs[k])
else:
kwarg_keys = ()
# Set up return dtypes.
def match_arg_dtype(arg_number):
arg = args[arg_number]
if not arg_is_tensor[arg_number]:
raise ValueError(
'argument %d was used with MatchDType and must be a tf.Tensor, but '
'was %s instead' % (arg_number, type(arg)))
return arg.dtype
if return_dtypes:
if isinstance(return_dtypes, MatchDType):
return_dtypes = match_arg_dtype(return_dtypes.arg_number)
elif isinstance(return_dtypes, (list, tuple)):
return_dtypes = tuple(
match_arg_dtype(a.arg_number) if isinstance(a, MatchDType) else a
for a in return_dtypes)
else:
assert isinstance(return_dtypes, dtypes.DType)
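# f_wrapper receives only the tensor arguments at runtime and rebuilds the
# original positional/keyword argument lists around them before calling f.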
def f_wrapper(*tensor_args):
f_args = tuple(tensor_args[tensor_args_idx[i]] if arg_is_tensor[i] else a
for i, a in enumerate(args))
f_kwargs = {
k: tensor_args[tensor_args_idx[k]] if kwarg_is_tensor[k] else kwargs[k]
for i, k in enumerate(kwarg_keys)
}
retval = f(*f_args, **f_kwargs)
return 1 if use_dummy_return else retval
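# the py_func still needs a declared return dtype; int32 matches the dummy
# value 1 returned above when use_dummy_return is set.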
if use_dummy_return:
return_dtypes = dtypes.int32
return script_ops.eager_py_func(f_wrapper, tensor_args, return_dtypes) |
sku | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetNotificationHubResult',
'AwaitableGetNotificationHubResult',
'get_notification_hub',
'get_notification_hub_output',
]
@pulumi.output_type
class GetNotificationHubResult:
"""
Notification Hub Resource.
"""
def __init__(__self__, id=None, location=None, name=None, properties=None, METHOD_NAME=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. E.g. "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}"
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.NotificationHubPropertiesResponse':
"""
NotificationHub properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional['outputs.SkuResponse']:
"""
The Sku description for a namespace
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetNotificationHubResult(GetNotificationHubResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNotificationHubResult(
id=self.id,
location=self.location,
name=self.name,
properties=self.properties,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_notification_hub(namespace_name: Optional[str] = None,
notification_hub_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNotificationHubResult:
"""
Notification Hub Resource.
:param str namespace_name: Namespace name
:param str notification_hub_name: Notification Hub name
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
__args__ = dict()
__args__['namespaceName'] = namespace_name
__args__['notificationHubName'] = notification_hub_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:notificationhubs/v20230901:getNotificationHub', __args__, opts=opts, typ=GetNotificationHubResult).value
return AwaitableGetNotificationHubResult(
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
METHOD_NAME=pulumi.get(__ret__, 'sku'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_notification_hub)
def get_notification_hub_output(namespace_name: Optional[pulumi.Input[str]] = None,
notification_hub_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNotificationHubResult]:
"""
Notification Hub Resource.
:param str namespace_name: Namespace name
:param str notification_hub_name: Notification Hub name
:param str resource_group_name: The name of the resource group. The name is case insensitive.
"""
... |
add | """
Manage groups on AIX
.. important::
If you feel that Salt should be using this module to manage groups on a
minion, and it is using a different module (or gives an error similar to
*'group.info' is not available*), see :ref:`here
<module-provider-override>`.
"""
import logging
try:
import grp
except ImportError:
pass
log = logging.getLogger(__name__)
# Define the module's virtual name
__virtualname__ = "group"
def __virtual__():
"""
Set the group module if the kernel is AIX
"""
if __grains__["kernel"] == "AIX":
return __virtualname__
return (
False,
"The aix_group execution module failed to load: only available on AIX systems.",
)
def METHOD_NAME(name, gid=None, system=False, root=None, **kwargs):
"""
Add the specified group
CLI Example:
.. code-block:: bash
salt '*' group.add foo 3456
"""
cmd = "mkgroup "
if system and root is not None:
cmd += "-a "
if gid:
cmd += "id={} ".format(gid)
cmd += name
ret = __salt__["cmd.run_all"](cmd, python_shell=False)
return not ret["retcode"]
def delete(name):
"""
Remove the named group
CLI Example:
.. code-block:: bash
salt '*' group.delete foo
"""
ret = __salt__["cmd.run_all"]("rmgroup {}".format(name), python_shell=False)
return not ret["retcode"]
def info(name):
"""
Return information about a group
CLI Example:
.. code-block:: bash
salt '*' group.info foo
"""
try:
grinfo = grp.getgrnam(name)
except KeyError:
return {}
else:
return {
"name": grinfo.gr_name,
"passwd": grinfo.gr_passwd,
"gid": grinfo.gr_gid,
"members": grinfo.gr_mem,
}
def getent(refresh=False):
"""
Return info on all groups
CLI Example:
.. code-block:: bash
salt '*' group.getent
"""
if "group.getent" in __context__ and not refresh:
return __context__["group.getent"]
ret = []
for grinfo in grp.getgrall():
ret.append(info(grinfo.gr_name))
__context__["group.getent"] = ret
return ret
def chgid(name, gid):
"""
Change the gid for a named group
CLI Example:
.. code-block:: bash
salt '*' group.chgid foo 4376
"""
pre_gid = __salt__["file.group_to_gid"](name)
if gid == pre_gid:
return True
cmd = "chgroup id={} {}".format(gid, name)
__salt__["cmd.run"](cmd, python_shell=False)
post_gid = __salt__["file.group_to_gid"](name)
if post_gid != pre_gid:
return post_gid == gid
return False
def adduser(name, username, root=None):
"""
Add a user in the group.
CLI Example:
.. code-block:: bash
salt '*' group.adduser foo bar
Verifies whether the username 'bar' is a member of the existing group 'foo';
if not, adds it.
"""
cmd = "chgrpmem -m + {} {}".format(username, name)
retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
return not retcode
def deluser(name, username, root=None):
"""
Remove a user from the group.
CLI Example:
.. code-block:: bash
salt '*' group.deluser foo bar
Removes a member user 'bar' from a group 'foo'. If group is not present
then returns True.
"""
grp_info = __salt__["group.info"](name)
try:
if username in grp_info["members"]:
cmd = "chgrpmem -m - {} {}".format(username, name)
ret = __salt__["cmd.run"](cmd, python_shell=False)
return not ret["retcode"]
else:
return True
except Exception: # pylint: disable=broad-except
return True
def members(name, members_list, root=None):
"""
Replaces members of the group with a provided list.
CLI Example:
.. code-block:: bash
salt '*' group.members foo 'user1,user2,user3,...'
Replaces a membership list for a local group 'foo'.
foo:x:1234:user1,user2,user3,...
"""
cmd = "chgrpmem -m = {} {}".format(members_list, name)
retcode = __salt__["cmd.retcode"](cmd, python_shell=False)
return not retcode |
wait on systemd | import queue
import subprocess
import threading
from steps.common import debug
class Podman:
def __init__(self, context, container_name):
self.context = context
self.container_name = container_name
self.container = None
debug(self.context, "Podman.__init__()")
self.new_container()
def __del__(self):
debug(self.context, "Podman.__del__()")
try:
self.kill()
except Exception:
pass
def kill(self):
debug(self.context, "Podman.kill()")
if not self.container:
return
self.container.kill()
self.container = None
def new_container(self):
debug(self.context, "Podman.new_container()")
# no need to stop the running container
# because the new container replaces the old container with the identical name
self.container = Container(self.context, name=self.container_name)
class ThreadedPodman:
def __init__(self, context, container_name_prefix, max_containers=1):
self.context = context
self.container = None
debug(self.context, "ThreadedPodman.__init__()")
self.max_containers = max_containers
self.container_name_prefix = container_name_prefix
self.container_name_num = 0
# produce new containers
self.container_producer_queue = queue.Queue(maxsize=self.max_containers)
self.container_producer_queue_is_stopping = threading.Event()
self.container_producer_queue_is_stopped = threading.Event()
self.container_producer_thread = threading.Thread(target=self.container_producer, daemon=True)
self.container_producer_thread.start()
# consume (kill) used containers
self.container_consumer_queue = queue.Queue()
self.container_consumer_thread = threading.Thread(target=self.container_consumer, daemon=True)
self.container_consumer_thread.start()
self.new_container()
def __del__(self):
debug(self.context, "ThreadedPodman.__del__()")
try:
self.kill()
except Exception:
pass
def kill(self):
debug(self.context, "ThreadedPodman.kill()")
self.container_producer_queue_is_stopping.set()
container = getattr(self, "container", None)
if container:
self.container_consumer_queue.put(container)
self.container = None
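# drain the producer queue so containers created in the background are killed as well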
while not self.container_producer_queue_is_stopped.is_set():
try:
container = self.container_producer_queue.get(block=True, timeout=1)
self.container_consumer_queue.put(container)
except queue.Empty:
continue
# 'None' is a signal to finish processing the queue
self.container_consumer_queue.put(None)
self.container_producer_thread.join()
self.container_consumer_thread.join()
def container_producer(self):
while not self.container_producer_queue_is_stopping.is_set():
if self.container_name_prefix:
self.container_name_num += 1
container_name = f"{self.container_name_prefix}{self.container_name_num}"
else:
container_name = None
container = Container(self.context, name=container_name)
self.container_producer_queue.put(container, block=True)
self.container_producer_queue_is_stopped.set()
def container_consumer(self):
while True:
container = self.container_consumer_queue.get(block=True)
if container is None:
break
container.kill()
def new_container(self):
debug(self.context, "ThreadedPodman.new_container()")
if getattr(self, "container", None):
self.container_consumer_queue.put(self.container)
self.container = self.container_producer_queue.get(block=True)
debug(self.context, f"> {self.container}")
class Container:
def __init__(self, context, name=None):
self.context = context
debug(self.context, "Container.__init__()")
self.container_name = name
self.container_id = None
self.port = None
self.start()
def __del__(self):
try:
self.kill()
except Exception:
pass
def __repr__(self):
result = super().__repr__()
result += f"(port:{self.port}, id:{self.container_id}, name:{self.container_name})"
return result
def _run(self, args, check=True):
cmd = ["podman"] + args
debug(self.context, "Running command:", cmd)
proc = subprocess.run(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
encoding="utf-8",
check=check,
)
debug(self.context, "> return code:", proc.returncode)
debug(self.context, "> stdout:", proc.stdout)
debug(self.context, "> stderr:", proc.stderr)
return proc
def start(self):
debug(self.context, "Container.start()")
args = [
"run",
"--hostname", "obs-server-behave",
]
if self.container_name:
args += [
"--name", self.container_name,
"--replace",
"--stop-signal", "SIGKILL",
]
args += [
"--rm",
"--detach",
"--interactive",
"--tty",
"-p", "443",
"obs-server"
]
proc = self._run(args)
lines = proc.stdout.strip().splitlines()
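# `podman run --detach` prints the new container id as the last line of stdout
# (earlier lines may be image pull messages)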
self.container_id = lines[-1]
self.METHOD_NAME()
self.port = self.get_port()
def kill(self):
if not self.container_id:
return
debug(self.context, "Container.kill()")
args = ["kill", self.container_id]
self._run(args)
self.container_id = None
def restart(self):
debug(self.context, "Container.restart()")
self.kill()
self.start()
def METHOD_NAME(self):
args = [
"exec",
self.container_id,
"/usr/bin/systemctl", "is-system-running", "--wait"
]
self._run(args, check=False)
def get_port(self):
args = ["port", self.container_id]
proc = self._run(args)
lines = proc.stdout.strip().splitlines()
for line in lines:
if line.startswith("443/tcp"):
# return <port> from: "443/tcp -> 0.0.0.0:<port>"
return line.split(":")[-1]
raise RuntimeError(f"Could not determine port of container {self.container_id}") |
l2 norm scale | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import fluid
from paddle.fluid.param_attr import ParamAttr
class VGG(object):
"""
VGG, see https://arxiv.org/abs/1409.1556
"""
def __init__(self):
super(VGG, self).__init__()
def __call__(self, input):
layers = []
layers += self._vgg_block(input)
layers += self._add_extras_block(layers[-1])
norm_cfg = [20., -1, -1, -1, -1, -1]
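# apply a learnable L2-norm scale (initialized to 20) to the first feature map
# (the conv4_3 output); entries of -1 leave the corresponding feature map unchanged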
for k, v in enumerate(layers):
if not norm_cfg[k] == -1:
layers[k] = self.METHOD_NAME(v, init_scale=norm_cfg[k])
return layers
def _vgg_block(self, input):
num_layers = [2, 2, 3, 3, 3]
vgg_base = [64, 128, 256, 512, 512]
conv = input
layers = []
for k, v in enumerate(vgg_base):
conv = self._conv_block(
conv, v, num_layers[k], name="conv{}_".format(k + 1))
layers.append(conv)
if k == 4:
conv = self._pooling_block(conv, 3, 1, pool_padding=1)
else:
conv = self._pooling_block(conv, 2, 2)
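# fc6/fc7 of the original VGG classifier are converted to (dilated) convolutions,
# following the SSD paper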
fc6 = self._conv_layer(conv, 1024, 3, 1, 6, dilation=6, name="fc6")
fc7 = self._conv_layer(fc6, 1024, 1, 1, 0, name="fc7")
return [layers[3], fc7]
def _add_extras_block(self, input):
cfg = [[256, 512, 1, 2, 3], [128, 256, 1, 2, 3],
[128, 256, 0, 1, 3], [128, 256, 0, 1, 3]]
conv = input
layers = []
for k, v in enumerate(cfg):
conv = self._extra_block(
conv, v[0], v[1], v[2], v[3], v[4],
name="conv{}_".format(6 + k))
layers.append(conv)
return layers
def _conv_block(self, input, num_filter, groups, name=None):
conv = input
for i in range(groups):
conv = self._conv_layer(
input=conv,
num_filters=num_filter,
filter_size=3,
stride=1,
padding=1,
act='relu',
name=name + str(i + 1))
return conv
def _extra_block(self,
input,
num_filters1,
num_filters2,
padding_size,
stride_size,
filter_size,
name=None):
# 1x1 conv
conv_1 = self._conv_layer(
input=input,
num_filters=int(num_filters1),
filter_size=1,
stride=1,
act='relu',
padding=0,
name=name + "1")
# 3x3 conv
conv_2 = self._conv_layer(
input=conv_1,
num_filters=int(num_filters2),
filter_size=filter_size,
stride=stride_size,
act='relu',
padding=padding_size,
name=name + "2")
return conv_2
def _conv_layer(self,
input,
num_filters,
filter_size,
stride,
padding,
dilation=1,
act='relu',
use_cudnn=True,
name=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
dilation=dilation,
act=act,
use_cudnn=use_cudnn,
param_attr=ParamAttr(name=name + "_weights"),
bias_attr=ParamAttr(name=name + "_biases"),
name=name + '.conv2d.output.1')
return conv
def _pooling_block(self,
conv,
pool_size,
pool_stride,
pool_padding=0,
ceil_mode=True):
pool = fluid.layers.pool2d(
input=conv,
pool_size=pool_size,
pool_type='max',
pool_stride=pool_stride,
pool_padding=pool_padding,
ceil_mode=ceil_mode)
return pool
def METHOD_NAME(self, input, init_scale=1.0, channel_shared=False):
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.initializer import Constant
helper = LayerHelper("Scale")
l2_norm = fluid.layers.l2_normalize(
input, axis=1) # l2 norm along channel
shape = [1] if channel_shared else [input.shape[1]]
scale = helper.create_parameter(
attr=helper.param_attr,
shape=shape,
dtype=input.dtype,
default_initializer=Constant(init_scale))
out = fluid.layers.elementwise_mul(
x=l2_norm, y=scale, axis=-1 if channel_shared else 1,
name="conv4_3_norm_scale")
return out
class SSD(object):
"""
Single Shot MultiBox Detector, see https://arxiv.org/abs/1512.02325
"""
def __init__(self, num_classes=81):
super(SSD, self).__init__()
self.backbone = VGG()
self.num_classes = num_classes
def __call__(self, image, gt_box, gt_label):
body_feats = self.backbone(image)
locs, confs, box, box_var = fluid.layers.multi_box_head(
inputs=body_feats,
image=image,
num_classes=self.num_classes,
min_ratio=15,
max_ratio=90,
base_size=300,
min_sizes=[30.0, 60.0, 111.0, 162.0, 213.0, 264.0],
max_sizes=[60.0, 111.0, 162.0, 213.0, 264.0, 315.0],
aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2.], [2.]],
steps=[8, 16, 32, 64, 100, 300],
offset=0.5,
flip=True,
min_max_aspect_ratios_order=False,
kernel_size=3,
pad=1)
loss = fluid.layers.ssd_loss(locs, confs, gt_box, gt_label, box,
box_var)
loss = fluid.layers.reduce_sum(loss)
return loss |
up | import os
import click
from aimcore.cli.utils import set_log_level, start_uvicorn_app
from aimcore.cli.ui.utils import build_db_upgrade_command, get_free_port_num
from aimcore.web.configs import (
AIM_UI_BASE_PATH,
AIM_UI_DEFAULT_HOST,
AIM_UI_DEFAULT_PORT,
AIM_UI_MOUNTED_REPO_PATH,
AIM_UI_TELEMETRY_KEY,
AIM_PROXY_URL,
AIM_PROFILER_KEY
)
from aim._sdk.configs import AIM_ENV_MODE_KEY
from aim._sdk.repo import Repo
from aimcore.web.utils import exec_cmd
from aimcore.web.utils import ShellCommandException
from aim._ext.tracking import analytics
@click.command('ui')
@click.option('-h', '--host', default=AIM_UI_DEFAULT_HOST, type=str)
@click.option('-p', '--port', default=AIM_UI_DEFAULT_PORT, type=int)
@click.option('-w', '--workers', default=1, type=int)
@click.option('--uds', required=False, type=click.Path(exists=False,
file_okay=True,
dir_okay=False,
readable=True))
@click.option('--repo', required=False, default=os.getcwd(), type=click.Path(exists=True,
file_okay=False,
dir_okay=True,
writable=True))
@click.option('--package', '--pkg', required=False, default='', show_default='asp', type=str)
@click.option('--dev', is_flag=True, default=False)
@click.option('--ssl-keyfile', required=False, type=click.Path(exists=True,
file_okay=True,
dir_okay=False,
readable=True))
@click.option('--ssl-certfile', required=False, type=click.Path(exists=True,
file_okay=True,
dir_okay=False,
readable=True))
@click.option('--base-path', required=False, default='', type=str)
@click.option('--profiler', is_flag=True, default=False)
@click.option('--log-level', required=False, default='', type=str)
@click.option('-y', '--yes', is_flag=True, help='Automatically confirm prompt')
def ui(dev, host, port, workers, uds,
repo,
package,
ssl_keyfile, ssl_certfile,
base_path,
profiler, log_level, yes):
"""
Start Aim UI with the --repo repository.
"""
if not log_level:
log_level = 'debug' if dev else 'warning'
set_log_level(log_level)
os.environ[AIM_ENV_MODE_KEY] = 'dev' if dev else 'prod'
if base_path:
# process `base_path` as ui requires leading slash
if base_path.endswith('/'):
base_path = base_path[:-1]
if base_path and not base_path.startswith('/'):
base_path = f'/{base_path}'
os.environ[AIM_UI_BASE_PATH] = base_path
if not Repo.exists(repo):
init_repo = yes or click.confirm(f'\'{repo}\' is not a valid Aim repository. Do you want to initialize it?')
if not init_repo:
click.echo('To initialize repo please run the following command:')
click.secho('aim init', fg='yellow')
return
Repo.init(repo)
repo_inst = Repo.from_path(repo, read_only=True)
os.environ[AIM_UI_MOUNTED_REPO_PATH] = repo
dev_package_dir = repo_inst.dev_package_dir
if package:
repo_inst.set_active_package(pkg_name=package)
try:
db_cmd = build_db_upgrade_command()
exec_cmd(db_cmd, stream_output=True)
except ShellCommandException:
click.echo('Failed to initialize Aim DB. '
'Please see the logs above for details.')
exit(1)
if port == 0:
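# port 0 means: pick a free port automatically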
try:
port = get_free_port_num()
except Exception:
pass
if not dev and os.getenv(AIM_UI_TELEMETRY_KEY, 1) == '0':
click.echo(f'"{AIM_UI_TELEMETRY_KEY}" is ignored. Read how to opt-out here: '
f'https://aimstack.readthedocs.io/en/latest/community/telemetry.html')
if dev:
analytics.dev_mode = True
click.secho('Running Aim UI on repo `{}`'.format(repo_inst), fg='yellow')
if uds:
click.echo('Aim UI running on {}'.format(uds))
else:
scheme = 'https' if ssl_keyfile or ssl_certfile else 'http'
click.echo('Open {}://{}:{}{}'.format(scheme, host, port, base_path), err=True)
proxy_url = os.environ.get(AIM_PROXY_URL)
if proxy_url:
click.echo(f'Proxy {proxy_url}{base_path}/')
click.echo('Press Ctrl+C to exit')
analytics.track_event(event_name='[Aim UI] Start UI')
if profiler:
os.environ[AIM_PROFILER_KEY] = '1'
if dev:
import aim
import aimstack
import aimcore
reload_dirs = [os.path.dirname(aim.__file__), os.path.dirname(aimcore.__file__), os.path.dirname(aimstack.__file__), dev_package_dir]
else:
reload_dirs = []
try:
start_uvicorn_app('aimcore.web.run:app',
host=host, port=port, workers=workers, uds=uds,
ssl_keyfile=ssl_keyfile, ssl_certfile=ssl_certfile, log_level=log_level,
reload=dev, reload_dirs=reload_dirs)
except Exception:
click.echo('Failed to run Aim UI. '
'Please see the logs above for details.')
exit(1)
@click.command('up', context_settings={'ignore_unknown_options': True, 'allow_extra_args': True}, hidden=True)
def METHOD_NAME(**kwargs):
click.secho('Command `aim up` has been removed. Use `aim ui` instead.', fg='yellow') |
train forward | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import math
import net
class DygraphModel():
# define model
def create_model(self, config):
emb_path = config.get("hyper_parameters.emb_path")
vocab_size = config.get("hyper_parameters.vocab_size")
emb_size = config.get("hyper_parameters.emb_size")
kernel_num = config.get("hyper_parameters.kernel_num")
conv_filter = config.get("hyper_parameters.conv_filter")
conv_act = config.get("hyper_parameters.conv_act")
hidden_size = config.get("hyper_parameters.hidden_size")
out_size = config.get("hyper_parameters.out_size")
pool_size = config.get("hyper_parameters.pool_size")
pool_stride = config.get("hyper_parameters.pool_stride")
pool_padding = config.get("hyper_parameters.pool_padding")
pool_type = config.get("hyper_parameters.pool_type")
hidden_act = config.get("hyper_parameters.hidden_act")
pyramid_model = net.MatchPyramidLayer(
emb_path, vocab_size, emb_size, kernel_num, conv_filter, conv_act,
hidden_size, out_size, pool_size, pool_stride, pool_padding,
pool_type, hidden_act)
return pyramid_model
# define feeds which convert numpy of batch data to paddle.tensor
def create_feeds(self, batch_data, sentence_left_size,
sentence_right_size):
sentence_left = paddle.to_tensor(batch_data[0].numpy().astype('int64')
.reshape(-1, sentence_left_size))
sentence_right = paddle.to_tensor(batch_data[1].numpy().astype('int64')
.reshape(-1, sentence_right_size))
return [sentence_left, sentence_right]
# define loss function by predicts and label
def create_loss(self, prediction):
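# pairwise hinge loss: the first 64 rows of the batch are treated as positive
# pairs and the next 64 as negatives (batch size hard-coded to 128);
# loss = mean(max(0, 1 - pos + neg))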
pos = paddle.slice(
prediction, axes=[0, 1], starts=[0, 0], ends=[64, 1])
neg = paddle.slice(
prediction, axes=[0, 1], starts=[64, 0], ends=[128, 1])
loss_part1 = paddle.subtract(
paddle.full(
shape=[64, 1], fill_value=1.0, dtype='float32'), pos)
loss_part2 = paddle.add(loss_part1, neg)
loss_part3 = paddle.maximum(
paddle.full(
shape=[64, 1], fill_value=0.0, dtype='float32'),
loss_part2)
avg_cost = paddle.mean(loss_part3)
return avg_cost
# define optimizer
def create_optimizer(self, dy_model, config):
lr = config.get("hyper_parameters.optimizer.learning_rate", 0.001)
optimizer = paddle.optimizer.Adam(
learning_rate=lr, parameters=dy_model.parameters())
return optimizer
# define metrics such as auc/acc
# multi-task need to define multi metric
def create_metrics(self):
metrics_list_name = []
metrics_list = []
return metrics_list, metrics_list_name
# construct train forward phase
def METHOD_NAME(self, dy_model, metrics_list, batch_data, config):
sentence_left_size = config.get("hyper_parameters.sentence_left_size")
sentence_right_size = config.get(
"hyper_parameters.sentence_right_size")
batch_size = config.get("runner.train_batch_size", 128)
inputs = self.create_feeds(batch_data, sentence_left_size,
sentence_right_size)
prediction = dy_model.forward(inputs)
loss = self.create_loss(prediction)
# update metrics
print_dict = {"loss": loss}
return loss, metrics_list, print_dict
def infer_forward(self, dy_model, metrics_list, batch_data, config):
sentence_left_size = config.get("hyper_parameters.sentence_left_size")
sentence_right_size = config.get(
"hyper_parameters.sentence_right_size")
batch_size = config.get("runner.infer_batch_size", 128)
inputs = self.create_feeds(batch_data, sentence_left_size,
sentence_right_size)
prediction = dy_model.forward(inputs)
# update metrics
print_dict = {"prediction": prediction}
return metrics_list, print_dict |
get config settings | # Copyright 2015 Red Hat Inc.
#
# SPDX-License-Identifier: Apache-2.0
"""Bandit is a tool designed to find common security issues in Python code."""
import argparse
import importlib
import logging
import os
import sys
import yaml
from bandit.core import extension_loader
PROG_NAME = "bandit_conf_generator"
LOG = logging.getLogger(__name__)
template = """
### Bandit config file generated from:
# '{cli}'
### This config may optionally select a subset of tests to run or skip by
### filling out the 'tests' and 'skips' lists given below. If no tests are
### specified for inclusion then it is assumed all tests are desired. The skips
### set will remove specific tests from the include set. This can be controlled
### using the -t/-s CLI options. Note that the same test ID should not appear
### in both 'tests' and 'skips'; this would be nonsensical and is detected by
### Bandit at runtime.
# Available tests:
{test_list}
# (optional) list included test IDs here, eg '[B101, B406]':
{test}
# (optional) list skipped test IDs here, eg '[B101, B406]':
{skip}
### (optional) plugin settings - some test plugins require configuration data
### that may be given here, per-plugin. All bandit test plugins have a built in
### set of sensible defaults and these will be used if no configuration is
### provided. It is not necessary to provide settings for every (or any) plugin
### if the defaults are acceptable.
{settings}
"""
def init_logger():
"""Init logger."""
LOG.handlers = []
log_level = logging.INFO
log_format_string = "[%(levelname)5s]: %(message)s"
logging.captureWarnings(True)
LOG.setLevel(log_level)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter(log_format_string))
LOG.addHandler(handler)
def parse_args():
"""Parse arguments."""
help_description = """Bandit Config Generator
This tool is used to generate an optional profile. The profile may be used
to include or skip tests and override values for plugins.
When used to store an output profile, this tool will output a template that
includes all plugins and their default settings. Any settings which aren't
being overridden can be safely removed from the profile and default values
will be used. Bandit will prefer settings from the profile over the built
in values."""
parser = argparse.ArgumentParser(
description=help_description,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
"--show-defaults",
dest="show_defaults",
action="store_true",
help="show the default settings values for each "
"plugin but do not output a profile",
)
parser.add_argument(
"-o",
"--out",
dest="output_file",
action="store",
help="output file to save profile",
)
parser.add_argument(
"-t",
"--tests",
dest="tests",
action="store",
default=None,
type=str,
help="list of test names to run",
)
parser.add_argument(
"-s",
"--skip",
dest="skips",
action="store",
default=None,
type=str,
help="list of test names to skip",
)
args = parser.parse_args()
if not args.output_file and not args.show_defaults:
parser.print_help()
parser.exit(1)
return args
def METHOD_NAME():
"""Get configuration settings."""
config = {}
for plugin in extension_loader.MANAGER.plugins:
fn_name = plugin.name
function = plugin.plugin
# if a function takes config...
if hasattr(function, "_takes_config"):
fn_module = importlib.import_module(function.__module__)
# call the config generator if it exists
if hasattr(fn_module, "gen_config"):
config[fn_name] = fn_module.gen_config(function._takes_config)
return yaml.safe_dump(config, default_flow_style=False)
def main():
"""Config generator to write configuration file."""
init_logger()
args = parse_args()
yaml_settings = METHOD_NAME()
if args.show_defaults:
print(yaml_settings)
if args.output_file:
if os.path.exists(os.path.abspath(args.output_file)):
LOG.error("File %s already exists, exiting", args.output_file)
sys.exit(2)
try:
with open(args.output_file, "w") as f:
skips = args.skips.split(",") if args.skips else []
tests = args.tests.split(",") if args.tests else []
for skip in skips:
if not extension_loader.MANAGER.check_id(skip):
raise RuntimeError(f"unknown ID in skips: {skip}")
for test in tests:
if not extension_loader.MANAGER.check_id(test):
raise RuntimeError(f"unknown ID in tests: {test}")
tpl = "# {0} : {1}"
test_list = [
tpl.format(t.plugin._test_id, t.name)
for t in extension_loader.MANAGER.plugins
]
others = [
tpl.format(k, v["name"])
for k, v in (
extension_loader.MANAGER.blacklist_by_id.items()
)
]
test_list.extend(others)
test_list.sort()
contents = template.format(
cli=" ".join(sys.argv),
settings=yaml_settings,
test_list="\n".join(test_list),
skip="skips: " + str(skips) if skips else "skips:",
test="tests: " + str(tests) if tests else "tests:",
)
f.write(contents)
except OSError:
LOG.error("Unable to open %s for writing", args.output_file)
except Exception as e:
LOG.error("Error: %s", e)
else:
LOG.info("Successfully wrote profile: %s", args.output_file)
return 0
if __name__ == "__main__":
sys.exit(main()) |
test future cancel no underlying future | import os
import concurrent.futures as cf
import pytest
from parsl.app.errors import AppException
from parsl.executors.flux.executor import (
FluxExecutor,
FluxFutureWrapper,
_complete_future,
)
try:
import flux.job.executor # noqa: F401
except ImportError:
FLUX_AVAIL = False
else:
FLUX_AVAIL = True
require_flux = pytest.mark.skipif(
not FLUX_AVAIL, reason="Flux not available, test will fail"
)
ERRMSG = "Some error message"
def multiply(x, y):
return x * y
def bad_foo():
raise ValueError(ERRMSG)
@require_flux
@pytest.mark.local
def test_multiply():
with FluxExecutor() as executor:
executor.start()
futures = [executor.submit(multiply, {}, i, 7) for i in range(5)]
for i, future in enumerate(futures):
assert future.result() == i * 7
assert future.done()
assert future.exception() is None
assert isinstance(future, FluxFutureWrapper)
@require_flux
@pytest.mark.local
def test_except():
with FluxExecutor() as executor:
executor.start()
future = executor.submit(bad_foo, {})
with pytest.raises(ValueError, match=ERRMSG):
future.result()
assert isinstance(future.exception(), ValueError)
@require_flux
@pytest.mark.local
@pytest.mark.skipif(
not hasattr(os, "sched_getaffinity") or len(os.sched_getaffinity(0)) < 2,
reason="Not Linux or too few CPUs",
)
def test_affinity():
with FluxExecutor() as executor:
executor.start()
future = executor.submit(os.sched_getaffinity, {"cores_per_task": 2}, 0)
assert len(future.result()) > 1
@require_flux
@pytest.mark.local
def test_cancel():
with FluxExecutor() as executor:
executor.start()
futures = [executor.submit(multiply, {}, i, 9) for i in range(3)]
for i, future in enumerate(futures):
if future.cancel():
assert future.cancelled()
assert future.done()
with pytest.raises(cf.CancelledError):
future.exception()
with pytest.raises(cf.CancelledError):
future.result()
else:
assert future.running()
assert future.done()
assert not future.cancelled()
assert future.result() == i * 9
@pytest.mark.local
def test_future_cancel():
underlying_future = cf.Future()
wrapper_future = FluxFutureWrapper()
wrapper_future._flux_future = underlying_future
assert not wrapper_future.done()
assert not wrapper_future.running()
assert not wrapper_future.cancelled()
assert wrapper_future.cancel() # should cancel underlying future
assert wrapper_future.cancel() # try again for good measure
assert wrapper_future.cancelled()
assert wrapper_future.done()
assert underlying_future.cancelled()
assert underlying_future.done()
@pytest.mark.local
def test_future_running():
underlying_future = cf.Future()
wrapper_future = FluxFutureWrapper()
assert not underlying_future.running()
assert underlying_future.set_running_or_notify_cancel()
assert underlying_future.running()
assert not wrapper_future.running()
wrapper_future._flux_future = underlying_future
assert wrapper_future.running()
@pytest.mark.local
def test_future_callback_returncode():
testfile = ".fluxexecutortest.txt"
returncode = 1
underlying_future = cf.Future()
wrapper_future = FluxFutureWrapper()
wrapper_future._flux_future = underlying_future
underlying_future.add_done_callback(
lambda fut: _complete_future(testfile, wrapper_future, fut)
)
underlying_future.set_result(returncode)
assert wrapper_future.done()
assert isinstance(wrapper_future.exception(), AppException)
@pytest.mark.local
def test_future_callback_nofile():
testfile = ".fluxexecutortest.txt"
returncode = 0
if os.path.isfile(testfile):
os.remove(testfile)
underlying_future = cf.Future()
wrapper_future = FluxFutureWrapper()
wrapper_future._flux_future = underlying_future
underlying_future.add_done_callback(
lambda fut: _complete_future(testfile, wrapper_future, fut)
)
underlying_future.set_result(returncode)
assert wrapper_future.done()
assert isinstance(wrapper_future.exception(), FileNotFoundError)
@pytest.mark.local
def test_future_callback_flux_exception():
underlying_future = cf.Future()
wrapper_future = FluxFutureWrapper()
wrapper_future._flux_future = underlying_future
underlying_future.add_done_callback(
lambda fut: _complete_future(".fluxexecutortest.txt", wrapper_future, fut)
)
underlying_future.set_exception(ValueError())
assert wrapper_future.done()
assert isinstance(wrapper_future.exception(), ValueError)
@pytest.mark.local
def METHOD_NAME():
wrapper_future = FluxFutureWrapper()
assert wrapper_future.cancel()
assert wrapper_future.cancelled()
assert not wrapper_future.running() |
set | """
Support for Debconf
"""
import logging
import os
import re
import salt.utils.files
import salt.utils.path
import salt.utils.stringutils
import salt.utils.versions
log = logging.getLogger(__name__)
__func_alias__ = {"set_": "set"}
# Define the module's virtual name
__virtualname__ = "debconf"
def __virtual__():
"""
Confirm this module is on a Debian based system and that debconf-utils
is installed.
"""
if __grains__["os_family"] != "Debian":
return (
False,
"The debconfmod module could not be loaded: unsupported OS family",
)
if salt.utils.path.which("debconf-get-selections") is None:
return (
False,
"The debconfmod module could not be loaded: "
"debconf-utils is not installed.",
)
return __virtualname__
def _unpack_lines(out):
"""
Unpack the debconf lines
"""
rexp = (
"(?ms)"
"^(?P<package>[^#]\\S+)[\t ]+"
"(?P<question>\\S+)[\t ]+"
"(?P<type>\\S+)[\t ]+"
"(?P<value>[^\n]*)$"
)
lines = re.findall(rexp, out)
return lines
def get_selections(fetchempty=True):
"""
Answers to debconf questions for all packages in the following format::
{'package': [['question', 'type', 'value'], ...]}
CLI Example:
.. code-block:: bash
salt '*' debconf.get_selections
"""
selections = {}
cmd = "debconf-get-selections"
out = __salt__["cmd.run_stdout"](cmd)
lines = _unpack_lines(out)
for line in lines:
package, question, type_, value = line
if fetchempty or value:
(selections.setdefault(package, []).append([question, type_, value]))
return selections
def show(name):
"""
Answers to debconf questions for a package in the following format::
[['question', 'type', 'value'], ...]
If debconf doesn't know about a package, we return None.
CLI Example:
.. code-block:: bash
salt '*' debconf.show <package name>
"""
selections = get_selections()
result = selections.get(name)
return result
def _set_file(path):
"""
Execute the set selections command for debconf
"""
cmd = "debconf-set-selections {}".format(path)
__salt__["cmd.run_stdout"](cmd, python_shell=False)
def METHOD_NAME(package, question, type, value, *extra):
"""
Set answers to debconf questions for a package.
CLI Example:
.. code-block:: bash
salt '*' debconf.set <package> <question> <type> <value> [<value> ...]
"""
if extra:
value = " ".join((value,) + tuple(extra))
fd_, fname = salt.utils.files.mkstemp(prefix="salt-", close_fd=False)
line = "{} {} {} {}".format(package, question, type, value)
os.write(fd_, salt.utils.stringutils.to_bytes(line))
os.close(fd_)
_set_file(fname)
os.unlink(fname)
return True
def set_template(path, template, context, defaults, saltenv="base", **kwargs):
"""
Set answers to debconf questions from a template.
path
location of the file containing the package selections
template
template format
context
variables to add to the template environment
defaults
default values for the template environment
CLI Example:
.. code-block:: bash
salt '*' debconf.set_template salt://pathto/pkg.selections.jinja jinja None None
"""
path = __salt__["cp.get_template"](
path=path,
dest=None,
template=template,
saltenv=saltenv,
context=context,
defaults=defaults,
**kwargs
)
return set_file(path, saltenv, **kwargs)
def set_file(path, saltenv="base", **kwargs):
"""
Set answers to debconf questions from a file.
CLI Example:
.. code-block:: bash
salt '*' debconf.set_file salt://pathto/pkg.selections
"""
if "__env__" in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop("__env__")
path = __salt__["cp.cache_file"](path, saltenv)
if path:
_set_file(path)
return True
return False |
setup | """ Substitution of osparc variables and secrets
"""
import logging
from collections.abc import Callable, Mapping
from copy import deepcopy
from typing import Any
from fastapi import FastAPI
from models_library.projects import ProjectID
from models_library.projects_nodes_io import NodeID
from models_library.services import ServiceKey, ServiceVersion
from models_library.users import UserID
from models_library.utils.specs_substitution import SpecsSubstitutionsResolver
from pydantic import EmailStr
from ..utils.db import get_repository
from ..utils.osparc_variables import (
ContextDict,
OsparcVariablesTable,
resolve_variables_from_context,
)
from .db.repositories.services_environments import ServicesEnvironmentsRepository
_logger = logging.getLogger(__name__)
async def substitute_vendor_secrets_in_specs(
app: FastAPI,
specs: dict[str, Any],
*,
service_key: ServiceKey,
service_version: ServiceVersion,
) -> dict[str, Any]:
assert specs # nosec
resolver = SpecsSubstitutionsResolver(specs, upgrade=False)
repo = get_repository(app, ServicesEnvironmentsRepository)
if any(repo.is_vendor_secret_identifier(idr) for idr in resolver.get_identifiers()):
# check beforehand to avoid unnecessary calls to pg
vendor_secrets = await repo.get_vendor_secrets(
service_key=service_key, service_version=service_version
)
# resolve substitutions
resolver.set_substitutions(environs=vendor_secrets)
new_specs: dict[str, Any] = resolver.run()
return new_specs
return deepcopy(specs)
async def resolve_and_substitute_session_variables_in_specs(
app: FastAPI,
specs: dict[str, Any],
*,
user_id: UserID,
product_name: str,
project_id: ProjectID,
node_id: NodeID,
) -> dict[str, Any]:
assert specs # nosec
table: OsparcVariablesTable = app.state.session_variables_table
resolver = SpecsSubstitutionsResolver(specs, upgrade=False)
if requested := set(resolver.get_identifiers()):
available = set(table.variables_names())
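# substitute only the variables that are both referenced in the specs and
# registered in the table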
if identifiers := available.intersection(requested):
environs = await resolve_variables_from_context(
table.copy(include=identifiers),
context=ContextDict(
app=app,
user_id=user_id,
product_name=product_name,
project_id=project_id,
node_id=node_id,
),
)
resolver.set_substitutions(environs=environs)
new_specs: dict[str, Any] = resolver.run()
return new_specs
return deepcopy(specs)
async def resolve_and_substitute_lifespan_variables_in_specs(
_app: FastAPI,
_specs: dict[str, Any],
*,
_callbacks_registry: Mapping[str, Callable],
):
raise NotImplementedError
async def _request_user_email(app: FastAPI, user_id: UserID) -> EmailStr:
repo = get_repository(app, ServicesEnvironmentsRepository)
return await repo.get_user_email(user_id=user_id)
async def _request_user_role(app: FastAPI, user_id: UserID):
repo = get_repository(app, ServicesEnvironmentsRepository)
return await repo.get_user_role(user_id=user_id)
def _setup_session_osparc_variables(app: FastAPI):
app.state.session_variables_table = table = OsparcVariablesTable()
# Registers some session osparc_variables
# WARNING: context_name needs to match session_context!
for name, context_name in [
("OSPARC_VARIABLE_PRODUCT_NAME", "product_name"),
("OSPARC_VARIABLE_STUDY_UUID", "project_id"),
("OSPARC_VARIABLE_NODE_ID", "node_id"),
]:
table.register_from_context(name, context_name)
table.register_from_handler("OSPARC_VARIABLE_USER_EMAIL")(_request_user_email)
table.register_from_handler("OSPARC_VARIABLE_USER_ROLE")(_request_user_role)
_logger.debug(
"Registered session_variables_table=%s", sorted(table.variables_names())
)
def METHOD_NAME(app: FastAPI):
"""
**osparc variables and secrets** are identifier-value maps that are substituted on the service specs (e.g. docker-compose).
- **vendor secrets**: information set by a vendor on the platform, e.g. a vendor service license
- **session variables**: session information such as the "current user email" or the "current product name"
- **lifespan variables**: produced before a service is started and cleaned up after it finishes (e.g. API tokens)
"""
def on_startup() -> None:
_setup_session_osparc_variables(app)
app.add_event_handler("startup", on_startup) |
finalize submodules | from collections.abc import Iterable
from ..._utils import flatten, deprecated
from ...hdl import dsl, ir
__all__ = ["Module", "FinalizeError"]
def _flat_list(e):
if isinstance(e, Iterable):
return list(flatten(e))
else:
return [e]
class CompatFinalizeError(Exception):
pass
FinalizeError = CompatFinalizeError
class _CompatModuleProxy:
def __init__(self, cm):
object.__setattr__(self, "_cm", cm)
class _CompatModuleComb(_CompatModuleProxy):
@deprecated("instead of `self.comb +=`, use `m.d.comb +=`")
def __iadd__(self, assigns):
self._cm._module._add_statement(assigns, domain=None, depth=0, compat_mode=True)
return self
class _CompatModuleSyncCD:
def __init__(self, cm, cd):
self._cm = cm
self._cd = cd
@deprecated("instead of `self.sync.<domain> +=`, use `m.d.<domain> +=`")
def __iadd__(self, assigns):
self._cm._module._add_statement(assigns, domain=self._cd, depth=0, compat_mode=True)
return self
class _CompatModuleSync(_CompatModuleProxy):
@deprecated("instead of `self.sync +=`, use `m.d.sync +=`")
def __iadd__(self, assigns):
self._cm._module._add_statement(assigns, domain="sync", depth=0, compat_mode=True)
return self
def __getattr__(self, name):
return _CompatModuleSyncCD(self._cm, name)
def __setattr__(self, name, value):
if not isinstance(value, _CompatModuleSyncCD):
raise AttributeError("Attempted to assign sync property - use += instead")
class _CompatModuleSpecials(_CompatModuleProxy):
@deprecated("instead of `self.specials.<name> =`, use `m.submodules.<name> =`")
def __setattr__(self, name, value):
self._cm._submodules.append((name, value))
setattr(self._cm, name, value)
@deprecated("instead of `self.specials +=`, use `m.submodules +=`")
def __iadd__(self, other):
self._cm._submodules += [(None, e) for e in _flat_list(other)]
return self
class _CompatModuleSubmodules(_CompatModuleProxy):
@deprecated("instead of `self.submodules.<name> =`, use `m.submodules.<name> =`")
def __setattr__(self, name, value):
self._cm._submodules.append((name, value))
setattr(self._cm, name, value)
@deprecated("instead of `self.submodules +=`, use `m.submodules +=`")
def __iadd__(self, other):
self._cm._submodules += [(None, e) for e in _flat_list(other)]
return self
class _CompatModuleClockDomains(_CompatModuleProxy):
@deprecated("instead of `self.clock_domains.<name> =`, use `m.domains.<name> =`")
def __setattr__(self, name, value):
self.__iadd__(value)
setattr(self._cm, name, value)
@deprecated("instead of `self.clock_domains +=`, use `m.domains +=`")
def __iadd__(self, other):
self._cm._module.domains += _flat_list(other)
return self
class CompatModule(ir.Elaboratable):
_MustUse__silence = True
# Actually returns another Amaranth Elaboratable (amaranth.dsl.Module), not a Fragment.
def get_fragment(self):
assert not self.get_fragment_called
self.get_fragment_called = True
self.finalize()
return self._module
def elaborate(self, platform):
if not self.get_fragment_called:
self.get_fragment()
return self._module
def __getattr__(self, name):
if name == "comb":
return _CompatModuleComb(self)
elif name == "sync":
return _CompatModuleSync(self)
elif name == "specials":
return _CompatModuleSpecials(self)
elif name == "submodules":
return _CompatModuleSubmodules(self)
elif name == "clock_domains":
return _CompatModuleClockDomains(self)
elif name == "finalized":
self.finalized = False
return self.finalized
elif name == "_module":
self._module = dsl.Module()
return self._module
elif name == "_submodules":
self._submodules = []
return self._submodules
elif name == "_clock_domains":
self._clock_domains = []
return self._clock_domains
elif name == "get_fragment_called":
self.get_fragment_called = False
return self.get_fragment_called
else:
raise AttributeError("'{}' object has no attribute '{}'"
.format(type(self).__name__, name))
def finalize(self, *args, **kwargs):
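# submodules are finalized both before and after do_finalize(), since
# do_finalize() may register additional submodules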
def METHOD_NAME():
for name, submodule in self._submodules:
if not hasattr(submodule, "finalize"):
continue
if submodule.finalized:
continue
submodule.finalize(*args, **kwargs)
if not self.finalized:
self.finalized = True
METHOD_NAME()
self.do_finalize(*args, **kwargs)
METHOD_NAME()
for name, submodule in self._submodules:
self._module._add_submodule(submodule, name)
def do_finalize(self):
pass
Module = CompatModule |
import success message | """Display information to help diagnose install issues.
Currently shows
- OS info
- Python info
- Gtk3 availability (gi.repository vs pgi)
This can be complemented by running the unittests.
"""
import os
import platform
import sys
import traceback
from collections import namedtuple
COL_WIDTH = 10
AvailableModules = namedtuple("AvailableModules", "gi pgi meta pango pubsub rsvg vext")
def display_platform():
# environment info
is_virtualenv = "VIRTUAL_ENV" in os.environ
# operating system info
def linux_distribution():
try:
return platform.linux_distribution()
except:
return "N/A"
# operating system info
def platform_dist():
try:
return platform.dist()
except:
return "N/A"
print(
"""Python:
sys.executable: %s
virtualenv: %s
version: %s
dist: %s
linux_distribution: %s
system: %s
machine: %s
platform: %s
version: %s
mac_ver: %s
win32_ver: %s
"""
% (
sys.executable,
is_virtualenv or "no",
" ".join(sys.version.split("\n")),
str(" ".join(platform_dist())),
" ".join(linux_distribution()),
platform.system(),
platform.machine(),
platform.platform(),
platform.version(),
platform.mac_ver(),
platform.win32_ver(),
),
)
def METHOD_NAME(module, name):
return "\n".join(
[
f" import {name.ljust(COL_WIDTH)} [success]:",
" " + module.__file__,
],
)
def import_fail_message(mn, reason):
return "\n".join(
[f" import {mn.ljust(COL_WIDTH)} [failed]:", " " + reason],
)
def test_import(name, failmsg=None, gi_require=None, gi=None):
if all([gi_require, gi]):
try:
gi.require_version(*gi_require)
except ValueError as ex:
# import will be attempted anyway
err = ex.args[0]
print(import_fail_message(name, err))
return
try:
module = __import__(name)
print(METHOD_NAME(module, name))
return module
except ImportError as e:
print(import_fail_message(name, failmsg or str(e)))
except Exception as e:
print(import_fail_message(name, str(e)))
def test_imports():
"""Attempt to import dependencies."""
print("Test Imports:")
# gtk
gi = test_import("gi")
pgi = test_import("pgi")
_gi = gi or pgi
if gi:
pango = test_import("gi.repository.Pango", gi_require=("Pango", "1.0"), gi=_gi)
else:
print(" No gi.repository.Pango implementation, text will not be available")
pango = None
# virtualenv help
vext = test_import("vext")
# internal dependencies
pubsub = test_import("pubsub")
meta = test_import("meta")
rsvg = test_import(
"gi.repository.Rsvg", "SVG Support unavailable", gi_require=("Rsvg", "2.0"),
)
return (
test_import("shoebot"),
AvailableModules(
gi=gi, pgi=pgi, meta=meta, pubsub=pubsub, rsvg=rsvg, vext=vext, pango=pango,
),
)
def shoebot_example(**shoebot_kwargs):
"""Decorator to run some code in a bot instance."""
def decorator(f):
def run():
from shoebot.util import ShoebotInstallError
print(f" Shoebot - {f.__name__.replace('_', ' ')}:")
try:
import shoebot
outputfile = f"/tmp/shoebot-{f.__name__}.png"
bot = shoebot.create_bot(outputfile=outputfile)
f(bot)
bot.finish()
print(f" [passed] : {outputfile}")
print("")
except ShoebotInstallError as e:
print(" [failed]", e.args[0])
print("")
except Exception:
print(" [failed] - traceback:")
for line in traceback.format_exc().splitlines():
print(f" {line}")
print("")
return run
return decorator
@shoebot_example()
def standard_module_example(bot):
bot.size(640, 480)
bot.fill(1, 0.5, 0.1)
bot.rect(10, 10, 100, 100)
@shoebot_example()
def module_using_text(bot):
bot.size(640, 480)
bot.stroke(0)
bot.text("Should work with gi not pgi", 0, 0)
def display_graphics_implementation():
print("Graphics Implementation:")
try:
from shoebot.core.backend import driver
for k, v in list(driver.get_libs().items()):
print(f" {k}: {v}")
except Exception as e:
raise
def diagnose():
display_platform()
shoebot_module, available_modules = test_imports()
if not shoebot_module:
print("Skipping shoebot module tests.")
return
display_graphics_implementation()
try:
import shoebot
except ImportError as e:
print("Cannot 'import shoebot'")
traceback.print_exc()
return False
print("\nShoebot Tests:")
# shoebot itself
standard_module_example()
# shoebot with text (will fail under pypy or pgi)
if available_modules.pango:
module_using_text()
if __name__ == "__main__":
diagnose() |
rebuild ctype | #
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import sys
import ctypes
import weakref
from multiprocessing import heap, RLock
from multiprocessing.forking import assert_spawning, ForkingPickler
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
#
#
#
typecode_to_type = {
'c': ctypes.c_char,
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
'h': ctypes.c_short, 'H': ctypes.c_ushort,
'i': ctypes.c_int, 'I': ctypes.c_uint,
'l': ctypes.c_long, 'L': ctypes.c_ulong,
'f': ctypes.c_float, 'd': ctypes.c_double
}
try:
typecode_to_type['u'] = ctypes.c_wchar
except AttributeError:
pass
#
#
#
def _new_value(type_):
size = ctypes.sizeof(type_)
wrapper = heap.BufferWrapper(size)
return METHOD_NAME(type_, wrapper, None)
def RawValue(typecode_or_type, *args):
'''
Returns a ctypes object allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
obj.__init__(*args)
return obj
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a ctypes array allocated from shared memory
'''
type_ = typecode_to_type.get(typecode_or_type, typecode_or_type)
if isinstance(size_or_initializer, (int, long)):
type_ = type_ * size_or_initializer
obj = _new_value(type_)
ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj))
return obj
else:
type_ = type_ * len(size_or_initializer)
result = _new_value(type_)
result.__init__(*size_or_initializer)
return result
def Value(typecode_or_type, *args, **kwds):
'''
Return a synchronization wrapper for a Value
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawValue(typecode_or_type, *args)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def Array(typecode_or_type, size_or_initializer, **kwds):
'''
Return a synchronization wrapper for a RawArray
'''
lock = kwds.pop('lock', None)
if kwds:
raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
obj = RawArray(typecode_or_type, size_or_initializer)
if lock is False:
return obj
if lock in (True, None):
lock = RLock()
if not hasattr(lock, 'acquire'):
raise AttributeError("'%r' has no method 'acquire'" % lock)
return synchronized(obj, lock)
def copy(obj):
new_obj = _new_value(type(obj))
ctypes.pointer(new_obj)[0] = obj
return new_obj
def synchronized(obj, lock=None):
assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
if isinstance(obj, ctypes._SimpleCData):
return Synchronized(obj, lock)
elif isinstance(obj, ctypes.Array):
if obj._type_ is ctypes.c_char:
return SynchronizedString(obj, lock)
return SynchronizedArray(obj, lock)
else:
cls = type(obj)
try:
scls = class_cache[cls]
except KeyError:
names = [field[0] for field in cls._fields_]
d = dict((name, make_property(name)) for name in names)
classname = 'Synchronized' + cls.__name__
scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
return scls(obj, lock)
#
# Functions for pickling/unpickling
#
def reduce_ctype(obj):
assert_spawning(obj)
if isinstance(obj, ctypes.Array):
return METHOD_NAME, (obj._type_, obj._wrapper, obj._length_)
else:
return METHOD_NAME, (type(obj), obj._wrapper, None)
def METHOD_NAME(type_, wrapper, length):
if length is not None:
type_ = type_ * length
ForkingPickler.register(type_, reduce_ctype)
obj = type_.from_address(wrapper.get_address())
obj._wrapper = wrapper
return obj
#
# Function to create properties
#
def make_property(name):
try:
return prop_cache[name]
except KeyError:
d = {}
exec template % ((name,)*7) in d
prop_cache[name] = d[name]
return d[name]
template = '''
def get%s(self):
self.acquire()
try:
return self._obj.%s
finally:
self.release()
def set%s(self, value):
self.acquire()
try:
self._obj.%s = value
finally:
self.release()
%s = property(get%s, set%s)
'''
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()
#
# Synchronized wrappers
#
class SynchronizedBase(object):
def __init__(self, obj, lock=None):
self._obj = obj
self._lock = lock or RLock()
self.acquire = self._lock.acquire
self.release = self._lock.release
def __reduce__(self):
assert_spawning(self)
return synchronized, (self._obj, self._lock)
def get_obj(self):
return self._obj
def get_lock(self):
return self._lock
def __repr__(self):
return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
class Synchronized(SynchronizedBase):
value = make_property('value')
class SynchronizedArray(SynchronizedBase):
def __len__(self):
return len(self._obj)
def __getitem__(self, i):
self.acquire()
try:
return self._obj[i]
finally:
self.release()
def __setitem__(self, i, value):
self.acquire()
try:
self._obj[i] = value
finally:
self.release()
def __getslice__(self, start, stop):
self.acquire()
try:
return self._obj[start:stop]
finally:
self.release()
def __setslice__(self, start, stop, values):
self.acquire()
try:
self._obj[start:stop] = values
finally:
self.release()
class SynchronizedString(SynchronizedArray):
value = make_property('value')
raw = make_property('raw') |
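#
# Illustrative usage (not part of the original module): a minimal sketch of the
# public API, sharing a synchronized Value and Array between processes. The
# names below (counter, data, worker) are hypothetical, and the sketch assumes
# the masked helper (METHOD_NAME) behaves as the usual rebuild function so the
# shared objects can be created and pickled.
#
if __name__ == '__main__':
    from multiprocessing import Process

    counter = Value('i', 0)          # synchronized c_int; an RLock is created implicitly
    data = Array('d', [0.0] * 4)     # synchronized array of doubles

    def worker(counter, data):
        # get_lock() returns the underlying RLock guarding the shared value
        with counter.get_lock():
            counter.value += 1
        data[0] = 3.14

    p = Process(target=worker, args=(counter, data))
    p.start()
    p.join()
    print(counter.value)
    print(data[0])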
test default realizations | from argparse import Namespace
from uuid import UUID
import pytest
from ert.cli import model_factory
from ert.libres_facade import LibresFacade
from ert.run_models import (
EnsembleExperiment,
EnsembleSmoother,
IteratedEnsembleSmoother,
MultipleDataAssimilation,
SingleTestRun,
)
@pytest.mark.parametrize(
"target_case, format_mode, expected",
[
("test", False, "test"),
(None, False, "default_smoother_update"),
(None, True, "default_%d"),
],
)
def test_target_case_name(target_case, expected, format_mode, poly_case):
ert = poly_case
args = Namespace(current_case="default", target_case=target_case)
assert (
model_factory._target_case_name(ert, args, format_mode=format_mode) == expected
)
def METHOD_NAME(poly_case):
facade = LibresFacade(poly_case)
args = Namespace(realizations=None)
assert (
model_factory._realizations(args, facade.get_ensemble_size())
== [True] * facade.get_ensemble_size()
)
def test_custom_realizations(poly_case):
facade = LibresFacade(poly_case)
args = Namespace(realizations="0-4,7,8")
ensemble_size = facade.get_ensemble_size()
active_mask = [False] * ensemble_size
active_mask[0] = True
active_mask[1] = True
active_mask[2] = True
active_mask[3] = True
active_mask[4] = True
active_mask[7] = True
active_mask[8] = True
assert model_factory._realizations(args, ensemble_size) == active_mask
def test_setup_single_test_run(poly_case, storage):
ert = poly_case
model = model_factory._setup_single_test_run(
ert, storage, Namespace(current_case="default"), UUID(int=0)
)
assert isinstance(model, SingleTestRun)
assert len(model._simulation_arguments.keys()) == 3
assert "active_realizations" in model._simulation_arguments
def test_setup_ensemble_experiment(poly_case, storage):
ert = poly_case
args = Namespace(realizations=None, iter_num=1, current_case="default")
model = model_factory._setup_ensemble_experiment(
ert,
storage,
args,
UUID(int=0),
)
assert isinstance(model, EnsembleExperiment)
assert len(model._simulation_arguments.keys()) == 4
assert "active_realizations" in model._simulation_arguments
def test_setup_ensemble_smoother(poly_case, storage):
ert = poly_case
args = Namespace(
realizations="0-4,7,8", current_case="default", target_case="test_case"
)
model = model_factory._setup_ensemble_smoother(
ert,
storage,
args,
UUID(int=0),
)
assert isinstance(model, EnsembleSmoother)
assert len(model._simulation_arguments.keys()) == 5
assert "active_realizations" in model._simulation_arguments
assert "target_case" in model._simulation_arguments
assert "analysis_module" in model._simulation_arguments
def test_setup_multiple_data_assimilation(poly_case, storage):
ert = poly_case
args = Namespace(
realizations="0-4,7,8",
weights="6,4,2",
current_case="default",
target_case="test_case_%d",
start_iteration="0",
restart_run=False,
prior_ensemble="default",
)
model = model_factory._setup_multiple_data_assimilation(
ert,
storage,
args,
UUID(int=0),
)
assert isinstance(model, MultipleDataAssimilation)
assert len(model._simulation_arguments.keys()) == 8
assert "active_realizations" in model._simulation_arguments
assert "target_case" in model._simulation_arguments
assert "analysis_module" in model._simulation_arguments
assert "weights" in model._simulation_arguments
def test_setup_iterative_ensemble_smoother(poly_case, storage):
ert = poly_case
args = Namespace(
realizations="0-4,7,8",
current_case="default",
target_case="test_case_%d",
num_iterations="10",
)
model = model_factory._setup_iterative_ensemble_smoother(
ert,
storage,
args,
UUID(int=0),
)
assert isinstance(model, IteratedEnsembleSmoother)
assert len(model._simulation_arguments.keys()) == 6
assert "active_realizations" in model._simulation_arguments
assert "target_case" in model._simulation_arguments
assert "analysis_module" in model._simulation_arguments
assert "num_iterations" in model._simulation_arguments
assert LibresFacade(ert).get_number_of_iterations() == 10 |
run | """Support for OK-formatted test files"""
import os
import io
import doctest
import warnings
import pathlib
from contextlib import redirect_stderr, redirect_stdout
from textwrap import dedent
from .abstract_test import TestFile, TestCase, TestCaseResult
from ..utils import hide_outputs
def run_doctest(name, doctest_string, global_environment):
"""
Run a single test with given ``global_environment``. Returns ``(True, '')`` if the doctest passes.
Returns ``(False, failure_message)`` if the doctest fails.
Args:
name (``str``): name of doctest
doctest_string (``str``): doctest in string form
global_environment (``dict``): global environment resulting from the execution of a python
script/notebook
Returns:
``tuple`` of (``bool``, ``str``): results from running the test
"""
examples = doctest.DocTestParser().parse(
doctest_string,
name
)
test = doctest.DocTest(
[e for e in examples if isinstance(e, doctest.Example)],
global_environment,
name,
None,
None,
doctest_string
)
doctestrunner = doctest.DocTestRunner(verbose=True)
runresults = io.StringIO()
with redirect_stdout(runresults), redirect_stderr(runresults), hide_outputs():
doctestrunner.METHOD_NAME(test, clear_globs=False)
with open(os.devnull, 'w') as f, redirect_stderr(f), redirect_stdout(f):
result = doctestrunner.summarize(verbose=True)
# An individual test can only pass or fail
if result.failed == 0:
return (True, '')
else:
return False, runresults.getvalue()
class OKTestFile(TestFile):
"""
A single OK-formatted test file for Otter.
Args:
name (``str``): the name of test file
path (``str``): the path to the test file
test_cases (``list`` of ``TestCase``): a list of parsed tests to be run
value (``int``, optional): the point value of this test, defaults to 1
all_or_nothing (``bool``, optional): whether the test should be graded all-or-nothing across
cases
Attributes:
name (``str``): the name of test file
path (``str``): the path to the test file
test_cases (``list`` of ``TestCase``): a list of parsed tests to be run
value (``int``): the point value of this test, defaults to 1
all_or_nothing (``bool``): whether the test should be graded all-or-nothing across
cases
passed_all (``bool``): whether all of the test cases were passed
test_case_results (``list`` of ``TestCaseResult``): a list of results for the test cases in
``test_cases``
grade (``float``): the percentage of ``points`` earned for this test file as a decimal
"""
def METHOD_NAME(self, global_environment):
"""
Run the test cases on ``global_environment``, saving the results in
``self.test_case_results``.
Arguments:
``global_environment`` (``dict``): result of executing a Python notebook/script
"""
for i, test_case in enumerate(self.test_cases):
passed, result = run_doctest(self.name + ' ' + str(i), test_case.body, global_environment)
if passed:
result = '✅ Test case passed'
else:
result = '❌ Test case failed\n' + result
self.test_case_results.append(TestCaseResult(
test_case = test_case,
message = result,
passed = passed,
))
@classmethod
def from_spec(cls, test_spec, path=""):
"""
Parse an OK-formatted ``dict`` and return an ``OKTestFile``.
Args:
test_spec (``dict[str: object]``): the OK-formatted ``dict``
path (``str``, optional): the path to the test file this ``dict`` was parsed from
Returns:
``OKTestFile``: the new ``OKTestFile`` object created from the given file
"""
# Make sure there is a name
assert 'name' in test_spec
# Do not support multiple suites in the same file
assert len(test_spec['suites']) == 1
test_suite = test_spec['suites'][0]
        # Only doctest suites are supported; other suite types are not handled here
assert test_suite.get('type', 'doctest') == 'doctest'
        # Setup and teardown are not supported
assert not bool(test_suite.get('setup'))
assert not bool(test_suite.get('teardown'))
test_cases = []
for i, test_case in enumerate(test_spec['suites'][0]['cases']):
test_cases.append(TestCase(
name = test_case.get('name', f"{test_spec['name']} - {i + 1}"),
body = dedent(test_case['code']),
hidden = test_case.get('hidden', True),
points = test_case.get('points', None),
success_message = test_case.get('success_message', None),
failure_message = test_case.get('failure_message', None)
))
# resolve point values for each test case
spec_pts = test_spec.get('points', None)
test_cases = cls.resolve_test_file_points(spec_pts, test_cases)
# convert path into PurePosixPath for test name
path = str(pathlib.Path(path).as_posix())
# grab whether the tests are all-or-nothing
all_or_nothing = test_spec.get('all_or_nothing', True)
return cls(test_spec['name'], path, test_cases, all_or_nothing)
@classmethod
def from_file(cls, path):
"""
Parse an OK-formatted test file & return an ``OKTestFile``.
Args:
path (``str``): the path to the test file
Returns:
``OKTestFile``: the new ``OKTestFile`` object created from the given file
"""
# ok test files are python files, with a global 'test' defined
test_globals = {}
with open(path) as f:
exec(f.read(), test_globals)
test_spec = test_globals['test']
return cls.from_spec(test_spec, path=path) |
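# Illustrative usage (not part of the original module): a minimal sketch of
# building an ``OKTestFile`` from an OK-format spec dict. The spec below is a
# hypothetical example rather than a real assignment, and the sketch assumes
# ``TestCase`` exposes its fields (``name``, ``hidden``) as attributes.
if __name__ == "__main__":
    sample_spec = {
        "name": "q1",
        "points": 1,
        "suites": [{
            "type": "doctest",
            "cases": [{
                "code": ">>> 1 + 2\n3",
                "hidden": False,
            }],
        }],
    }
    test_file = OKTestFile.from_spec(sample_spec, path="tests/q1.py")
    for case in test_file.test_cases:
        print(case.name, case.hidden)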
biosample characterization | import pytest
@pytest.fixture
def biosample_characterization_no_review(testapp, award, lab, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def METHOD_NAME(testapp, award, lab, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_2nd_opinion(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'requires secondary opinion',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_exempt(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'exempt from standards',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_not_compliant(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'not compliant',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_compliant(testapp, award, lab, submitter, biosample, attachment):
item = {
'characterizes': biosample['@id'],
'award': award['@id'],
'lab': lab['@id'],
'attachment': attachment,
'review': {
'status': 'compliant',
'lab': lab['@id'],
'reviewed_by': submitter['@id'],
},
}
return testapp.post_json('/biosample_characterization', item).json['@graph'][0]
@pytest.fixture
def biosample_characterization_1(biosample_characterization_base):
item = biosample_characterization_base.copy()
item.update({
'schema_version': '2',
'status': 'APPROVED',
'characterization_method': 'immunofluorescence',
})
return item
@pytest.fixture
def biosample_characterization_2(biosample_characterization_base):
item = biosample_characterization_base.copy()
item.update({
'schema_version': '3',
'status': 'IN PROGRESS',
'award': '1a4d6443-8e29-4b4a-99dd-f93e72d42418'
})
return item
@pytest.fixture
def antibody_characterization_3(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'schema_version': '4',
'characterization_method': 'immunoblot',
})
return item
@pytest.fixture
def biosample_characterization_4(root, METHOD_NAME, publication):
item = root.get_by_uuid(METHOD_NAME['uuid'])
properties = item.properties.copy()
properties.update({
'schema_version': '4',
'references': [publication['identifiers'][0]],
})
return properties
@pytest.fixture
def antibody_characterization_10(antibody_characterization_1):
item = antibody_characterization_1.copy()
item.update({
'status': 'pending dcc review',
'characterization_method': 'immunoprecipitation followed by mass spectrometry',
'comment': 'We tried really hard to characterize this antibody.',
'notes': 'Your plea has been noted.'
})
return item
@pytest.fixture
def antibody_characterization_11(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'K562',
'biosample_term_id': 'EFO:0002067',
'lane_status': 'exempt from standards',
'biosample_type': 'immortalized cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item
@pytest.fixture
def antibody_characterization_13(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'HUES62',
'biosample_term_id': 'EFO:0007087',
'lane_status': 'exempt from standards',
'biosample_type': 'induced pluripotent stem cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item
@pytest.fixture
def antibody_characterization_14(antibody_characterization):
item = antibody_characterization.copy()
item.update({
'characterization_reviews': [{
'biosample_term_name': 'A549',
'biosample_term_id': 'EFO:0001086',
'lane_status': 'exempt from standards',
'biosample_type': 'cell line',
'lane': 2,
'organism': '/organisms/human/'
}]
})
return item |
target positive classification loss | import numpy as np
import pytest
from paz.optimization.losses.multi_box_loss import MultiBoxLoss
@pytest.fixture
def loss():
return MultiBoxLoss()
@pytest.fixture
def y_true():
y_true = np.array(
[[38.38629, 48.666668, 10.362101, 11.512976, 0., 1.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0.],
[27.143208, 34.41253, 8.629259, 9.7801285, 1., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0.],
[27.143208, 68.82506, 8.629259, 13.245829, 1., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0.,
0.]], dtype='float32')
return np.expand_dims(y_true, axis=0)
@pytest.fixture
def y_pred():
y_pred = np.array(
[[36.99653894, 46.4176432, 10.35266677, 10.1656072, 0.05621409,
0.98060555, 0.01017545, 0.03181472, 0.02227341, 0.00503445,
0.00746015, 0.15980312, 0.10174269, 0.01672697, 0.0111077,
0.02144868, 0.07346129, 0.03899017, 0.01071656, 0.03946776,
0.0830264, 0.06763985, 0.04077367, 0.07804006, 0.04347721],
[26.614379, 32.0909085, 4.2000501, 7.0869583, 0.0423508, 0.91125538,
0.04441671, 0.03053759, 0.07411292, 0.03454058, 0.04849431,
0.0592223, 0.0134144, 0.09800261, 0.0433236, 0.04486571,
0.01135817, 0.08123691, 0.02096761, 0.03070671, 0.04680151,
0.12522466, 0.06783583, 0.05873021, 0.01260151],
[2.16936564, 4.4787911, 6.314962, 4.42737758, 0.83406942,
0.04166197, 0.01605819, 0.04750001, 0.01329675, 0.0126452,
0.02085183, 0.0172693, 0.03088947, 0.02661936, 0.01231482,
0.04099588, 0.02453831, 0.07038483, 0.06579002, 0.13424149,
0.04614118, 0.03297557, 0.1374058, 0.15315633, 0.02119431]],
dtype='float32')
return np.expand_dims(y_pred, axis=0)
@pytest.fixture
def target_multibox_loss():
return 6.8489789962768555
@pytest.fixture
def target_smooth_l1_loss():
return np.array([[3.5220284, 8.989227, 98.507996]], dtype='float32')
@pytest.fixture
def target_cross_entropy_loss():
return np.array([[0.019584997, 3.161768, 0.18143862]], dtype='float32')
@pytest.fixture
def target_localization_loss():
return np.array(3.4861877, dtype='float32')
@pytest.fixture
def METHOD_NAME():
return np.array(0.019584997, dtype='float32')
@pytest.fixture
def target_negative_classification_loss():
return np.array(3.3432066, dtype='float32')
def test_multiboxloss(y_true, y_pred, loss, target_multibox_loss):
total_loss = loss.compute_loss(y_true, y_pred)
assert (float(total_loss) == target_multibox_loss)
def test_smooth_l1_loss(y_true, y_pred, loss, target_smooth_l1_loss):
smooth_l1_loss = loss._smooth_l1(y_true, y_pred)
smooth_l1_loss = np.asarray(smooth_l1_loss, dtype='float32')
print(smooth_l1_loss - target_smooth_l1_loss)
assert np.allclose(smooth_l1_loss, target_smooth_l1_loss)
def test_cross_entropy_loss(y_true, y_pred, loss, target_cross_entropy_loss):
cross_entropy_loss = loss._cross_entropy(y_true, y_pred)
cross_entropy_loss = np.asarray(cross_entropy_loss, dtype='float32')
assert np.allclose(cross_entropy_loss, target_cross_entropy_loss)
def test_localization_loss(y_true, y_pred, loss, target_localization_loss):
localization_loss = loss.localization(y_true, y_pred)
localization_loss = np.asarray(localization_loss, dtype='float32')
assert np.allclose(localization_loss, target_localization_loss)
def test_positive_classification_loss(y_true, y_pred, loss,
METHOD_NAME):
positive_classification_loss = loss.positive_classification(y_true, y_pred)
positive_classification_loss = np.asarray(
positive_classification_loss, dtype='float32')
assert np.allclose(
positive_classification_loss, METHOD_NAME)
def test_negative_classification_loss(y_true, y_pred, loss,
target_negative_classification_loss):
negative_classification_loss = loss.negative_classification(y_true, y_pred)
negative_classification_loss = np.asarray(
negative_classification_loss, dtype='float32')
assert np.allclose(
negative_classification_loss, target_negative_classification_loss) |
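# Illustrative sketch (not part of the original test module): with the fixture
# values above, the total multibox loss equals the sum of the localization term
# and the positive and negative classification terms (up to float tolerance),
# which is consistent with the default loss weighting.
if __name__ == "__main__":
    total = 3.4861877 + 0.019584997 + 3.3432066
    print(total)  # ~6.848979, matching target_multibox_loss (6.8489789962768555)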
get eeprom dom raw | # Platform-specific SFP transceiver interface for SONiC
# This plugin supports QSFP and SFP.
try:
import time
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
class SfpUtil(SfpUtilBase):
"""Platform-specific SfpUtil class"""
PORT_START = 0
PORT_END = 34
QSFP_PORT_START = 0
QSFP_PORT_END = 32
SFP_PORT_START = 33
SFP_PORT_END = 34
EEPROM_OFFSET = 11
PORT_INFO_PATH = '/sys/class/t7132s_cpld'
_port_name = ""
_port_to_eeprom_mapping = {}
_port_to_i2cbus_mapping = {}
_port_to_offset = [11, 30, 12, 29, 13, 28, 14, 27, 15, 34,
16, 33, 17, 32, 18, 31, 19, 38, 20, 37,
21, 36, 22, 35, 23, 42, 24, 41, 25, 40,
26, 39,
43, 44]
_global_port_pres_dict = {}
@property
def port_start(self):
return self.PORT_START
@property
def port_end(self):
return self.PORT_END
@property
def qsfp_ports(self):
return list(range(self.QSFP_PORT_START, self.QSFP_PORT_END))
@property
def port_to_eeprom_mapping(self):
return self._port_to_eeprom_mapping
@property
def port_to_i2cbus_mapping(self):
return self._port_to_i2cbus_mapping
def get_port_name(self, port_num):
if port_num in self.qsfp_ports:
self._port_name = "QSFP" + str(port_num + 1)
else:
self._port_name = "SFP" + str(port_num - self.QSFP_PORT_END + 1)
return self._port_name
def METHOD_NAME(self, port_num):
if port_num in self.qsfp_ports:
# QSFP DOM EEPROM is also at addr 0x50 and thus also stored in eeprom_ifraw
return None
else:
# Read dom eeprom at addr 0x51
return self._read_eeprom_devid(port_num, self.DOM_EEPROM_ADDR, 256)
def __init__(self):
# Override port_to_eeprom_mapping for class initialization
eeprom_path = '/sys/bus/i2c/devices/i2c-{0}/{0}-0050/eeprom'
for x in range(self.PORT_START, self.PORT_END):
self.port_to_i2cbus_mapping[x] = (x + self.EEPROM_OFFSET)
self.port_to_eeprom_mapping[x] = eeprom_path.format(
self._port_to_offset[x])
self.init_global_port_presence()
SfpUtilBase.__init__(self)
def init_global_port_presence(self):
for port_num in range(self.port_start, (self.port_end)):
presence = self.get_presence(port_num)
if(presence):
self._global_port_pres_dict[port_num] = '1'
else:
self._global_port_pres_dict[port_num] = '0'
def get_presence(self, port_num):
# Check for invalid port_num
if port_num not in list(range(self.port_start, self.port_end + 1)):
return False
# Get path for access port presence status
port_name = self.get_port_name(port_num)
sysfs_filename = "qsfp_modprs" if port_num in self.qsfp_ports else "sfp_modabs"
reg_path = "/".join([self.PORT_INFO_PATH, port_name, sysfs_filename])
# Read status
try:
reg_file = open(reg_path)
content = reg_file.readline().rstrip()
reg_value = int(content)
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
# Module present is active low
if reg_value == 0:
return True
return False
def get_low_power_mode(self, port_num):
# Check for invalid QSFP port_num
if port_num not in self.qsfp_ports:
return False
try:
port_name = self.get_port_name(port_num)
reg_file = open("/".join([self.PORT_INFO_PATH,
port_name, "qsfp_lpmode"]))
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
# Read status
content = reg_file.readline().rstrip()
reg_value = int(content)
# low power mode is active high
if reg_value == 0:
return False
return True
def set_low_power_mode(self, port_num, lpmode):
# Check for invalid QSFP port_num
if port_num not in self.qsfp_ports:
return False
try:
port_name = self.get_port_name(port_num)
reg_file = open("/".join([self.PORT_INFO_PATH,
port_name, "qsfp_lpmode"]), "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
content = hex(lpmode)
reg_file.seek(0)
reg_file.write(content)
reg_file.close()
return True
def reset(self, port_num):
# Check for invalid QSFP port_num
if port_num not in self.qsfp_ports:
return False
try:
port_name = self.get_port_name(port_num)
reg_file = open("/".join([self.PORT_INFO_PATH,
port_name, "qsfp_reset"]), "w")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
# Convert our register value back to a hex string and write back
reg_file.seek(0)
reg_file.write(hex(0))
reg_file.close()
# Sleep 1 second to allow it to settle
time.sleep(1)
# Flip the bit back high and write back to the register to take port out of reset
try:
reg_file = open(
"/".join([self.PORT_INFO_PATH, port_name, "qsfp_reset"]), "w")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_file.seek(0)
reg_file.write(hex(1))
reg_file.close()
return True
def get_transceiver_change_event(self):
port_dict = {}
while True:
for port_num in range(self.port_start, (self.port_end + 1)):
presence = self.get_presence(port_num)
if(presence and self._global_port_pres_dict[port_num] == '0'):
self._global_port_pres_dict[port_num] = '1'
port_dict[port_num] = '1'
elif(not presence and
self._global_port_pres_dict[port_num] == '1'):
self._global_port_pres_dict[port_num] = '0'
port_dict[port_num] = '0'
if(len(port_dict) > 0):
return True, port_dict
time.sleep(0.5) |
impute inactive | from __future__ import annotations
from abc import abstractmethod
from typing import Any
import numpy as np
import sklearn.gaussian_process
from ConfigSpace import ConfigurationSpace
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Kernel, KernelOperator
from smac.model.abstract_model import AbstractModel
from smac.model.gaussian_process.priors.abstract_prior import AbstractPrior
from smac.model.gaussian_process.priors.tophat_prior import SoftTopHatPrior, TophatPrior
__copyright__ = "Copyright 2022, automl.org"
__license__ = "3-clause BSD"
class AbstractGaussianProcess(AbstractModel):
"""Abstract base class for all Gaussian process models.
Parameters
----------
configspace : ConfigurationSpace
kernel : Kernel
Kernel which is used for the Gaussian process.
instance_features : dict[str, list[int | float]] | None, defaults to None
Features (list of int or floats) of the instances (str). The features are incorporated into the X data,
        on which the model is trained.
pca_components : float, defaults to 7
Number of components to keep when using PCA to reduce dimensionality of instance features.
seed : int
"""
def __init__(
self,
configspace: ConfigurationSpace,
kernel: Kernel,
instance_features: dict[str, list[int | float]] | None = None,
pca_components: int | None = 7,
seed: int = 0,
):
super().__init__(
configspace=configspace,
instance_features=instance_features,
pca_components=pca_components,
seed=seed,
)
self._kernel = kernel
self._gp = self._get_gaussian_process()
@property
def meta(self) -> dict[str, Any]: # noqa: D102
meta = super().meta
meta.update({"kernel": self._kernel.meta})
return meta
@abstractmethod
def _get_gaussian_process(self) -> GaussianProcessRegressor:
"""Generates a Gaussian process."""
raise NotImplementedError()
def _normalize(self, y: np.ndarray) -> np.ndarray:
"""Normalize data to zero mean unit standard deviation.
Parameters
----------
y : np.ndarray
Target values for the Gaussian process.
Returns
-------
normalized_y : np.ndarray
Normalized y values.
"""
self.mean_y_ = np.mean(y)
self.std_y_ = np.std(y)
if self.std_y_ == 0:
self.std_y_ = 1
return (y - self.mean_y_) / self.std_y_
def _untransform_y(
self,
y: np.ndarray,
var: np.ndarray | None = None,
) -> np.ndarray | tuple[np.ndarray, np.ndarray]:
"""Transform zero mean unit standard deviation data into the regular space.
Warning
-------
This function should be used after a prediction with the Gaussian process which was
trained on normalized data.
Parameters
----------
y : np.ndarray
Normalized data.
var : np.ndarray | None, defaults to None
Normalized variance.
Returns
-------
untransformed_y : np.ndarray | tuple[np.ndarray, np.ndarray]
"""
y = y * self.std_y_ + self.mean_y_
if var is not None:
var = var * self.std_y_**2
return y, var # type: ignore
return y
def _get_all_priors(
self,
add_bound_priors: bool = True,
add_soft_bounds: bool = False,
) -> list[list[AbstractPrior]]:
"""Returns all priors."""
# Obtain a list of all priors for each tunable hyperparameter of the kernel
all_priors = []
to_visit = []
to_visit.append(self._gp.kernel.k1)
to_visit.append(self._gp.kernel.k2)
while len(to_visit) > 0:
current_param = to_visit.pop(0)
if isinstance(current_param, KernelOperator):
to_visit.insert(0, current_param.k1)
to_visit.insert(1, current_param.k2)
continue
elif isinstance(current_param, Kernel):
hps = current_param.hyperparameters
assert len(hps) == 1
hp = hps[0]
if hp.fixed:
continue
bounds = hps[0].bounds
for i in range(hps[0].n_elements):
priors_for_hp = []
if current_param.prior is not None:
priors_for_hp.append(current_param.prior)
if add_bound_priors:
if add_soft_bounds:
priors_for_hp.append(
SoftTopHatPrior(
lower_bound=bounds[i][0],
upper_bound=bounds[i][1],
seed=self._rng.randint(0, 2**20),
exponent=2,
)
)
else:
priors_for_hp.append(
TophatPrior(
lower_bound=bounds[i][0],
upper_bound=bounds[i][1],
seed=self._rng.randint(0, 2**20),
)
)
all_priors.append(priors_for_hp)
return all_priors
def _set_has_conditions(self) -> None:
"""Sets `has_conditions` on `current_param`."""
has_conditions = len(self._configspace.get_conditions()) > 0
to_visit = []
to_visit.append(self._kernel)
while len(to_visit) > 0:
current_param = to_visit.pop(0)
if isinstance(current_param, sklearn.gaussian_process.kernels.KernelOperator):
to_visit.insert(0, current_param.k1)
to_visit.insert(1, current_param.k2)
current_param.has_conditions = has_conditions
elif isinstance(current_param, sklearn.gaussian_process.kernels.Kernel):
current_param.has_conditions = has_conditions
else:
raise ValueError(current_param)
def METHOD_NAME(self, X: np.ndarray) -> np.ndarray:
"""Imputes inactives."""
X = X.copy()
X[~np.isfinite(X)] = -1
return X |
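# Illustrative sketch (not part of the original module): the zero-mean/unit-std
# round trip performed by ``_normalize`` and ``_untransform_y``, shown on a
# plain numpy array. A concrete subclass implementing ``_get_gaussian_process``
# is assumed to exist elsewhere and is not needed for this arithmetic.
if __name__ == "__main__":
    y = np.array([3.0, 5.0, 7.0])
    mean_y, std_y = np.mean(y), np.std(y)
    y_norm = (y - mean_y) / std_y      # what _normalize computes and stores
    y_back = y_norm * std_y + mean_y   # what _untransform_y undoes
    assert np.allclose(y, y_back)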
get video url | # -*- coding: utf-8 -*-
# Copyright: (c) 2017, SylvainCecchetto
# GNU General Public License v2.0+ (see LICENSE.txt or https://www.gnu.org/licenses/gpl-2.0.txt)
# This file is part of Catch-up TV & More
from __future__ import unicode_literals
from builtins import str
import re
from codequick import Listitem, Resolver, Route, utils
import urlquick
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
# TODO: play Spanish videos
URL_ROOT = utils.urljoin_partial('https://www.tetesaclaques.tv')
@Route.register
def website_root(plugin, item_id, **kwargs):
"""Add modes in the listing"""
resp = urlquick.get(URL_ROOT(''))
root = resp.parse("li", attrs={"id": "menu-videos"})
for category in root.iterfind(".//li"):
if 'clips_espagnol' not in category.find('.//a').get('href'):
item = Listitem()
if 'personnages' in category.find('.//a').get('href'):
value_next = 'list_shows'
else:
value_next = 'list_videos_1'
item.label = category.find('.//a').text
category_url = URL_ROOT(category.find('.//a').get('href'))
item.set_callback(eval(value_next),
item_id=item_id,
category_url=category_url,
page=1)
item_post_treatment(item)
yield item
@Route.register
def list_shows(plugin, item_id, category_url, page, **kwargs):
"""Build categories listing"""
resp = urlquick.get(category_url)
root = resp.parse("div", attrs={"class": "personnages"})
for personnage in root.iterfind(".//a"):
item = Listitem()
item.label = personnage.get('title')
item.art['thumb'] = item.art['landscape'] = URL_ROOT(personnage.find('.//img').get('src'))
show_url = URL_ROOT(personnage.get('href'))
item.set_callback(list_videos_2,
item_id=item_id,
category_url=show_url)
item_post_treatment(item)
yield item
@Route.register
def list_videos_1(plugin, item_id, category_url, page, **kwargs):
"""Build videos listing"""
resp = urlquick.get(category_url + '/par_date/%s' % str(page))
at_least_one_item = False
if 'serietele' in category_url or 'zarbi' in category_url:
root = resp.parse("div", attrs={"class": "serieTele"})
for episode in root.iterfind(".//div"):
if episode.find('.//a') is not None and \
episode.find(".//img[@class='thumb']") is not None:
at_least_one_item = True
item = Listitem()
item.label = episode.find(".//span[@class='saison-episode']"
).text.strip() + ' ' + episode.find(
'.//img').get('alt')
video_url = URL_ROOT(episode.find('.//a').get('href'))
item.art['thumb'] = item.art['landscape'] = URL_ROOT(episode.find('.//img').get('src'))
item.set_callback(METHOD_NAME,
item_id=item_id,
video_url=video_url)
item_post_treatment(item,
is_playable=True,
is_downloadable=True)
yield item
else:
root = resp.parse()
for episode in root.iterfind(".//a[@class='lienThumbCollection']"):
at_least_one_item = True
item = Listitem()
item.label = episode.find('.//img').get('alt')
video_url = URL_ROOT(episode.get('href'))
item.art['thumb'] = item.art['landscape'] = URL_ROOT(episode.find('.//img').get('src'))
item.set_callback(METHOD_NAME,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
if at_least_one_item:
# More videos...
yield Listitem.next_page(item_id=item_id,
category_url=category_url,
page=page + 1)
else:
plugin.notify(plugin.localize(30718), '')
yield False
@Route.register
def list_videos_2(plugin, item_id, category_url, **kwargs):
"""Build videos listing"""
resp = urlquick.get(category_url)
root = resp.parse()
for episode in root.iterfind(".//a[@class='lienThumbCollection']"):
item = Listitem()
item.label = episode.find('.//img').get('alt')
video_url = URL_ROOT(episode.get('href'))
item.art['thumb'] = item.art['landscape'] = URL_ROOT(episode.find('.//img').get('src'))
item.set_callback(METHOD_NAME,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
@Resolver.register
def METHOD_NAME(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
"""Get video URL and start video player"""
video_html = urlquick.get(video_url).text
    video_id_matches = re.compile('AtedraVideo.video_id = "(.*?)"').findall(video_html)
    if video_id_matches:
        video_id = video_id_matches[0]
else:
        # TODO: Spanish (Espagnol) videos currently return 404 here (TO REMOVE)
return False
return resolver_proxy.get_stream_youtube(plugin, video_id, download_mode) |
getpwent r | #
# Module for simulation of utility "getent passwd -s sss" from coreutils
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from ctypes import (c_int, c_char_p, c_ulong, POINTER,
Structure, create_string_buffer, get_errno)
from sssd_nss import NssReturnCode, SssdNssError, nss_sss_ctypes_loader
PASSWD_BUFLEN = 1024
class Passwd(Structure):
_fields_ = [("pw_name", c_char_p),
("pw_passwd", c_char_p),
("pw_uid", c_int),
("pw_gid", c_int),
("pw_gecos", c_char_p),
("pw_dir", c_char_p),
("pw_shell", c_char_p)]
def set_user_dict(res, result_p):
if res != NssReturnCode.SUCCESS:
return dict()
user_dict = dict()
user_dict['name'] = result_p[0].pw_name.decode('utf-8')
user_dict['passwd'] = result_p[0].pw_passwd.decode('utf-8')
user_dict['uid'] = result_p[0].pw_uid
user_dict['gid'] = result_p[0].pw_gid
user_dict['gecos'] = result_p[0].pw_gecos.decode('utf-8')
user_dict['dir'] = result_p[0].pw_dir.decode('utf-8')
user_dict['shell'] = result_p[0].pw_shell.decode('utf-8')
return user_dict
def getpwnam_r(name, result_p, buffer_p, buflen):
"""
ctypes wrapper for:
enum nss_status _nss_sss_getpwnam_r(const char *name,
struct passwd *result,
char *buffer,
size_t buflen,
int *errnop)
"""
func = nss_sss_ctypes_loader("_nss_sss_getpwnam_r")
func.restype = c_int
func.argtypes = [c_char_p, POINTER(Passwd),
c_char_p, c_ulong, POINTER(c_int)]
errno = POINTER(c_int)(c_int(0))
name = name.encode('utf-8')
res = func(c_char_p(name), result_p, buffer_p, buflen, errno)
return (int(res), int(errno[0]), result_p)
def getpwuid_r(uid, result_p, buffer_p, buflen):
"""
ctypes wrapper for:
enum nss_status _nss_sss_getpwuid_r(uid_t uid,
struct passwd *result,
char *buffer,
size_t buflen,
int *errnop)
"""
func = nss_sss_ctypes_loader("_nss_sss_getpwuid_r")
func.restype = c_int
func.argtypes = [c_ulong, POINTER(Passwd),
c_char_p, c_ulong, POINTER(c_int)]
errno = POINTER(c_int)(c_int(0))
res = func(uid, result_p, buffer_p, buflen, errno)
return (int(res), int(errno[0]), result_p)
def setpwent():
"""
ctypes wrapper for:
void setpwent(void)
"""
func = nss_sss_ctypes_loader("_nss_sss_setpwent")
func.argtypes = []
res = func()
assert res == NssReturnCode.SUCCESS
errno = get_errno()
if errno != 0:
raise SssdNssError(errno, "setpwent")
def endpwent():
"""
ctypes wrapper for:
void endpwent(void)
"""
func = nss_sss_ctypes_loader("_nss_sss_endpwent")
func.argtypes = []
res = func()
assert res == NssReturnCode.SUCCESS
errno = get_errno()
if errno != 0:
raise SssdNssError(errno, "endpwent")
def METHOD_NAME(result_p, buffer_p, buflen):
"""
ctypes wrapper for:
enum nss_status _nss_sss_getpwent_r(struct passwd *result,
char *buffer, size_t buflen,
int *errnop)
"""
func = nss_sss_ctypes_loader("_nss_sss_getpwent_r")
func.restype = c_int
func.argtypes = [POINTER(Passwd), c_char_p, c_ulong, POINTER(c_int)]
errno = POINTER(c_int)(c_int(0))
res = func(result_p, buffer_p, buflen, errno)
return (int(res), int(errno[0]), result_p)
def getpwent():
result = Passwd()
result_p = POINTER(Passwd)(result)
buff = create_string_buffer(PASSWD_BUFLEN)
res, errno, result_p = METHOD_NAME(result_p, buff, PASSWD_BUFLEN)
if errno != 0:
raise SssdNssError(errno, "getpwent_r")
user_dict = set_user_dict(res, result_p)
return res, user_dict
def call_sssd_getpwnam(name):
"""
A Python wrapper to retrieve a user by name. Returns:
(res, user_dict)
if res is NssReturnCode.SUCCESS, then user_dict contains the keys
corresponding to the C passwd structure fields. Otherwise, the dictionary
is empty and errno indicates the error code
"""
result = Passwd()
result_p = POINTER(Passwd)(result)
buff = create_string_buffer(PASSWD_BUFLEN)
res, errno, result_p = getpwnam_r(name, result_p, buff, PASSWD_BUFLEN)
if errno != 0:
raise SssdNssError(errno, "getpwnam_r")
user_dict = set_user_dict(res, result_p)
return res, user_dict
def call_sssd_getpwuid(uid):
"""
A Python wrapper to retrieve a user by UID. Returns:
(res, user_dict)
if res is NssReturnCode.SUCCESS, then user_dict contains the keys
corresponding to the C passwd structure fields. Otherwise, the dictionary
is empty and errno indicates the error code
"""
result = Passwd()
result_p = POINTER(Passwd)(result)
buff = create_string_buffer(PASSWD_BUFLEN)
res, errno, result_p = getpwuid_r(uid, result_p, buff, PASSWD_BUFLEN)
if errno != 0:
raise SssdNssError(errno, "getpwuid_r")
user_dict = set_user_dict(res, result_p)
return res, user_dict
def call_sssd_enumeration():
"""
    Enumerate users from the sssd module only.
"""
setpwent()
user_list = []
res, user = getpwent()
while res == NssReturnCode.SUCCESS:
user_list.append(user)
res, user = getpwent()
endpwent()
return user_list |
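# Illustrative usage (not part of the original module): a minimal sketch of a
# user lookup through the sss NSS module. It only works on a host where
# libnss_sss is available and sssd serves the queried user; the user name
# below is hypothetical.
if __name__ == "__main__":
    res, user = call_sssd_getpwnam("testuser1")
    if res == NssReturnCode.SUCCESS:
        print("%s uid=%d shell=%s" % (user["name"], user["uid"], user["shell"]))
    else:
        print("lookup failed with NSS status %d" % res)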
run patten check | # -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
""" Utility for checking model architechture prior to using AIMET feature. """
from typing import Callable, Union, Tuple
import torch
from aimet_common.utils import AimetLogger
from aimet_torch.meta.connectedgraph import ConnectedGraph
from aimet_torch.arch_checker.arch_checker_rules import TorchActivations
from aimet_torch.arch_checker.arch_checker_utils import (ArchCheckerReport,
OpStructure,
check_type_deco,
get_node_check_dict,
get_pattern_check_list,
check_module)
logger = AimetLogger.get_area_logger(AimetLogger.LogAreas.Utils)
class ArchChecker:
"""
    ArchChecker object to check PyTorch model architecture and suggest a better architecture prior
    to training.
"""
_node_check_dict = get_node_check_dict()
_pattern_checks = get_pattern_check_list()
_arch_checker_report = ArchCheckerReport()
@staticmethod
def add_node_check(check_target_type: torch.nn.Module, arch_check: Callable):
"""
Add extra checks for node checks in architecture checker.
:param check_target_type: layer type to be checked.
:param arch_check: node checker function.
"""
        # All TorchActivations subclasses are grouped under one key.
        if issubclass(check_target_type, TorchActivations):
# Init TorchActivations check if not exist.
if TorchActivations not in ArchChecker._node_check_dict:
ArchChecker._node_check_dict[TorchActivations] = []
if check_target_type is TorchActivations:
ArchChecker._node_check_dict[TorchActivations].append(arch_check)
else:
# Add check_type_deco wrapper if check_target_type is a subclass of TorchActivations.
ArchChecker._node_check_dict[TorchActivations].append(check_type_deco(check_target_type)(arch_check))
else:
if check_target_type in ArchChecker._node_check_dict:
ArchChecker._node_check_dict[check_target_type].append(arch_check)
else:
ArchChecker._node_check_dict[check_target_type] = [arch_check]
@staticmethod
def add_pattern_check(arch_check: Callable):
"""
Add extra checks for pattern checks in architecture checker.
:param arch_check: pattern checker function.
"""
ArchChecker._pattern_checks.append(arch_check)
@staticmethod
    def check_model_arch(model: torch.nn.Module, dummy_input: Union[torch.Tensor, Tuple], result_dir: str = None) -> ArchCheckerReport:
"""
Check each node in the model using checks in _node_check_dict. Record only the nodes and
failed tests.
:param model: Torch model to be checked.
:param dummy_input: A dummy input to the model. Can be a Tensor or a Tuple of Tensors
:return arch_checker_report: {op.dotted_name_op: NodeErrorReportObject }
"""
def run_node_checks():
"""
            Walks through the connected graph and applies node checks on each node.
"""
for op in connected_graph.ordered_ops:
module = op.get_module()
if module and isinstance(module, tuple(ArchChecker._node_check_dict.keys())):
if isinstance(module, TorchActivations):
checks = ArchChecker._node_check_dict[TorchActivations]
else:
checks = ArchChecker._node_check_dict[type(module)]
failed_checks_set = check_module(module, checks)
if failed_checks_set:
ArchChecker._arch_checker_report.update_raw_report(op, failed_checks_set)
logger.info("Graph/Node: %s: %s fails check: %s", op.dotted_name, module,
failed_checks_set)
def METHOD_NAME():
"""
Applies pattern checks on connected graph.
"""
for _check in ArchChecker._pattern_checks:
failed_check_ops = _check(connected_graph)
if failed_check_ops:
# Pattern check that marks structure returns List[List[Op]]
# Transform List[List[Op]] to List[OpStructure]
if isinstance(failed_check_ops[0], list):
failed_check_ops = [OpStructure(_op_tuple) for _op_tuple in failed_check_ops]
ArchChecker._arch_checker_report.update_raw_report(failed_check_ops, _check.__name__)
for op in failed_check_ops:
logger.info("Graph/Node: %s: %s fails check: %s", op.dotted_name, op.get_module(), {_check.__name__})
connected_graph = ConnectedGraph(model, dummy_input)
        # Run all node checks
        logger.info("Running node checks.")
run_node_checks()
        # Run all pattern checks
        logger.info("Running pattern checks.")
METHOD_NAME()
if result_dir is not None:
ArchChecker.set_export_dir(result_dir)
ArchChecker._arch_checker_report.export_to_html()
@staticmethod
def set_export_dir(dir_path: str):
""" Set export dir. """
ArchChecker._arch_checker_report.result_dir = dir_path |
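# Illustrative usage (not part of the original module): a minimal sketch of
# running the architecture checker on a small model. The model, input shape and
# result directory are hypothetical; which checks fire depends entirely on the
# rules defined in arch_checker_rules.
if __name__ == "__main__":
    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 30, kernel_size=3),
        torch.nn.ReLU(),
    )
    dummy_input = torch.randn(1, 3, 32, 32)
    ArchChecker.check_model_arch(model, dummy_input, result_dir="/tmp/arch_checker_results")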
dense strategy rocm | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Definition of ROCm operator strategy."""
# pylint: disable=invalid-name,unused-argument,unused-wildcard-import,wildcard-import
from tvm import topi
from tvm.te import SpecializedCondition
from tvm.contrib.thrust import can_use_rocthrust
from tvm.contrib import miopen
from .generic import *
from .. import op as _op
from .cuda import batch_matmul_strategy_cuda, conv2d_strategy_cuda, dense_strategy_cuda
@conv2d_strategy.register("rocm")
def conv2d_strategy_rocm(attrs, inputs, out_type, target):
"""conv2d rocm strategy"""
groups = attrs.groups
layout = attrs.data_layout
padding = attrs.get_int_tuple("padding")
strategy = conv2d_strategy_cuda(attrs, inputs, out_type, target)
# add miopen implementation
if (
"miopen" in target.libs
and groups == 1
and layout == "NCHW"
and padding[0] == padding[2]
and padding[1] == padding[3]
):
strategy.add_implementation(
wrap_compute_conv2d(topi.rocm.conv2d_nchw_miopen, need_data_layout=True),
wrap_topi_schedule(topi.rocm.schedule_conv2d_nchw_miopen),
name="conv2d_nchw_miopen.rocm",
plevel=50,
)
return strategy
@dense_strategy.register("rocm")
def METHOD_NAME(attrs, inputs, out_type, target):
"""Dense strategy for ROCM"""
assert len(inputs[0].shape) == 2 and len(inputs[1].shape) == 2, "Only support 2-dim dense"
strategy = dense_strategy_cuda(attrs, inputs, out_type, target)
if target.kind.name == "rocm" and "rocblas" in target.libs:
assert out_type.dtype == inputs[0].dtype, "Mixed precision not supported."
strategy.add_implementation(
wrap_compute_dense(topi.rocm.dense_rocblas),
wrap_topi_schedule(topi.rocm.schedule_dense_rocblas),
name="dense_rocblas.rocm",
plevel=15,
)
return strategy
@batch_matmul_strategy.register("rocm")
def batch_matmul_strategy_rocm(attrs, inputs, out_type, target):
"""Batch matmul strategy for ROCM"""
strategy = batch_matmul_strategy_cuda(attrs, inputs, out_type, target)
if target.kind.name == "rocm" and "rocblas" in target.libs:
assert out_type.dtype == inputs[0].dtype, "Mixed precision not supported."
strategy.add_implementation(
wrap_compute_batch_matmul(topi.rocm.batch_matmul_rocblas),
wrap_topi_schedule(topi.rocm.schedule_batch_matmul_rocblas),
name="batch_matmul_rocblas.rocm",
plevel=12,
)
return strategy
@argsort_strategy.register(["rocm"])
def argsort_strategy_cuda(attrs, inputs, out_type, target):
"""argsort rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_argsort(topi.cuda.argsort_thrust),
wrap_topi_schedule(topi.cuda.schedule_argsort),
name="argsort_thrust.rocm",
plevel=15,
)
return strategy
@scatter_elements_strategy.register(["rocm"])
def scatter_elements_cuda(attrs, inputs, out_type, target):
"""scatter rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_scatter_elements(topi.cuda.scatter_elements),
wrap_topi_schedule(topi.cuda.schedule_extern),
name="scatter_elements.rocm",
plevel=10,
)
rank = len(inputs[0].shape)
with SpecializedCondition(rank == 1 and attrs.reduction == "update"):
if can_use_rocthrust(target, "tvm.contrib.thrust.stable_sort_by_key"):
strategy.add_implementation(
wrap_compute_scatter_elements(topi.cuda.scatter_via_sort),
wrap_topi_schedule(topi.cuda.schedule_scatter_via_sort),
name="scatter_via_sort.rocm",
plevel=9, # use the sequential version by default
)
return strategy
@sort_strategy.register(["rocm"])
def sort_strategy_cuda(attrs, inputs, out_type, target):
"""sort rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_sort(topi.cuda.sort_thrust),
wrap_topi_schedule(topi.cuda.schedule_sort),
name="sort_thrust.cuda",
plevel=15,
)
return strategy
@topk_strategy.register(["rocm"])
def topk_strategy_cuda(attrs, inputs, out_type, target):
"""topk rocm strategy"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk.rocm",
)
if can_use_rocthrust(target, "tvm.contrib.thrust.sort"):
strategy.add_implementation(
wrap_compute_topk(topi.cuda.topk_thrust),
wrap_topi_schedule(topi.cuda.schedule_topk),
name="topk_thrust.rocm",
plevel=15,
)
return strategy
@softmax_strategy.register(["rocm"])
def softmax_strategy_rocm(attrs, inputs, out_type, target):
"""rocm strategy for softmax"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="softmax.rocm",
)
if "miopen" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(miopen.softmax),
wrap_topi_schedule(topi.generic.schedule_extern),
name="softmax.miopen",
plevel=15,
)
return strategy
@log_softmax_strategy.register(["rocm"])
def log_softmax_strategy_rocm(attrs, inputs, out_type, target):
"""rocm strategy for log softmax"""
strategy = _op.OpStrategy()
strategy.add_implementation(
wrap_compute_softmax(topi.nn.log_softmax),
wrap_topi_schedule(topi.cuda.schedule_softmax),
name="log_softmax.rocm",
)
if "miopen" in target.libs:
strategy.add_implementation(
wrap_compute_softmax(miopen.log_softmax),
wrap_topi_schedule(topi.generic.schedule_extern),
name="log_softmax.miopen",
plevel=15,
)
return strategy |
test calibration repr | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import pytest
import matplotlib as mpl
import matplotlib.pyplot as plt
from google.protobuf.text_format import Merge
import cirq
import cirq_google as cg
from cirq_google.api import v2
_CALIBRATION_DATA = Merge(
"""
timestamp_ms: 1562544000021,
metrics: [{
name: 'two_qubit_xeb',
targets: ['0_0', '0_1'],
values: [{
double_val: .9999
}]
}, {
name: 'two_qubit_xeb',
targets: ['0_0', '1_0'],
values: [{
double_val: .9998
}]
}, {
name: 't1',
targets: ['0_0'],
values: [{
double_val: 321
}]
}, {
name: 't1',
targets: ['0_1'],
values: [{
double_val: 911
}]
}, {
name: 't1',
targets: ['1_0'],
values: [{
double_val: 505
}]
}, {
name: 'globalMetric',
values: [{
int32_val: 12300
}]
}]
""",
v2.metrics_pb2.MetricsSnapshot(),
)
def test_calibration_metrics_dictionary():
calibration = cg.Calibration(_CALIBRATION_DATA)
t1s = calibration['t1']
assert t1s == {
(cirq.GridQubit(0, 0),): [321],
(cirq.GridQubit(0, 1),): [911],
(cirq.GridQubit(1, 0),): [505],
}
assert len(calibration) == 3
assert 't1' in calibration
assert 't2' not in calibration
for qubits, values in t1s.items():
assert len(qubits) == 1
assert len(values) == 1
with pytest.raises(TypeError, match="was 1"):
_ = calibration[1]
with pytest.raises(KeyError, match='not-it'):
_ = calibration['not-it']
def test_calibration_str():
calibration = cg.Calibration(_CALIBRATION_DATA)
assert str(calibration) == "Calibration(keys=['globalMetric', 't1', 'two_qubit_xeb'])"
def METHOD_NAME():
calibration = cg.Calibration(_CALIBRATION_DATA)
cirq.testing.assert_equivalent_repr(calibration, setup_code="import cirq\nimport cirq_google")
def test_calibration_timestamp_str():
calibration = cg.Calibration(_CALIBRATION_DATA)
assert calibration.timestamp_str(tz=datetime.timezone.utc) == '2019-07-08 00:00:00.021021+00:00'
assert (
calibration.timestamp_str(tz=datetime.timezone(datetime.timedelta(hours=1)))
== '2019-07-08 01:00:00.021021+01:00'
)
def test_to_proto():
calibration = cg.Calibration(_CALIBRATION_DATA)
assert calibration == cg.Calibration(calibration.to_proto())
invalid_value = cg.Calibration(metrics={'metric': {(cirq.GridQubit(1, 1),): [1.1, {}]}})
with pytest.raises(ValueError, match='Unsupported metric value'):
invalid_value.to_proto()
def test_value_to_float():
assert cg.Calibration.value_to_float([1.1]) == 1.1
assert cg.Calibration.value_to_float([0.7, 0.5]) == 0.7
assert cg.Calibration.value_to_float([7]) == 7
with pytest.raises(ValueError, match='was empty'):
cg.Calibration.value_to_float([])
with pytest.raises(ValueError, match='could not convert string to float'):
cg.Calibration.value_to_float(['went for a walk'])
def test_calibrations_with_string_key():
calibration = cg.Calibration(metrics={'metric1': {('alpha',): [0.1]}})
expected_proto = Merge(
"""
metrics: [{
name: 'metric1'
targets: ['alpha']
values: [{double_val: 0.1}]
}]
""",
v2.metrics_pb2.MetricsSnapshot(),
)
assert expected_proto == calibration.to_proto()
assert calibration == cg.Calibration(expected_proto)
assert calibration == cg.Calibration(calibration.to_proto())
def test_key_to_qubits():
qubits = tuple([cirq.GridQubit(1, 1), cirq.GridQubit(1, 2)])
assert cg.Calibration.key_to_qubit(qubits) == cirq.GridQubit(1, 1)
assert cg.Calibration.key_to_qubits(qubits) == (cirq.GridQubit(1, 1), cirq.GridQubit(1, 2))
with pytest.raises(ValueError, match='was not a tuple of qubits'):
cg.Calibration.key_to_qubit('alpha')
with pytest.raises(ValueError, match='was not a tuple of grid qubits'):
cg.Calibration.key_to_qubits('alpha')
def test_calibration_heatmap():
calibration = cg.Calibration(_CALIBRATION_DATA)
heatmap = calibration.heatmap('t1')
figure = mpl.figure.Figure()
axes = figure.add_subplot(111)
heatmap.plot(axes)
assert axes.get_title() == 'T1'
heatmap = calibration.heatmap('two_qubit_xeb')
figure = mpl.figure.Figure()
axes = figure.add_subplot(999)
heatmap.plot(axes)
assert axes.get_title() == 'Two Qubit Xeb'
with pytest.raises(ValueError, match="one or two qubits.*multi_qubit"):
multi_qubit_data = Merge(
"""metrics: [{
name: 'multi_qubit',
targets: ['0_0', '0_1', '1_0'],
values: [{double_val: 0.999}]}]""",
v2.metrics_pb2.MetricsSnapshot(),
)
cg.Calibration(multi_qubit_data).heatmap('multi_qubit')
with pytest.raises(ValueError, match="single metric values.*multi_value"):
multi_qubit_data = Merge(
"""metrics: [{
name: 'multi_value',
targets: ['0_0'],
values: [{double_val: 0.999}, {double_val: 0.001}]}]""",
v2.metrics_pb2.MetricsSnapshot(),
)
cg.Calibration(multi_qubit_data).heatmap('multi_value')
@pytest.mark.usefixtures('closefigures')
def test_calibration_plot_histograms():
calibration = cg.Calibration(_CALIBRATION_DATA)
_, ax = plt.subplots(1, 1)
calibration.plot_histograms(['t1', 'two_qubit_xeb'], ax, labels=['T1', 'XEB'])
assert len(ax.get_lines()) == 4
with pytest.raises(ValueError, match="single metric values.*multi_value"):
multi_qubit_data = Merge(
"""metrics: [{
name: 'multi_value',
targets: ['0_0'],
values: [{double_val: 0.999}, {double_val: 0.001}]}]""",
v2.metrics_pb2.MetricsSnapshot(),
)
cg.Calibration(multi_qubit_data).plot_histograms('multi_value')
@pytest.mark.usefixtures('closefigures')
def test_calibration_plot():
calibration = cg.Calibration(_CALIBRATION_DATA)
_, axs = calibration.plot('two_qubit_xeb')
assert axs[0].get_title() == 'Two Qubit Xeb'
assert len(axs[1].get_lines()) == 2 |
test pluto verify underflow | import pytest
hardware = ["pluto", "pluto_rev_c"]
classname = "adi.Pluto"
#########################################
@pytest.mark.iio_hardware(hardware)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize(
"attr, start, stop, step, tol, repeats",
[
("tx_hardwaregain_chan0", -89.75, 0.0, 0.25, 0, 100),
("rx_lo", 325000000, 3800000000, 1, 8, 100),
("tx_lo", 325000000, 3800000000, 1, 8, 100),
("sample_rate", 2084000, 61440000, 1, 4, 100),
("loopback", 0, 0, 1, 0, 0),
("loopback", 1, 1, 1, 0, 0),
("loopback", 2, 2, 1, 0, 0),
],
)
def test_pluto_attr(
test_attribute_single_value,
iio_uri,
classname,
attr,
start,
stop,
step,
tol,
repeats,
):
test_attribute_single_value(
iio_uri, classname, attr, start, stop, step, tol, repeats
)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
def test_pluto_rx_data(test_dma_rx, iio_uri, classname, channel):
test_dma_rx(iio_uri, classname, channel)
#########################################
@pytest.mark.iio_hardware(hardware)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
def test_pluto_tx_data(test_dma_tx, iio_uri, classname, channel):
test_dma_tx(iio_uri, classname, channel)
#########################################
@pytest.mark.iio_hardware(hardware)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize(
"param_set",
[
dict(
tx_lo=1000000000,
rx_lo=1000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
)
],
)
def test_pluto_cyclic_buffers(
test_cyclic_buffer, iio_uri, classname, channel, param_set
):
test_cyclic_buffer(iio_uri, classname, channel, param_set)
#########################################
@pytest.mark.iio_hardware(hardware)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize(
"param_set",
[
dict(
tx_lo=1000000000,
rx_lo=1000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
)
],
)
def test_pluto_cyclic_buffers_exception(
test_cyclic_buffer_exception, iio_uri, classname, channel, param_set
):
test_cyclic_buffer_exception(iio_uri, classname, channel, param_set)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
def test_pluto_loopback(test_dma_loopback, iio_uri, classname, channel):
test_dma_loopback(iio_uri, classname, channel)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize(
"param_set",
[
dict(
tx_lo=1000000000,
rx_lo=1000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
)
],
)
@pytest.mark.parametrize("sfdr_min", [40])
def test_pluto_sfdr(test_sfdr, iio_uri, classname, channel, param_set, sfdr_min):
test_sfdr(iio_uri, classname, channel, param_set, sfdr_min)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize("frequency, scale", [(1000000, 1)])
@pytest.mark.parametrize(
"param_set",
[
dict(
tx_lo=1000000000,
rx_lo=1000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-30,
sample_rate=4000000,
)
],
)
@pytest.mark.parametrize("peak_min", [-40])
def test_pluto_dds_loopback(
test_dds_loopback,
iio_uri,
classname,
param_set,
channel,
frequency,
scale,
peak_min,
):
test_dds_loopback(
iio_uri, classname, param_set, channel, frequency, scale, peak_min
)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize(
"param_set",
[
dict(
tx_lo=1000000000,
rx_lo=1000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
),
dict(
tx_lo=2000000000,
rx_lo=2000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
),
dict(
tx_lo=3000000000,
rx_lo=3000000000,
gain_control_mode_chan0="slow_attack",
tx_hardwaregain_chan0=-20,
sample_rate=4000000,
),
],
)
def test_pluto_iq_loopback(test_iq_loopback, iio_uri, classname, channel, param_set):
test_iq_loopback(iio_uri, classname, channel, param_set)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
def test_pluto_loopback_zeros(test_dma_dac_zeros, iio_uri, classname, channel):
test_dma_dac_zeros(iio_uri, classname, channel)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize("buffer_size", [2 ** 20])
@pytest.mark.parametrize("sample_rate", [600e3])
def test_pluto_verify_overflow(
test_verify_overflow, iio_uri, classname, channel, buffer_size, sample_rate
):
test_verify_overflow(iio_uri, classname, channel, buffer_size, sample_rate)
#########################################
@pytest.mark.iio_hardware(hardware, True)
@pytest.mark.parametrize("classname", [(classname)])
@pytest.mark.parametrize("channel", [0])
@pytest.mark.parametrize("buffer_size", [2 ** 20])
@pytest.mark.parametrize("sample_rate", [600e3])
def METHOD_NAME(
test_verify_underflow, iio_uri, classname, channel, buffer_size, sample_rate
):
test_verify_underflow(iio_uri, classname, channel, buffer_size, sample_rate) |
get num psus | #
# psuutil.py
# Platform-specific PSU status interface for SONiC
#
import logging
import os.path
try:
from sonic_psu.psu_base import PsuBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class PsuUtil(PsuBase):
"""Platform-specific PSUutil class"""
HWMON_PATH = '/sys/class/hwmon/hwmon1/'
PSU1_PREFIX = 'power43_'
PSU2_PREFIX = 'power54_'
MAX_PSUS = 2
def __init__(self):
PsuBase.__init__(self)
# Get sysfs attribute
def get_attr_value(self, attr_path):
retval = 'ERR'
if (not os.path.isfile(attr_path)):
return retval
try:
with open(attr_path, 'r') as fd:
retval = fd.read()
except Exception:
logging.error("Unable to open ", attr_path, " file !")
retval = retval.rstrip('\r\n')
return retval
def get_attr_filename(self, index, attr):
if (index == 1):
attr_file = self.PSU1_PREFIX + attr
elif (index == 2):
attr_file = self.PSU2_PREFIX + attr
else:
logging.error("Invalid PSU number:", index)
return ''
return attr_file
def METHOD_NAME(self):
"""
Retrieves the number of PSUs available on the device
:return: An integer, the number of PSUs available on the device
"""
return self.MAX_PSUS
def get_psu_status(self, index):
"""
        Retrieves the operational status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is operating properly, False if PSU is\
faulty
"""
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def get_psu_presence(self, index):
"""
Retrieves the presence status of power supply unit (PSU) defined
by index <index>
:param index: An integer, index of the PSU of which to query status
:return: Boolean, True if PSU is plugged, False if not
"""
status = False
attr_filename = self.get_attr_filename(index, 'present')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = int(attr_value, 16)
# Check PSU status
if (attr_value == 1):
status = True
return status
def get_powergood_status(self, index):
status = False
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return status
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
# Check PSU status
if (attr_value != 0.0):
status = True
return status
def get_model(self, index):
attr_filename = self.get_attr_filename(index, 'model')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_mfr_id(self, index):
attr_filename = self.get_attr_filename(index, 'mfrid')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_serial(self, index):
attr_filename = self.get_attr_filename(index, 'sn')
if attr_filename == '':
return None
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
return attr_value.rstrip()
def get_direction(self, index):
if (index == 1):
direction_file = 'fan41_direction'
elif (index == 2):
direction_file = 'fan52_direction'
else:
logging.error("Invalid PSU number:", index)
return None
direction = self.get_attr_value(self.HWMON_PATH + direction_file)
direction = direction.rstrip()
"""
1: FB 2: BF
Since the fan is at rear of the switch, FB means Exhaust; BF means Intake
"""
if direction == '2':
return "INTAKE"
else:
return "EXHAUST"
def get_output_voltage(self, index):
if (index == 1):
attr_file = 'in48_input'
elif (index == 2):
attr_file = 'in59_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
voltage = self.get_attr_value(self.HWMON_PATH + attr_file)
voltage = voltage.rstrip()
if (voltage != 'ERR'):
voltage, dummy = voltage.split('.', 1)
else:
return 0.0
return float(voltage)/1000
def get_output_current(self, index):
if (index == 1):
attr_file = 'curr40_input'
elif (index == 2):
attr_file = 'curr51_input'
else:
logging.error("Invalid PSU number:", index)
return 0.0
current = self.get_attr_value(self.HWMON_PATH + attr_file)
current = current.rstrip()
if (current != 'ERR'):
current, dummy = current.split('.',1)
else:
return 0.0
return float(current)/1000
def get_output_power(self, index):
attr_filename = self.get_attr_filename(index, 'input')
if attr_filename == '':
return 0.0
attr_path = self.HWMON_PATH + attr_filename
attr_value = self.get_attr_value(attr_path)
if (attr_value != 'ERR'):
attr_value = float(attr_value)
else:
return 0.0
return float(attr_value/1000)
def get_fan_rpm(self, index, fan_idx):
if (index == 1):
rpm_file = 'fan41_input'
elif (index == 2):
rpm_file = 'fan52_input'
else:
logging.error("Invalid PSU number:", index)
return 0
rpm = self.get_attr_value(self.HWMON_PATH + rpm_file)
rpm = rpm.rstrip()
if (rpm != 'ERR'):
rpm = float(rpm)
else:
return 0
return int(rpm) |
update compute cost | import copy
from typing import List
from colossalai.auto_parallel.tensor_shard.sharding_strategy import MemoryCost, ShardingStrategy, TrainCycleItem
from .strategy_generator import FollowingStrategyGenerator
__all__ = ['UnaryElementwiseGenerator']
class UnaryElementwiseGenerator(FollowingStrategyGenerator):
"""
UnaryElementwiseGenerator which deals with the sharding strategies of UnaryElementwiseOp.
"""
def validate(self) -> bool:
return super().validate()
def METHOD_NAME(self, strategy: ShardingStrategy):
compute_cost = TrainCycleItem(fwd=10, bwd=10, total=20)
strategy.compute_cost = compute_cost
def update_memory_cost(self, strategy: ShardingStrategy):
'''
Compute the memory cost per device with this specific strategy.
'''
forward_size_mapping = {
'input': self._compute_size_in_bytes(strategy, "input"),
'output': self._compute_size_in_bytes(strategy, "output")
}
backward_size_mapping = copy.deepcopy(forward_size_mapping)
backward_size_mapping.pop("output")
# compute fwd cost incurred
# fwd_cost = input + output
fwd_activation_cost = sum([v for k, v in forward_size_mapping.items() if not self.is_param(k)])
fwd_parameter_cost = sum([v for k, v in forward_size_mapping.items() if self.is_param(k)])
fwd_mem_cost = MemoryCost(activation=fwd_activation_cost, parameter=fwd_parameter_cost)
# compute bwd cost incurred
# bwd_cost = input_grad
bwd_activation_cost = sum([v for k, v in backward_size_mapping.items() if not self.is_param(k)])
bwd_parameter_cost = sum([v for k, v in backward_size_mapping.items() if self.is_param(k)])
bwd_mem_cost = MemoryCost(activation=bwd_activation_cost, parameter=bwd_parameter_cost)
# compute total cost
total_mem_cost = MemoryCost(activation=fwd_activation_cost + bwd_activation_cost,
parameter=fwd_parameter_cost + bwd_parameter_cost)
memory_cost = TrainCycleItem(fwd=fwd_mem_cost, bwd=bwd_mem_cost, total=total_mem_cost)
strategy.memory_cost = memory_cost
def collate_strategies(self) -> List[ShardingStrategy]:
strategy_list = []
        # For an element-wise function, we keep the sharding spec of the output node the
        # same as the input's. Therefore, different strategies of the input node that share
        # the same output sharding spec generate the same strategy for the element-wise function.
for index, strategy in enumerate(self.predecessor_node.strategies_vector):
dim_partition_dict_mapping = {}
communication_action_mapping = {}
input_sharding_spec = strategy.output_sharding_specs[self.op_data["input"]]
dim_partition_dict_for_input = input_sharding_spec.dim_partition_dict
dim_partition_dict_for_output = copy.deepcopy(dim_partition_dict_for_input)
dim_partition_dict_mapping = {
"input": dim_partition_dict_for_input,
"output": dim_partition_dict_for_output,
}
sharding_spec_mapping = self.to_sharding_spec_mapping(dim_partition_dict_mapping)
            # Add the index to the name to pass the duplicate-name check.
            # We keep identical strategies under different names for node merging; this does not
            # increase the search space, because the solver merges this node into other nodes
            # and never creates a new variable for it.
name = f'{sharding_spec_mapping["input"].sharding_sequence} -> {sharding_spec_mapping["output"].sharding_sequence}_{index}'
strategy = self.get_sharding_strategy(name=name,
sharding_spec_mapping=sharding_spec_mapping,
communication_action_mapping=communication_action_mapping)
strategy_list.append(strategy)
return strategy_list |
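# --- Hedged illustration (not part of the original module) ---
# A minimal, self-contained sketch of the memory bookkeeping done in update_memory_cost
# above, using plain integers instead of ShardingStrategy/MemoryCost objects. The byte
# sizes below are invented purely for illustration; only the split "forward = input +
# output activations, backward = input gradient" mirrors the logic above.
def _example_memory_split(input_bytes: int, output_bytes: int) -> dict:
    # forward keeps both the input and the output activations
    fwd_activation = input_bytes + output_bytes
    # backward only needs room for the gradient of the input
    bwd_activation = input_bytes
    return {"fwd": fwd_activation, "bwd": bwd_activation, "total": fwd_activation + bwd_activation}

# Example: a 4 MiB input and a 4 MiB output give 8 MiB forward and 4 MiB backward cost.
assert _example_memory_split(4 * 2**20, 4 * 2**20) == {
    "fwd": 8 * 2**20, "bwd": 4 * 2**20, "total": 12 * 2**20}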
send error | import os
import signal
import subprocess
import sys
from contextlib import contextmanager
from io import TextIOWrapper
from subprocess import Popen
from time import sleep
from typing import Any, Iterator, NamedTuple, Optional, Sequence, Tuple
import dagster._check as check
from dagster._core.errors import DagsterError
from dagster._serdes.serdes import (
deserialize_value,
serialize_value,
whitelist_for_serdes,
)
from dagster._utils.error import (
ExceptionInfo,
SerializableErrorInfo,
serializable_error_info_from_exc_info,
)
def write_unary_input(input_file: str, obj: NamedTuple) -> None:
check.str_param(input_file, "input_file")
check.not_none_param(obj, "obj")
with open(os.path.abspath(input_file), "w", encoding="utf8") as fp:
fp.write(serialize_value(obj))
def read_unary_input(input_file: str) -> Tuple[object, ...]:
check.str_param(input_file, "input_file")
with open(os.path.abspath(input_file), "r", encoding="utf8") as fp:
return deserialize_value(fp.read(), NamedTuple)
def ipc_write_unary_response(output_file: str, obj: NamedTuple) -> None:
check.not_none_param(obj, "obj")
with ipc_write_stream(output_file) as stream:
stream.send(obj)
def read_unary_response(
output_file: str, timeout: int = 30, ipc_process: "Optional[Popen[bytes]]" = None
) -> Optional[NamedTuple]:
messages = list(ipc_read_event_stream(output_file, timeout=timeout, ipc_process=ipc_process))
check.invariant(len(messages) == 1)
return messages[0]
@whitelist_for_serdes
class IPCStartMessage(NamedTuple("_IPCStartMessage", [])):
def __new__(cls):
return super(IPCStartMessage, cls).__new__(cls)
@whitelist_for_serdes
class IPCErrorMessage(
NamedTuple(
"_IPCErrorMessage",
[("serializable_error_info", SerializableErrorInfo), ("message", Optional[str])],
)
):
"""This represents a user error encountered during the IPC call. This indicates a business
logic error, rather than a protocol. Consider this a "task failed successfully"
use case.
"""
def __new__(cls, serializable_error_info: SerializableErrorInfo, message: Optional[str]):
return super(IPCErrorMessage, cls).__new__(
cls,
serializable_error_info=check.inst_param(
serializable_error_info, "serializable_error_info", SerializableErrorInfo
),
message=check.opt_str_param(message, "message"),
)
@whitelist_for_serdes
class IPCEndMessage(NamedTuple("_IPCEndMessage", [])):
def __new__(cls):
return super(IPCEndMessage, cls).__new__(cls)
class DagsterIPCProtocolError(DagsterError):
"""This indicates that something went wrong with the protocol. E.g. the
process being called did not emit an IPCStartMessage first.
"""
def __init__(self, message: str):
self.message = message
super(DagsterIPCProtocolError, self).__init__(message)
class FileBasedWriteStream:
def __init__(self, file_path: str):
check.str_param("file_path", file_path)
self._file_path = file_path
def send(self, dagster_named_tuple: NamedTuple) -> None:
_send(self._file_path, dagster_named_tuple)
def send_error(self, exc_info: ExceptionInfo, message: Optional[str] = None) -> None:
METHOD_NAME(self._file_path, exc_info, message=message)
def _send(file_path: str, obj: NamedTuple) -> None:
with open(os.path.abspath(file_path), "a+", encoding="utf8") as fp:
fp.write(serialize_value(obj) + "\n")
def METHOD_NAME(file_path: str, exc_info: ExceptionInfo, message: Optional[str]) -> None:
return _send(
file_path,
IPCErrorMessage(
serializable_error_info=serializable_error_info_from_exc_info(exc_info), message=message
),
)
@contextmanager
def ipc_write_stream(file_path: str) -> Iterator[FileBasedWriteStream]:
check.str_param("file_path", file_path)
_send(file_path, IPCStartMessage())
try:
yield FileBasedWriteStream(file_path)
except Exception:
METHOD_NAME(file_path, sys.exc_info(), message=None)
finally:
_send(file_path, IPCEndMessage())
def _process_line(file_pointer: TextIOWrapper, sleep_interval: float = 0.1) -> Optional[NamedTuple]:
while True:
line = file_pointer.readline()
if line:
return deserialize_value(line.rstrip(), NamedTuple)
sleep(sleep_interval)
def _poll_process(ipc_process: "Optional[Popen[bytes]]") -> None:
if not ipc_process:
return
if ipc_process.poll() is not None:
raise DagsterIPCProtocolError(
"Process exited with return code {return_code} while waiting for events".format(
return_code=ipc_process.returncode
)
)
def ipc_read_event_stream(
file_path: str, timeout: int = 30, ipc_process: "Optional[Popen[bytes]]" = None
) -> Iterator[Optional[NamedTuple]]:
# Wait for file to be ready
sleep_interval = 0.1
elapsed_time = 0
while elapsed_time < timeout and not os.path.exists(file_path):
_poll_process(ipc_process)
elapsed_time += sleep_interval
sleep(sleep_interval)
if not os.path.exists(file_path):
raise DagsterIPCProtocolError(
"Timeout: read stream has not received any data in {timeout} seconds".format(
timeout=timeout
)
)
with open(os.path.abspath(file_path), "r", encoding="utf8") as file_pointer:
message = _process_line(file_pointer)
while elapsed_time < timeout and message is None:
_poll_process(ipc_process)
elapsed_time += sleep_interval
sleep(sleep_interval)
message = _process_line(file_pointer)
# Process start message
if not isinstance(message, IPCStartMessage):
raise DagsterIPCProtocolError(
"Attempted to read stream at file {file_path}, but first message was not an "
"IPCStartMessage".format(file_path=file_path)
)
message = _process_line(file_pointer)
while not isinstance(message, IPCEndMessage):
if message is None:
_poll_process(ipc_process)
yield message
message = _process_line(file_pointer)
# Windows subprocess termination utilities
# https://stefan.sofa-rockers.org/2013/08/15/handling-sub-process-hierarchies-python-linux-os-x/
def open_ipc_subprocess(parts: Sequence[str], **kwargs: Any) -> "Popen[bytes]":
"""Sets the correct flags to support graceful termination."""
check.list_param(parts, "parts", str)
creationflags = 0
if sys.platform == "win32":
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
return subprocess.Popen( # type: ignore # (unclear whether this is actually guaranteed to return Popen[bytes])
parts,
creationflags=creationflags,
**kwargs,
)
def interrupt_ipc_subprocess(proc: "Popen[bytes]") -> None:
"""Send CTRL_BREAK on Windows, SIGINT on other platforms."""
if sys.platform == "win32":
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
def interrupt_ipc_subprocess_pid(pid: int) -> None:
"""Send CTRL_BREAK on Windows, SIGINT on other platforms."""
check.int_param(pid, "pid")
if sys.platform == "win32":
os.kill(pid, signal.CTRL_BREAK_EVENT)
else:
os.kill(pid, signal.SIGINT) |
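# --- Hedged usage sketch (not part of the original module) ---
# Illustrates the framing implemented by ipc_write_stream / ipc_read_event_stream above:
# the writer emits an IPCStartMessage, the payload messages, then an IPCEndMessage, and
# the reader yields only the payload. `ExampleMessage` and the file path argument are
# hypothetical names introduced here for illustration only.
@whitelist_for_serdes
class ExampleMessage(NamedTuple("_ExampleMessage", [("text", str)])):
    pass

def _example_round_trip(tmp_file_path: str) -> None:
    with ipc_write_stream(tmp_file_path) as stream:
        stream.send(ExampleMessage(text="hello"))
    received = list(ipc_read_event_stream(tmp_file_path, timeout=5))
    assert received == [ExampleMessage(text="hello")]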
arn | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetParameterResult',
'AwaitableGetParameterResult',
'get_parameter',
'get_parameter_output',
]
@pulumi.output_type
class GetParameterResult:
"""
A collection of values returned by getParameter.
"""
def __init__(__self__, METHOD_NAME=None, id=None, insecure_value=None, name=None, type=None, value=None, version=None, with_decryption=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if insecure_value and not isinstance(insecure_value, str):
raise TypeError("Expected argument 'insecure_value' to be a str")
pulumi.set(__self__, "insecure_value", insecure_value)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
if version and not isinstance(version, int):
raise TypeError("Expected argument 'version' to be a int")
pulumi.set(__self__, "version", version)
if with_decryption and not isinstance(with_decryption, bool):
raise TypeError("Expected argument 'with_decryption' to be a bool")
pulumi.set(__self__, "with_decryption", with_decryption)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="insecureValue")
def insecure_value(self) -> str:
return pulumi.get(self, "insecure_value")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
return pulumi.get(self, "value")
@property
@pulumi.getter
def version(self) -> int:
return pulumi.get(self, "version")
@property
@pulumi.getter(name="withDecryption")
def with_decryption(self) -> Optional[bool]:
return pulumi.get(self, "with_decryption")
class AwaitableGetParameterResult(GetParameterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetParameterResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
insecure_value=self.insecure_value,
name=self.name,
type=self.type,
value=self.value,
version=self.version,
with_decryption=self.with_decryption)
def get_parameter(name: Optional[str] = None,
with_decryption: Optional[bool] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetParameterResult:
"""
Provides an SSM Parameter data source.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_parameter(name="foo")
```
> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
:param str name: Name of the parameter.
:param bool with_decryption: Whether to return decrypted `SecureString` value. Defaults to `true`.
In addition to all arguments above, the following attributes are exported:
"""
__args__ = dict()
__args__['name'] = name
__args__['withDecryption'] = with_decryption
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ssm/getParameter:getParameter', __args__, opts=opts, typ=GetParameterResult).value
return AwaitableGetParameterResult(
METHOD_NAME=pulumi.get(__ret__, 'arn'),
id=pulumi.get(__ret__, 'id'),
insecure_value=pulumi.get(__ret__, 'insecure_value'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'),
value=pulumi.get(__ret__, 'value'),
version=pulumi.get(__ret__, 'version'),
with_decryption=pulumi.get(__ret__, 'with_decryption'))
@_utilities.lift_output_func(get_parameter)
def get_parameter_output(name: Optional[pulumi.Input[str]] = None,
with_decryption: Optional[pulumi.Input[Optional[bool]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetParameterResult]:
"""
Provides an SSM Parameter data source.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_parameter(name="foo")
```
> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
:param str name: Name of the parameter.
:param bool with_decryption: Whether to return decrypted `SecureString` value. Defaults to `true`.
In addition to all arguments above, the following attributes are exported:
"""
... |
properties | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetContainerRegistryResult',
'AwaitableGetContainerRegistryResult',
'get_container_registry',
'get_container_registry_output',
]
@pulumi.output_type
class GetContainerRegistryResult:
"""
Container registry resource payload.
"""
def __init__(__self__, id=None, name=None, METHOD_NAME=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> 'outputs.ContainerRegistryPropertiesResponse':
"""
Properties of the container registry resource payload.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetContainerRegistryResult(GetContainerRegistryResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetContainerRegistryResult(
id=self.id,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
type=self.type)
def get_container_registry(container_registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContainerRegistryResult:
"""
Get the container registries resource.
:param str container_registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
__args__ = dict()
__args__['containerRegistryName'] = container_registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230701preview:getContainerRegistry', __args__, opts=opts, typ=GetContainerRegistryResult).value
return AwaitableGetContainerRegistryResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_container_registry)
def get_container_registry_output(container_registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetContainerRegistryResult]:
"""
Get the container registries resource.
:param str container_registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
... |
test get zone normal | """
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.modules.win_timezone as win_timezone
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
pytestmark = [
pytest.mark.skipif(not win_timezone.HAS_PYTZ, reason="This test requires pytz"),
]
@pytest.fixture
def configure_loader_modules():
return {
win_timezone: {
"__opts__": {},
"__salt__": {},
"__utils__": {},
},
}
def METHOD_NAME():
"""
    Test if it gets the current timezone (i.e. Asia/Calcutta)
"""
mock_read_ok = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_ok}):
assert win_timezone.get_zone() == "Asia/Calcutta"
def test_get_zone_normal_dstoff():
"""
Test if it gets current timezone with dst off (i.e. America/Denver)
"""
mock_read_ok = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "Mountain Standard Time_dstoff",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_ok}):
assert win_timezone.get_zone() == "America/Denver"
def test_get_zone_normal_dstoff_issue():
"""
Test regression with dstoff fix stripping unwanted characters
"""
mock_read_ok = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "FLE Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_ok}):
assert win_timezone.get_zone() == "Europe/Kiev"
@pytest.mark.parametrize("timezone", win_timezone.mapper.list_win())
def test_get_zone_all(timezone):
"""
Test all Win zones are properly resolved and none returns Unknown
"""
mock_read_ok = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": timezone,
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_ok}):
assert win_timezone.get_zone() != "Unknown"
def test_get_zone_unknown():
"""
Test get_zone with unknown timezone (i.e. Indian Standard Time)
"""
mock_read_error = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "Indian Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_error}):
assert win_timezone.get_zone() == "Unknown"
def test_get_zone_error():
"""
Test get_zone when it encounters an error
"""
mock_read_fatal = MagicMock(
return_value={"pid": 78, "retcode": 1, "stderr": "", "stdout": ""}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read_fatal}):
with pytest.raises(CommandExecutionError):
win_timezone.get_zone()
def test_get_offset():
"""
    Test if it gets the current numeric timezone offset from UTC (i.e. +0530)
"""
mock_read = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read}):
assert win_timezone.get_offset() == "+0530"
def test_get_zonecode():
"""
    Test if it gets the current timezone code (i.e. PST, MDT, etc)
"""
mock_read = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read}):
assert win_timezone.get_zonecode() == "IST"
def test_set_zone():
"""
    Test if it sets the Windows timezone to the given zone.
"""
mock_write = MagicMock(
return_value={"pid": 78, "retcode": 0, "stderr": "", "stdout": ""}
)
mock_read = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_write}), patch.dict(
win_timezone.__salt__, {"cmd.run_all": mock_read}
):
assert win_timezone.set_zone("Asia/Calcutta")
def test_zone_compare():
"""
    Test if it compares the given timezone against the one currently set.
    Returns True if they match, and False if not. Mostly useful for
    running state checks.
"""
mock_read = MagicMock(
return_value={
"pid": 78,
"retcode": 0,
"stderr": "",
"stdout": "India Standard Time",
}
)
with patch.dict(win_timezone.__salt__, {"cmd.run_all": mock_read}):
assert win_timezone.zone_compare("Asia/Calcutta")
def test_get_hwclock():
"""
    Test if it gets the current hardware clock setting (UTC or localtime)
"""
assert win_timezone.get_hwclock() == "localtime"
def test_set_hwclock():
"""
Test if it sets the hardware clock to be either UTC or localtime
"""
assert not win_timezone.set_hwclock("UTC") |
prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", "2022-11-01-preview")
)
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.ServiceLinker/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.servicelinker.ServiceLinkerManagementClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists the available ServiceLinker REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.servicelinker.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-11-01-preview"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.ServiceLinker/operations"} |
publish | import asyncio
import pickle
try: # pragma: no cover
from redis import asyncio as aioredis
from redis.exceptions import RedisError
except ImportError: # pragma: no cover
try:
import aioredis
from aioredis.exceptions import RedisError
except ImportError:
aioredis = None
RedisError = None
from .asyncio_pubsub_manager import AsyncPubSubManager
class AsyncRedisManager(AsyncPubSubManager): # pragma: no cover
"""Redis based client manager for asyncio servers.
This class implements a Redis backend for event sharing across multiple
processes.
To use a Redis backend, initialize the :class:`AsyncServer` instance as
follows::
url = 'redis://hostname:port/0'
server = socketio.AsyncServer(
client_manager=socketio.AsyncRedisManager(url))
:param url: The connection URL for the Redis server. For a default Redis
store running on the same host, use ``redis://``. To use an
SSL connection, use ``rediss://``.
:param channel: The channel name on which the server sends and receives
notifications. Must be the same in all the servers.
:param write_only: If set to ``True``, only initialize to emit events. The
default of ``False`` initializes the class for emitting
and receiving.
:param redis_options: additional keyword arguments to be passed to
``aioredis.from_url()``.
"""
name = 'aioredis'
def __init__(self, url='redis://localhost:6379/0', channel='socketio',
write_only=False, logger=None, redis_options=None):
if aioredis is None:
raise RuntimeError('Redis package is not installed '
'(Run "pip install redis" in your virtualenv).')
if not hasattr(aioredis.Redis, 'from_url'):
raise RuntimeError('Version 2 of aioredis package is required.')
self.redis_url = url
self.redis_options = redis_options or {}
self._redis_connect()
super().__init__(channel=channel, write_only=write_only, logger=logger)
def _redis_connect(self):
self.redis = aioredis.Redis.from_url(self.redis_url,
**self.redis_options)
self.pubsub = self.redis.pubsub(ignore_subscribe_messages=True)
async def METHOD_NAME(self, data):
retry = True
while True:
try:
if not retry:
self._redis_connect()
return await self.redis.publish(
self.channel, pickle.dumps(data))
except RedisError:
if retry:
self._get_logger().error('Cannot publish to redis... '
'retrying')
retry = False
else:
self._get_logger().error('Cannot publish to redis... '
'giving up')
break
async def _redis_listen_with_retries(self):
retry_sleep = 1
connect = False
while True:
try:
if connect:
self._redis_connect()
await self.pubsub.subscribe(self.channel)
retry_sleep = 1
async for message in self.pubsub.listen():
yield message
except RedisError:
self._get_logger().error('Cannot receive from redis... '
'retrying in '
'{} secs'.format(retry_sleep))
connect = True
await asyncio.sleep(retry_sleep)
retry_sleep *= 2
if retry_sleep > 60:
retry_sleep = 60
async def _listen(self):
channel = self.channel.encode('utf-8')
await self.pubsub.subscribe(self.channel)
async for message in self._redis_listen_with_retries():
if message['channel'] == channel and \
message['type'] == 'message' and 'data' in message:
yield message['data']
await self.pubsub.unsubscribe(self.channel) |
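# --- Hedged usage sketch (not part of the original module) ---
# Besides backing an AsyncServer (as shown in the class docstring), a write-only manager
# can publish events from an external process through the same Redis channel. The URL and
# payload below are illustrative only; a reachable Redis server is assumed at emit time.
async def _emit_from_external_process():
    external = AsyncRedisManager('redis://localhost:6379/0', write_only=True)
    # Any server process subscribed to the same channel relays this event to its clients.
    await external.emit('notification', {'status': 'ok'})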
test withdraw twice | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/bank-account/canonical-data.json
# File last updated on 2023-07-20
import unittest
from bank_account import (
BankAccount,
)
class BankAccountTest(unittest.TestCase):
def test_newly_opened_account_has_zero_balance(self):
account = BankAccount()
account.open()
self.assertEqual(account.get_balance(), 0)
def test_single_deposit(self):
account = BankAccount()
account.open()
account.deposit(100)
self.assertEqual(account.get_balance(), 100)
def test_multiple_deposits(self):
account = BankAccount()
account.open()
account.deposit(100)
account.deposit(50)
self.assertEqual(account.get_balance(), 150)
def test_withdraw_once(self):
account = BankAccount()
account.open()
account.deposit(100)
account.withdraw(75)
self.assertEqual(account.get_balance(), 25)
def METHOD_NAME(self):
account = BankAccount()
account.open()
account.deposit(100)
account.withdraw(80)
account.withdraw(20)
self.assertEqual(account.get_balance(), 0)
def test_can_do_multiple_operations_sequentially(self):
account = BankAccount()
account.open()
account.deposit(100)
account.deposit(110)
account.withdraw(200)
account.deposit(60)
account.withdraw(50)
self.assertEqual(account.get_balance(), 20)
def test_cannot_check_balance_of_closed_account(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaises(ValueError) as err:
account.get_balance()
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account not open")
def test_cannot_deposit_into_closed_account(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaises(ValueError) as err:
account.deposit(50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account not open")
def test_cannot_deposit_into_unopened_account(self):
account = BankAccount()
with self.assertRaises(ValueError) as err:
account.deposit(50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account not open")
def test_cannot_withdraw_from_closed_account(self):
account = BankAccount()
account.open()
account.close()
with self.assertRaises(ValueError) as err:
account.withdraw(50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account not open")
def test_cannot_close_an_account_that_was_not_opened(self):
account = BankAccount()
with self.assertRaises(ValueError) as err:
account.close()
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account not open")
def test_cannot_open_an_already_opened_account(self):
account = BankAccount()
account.open()
with self.assertRaises(ValueError) as err:
account.open()
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "account already open")
def test_reopened_account_does_not_retain_balance(self):
account = BankAccount()
account.open()
account.deposit(50)
account.close()
account.open()
self.assertEqual(account.get_balance(), 0)
def test_cannot_withdraw_more_than_deposited(self):
account = BankAccount()
account.open()
account.deposit(25)
with self.assertRaises(ValueError) as err:
account.withdraw(50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "amount must be less than balance")
def test_cannot_withdraw_negative(self):
account = BankAccount()
account.open()
account.deposit(100)
with self.assertRaises(ValueError) as err:
account.withdraw(-50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "amount must be greater than 0")
def test_cannot_deposit_negative(self):
account = BankAccount()
account.open()
with self.assertRaises(ValueError) as err:
account.deposit(-50)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "amount must be greater than 0") |
add publication trustlevels | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SubscriberFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SubscriberFeatures()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsSubscriberFeatures(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# SubscriberFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# SubscriberFeatures
def PublisherIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PatternBasedSubscription(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PublicationTrustlevels(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def SubscriptionRevocation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def EventHistory(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def AcknowledgeSubscriberReceived(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# SubscriberFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def SubscriberFeaturesStart(builder): builder.StartObject(8)
def Start(builder):
return SubscriberFeaturesStart(builder)
def SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification): builder.PrependBoolSlot(0, publisherIdentification, 0)
def AddPublisherIdentification(builder, publisherIdentification):
return SubscriberFeaturesAddPublisherIdentification(builder, publisherIdentification)
def SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription): builder.PrependBoolSlot(1, patternBasedSubscription, 0)
def AddPatternBasedSubscription(builder, patternBasedSubscription):
return SubscriberFeaturesAddPatternBasedSubscription(builder, patternBasedSubscription)
def SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels): builder.PrependBoolSlot(2, publicationTrustlevels, 0)
def METHOD_NAME(builder, publicationTrustlevels):
return SubscriberFeaturesAddPublicationTrustlevels(builder, publicationTrustlevels)
def SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation): builder.PrependBoolSlot(3, subscriptionRevocation, 0)
def AddSubscriptionRevocation(builder, subscriptionRevocation):
return SubscriberFeaturesAddSubscriptionRevocation(builder, subscriptionRevocation)
def SubscriberFeaturesAddEventHistory(builder, eventHistory): builder.PrependBoolSlot(4, eventHistory, 0)
def AddEventHistory(builder, eventHistory):
return SubscriberFeaturesAddEventHistory(builder, eventHistory)
def SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived): builder.PrependBoolSlot(5, acknowledgeSubscriberReceived, 0)
def AddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived):
return SubscriberFeaturesAddAcknowledgeSubscriberReceived(builder, acknowledgeSubscriberReceived)
def SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(6, payloadTransparency, 0)
def AddPayloadTransparency(builder, payloadTransparency):
return SubscriberFeaturesAddPayloadTransparency(builder, payloadTransparency)
def SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(7, payloadEncryptionCryptobox, 0)
def AddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox):
return SubscriberFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox)
def SubscriberFeaturesEnd(builder): return builder.EndObject()
def End(builder):
    return SubscriberFeaturesEnd(builder)
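# --- Hedged usage sketch (not part of the generated module) ---
# Builds a SubscriberFeatures table with the builder helpers above and reads it back.
# Only two of the eight boolean slots are set; unset slots keep their default of False.
def _example_build_and_read() -> bool:
    builder = flatbuffers.Builder(0)
    SubscriberFeaturesStart(builder)
    SubscriberFeaturesAddPublisherIdentification(builder, True)
    SubscriberFeaturesAddPublicationTrustlevels(builder, True)
    features = SubscriberFeaturesEnd(builder)
    builder.Finish(features)
    # Parse the finished buffer back into an accessor object.
    parsed = SubscriberFeatures.GetRootAs(builder.Output(), 0)
    assert parsed.PublisherIdentification()
    assert not parsed.PatternBasedSubscription()
    return parsed.PublicationTrustlevels()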
send signal | import os
import re
import pwd, grp
from . import interfaces
import tuned.logs
import tuned.consts as consts
from inspect import ismethod
import socket
import json
import select
log = tuned.logs.get()
class UnixSocketExporter(interfaces.ExporterInterface):
"""
Export method calls through Unix Domain Socket Interface.
We take a method to be exported and create a simple wrapper function
to call it. This is required as we need the original function to be
    bound to the original object instance, while the wrapper will be bound
    to an object we dynamically construct.
"""
def __init__(self, socket_path=consts.CFG_DEF_UNIX_SOCKET_PATH,
signal_paths=consts.CFG_DEF_UNIX_SOCKET_SIGNAL_PATHS,
ownership=consts.CFG_DEF_UNIX_SOCKET_OWNERSHIP,
permissions=consts.CFG_DEF_UNIX_SOCKET_PERMISIONS,
connections_backlog=consts.CFG_DEF_UNIX_SOCKET_CONNECTIONS_BACKLOG):
self._socket_path = socket_path
self._socket_object = None
        self._socket_signal_paths = re.split(r"[,;]", signal_paths) if signal_paths else []
self._socket_signal_objects = []
self._ownership = [-1, -1]
if ownership:
ownership = ownership.split()
for i, o in enumerate(ownership[:2]):
try:
self._ownership[i] = int(o)
except ValueError:
try:
# user
if i == 0:
self._ownership[i] = pwd.getpwnam(o).pw_uid
# group
else:
self._ownership[i] = grp.getgrnam(o).gr_gid
except KeyError:
log.error("%s '%s' does not exists, leaving default" % ("User" if i == 0 else "Group", o))
self._permissions = permissions
self._connections_backlog = connections_backlog
self._unix_socket_methods = {}
self._signals = set()
self._conn = None
self._channel = None
def running(self):
return self._socket_object is not None
def export(self, method, in_signature, out_signature):
if not ismethod(method):
raise Exception("Only bound methods can be exported.")
method_name = method.__name__
if method_name in self._unix_socket_methods:
raise Exception("Method with this name (%s) is already exported." % method_name)
class wrapper(object):
def __init__(self, in_signature, out_signature):
self._in_signature = in_signature
self._out_signature = out_signature
def __call__(self, *args, **kwargs):
return method(*args, **kwargs)
self._unix_socket_methods[method_name] = wrapper(in_signature, out_signature)
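# Illustrative only (not part of tuned): a daemon would typically export a
# bound method and then start the exporter, e.g.
#
#     exporter = UnixSocketExporter()
#     exporter.export(controller.active_profile, in_signature="", out_signature="s")
#     exporter.start()
#
# after which period_check() (below) answers matching JSON-RPC calls.
# 'controller.active_profile' and the signatures are made-up names for this sketch.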
def signal(self, method, out_signature):
if not ismethod(method):
raise Exception("Only bound methods can be exported.")
method_name = method.__name__
if method_name in self._unix_socket_methods:
raise Exception("Method with this name (%s) is already exported." % method_name)
class wrapper(object):
def __init__(self, out_signature):
self._out_signature = out_signature
def __call__(self, *args, **kwargs):
return method(*args, **kwargs)
self._unix_socket_methods[method_name] = wrapper(out_signature)
self._signals.add(method_name)
def METHOD_NAME(self, signal, *args, **kwargs):
if not signal in self._signals:
raise Exception("Signal '%s' doesn't exist." % signal)
for p in self._socket_signal_paths:
log.debug("Sending signal on socket %s" % p)
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.setblocking(False)
s.connect(p)
self._send_data(s, {"jsonrpc": "2.0", "method": signal, "params": args})
s.close()
except OSError as e:
log.warning("Error while sending signal '%s' to socket '%s': %s" % (signal, p, e))
def register_signal_path(self, path):
self._socket_signal_paths.append(path)
def _construct_socket_object(self):
if self._socket_path:
if os.path.exists(self._socket_path):
os.unlink(self._socket_path)
self._socket_object = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._socket_object.bind(self._socket_path)
self._socket_object.listen(self._connections_backlog)
os.chown(self._socket_path, self._ownership[0], self._ownership[1])
if self._permissions:
os.chmod(self._socket_path, self._permissions)
def start(self):
if self.running():
return
self.stop()
self._construct_socket_object()
def stop(self):
if self._socket_object:
self._socket_object.close()
def _send_data(self, s, data):
log.debug("Sending socket data: %s)" % data)
try:
s.send(json.dumps(data).encode("utf-8"))
except Exception as e:
log.warning("Failed to send data '%s': %s" % (data, e))
def _create_response(self, data, id, error=False):
res = {
"jsonrpc": "2.0",
"id": id
}
if error:
res["error"] = data
else:
res["result"] = data
return res
def _create_error_responce(self, code, message, id=None, data=None):
return self._create_response({
"code": code,
"message": message,
"data": data,
}, error=True, id=id)
def _create_result_response(self, result, id):
return self._create_response(result, id)
def _check_id(self, data):
if data.get("id"):
return data
return None
def _process_request(self, req):
if type(req) != dict or req.get("jsonrpc") != "2.0" or not req.get("method"):
return self._create_error_responce(-32600, "Invalid Request")
id = req.get("id")
ret = None
if req["method"] not in self._unix_socket_methods:
return self._check_id(self._create_error_responce(-32601, "Method not found", id))
try:
if not req.get("params"):
ret = self._unix_socket_methods[req["method"]]()
elif type(req["params"]) in (list, tuple):
ret = self._unix_socket_methods[req["method"]](*req["params"])
elif type(req["params"]) == dict:
ret = self._unix_socket_methods[req["method"]](**req["params"])
else:
return self._check_id(self._create_error_responce(-32600, "Invalid Request", id))
except TypeError as e:
return self._check_id(self._create_error_responce(-32602, "Invalid params", id, str(e)))
except Exception as e:
return self._check_id(self._create_error_responce(1, "Error", id, str(e)))
return self._check_id(self._create_result_response(ret, id))
def period_check(self):
"""
Periodically check the socket object for new calls. This allows the exporter to work without a dedicated thread.
The interface follows the JSON-RPC 2.0 specification (see https://www.jsonrpc.org/specification)
Example calls:
printf '[{"jsonrpc": "2.0", "method": "active_profile", "id": 1}, {"jsonrpc": "2.0", "method": "profiles", "id": 2}]' | nc -U /run/tuned/tuned.sock
printf '{"jsonrpc": "2.0", "method": "switch_profile", "params": {"profile_name": "balanced"}, "id": 1}' | nc -U /run/tuned/tuned.sock
"""
if not self.running():
return
while True:
r, _, _ = select.select([self._socket_object], (), (), 0)
if r:
conn, _ = self._socket_object.accept()
try:
data = ""
while True:
rec_data = conn.recv(4096).decode()
if not rec_data:
break
data += rec_data
except Exception as e:
log.error("Failed to load data of message: %s" % e)
continue
if data:
try:
data = json.loads(data)
except Exception as e:
log.error("Failed to load json data '%s': %s" % (data, e))
self._send_data(conn, self._create_error_responce(-32700, "Parse error", str(e)))
continue
if type(data) not in (tuple, list, dict):
log.error("Wrong format of call")
self._send_data(conn, self._create_error_responce(-32700, "Parse error", str(e)))
continue
if type(data) in (tuple, list):
if len(data) == 0:
self._send_data(conn, self._create_error_responce(-32600, "Invalid Request", str(e)))
continue
res = []
for req in data:
r = self._process_request(req)
if r:
res.append(r)
if res:
self._send_data(conn, res)
else:
res = self._process_request(data)
if res:
self._send_data(conn, res)
else:
return
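# Illustrative only (not part of tuned): a minimal JSON-RPC client for the
# exporter above, mirroring the nc examples in period_check()'s docstring.
# The default socket path and method name are assumptions for this sketch;
# json and socket are already imported at the top of this module.
def call_unix_socket_method(path="/run/tuned/tuned.sock", method="active_profile", params=None, request_id=1):
	request = {"jsonrpc": "2.0", "method": method, "id": request_id}
	if params is not None:
		request["params"] = params
	s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
	try:
		s.connect(path)
		s.sendall(json.dumps(request).encode("utf-8"))
		# Close the write side so the server's recv() loop sees EOF and replies.
		s.shutdown(socket.SHUT_WR)
		return json.loads(s.recv(4096).decode("utf-8"))
	finally:
		s.close()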
|
test sythesize sequences | import datetime
import pandas as pd
from deepecho import load_demo
from sdv.datasets.demo import download_demo
from sdv.metadata import SingleTableMetadata
from sdv.sequential import PARSynthesizer
def _get_par_data_and_metadata():
date = datetime.datetime.strptime('2020-01-01', '%Y-%m-%d')
data = pd.DataFrame({
'column1': [1.0, 2.0, 1.5, 1.3],
'date': [date, date, date, date],
'column2': ['b', 'a', 'a', 'c'],
'entity': [1, 1, 2, 2],
'context': ['a', 'a', 'b', 'b']
})
metadata = SingleTableMetadata()
metadata.detect_from_dataframe(data)
metadata.update_column('entity', sdtype='id')
metadata.set_sequence_key('entity')
metadata.set_sequence_index('date')
return data, metadata
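# Illustrative only (not part of the SDV tests): after the helper above runs, the
# detected metadata records the sequence structure, so checks along these lines
# should hold (the exact to_dict() layout can vary between SDV versions):
#
#     data, metadata = _get_par_data_and_metadata()
#     assert metadata.to_dict()['sequence_key'] == 'entity'
#     assert metadata.to_dict()['sequence_index'] == 'date'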
def test_par():
"""Test the ``PARSynthesizer`` end to end."""
# Setup
data = load_demo()
data['date'] = pd.to_datetime(data['date'])
metadata = SingleTableMetadata()
metadata.detect_from_dataframe(data)
metadata.update_column('store_id', sdtype='id')
metadata.set_sequence_key('store_id')
metadata.set_sequence_index('date')
model = PARSynthesizer(
metadata=metadata,
context_columns=['region'],
epochs=1,
)
# Run
model.fit(data)
sampled = model.sample(100)
# Assert
assert sampled.shape == data.shape
assert (sampled.dtypes == data.dtypes).all()
assert (sampled.notna().sum(axis=1) != 0).all()
def test_column_after_date_simple():
"""Test that adding a column after the ``sequence_index`` column works."""
# Setup
date = datetime.datetime.strptime('2020-01-01', '%Y-%m-%d')
data = pd.DataFrame({
'col': [1, 1],
'date': [date, date],
'col2': ['hello', 'world'],
})
metadata = SingleTableMetadata()
metadata.detect_from_dataframe(data)
metadata.update_column('col', sdtype='id')
metadata.set_sequence_key('col')
metadata.set_sequence_index('date')
# Run
model = PARSynthesizer(metadata=metadata, epochs=1)
model.fit(data)
sampled = model.sample(1)
# Assert
assert sampled.shape == data.shape
assert (sampled.dtypes == data.dtypes).all()
assert (sampled.notna().sum(axis=1) != 0).all()
def test_column_after_date_complex():
"""Test that adding multiple columns after the ``sequence_index`` column works."""
# Setup
data, metadata = _get_par_data_and_metadata()
# Run
model = PARSynthesizer(metadata=metadata, context_columns=['context'], epochs=1)
model.fit(data)
sampled = model.sample(2)
# Assert
assert sampled.shape == data.shape
assert (sampled.dtypes == data.dtypes).all()
assert (sampled.notna().sum(axis=1) != 0).all()
def test_save_and_load(tmp_path):
"""Test that synthesizers can be saved and loaded properly."""
# Setup
_, metadata = _get_par_data_and_metadata()
instance = PARSynthesizer(metadata)
synthesizer_path = tmp_path / 'synthesizer.pkl'
instance.save(synthesizer_path)
# Run
loaded_instance = PARSynthesizer.load(synthesizer_path)
# Assert
assert isinstance(loaded_instance, PARSynthesizer)
assert metadata == instance.metadata
def METHOD_NAME(tmp_path):
"""End to end test for synthesizing sequences.
The following functionalities are being tested:
* Fit a ``PARSynthesizer`` with the demo dataset.
* Fit a ``PARSynthesizer`` with custom context.
* Sample from the model.
* Conditionally sample from the model.
* Save and Load.
"""
# Setup
real_data, metadata = download_demo(
modality='sequential',
dataset_name='nasdaq100_2019'
)
real_data[real_data['Symbol'] == 'AMZN']['Sector'].unique()
synthesizer = PARSynthesizer(
metadata,
epochs=5,
context_columns=['Sector', 'Industry']
)
custom_synthesizer = PARSynthesizer(
metadata,
epochs=5,
context_columns=['Sector', 'Industry'],
verbose=True
)
scenario_context = pd.DataFrame(data={
'Symbol': ['COMPANY-A', 'COMPANY-B', 'COMPANY-C', 'COMPANY-D', 'COMPANY-E'],
'Sector': ['Technology'] * 2 + ['Consumer Services'] * 3,
'Industry': [
'Computer Manufacturing', 'Computer Software: Prepackaged Software',
'Hotels/Resorts', 'Restaurants', 'Clothing/Shoe/Accessory Stores'
]
})
# Run - Fit
synthesizer.fit(real_data)
custom_synthesizer.fit(real_data)
# Run - Sample
synthetic_data = synthesizer.sample(num_sequences=10)
custom_synthetic_data = custom_synthesizer.sample(num_sequences=3, sequence_length=2)
custom_synthetic_data_conditional = custom_synthesizer.sample_sequential_columns(
context_columns=scenario_context,
sequence_length=2
)
# Save and Load
model_path = tmp_path / 'my_synthesizer.pkl'
synthesizer.save(model_path)
loaded_synthesizer = PARSynthesizer.load(model_path)
loaded_sample = loaded_synthesizer.sample(100)
# Assert
assert all(custom_synthetic_data_conditional['Symbol'].value_counts() == 2)
companies = ['COMPANY-A', 'COMPANY-B', 'COMPANY-C', 'COMPANY-D', 'COMPANY-E']
assert companies in custom_synthetic_data_conditional['Symbol'].unique()
assert custom_synthetic_data_conditional['Sector'].value_counts()['Technology'] == 4
assert custom_synthetic_data_conditional['Sector'].value_counts()['Consumer Services'] == 6
industries = [
'Computer Manufacturing',
'Computer Software: Prepackaged Software',
'Hotels/Resorts',
'Restaurants',
'Clothing/Shoe/Accessory Stores'
]
assert industries in custom_synthetic_data_conditional['Industry'].unique()
assert model_path.exists()
assert model_path.is_file()
assert loaded_synthesizer.get_info() == synthesizer.get_info()
assert loaded_synthesizer.metadata.to_dict() == metadata.to_dict()
synthesizer.validate(synthetic_data)
synthesizer.validate(custom_synthetic_data)
synthesizer.validate(custom_synthetic_data_conditional)
synthesizer.validate(loaded_sample)
loaded_synthesizer.validate(synthetic_data)
loaded_synthesizer.validate(loaded_sample) |
from server address and domain | # -*- test-case-name: twisted.names.test.test_names -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
__all__ = ["SecondaryAuthority", "SecondaryAuthorityService"]
from twisted.application import service
from twisted.internet import defer, task
from twisted.names import client, common, dns, resolve
from twisted.names.authority import FileAuthority
from twisted.python import failure, log
from twisted.python.compat import nativeString
class SecondaryAuthorityService(service.Service):
"""
A service that keeps one or more authorities up to date by doing hourly
zone transfers from a master.
@ivar primary: IP address of the master.
@type primary: L{str}
@ivar domains: An authority for each domain mirrored from the master.
@type domains: L{list} of L{SecondaryAuthority}
"""
calls = None
_port = 53
def __init__(self, primary, domains):
"""
@param primary: The IP address of the server from which to perform
zone transfers.
@type primary: L{str}
@param domains: A sequence of domain names for which to perform
zone transfers.
@type domains: L{list} of L{bytes}
"""
self.primary = nativeString(primary)
self.domains = [SecondaryAuthority(primary, d) for d in domains]
@classmethod
def fromServerAddressAndDomains(cls, serverAddress, domains):
"""
Construct a new L{SecondaryAuthorityService} from a tuple giving a
server address and a sequence of domain names for which this service
is an authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domains: Domain names for which to perform zone transfers.
@type domains: sequence of L{bytes}
@return: A new instance of L{SecondaryAuthorityService}.
"""
primary, port = serverAddress
service = cls(primary, [])
service._port = port
service.domains = [
SecondaryAuthority.METHOD_NAME(serverAddress, d)
for d in domains
]
return service
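# Illustrative only (not part of Twisted): one plausible way to wire this
# service into a name server; the address and domain names below are made up.
#
#     from twisted.names import server
#     svc = SecondaryAuthorityService.fromServerAddressAndDomains(
#         ("10.2.3.4", 53), [b"example.com", b"example.org"])
#     factory = server.DNSServerFactory(authorities=[svc.getAuthority()])
#     svc.startService()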
def getAuthority(self):
"""
Get a resolver for the transferred domains.
@rtype: L{ResolverChain}
"""
return resolve.ResolverChain(self.domains)
def startService(self):
service.Service.startService(self)
self.calls = [task.LoopingCall(d.transfer) for d in self.domains]
i = 0
from twisted.internet import reactor
for c in self.calls:
# XXX Add errbacks, respect proper timeouts
reactor.callLater(i, c.start, 60 * 60)
i += 1
def stopService(self):
service.Service.stopService(self)
for c in self.calls:
c.stop()
class SecondaryAuthority(FileAuthority):
"""
An Authority that keeps itself updated by performing zone transfers.
@ivar primary: The IP address of the server from which zone transfers will
be attempted.
@type primary: L{str}
@ivar _port: The port number of the server from which zone transfers will
be attempted.
@type _port: L{int}
@ivar domain: The domain for which this is the secondary authority.
@type domain: L{bytes}
@ivar _reactor: The reactor to use to perform the zone transfers, or
L{None} to use the global reactor.
"""
transferring = False
soa = records = None
_port = 53
_reactor = None
def __init__(self, primaryIP, domain):
"""
@param domain: The domain for which this will be the secondary
authority.
@type domain: L{bytes} or L{str}
"""
# Yep. Skip over FileAuthority.__init__. This is a hack until we have
# a good composition-based API for the complicated DNS record lookup
# logic we want to share.
common.ResolverBase.__init__(self)
self.primary = nativeString(primaryIP)
self.domain = dns.domainString(domain)
@classmethod
def METHOD_NAME(cls, serverAddress, domain):
"""
Construct a new L{SecondaryAuthority} from a tuple giving a server
address and a C{bytes} giving the name of a domain for which this is an
authority.
@param serverAddress: A two-tuple, the first element of which is a
C{str} giving an IP address and the second element of which is a
C{int} giving a port number. Together, these define where zone
transfers will be attempted from.
@param domain: A C{bytes} giving the domain to transfer.
@type domain: L{bytes}
@return: A new instance of L{SecondaryAuthority}.
"""
primary, port = serverAddress
secondary = cls(primary, domain)
secondary._port = port
return secondary
def transfer(self):
"""
Attempt a zone transfer.
@returns: A L{Deferred} that fires with L{None} when attempted zone
transfer has completed.
"""
# FIXME: This logic doesn't avoid duplicate transfers
# https://twistedmatrix.com/trac/ticket/9754
if self.transferring: # <-- never true
return
self.transfering = True # <-- speling
reactor = self._reactor
if reactor is None:
from twisted.internet import reactor
resolver = client.Resolver(
servers=[(self.primary, self._port)], reactor=reactor
)
return (
resolver.lookupZone(self.domain)
.addCallback(self._cbZone)
.addErrback(self._ebZone)
)
def _lookup(self, name, cls, type, timeout=None):
if not self.soa or not self.records:
# No transfer has occurred yet. Fail non-authoritatively so that
# the caller can try elsewhere.
return defer.fail(failure.Failure(dns.DomainError(name)))
return FileAuthority._lookup(self, name, cls, type, timeout)
def _cbZone(self, zone):
ans, _, _ = zone
self.records = r = {}
for rec in ans:
if not self.soa and rec.type == dns.SOA:
self.soa = (rec.name.name.lower(), rec.payload)
else:
r.setdefault(rec.name.name.lower(), []).append(rec.payload)
def _ebZone(self, failure):
log.msg(
"Updating %s from %s failed during zone transfer"
% (self.domain, self.primary)
)
log.err(failure)
def update(self):
self.transfer().addCallbacks(self._cbTransferred, self._ebTransferred)
def _cbTransferred(self, result):
self.transferring = False
def _ebTransferred(self, failure):
self.transferred = False
log.msg(
"Transferring %s from %s failed after zone transfer"
% (self.domain, self.primary)
)
log.err(failure) |
test policy state spec is empty | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.utils.random_py_policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.policies import random_py_policy
from tf_agents.specs import array_spec
from tf_agents.trajectories import time_step
from tf_agents.utils import test_utils
class RandomPyPolicyTest(test_utils.TestCase):
def setUp(self):
super(RandomPyPolicyTest, self).setUp()
self._time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32)
)
self._time_step = time_step.restart(observation=np.array([1]))
def testGeneratesActions(self):
action_spec = [
array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10),
]
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec, action_spec=action_spec
)
action_step = policy.action(self._time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def testGeneratesBatchedActions(self):
action_spec = [
array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10),
]
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec,
action_spec=action_spec,
outer_dims=(3,),
)
action_step = policy.action(self._time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertEqual((3, 2, 3), action_step.action[0].shape)
self.assertEqual((3, 1, 2), action_step.action[1].shape)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def testGeneratesBatchedActionsWithoutSpecifyingOuterDims(self):
action_spec = [
array_spec.BoundedArraySpec((2, 3), np.int32, -10, 10),
array_spec.BoundedArraySpec((1, 2), np.int32, -10, 10),
]
time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32)
)
policy = random_py_policy.RandomPyPolicy(
time_step_spec=time_step_spec, action_spec=action_spec
)
action_step = policy.action(
time_step.restart(np.array([[1], [2], [3]], dtype=np.int32))
)
tf.nest.assert_same_structure(action_spec, action_step.action)
self.assertEqual((3, 2, 3), action_step.action[0].shape)
self.assertEqual((3, 1, 2), action_step.action[1].shape)
self.assertTrue(np.all(action_step.action[0] >= -10))
self.assertTrue(np.all(action_step.action[0] <= 10))
self.assertTrue(np.all(action_step.action[1] >= -10))
self.assertTrue(np.all(action_step.action[1] <= 10))
def METHOD_NAME(self):
policy = random_py_policy.RandomPyPolicy(
time_step_spec=self._time_step_spec, action_spec=[]
)
self.assertEqual(policy.policy_state_spec, ())
def testMasking(self):
batch_size = 1000
time_step_spec = time_step.time_step_spec(
observation_spec=array_spec.ArraySpec((1,), np.int32)
)
action_spec = array_spec.BoundedArraySpec((), np.int64, -5, 5)
# We create a fixed mask here for testing purposes. Normally the mask would
# be part of the observation.
mask = [0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0]
np_mask = np.array(mask)
batched_mask = np.array([mask for _ in range(batch_size)])
policy = random_py_policy.RandomPyPolicy(
time_step_spec=time_step_spec,
action_spec=action_spec,
observation_and_action_constraint_splitter=(
lambda obs: (obs, batched_mask)
),
)
my_time_step = time_step.restart(time_step_spec, batch_size)
action_step = policy.action(my_time_step)
tf.nest.assert_same_structure(action_spec, action_step.action)
# Sample from the policy 1000 times, and ensure that actions considered
# invalid according to the mask are never chosen.
action_ = self.evaluate(action_step.action)
self.assertTrue(np.all(action_ >= -5))
self.assertTrue(np.all(action_ <= 5))
self.assertAllEqual(
np_mask[action_ - action_spec.minimum], np.ones([batch_size])
)
# Ensure that all valid actions occur somewhere within the batch. Because we
# sample 1000 times, the chance of this failing for any particular action is
# (2/3)^1000, roughly 1e-176.
for index in range(action_spec.minimum, action_spec.maximum + 1):
if np_mask[index - action_spec.minimum]:
self.assertIn(index, action_)
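# Illustrative only (not part of the TF-Agents tests): the splitter passed to
# RandomPyPolicy in testMasking simply separates the network observation from the
# action mask. With a dict observation it might look like the function below;
# the 'observation' and 'mask' keys are assumptions made for this sketch.
def example_observation_and_constraint_splitter(full_observation):
  # Return (observation handed to the policy, mask of valid actions).
  return full_observation['observation'], full_observation['mask']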
if __name__ == '__main__':
test_utils.main() |
build list by location request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(location: str, subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2021-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
"location": _SERIALIZER.url("location", location, "str"),
}
_url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
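# Illustrative only (not part of the generated SDK): the request builder above
# produces a GET request whose URL embeds the subscription id and location and
# whose query string carries the api-version, e.g. for a made-up subscription:
#
#     request = METHOD_NAME("westus", "00000000-0000-0000-0000-000000000000")
#     # request.method == "GET"
#     # request.url points at /subscriptions/00000000-.../providers/
#     #   Microsoft.Storage/locations/westus/usages with api-version=2021-02-01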
class UsagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2021_02_01.StorageManagementClient`'s
:attr:`usages` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list_by_location(self, location: str, **kwargs: Any) -> Iterable["_models.Usage"]:
"""Gets the current usage count and the limit for the resources of the location under the
subscription.
:param location: The location of the Azure Storage resource. Required.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2021_02_01.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2021-02-01"))
cls: ClsType[_models.UsageListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_by_location.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsageListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list_by_location.metadata = {
"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/locations/{location}/usages"
} |
perform http01 | """Example ACME-V2 API for HTTP-01 challenge.
Brief:
This a complete usage example of the python-acme API.
Limitations of this example:
- Works for only one Domain name
- Performs only HTTP-01 challenge
- Uses ACME-v2
Workflow:
(Account creation)
- Create account key
- Register account and accept TOS
(Certificate actions)
- Select HTTP-01 within offered challenges by the CA server
- Set up http challenge resource
- Set up standalone web server
- Create domain private key and CSR
- Issue certificate
- Renew certificate
- Revoke certificate
(Account update actions)
- Change contact information
- Deactivate Account
"""
from contextlib import contextmanager
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import josepy as jose
import OpenSSL
from acme import challenges
from acme import client
from acme import crypto_util
from acme import errors
from acme import messages
from acme import standalone
# Constants:
# This is the staging point for ACME-V2 within Let's Encrypt.
DIRECTORY_URL = 'https://acme-staging-v02.api.letsencrypt.org/directory'
USER_AGENT = 'python-acme-example'
# Account key size
ACC_KEY_BITS = 2048
# Certificate private key size
CERT_PKEY_BITS = 2048
# Domain name for the certificate.
DOMAIN = 'client.example.com'
# If you are running Boulder locally, it is possible to configure any port
# number to execute the challenge, but real CA servers will always use port
# 80, as described in the ACME specification.
PORT = 80
# Useful methods and classes:
def new_csr_comp(domain_name, pkey_pem=None):
"""Create certificate signing request."""
if pkey_pem is None:
# Create private key.
pkey = OpenSSL.crypto.PKey()
pkey.generate_key(OpenSSL.crypto.TYPE_RSA, CERT_PKEY_BITS)
pkey_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM,
pkey)
csr_pem = crypto_util.make_csr(pkey_pem, [domain_name])
return pkey_pem, csr_pem
def select_http01_chall(orderr):
"""Extract authorization resource from within order resource."""
# Authorization Resource: authz.
# This object holds the offered challenges by the server and their status.
authz_list = orderr.authorizations
for authz in authz_list:
# Choosing challenge.
# authz.body.challenges is a set of ChallengeBody objects.
for i in authz.body.challenges:
# Find the supported challenge.
if isinstance(i.chall, challenges.HTTP01):
return i
raise Exception('HTTP-01 challenge was not offered by the CA server.')
@contextmanager
def challenge_server(http_01_resources):
"""Manage standalone server set up and shutdown."""
# Setting up a fake server that binds at PORT and any address.
address = ('', PORT)
try:
servers = standalone.HTTP01DualNetworkedServers(address,
http_01_resources)
# Start client standalone web server.
servers.serve_forever()
yield servers
finally:
# Shutdown client web server and unbind from PORT
servers.shutdown_and_server_close()
def METHOD_NAME(client_acme, challb, orderr):
"""Set up standalone webserver and perform HTTP-01 challenge."""
response, validation = challb.response_and_validation(client_acme.net.key)
resource = standalone.HTTP01RequestHandler.HTTP01Resource(
chall=challb.chall, response=response, validation=validation)
with challenge_server({resource}):
# Let the CA server know that we are ready for the challenge.
client_acme.answer_challenge(challb, response)
# Wait for challenge status and then issue a certificate.
# It is possible to set a deadline time.
finalized_orderr = client_acme.poll_and_finalize(orderr)
return finalized_orderr.fullchain_pem
# Main examples:
def example_http():
"""This example executes the whole process of fulfilling a HTTP-01
challenge for one specific domain.
The workflow consists of:
(Account creation)
- Create account key
- Register account and accept TOS
(Certificate actions)
- Select HTTP-01 within offered challenges by the CA server
- Set up http challenge resource
- Set up standalone web server
- Create domain private key and CSR
- Issue certificate
- Renew certificate
- Revoke certificate
(Account update actions)
- Change contact information
- Deactivate Account
"""
# Create account key
acc_key = jose.JWKRSA(
key=rsa.generate_private_key(public_exponent=65537,
key_size=ACC_KEY_BITS,
backend=default_backend()))
# Register account and accept TOS
net = client.ClientNetwork(acc_key, user_agent=USER_AGENT)
directory = client.ClientV2.get_directory(DIRECTORY_URL, net)
client_acme = client.ClientV2(directory, net=net)
# Terms of Service URL is in client_acme.directory.meta.terms_of_service
# Registration Resource: regr
# Creates account with contact information.
email = ('[email protected]')
regr = client_acme.new_account(
messages.NewRegistration.from_data(
email=email, terms_of_service_agreed=True))
# Create domain private key and CSR
pkey_pem, csr_pem = new_csr_comp(DOMAIN)
# Issue certificate
orderr = client_acme.new_order(csr_pem)
# Select HTTP-01 within offered challenges by the CA server
challb = select_http01_chall(orderr)
# The certificate is ready to be used in the variable "fullchain_pem".
fullchain_pem = METHOD_NAME(client_acme, challb, orderr)
# Renew certificate
_, csr_pem = new_csr_comp(DOMAIN, pkey_pem)
orderr = client_acme.new_order(csr_pem)
challb = select_http01_chall(orderr)
# Performing challenge
fullchain_pem = METHOD_NAME(client_acme, challb, orderr)
# Revoke certificate
fullchain_com = jose.ComparableX509(
OpenSSL.crypto.load_certificate(
OpenSSL.crypto.FILETYPE_PEM, fullchain_pem))
try:
client_acme.revoke(fullchain_com, 0) # revocation reason = 0
except errors.ConflictError:
# Certificate already revoked.
pass
# Query registration status.
client_acme.net.account = regr
try:
regr = client_acme.query_registration(regr)
except errors.Error as err:
if err.typ == messages.ERROR_PREFIX + 'unauthorized':
# Status is deactivated.
pass
raise
# Change contact information
email = '[email protected]'
regr = client_acme.update_registration(
regr.update(
body=regr.body.update(
contact=('mailto:' + email,)
)
)
)
# Deactivate account/registration
regr = client_acme.deactivate_registration(regr)
if __name__ == "__main__":
example_http() |
check single value default metric | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from federatedml.util import consts, LOGGER
from federatedml.param.base_param import BaseParam
class EvaluateParam(BaseParam):
"""
Define the evaluation method of binary/multiple classification and regression
Parameters
----------
eval_type : {'binary', 'regression', 'multi'}
support 'binary' for HomoLR, HeteroLR and Secureboosting,
support 'regression' for Secureboosting,
'multi' is not supported in this version
unfold_multi_result : bool
unfold multi result and get several one-vs-rest binary classification results
pos_label : int or float or str
specify positive label type, depend on the data's label. this parameter effective only for 'binary'
need_run: bool, default True
Indicate if this module needed to be run
"""
def __init__(self, eval_type="binary", pos_label=1, need_run=True, metrics=None,
run_clustering_arbiter_metric=False, unfold_multi_result=False):
super().__init__()
self.eval_type = eval_type
self.pos_label = pos_label
self.need_run = need_run
self.metrics = metrics
self.unfold_multi_result = unfold_multi_result
self.run_clustering_arbiter_metric = run_clustering_arbiter_metric
self.default_metrics = {
consts.BINARY: consts.ALL_BINARY_METRICS,
consts.MULTY: consts.ALL_MULTI_METRICS,
consts.REGRESSION: consts.ALL_REGRESSION_METRICS,
consts.CLUSTERING: consts.ALL_CLUSTER_METRICS
}
self.allowed_metrics = {
consts.BINARY: consts.ALL_BINARY_METRICS,
consts.MULTY: consts.ALL_MULTI_METRICS,
consts.REGRESSION: consts.ALL_REGRESSION_METRICS,
consts.CLUSTERING: consts.ALL_CLUSTER_METRICS
}
def _use_single_value_default_metrics(self):
self.default_metrics = {
consts.BINARY: consts.DEFAULT_BINARY_METRIC,
consts.MULTY: consts.DEFAULT_MULTI_METRIC,
consts.REGRESSION: consts.DEFAULT_REGRESSION_METRIC,
consts.CLUSTERING: consts.DEFAULT_CLUSTER_METRIC
}
def _check_valid_metric(self, metrics_list):
metric_list = consts.ALL_METRIC_NAME
alias_name: dict = consts.ALIAS
full_name_list = []
metrics_list = [str.lower(i) for i in metrics_list]
for metric in metrics_list:
if metric in metric_list:
if metric not in full_name_list:
full_name_list.append(metric)
continue
valid_flag = False
for alias, full_name in alias_name.items():
if metric in alias:
if full_name not in full_name_list:
full_name_list.append(full_name)
valid_flag = True
break
if not valid_flag:
raise ValueError('metric {} is not supported'.format(metric))
allowed_metrics = self.allowed_metrics[self.eval_type]
for m in full_name_list:
if m not in allowed_metrics:
raise ValueError('metric {} is not used for {} task'.format(m, self.eval_type))
if consts.RECALL in full_name_list and consts.PRECISION not in full_name_list:
full_name_list.append(consts.PRECISION)
if consts.RECALL not in full_name_list and consts.PRECISION in full_name_list:
full_name_list.append(consts.RECALL)
return full_name_list
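# Illustrative only (not part of FATE): for a binary task,
#
#     param = EvaluateParam(eval_type='binary', metrics=['AUC', 'recall'])
#     param.check()
#
# lower-cases the metric names, expands any alias found in consts.ALIAS to its
# full name, verifies each metric is allowed for the task, and adds 'precision'
# automatically because 'recall' was requested without it (and vice versa).
# 'AUC' is assumed here to be a registered binary metric or alias in consts.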
def check(self):
descr = "evaluate param's "
self.eval_type = self.check_and_change_lower(self.eval_type,
[consts.BINARY, consts.MULTY, consts.REGRESSION,
consts.CLUSTERING],
descr)
if type(self.pos_label).__name__ not in ["str", "float", "int"]:
raise ValueError(
"evaluate param's pos_label {} not supported, should be str or float or int type".format(
self.pos_label))
if type(self.need_run).__name__ != "bool":
raise ValueError(
"evaluate param's need_run {} not supported, should be bool".format(
self.need_run))
if self.metrics is None or len(self.metrics) == 0:
self.metrics = self.default_metrics[self.eval_type]
LOGGER.warning('use default metric {} for eval type {}'.format(self.metrics, self.eval_type))
self.check_boolean(self.unfold_multi_result, 'multi_result_unfold')
self.metrics = self._check_valid_metric(self.metrics)
return True
def METHOD_NAME(self):
self._use_single_value_default_metrics()
# in the validation strategy, psi, f1-score, confusion-mat and pr-quantile are not supported in the current version
if self.metrics is None or len(self.metrics) == 0:
self.metrics = self.default_metrics[self.eval_type]
LOGGER.warning('use default metric {} for eval type {}'.format(self.metrics, self.eval_type))
ban_metric = [consts.PSI, consts.F1_SCORE, consts.CONFUSION_MAT, consts.QUANTILE_PR]
for metric in self.metrics:
if metric in ban_metric:
self.metrics.remove(metric)
self.check() |
test last | # Licensed under the GPLv3 - see LICENSE
import sys
from importlib import reload
from importlib.metadata import EntryPoint, entry_points
import pytest
from .. import io as bio, vdif, base
class TestExistingIOFormat:
def setup_method(self):
dir(bio) # Ensure entries are loaded.
self.vdif_entry = bio._entries['vdif']
def teardown_method(self):
bio._entries['vdif'] = self.vdif_entry
bio._bad_entries.discard('vdif')
dir(bio) # does update.
def test_io_entry_point(self):
assert hasattr(bio, 'vdif')
assert 'vdif' in bio._entries
assert 'vdif' in bio.FORMATS
del bio.vdif
assert 'vdif' not in bio.__dict__
assert 'vdif' in bio.FORMATS
assert 'vdif' in dir(bio)
assert hasattr(bio, 'vdif')
assert 'vdif' in bio.__dict__
assert 'vdif' in bio.FORMATS
def test_fake_bad_vdif(self):
assert bio.vdif is vdif
del bio.vdif
bio._entries['vdif'] = EntryPoint('vdif', 'bad.vdif', '')
with pytest.raises(AttributeError, match='not loadable'):
bio.vdif
assert 'vdif' not in dir(bio)
assert 'vdif' in bio._bad_entries
# Does not auto-reload since already known as bad.
with pytest.raises(AttributeError, match='has no attribute'):
bio.vdif
# But will reload if we reload and thus start over.
reload(bio)
assert bio.vdif is vdif
assert 'vdif' in bio.FORMATS
class TestNewIOFormats:
def setup_entry(self, entry):
self.added = entry.name
bio._entries[entry.name] = entry
bio.FORMATS.append(entry.name)
def teardown_method(self):
bio._entries.pop(self.added, None)
if self.added in bio.FORMATS:
bio.FORMATS.remove(self.added)
bio._bad_entries.discard(self.added)
bio.__dict__.pop(self.added, None)
def test_find_new(self):
self.setup_entry(EntryPoint('new', 'baseband.vdif', ''))
assert 'new' in dir(bio)
assert 'new' in bio.FORMATS
assert bio.new is vdif
# Check that it comes back if we remove it from the module.
bio.__dict__.pop('new', None)
assert 'new' not in bio.__dict__
assert 'new' in bio.FORMATS
assert 'new' in dir(bio)
assert bio.new is vdif
def test_cannot_getattr_bad(self):
self.setup_entry(EntryPoint('bad', 'really_bad', ''))
assert 'bad' in dir(bio)
assert 'bad' in bio.FORMATS
with pytest.raises(AttributeError, match='not loadable'):
bio.bad
assert 'bad' not in dir(bio)
assert 'bad' not in bio.FORMATS
with pytest.raises(AttributeError, match='has no attribute'):
bio.bad
def test_not_hasattr_bad(self):
self.setup_entry(EntryPoint('bad', 'really_bad', ''))
assert 'bad' in dir(bio)
assert not hasattr(bio, 'bad')
assert 'bad' not in dir(bio)
class TestTasks:
def setup_method(self):
"""Remove baseband.tasks so we can reload and find test entry point."""
# Also ensures that stuff we add gets removed.
sys.modules.pop('baseband.tasks', None)
def test_first(self):
import baseband.tasks as tasks
assert 'vdif_payload_module' not in dir(tasks)
def test_task_discovery(self, tmpdir, monkeypatch):
with open(tmpdir.mkdir('task_tests-0.1.dist-info')
.join('entry_points.txt'), 'wt') as fw:
fw.write('[baseband.tasks]\n'
'vdif_payload_module = baseband.vdif.payload\n'
'vdif_header_all = baseband.vdif.header:__all__\n'
'_nomod = baseband.base.utils:__all__\n')
monkeypatch.syspath_prepend(str(tmpdir))
# Now (re-)import tasks
import baseband.tasks as tasks
# We loaded just the vdif module.
assert 'vdif_payload_module' in dir(tasks)
assert 'vdif' not in dir(tasks)
assert 'payload' not in dir(tasks)
assert tasks.vdif_payload_module is vdif.payload
assert 'VDIFPayload' not in dir(tasks)
# But helpers and everything in it.
assert 'vdif_header_all' in dir(tasks)
assert 'header' not in dir(tasks)
assert 'VDIFHeader' in dir(tasks)
assert tasks.vdif_header_all is vdif.header
assert tasks.VDIFHeader is vdif.header.VDIFHeader
# And what's in utils, but not the name.
assert '_nomod' not in dir(tasks)
assert 'CRC' in dir(tasks)
assert tasks.CRC is base.utils.CRC
def test_bad_task_definition(self, tmpdir, monkeypatch):
with open(tmpdir.mkdir('bad_task_tests-0.1.dist-info')
.join('entry_points.txt'), 'wt') as fw:
fw.write('[baseband.tasks]\n'
'vdif_payload_module = baseband.vdif.payload\n'
'utils = baseband.base.utils.__all__\n' # typo: . not :
'does_not_exist = baseband.does_not_exist\n')
monkeypatch.syspath_prepend(str(tmpdir))
import baseband.tasks as tasks
assert tasks.vdif_payload_module is vdif.payload
assert not hasattr(tasks, 'utils')
assert 'utils' not in dir(tasks)
assert 'does_not_exist' not in dir(tasks)
assert len(tasks._bad_entries) == 2
assert (set(entry.name for entry in tasks._bad_entries)
== {'utils', 'does_not_exist'})
@pytest.mark.xfail(entry_points(group="baseband.tasks")
if sys.version_info >= (3, 10) else
entry_points().get("baseband.tasks", []),
reason='cannot test for lack of entry points')
def test_message_on_empty_tasks(self):
import baseband.tasks as tasks
with pytest.raises(AttributeError, match='No.*entry points found'):
tasks.does_not_exist
def METHOD_NAME(self):
import baseband.tasks as tasks
assert 'vdif_payload_module' not in dir(tasks) |
create module obj | # ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file pylith/bc/NeumannTimeDependent.py
#
# @brief Python object for managing a time-dependent Neumann (natural) boundary condition.
#
# Factory: boundary_condition
from .BoundaryCondition import BoundaryCondition
from .bc import NeumannTimeDependent as ModuleNeumannTimeDependent
from pylith.utils.NullComponent import NullComponent
def validateDir(value):
"""Validate direction.
"""
msg = "Direction must be a 3 component vector (list)."
if not isinstance(value, list):
raise ValueError(msg)
if 3 != len(value):
raise ValueError(msg)
try:
nums = list(map(float, value))
except:
raise ValueError(msg)
return nums
class NeumannTimeDependent(BoundaryCondition, ModuleNeumannTimeDependent):
"""
Neumann time-dependent boundary condition. Implements `BoundaryCondition`.
This boundary condition applies a Neumann boundary condition for a single solution subfield on a boundary.
To apply Neumann boundary conditions for multiple solution subfields on a boundary, use multiple Neumann boundary conditions.
:::{important}
The components are specified in the local normal-tangential coordinate system for the boundary. Ambiguities in specifying the shear (tangential) tractions in 3D problems are resolved using the `ref_dir_1` and `ref_dir_2` properties.
The first tangential direction is $\\vec{z} \\times \\vec{r}_1$ unless these are colinear, then $\\vec{r}_2$ (`ref_dir_2`) is used.
The second tangential direction is $\\vec{n} \\times \\vec{t}_1$.
:::
:::{seealso}
See [`AuxSubfieldsTimeDependent` Component](AuxSubfieldsTimeDependent.md) for the functional form of the time depenence.
:::
"""
DOC_CONFIG = {
"cfg": """
# Neumann (traction) boundary condition in 2D on -y boundary.
[pylithapp.problem.bc.bc_yneg]
label = boundary_yneg
field = displacement
scale_name = pressure
use_initial = False
use_time_history = True
db_auxiliary_field = spatialdata.spatialdb.UniformDB
db_auxiliary_field.description = Displacement Neumann BC +y boundary
db_auxiliary_field.values = [time_history_amplitude_tangential, time_history_amplitude_normal, time_history_start_time]
db_auxiliary_field.data = [2.0*MPa, -1.0*MPa, 0.0]
time_history = spatialdata.spatialdb.TimeHistory
time_history.description = Impulse time history
time_history.filename = impulse.timedb
""",
}
import pythia.pyre.inventory
scaleName = pythia.pyre.inventory.str("scale_name", default="pressure",
validator=pythia.pyre.inventory.choice(["length", "time", "pressure", "density", "velocity"]))
scaleName.meta['tip'] = "Type of scale for nondimensionalizing Neumann boundary condition ('pressure' for elasticity)."
useInitial = pythia.pyre.inventory.bool("use_initial", default=True)
useInitial.meta['tip'] = "Use initial term in time-dependent expression."
useRate = pythia.pyre.inventory.bool("use_rate", default=False)
useRate.meta['tip'] = "Use rate term in time-dependent expression."
useTimeHistory = pythia.pyre.inventory.bool("use_time_history", default=False)
useTimeHistory.meta['tip'] = "Use time history term in time-dependent expression."
dbTimeHistory = pythia.pyre.inventory.facility("time_history", factory=NullComponent, family="temporal_database")
dbTimeHistory.meta['tip'] = "Time history with normalized amplitude as a function of time."
refDir1 = pythia.pyre.inventory.list("ref_dir_1", default=[0.0, 0.0, 1.0], validator=validateDir)
refDir1.meta['tip'] = "First choice for reference direction to discriminate among tangential directions in 3D."
refDir2 = pythia.pyre.inventory.list("ref_dir_2", default=[0.0, 1.0, 0.0], validator=validateDir)
refDir2.meta['tip'] = "Second choice for reference direction to discriminate among tangential directions in 3D."
def __init__(self, name="neumanntimedependent"):
"""Constructor.
"""
BoundaryCondition.__init__(self, name)
return
def _defaults(self):
from .AuxSubfieldsTimeDependent import AuxSubfieldsTimeDependent
self.auxiliarySubfields = AuxSubfieldsTimeDependent("auxiliary_subfields")
def preinitialize(self, problem):
"""Do pre-initialization setup.
"""
from pylith.mpi.Communicator import mpi_is_root
if mpi_is_root():
self._info.log(
"Performing minimal initialization of time-dependent Neumann boundary condition '%s'." % self.aliases[-1])
BoundaryCondition.preinitialize(self, problem)
ModuleNeumannTimeDependent.setScaleName(self, self.scaleName)
ModuleNeumannTimeDependent.setRefDir1(self, self.refDir1)
ModuleNeumannTimeDependent.setRefDir2(self, self.refDir2)
ModuleNeumannTimeDependent.useInitial(self, self.useInitial)
ModuleNeumannTimeDependent.useRate(self, self.useRate)
ModuleNeumannTimeDependent.useTimeHistory(self, self.useTimeHistory)
if not isinstance(self.dbTimeHistory, NullComponent):
ModuleNeumannTimeDependent.setTimeHistoryDB(self, self.dbTimeHistory)
return
def _validate(self, context):
if self.inventory.useTimeHistory and isinstance(self.inventory.dbTimeHistory, NullComponent):
trait = self.inventory.getTrait("time_history")
self._validationError(context, trait,
f"Missing time history database for time-dependent Neumann boundary condition '{self.aliases[-1]}'.")
if not self.inventory.useTimeHistory and not isinstance(self.inventory.dbTimeHistory, NullComponent):
self._warning.log(
f"Time history for time-dependent Neumann boundary condition '{self.aliases[-1]}' not enabled. Ignoring provided time history database.")
def _validationError(self, context, trait, msg):
from pythia.pyre.inventory.Item import Item
error = ValueError(msg)
descriptor = self.getTraitDescriptor(trait.name)
context.error(error, items=[Item(trait, descriptor)])
def _configure(self):
"""Setup members using inventory.
"""
BoundaryCondition._configure(self)
return
def METHOD_NAME(self):
"""Create handle to corresponding C++ object.
"""
ModuleNeumannTimeDependent.__init__(self)
return
# Factories
def boundary_condition():
"""Factory associated with NeumannTimeDependent.
"""
return NeumannTimeDependent()
# End of file |