id (int64, 0–300k) | label (string, lengths 1–74, ⌀ = nullable) | text (string, lengths 4k–8k) |
---|---|---|
200 | get | from collections import defaultdict
import logging
import re
from awxkit.utils import is_list_or_tuple, not_provided
log = logging.getLogger(__name__)
class URLRegistry(object):
def __init__(self):
self.store = defaultdict(dict)
self.default = {}
def url_pattern(self, pattern_str):
"""Converts some regex-friendly url pattern (Resources().resource string)
to a compiled pattern.
"""
# should account for any relative endpoint w/ query parameters
pattern = r'^' + pattern_str + r'(\?.*)*$'
return re.compile(pattern)
def _generate_url_iterable(self, url_iterable):
parsed_urls = []
for url in url_iterable:
method = not_provided
if is_list_or_tuple(url):
url, method = url
if not is_list_or_tuple(method):
methods = (method,)
else:
methods = method
for method in methods:
method_pattern = re.compile(method)
url_pattern = self.url_pattern(url)
parsed_urls.append((url_pattern, method_pattern))
return parsed_urls
def register(self, *args):
"""Registers a single resource (generic python type or object) to either
1. a single url string (internally converted via URLRegistry.url_pattern) and optional method or method iterable
2. a list or tuple of url string and optional method or method iterables
for retrieval via get().
reg.register('/some/path/', ResourceOne)
reg.get('/some/path/')
-> ResourceOne
reg.register('/some/other/path/', 'method', ResourceTwo)
reg.get('/some/other/path/', 'method')
-> ResourceTwo
reg.register('/some/additional/path/', ('method_one', 'method_two'), ResourceThree)
reg.get('/some/additional/path/', 'method_one')
-> ResourceThree
reg.get('/some/additional/path/', 'method_two')
-> ResourceThree
reg.register(('/some/new/path/one/', '/some/new/path/two/',
('/some/other/new/path', 'method'),
('/some/other/additional/path/', ('method_one', 'method_two'))), ResourceFour)
reg.get('/some/other/new/path/', 'method')
-> ResourceFour
"""
if not args or len(args) == 1:
raise TypeError('register needs at least a url and Resource.')
elif len(args) not in (2, 3):
raise TypeError('register takes at most 3 arguments ({} given).'.format(len(args)))
if len(args) == 3: # url, method (iterable), and Resource
url_iterable = (args[:2],)
resource = args[2]
else:
urls, resource = args
if not is_list_or_tuple(urls):
url_iterable = [(urls, not_provided)]
else:
url_iterable = urls
url_iterable = self._generate_url_iterable(url_iterable)
for url_pattern, method_pattern in url_iterable:
if url_pattern in self.store and method_pattern in self.store[url_pattern]:
if method_pattern.pattern == not_provided:
exc_msg = '"{0.pattern}" already has methodless registration.'.format(url_pattern)
else:
exc_msg = '"{0.pattern}" already has registered method "{1.pattern}"'.format(url_pattern, method_pattern)
raise TypeError(exc_msg)
self.store[url_pattern][method_pattern] = resource
def setdefault(self, *args):
"""Establishes a default return value for get() by optional method (iterable).
reg.setdefault(ResourceOne)
reg.get('/some/unregistered/path')
-> ResourceOne
reg.setdefault('method', ResourceTwo)
reg.get('/some/registered/methodless/path/', 'method')
-> ResourceTwo
reg.setdefault(('method_one', 'method_two'), ResourceThree)
reg.get('/some/unregistered/path', 'method_two')
-> ResourceThree
reg.setdefault('supports.*regex', ResourceFour)
reg.get('supports123regex')
-> ResourceFour
"""
if not args:
raise TypeError('setdefault needs at least a Resource.')
if len(args) == 1: # all methods
self.default[re.compile('.*')] = args[0]
elif len(args) == 2:
if is_list_or_tuple(args[0]):
methods = args[0]
else:
methods = (args[0],)
for method in methods:
method_pattern = re.compile(method)
self.default[method_pattern] = args[1]
else:
raise TypeError('setdefault takes at most 2 arguments ({} given).'.format(len(args)))
def METHOD_NAME(self, url, method=not_provided):
"""Returns a single resource by previously registered path and optional method where
1. If a registration was methodless and a method is provided to get() the return value will be
None or, if applicable, a registry default (see setdefault()).
2. If a registration included a method (excluding the method wildcard '.*') and no method is provided to get()
the return value will be None or, if applicable, a registry default.
reg.register('/some/path/', ResourceOne)
reg.get('/some/path/')
-> ResourceOne
reg.get('/some/path/', 'method')
-> None
reg.register('/some/other/path/', 'method', ResourceTwo)
reg.get('/some/other/path/', 'method')
-> ResourceTwo
reg.get('/some/other/path')
-> None
reg.register('/some/additional/path/', '.*', ResourceThree)
reg.get('/some/additional/path/', 'method')
-> ResourceThree
reg.get('/some/additional/path/')
-> ResourceThree
"""
registered_type = None
default_methods = list(self.default)
# Make sure dot character evaluated last
default_methods.sort(key=lambda x: x.pattern == '.*')
for method_key in default_methods:
if method_key.match(method):
registered_type = self.default[method_key]
break
for re_key in self.store:
if re_key.match(url):
keys = list(self.store[re_key])
keys.sort(key=lambda x: x.pattern == '.*')
for method_key in keys:
if method_key.match(method):
registered_type = self.store[re_key][method_key]
break
log.debug('Retrieved {} by url: {}'.format(registered_type, url))
return registered_type |
201 | logtx | #!/usr/bin/env python
#
# Copyright (c) 2017, Massachusetts Institute of Technology All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import transport
import array
import os
# import pexpect
debug = 0
if os.getenv("LOGCMD", "NO") == "YES":
logcmd = 1
else:
logcmd = 0
def lprint(s):
if logcmd:
print(s)
def dprint(s):
if debug:
print(s)
class Connection:
def __init__(self, _p):
self.p = _p
class DT100(transport.Transport):
'connects to remote dt100 server, holds open connections and handles transactions'
def METHOD_NAME(self, s):
global logcmd
if logcmd > 0:
print("%s => \"%s\"" % (self.host, s))
def logrx(self, s):
global logcmd
if logcmd > 0:
print("%s <= \"%s\"" % (self.host, s))
def _connect(self):
import pexpect
hp = self.host.split(":")
if len(hp) == 2:
# it's a tunnel ...
target = hp[0] + ' ' + hp[1]
else:
target = self.host + ' ' + '53504'
return Connection(pexpect.spawn('nc ' + target))
def connectMaster(self):
dprint("connectMaster( " + self.host + " )")
self.acq = self._connect()
self.acq.p.expect('MasterInterpreter')
self.acq.p.sendline('dt100 open master 1')
i = self.acq.p.expect("DT100:\r", timeout=60)
if i == 0:
dprint("OK")
else:
print("Timeout")
def connectShell(self):
dprint("connectShell( " + self.host + " )")
self.sh = self._connect()
self.sh.p.expect('MasterInterpreter')
self.sh.p.sendline('dt100 open shell 1')
i = self.sh.p.expect("DT100:\r", timeout=60)
if i == 0:
dprint("OK")
else:
print("Timeout")
def connectStatemon(self):
import pexpect
hp = self.host.split(":")
if len(hp) == 2:
# it's a tunnel ...
port = int(hp[1]) + 1
target = hp[0] + ' ' + str(port)
else:
target = self.host + ' ' + '53535'
dprint("connectStatemon(" + target)
self.statemon = pexpect.spawn('nc ' + target)
self.statemon.first_time = 1
self.statemon.arm_time = self.statemon.stop_time = 0
#self.statemon = pexpect.spawn('nc ' + self.host + ' ' + '53535')
def connectChannel(self, channel):
channel_dev = "/dev/acq32/acq32.1.%02d" % channel
dprint("connectChannel( " + self.host + " " + channel_dev + " )")
ch = self._connect()
ch.p.expect('MasterInterpreter')
dprint("sendline dt100 open data1 " + channel_dev)
ch.p.sendline('dt100 open data1 ' + channel_dev)
dprint("expect:")
i = ch.p.expect("DT100:\r", timeout=60)
if i == 0:
dprint("OK")
else:
print("Timeout")
return ch
def acqcmd(self, command):
tx = "acqcmd " + command
self.METHOD_NAME(tx)
self.acq.p.sendline(tx)
self.acq.m = re.compile('ACQ32:(.*)\r')
i = self.acq.p.expect(self.acq.m, timeout=60)
if i == 0:
self.logrx(self.acq.p.match.group(0))
return self.acq.p.match.group(1)
else:
print("Timeout")
return 0
def acq2sh(self, command):
self.METHOD_NAME(command)
self.sh.p.sendline(command)
self.sh.m = re.compile('(.*)\r\nEOF(.*)\r\n')
i = self.sh.p.expect(self.sh.m, timeout=60)
if i == 0:
dprint("OK")
return self.sh.p.match.group(1)
else:
print("Timeout")
return 0
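# waitState() blocks on the statemon stream until the requested ST_<state> is seen and
# returns [state, active_time], where active_time = stop_time - arm_time when both
# timestamps were observed.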
def waitState(self, state):
import pexpect
regstr = r'([0-9\.]*) [0-9] ST_(.*)\r\n'
dprint("waitState %s" % regstr)
wantex = re.compile(regstr)
while self.statemon.expect([wantex, pexpect.TIMEOUT], timeout=60) == 0:
dprint(self.statemon.after)
if self.statemon.match.group(2) == "ARM":
self.statemon.arm_time = (float)(self.statemon.match.group(1))
self.statemon.stop_time = 0
elif self.statemon.match.group(2) == "STOP":
self.statemon.stop_time = (float)(self.statemon.match.group(1))
if self.statemon.match.group(2) == state:
if state == "STOP" and self.statemon.first_time == 1:
dprint("pass first time")
pass
else:
break
self.statemon.first_time = 0
# print(self.statemon.match.group(0)
# print("match %s" % (self.statemon.match.group(1))
active_time = 0
if self.statemon.arm_time != 0 and self.statemon.stop_time != 0:
active_time = self.statemon.stop_time - self.statemon.arm_time
self.statemon.arm_time = self.statemon.stop_time = 0
dprint("Active time: %f" % active_time)
return [self.statemon.match.group(2), active_time]
def readChannel(self, channel, nsamples, start=0, stride=1, format='h'):
ch = self.connectChannel(channel)
read_spec = "dt100 read %d %d %d" % (start, start+nsamples, stride)
self.METHOD_NAME("sendline:" + read_spec)
ch.p.sendline(read_spec)
read_rep = re.compile('DT100:([0-9]*) bytes\r\n')
rc = ch.p.expect(read_rep, timeout=60)
if rc == 0:
nbytes = int(ch.p.match.group(1))
data = array.array(format)
data.fromstring(ch.p.read(nbytes))
dprint(data)
return data
else:
dprint("ERROR")
return "ERROR"
def __init__(self, _host):
'create a transport; host is a DNS name, or \'.D\' in A.B.C.D where $SUBNET=A.B.C'
dprint("Dt100__init__ host:" + _host)
if _host.startswith('.'):
self.host = str(os.getenv("SUBNET")) + _host
else:
self.host = _host
if os.getenv("CONNECT", "YES") == "YES":
self.connectMaster()
self.connectShell()
self.connectStatemon() |
202 | derive time | from matplotlib import rcParams
import os
rcParams.update({'figure.autolayout': True})
KINDS = ["bar", "hist", "line"]
METRICS = {"bandwidth": "bandwidth(GiB/s)",
"FLOPS": "GFLOPS",
"speedup": "time(s)/time(s)",
"throughput": "throughput(GProblemsize/s)",
"time/rep": "time/rep(s)",
}
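# The _derive_* helpers below turn raw per-rep counters into the units declared in
# METRICS (dividing by per-rep time and by 10**9 where applicable).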
def _derive_bandwidth(thicket, header):
return thicket.dataframe[header, "Bytes/Rep"] / METHOD_NAME(thicket, header) / 10**9
def _derive_flops(thicket, header):
return thicket.dataframe[header, "Flops/Rep"] / METHOD_NAME(thicket, header) / 10**9
def _derive_speedup(thicket, header_list):
return thicket.dataframe[header_list[0], "Total time"] / thicket.dataframe[header_list[1], "Total time"]
def _derive_throughput(thicket, header):
return thicket.dataframe[header, "ProblemSize"] / METHOD_NAME(thicket, header) / 10**9
def METHOD_NAME(thicket, header):
return thicket.dataframe[header, "Total time"] / thicket.dataframe[header, "Reps"]
def _graph_bar(df, metric, prefix):
num_xticks = len(df)
plt = df.plot(kind="bar",
title=f"{METRICS[metric]}",
ylabel=METRICS[metric],
grid=True,
figsize=(max(num_xticks*0.5, 4), 6,),
)
plt.figure.savefig(f"{prefix}/bar_{metric}.png")
def _graph_hist(df, metric, prefix):
num_xticks = len(df)
plt = df.plot(kind="hist",
title=f"{METRICS[metric]}",
xlabel=METRICS[metric],
grid=True,
figsize=(max(num_xticks*0.5, 4), 6,),
subplots=True,
)
plt[0].figure.savefig(f"{prefix}/hist_{metric}.png")
def _graph_line(df, metric, prefix, name):
plt = df.plot(kind="line",
marker='o',
title=f"{name}",
ylabel=METRICS[metric],
logx=True,
logy=True,
grid=True,
)
plt.figure.savefig(f"{prefix}/{name}.png")
def plot(thicket, kind=None, metric=None, prefix=None):
"""Prepares dataframe for plotting and calls appropriate plotting function
Arguments:
thicket (Thicket): Thicket object
kind (str): Type of plot to make
metric (str): Metric to plot
prefix (str): Prefix for output file
Returns:
df (DataFrame): Dataframe used for plotting
"""
if kind is None:
raise ValueError(f"kind must be specified from: {KINDS}")
if metric is None:
raise ValueError(f"metric must be specified from: {list(METRICS.keys())}")
func = None
if metric == "bandwidth":
func = _derive_bandwidth
if prefix is None:
prefix = "graphs/graph_bandwidth"
elif metric == "FLOPS":
func = _derive_flops
if prefix is None:
prefix = "graphs/graph_flops"
elif metric == "speedup":
func = _derive_speedup
if prefix is None:
prefix = "graphs"
elif metric == "throughput":
func = _derive_throughput
if prefix is None:
prefix = "graphs/graph_throughput"
elif metric == "time/rep":
func = METHOD_NAME
if prefix is None:
prefix = "graphs/graph_time"
g_func = None
if kind == "bar":
g_func = _graph_bar
elif kind == "hist":
g_func = _graph_hist
elif kind == "line":
g_func = _graph_line
# Make dir
if not os.path.exists(prefix):
os.makedirs(prefix)
# Add calculated column to dataframe
header_list = [h for h in thicket.dataframe.columns.get_level_values(0).unique() if "name" not in h]
if metric == "speedup":
thicket.dataframe[f"{header_list[0]}/{header_list[1]}", metric] = func(thicket, header_list)
else:
for header in header_list:
thicket.dataframe[header, metric] = func(thicket, header)
# Make copy
df = thicket.dataframe.copy(deep=True)
if kind == "bar" or kind == "hist":
df.reset_index(inplace=True)
drop_cols = [col for col in df.columns if not "name" in col and not metric in col]
df.drop(columns=drop_cols, inplace=True)
df.set_index([("name", "")], inplace=True)
df.columns = df.columns.droplevel(1)
g_func(df, metric, prefix)
elif kind == "line":
# Plot for each node
for node in set(thicket.dataframe.index.get_level_values("node")):
df = thicket.dataframe.loc[node]
name = df[("name", "")].iloc[0]
drop_cols = [col for col in df.columns if col[1] != metric or df[col].isnull().values.all()]
df = df.drop(columns=drop_cols, axis=1)
df.columns = df.columns.droplevel(1)
g_func(df, metric, prefix, name)
return df
|
203 | test mask input shapes | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the mask module.
"""
import astropy.units as u
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_almost_equal
from regions.core.bounding_box import RegionBoundingBox
from regions.core.mask import RegionMask
from regions.core.pixcoord import PixCoord
from regions.shapes import CircleAnnulusPixelRegion, CirclePixelRegion
POSITIONS = [(-20, -20), (-20, 20), (20, -20), (60, 60)]
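# POSITIONS places aperture centers so a radius-10 circle misses the 50x50 test image
# entirely, while a radius-30 circle overlaps it only partially.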
def METHOD_NAME():
with pytest.raises(ValueError):
mask_data = np.ones((10, 10))
bbox = RegionBoundingBox(5, 10, 5, 10)
RegionMask(mask_data, bbox)
def test_mask_array():
mask_data = np.ones((10, 10))
bbox = RegionBoundingBox(5, 15, 5, 15)
mask = RegionMask(mask_data, bbox)
data = np.array(mask)
assert_allclose(data, mask.data)
def test_mask_get_overlap_slices():
aper = CirclePixelRegion(PixCoord(5, 5), radius=10.)
mask = aper.to_mask()
slc = ((slice(0, 16, None), slice(0, 16, None)),
(slice(5, 21, None), slice(5, 21, None)))
assert mask.get_overlap_slices((25, 25)) == slc
def test_mask_cutout_shape():
mask_data = np.ones((10, 10))
bbox = RegionBoundingBox(5, 15, 5, 15)
mask = RegionMask(mask_data, bbox)
with pytest.raises(ValueError):
mask.cutout(np.arange(10))
with pytest.raises(ValueError):
mask.to_image((10,))
def test_mask_cutout_copy():
data = np.ones((50, 50))
aper = CirclePixelRegion(PixCoord(25, 25), radius=10.)
mask = aper.to_mask()
cutout = mask.cutout(data, copy=True)
data[25, 25] = 100.
assert cutout[10, 10] == 1.
# test quantity data
data2 = np.ones((50, 50)) * u.adu
cutout2 = mask.cutout(data2, copy=True)
assert cutout2.unit == data2.unit
data2[25, 25] = 100. * u.adu
assert cutout2[10, 10].value == 1.
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_no_overlap(position):
data = np.ones((50, 50))
aper = CirclePixelRegion(PixCoord(position[0], position[1]), radius=10.)
mask = aper.to_mask()
cutout = mask.cutout(data)
assert cutout is None
weighted_data = mask.multiply(data)
assert weighted_data is None
image = mask.to_image(data.shape)
assert image is None
@pytest.mark.parametrize('position', POSITIONS)
def test_mask_cutout_partial_overlap(position):
data = np.ones((50, 50))
aper = CirclePixelRegion(PixCoord(position[0], position[1]), radius=30.)
mask = aper.to_mask()
cutout = mask.cutout(data)
assert cutout.shape == mask.shape
weighted_data = mask.multiply(data)
assert weighted_data.shape == mask.shape
image = mask.to_image(data.shape)
assert image.shape == data.shape
def test_mask_multiply():
radius = 10.
data = np.ones((50, 50))
region = CirclePixelRegion(PixCoord(25, 25), radius=radius)
mask = region.to_mask(mode='exact')
data_weighted = mask.multiply(data)
assert_almost_equal(np.sum(data_weighted), np.pi * radius**2)
# test that multiply() returns a copy
data[25, 25] = 100.
assert data_weighted[10, 10] == 1.
def test_mask_multiply_quantity():
radius = 10.
data = np.ones((50, 50)) * u.adu
region = CirclePixelRegion(PixCoord(25, 25), radius=radius)
mask = region.to_mask(mode='exact')
data_weighted = mask.multiply(data)
assert data_weighted.unit == u.adu
assert_almost_equal(np.sum(data_weighted.value), np.pi * radius**2)
# test that multiply() returns a copy
data[25, 25] = 100. * u.adu
assert data_weighted[10, 10].value == 1.
@pytest.mark.parametrize('value', (np.nan, np.inf))
def test_mask_nonfinite_fill_value(value):
region = CircleAnnulusPixelRegion(PixCoord(0, 0), 10, 20)
data = np.ones((101, 101)).astype(int)
cutout = region.to_mask().cutout(data, fill_value=value)
assert ~np.isfinite(cutout[0, 0])
def test_mask_multiply_fill_value():
region = CircleAnnulusPixelRegion(PixCoord(0, 0), 10, 20)
data = np.ones((101, 101)).astype(int)
cutout = region.to_mask().multiply(data, fill_value=np.nan)
xypos = ((20, 20), (5, 5), (5, 35), (35, 5), (35, 35))
for x, y in xypos:
assert np.isnan(cutout[y, x])
def test_mask_nonfinite_in_bbox():
"""
Regression test that non-finite data values outside of the mask but
within the bounding box are set to zero.
"""
data = np.ones((101, 101))
data[33, 33] = np.nan
data[67, 67] = np.inf
data[33, 67] = -np.inf
data[22, 22] = np.nan
data[22, 23] = np.inf
radius = 20.
reg1 = CirclePixelRegion(PixCoord(50, 50), radius)
reg2 = CirclePixelRegion(PixCoord(5, 5), radius)
wdata1 = reg1.to_mask(mode='exact').multiply(data)
assert_allclose(np.sum(wdata1), np.pi * radius**2)
wdata2 = reg2.to_mask(mode='exact').multiply(data)
assert_allclose(np.sum(wdata2), 561.6040111923013)
@pytest.mark.parametrize('x, y, exp_shape',
[(0, 0, 245), (50, 50, 940), (100, 100, 245)])
def test_mask_get_values(x, y, exp_shape):
aper = CircleAnnulusPixelRegion(PixCoord(x, y), inner_radius=10,
outer_radius=20)
data = np.ones((101, 101))
values = aper.to_mask(mode='center').get_values(data)
assert values.shape == (exp_shape,)
assert_allclose(np.sum(values), exp_shape)
def test_mask_get_values_no_overlap():
aper = CirclePixelRegion(PixCoord(-100, -100), radius=3)
data = np.ones((51, 51))
values = aper.to_mask().get_values(data)
assert values.shape == (0,)
def test_mask_get_values_mask():
aper = CirclePixelRegion(PixCoord(24.5, 24.5), radius=10.)
data = np.ones((51, 51))
mask = aper.to_mask(mode='exact')
with pytest.raises(ValueError):
mask.get_values(data, mask=np.ones(3))
arr = mask.get_values(data, mask=None)
assert_allclose(np.sum(arr), 100. * np.pi)
data_mask = np.zeros(data.shape, dtype=bool)
data_mask[25:] = True
arr2 = mask.get_values(data, mask=data_mask)
assert_allclose(np.sum(arr2), 100. * np.pi / 2.) |
204 | construct solidity single data | import os
from enum import Enum
import json
OUTPUT_DIR = os.getenv("OUTPUT_DIR")
DATASET_DIR = os.getenv("DATASET")
if DATASET_DIR is None:
print("Path to the smart-contract-fiesta dataset must be specified (env `DATASET=`)")
exit(1)
ROOT_CONTRACTS_DIR = os.path.join(DATASET_DIR, "organized_contracts")
INDEX_FILE = os.path.join(DATASET_DIR, "address_bytecodehash_index")
class VerificationMethod(Enum):
SOLIDITY_SINGLE = 1
SOLIDITY_MULTIPLE = 2
SOLIDITY_STANDARD = 3
VYPER_SINGLE = 4
def to_string(self):
if self == VerificationMethod.SOLIDITY_SINGLE:
return "solidity_single"
elif self == VerificationMethod.SOLIDITY_MULTIPLE:
return "solidity_multiple"
elif self == VerificationMethod.SOLIDITY_STANDARD:
return "solidity_standard"
elif self == VerificationMethod.VYPER_SINGLE:
return "vyper_single"
else:
assert False, "unknown verification method"
def get_bytecode_hashes():
bytecode_hashes = set()
prefixes = os.listdir(ROOT_CONTRACTS_DIR)
for prefix in prefixes:
path = os.path.join(ROOT_CONTRACTS_DIR, prefix)
for subdir in os.listdir(path):
bytecode_hashes.add(subdir)
return bytecode_hashes
def construct_verification_data(contract_address: str, bytecode_hash: str) -> (VerificationMethod, dict):
directory = build_dir(bytecode_hash)
verification_method = calculate_verification_method(directory)
if verification_method == VerificationMethod.SOLIDITY_SINGLE:
data = METHOD_NAME(contract_address, directory)
elif verification_method == VerificationMethod.SOLIDITY_MULTIPLE:
data = construct_solidity_multiple_data(contract_address, directory)
elif verification_method == VerificationMethod.SOLIDITY_STANDARD:
data = construct_solidity_standard_data(contract_address, directory)
elif verification_method == VerificationMethod.VYPER_SINGLE:
data = construct_vyper_single_data(contract_address, directory)
else:
assert False, "unknown verification method"
return verification_method, data
def build_dir(bytecode_hash: str) -> str:
return os.path.join(ROOT_CONTRACTS_DIR, bytecode_hash[0:2], bytecode_hash)
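# Directory-layout heuristic: main.vy -> Vyper, contract.json -> standard-JSON input,
# more than two files -> multi-file Solidity, otherwise single-file Solidity.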
def calculate_verification_method(directory: str) -> VerificationMethod:
files = os.listdir(directory)
if "main.vy" in files:
return VerificationMethod.VYPER_SINGLE
elif "contract.json" in files:
return VerificationMethod.SOLIDITY_STANDARD
elif len(files) > 2:
return VerificationMethod.SOLIDITY_MULTIPLE
else:
return VerificationMethod.SOLIDITY_SINGLE
def METHOD_NAME(contract_address: str, directory: str) -> dict:
with open(os.path.join(directory, "metadata.json"), 'r') as metadata_file:
metadata = json.load(metadata_file)
with open(os.path.join(directory, "main.sol"), 'r') as source_file:
source = source_file.read()
data = dict()
data["contract_address"] = contract_address
data["contract_name"] = metadata["ContractName"]
data["compiler_version"] = metadata["CompilerVersion"]
data["optimizations"] = metadata["OptimizationUsed"]
data["optimization_runs"] = metadata["Runs"]
data["source"] = source
return data
def construct_solidity_multiple_data(contract_address: str, directory: str) -> dict:
with open(os.path.join(directory, "metadata.json"), 'r') as metadata_file:
metadata = json.load(metadata_file)
sources = dict()
for source_name in os.listdir(directory):
if source_name == "metadata.json": continue
with open(os.path.join(directory, source_name), 'r') as source_file:
source = source_file.read()
sources[source_name] = source
data = dict()
data["contract_address"] = contract_address
data["contract_name"] = metadata["ContractName"]
data["compiler_version"] = metadata["CompilerVersion"]
data["optimizations"] = metadata["OptimizationUsed"]
data["optimization_runs"] = metadata["Runs"]
data["sources"] = sources
return data
def construct_solidity_standard_data(contract_address: str, directory: str) -> dict:
with open(os.path.join(directory, "metadata.json"), 'r') as metadata_file:
metadata = json.load(metadata_file)
with open(os.path.join(directory, "contract.json"), 'r') as standard_json_file:
standard_json = json.load(standard_json_file)
data = dict()
data["contract_address"] = contract_address
data["contract_name"] = metadata["ContractName"]
data["compiler_version"] = metadata["CompilerVersion"]
data["standard_json"] = standard_json
return data
def construct_vyper_single_data(contract_address: str, directory: str) -> dict:
with open(os.path.join(directory, "metadata.json"), 'r') as metadata_file:
metadata = json.load(metadata_file)
with open(os.path.join(directory, "main.vy"), 'r') as source_file:
source = source_file.read()
data = dict()
data["contract_address"] = contract_address
data["contract_name"] = metadata["ContractName"]
data["compiler_version"] = metadata["CompilerVersion"]
data["optimizations"] = metadata["OptimizationUsed"]
data["optimization_runs"] = metadata["Runs"]
data["source"] = source
return data
def main():
print("Prepare the fiesta dataset. Contracts left:")
bytecode_hashes = get_bytecode_hashes()
print(len(bytecode_hashes))
output_dir = OUTPUT_DIR if OUTPUT_DIR is not None else "."
results_dir = os.path.join(output_dir, "dataset")
with open(INDEX_FILE, 'r') as index_file:
for line in index_file:
line = line.strip().split(':')
contract_address, bytecode_hash = line[0], line[1]
if bytecode_hash in bytecode_hashes:
(verification_method, data) = construct_verification_data(contract_address, bytecode_hash)
filename = os.path.join(results_dir, verification_method.to_string(), contract_address)
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as file:
file.write(json.dumps(data))
bytecode_hashes.remove(bytecode_hash)
if len(bytecode_hashes) % 10000 == 0:
print(len(bytecode_hashes))
if __name__ == '__main__':
main() |
205 | add dep doc | """Class for recording and reporting deprecations"""
from __future__ import annotations
import functools
import re
import typing as ty
import warnings
if ty.TYPE_CHECKING: # pragma: no cover
T = ty.TypeVar('T')
P = ty.ParamSpec('P')
_LEADING_WHITE = re.compile(r'^(\s*)')
TESTSETUP = """
.. testsetup::
>>> import pytest
>>> import warnings
>>> _suppress_warnings = pytest.deprecated_call()
>>> _ = _suppress_warnings.__enter__()
"""
TESTCLEANUP = """
.. testcleanup::
>>> warnings.warn("Avoid error if no doctests to run...", DeprecationWarning)
>>> _ = _suppress_warnings.__exit__(None, None, None)
"""
class ExpiredDeprecationError(RuntimeError):
"""Error for expired deprecation
Error raised when a called function or method has passed out of its
deprecation period.
"""
pass
def _ensure_cr(text: str) -> str:
"""Remove trailing whitespace and add carriage return
Ensures that `text` always ends with a carriage return
"""
return text.rstrip() + '\n'
def METHOD_NAME(
old_doc: str,
dep_doc: str,
setup: str = '',
cleanup: str = '',
) -> str:
"""Add deprecation message `dep_doc` to docstring in `old_doc`
Parameters
----------
old_doc : str
Docstring from some object.
dep_doc : str
Deprecation warning to add to top of docstring, after initial line.
setup : str, optional
Doctest setup text
cleanup : str, optional
Doctest teardown text
Returns
-------
new_doc : str
`old_doc` with `dep_doc` inserted after any first lines of docstring.
"""
dep_doc = _ensure_cr(dep_doc)
if not old_doc:
return dep_doc
old_doc = _ensure_cr(old_doc)
old_lines = old_doc.splitlines()
new_lines = []
for line_no, line in enumerate(old_lines):
if line.strip():
new_lines.append(line)
else:
break
next_line = line_no + 1
if next_line >= len(old_lines):
# nothing following first paragraph, just append message
return old_doc + '\n' + dep_doc
leading_white = _LEADING_WHITE.match(old_lines[next_line])
assert leading_white is not None # Type narrowing, since this always matches
indent = leading_white.group()
setup_lines = [indent + L for L in setup.splitlines()]
dep_lines = [indent + L for L in [''] + dep_doc.splitlines() + ['']]
cleanup_lines = [indent + L for L in cleanup.splitlines()]
return '\n'.join(
new_lines + dep_lines + setup_lines + old_lines[next_line:] + cleanup_lines + ['']
)
class Deprecator:
"""Class to make decorator marking function or method as deprecated
The decorated function / method will:
* Raise the given `warning_class` warning when the function / method gets
called, up to (and including) version `until` (if specified);
* Raise the given `error_class` error when the function / method gets
called, when the package version is greater than version `until` (if
specified).
Parameters
----------
version_comparator : callable
Callable accepting string as argument, and return 1 if string
represents a higher version than encoded in the `version_comparator`, 0
if the version is equal, and -1 if the version is lower. For example,
the `version_comparator` may compare the input version string to the
current package version string.
warn_class : class, optional
Class of warning to generate for deprecation.
error_class : class, optional
Class of error to generate when `version_comparator` returns 1 for a
given argument of ``until`` in the ``__call__`` method (see below).
"""
def __init__(
self,
version_comparator: ty.Callable[[str], int],
warn_class: type[Warning] = DeprecationWarning,
error_class: type[Exception] = ExpiredDeprecationError,
) -> None:
self.version_comparator = version_comparator
self.warn_class = warn_class
self.error_class = error_class
def is_bad_version(self, version_str: str) -> bool:
"""Return True if `version_str` is too high
Tests `version_str` with ``self.version_comparator``
Parameters
----------
version_str : str
String giving version to test
Returns
-------
is_bad : bool
True if `version_str` is for version below that expected by
``self.version_comparator``, False otherwise.
"""
return self.version_comparator(version_str) == -1
def __call__(
self,
message: str,
since: str = '',
until: str = '',
warn_class: type[Warning] | None = None,
error_class: type[Exception] | None = None,
) -> ty.Callable[[ty.Callable[P, T]], ty.Callable[P, T]]:
"""Return decorator function function for deprecation warning / error
Parameters
----------
message : str
Message explaining deprecation, giving possible alternatives.
since : str, optional
Released version at which object was first deprecated.
until : str, optional
Last released version at which this function will still raise a
deprecation warning. Versions higher than this will raise an
error.
warn_class : None or class, optional
Class of warning to generate for deprecation (overrides instance
default).
error_class : None or class, optional
Class of error to generate when `version_comparator` returns 1 for a
given argument of ``until`` (overrides class default).
Returns
-------
deprecator : func
Function returning a decorator.
"""
exception = error_class if error_class is not None else self.error_class
warning = warn_class if warn_class is not None else self.warn_class
messages = [message]
if (since, until) != ('', ''):
messages.append('')
if since:
messages.append('* deprecated from version: ' + since)
if until:
messages.append(
f"* {'Raises' if self.is_bad_version(until) else 'Will raise'} "
f'{exception} as of version: {until}'
)
message = '\n'.join(messages)
def deprecator(func: ty.Callable[P, T]) -> ty.Callable[P, T]:
@functools.wraps(func)
def deprecated_func(*args: P.args, **kwargs: P.kwargs) -> T:
if until and self.is_bad_version(until):
raise exception(message)
warnings.warn(message, warning, stacklevel=2)
return func(*args, **kwargs)
keep_doc = deprecated_func.__doc__
if keep_doc is None:
keep_doc = ''
setup = TESTSETUP
cleanup = TESTCLEANUP
# After expiration, remove all but the first paragraph.
# The details are no longer relevant, but any code will likely
# raise exceptions we don't need.
if keep_doc and until and self.is_bad_version(until):
lines = '\n'.join(line.rstrip() for line in keep_doc.splitlines())
keep_doc = lines.split('\n\n', 1)[0]
setup = ''
cleanup = ''
deprecated_func.__doc__ = METHOD_NAME(keep_doc, message, setup, cleanup)
return deprecated_func
return deprecator |
206 | configure step | ##
# Copyright 2013-2023 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for Scalasca v1.x, implemented as an easyblock
@author: Kenneth Hoste (Ghent University)
@author: Bernd Mohr (Juelich Supercomputing Centre)
"""
import os
from distutils.version import LooseVersion
import easybuild.tools.toolchain as toolchain
from easybuild.easyblocks.generic.configuremake import ConfigureMake
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.modules import get_software_root
class EB_Scalasca1(ConfigureMake):
"""Support for building and installing Scalasca v1.x."""
def check_readiness_step(self):
"""Make sure this easyblock is applicable to the Scalasca version being built."""
ver = LooseVersion(self.version)
if ver >= LooseVersion('2.0') or ver < LooseVersion('1.0'):
raise EasyBuildError("The %s easyblock should only be used for Scalasca v1.x; "
"for Scalasca v2.0 and more recent, try the EB_Score_minus_P easyblock.",
self.__class__.__name__)
super(EB_Scalasca1, self).check_readiness_step()
def METHOD_NAME(self, *args, **kwargs):
"""Configure Scalasca build, set configure options for compiler, MPI and dependencies."""
# compiler and MPI suite should always be specified -- MUCH quicker and SAFER than autodetect
# --compiler=(gnu|pgi|intel|path|ibm|sun|clang)
# --mpi=(mpich|mpich2|mpich3|lam|openmpi|intel|intel2|hp|scali|mpibull2|bullxmpi|sun|ibmpoe|intelpoe)
comp_opts = {
toolchain.GCC: 'gnu',
toolchain.INTELCOMP: 'intel',
}
comp_fam = self.toolchain.comp_family()
if comp_fam in comp_opts:
self.cfg.update('configopts', "--compiler=%s" % comp_opts[comp_fam])
else:
raise EasyBuildError("Compiler family %s not supported yet (only: %s)",
comp_fam, ', '.join(comp_opts.keys()))
mpi_opts = {
toolchain.INTELMPI: 'intel2', # intel: Intel MPI v1.x (ancient)
toolchain.OPENMPI: 'openmpi',
toolchain.MPICH: 'mpich3', # In EB terms, MPICH means MPICH 3.x; MPICH 1.x is ancient and unsupported
toolchain.MPICH2: 'mpich2',
}
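# Map the EasyBuild MPI family onto Scalasca's --mpi flag; unsupported families abort
# configuration with an EasyBuildError.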
mpi_fam = self.toolchain.mpi_family()
if mpi_fam in mpi_opts:
self.cfg.update('configopts', "--mpi=%s --enable-all-mpi-wrappers" % mpi_opts[mpi_fam])
else:
raise EasyBuildError("MPI family %s not supported yet (only: %s)", mpi_fam, ', '.join(mpi_opts.keys()))
# auto-detection for dependencies mostly works fine, but specify paths explicitly anyway to have full control
deps = {
'binutils': ['--with-binutils=%s'],
'OTF': ['--with-otf=%s'],
'OPARI2': ['--with-opari2=%s'],
'PAPI': ['--with-papi=%s'],
'PDT': ['--with-pdt=%s'],
}
for (dep_name, dep_opts) in deps.items():
dep_root = get_software_root(dep_name)
if dep_root:
for dep_opt in dep_opts:
self.cfg.update('configopts', dep_opt % dep_root)
if get_software_root('Cube'):
self.cfg.update('configopts', '--disable-gui')
super(EB_Scalasca1, self).METHOD_NAME(*args, **kwargs)
def build_step(self):
"""Build Scalasca using make, after stepping into the build dir."""
build_dir_found = False
try:
for entry in os.listdir(os.getcwd()):
if entry.startswith('build-linux-') and os.path.isdir(entry):
os.chdir(entry)
build_dir_found = True
self.log.info("Stepped into build dir %s" % entry)
except OSError as err:
raise EasyBuildError("Failed to step into build dir before starting actual build: %s", err)
if not build_dir_found:
raise EasyBuildError("Could not find build dir to step into.")
super(EB_Scalasca1, self).build_step() |
207 | create ws server | import http.server
import socketserver
import threading
import os
import asyncio
import websockets
import sys
import json
import tempfile
class Server:
def __init__(self,port=None,tempname=None):
self.connections={}
self.wsport=9000
self.httpport=None
if (port != None):
self.wsport=port;
self.lastIndex=0
self.httpd=None
print('__ Created temp directory:',tempname);
self.tempdir=tempname;
self.connections={}
async def listen(self,websocket):
async for message in websocket:
try:
b=json.loads(message);
try:
command=b['command'];
except:
command='';
try:
index=b['index'];
except:
index=0
print('\n___\n___\n___ server received command=',b,'index=',index)
if (command=='exit'):
self.httpd.server_close();
try:
sys.exit(0);
except:
print(sys.exc_info())
if (command=='getInfo'):
out = {
'index' : self.lastIndex,
'temp' : self.tempdir,
'url' : "http://localhost:"+str(self.httpport)+'/',
'path' : os.path.basename(self.tempdir)
}
self.lastIndex=self.lastIndex+1;
print('___ Responding',out,'\n___');
await websocket.send(json.dumps(out));
if (command == 'viewerReady'):
self.connections[index]=websocket;
try:
a=self.connections[index];
except:
self.connections[index]=None;
if (self.connections[index]!=None):
if (command == 'done'):
coords = [ 50+index,50+index*2,20+index*3];
await self.setCoordinates(index,coords,0)
if (command == 'forward' and self.connections[index]!=None):
payload=json.dumps(b['payload']);
try:
await self.connections[index].send(payload)
except:
print('Failed here')
e = sys.exc_info()[0]
print(sys.exc_info())
else:
print('___ viewer',index,'not connected yet')
except websockets.exceptions.ConnectionClosed:
print("Client disconnected. Do cleanup")
except:
e = sys.exc_info()[0]
print(sys.exc_info())
def print(self):
print('---- Connections=',self.connections);
async def METHOD_NAME(self):
async with websockets.serve(self.listen, "localhost", self.wsport):
print('---- Websocket server started on port',self.wsport);
await asyncio.Future() # run forever
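# The plain HTTP file server below runs in a daemon thread so the asyncio websocket loop
# above can keep the main thread.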
def createHTTPServer(self):
if (self.httpport==None):
self.httpport=self.wsport+1;
Handler = http.server.SimpleHTTPRequestHandler
self.httpd=http.server.ThreadingHTTPServer(("127.0.0.1", self.httpport), Handler);
print("---- HTTP Server started at port",self.httpport,' root=',os.getcwd())
server_thread = threading.Thread(target=self.httpd.serve_forever, daemon=True)
server_thread.start()
async def setImage(self,url,index=0,viewer=0,overlay=False):
port="8080";
if (self.httpport!=None):
port=str(self.httpport)
a= {
"command" : "load",
"filename" : url,
"viewer" : viewer,
"overlay" : overlay
};
print(a,index);
await self.connections[index].send(json.dumps(a));
async def setCoordinates(self,index,coords,viewer=0):
c= {
"command" : "crosshairs",
"coords" : [ 20+index*10,20+index*20,20+index*30 ],
"viewer" : viewer
};
await self.connections[index].send(json.dumps(c));
# Commandline Version
def main(port=None,dir=None):
print('__ switching to directory',dir);
os.chdir(dir)
with tempfile.TemporaryDirectory(dir=dir) as tempdname:
print('.... Starting main function',tempdname)
v=Server(port,tempdname)
v.createHTTPServer()
asyncio.run(v.METHOD_NAME())
def start(port=None,dir=None):
import subprocess;
my_path=os.path.dirname(os.path.realpath(__file__));
subprocess.Popen(["python",__file__,str(port), dir]);
if __name__ == '__main__':
a=sys.argv[1];
if (a==None):
a="9000"
main(int(sys.argv[1]),sys.argv[2])
|
208 | test info | # -*- coding: utf-8 -*-
# Copyright (c) Ansible project
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible_collections.community.general.plugins.modules.ipbase_info import IpbaseInfo
from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.community.general.tests.unit.compat.mock import Mock
IPBASE_DATA = {
"response": b"""
{
"data": {
"ip": "1.1.1.1",
"hostname": "one.one.one.one",
"type": "v4",
"range_type": {
"type": "PUBLIC",
"description": "Public address"
},
"connection": {
"asn": 13335,
"organization": "Cloudflare, Inc.",
"isp": "APNIC Research and Development",
"range": "1.1.1.1/32"
},
"location": {
"geonames_id": 5332870,
"latitude": 34.053611755371094,
"longitude": -118.24549865722656,
"zip": "90012",
"continent": {
"code": "NA",
"name": "North America",
"name_translated": "North America"
},
"country": {
"alpha2": "US",
"alpha3": "USA",
"calling_codes": [
"+1"
],
"currencies": [
{
"symbol": "$",
"name": "US Dollar",
"symbol_native": "$",
"decimal_digits": 2,
"rounding": 0,
"code": "USD",
"name_plural": "US dollars"
}
],
"emoji": "...",
"ioc": "USA",
"languages": [
{
"name": "English",
"name_native": "English"
}
],
"name": "United States",
"name_translated": "United States",
"timezones": [
"America/New_York",
"America/Detroit",
"America/Kentucky/Louisville",
"America/Kentucky/Monticello",
"America/Indiana/Indianapolis",
"America/Indiana/Vincennes",
"America/Indiana/Winamac",
"America/Indiana/Marengo",
"America/Indiana/Petersburg",
"America/Indiana/Vevay",
"America/Chicago",
"America/Indiana/Tell_City",
"America/Indiana/Knox",
"America/Menominee",
"America/North_Dakota/Center",
"America/North_Dakota/New_Salem",
"America/North_Dakota/Beulah",
"America/Denver",
"America/Boise",
"America/Phoenix",
"America/Los_Angeles",
"America/Anchorage",
"America/Juneau",
"America/Sitka",
"America/Metlakatla",
"America/Yakutat",
"America/Nome",
"America/Adak",
"Pacific/Honolulu"
],
"is_in_european_union": false,
"fips": "US",
"geonames_id": 6252001,
"hasc_id": "US",
"wikidata_id": "Q30"
},
"city": {
"fips": "644000",
"alpha2": null,
"geonames_id": 5368753,
"hasc_id": null,
"wikidata_id": "Q65",
"name": "Los Angeles",
"name_translated": "Los Angeles"
},
"region": {
"fips": "US06",
"alpha2": "US-CA",
"geonames_id": 5332921,
"hasc_id": "US.CA",
"wikidata_id": "Q99",
"name": "California",
"name_translated": "California"
}
},
"tlds": [
".us"
],
"timezone": {
"id": "America/Los_Angeles",
"current_time": "2023-05-04T04:30:28-07:00",
"code": "PDT",
"is_daylight_saving": true,
"gmt_offset": -25200
},
"security": {
"is_anonymous": false,
"is_datacenter": false,
"is_vpn": false,
"is_bot": false,
"is_abuser": true,
"is_known_attacker": true,
"is_proxy": false,
"is_spam": false,
"is_tor": false,
"is_icloud_relay": false,
"threat_score": 100
},
"domains": {
"count": 10943,
"domains": [
"eliwise.academy",
"accountingprose.academy",
"pistola.academy",
"1and1-test-ntlds-fr.accountant",
"omnergy.africa"
]
}
}
}
"""
}
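# IPBASE_DATA['response'] mimics the raw JSON bytes returned by the ipbase.com API, so
# the test below can stub out the HTTP call entirely.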
class TestIpbase(unittest.TestCase):
def METHOD_NAME(self,):
"test the json data extraction"
params = {
"ip": "1.1.1.1",
"apikey": "aaa",
"hostname": True,
"language": "de",
}
module = Mock()
module.params = params
data = json.loads(IPBASE_DATA['response'].decode("utf-8"))
IpbaseInfo._get_url_data = Mock()
IpbaseInfo._get_url_data.return_value = data
ipbase_info = IpbaseInfo(module)
json_data = ipbase_info.info()
self.maxDiff = None
self.assertDictEqual(json_data, data) |
209 | mask replace | # -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2019-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Fetch and parse an event catalog from GWOSC.
"""
import numbers
from collections import OrderedDict
import numpy
from astropy import constants
from astropy import units
from gwosc.api import DEFAULT_URL as DEFAULT_GWOSC_URL
from gwosc.api import fetch_catalog_json
from .. import EventTable
from .utils import (
read_with_columns,
read_with_selection,
)
#: suffix indicating a unit name
UNIT_SUFFIX = "_unit"
#: custom GWOSC unit mapping
UNITS = {
"M_sun X c^2": units.M_sun * constants.c ** 2,
}
#: set of values corresponding to 'missing' or 'null' data
MISSING_DATA = {
"NA",
}
#: values to fill missing data, based on dtype
_FILL_VALUE = OrderedDict([
(str, str()),
(bytes, bytes()),
(numbers.Integral, int()),
(numbers.Number, numpy.nan),
])
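# Order matters: METHOD_NAME() walks this mapping top-down and returns the first
# issubclass match, so Integral must come before the more general Number.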
__author__ = 'Duncan Macleod <[email protected]>'
def _get_unit(colname, unitdict):
"""Return the unit (name) for a column, or `None` if a match isn't found.
The return type is either `astropy.units.Unit` or `str`; it doesn't
matter because setting `column.unit` will automatically convert things
into `astropy.units.Unit` for us.
"""
if colname.endswith(("_lower", "_upper")):
colname = colname.rsplit("_", 1)[0]
try:
rawunit = unitdict[colname]
except KeyError:
return None
return UNITS.get(rawunit, rawunit)
def METHOD_NAME(value, dtype):
"""Replace `value` with the default for the given `dtype`
If no default is set for the ``dtype``, just return the
value unchanged.
"""
for type_, replacement in _FILL_VALUE.items():
if issubclass(dtype, type_):
return replacement
return value
def _mask_column(col):
"""Find and replace missing data in a column
Returns the new data, and the mask as `list`
"""
# find masked indices
mask = [v in MISSING_DATA for v in col]
# find common dtype of unmasked values
dtype = numpy.array([x for i, x in enumerate(col) if not mask[i]]).dtype.type
# replace the column with a new version that has the masked
# values replaced by a 'sensible' default for the relevant dtype
return (
[METHOD_NAME(x, dtype) if mask[i] else x for i, x in enumerate(col)],
mask,
)
@read_with_columns
@read_with_selection
def fetch_catalog(catalog, host=DEFAULT_GWOSC_URL):
catalog = fetch_catalog_json(catalog, host=host)
data = catalog["events"]
# get the list of all parameters
parameters = set(
key for event in data.values() for key in event
if not key.endswith(UNIT_SUFFIX)
)
# get the list of all units
unitlist = {}
for event in data.values():
dictpartial = {
k[:-len(UNIT_SUFFIX)]: v for k, v in event.items()
if k.endswith(UNIT_SUFFIX)
}
unitlist.update(dictpartial)
# unpack the catalogue data into a dict of columns
names = ["name"] + list(parameters)
cols = {n: [] for n in names}
for event, plist in data.items():
cols["name"].append(event)
for n in names[1:]:
cols[n].append(plist[n])
# rebuild the columns by replacing the masked values
mask = {}
for name, col in cols.items():
cols[name], mask[name] = _mask_column(col)
# convert to columns
tab = EventTable(
cols,
meta={
"catalog": catalog,
"host": host,
},
masked=True,
)
# add column metadata
for name in parameters:
tab[name].mask = mask[name]
tab[name].description = name
tab[name].unit = _get_unit(name, unitlist)
# add an index on the event name
tab.add_index('name')
return tab |
210 | gen sites | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
import json
import os
import random
from collections import namedtuple
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray import verilog
from prjxray.lut_maker import LutMaker
from prjxray.db import Database
INT = "INT"
BIN = "BIN"
BOOL = "BOOL"
STR = "STR"
def METHOD_NAME(site):
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
already_used = list()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type not in [
"GTP_CHANNEL_0",
"GTP_CHANNEL_1",
"GTP_CHANNEL_2",
"GTP_CHANNEL_3",
"GTP_CHANNEL_0_MID_LEFT",
"GTP_CHANNEL_1_MID_LEFT",
"GTP_CHANNEL_2_MID_LEFT",
"GTP_CHANNEL_3_MID_LEFT",
"GTP_CHANNEL_0_MID_RIGHT",
"GTP_CHANNEL_1_MID_RIGHT",
"GTP_CHANNEL_2_MID_RIGHT",
"GTP_CHANNEL_3_MID_RIGHT",
] or gridinfo.tile_type in already_used:
continue
else:
tile_type = gridinfo.tile_type
already_used.append(tile_type)
for site_name, site_type in gridinfo.sites.items():
if site_type != site:
continue
if "RIGHT" in tile_type and "X0" in site_name:
continue
if "LEFT" in tile_type and "X1" in site_name:
continue
yield tile_name, tile_type, site_name, site_type
def main():
print(
'''
module top(
input wire in,
output wire out
);
assign out = in;
''')
luts = LutMaker()
primitives_list = list()
for tile_name, tile_type, site_name, site_type in METHOD_NAME(
"GTPE2_CHANNEL"):
params_list = list()
params_dict = dict()
params_dict["tile_type"] = tile_type
params = dict()
params['site'] = site_name
verilog_attr = ""
verilog_attr = "#("
fuz_dir = os.getenv("FUZDIR", None)
assert fuz_dir
with open(os.path.join(fuz_dir, "attrs.json"), "r") as attrs_file:
attrs = json.load(attrs_file)
in_use = bool(random.randint(0, 9))
params["IN_USE"] = in_use
if in_use:
for param, param_info in attrs.items():
param_type = param_info["type"]
param_values = param_info["values"]
param_digits = param_info["digits"]
if param_type == INT:
value = random.choice(param_values)
value_str = value
elif param_type == BIN:
value = random.randint(0, param_values[0])
value_str = "{digits}'b{value:0{digits}b}".format(
value=value, digits=param_digits)
elif param_type in [BOOL, STR]:
value = random.choice(param_values)
value_str = verilog.quote(value)
params[param] = value
verilog_attr += """
.{}({}),""".format(param, value_str)
verilog_ports = ""
for param in ["TXUSRCLK", "TXUSRCLK2", "TXPHDLYTSTCLK",
"SIGVALIDCLK", "RXUSRCLK", "RXUSRCLK2", "DRPCLK",
"DMONITORCLK", "CLKRSVD0", "CLKRSVD1"]:
is_inverted = random.randint(0, 1)
params[param] = is_inverted
verilog_attr += """
.IS_{}_INVERTED({}),""".format(param, is_inverted)
verilog_ports += """
.{}({}),""".format(param, luts.get_next_output_net())
verilog_attr = verilog_attr.rstrip(",")
verilog_attr += "\n)"
print("(* KEEP, DONT_TOUCH, LOC=\"{}\" *)".format(site_name))
print(
"""GTPE2_CHANNEL {attrs} {site} (
{ports}
);
""".format(
attrs=verilog_attr,
site=tile_type.lower(),
ports=verilog_ports.rstrip(",")))
params_list.append(params)
params_dict["params"] = params_list
primitives_list.append(params_dict)
for l in luts.create_wires_and_luts():
print(l)
print("endmodule")
with open('params.json', 'w') as f:
json.dump(primitives_list, f, indent=2)
if __name__ == '__main__':
main() |
211 | register pyinstrument | import os
import typing as t
import jinja2.ext
import logging
import sentry_sdk
from flask import (
Flask,
current_app,
redirect,
render_template,
request,
url_for,
g,
make_response,
Response,
)
from flask.typing import ResponseValue, ResponseReturnValue
from flask_babel import Babel
from flask_login import current_user, LoginManager
from jinja2 import select_autoescape
from werkzeug.datastructures import ImmutableDict
from sentry_sdk.integrations.flask import FlaskIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from werkzeug.exceptions import HTTPException
from hades_logs import HadesLogs
from pycroft.helpers.i18n import gettext
from pycroft.model import session
from web import api
from web.blueprints import task
from . import template_filters, template_tests
from .blueprints import (
facilities, finance, infrastructure, login, properties, user, host
)
from .blueprints.login import login_manager
from .commands import register_commands
from .templates import page_resources
class PycroftFlask(Flask):
"""
Extend the Flask class to set Jinja options.
"""
jinja_options = ImmutableDict(
Flask.jinja_options,
extensions=[
jinja2.ext.do,
jinja2.ext.loopcontrols,
],
autoescape=select_autoescape(),
undefined=jinja2.StrictUndefined,
)
login_manager: LoginManager
def __init__(self, *a: t.Any, **kw: t.Any) -> None:
super().__init__(*a, **kw)
# config keys to support:
self.maybe_add_config_from_env([
'PYCROFT_API_KEY',
'HADES_CELERY_APP_NAME',
'HADES_BROKER_URI',
'HADES_RESULT_BACKEND_URI',
'HADES_TIMEOUT',
'HADES_ROUTING_KEY',
])
def maybe_add_config_from_env(self, keys: t.Iterable[str]) -> None:
"""Write keys from the environment to the app's config
If a key does not exist in the environment, it will just be
skipped.
"""
for key in keys:
try:
self.config[key] = os.environ[key]
except KeyError:
self.logger.debug("Config key %s not present in environment, skipping", key)
continue
else:
self.logger.debug("Config key %s successfuly read from environment", key)
def make_app(debug: bool = False, hades_logs: bool = True) -> PycroftFlask:
"""Create and configure the main? Flask app object"""
app = PycroftFlask(__name__)
app.debug = debug
# initialization code
login_manager.init_app(app)
app.register_blueprint(user.bp, url_prefix="/user")
app.register_blueprint(facilities.bp, url_prefix="/facilities")
app.register_blueprint(infrastructure.bp, url_prefix="/infrastructure")
app.register_blueprint(properties.bp, url_prefix="/properties")
app.register_blueprint(finance.bp, url_prefix="/finance")
app.register_blueprint(host.bp, url_prefix="/host")
app.register_blueprint(task.bp, url_prefix="/task")
app.register_blueprint(login.bp)
app.register_blueprint(api.bp, url_prefix="/api/v0")
template_filters.register_filters(app)
template_tests.register_checks(app)
Babel(app)
if hades_logs:
try:
HadesLogs(app)
except KeyError as e:
app.logger.info("HadesLogs configuration incomplete, skipping.")
app.logger.info("Original error: %s", str(e))
else:
app.logger.info("HadesLogs configuration disabled. Skipping.")
page_resources.init_app(app)
user.nav.register_on(app)
finance.nav.register_on(app)
facilities.nav.register_on(app)
infrastructure.nav.register_on(app)
task.nav.register_on(app)
properties.nav.register_on(app)
@app.errorhandler(403)
@app.errorhandler(404)
@app.errorhandler(500)
def errorpage(e: Exception) -> ResponseReturnValue:
"""Handle errors according to their error code
:param e: The error from the errorhandler
"""
# We need this path hard-coding because the global app errorhandlers have higher
# precedence than anything registered to a blueprint.
# A clean solution would be flask supporting nested blueprints (see flask #539)
if request.path.startswith('/api/'):
return api.errorpage(e)
code = getattr(e, "code", 500)
if code == 500:
message = str(e)
elif code == 403:
message = gettext("You are not allowed to access this page.")
elif code == 404:
message = gettext("Page not found.")
else:
raise AssertionError()
return render_template('error.html', error=message), code
@app.route('/')
def redirect_to_index() -> ResponseValue:
return redirect(url_for('user.overview'))
@app.route('/debug-sentry')
def debug_sentry() -> t.NoReturn:
app.logger.warning("Someone used the debug-sentry endpoint! Also, this is a test warning.")
app.logger.info("An info log for inbetween")
app.logger.error("Someone used the debug-sentry endpoint! Also, this is a test error.",
extra={'pi': 3.141})
div_by_zero = 1 / 0 # noqa
assert False # noqa: B011
@app.teardown_request
def shutdown_session(exception: BaseException | None = None) -> None:
if app.testing:
# things are not necessarily committed here,
# so `remove` would result in a `ROLLBACK TO SAVEPOINT` to a pre-setup state.
return
session.Session.remove()
@app.before_request
def require_login() -> ResponseReturnValue | None:
"""Request a login for every page
except the login blueprint and the static folder.
Blueprint "None" is needed for "/static/*" GET requests.
"""
if current_user.is_anonymous and request.blueprint not in ("login", 'api', None):
lm = t.cast(LoginManager, current_app.login_manager) # type: ignore[attr-defined]
return lm.unauthorized()
return None
if app.debug:
METHOD_NAME(app)
register_commands(app)
return app
def METHOD_NAME(app: Flask) -> None:
try:
from pyinstrument import Profiler
except ImportError:
app.logger.info("in debug mode, but pyinstrument not installed.")
return
@app.before_request
def before_request() -> None:
if "profile" in request.args:
g.profiler = Profiler()
g.profiler.start()
@app.after_request
def after_request(response: Response) -> Response:
if not hasattr(g, "profiler"):
return response
g.profiler.stop()
output_html = g.profiler.output_html()
return make_response(output_html)
IGNORED_EXCEPTION_TYPES = (HTTPException,)
if dsn := os.getenv('PYCROFT_SENTRY_DSN'):
_TE = t.TypeVar("_TE")
def before_send(event: _TE, hint: dict[str, t.Any]) -> _TE | None:
if 'exc_info' in hint:
exc_type, exc_value, _tb = hint['exc_info']
if isinstance(exc_value, IGNORED_EXCEPTION_TYPES):
return None
return event
logging_integration = LoggingIntegration(
level=logging.INFO,  # INFO / WARN create breadcrumbs, just like SQL queries
event_level=logging.ERROR,  # errors and above create events
)
sentry_sdk.init(
dsn=dsn,
integrations=[FlaskIntegration(), logging_integration],
traces_sample_rate=1.0,
before_send=before_send
) |
212 | test head failure | #
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email [email protected] or [email protected].
#
"""
Unit Test for CORTXS3Client API.
"""
from http.client import HTTPConnection
from http.client import HTTPResponse
from unittest.mock import Mock
import pytest
from s3backgrounddelete.cortx_s3_client import CORTXS3Client
from s3backgrounddelete.cortx_s3_config import CORTXS3Config
from s3backgrounddelete.cortx_s3_constants import CONNECTION_TYPE_PRODUCER
def test_get_connection_success():
"""Test if HTTPConnection object is returned"""
config = CORTXS3Config()
response = CORTXS3Client(config, CONNECTION_TYPE_PRODUCER)._get_producer_connection()
assert isinstance(response, HTTPConnection)
def test_get_connection_as_none():
"""
Test that get_connection returns "None" when no endpoint is
configured.
"""
config = Mock(spec=CORTXS3Config)
config.get_cortx_s3_endpoint_for_producer = Mock(side_effect=KeyError())
assert CORTXS3Client(config, CONNECTION_TYPE_PRODUCER)._get_producer_connection() is None
def test_get_failure():
"""
Test if connection object is "None" then GET method should throw TypeError.
"""
with pytest.raises(TypeError):
config = Mock(spec=CORTXS3Config)
config.get_cortx_s3_endpoint = Mock(side_effect=KeyError())
assert CORTXS3Client(config, CONNECTION_TYPE_PRODUCER).get('/indexes/test_index1')
def test_get_success():
"""Test GET request should return success response."""
result = b'{"Key": "test_key1", "Value": "testValue1"}'
httpconnection = Mock(spec=HTTPConnection)
httpresponse = Mock(spec=HTTPResponse)
httpresponse.status = 200
httpresponse.getheaders.return_value = \
'Content-Type:text/html;Content-Length:14'
httpresponse.read.return_value = result
httpresponse.reason = 'OK'
httpconnection.getresponse.return_value = httpresponse
config = CORTXS3Config()
response = CORTXS3Client(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).get(
'/indexes/test_index1')
assert response['status'] == 200
def test_put_failure():
"""
Test if connection object is "None" then PUT method should throw TypeError.
"""
with pytest.raises(TypeError):
config = Mock(spec=CORTXS3Config)
config.get_cortx_s3_endpoint = Mock(side_effect=KeyError())
assert CORTXS3Client(config, CONNECTION_TYPE_PRODUCER).put('/indexes/test_index1')
def test_put_success():
"""Test PUT request should return success response."""
httpconnection = Mock(spec=HTTPConnection)
httpresponse = Mock(spec=HTTPResponse)
httpresponse.status = 201
httpresponse.getheaders.return_value = \
'Content-Type:text/html;Content-Length:14'
httpresponse.read.return_value = b'{}'
httpresponse.reason = 'CREATED'
httpconnection.getresponse.return_value = httpresponse
config = CORTXS3Config()
request_uri = '/indexes/test_index1'
response = CORTXS3Client(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).put(request_uri)
assert response['status'] == 201
def test_delete_failure():
"""
Test if connection object is "None" then DELETE should throw TypeError.
"""
with pytest.raises(TypeError):
config = Mock(spec=CORTXS3Config)
config.get_cortx_s3_endpoint = Mock(side_effect=KeyError())
assert CORTXS3Client(config, CONNECTION_TYPE_PRODUCER).delete('/indexes/test_index1')
def test_delete_success():
"""Test DELETE request should return success response."""
httpconnection = Mock(spec=HTTPConnection)
httpresponse = Mock(spec=HTTPResponse)
httpresponse.status = 204
httpresponse.getheaders.return_value = \
'Content-Type:text/html;Content-Length:14'
httpresponse.read.return_value = b'test body'
httpresponse.reason = 'OK'
httpconnection.getresponse.return_value = httpresponse
config = CORTXS3Config()
response = CORTXS3Client(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).delete(
'/indexes/test_index1')
assert response['status'] == 204
def METHOD_NAME():
"""
Test if connection object is "None" then HEAD should throw TypeError.
"""
with pytest.raises(TypeError):
config = Mock(spec=CORTXS3Config)
config.get_cortx_s3_endpoint = Mock(side_effect=KeyError())
assert CORTXS3Client(config, CONNECTION_TYPE_PRODUCER).head('/indexes/test_index1')
def test_head_success():
"""Test HEAD request should return success response."""
httpconnection = Mock(spec=HTTPConnection)
httpresponse = Mock(spec=HTTPResponse)
httpresponse.status = 200
httpresponse.getheaders.return_value = \
'Content-Type:text/html;Content-Length:14'
httpresponse.read.return_value = b'test body'
httpresponse.reason = 'OK'
httpconnection.getresponse.return_value = httpresponse
config = CORTXS3Config()
response = CORTXS3Client(config, CONNECTION_TYPE_PRODUCER, connection=httpconnection).head(
'/indexes/test_index1')
assert response['status'] == 200
|
213 | register message receive handlers | import logging
import numpy as np
from .message_define import MyMessage
from .utils import transform_tensor_to_list, post_complete_message_to_sweep_process
from ....core.distributed.fedml_comm_manager import FedMLCommManager
from ....core.distributed.communication.message import Message
class FedOptServerManager(FedMLCommManager):
def __init__(
self,
args,
aggregator,
comm=None,
rank=0,
size=0,
backend="MPI",
is_preprocessed=False,
preprocessed_client_lists=None,
):
super().__init__(args, comm, rank, size, backend)
self.args = args
self.aggregator = aggregator
self.round_num = args.comm_round
self.round_idx = 0
self.is_preprocessed = is_preprocessed
self.preprocessed_client_lists = preprocessed_client_lists
def run(self):
super().run()
def send_init_msg(self):
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
client_schedule = self.aggregator.generate_client_schedule(self.round_idx, client_indexes)
average_weight_dict = self.aggregator.get_average_weight(client_indexes)
global_model_params = self.aggregator.get_global_model_params()
for process_id in range(1, self.size):
self.send_message_init_config(
process_id, global_model_params,
average_weight_dict, client_schedule
)
def METHOD_NAME(self):
self.register_message_receive_handler(
MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER,
self.handle_message_receive_model_from_client,
)
def handle_message_receive_model_from_client(self, msg_params):
sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER)
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
# local_sample_number = msg_params.get(MyMessage.MSG_ARG_KEY_NUM_SAMPLES)
client_runtime_info = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_RUNTIME_INFO)
self.aggregator.record_client_runtime(sender_id - 1, client_runtime_info)
self.aggregator.add_local_trained_result(
sender_id - 1, model_params,
)
b_all_received = self.aggregator.check_whether_all_receive()
logging.info("b_all_received = " + str(b_all_received))
if b_all_received:
global_model_params = self.aggregator.aggregate()
self.aggregator.test_on_server_for_all_clients(self.round_idx)
# start the next round
self.round_idx += 1
if self.round_idx == self.round_num:
post_complete_message_to_sweep_process(self.args)
self.finish()
return
# sampling clients
if self.is_preprocessed:
if self.preprocessed_client_lists is None:
# sampling has already been done in data preprocessor
client_indexes = [self.round_idx] * self.args.client_num_per_round
else:
client_indexes = self.preprocessed_client_lists[self.round_idx]
else:
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
client_schedule = self.aggregator.generate_client_schedule(self.round_idx, client_indexes)
average_weight_dict = self.aggregator.get_average_weight(client_indexes)
global_model_params = self.aggregator.get_global_model_params()
print("size = %d" % self.size)
for receiver_id in range(1, self.size):
self.send_message_sync_model_to_client(
receiver_id, global_model_params,
average_weight_dict, client_schedule
)
def send_message_init_config(self, receive_id, global_model_params,
average_weight_dict, client_schedule):
message = Message(
MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.get_sender_id(), receive_id
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
# message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
message.add_params(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS, average_weight_dict)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE, client_schedule)
self.send_message(message)
def send_message_sync_model_to_client(self, receive_id, global_model_params,
average_weight_dict, client_schedule):
logging.info("send_message_sync_model_to_client. receive_id = %d" % receive_id)
message = Message(
MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,
self.get_sender_id(),
receive_id,
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
# message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
message.add_params(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS, average_weight_dict)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE, client_schedule)
self.send_message(message)
|
214 | overpass request | """
Large portions of this code were adapted from OSMNx, by Geoff Boeing.
Although attempts were made to use OSMNx directly (including refactoring
its entire code base as a contribution to that package), its integration
with libraries not available in QGIS' Python distribution proved too
tight, so it was not practical to detach them in order to use OSMNx as a
dependency or submodule.
For the original work, please see https://github.com/gboeing/osmnx
"""
import logging
import time
import re
import requests
from .osm_utils.osm_params import http_headers, memory
from aequilibrae.parameters import Parameters
from aequilibrae.context import get_logger
import gc
import importlib.util as iutil
from ...utils import WorkerThread
spec = iutil.find_spec("PyQt5")
pyqt = spec is not None
if pyqt:
from PyQt5.QtCore import pyqtSignal
class OSMDownloader(WorkerThread):
if pyqt:
downloading = pyqtSignal(object)
def __emit_all(self, *args):
if pyqt:
self.downloading.emit(*args)
def __init__(self, polygons, modes, logger: logging.Logger = None):
WorkerThread.__init__(self, None)
self.logger = logger or get_logger()
self.polygons = polygons
self.filter = self.get_osm_filter(modes)
self.report = []
self.json = []
par = Parameters().parameters["osm"]
self.overpass_endpoint = par["overpass_endpoint"]
self.timeout = par["timeout"]
self.sleeptime = par["sleeptime"]
def doWork(self):
infrastructure = 'way["highway"]'
query_template = (
"{memory}[out:json][timeout:{timeout}];({infrastructure}{filters}({south:.6f},{west:.6f},"
"{north:.6f},{east:.6f});>;);out;"
)
self.__emit_all(["maxValue", len(self.polygons)])
self.__emit_all(["Value", 0])
m = ""
if memory > 0:
m = f"[maxsize: {memory}]"
for counter, poly in enumerate(self.polygons):
msg = f"Downloading polygon {counter + 1} of {len(self.polygons)}"
self.logger.debug(msg)
self.__emit_all(["Value", counter])
self.__emit_all(["text", msg])
west, south, east, north = poly
query_str = query_template.format(
north=north,
south=south,
east=east,
west=west,
infrastructure=infrastructure,
filters=self.filter,
timeout=self.timeout,
memory=m,
)
json = self.METHOD_NAME(data={"data": query_str}, timeout=self.timeout)
if json["elements"]:
self.json.extend(json["elements"])
del json
gc.collect()
self.__emit_all(["Value", len(self.polygons)])
self.__emit_all(["FinishedDownloading", 0])
def METHOD_NAME(self, data, pause_duration=None, timeout=180, error_pause_duration=None):
"""Send a request to the Overpass API via HTTP POST and return the JSON response.
:Arguments:
**data**(:obj:`dict` or `OrderedDict`): key-value pairs of parameters to post to the API
**pause_duration** (:obj:`int`): how long to pause in seconds before requests, if None, will query API
status endpoint to find when next slot is available
**timeout** (:obj:`int`): the timeout interval for the requests library
**error_pause_duration**(:obj:`int`): how long to pause in seconds before re-trying requests if error
:Returns:
:obj:`dict`
"""
# define the Overpass API URL, then send the query to it via HTTP POST
url = self.overpass_endpoint.rstrip("/") + "/interpreter"
if pause_duration is None:
time.sleep(self.sleeptime)
start_time = time.time()
self.report.append(f'Posting to {url} with timeout={timeout}, "{data}"')
self.logger.debug(f'Posting to {url} with timeout={timeout}, "{data}"')
response = requests.post(url, data=data, timeout=timeout, headers=http_headers)
# get the response size and the domain, log result
size_kb = len(response.content) / 1000.0
domain = re.findall(r"(?s)//(.*?)/", url)[0]
msg = "Downloaded {:,.1f}KB from {} in {:,.2f} seconds".format(size_kb, domain, time.time() - start_time)
self.report.append(msg)
self.logger.info(msg)
try:
response_json = response.json()
if "remark" in response_json:
msg = f'Server remark: "{response_json["remark"]}"'
self.report.append(msg)
self.logger.info(msg)
except Exception:
# 429 is 'too many requests' and 504 is 'gateway timeout' from server
# overload - handle these errors by recursively calling
# overpass_request until we get a valid response
if response.status_code in [429, 504]:
# pause for error_pause_duration seconds before re-trying request
if error_pause_duration is None:
error_pause_duration = self.sleeptime + 1
msg = "Server at {} returned status code {} and no JSON data. Re-trying request in {:.2f} seconds.".format(
domain, response.status_code, error_pause_duration
)
self.report.append(msg)
self.logger.info(msg)
time.sleep(error_pause_duration)
response_json = self.METHOD_NAME(data=data, pause_duration=pause_duration, timeout=timeout)
# else, this was an unhandled status_code, throw an exception
else:
self.report.append(f"Server at {domain} returned status code {response.status_code} and no JSON data")
raise Exception(f"Server returned no JSON data.\n{response} {response.reason}\n{response.text}")
return response_json
def get_osm_filter(self, modes: list) -> str:
"""
loosely adapted from http://www.github.com/gboeing/osmnx
"""
p = Parameters().parameters["network"]["osm"]
all_tags = p["all_link_types"]
p = p["modes"]
all_modes = list(p.keys())
tags_to_keep = []
for m in modes:
if m not in all_modes:
raise ValueError(f"Mode {m} not listed in the parameters file")
tags_to_keep += p[m]["link_types"]
tags_to_keep = list(set(tags_to_keep))
# Default to remove
service = '["service"!~"parking|parking_aisle|driveway|private|emergency_access"]'
access = '["access"!~"private"]'
filtered = [x for x in all_tags if x not in tags_to_keep]
filtered = "|".join(filtered)
filter = f'["area"!~"yes"]["highway"!~"{filtered}"]{service}{access}'
return filter |
215 | package info | from conan import ConanFile
from conan.tools.files import get, chdir, save, replace_in_file, rmdir, rm
from conan.tools.microsoft import is_msvc
from conans import AutoToolsBuildEnvironment, MSBuild, tools
import os
import re
required_conan_version = ">=1.52.0"
class LibsassConan(ConanFile):
name = "libsass"
license = "MIT"
homepage = "libsass.org"
url = "https://github.com/conan-io/conan-center-index"
description = "A C/C++ implementation of a Sass compiler"
topics = ("Sass", "LibSass", "compiler")
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False]
}
default_options = {
"shared": False,
"fPIC": True
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _is_mingw(self):
return self.settings.os == "Windows" and self.settings.compiler == "gcc"
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
del self.options.fPIC
def build_requirements(self):
if self.settings.os != "Windows":
self.tool_requires("libtool/2.4.7")
def source(self):
get(self, **self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if self._autotools:
return self._autotools
self._autotools = AutoToolsBuildEnvironment(self)
args = []
args.append("--disable-tests")
args.append("--enable-%s" % ("shared" if self.options.shared else "static"))
args.append("--disable-%s" % ("static" if self.options.shared else "shared"))
self._autotools.configure(args=args)
return self._autotools
def _build_autotools(self):
with chdir(self, self._source_subfolder):
save(self, path="VERSION", content=f"{self.version}")
self.run("{} -fiv".format(tools.get_env("AUTORECONF")))
autotools = self._configure_autotools()
autotools.make()
@property
def _make_program(self):
return tools.get_env("CONAN_MAKE_PROGRAM", tools.which("make") or tools.which("mingw32-make"))
def _build_mingw(self):
makefile = os.path.join(self._source_subfolder, "Makefile")
replace_in_file(self, makefile, "CFLAGS += -O2", "")
replace_in_file(self, makefile, "CXXFLAGS += -O2", "")
replace_in_file(self, makefile, "LDFLAGS += -O2", "")
with chdir(self, self._source_subfolder):
env_vars = AutoToolsBuildEnvironment(self).vars
env_vars.update({
"BUILD": "shared" if self.options.shared else "static",
"PREFIX": tools.unix_path(os.path.join(self.package_folder)),
# Don't force static link to mingw libs, leave this decision to consumer (through LDFLAGS in env)
"STATIC_ALL": "0",
"STATIC_LIBGCC": "0",
"STATIC_LIBSTDCPP": "0",
})
with tools.environment_append(env_vars):
self.run(f"{self._make_program} -f Makefile")
def _build_visual_studio(self):
with chdir(self, self._source_subfolder):
properties = {
"LIBSASS_STATIC_LIB": "" if self.options.shared else "true",
"WholeProgramOptimization": "true" if any(re.finditer("(^| )[/-]GL($| )", tools.get_env("CFLAGS", ""))) else "false",
}
platforms = {
"x86": "Win32",
"x86_64": "Win64"
}
msbuild = MSBuild(self)
msbuild.build(os.path.join("win", "libsass.sln"), platforms=platforms, properties=properties)
def build(self):
if self._is_mingw:
self._build_mingw()
elif is_msvc(self):
self._build_visual_studio()
else:
self._build_autotools()
def _install_autotools(self):
with chdir(self, self._source_subfolder):
autotools = self._configure_autotools()
autotools.install()
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rm(self, "*.la", self.package_folder, recursive=True)
def _install_mingw(self):
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "lib"))
self.copy("*.a", dst="lib", src=os.path.join(self._source_subfolder, "lib"))
def _install_visual_studio(self):
self.copy("*.h", dst="include", src=os.path.join(self._source_subfolder, "include"))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "win", "bin"), keep_path=False)
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "win", "bin"), keep_path=False)
def package(self):
self.copy("LICENSE", src=self._source_subfolder, dst="licenses")
if self._is_mingw:
self._install_mingw()
elif is_msvc(self):
self._install_visual_studio()
else:
self._install_autotools()
def METHOD_NAME(self):
self.cpp_info.names["pkg_config"] = "libsass"
self.cpp_info.libs = ["libsass" if is_msvc(self) else "sass"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.extend(["dl", "m"])
if not self.options.shared and tools.stdcpp_library(self):
self.cpp_info.system_libs.append(tools.stdcpp_library(self)) |
216 | test handles multiple models | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.user_stats_computation_jobs."""
from __future__ import annotations
import datetime
from core import feconf
from core.jobs import job_test_utils
from core.jobs.batch_jobs import user_stats_computation_jobs
from core.jobs.types import job_run_result
from core.platform import models
from typing import Final, Type
MYPY = False
if MYPY:
from mypy_imports import user_models
(user_models,) = models.Registry.import_models([models.Names.USER])
class CollectWeeklyDashboardStatsJobTests(job_test_utils.JobTestBase):
JOB_CLASS: Type[
user_stats_computation_jobs.CollectWeeklyDashboardStatsJob
] = user_stats_computation_jobs.CollectWeeklyDashboardStatsJob
VALID_USER_ID_1: Final = 'uid_%s' % (
'a' * feconf.USER_ID_RANDOM_PART_LENGTH
)
VALID_USER_ID_2: Final = 'uid_%s' % (
'b' * feconf.USER_ID_RANDOM_PART_LENGTH
)
def setUp(self) -> None:
super().setUp()
self.formated_datetime = datetime.datetime.utcnow().strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_updates_existing_stats_model_when_no_values_are_provided(
self
) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
)
self.put_multi([user_settings_model, user_stats_model])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(
new_user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
)
def test_fails_when_existing_stats_has_wrong_schema_version(self) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
schema_version=0
)
self.put_multi([user_settings_model, user_stats_model])
with self.assertRaisesRegex(
Exception,
'Sorry, we can only process v1-v%d dashboard stats schemas at '
'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION
):
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(new_user_stats_model.weekly_creator_stats_list, [])
def test_updates_existing_stats_model_when_values_are_provided(
self
) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
num_ratings=10,
average_ratings=4.5,
total_plays=22,
)
self.put_multi([user_settings_model, user_stats_model])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(
new_user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 10,
'average_ratings': 4.5,
'total_plays': 22
}
}]
)
def test_creates_new_stats_model_if_not_existing(self) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_settings_model.update_timestamps()
user_settings_model.put()
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='NEW MODELS SUCCESS: 1')
])
user_stats_model = user_models.UserStatsModel.get(self.VALID_USER_ID_1)
# Ruling out the possibility of None for mypy type checking.
assert user_stats_model is not None
self.assertEqual(
user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
)
def METHOD_NAME(self) -> None:
user_settings_model_1 = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_settings_model_2 = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_2, email='[email protected]')
user_stats_model_1 = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1)
self.put_multi([
user_settings_model_1, user_settings_model_2, user_stats_model_1])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1'),
job_run_result.JobRunResult(stdout='NEW MODELS SUCCESS: 1')
])
user_stats_model = user_models.UserStatsModel.get(self.VALID_USER_ID_2)
self.assertIsNotNone(user_stats_model) |
217 | get address | class Component(object):
"""
Entity class representing a sample or a sample container
"""
def __init__(self, container, address, scannable):
self.container = container
self.address = address
self.scannable = scannable
self.id = None
self.present = False
self.selected = False
self.scanned = False
self.dirty = False
self._leaf = False
self._name = ""
######################### PUBLIC #########################
def get_name(self):
return self._name
def get_id(self):
"""
Returns a unique ID of an element - typically scanned from the real object.
Can be None if the sample is unknown or not present.
:rtype: str
"""
return self.id
def METHOD_NAME(self):
"""
Returns a unique identifier of the slot of the element.
Can never be None - even if the component is not present
:rtype: str
"""
return self.address
def get_coords(self):
coords_list = [self.get_index() + 1]
x = self.get_container()
while x:
idx = x.get_index()
if idx is not None:
coords_list.append(idx + 1)
x = x.get_container()
coords_list.reverse()
return tuple(coords_list)
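# Illustrative example: a sample at 0-based index 2 inside a basket at 0-based
# index 1 of the root container yields the 1-based, root-to-leaf tuple (2, 3).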
def get_index(self):
"""
Returns the index of the object within the parent's component list.
:rtype: int
"""
try:
container = self.get_container()
if container is not None:
components = container.get_components()
for i in range(len(components)):
if components[i] is self:
return i
except Exception:
return -1
def is_leaf(self):
return self._leaf
def is_present(self):
"""
Returns true if the element is known to be currently present
:rtype: bool
"""
return self.present
def is_selected(self):
"""
Returns if the element is currently selected
:rtype: bool
"""
return self.selected
def is_scanned(self):
"""
Returns if the element has been scanned for ID (for scannable components)
:rtype: bool
"""
if not self.is_scannable():
return False
return self.scanned
def is_scannable(self):
"""
Returns if the element can be scanned for ID
:rtype: bool
"""
return self.scannable
def assert_is_scannable(self):
if not self.is_scannable():
raise "Element is not scannable"
def get_container(self):
"""
Returns the parent of this element
:rtype: Container
"""
return self.container
def get_siblings(self):
"""
Returns the siblings of this element (all other components in the same container)
:rtype: list
"""
ret = []
if self.get_container() is not None:
for c in self.get_container().get_components():
if c != self:
ret.append(c)
return ret
def clear_info(self):
"""
Clears all sample info (also in components if object is a container)
"""
changed = False
if self.id is not None:
self.id = None
changed = True
if self.present:
self.present = False
changed = True
if self.scanned:
self.scanned = False
changed = True
if changed:
self._set_dirty()
######################### PROTECTED #########################
def _set_info(self, present=False, id=None, scanned=False):
changed = False
if self.id != id:
self.id = id
changed = True
if self.id:
present = True
if self.present != present:
self.present = present
changed = True
if not self.is_scannable():
scanned = False
if self.scanned != scanned:
self.scanned = scanned
changed = True
if changed:
self._set_dirty()
def _set_selected(self, selected):
if selected:
for c in self.get_siblings():
c._set_selected(False)
if self.get_container() is not None:
self.get_container()._set_selected(True)
self.selected = selected
def _is_dirty(self):
return self.dirty
def _set_dirty(self):
self.dirty = True
container = self.get_container()
if container is not None:
container._set_dirty()
def _reset_dirty(self):
self.dirty = False |
218 | test inheritance detection | """
This file contains an initial proposal that can be scrapped and reworked if/when appropriate.
Either way, this test file should probably be removed once the actual FastAPI implementation
is complete and has integration tests with pydantic v2. However, we are including it here for now
to get an early warning if this approach would require modification for compatibility with
any future changes to the JSON schema generation logic, etc.
See the original PR for more details: https://github.com/pydantic/pydantic/pull/5094
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any
from dirty_equals import HasRepr, IsInstance, IsStr
from pydantic import BaseModel, ConfigDict
from pydantic._internal._core_metadata import CoreMetadataHandler
from pydantic._internal._core_utils import CoreSchemaOrField
from pydantic.errors import PydanticInvalidForJsonSchema
from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue
class _ErrorKey(str):
pass
class FastAPIGenerateJsonSchema(GenerateJsonSchema):
"""
Idea: This class would be exported from FastAPI, and if users want to modify the way JSON schema is generated
in FastAPI, they should inherit from it and override it as appropriate.
In the JSON schema generation logic, FastAPI _could_ also attempt to work with classes that inherit directly from
GenerateJsonSchema by doing something like:
if UserGenerateJsonSchema.handle_invalid_for_json_schema is GenerateJsonSchema.handle_invalid_for_json_schema:
# The method has not been overridden; inherit from FastAPIGenerateJsonSchema
UserGenerateJsonSchema = type(
"UserGenerateJsonSchema", (FastAPIGenerateJsonSchema, UserGenerateJsonSchema), {}
)
else:
raise TypeError(f"{UserGenerateJsonSchema.__name__} should inherit from FastAPIGenerateJsonSchema")
I'm not sure which approach is better.
"""
def handle_invalid_for_json_schema(self, schema: CoreSchemaOrField, error_info: str) -> JsonSchemaValue:
# NOTE: I think it may be a good idea to rework this method to either not use CoreMetadataHandler,
# and/or to make CoreMetadataHandler a public API.
if CoreMetadataHandler(schema).metadata.get('pydantic_js_modify_function') is not None:
# Since there is a json schema modify function, assume that this type is meant to be handled,
# and the modify function will set all properties as appropriate
return {}
else:
error = PydanticInvalidForJsonSchema(f'Cannot generate a JsonSchema for {error_info}')
return {_ErrorKey('error'): error}
@dataclass
class ErrorDetails:
path: list[Any]
error: PydanticInvalidForJsonSchema
def collect_errors(schema: JsonSchemaValue) -> list[ErrorDetails]:
errors: list[ErrorDetails] = []
def _collect_errors(schema: JsonSchemaValue, path: list[Any]) -> None:
if isinstance(schema, dict):
for k, v in schema.items():
if isinstance(k, _ErrorKey):
errors.append(ErrorDetails(path, schema[k]))
_collect_errors(v, list(path) + [k])
elif isinstance(schema, list):
for i, v in enumerate(schema):
_collect_errors(v, list(path) + [i])
_collect_errors(schema, [])
return errors
def METHOD_NAME() -> None:
class GenerateJsonSchema2(GenerateJsonSchema):
pass
assert GenerateJsonSchema2.handle_invalid_for_json_schema is GenerateJsonSchema.handle_invalid_for_json_schema
# this is just a quick proof of the note above indicating that you can detect whether a specific method
# is overridden, for the purpose of allowing direct inheritance from GenerateJsonSchema.
assert (
FastAPIGenerateJsonSchema.handle_invalid_for_json_schema
is not GenerateJsonSchema.handle_invalid_for_json_schema
)
def test_collect_errors() -> None:
class Car:
def __init__(self, make: str, model: str, year: int):
self.make = make
self.model = model
self.year = year
class Model(BaseModel):
f1: int = 1
f2: Car
model_config = ConfigDict(arbitrary_types_allowed=True)
schema = Model.model_json_schema(schema_generator=FastAPIGenerateJsonSchema)
assert schema == {
'title': 'Model',
'type': 'object',
'properties': {
'f1': {'type': 'integer', 'default': 1, 'title': 'F1'},
'f2': {
'error': HasRepr(IsStr(regex=r'PydanticInvalidForJsonSchema\(.*\)')),
'title': 'F2',
},
},
'required': ['f2'],
}
collected_errors = collect_errors(schema)
assert collected_errors == [
ErrorDetails(
path=['properties', 'f2'],
error=IsInstance(PydanticInvalidForJsonSchema),
)
] |
219 | application config html | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import current_app as app
from flask import jsonify, make_response, request
from flask_jwt_extended import create_access_token, get_jwt, jwt_required
from . import jwt
from .store import Store
@jwt.expired_token_loader
def my_expired_token_callback(jwt_header, jwt_payload):
return jsonify({"status": "unauthenticated"}), 401
@app.route("/application-config")
def METHOD_NAME():
return app.send_static_file("application-config.html")
@app.route("/downloads")
def downloads_html():
return app.send_static_file("downloads.html")
@app.route("/")
def index_html():
return app.send_static_file("index.html")
@app.route("/logout")
def logout_html():
return app.send_static_file("logout.html")
@app.route("/project-admin-dashboard")
def project_admin_dashboard_html():
return app.send_static_file("project-admin-dashboard.html")
@app.route("/project-configuration")
def project_configuration_html():
return app.send_static_file("project-configuration.html")
@app.route("/registration-form")
def registration_form_html():
return app.send_static_file("registration-form.html")
@app.route("/server-config")
def server_config_html():
return app.send_static_file("server-config.html")
@app.route("/site-dashboard")
def site_dashboard_html():
return app.send_static_file("site-dashboard.html")
@app.route("/user-dashboard")
def user_dashboard_html():
return app.send_static_file("user-dashboard.html")
@app.route("/api/v1/login", methods=["POST"])
def login():
req = request.json
email = req.get("email", None)
password = req.get("password", None)
user = Store.verify_user(email, password)
if user:
additional_claims = {"role": user.role.name, "organization": user.organization.name}
access_token = create_access_token(identity=user.email, additional_claims=additional_claims)
return jsonify(
{
"status": "ok",
"user": {"id": user.id, "email": user.email, "role": user.role.name},
"access_token": access_token,
}
)
else:
return jsonify({"status": "unauthenticated"}), 401
@app.route("/api/v1/overseer/blob", methods=["POST"])
@jwt_required()
def overseer_blob():
claims = get_jwt()
if claims.get("role") == "project_admin":
pin = request.json.get("pin")
fileobj, filename = Store.get_overseer_blob(pin)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/servers/<int:id>/blob", methods=["POST"])
@jwt_required()
def server_blob(id):
claims = get_jwt()
if claims.get("role") == "project_admin":
pin = request.json.get("pin")
fileobj, filename = Store.get_server_blob(pin, id == 1)
response = make_response(fileobj.read())
response.headers.set("Content-Type", "zip")
response.headers.set("Content-Disposition", f'attachment; filename="{filename}"')
return response
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/project", methods=["PATCH"])
@jwt_required()
def set_project():
claims = get_jwt()
if claims.get("role") == "project_admin":
req = request.json
return jsonify(Store.set_project(req))
else:
return jsonify({"status": "unauthorized"}), 403
@app.route("/api/v1/project", methods=["GET"])
def get_project():
return jsonify(Store.get_project())
@app.route("/api/v1/organizations", methods=["GET"])
def get_orgs():
return jsonify(Store.get_orgs()) |
220 | unmix | """ Takes the analysis part of spltter.py from PYMEAcquire.
Functions for unmixing and unsplitting multichannel data that has been acquired
using an image splitting device which splits the channels onto a single camera.
"""
import os
import numpy as np
def LoadShiftField(filename = None):
if not filename:
import wx
fdialog = wx.FileDialog(None, 'Select shift field',
wildcard='*.sf;*.h5;*.h5r', style=wx.FD_OPEN)
succ = fdialog.ShowModal()
if (succ == wx.ID_OK):
filename = fdialog.GetPath()
else:
return None
ext = os.path.splitext(filename)[1]
if ext in ['.sf']:  # os.path.splitext keeps the leading dot
return np.load(filename)
else:
#try and extract shiftfield from h5 / h5r file
try:
import tables
from PYME.IO.MetaDataHandler import HDFMDHandler
h5file = tables.open_file(filename)
mdh = HDFMDHandler(h5file)
dx = mdh.getEntry('chroma.dx')
dy = mdh.getEntry('chroma.dy')
return [dx,dy]
except:
return None
class Unmixer(object):
def __init__(self, shiftfield=None, pixelsize=70., flip=True, axis='up_down'):
self.pixelsize = pixelsize
self.flip = flip
self.axis = axis
if shiftfield:
self.SetShiftField(shiftfield)
def SetShiftField(self, shiftField, scope):
#self.shiftField = shiftField
#self.shiftFieldname = sfname
if self.axis == 'up_down':
X, Y = np.ogrid[:512, :256]
else:
X, Y = np.ogrid[:scope.cam.GetPicWidth()/2, :scope.cam.GetPicHeight()]
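# Precompute integer source coordinates for every pixel of the red channel,
# undoing the chromatic shift; the shift field is evaluated in physical units
# (presumably nm, given the hard-coded 70 nm/pixel scaling) and converted back
# to pixels.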
self.X2 = np.round(X - shiftField[0](X*70., Y*70.)/70.).astype('i')
self.Y2 = np.round(Y - shiftField[1](X*70., Y*70.)/70.).astype('i')
def _deshift(self, red_chan, ROI=[0,0,512, 512]):
if 'X2' in dir(self):
x1, y1, x2, y2 = ROI
#print self.X2.shape
if self.axis == 'up_down':
Xn = self.X2[x1:x2, y1:(y1 + red_chan.shape[1])] - x1
Yn = self.Y2[x1:x2, y1:(y1 + red_chan.shape[1])] - y1
else:
Xn = self.X2[x1:(x1 + red_chan.shape[0]), y1:y2-1] - x1
Yn = self.Y2[x1:(x1 + red_chan.shape[0]), y1:y2-1] - y1
#print Xn.shape
Xn = np.maximum(np.minimum(Xn, red_chan.shape[0]-1), 0)
Yn = np.maximum(np.minimum(Yn, red_chan.shape[1]-1), 0)
return red_chan[Xn, Yn]
else:
return red_chan
def METHOD_NAME(self, data, mixMatrix, offset, ROI=[0,0,512, 512]):
import scipy.linalg
from PYME.localisation import splitting
#from pylab import *
#from PYME.DSView.dsviewer_npy import View3D
umm = scipy.linalg.inv(mixMatrix)
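# Linear unmixing model: the camera observes mixed channels m = M @ s (+ offset),
# so the underlying signals are recovered as s = inv(M) @ (m - offset); the
# inverse matrix is applied pixel-wise to the two half-frames below.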
dsa = data.squeeze() - offset
g_, r_ = [dsa[roi[0]:roi[2], roi[1]:roi[3]] for roi in rois]
if self.flip:
if self.axis == 'up_down':
r_ = np.fliplr(r_)
else:
r_ = np.flipud(r_)
r_ = self._deshift(r_, ROI)
#print g_.shape, r_.shape
g = umm[0,0]*g_ + umm[0,1]*r_
r = umm[1,0]*g_ + umm[1,1]*r_
g = g*(g > 0)
r = r*(r > 0)
# figure()
# subplot(211)
# imshow(g.T, cmap=cm.hot)
#
# subplot(212)
# imshow(r.T, cmap=cm.hot)
#View3D([r.reshape(r.shape + (1,)),g.reshape(r.shape + (1,))])
return [r.reshape(r.shape + (1,)),g.reshape(r.shape + (1,))]
def Unmix(self, data, mixMatrix, offset, ROI=[0,0,512, 512]):
import scipy.linalg
#from pylab import *
#from PYME.DSView.dsviewer_npy import View3D
umm = scipy.linalg.inv(mixMatrix)
dsa = data.squeeze() - offset
if self.axis == 'up_down':
g_ = dsa[:, :int(dsa.shape[1]/2)]
r_ = dsa[:, int(dsa.shape[1]/2):]
if self.flip:
r_ = np.fliplr(r_)
r_ = self._deshift(r_, ROI)
else:
g_ = dsa[:int(dsa.shape[0]/2), :]
r_ = dsa[int(dsa.shape[0]/2):, :]
if self.flip:
r_ = np.flipud(r_)
r_ = self._deshift(r_, ROI)
#print g_.shape, r_.shape
g = umm[0,0]*g_ + umm[0,1]*r_
r = umm[1,0]*g_ + umm[1,1]*r_
g = g*(g > 0)
r = r*(r > 0)
# figure()
# subplot(211)
# imshow(g.T, cmap=cm.hot)
#
# subplot(212)
# imshow(r.T, cmap=cm.hot)
#View3D([r.reshape(r.shape + (1,)),g.reshape(r.shape + (1,))])
return [r.reshape(r.shape + (1,)),g.reshape(r.shape + (1,))] |
221 | test input cells have a value | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/react/canonical-data.json
# File last updated on 2023-07-19
from functools import partial
import unittest
from react import (
InputCell,
ComputeCell,
)
class ReactTest(unittest.TestCase):
def METHOD_NAME(self):
input = InputCell(10)
self.assertEqual(input.value, 10)
def test_an_input_cell_s_value_can_be_set(self):
input = InputCell(4)
input.value = 20
self.assertEqual(input.value, 20)
def test_compute_cells_calculate_initial_value(self):
input = InputCell(1)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
self.assertEqual(output.value, 2)
def test_compute_cells_take_inputs_in_the_right_order(self):
one = InputCell(1)
two = InputCell(2)
output = ComputeCell(
[
one,
two,
],
lambda inputs: inputs[0] + inputs[1] * 10,
)
self.assertEqual(output.value, 21)
def test_compute_cells_update_value_when_dependencies_are_changed(self):
input = InputCell(1)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
input.value = 3
self.assertEqual(output.value, 4)
def test_compute_cells_can_depend_on_other_compute_cells(self):
input = InputCell(1)
times_two = ComputeCell(
[
input,
],
lambda inputs: inputs[0] * 2,
)
times_thirty = ComputeCell(
[
input,
],
lambda inputs: inputs[0] * 30,
)
output = ComputeCell(
[
times_two,
times_thirty,
],
lambda inputs: inputs[0] + inputs[1],
)
self.assertEqual(output.value, 32)
input.value = 3
self.assertEqual(output.value, 96)
def test_compute_cells_fire_callbacks(self):
input = InputCell(1)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
cb1_observer = []
callback1 = self.callback_factory(cb1_observer)
output.add_callback(callback1)
input.value = 3
self.assertEqual(cb1_observer[-1], 4)
def test_callback_cells_only_fire_on_change(self):
input = InputCell(1)
output = ComputeCell([input], lambda inputs: 111 if inputs[0] < 3 else 222)
cb1_observer = []
callback1 = self.callback_factory(cb1_observer)
output.add_callback(callback1)
input.value = 2
self.assertEqual(cb1_observer, [])
input.value = 4
self.assertEqual(cb1_observer[-1], 222)
def test_callbacks_do_not_report_already_reported_values(self):
input = InputCell(1)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
cb1_observer = []
callback1 = self.callback_factory(cb1_observer)
output.add_callback(callback1)
input.value = 2
self.assertEqual(cb1_observer[-1], 3)
input.value = 3
self.assertEqual(cb1_observer[-1], 4)
def test_callbacks_can_fire_from_multiple_cells(self):
input = InputCell(1)
plus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
minus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] - 1,
)
cb1_observer = []
cb2_observer = []
callback1 = self.callback_factory(cb1_observer)
callback2 = self.callback_factory(cb2_observer)
plus_one.add_callback(callback1)
minus_one.add_callback(callback2)
input.value = 10
self.assertEqual(cb1_observer[-1], 11)
self.assertEqual(cb2_observer[-1], 9)
def test_callbacks_can_be_added_and_removed(self):
input = InputCell(11)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
cb1_observer = []
cb2_observer = []
cb3_observer = []
callback1 = self.callback_factory(cb1_observer)
callback2 = self.callback_factory(cb2_observer)
callback3 = self.callback_factory(cb3_observer)
output.add_callback(callback1)
output.add_callback(callback2)
input.value = 31
self.assertEqual(cb1_observer[-1], 32)
self.assertEqual(cb2_observer[-1], 32)
output.remove_callback(callback1)
output.add_callback(callback3)
input.value = 41
self.assertEqual(len(cb1_observer), 1)
self.assertEqual(cb2_observer[-1], 42)
self.assertEqual(cb3_observer[-1], 42)
def test_removing_a_callback_multiple_times_doesn_t_interfere_with_other_callbacks(
self,
):
input = InputCell(1)
output = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
cb1_observer = []
cb2_observer = []
callback1 = self.callback_factory(cb1_observer)
callback2 = self.callback_factory(cb2_observer)
output.add_callback(callback1)
output.add_callback(callback2)
output.remove_callback(callback1)
output.remove_callback(callback1)
output.remove_callback(callback1)
input.value = 2
self.assertEqual(cb1_observer, [])
self.assertEqual(cb2_observer[-1], 3)
def test_callbacks_should_only_be_called_once_even_if_multiple_dependencies_change(
self,
):
input = InputCell(1)
plus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
minus_one1 = ComputeCell(
[
input,
],
lambda inputs: inputs[0] - 1,
)
minus_one2 = ComputeCell(
[
minus_one1,
],
lambda inputs: inputs[0] - 1,
)
output = ComputeCell(
[
plus_one,
minus_one2,
],
lambda inputs: inputs[0] * inputs[1],
)
cb1_observer = []
callback1 = self.callback_factory(cb1_observer)
output.add_callback(callback1)
input.value = 4
self.assertEqual(cb1_observer[-1], 10)
def test_callbacks_should_not_be_called_if_dependencies_change_but_output_value_doesn_t_change(
self,
):
input = InputCell(1)
plus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] + 1,
)
minus_one = ComputeCell(
[
input,
],
lambda inputs: inputs[0] - 1,
)
always_two = ComputeCell(
[
plus_one,
minus_one,
],
lambda inputs: inputs[0] - inputs[1],
)
cb1_observer = []
callback1 = self.callback_factory(cb1_observer)
always_two.add_callback(callback1)
input.value = 2
self.assertEqual(cb1_observer, [])
input.value = 3
self.assertEqual(cb1_observer, [])
input.value = 4
self.assertEqual(cb1_observer, [])
input.value = 5
self.assertEqual(cb1_observer, [])
# Utility functions.
def callback_factory(self, observer):
def callback(observer, value):
observer.append(value)
return partial(callback, observer) |
222 | set parameters | from test import support
from test.support import import_helper, warnings_helper
import warnings
support.requires('audio')
from test.support import findfile
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
ossaudiodev = import_helper.import_module('ossaudiodev')
audioop = warnings_helper.import_deprecated('audioop')
sunau = warnings_helper.import_deprecated('sunau')
import errno
import sys
import time
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
from ossaudiodev import AFMT_S16_NE
except ImportError:
if sys.byteorder == "little":
AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
else:
AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
with open(path, 'rb') as fp:
au = sunau.open(fp)
rate = au.getframerate()
nchannels = au.getnchannels()
encoding = au._encoding
fp.seek(0)
data = fp.read()
if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
raise RuntimeError("Expect .au file with 8-bit mu-law samples")
# Convert the data to 16-bit signed.
data = audioop.ulaw2lin(data, 2)
return (data, rate, 16, nchannels)
class OSSAudioDevTests(unittest.TestCase):
def play_sound_file(self, data, rate, ssize, nchannels):
try:
dsp = ossaudiodev.open('w')
except OSError as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
# at least check that these methods can be invoked
dsp.bufsize()
dsp.obufcount()
dsp.obuffree()
dsp.getptr()
dsp.fileno()
# Make sure the read-only attributes work.
self.assertFalse(dsp.closed)
self.assertEqual(dsp.name, "/dev/dsp")
self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)
# And make sure they're really read-only.
for attr in ('closed', 'name', 'mode'):
try:
setattr(dsp, attr, 42)
except (TypeError, AttributeError):
pass
else:
self.fail("dsp.%s not read-only" % attr)
# Compute expected running time of sound sample (in seconds).
expected_time = float(len(data)) / (ssize/8) / nchannels / rate
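# i.e. total bytes / bytes-per-sample / channels / frames-per-second; for the
# bundled audiotest.au clip this comes out to roughly 3.5 seconds, which the
# assertion below sanity-checks.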
# set parameters based on .au file headers
dsp.setparameters(AFMT_S16_NE, nchannels, rate)
self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
t1 = time.monotonic()
dsp.write(data)
dsp.close()
t2 = time.monotonic()
elapsed_time = t2 - t1
percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
self.assertTrue(percent_diff <= 10.0,
"elapsed time (%s) > 10%% off of expected time (%s)" %
(elapsed_time, expected_time))
def METHOD_NAME(self, dsp):
# Two configurations for testing:
# config1 (8-bit, mono, 8 kHz) should work on even the most
# ancient and crufty sound card, but maybe not on special-
# purpose high-end hardware
# config2 (16-bit, stereo, 44.1kHz) should work on all but the
# most ancient and crufty hardware
config1 = (ossaudiodev.AFMT_U8, 1, 8000)
config2 = (AFMT_S16_NE, 2, 44100)
for config in [config1, config2]:
(fmt, channels, rate) = config
if (dsp.setfmt(fmt) == fmt and
dsp.channels(channels) == channels and
dsp.speed(rate) == rate):
break
else:
raise RuntimeError("unable to set audio sampling parameters: "
"you must have really weird audio hardware")
# setparameters() should be able to set this configuration in
# either strict or non-strict mode.
result = dsp.setparameters(fmt, channels, rate, False)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
result = dsp.setparameters(fmt, channels, rate, True)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
def set_bad_parameters(self, dsp):
# Now try some configurations that are presumably bogus: eg. 300
# channels currently exceeds even Hollywood's ambitions, and
# negative sampling rate is utter nonsense. setparameters() should
# accept these in non-strict mode, returning something other than
# was requested, but should barf in strict mode.
fmt = AFMT_S16_NE
rate = 44100
channels = 2
for config in [(fmt, 300, rate), # ridiculous nchannels
(fmt, -5, rate), # impossible nchannels
(fmt, channels, -50), # impossible rate
]:
(fmt, channels, rate) = config
result = dsp.setparameters(fmt, channels, rate, False)
self.assertNotEqual(result, config,
"unexpectedly got requested configuration")
try:
result = dsp.setparameters(fmt, channels, rate, True)
except ossaudiodev.OSSAudioError as err:
pass
else:
self.fail("expected OSSAudioError")
def test_playback(self):
sound_info = read_sound_file(findfile('audiotest.au'))
self.play_sound_file(*sound_info)
def test_set_parameters(self):
dsp = ossaudiodev.open("w")
try:
self.METHOD_NAME(dsp)
# Disabled because it fails under Linux 2.6 with ALSA's OSS
# emulation layer.
#self.set_bad_parameters(dsp)
finally:
dsp.close()
self.assertTrue(dsp.closed)
def test_mixer_methods(self):
# Issue #8139: ossaudiodev didn't initialize its types properly,
# therefore some methods were unavailable.
with ossaudiodev.openmixer() as mixer:
self.assertGreaterEqual(mixer.fileno(), 0)
def test_with(self):
with ossaudiodev.open('w') as dsp:
pass
self.assertTrue(dsp.closed)
def test_on_closed(self):
dsp = ossaudiodev.open('w')
dsp.close()
self.assertRaises(ValueError, dsp.fileno)
self.assertRaises(ValueError, dsp.read, 1)
self.assertRaises(ValueError, dsp.write, b'x')
self.assertRaises(ValueError, dsp.writeall, b'x')
self.assertRaises(ValueError, dsp.bufsize)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obuffree)
self.assertRaises(ValueError, dsp.getptr)
mixer = ossaudiodev.openmixer()
mixer.close()
self.assertRaises(ValueError, mixer.fileno)
def setUpModule():
try:
dsp = ossaudiodev.open('w')
except (ossaudiodev.error, OSError) as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
dsp.close()
if __name__ == "__main__":
unittest.main() |
223 | norm | """Classes to handle overlapping surfaces."""
# Authors: Guillaume Favelier <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
from collections import OrderedDict
import numpy as np
from ..utils import logger
class _Overlay:
def __init__(self, scalars, colormap, rng, opacity, name):
self._scalars = scalars
self._colormap = colormap
assert rng is not None
self._rng = rng
self._opacity = opacity
self._name = name
def to_colors(self):
from ._3d import _get_cmap
from matplotlib.colors import Colormap, ListedColormap
if isinstance(self._colormap, str):
cmap = _get_cmap(self._colormap)
elif isinstance(self._colormap, Colormap):
cmap = self._colormap
else:
cmap = ListedColormap(
self._colormap / 255.0, name=str(type(self._colormap))
)
logger.debug(
f"Color mapping {repr(self._name)} with {cmap.name} "
f"colormap and range {self._rng}"
)
rng = self._rng
assert rng is not None
scalars = self.METHOD_NAME(rng)
colors = cmap(scalars)
if self._opacity is not None:
colors[:, 3] *= self._opacity
return colors
def METHOD_NAME(self, rng):
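# Min-max normalise the scalars onto [0, 1] for colormap lookup; a degenerate
# range (rng[0] == rng[1]) falls back to a non-zero factor to avoid division
# by zero.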
if rng[0] == rng[1]:
factor = 1 if rng[0] == 0 else 1e-6 * rng[0]
else:
factor = rng[1] - rng[0]
return (self._scalars - rng[0]) / factor
class _LayeredMesh:
def __init__(self, renderer, vertices, triangles, normals):
self._renderer = renderer
self._vertices = vertices
self._triangles = triangles
self._normals = normals
self._polydata = None
self._actor = None
self._is_mapped = False
self._current_colors = None
self._cached_colors = None
self._overlays = OrderedDict()
self._default_scalars = np.ones(vertices.shape)
self._default_scalars_name = "Data"
def map(self):
kwargs = {
"color": None,
"pickable": True,
"rgba": True,
}
mesh_data = self._renderer.mesh(
x=self._vertices[:, 0],
y=self._vertices[:, 1],
z=self._vertices[:, 2],
triangles=self._triangles,
normals=self._normals,
scalars=self._default_scalars,
**kwargs,
)
self._actor, self._polydata = mesh_data
self._is_mapped = True
def _compute_over(self, B, A):
assert A.ndim == B.ndim == 2
assert A.shape[1] == B.shape[1] == 4
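# Straight-alpha "over" compositing of RGBA rows (A drawn on top of B):
#   out_alpha = a_A + a_B * (1 - a_A)
#   out_rgb   = (rgb_A * a_A + rgb_B * a_B * (1 - a_A)) / out_alpha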
A_w = A[:, 3:] # * 1
B_w = B[:, 3:] * (1 - A_w)
C = A.copy()
C[:, :3] *= A_w
C[:, :3] += B[:, :3] * B_w
C[:, 3:] += B_w
C[:, :3] /= C[:, 3:]
return np.clip(C, 0, 1, out=C)
def _compose_overlays(self):
B = cache = None
for overlay in self._overlays.values():
A = overlay.to_colors()
if B is None:
B = A
else:
cache = B
B = self._compute_over(cache, A)
return B, cache
def add_overlay(self, scalars, colormap, rng, opacity, name):
overlay = _Overlay(
scalars=scalars,
colormap=colormap,
rng=rng,
opacity=opacity,
name=name,
)
self._overlays[name] = overlay
colors = overlay.to_colors()
if self._current_colors is None:
self._current_colors = colors
else:
# save previous colors to cache
self._cached_colors = self._current_colors
self._current_colors = self._compute_over(self._cached_colors, colors)
# apply the texture
self._apply()
def remove_overlay(self, names):
to_update = False
if not isinstance(names, list):
names = [names]
for name in names:
if name in self._overlays:
del self._overlays[name]
to_update = True
if to_update:
self.update()
def _apply(self):
if self._current_colors is None or self._renderer is None:
return
self._renderer._set_mesh_scalars(
mesh=self._polydata,
scalars=self._current_colors,
name=self._default_scalars_name,
)
def update(self, colors=None):
if colors is not None and self._cached_colors is not None:
self._current_colors = self._compute_over(self._cached_colors, colors)
else:
self._current_colors, self._cached_colors = self._compose_overlays()
self._apply()
def _clean(self):
mapper = self._actor.GetMapper()
mapper.SetLookupTable(None)
self._actor.SetMapper(None)
self._actor = None
self._polydata = None
self._renderer = None
def update_overlay(self, name, scalars=None, colormap=None, opacity=None, rng=None):
overlay = self._overlays.get(name, None)
if overlay is None:
return
if scalars is not None:
overlay._scalars = scalars
if colormap is not None:
overlay._colormap = colormap
if opacity is not None:
overlay._opacity = opacity
if rng is not None:
overlay._rng = rng
# partial update: use cache if possible
if name == list(self._overlays.keys())[-1]:
self.update(colors=overlay.to_colors())
else: # full update
self.update() |
224 | test hard clipping log not expected | # Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Unit tests for the input mixer module.
"""
import logging
import os
import shutil
import sys
import tempfile
import unittest
SRC = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
sys.path.append(os.path.join(SRC, 'third_party', 'pymock'))
import mock
from . import exceptions
from . import input_mixer
from . import signal_processing
class TestApmInputMixer(unittest.TestCase):
"""Unit tests for the ApmInputMixer class.
"""
# Audio track file names created in setUp().
_FILENAMES = ['capture', 'echo_1', 'echo_2', 'shorter', 'longer']
# Target peak power level (dBFS) of each audio track file created in setUp().
# These values are hand-crafted so that saturation occurs when capture and
# echo_2 are mixed, but not when capture and echo_1 are mixed.
# None means that the power is not changed.
_MAX_PEAK_POWER_LEVELS = [-10.0, -5.0, 0.0, None, None]
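# For instance (derived from the comment above, not measured here): mixing capture
# (-10 dBFS peak) with echo_2 (0 dBFS peak) can exceed full scale and hard-clip,
# while mixing capture with echo_1 (-5 dBFS peak) is expected to stay below 0 dBFS.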
# Audio track file durations in milliseconds.
_DURATIONS = [1000, 1000, 1000, 800, 1200]
_SAMPLE_RATE = 48000
def setUp(self):
"""Creates temporary data."""
self._tmp_path = tempfile.mkdtemp()
# Create audio track files.
self._audio_tracks = {}
for filename, peak_power, duration in zip(
self._FILENAMES, self._MAX_PEAK_POWER_LEVELS, self._DURATIONS):
audio_track_filepath = os.path.join(self._tmp_path, '{}.wav'.format(
filename))
# Create a pure tone with the target peak power level.
template = signal_processing.SignalProcessingUtils.GenerateSilence(
duration=duration, sample_rate=self._SAMPLE_RATE)
signal = signal_processing.SignalProcessingUtils.GeneratePureTone(
template)
if peak_power is not None:
signal = signal.apply_gain(-signal.max_dBFS + peak_power)
signal_processing.SignalProcessingUtils.SaveWav(
audio_track_filepath, signal)
self._audio_tracks[filename] = {
'filepath': audio_track_filepath,
'num_samples': signal_processing.SignalProcessingUtils.CountSamples(
signal)
}
def tearDown(self):
"""Recursively deletes temporary folders."""
shutil.rmtree(self._tmp_path)
def testCheckMixSameDuration(self):
"""Checks the duration when mixing capture and echo with same duration."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testRejectShorterEcho(self):
"""Rejects echo signals that are shorter than the capture signal."""
try:
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['shorter']['filepath'])
self.fail('no exception raised')
except exceptions.InputMixerException:
pass
def testCheckMixDurationWithLongerEcho(self):
"""Checks the duration when mixing an echo longer than the capture."""
mix_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['longer']['filepath'])
self.assertTrue(os.path.exists(mix_filepath))
mix = signal_processing.SignalProcessingUtils.LoadWav(mix_filepath)
self.assertEqual(self._audio_tracks['capture']['num_samples'],
signal_processing.SignalProcessingUtils.CountSamples(mix))
def testCheckOutputFileNamesConflict(self):
"""Checks that different echo files lead to different output file names."""
mix1_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertTrue(os.path.exists(mix1_filepath))
mix2_filepath = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
self.assertTrue(os.path.exists(mix2_filepath))
self.assertNotEqual(mix1_filepath, mix2_filepath)
def testHardClippingLogExpected(self):
"""Checks that hard clipping warning is raised when occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_2']['filepath'])
logging.warning.assert_called_once_with(
input_mixer.ApmInputMixer.HardClippingLogMessage())
def METHOD_NAME(self):
"""Checks that hard clipping warning is not raised when not occurring."""
logging.warning = mock.MagicMock(name='warning')
_ = input_mixer.ApmInputMixer.Mix(
self._tmp_path,
self._audio_tracks['capture']['filepath'],
self._audio_tracks['echo_1']['filepath'])
self.assertNotIn(
mock.call(input_mixer.ApmInputMixer.HardClippingLogMessage()),
logging.warning.call_args_list) |
225 | test print | # Copyright HeteroCL authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import heterocl as hcl
import __test_codegen_harness as harness
import pytest
target = "vhls"
def test_dtype():
harness.test_dtype(
target,
[
"ap_int<3>",
"ap_uint<3>",
"int8_t", # ap_int<8>
"ap_fixed<5, 2>",
"ap_ufixed<5, 2>",
"ap_fixed<7, 3>",
],
)
@pytest.mark.skip(reason="print op to be supported")
def METHOD_NAME():
harness.METHOD_NAME(target)
def test_pragma():
harness.test_pragma(
target,
[
"#pragma HLS unroll factor=4",
"#pragma HLS pipeline II=2",
r"#pragma HLS array_partition variable=v\d* block dim=2 factor=2",
],
)
def test_set_bit():
harness.test_set_bit(target, "[4] = 1")
def test_set_slice():
harness.test_set_slice(target, "(4, 1) = 1")
def test_pack():
def pack(A):
return hcl.pack(A, factor=5)
A = hcl.placeholder((40,), "A", dtype=hcl.UInt(3))
s = hcl.create_schedule([A], pack)
code = hcl.build(s, target="vhls")
slice_range = "< 5"
assert slice_range in code
def test_index_split():
hcl.init()
A = hcl.placeholder((10, 10), "A")
B = hcl.compute(A.shape, lambda y, x: A[y][x], "B")
s = hcl.create_schedule([A])
s[B].split(B.axis[0], 5)
code = hcl.build(s, target="vhls")
assert "(y_outer * 5)" in code
assert "y_inner +" in code
def test_index_split_reshape():
hcl.init()
A = hcl.placeholder((10, 10), "A")
B = hcl.compute(A.shape, lambda y, x: A[y][x], "B")
s = hcl.create_schedule([A])
s[B].split(B.axis[0], 5)
s.reshape(B, (2, 5, 10))
code = hcl.build(s, target="vhls")
assert "[2][5][10]" in code
def test_index_fuse():
hcl.init()
A = hcl.placeholder((10, 10), "A")
B = hcl.compute(A.shape, lambda y, x: A[y][x], "B")
s = hcl.create_schedule([A])
s[B].fuse(B.axis[0], B.axis[1])
code = hcl.build(s, target="vhls")
assert "(y_x_fused % 10)" in code
assert "(y_x_fused / 10)" in code
def test_binary_conv():
hcl.init()
A = hcl.placeholder((1, 32, 14, 14), dtype=hcl.UInt(1), name="A")
B = hcl.placeholder((64, 32, 3, 3), dtype=hcl.UInt(1), name="B")
rc = hcl.reduce_axis(0, 32)
ry = hcl.reduce_axis(0, 3)
rx = hcl.reduce_axis(0, 3)
C = hcl.compute(
(1, 64, 12, 12),
lambda nn, ff, yy, xx: hcl.sum(
A[nn, rc, yy + ry, xx + rx] * B[ff, rc, ry, rx],
axis=[rc, ry, rx],
dtype=hcl.UInt(8),
),
dtype=hcl.UInt(8),
name="C",
)
s = hcl.create_schedule([A, B])
s[C].split(C.axis[1], factor=5)
code = hcl.build(s, target="vhls")
assert "for (int ff_outer = 0; ff_outer < 13; ff_outer++)" in code
assert (
"for (int ff_inner = 0; ff_inner < min(5, ((ff_outer * -5) + 64)); ff_inner++)"
in code
)
def test_legacy_interface():
hcl.init()
A = hcl.placeholder((10, 10), "A")
B = hcl.compute(A.shape, lambda y, x: A[y][x], "B")
s = hcl.create_schedule([A, B])
s[B].fuse(B.axis[0], B.axis[1])
code = hcl.build(s, target="vhls")
assert "v0[10][10]" in code
assert "v1[10][10]" in code
@pytest.mark.skip(reason="assertion error in type casting")
def test_select_type_cast():
def test_imm_ops():
A = hcl.placeholder((10, 10), "A")
def kernel(A):
return hcl.compute(
(8, 8),
lambda y, x: hcl.select(x < 4, A[y][x] + A[y + 2][x + 2], 0),
"B",
)
s = hcl.create_scheme(A, kernel)
s = hcl.create_schedule_from_scheme(s)
code = hcl.build(s, target="vhls")
assert "((-x) + 3) >= 0" in code
def test_uint_imm_ops():
A = hcl.placeholder((10, 10), "A", dtype=hcl.UInt(1))
def kernel(A):
return hcl.compute((8, 8), lambda y, x: hcl.select(x < 4, A[y][x], 0), "B")
s = hcl.create_scheme(A, kernel)
s = hcl.create_schedule_from_scheme(s)
code = hcl.build(s, target="vhls")
assert "(unsigned int)0U)" in code
def test_binary_ops():
A = hcl.placeholder((8, 8), "A", dtype=hcl.Int(20))
B = hcl.placeholder((8, 8), "B", dtype=hcl.Fixed(16, 12))
def kernel(A, B):
return hcl.compute(
(8, 8),
lambda y, x: hcl.select(x < 4, A[y][x], B[y][x]),
"C",
dtype=hcl.Int(8),
)
s = hcl.create_scheme([A, B], kernel)
s = hcl.create_schedule_from_scheme(s)
code = hcl.build(s, target="vhls")
assert "(ap_fixed<32, 20>)B" in code
def test_uint_int():
A = hcl.placeholder((8, 8), "A", dtype=hcl.Fixed(20, 12))
B = hcl.placeholder((8, 8), "B", dtype=hcl.UFixed(16, 12))
def kernel(A, B):
return hcl.compute(
(8, 8),
lambda y, x: hcl.select(x < 4, A[y][x], B[y][x]),
"C",
dtype=hcl.Int(8),
)
s = hcl.create_scheme([A, B], kernel)
s = hcl.create_schedule_from_scheme(s)
code = hcl.build(s, target="vhls")
assert "ap_ufixed<20, 8>)A" in code
test_imm_ops()
test_binary_ops()
test_uint_int()
test_uint_imm_ops() |
226 | main | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Return status of active recommendations on projects.
python get_projects_active_recommendation.py \
--organization="organizations/[YOUR-ORGANIZATION-ID]" \
--service_account_file_path="[FILE-PATH-TO-SERVICE-ACCOUNT]" \
--to_csv="[FILE-PATH-TO-STORE-THE-DATA]"
"""
import argparse
import collections
import logging
import common
from googleapiclient.discovery import build
import prettytable
from google.cloud import asset_v1
from google.oauth2 import service_account
# scopes for the credentials.
SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
RECOMMENDATION_TYPE = "google.iam.policy.Recommender"
# The rate limit sets the maximum number of requests that can be sent within a
# time window. It helps avoid exhausting the resource quota.
# RATE_LIMIT = (Number of request, duration (in seconds))
RATE_LIMIT = (6000, 60)
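# Interpretation of the tuple above (the enforcement itself is assumed to live in
# common.rate_limit_execution, which receives RATE_LIMIT below): at most 6000
# requests are issued within any 60-second window.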
def get_all_projects_using_asset_manager(organization, credentials):
"""Returns project ids using asset manager apis.
Args:
organization: (str) organization/[ORGANIZATION_ID]
credentials: client credentials
"""
project_prefix = "//cloudresourcemanager.googleapis.com/projects/"
client_v1 = asset_v1.AssetServiceClient(credentials=credentials)
all_projects = client_v1.search_all_resources(
scope=organization,
asset_types=["cloudresourcemanager.googleapis.com/Project"])
return [p.name[len(project_prefix):] for p in all_projects]
def accounts_can_made_safe(project_id, state, recommendations):
"""Compute the hero metrics number of accounts that can be made safe."""
principal_types = ["serviceAccount", "user", "group"]
columns = [
"Number of recommendations on {}".format(principal_type)
for principal_type in principal_types
]
safe_accounts = collections.defaultdict(set)
for recommendation in recommendations:
if recommendation.state != state:
continue
safe_accounts[recommendation.principal_type].add(
recommendation.principal)
return {
"project_id": project_id,
"stats": {
column: len(safe_accounts[principal_type])
for column, principal_type in zip(columns, principal_types)
}
}
def get_recommendation_summary_of_projects(project_ids, state, credentials):
"""Returns the summary of recommendations on all the given projects.
Args:
project_ids: List(str) project to which recommendation is needed.
state: state of recommendations
credentials: client credentials.
"""
recommender = build("recommender",
"v1",
credentials=credentials,
cache_discovery=False)
def get_metric(project_id):
recommendation_metric = common.get_recommendations(
project_id,
recommender=recommender,
state=state,
credentials=credentials)
return accounts_can_made_safe(project_id, state, recommendation_metric)
recommendation_stats = common.rate_limit_execution(get_metric, RATE_LIMIT,
project_ids)
recommendation_stats_sorted = sorted(
recommendation_stats, key=lambda metric: -sum(metric["stats"].values()))
return recommendation_stats_sorted
def to_print(metrics):
"""Print the recommendation data to console.
Args:
metrics: Recommendation data
"""
fields = [
"Metric Description", "Resource", "Service Accounts", "Users", "Groups",
"Total"
]
table = prettytable.PrettyTable(fields)
metric_name = "Number of active IAM recommendations"
for metric in metrics:
project_id = "projects/" + metric["project_id"]
stats = list(metric["stats"].values())
combine_stats = sum(stats)
table.add_row([metric_name, project_id, *stats, combine_stats])
print(table)
def to_csv(metrics, output_file):
"""Save the recommendation data into a csv.
Args:
metrics: Recommendation data
output_file: Location of output file
"""
fields = [
"Metric Description", "Resource", "Service Accounts", "Users", "Groups",
"Total"
]
columns = ",".join(fields) + "\n"
with open(output_file, "w") as f:
f.write(columns)
metric_name = "Number of active recommendation"
for metric in metrics:
project_id = "projects/" + metric["project_id"]
stats = list(metric["stats"].values())
combine_stats = sum(stats)
row = ",".join(
[metric_name, project_id, *map(str, stats),
str(combine_stats)]) + "\n"
f.write(row)
def METHOD_NAME():
parser = argparse.ArgumentParser(
description=
"Find recommendation status of projects from your organization.")
parser.add_argument(
"--organization",
required=True,
type=str,
help=
"Enter the organization id in the format organizations/[ORGANIZATION_ID]."
)
parser.add_argument(
"--service_account_file_path",
required=True,
type=str,
help="Enter the location of service account key for the resources.")
parser.add_argument(
"--to_csv",
type=str,
nargs="?",
default="",
help="Enter the csv file name to store the recommendation data.")
parser.add_argument("--recommendation_state",
type=str,
nargs="?",
default="ACTIVE",
help="Enter the state of recommendation.")
parser.add_argument("--log",
type=str,
nargs="?",
default="INFO",
help="Enter the log level.")
args = parser.parse_args()
logging.basicConfig(format="%(levelname)s[%(asctime)s]:%(message)s",
level="INFO")
credentials = service_account.Credentials.from_service_account_file(
args.service_account_file_path, scopes=SCOPES)
projects = get_all_projects_using_asset_manager(args.organization,
credentials)
recommendation_data = get_recommendation_summary_of_projects(
projects, args.recommendation_state, credentials)
if not args.to_csv:
to_print(recommendation_data)
else:
to_csv(recommendation_data, args.to_csv)
logging.info("The security status of your organization has been exported to %s.",
args.to_csv)
if __name__ == "__main__":
METHOD_NAME() |
227 | css classes | from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.forms.fields import EmailField
from django.forms.forms import DeclarativeFieldsMetaclass
from django.utils.safestring import mark_safe
from django_file_form.forms import FileFormMixin
from wagtail.contrib.forms.forms import BaseForm
from hypha.apply.users.utils import get_user_by_email, is_user_already_registered
class MixedFieldMetaclass(DeclarativeFieldsMetaclass):
"""Stores all fields passed to the class and not just the field type.
This allows the form to be rendered when Field-like blocks are passed
in as part of the definition
"""
def __new__(mcs, name, bases, attrs):
display = attrs.copy()
new_class = super(MixedFieldMetaclass, mcs).__new__(mcs, name, bases, attrs)
new_class.display = display
return new_class
class StreamBaseForm(FileFormMixin, forms.Form, metaclass=MixedFieldMetaclass):
def swap_fields_for_display(func):
def wrapped(self, *args, **kwargs):
# Replaces the form fields with the display fields
# it should only add new stream blocks and won't affect validation
fields = self.fields.copy()
self.fields = self.display
yield from func(self, *args, **kwargs)
self.fields = fields
return wrapped
@swap_fields_for_display
def __iter__(self):
yield from super().__iter__()
@swap_fields_for_display
def _html_output(self, *args, **kwargs):
return super()._html_output(*args, **kwargs)
def hidden_fields(self):
# No hidden fields are returned by default because of MixedFieldMetaclass
return [self[f] for f in self.fields.keys() if self[f].is_hidden]
def _update_files_data(self):
"""
Overridden method of django_file_form's FileFormMixin, to handle multiple forms on the same page.
"""
# handle two form_id, use case PAF and SOW
form_id = self.data.getlist(self.add_prefix("form_id"))
if not form_id:
return
form_id = form_id[0]
for field_name in self._file_form_field_names():
field = self.fields[field_name]
prefixed_field_name = self.add_prefix(field_name)
file_data = field.get_file_data(prefixed_field_name, form_id)
if file_data:
# NB: django-formtools wizard uses dict instead of MultiValueDict
if isinstance(file_data, list) and hasattr(self.files, "setlist"):
self.files.setlist(prefixed_field_name, file_data)
else:
self.files[prefixed_field_name] = file_data
def delete_temporary_files(self):
"""
Overridden method of django_file_form's FileFormMixin, to handle multiple forms on the same page.
"""
form_id = self.data.getlist(self.add_prefix("form_id"))
if not form_id:
return
form_id = form_id[0]
for field_name, field in self.fields.items():
if hasattr(field, "delete_file_data"):
prefixed_field_name = self.add_prefix(field_name)
field.delete_file_data(prefixed_field_name, form_id)
class PageStreamBaseForm(BaseForm, StreamBaseForm):
""" Adds page and user reference to the form class"""
def clean(self):
cleaned_data = super().clean()
for field, value in self.fields.items():
# email validation of submission form
if isinstance(value, EmailField):
email = self.data.get(field)
if email:
is_registered, _ = is_user_already_registered(email=self.data.get(field))
if is_registered:
user = get_user_by_email(email=email)
if not user:
self.add_error(field, 'Found multiple accounts')
raise ValidationError(mark_safe('Found multiple accounts for the same email. '
'Please log in with the correct credentials or '
'<a href="mailto:{}">'
'contact the support team'
'</a>.'.format(settings.ORG_EMAIL)))
elif not user.is_active:
self.add_error(field, 'Found an inactive account')
raise ValidationError(mark_safe('Found an inactive account for the same email. '
'Please use a different email or '
'<a href="mailto:{}">'
'contact the support team'
'</a>.'.format(settings.ORG_EMAIL)))
return cleaned_data
class BlockFieldWrapper:
"""Wraps stream blocks so that they can be rendered as a field within a form"""
is_hidden = False
label = None
help_text = None
def __init__(self, block):
self.block = block
def get_bound_field(self, *args, **kwargs):
return self
def METHOD_NAME(self):
return []
@property
def errors(self):
return []
@property
def html_name(self):
return self.block.id
def __str__(self):
return str(self.block.value) |
228 | snapshot | # This file is part of Scapy
# Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
# 2015, 2016, 2017 Maxence Tury
# This program is published under a GPLv2 license
"""
Block ciphers.
"""
from __future__ import absolute_import
from scapy.config import conf
from scapy.layers.tls.crypto.common import CipherError
import scapy.modules.six as six
if conf.crypto_valid:
from cryptography.utils import register_interface
from cryptography.hazmat.primitives.ciphers import (Cipher, algorithms, modes, # noqa: E501
BlockCipherAlgorithm,
CipherAlgorithm)
from cryptography.hazmat.backends.openssl.backend import (backend,
GetCipherByName)
_tls_block_cipher_algs = {}
class _BlockCipherMetaclass(type):
"""
Cipher classes are automatically registered through this metaclass.
Furthermore, their name attribute is extracted from their class name.
"""
def __new__(cls, ciph_name, bases, dct):
if ciph_name != "_BlockCipher":
dct["name"] = ciph_name[7:] # remove leading "Cipher_"
the_class = super(_BlockCipherMetaclass, cls).__new__(cls, ciph_name,
bases, dct)
if ciph_name != "_BlockCipher":
_tls_block_cipher_algs[ciph_name[7:]] = the_class
return the_class
class _BlockCipher(six.with_metaclass(_BlockCipherMetaclass, object)):
type = "block"
def __init__(self, key=None, iv=None):
self.ready = {"key": True, "iv": True}
if key is None:
self.ready["key"] = False
if hasattr(self, "expanded_key_len"):
key_len = self.expanded_key_len
else:
key_len = self.key_len
key = b"\0" * key_len
if not iv:
self.ready["iv"] = False
iv = b"\0" * self.block_size
# we use super() in order to avoid any deadlock with __setattr__
super(_BlockCipher, self).__setattr__("key", key)
super(_BlockCipher, self).__setattr__("iv", iv)
self._cipher = Cipher(self.pc_cls(key),
self.pc_cls_mode(iv),
backend=backend)
def __setattr__(self, name, val):
if name == "key":
if self._cipher is not None:
self._cipher.algorithm.key = val
self.ready["key"] = True
elif name == "iv":
if self._cipher is not None:
self._cipher.mode._initialization_vector = val
self.ready["iv"] = True
super(_BlockCipher, self).__setattr__(name, val)
def encrypt(self, data):
"""
Encrypt the data. Also, update the cipher iv. This is needed for SSLv3
and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.post_build().
"""
if False in six.itervalues(self.ready):
raise CipherError(data)
encryptor = self._cipher.encryptor()
tmp = encryptor.update(data) + encryptor.finalize()
self.iv = tmp[-self.block_size:]
return tmp
def decrypt(self, data):
"""
Decrypt the data. Also, update the cipher iv. This is needed for SSLv3
and TLS 1.0. For TLS 1.1/1.2, it is overwritten in TLS.pre_dissect().
If we lack the key, we raise a CipherError which contains the input.
"""
if False in six.itervalues(self.ready):
raise CipherError(data)
decryptor = self._cipher.decryptor()
tmp = decryptor.update(data) + decryptor.finalize()
self.iv = data[-self.block_size:]
return tmp
def METHOD_NAME(self):
c = self.__class__(self.key, self.iv)
c.ready = self.ready.copy()
return c
if conf.crypto_valid:
class Cipher_AES_128_CBC(_BlockCipher):
pc_cls = algorithms.AES
pc_cls_mode = modes.CBC
block_size = 16
key_len = 16
class Cipher_AES_256_CBC(Cipher_AES_128_CBC):
key_len = 32
class Cipher_CAMELLIA_128_CBC(_BlockCipher):
pc_cls = algorithms.Camellia
pc_cls_mode = modes.CBC
block_size = 16
key_len = 16
class Cipher_CAMELLIA_256_CBC(Cipher_CAMELLIA_128_CBC):
key_len = 32
# Mostly deprecated ciphers
if conf.crypto_valid:
class Cipher_DES_CBC(_BlockCipher):
pc_cls = algorithms.TripleDES
pc_cls_mode = modes.CBC
block_size = 8
key_len = 8
class Cipher_DES40_CBC(Cipher_DES_CBC):
"""
This is an export cipher example. The key length has been weakened to 5
random bytes (i.e. 5 bytes will be extracted from the master_secret).
Yet, we still need to know the original length which will actually be
fed into the encryption algorithm. This is what expanded_key_len
is for, and it gets used in PRF.postprocess_key_for_export().
We never define this attribute with non-export ciphers.
"""
expanded_key_len = 8
key_len = 5
class Cipher_3DES_EDE_CBC(_BlockCipher):
pc_cls = algorithms.TripleDES
pc_cls_mode = modes.CBC
block_size = 8
key_len = 24
class Cipher_IDEA_CBC(_BlockCipher):
pc_cls = algorithms.IDEA
pc_cls_mode = modes.CBC
block_size = 8
key_len = 16
class Cipher_SEED_CBC(_BlockCipher):
pc_cls = algorithms.SEED
pc_cls_mode = modes.CBC
block_size = 16
key_len = 16
_sslv2_block_cipher_algs = {}
if conf.crypto_valid:
_sslv2_block_cipher_algs.update({
"IDEA_128_CBC": Cipher_IDEA_CBC,
"DES_64_CBC": Cipher_DES_CBC,
"DES_192_EDE3_CBC": Cipher_3DES_EDE_CBC
})
# We need some black magic for RC2, which is not registered by default
# to the openssl backend of the cryptography library.
# If the current version of openssl does not support rc2, the RC2 ciphers are
# silently not declared, and the corresponding suites will have 'usable' False.
if conf.crypto_valid:
@register_interface(BlockCipherAlgorithm)
@register_interface(CipherAlgorithm)
class _ARC2(object):
name = "RC2"
block_size = 64
key_sizes = frozenset([128])
def __init__(self, key):
self.key = algorithms._verify_key_size(self, key)
@property
def key_size(self):
return len(self.key) * 8
_gcbn_format = "{cipher.name}-{mode.name}"
if GetCipherByName(_gcbn_format)(backend, _ARC2, modes.CBC) != \
backend._ffi.NULL:
class Cipher_RC2_CBC(_BlockCipher):
pc_cls = _ARC2
pc_cls_mode = modes.CBC
block_size = 8
key_len = 16
class Cipher_RC2_CBC_40(Cipher_RC2_CBC):
expanded_key_len = 16
key_len = 5
backend.register_cipher_adapter(Cipher_RC2_CBC.pc_cls,
Cipher_RC2_CBC.pc_cls_mode,
GetCipherByName(_gcbn_format))
_sslv2_block_cipher_algs["RC2_128_CBC"] = Cipher_RC2_CBC
_tls_block_cipher_algs.update(_sslv2_block_cipher_algs) |
229 | get total subsampling factor | """ASR Interface module."""
import argparse
from espnet.bin.asr_train import get_parser
from espnet.utils.dynamic_import import dynamic_import
from espnet.utils.fill_missing_args import fill_missing_args
class ASRInterface:
"""ASR Interface for ESPnet model implementation."""
@staticmethod
def add_arguments(parser):
"""Add arguments to parser."""
return parser
@classmethod
def build(cls, idim: int, odim: int, **kwargs):
"""Initialize this class with python-level args.
Args:
idim (int): The number of an input feature dim.
odim (int): The number of output vocab.
Returns:
ASRInterface: A new instance of ASRInterface.
"""
def wrap(parser):
return get_parser(parser, required=False)
args = argparse.Namespace(**kwargs)
args = fill_missing_args(args, wrap)
args = fill_missing_args(args, cls.add_arguments)
return cls(idim, odim, args)
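# Hypothetical usage sketch (subclass name and argument values are illustrative):
#   model = MyE2EModel.build(idim=83, odim=5002, mtlalpha=0.3)
# Keyword arguments that are not supplied are filled in first from the
# espnet.bin.asr_train parser defaults and then from the subclass add_arguments,
# before the resulting namespace is passed to the constructor.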
def forward(self, xs, ilens, ys):
"""Compute loss for training.
:param xs:
For pytorch, batch of padded source sequences torch.Tensor (B, Tmax, idim)
For chainer, list of source sequences chainer.Variable
:param ilens: batch of lengths of source sequences (B)
For pytorch, torch.Tensor
For chainer, list of int
:param ys:
For pytorch, batch of padded source sequences torch.Tensor (B, Lmax)
For chainer, list of source sequences chainer.Variable
:return: loss value
:rtype: torch.Tensor for pytorch, chainer.Variable for chainer
"""
raise NotImplementedError("forward method is not implemented")
def recognize(self, x, recog_args, char_list=None, rnnlm=None):
"""Recognize x for evaluation.
:param ndarray x: input acoustic feature (B, T, D) or (T, D)
:param namespace recog_args: argument namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
raise NotImplementedError("recognize method is not implemented")
def recognize_batch(self, x, recog_args, char_list=None, rnnlm=None):
"""Beam search implementation for batch.
:param torch.Tensor x: encoder hidden state sequences (B, Tmax, Henc)
:param namespace recog_args: argument namespace containing options
:param list char_list: list of characters
:param torch.nn.Module rnnlm: language model module
:return: N-best decoding results
:rtype: list
"""
raise NotImplementedError("Batch decoding is not supported yet.")
def calculate_all_attentions(self, xs, ilens, ys):
"""Calculate attention.
:param list xs: list of padded input sequences [(T1, idim), (T2, idim), ...]
:param ndarray ilens: batch of lengths of input sequences (B)
:param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
:return: attention weights (B, Lmax, Tmax)
:rtype: float ndarray
"""
raise NotImplementedError("calculate_all_attentions method is not implemented")
def calculate_all_ctc_probs(self, xs, ilens, ys):
"""Calculate CTC probability.
:param list xs: list of padded input sequences [(T1, idim), (T2, idim), ...]
:param ndarray ilens: batch of lengths of input sequences (B)
:param list ys: list of character id sequence tensor [(L1), (L2), (L3), ...]
:return: CTC probabilities (B, Tmax, vocab)
:rtype: float ndarray
"""
raise NotImplementedError("calculate_all_ctc_probs method is not implemented")
@property
def attention_plot_class(self):
"""Get attention plot class."""
from espnet.asr.asr_utils import PlotAttentionReport
return PlotAttentionReport
@property
def ctc_plot_class(self):
"""Get CTC plot class."""
from espnet.asr.asr_utils import PlotCTCReport
return PlotCTCReport
def METHOD_NAME(self):
"""Get total subsampling factor."""
raise NotImplementedError(
"get_total_subsampling_factor method is not implemented"
)
def encode(self, feat):
"""Encode feature in `beam_search` (optional).
Args:
x (numpy.ndarray): input feature (T, D)
Returns:
torch.Tensor for pytorch, chainer.Variable for chainer:
encoded feature (T, D)
"""
raise NotImplementedError("encode method is not implemented")
def scorers(self):
"""Get scorers for `beam_search` (optional).
Returns:
dict[str, ScorerInterface]: dict of `ScorerInterface` objects
"""
raise NotImplementedError("decoders method is not implemented")
predefined_asr = {
"pytorch": {
"rnn": "espnet.nets.pytorch_backend.e2e_asr:E2E",
"transducer": "espnet.nets.pytorch_backend.e2e_asr_transducer:E2E",
"transformer": "espnet.nets.pytorch_backend.e2e_asr_transformer:E2E",
"conformer": "espnet.nets.pytorch_backend.e2e_asr_conformer:E2E",
},
"chainer": {
"rnn": "espnet.nets.chainer_backend.e2e_asr:E2E",
"transformer": "espnet.nets.chainer_backend.e2e_asr_transformer:E2E",
},
}
def dynamic_import_asr(module, backend):
"""Import ASR models dynamically.
Args:
module (str): module_name:class_name or alias in `predefined_asr`
backend (str): NN backend. e.g., pytorch, chainer
Returns:
type: ASR class
"""
model_class = dynamic_import(module, predefined_asr.get(backend, dict()))
assert issubclass(
model_class, ASRInterface
), f"{module} does not implement ASRInterface"
return model_class |
230 | get device xml | """PyOhNet: control point device support
"""
import PyOhNet
import ctypes
import re
import sys
import types
import xml.etree.ElementTree as ET
import _Cp._GenProxy as GenProxy
try:
# python 2.x
from urlparse import urljoin
from urllib2 import urlopen
except:
# python 3.x
from urllib.parse import urljoin
from urllib.request import urlopen
class Device():
"""UPnP Device (from perspective of a control point)"""
def __init__( self, aHandle ):
self.lib = PyOhNet.lib
self.handle = ctypes.c_void_p( aHandle )
self.lib.CpDeviceCAddRef( self.handle )
self.proxies = []
PyOhNet.devices.append( self )
def __str__( self ):
msg = 'Device %s (%s)' % (self.friendlyName, self.udn)
for proxy in self.proxies:
msg += '\n'
msg += eval( 'self.%s.__str__()' % proxy )
return msg
#
# ==== Internal methods ====
#
def _GetAttribute( self, aAttr ):
result = ''
request = ctypes.c_char_p( aAttr.encode( 'utf8', 'replace' ))
response = ctypes.c_char_p()
self.lib.CpDeviceCGetAttribute( self.handle, request, ctypes.byref( response ))
if response.value:
result = response.value.decode( 'utf8', 'replace' )
return result
def _GetServices( self ):
"""Returns list of services reported by device"""
result = []
baseUrl = re.match(r'^(.+)(//)([\w\.:]+)', self.location ).group()
xml = re.sub( ' xmlns="[^"]+"', '', self.deviceXml ) # remove namespace
root = ET.fromstring( xml )
devices = root.findall( 'device' )
for device in devices:
serviceList = device.find( 'serviceList' )
services = serviceList.findall( 'service' )
for service in services:
svType = service.find( 'serviceType' ).text
url = urljoin( baseUrl, service.find( 'SCPDURL' ).text )
m = re.match(r'urn:([\w\-\.]+):service:([\w\-\.]+):(\d+)', svType )
domain, name, version = m.groups()
domainName = ''
fields = domain.replace( '.', '-' ).split( '-' )
for field in fields:
domainName += field[0].upper()
domainName += field[1:]
result.append( {'type': svType, 'url': url, 'domain': domainName, 'name': name, 'version': int( version )} )
return result
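# Worked example of the parsing above (hypothetical device data): a service type
# 'urn:av-openhome-org:service:Product:1' yields domain 'av-openhome-org',
# name 'Product', version 1; the domain is camel-cased field by field into
# 'AvOpenhomeOrg', so the resulting entry carries
# {'domain': 'AvOpenhomeOrg', 'name': 'Product', 'version': 1, ...}.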
# noinspection PyUnusedLocal
def _AddProxy( self, aService ):
"""Generate and add proxy for specified service"""
# The proxy code is auto-generated (from the service XML) and then
# imported and added as a class attribute to the device. Named as
# 'DomainService'. All ohNet proxy actions and properties are
# accessible via these attributes
#
# TestBasic service -> device.testBasic
# AVTransport service -> device.aVTRansport
attrName = aService['domain'] + aService['name']
proxyName = 'CpProxy%s%s%s' % \
(aService['domain'], aService['name'][0].upper() + aService['name'][1:], aService['version'])
# generate the proxy from the service XML
serviceXml = urlopen( aService['url'] ).read()  # NOTE: known failure point in discovery
# We get the list of services back from ohNet, but opening the URL for the FIRST
# service can fail, so the proxy cannot be created and everything eventually times
# out. The cause is unknown - most likely the product does not respond. Experimenting
# with delays has not helped much (adding prints helped a little, so a small delay
# somewhere will probably reduce this).
proxy = GenProxy.GenProxy( aService['type'], serviceXml )
# 'import' the generated proxies
# we always rebuild the module with the most recently read service
# definition, as service implementation details can vary between
# different implementations of the same services. It would be more efficient
# to only build the module on the first request and re-use it, but this DOES
# give issues in the case described above.
if proxyName in sys.modules.keys():
proxyModule = sys.modules[proxyName]
else:
proxyModule = types.ModuleType( proxyName )
sys.modules[proxyName] = proxyModule
exec( proxy.text, proxyModule.__dict__ )
# # FOR DEBUG - write generated proxies to, and import from file
# # NOTE - no mutex protection on file access, so do NOT use this
# # technique except when debugging is necessary
# import os
# head, tail = os.path.split( os.path.dirname( __file__ ))
# proxyDir = os.path.join( head, '.GeneratedProxies' )
# if not os.path.exists( proxyDir ):
# os.mkdir( proxyDir )
# if proxyDir not in sys.path:
# sys.path.append( proxyDir )
# proxyPath = os.path.join( proxyDir, proxyName + '.py' )
# proxy.Write( proxyPath )
# exec( 'import %s as proxyModule' % proxyName )
# add to list of available proxies
setattr( self, attrName, eval( 'proxyModule.%s( self )' % proxyName ))
self.proxies.append( attrName )
def _GetUdn( self ):
udn = ctypes.c_char_p()
length = ctypes.c_int()
self.lib.CpDeviceCGetUdn( self.handle, ctypes.byref( udn ), ctypes.byref( length ))
return udn.value.decode( 'utf8', 'replace' )
def _GetFriendlyName( self ):
return self._GetAttribute( 'Upnp.FriendlyName' )
def METHOD_NAME( self ):
return self._GetAttribute( 'Upnp.DeviceXml' )
def _GetLocation( self ):
return self._GetAttribute( 'Upnp.Location' )
#
# ==== Public interface ====
#
def Start( self, aProxies=None ):
"""Start device - add proxies for all or specified services on device"""
if not aProxies:
aProxies = ['all']
services = self._GetServices()
for service in services:
if service['name'] in aProxies or 'all' in aProxies:
self._AddProxy( service )
def Shutdown( self ):
if self.handle:
try:
PyOhNet.devices.remove( self )
except:
pass
self.lib.CpDeviceCRemoveRef( self.handle )
self.handle = None
friendlyName = property( _GetFriendlyName, None, None, '' )
udn = property( _GetUdn, None, None, '' )
deviceXml = property( METHOD_NAME, None, None, '' )
location = property( _GetLocation, None, None, '' ) |
231 | unbind control lora | import torch
from contextlib import contextmanager
from typing import Union, Tuple
_size_2_t = Union[int, Tuple[int, int]]
class LinearWithLoRA(torch.nn.Module):
def __init__(
self,
in_features: int,
out_features: int,
bias: bool = True,
device=None,
dtype=None) -> None:
super().__init__()
self.weight_module = None
self.up = None
self.down = None
self.bias = None
self.in_features = in_features
self.out_features = out_features
self.device = device
self.dtype = dtype
self.weight = None
def bind_lora(self, weight_module):
self.weight_module = [weight_module]
def unbind_lora(self):
if self.up is not None and self.down is not None: # SAI's model is weird and needs this
self.weight_module = None
def get_original_weight(self):
if self.weight_module is None:
return None
return self.weight_module[0].weight
def forward(self, x):
if self.weight is not None:
return torch.nn.functional.linear(x, self.weight.to(x),
self.bias.to(x) if self.bias is not None else None)
original_weight = self.get_original_weight()
if original_weight is None:
return None # A1111 needs first_time_calculation
if self.up is not None and self.down is not None:
weight = original_weight.to(x) + torch.mm(self.up, self.down).to(x)
else:
weight = original_weight.to(x)
return torch.nn.functional.linear(x, weight, self.bias.to(x) if self.bias is not None else None)
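# The composition above is the usual LoRA update: the effective weight is
# W_eff = W + up @ down, where `up` is (out_features, rank) and `down` is
# (rank, in_features) (shapes inferred from the matrix product, not asserted
# here). Keeping the rank small keeps the extra parameters cheap while still
# shifting every output channel.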
class Conv2dWithLoRA(torch.nn.Module):
def __init__(
self,
in_channels: int,
out_channels: int,
kernel_size: _size_2_t,
stride: _size_2_t = 1,
padding: Union[str, _size_2_t] = 0,
dilation: _size_2_t = 1,
groups: int = 1,
bias: bool = True,
padding_mode: str = 'zeros',
device=None,
dtype=None
) -> None:
super().__init__()
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
self.weight_module = None
self.bias = None
self.up = None
self.down = None
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.padding_mode = padding_mode
self.device = device
self.dtype = dtype
self.weight = None
def bind_lora(self, weight_module):
self.weight_module = [weight_module]
def unbind_lora(self):
if self.up is not None and self.down is not None: # SAI's model is weird and needs this
self.weight_module = None
def get_original_weight(self):
if self.weight_module is None:
return None
return self.weight_module[0].weight
def forward(self, x):
if self.weight is not None:
return torch.nn.functional.conv2d(x, self.weight.to(x), self.bias.to(x) if self.bias is not None else None,
self.stride, self.padding, self.dilation, self.groups)
original_weight = self.get_original_weight()
if original_weight is None:
return None # A1111 needs first_time_calculation
if self.up is not None and self.down is not None:
weight = original_weight.to(x) + torch.mm(self.up.flatten(start_dim=1), self.down.flatten(start_dim=1)).reshape(original_weight.shape).to(x)
else:
weight = original_weight.to(x)
return torch.nn.functional.conv2d(x, weight, self.bias.to(x) if self.bias is not None else None,
self.stride, self.padding, self.dilation, self.groups)
@contextmanager
def controlnet_lora_hijack():
linear, conv2d = torch.nn.Linear, torch.nn.Conv2d
torch.nn.Linear, torch.nn.Conv2d = LinearWithLoRA, Conv2dWithLoRA
try:
yield
finally:
torch.nn.Linear, torch.nn.Conv2d = linear, conv2d
def recursive_set(obj, key, value):
if obj is None:
return
if '.' in key:
k1, k2 = key.split('.', 1)
recursive_set(getattr(obj, k1, None), k2, value)
else:
setattr(obj, key, value)
def force_load_state_dict(model, state_dict):
for k in list(state_dict.keys()):
recursive_set(model, k, torch.nn.Parameter(state_dict[k]))
del state_dict[k]
return
def recursive_bind_lora(obj, key, value):
if obj is None:
return
if '.' in key:
k1, k2 = key.split('.', 1)
recursive_bind_lora(getattr(obj, k1, None), k2, value)
else:
target = getattr(obj, key, None)
if target is not None and hasattr(target, 'bind_lora'):
target.bind_lora(value)
def recursive_get(obj, key):
if obj is None:
return
if '.' in key:
k1, k2 = key.split('.', 1)
return recursive_get(getattr(obj, k1, None), k2)
else:
return getattr(obj, key, None)
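# Example of the dotted-path helpers above (attribute names are made up):
#   recursive_set(model, 'encoder.proj.weight', value)
# walks getattr(model, 'encoder'), then getattr(..., 'proj'), and finally does
# setattr(..., 'weight', value); recursive_get follows the same path and returns
# the leaf attribute. bind_control_lora below uses recursive_get on the base model
# and the analogous recursive_bind_lora on the LoRA model to pair modules by
# dotted name.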
def bind_control_lora(base_model, control_lora_model):
sd = base_model.state_dict()
keys = list(sd.keys())
keys = list(set([k.rsplit('.', 1)[0] for k in keys]))
module_dict = {k: recursive_get(base_model, k) for k in keys}
for k, v in module_dict.items():
recursive_bind_lora(control_lora_model, k, v)
def torch_dfs(model: torch.nn.Module):
result = [model]
for child in model.children():
result += torch_dfs(child)
return result
def METHOD_NAME(control_lora_model):
for m in torch_dfs(control_lora_model):
if hasattr(m, 'unbind_lora'):
m.unbind_lora()
return |
232 | add | """
A convenience system to manage reactors
Beginning in the 2017.7 release, the reactor runner requires that the reactor
system is running. This is accomplished one of two ways, either
by having reactors configured or by including ``reactor`` in the
engine configuration for the Salt master.
.. code-block:: yaml
engines:
- reactor
"""
import logging
import salt.config
import salt.syspaths
import salt.utils.event
import salt.utils.master
import salt.utils.process
import salt.utils.reactor
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
__func_alias__ = {
"list_": "list",
}
def _reactor_system_available():
"""
Helper to see if the reactor system is available
"""
if __opts__.get("engines", {}):
if any([True for engine in __opts__["engines"] if "reactor" in engine]):
return True
elif __opts__.get("reactor", {}) and __opts__["reactor"]:
return True
return False
def list_(saltenv="base", test=None):
"""
List currently configured reactors
CLI Example:
.. code-block:: bash
salt-run reactor.list
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event({"key": master_key}, "salt/reactors/manage/list")
results = sevent.get_event(wait=30, tag="salt/reactors/manage/list-results")
reactors = results.get("reactors")
return reactors
def METHOD_NAME(event, reactors, saltenv="base", test=None):
"""
Add a new reactor
CLI Example:
.. code-block:: bash
salt-run reactor.add 'salt/cloud/*/destroyed' reactors='/srv/reactor/destroy/*.sls'
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
if isinstance(reactors, str):
reactors = [reactors]
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event(
{"event": event, "reactors": reactors, "key": master_key},
"salt/reactors/manage/add",
)
res = sevent.get_event(wait=30, tag="salt/reactors/manage/add-complete")
return res.get("result")
def delete(event, saltenv="base", test=None):
"""
Delete a reactor
CLI Example:
.. code-block:: bash
salt-run reactor.delete 'salt/cloud/*/destroyed'
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event(
{"event": event, "key": master_key}, "salt/reactors/manage/delete"
)
res = sevent.get_event(wait=30, tag="salt/reactors/manage/delete-complete")
return res.get("result")
def is_leader():
"""
Return whether the running reactor is acting as a leader (responding to events).
CLI Example:
.. code-block:: bash
salt-run reactor.is_leader
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event({"key": master_key}, "salt/reactors/manage/is_leader")
res = sevent.get_event(wait=30, tag="salt/reactors/manage/leader/value")
return res["result"]
def set_leader(value=True):
"""
Set the current reactor to act as a leader (responding to events). Defaults to True
CLI Example:
.. code-block:: bash
salt-run reactor.set_leader True
"""
if not _reactor_system_available():
raise CommandExecutionError("Reactor system is not running.")
with salt.utils.event.get_event(
"master",
__opts__["sock_dir"],
opts=__opts__,
listen=True,
) as sevent:
master_key = salt.utils.master.get_master_key("root", __opts__)
__jid_event__.fire_event(
{"id": __opts__["id"], "value": value, "key": master_key},
"salt/reactors/manage/set_leader",
)
res = sevent.get_event(wait=30, tag="salt/reactors/manage/leader/value")
return res["result"] |
233 | pass retrieve condition | #!/usr/bin/env python
"""
_WorkQueueManagerPoller_
Pull work out of the work queue.
"""
import logging
import random
import threading
import time
from Utils.Timers import timeFunction
from WMComponent.JobSubmitter.JobSubmitAPI import availableScheddSlots
from WMCore.DAOFactory import DAOFactory
from WMCore.Services.PyCondor.PyCondorAPI import PyCondorAPI
from WMCore.Services.ReqMgrAux.ReqMgrAux import isDrainMode
from WMCore.WorkerThreads.BaseWorkerThread import BaseWorkerThread
class WorkQueueManagerWorkPoller(BaseWorkerThread):
"""
Polls for Work
"""
def __init__(self, queue, config):
"""
Initialise class members
"""
BaseWorkerThread.__init__(self)
myThread = threading.currentThread()
self.queue = queue
self.config = config
self.condorAPI = PyCondorAPI()
self.daoFactory = DAOFactory(package="WMCore.WMBS", logger=logging, dbinterface=myThread.dbi)
self.listSubsWithoutJobs = self.daoFactory(classname="Subscriptions.GetSubsWithoutJobGroup")
def setup(self, parameters):
"""
Called at startup - introduce random delay
to avoid workers all starting at once
"""
t = random.randrange(self.idleTime)
self.logger.info('Sleeping for %d seconds before 1st loop', t)
time.sleep(t)
@timeFunction
def algorithm(self, parameters):
"""
Pull in work
"""
self.logger.info("Starting WorkQueueManagerWorkPoller thread ...")
try:
self.pullWork()
except Exception as ex:
self.queue.logger.error("Error in work pull loop: %s", str(ex))
try:
# process if we get work or not - we may have to split old work
# i.e. if transient errors were seen during splitting
self.processWork()
except Exception as ex:
self.queue.logger.error("Error in new work split loop: %s", str(ex))
return
def METHOD_NAME(self):
"""
_passRetrieveCondition_
Return "OK" if the component can proceed with fetching work, otherwise
a short string describing why pulling work should be skipped this cycle.
For now, it only checks whether the agent is in drain mode or
MAX_JOBS_PER_OWNER is reached or if the condor schedd is overloaded.
"""
passCond = "OK"
myThread = threading.currentThread()
if isDrainMode(self.config):
passCond = "agent is in drain mode"
elif availableScheddSlots(myThread.dbi) <= 0:
passCond = "schedd slot is maxed: MAX_JOBS_PER_OWNER"
elif self.condorAPI.isScheddOverloaded():
passCond = "schedd is overloaded"
else:
subscriptions = self.listSubsWithoutJobs.execute()
if subscriptions:
passCond = "JobCreator hasn't created jobs for subscriptions %s" % subscriptions
return passCond
def pullWork(self):
"""Get work from parent"""
self.queue.logger.info("Pulling work from %s", self.queue.parent_queue.queueUrl)
myThread = threading.currentThread()
try:
cond = self.METHOD_NAME()
if cond == "OK":
work = self.queue.pullWork()
self.queue.logger.info("Obtained %s unit(s) of work", work)
myThread.logdbClient.delete("LocalWorkQueue_pullWork", "warning", this_thread=True)
else:
self.queue.logger.warning("No work will be pulled, reason: %s", cond)
myThread.logdbClient.post("LocalWorkQueue_pullWork", cond, "warning")
except IOError as ex:
self.queue.logger.exception("Error opening connection to work queue: %s", str(ex))
except Exception as ex:
self.queue.logger.exception("Unable to pull work from parent Error: %s", str(ex))
def processWork(self):
"""Process new work"""
self.queue.logger.info("Splitting new work")
try:
self.queue.processInboundWork()
except Exception as ex:
self.queue.logger.exception('Error during split: %s', str(ex))
self.logger.info('Splitting finished')
return |
234 | test yaml | # Copyright (c) Alibaba, Inc. and its affiliates.
import argparse
import copy
import tempfile
import unittest
import json
from modelscope.utils.config import Config, check_config
obj = {'a': 1, 'b': {'c': [1, 2, 3], 'd': 'dd'}}
class ConfigTest(unittest.TestCase):
def test_json(self):
config_file = 'configs/examples/configuration.json'
cfg = Config.from_file(config_file)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, obj['b'])
def METHOD_NAME(self):
config_file = 'configs/examples/configuration.yaml'
cfg = Config.from_file(config_file)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, obj['b'])
def test_py(self):
config_file = 'configs/examples/configuration.py'
cfg = Config.from_file(config_file)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, obj['b'])
def test_dump(self):
config_file = 'configs/examples/configuration.py'
cfg = Config.from_file(config_file)
self.assertEqual(cfg.a, 1)
self.assertEqual(cfg.b, obj['b'])
pretty_text = 'a = 1\n'
pretty_text += "b = dict(c=[1, 2, 3], d='dd')\n"
json_str = '{"a": 1, "b": {"c": [1, 2, 3], "d": "dd"}}'
yaml_str = 'a: 1\nb:\n c:\n - 1\n - 2\n - 3\n d: dd\n'
with tempfile.NamedTemporaryFile(suffix='.json') as ofile:
self.assertEqual(pretty_text, cfg.dump())
cfg.dump(ofile.name)
with open(ofile.name, 'r') as infile:
self.assertDictEqual(
json.loads(json_str), json.loads(infile.read()))
with tempfile.NamedTemporaryFile(suffix='.yaml') as ofile:
cfg.dump(ofile.name)
with open(ofile.name, 'r') as infile:
self.assertEqual(yaml_str, infile.read())
def test_to_dict(self):
config_file = 'configs/examples/configuration.json'
cfg = Config.from_file(config_file)
d = cfg.to_dict()
print(d)
self.assertTrue(isinstance(d, dict))
def test_to_args(self):
def parse_fn(args):
parser = argparse.ArgumentParser(prog='PROG')
parser.add_argument('--model-dir', default='')
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--optimizer', default='')
parser.add_argument('--weight-decay', type=float, default=1e-7)
parser.add_argument(
'--save-checkpoint-epochs', type=int, default=30)
return parser.parse_args(args)
cfg = Config.from_file('configs/examples/plain_args.yaml')
args = cfg.to_args(parse_fn)
self.assertEqual(args.model_dir, 'path/to/model')
self.assertAlmostEqual(args.lr, 0.01)
self.assertAlmostEqual(args.weight_decay, 1e-6)
self.assertEqual(args.optimizer, 'Adam')
self.assertEqual(args.save_checkpoint_epochs, 20)
def test_check_config(self):
check_config('configs/cv/configuration.json')
check_config('configs/nlp/sbert_sentence_similarity.json')
def test_merge_from_dict(self):
base_cfg = copy.deepcopy(obj)
base_cfg.update({'dict_list': [dict(l1=1), dict(l2=2)]})
cfg = Config(base_cfg)
merge_dict = {
'a': 2,
'b.d': 'ee',
'b.c': [3, 3, 3],
'dict_list': {
'0': dict(l1=3)
},
'c': 'test'
}
cfg1 = copy.deepcopy(cfg)
cfg1.merge_from_dict(merge_dict)
self.assertDictEqual(
cfg1._cfg_dict, {
'a': 2,
'b': {
'c': [3, 3, 3],
'd': 'ee'
},
'dict_list': [dict(l1=3), dict(l2=2)],
'c': 'test'
})
cfg2 = copy.deepcopy(cfg)
cfg2.merge_from_dict(merge_dict, force=False)
self.assertDictEqual(
cfg2._cfg_dict, {
'a': 1,
'b': {
'c': [1, 2, 3],
'd': 'dd'
},
'dict_list': [dict(l1=1), dict(l2=2)],
'c': 'test'
})
def test_merge_from_dict_with_list(self):
base_cfg = {
'a':
1,
'b': {
'c': [1, 2, 3],
'd': 'dd'
},
'dict_list': [dict(type='l1', v=1),
dict(type='l2', v=2)],
'dict_list2': [
dict(
type='l1',
v=[dict(type='l1_1', v=1),
dict(type='l1_2', v=2)]),
dict(type='l2', v=2)
]
}
cfg = Config(base_cfg)
merge_dict_for_list = {
'a':
2,
'b.c': [3, 3, 3],
'b.d':
'ee',
'dict_list': [dict(type='l1', v=8),
dict(type='l3', v=8)],
'dict_list2': [
dict(
type='l1',
v=[
dict(type='l1_1', v=8),
dict(type='l1_2', v=2),
dict(type='l1_3', v=8),
]),
dict(type='l2', v=8)
],
'c':
'test'
}
cfg1 = copy.deepcopy(cfg)
cfg1.merge_from_dict(merge_dict_for_list, force=False)
self.assertDictEqual(
cfg1._cfg_dict, {
'a':
1,
'b': {
'c': [1, 2, 3],
'd': 'dd'
},
'dict_list': [
dict(type='l1', v=1),
dict(type='l2', v=2),
dict(type='l3', v=8)
],
'dict_list2': [
dict(
type='l1',
v=[
dict(type='l1_1', v=1),
dict(type='l1_2', v=2),
dict(type='l1_3', v=8),
]),
dict(type='l2', v=2)
],
'c':
'test'
})
cfg2 = copy.deepcopy(cfg)
cfg2.merge_from_dict(merge_dict_for_list, force=True)
self.assertDictEqual(
cfg2._cfg_dict, {
'a':
2,
'b': {
'c': [3, 3, 3],
'd': 'ee'
},
'dict_list': [
dict(type='l1', v=8),
dict(type='l2', v=2),
dict(type='l3', v=8)
],
'dict_list2': [
dict(
type='l1',
v=[
dict(type='l1_1', v=8),
dict(type='l1_2', v=2),
dict(type='l1_3', v=8),
]),
dict(type='l2', v=8)
],
'c':
'test'
})
if __name__ == '__main__':
unittest.main() |
235 | perform call | import logging
from ...errors import AngrCallableError, AngrCallableMultistateError
from ...calling_conventions import DEFAULT_CC
l = logging.getLogger(name=__name__)
# l.setLevel("DEBUG")
class IdentifierCallable:
"""
Callable is a representation of a function in the binary that can be
interacted with like a native python function.
If you set perform_merge=True, the result will be returned to you, and
you can get the result state with callable.result_state.
Otherwise, you can get the resulting path group (immutable) at callable.result_path_group.
"""
def __init__(
self,
project,
addr,
concrete_only=False,
perform_merge=False,
base_state=None,
toc=None,
cc=None,
max_steps=None,
):
"""
:param project: The project to operate on
:param addr: The address of the function to use
The following parameters are optional:
:param concrete_only: Throw an exception if the execution splits into multiple paths
:param perform_merge: Merge all result states into one at the end (only relevant if concrete_only=False)
:param base_state: The state from which to do these runs
:param toc: The address of the table of contents for ppc64
:param cc: The SimCC to use for a calling convention
"""
self._project = project
self._addr = addr
self._concrete_only = concrete_only
self._perform_merge = perform_merge
self._base_state = base_state
self._toc = toc
self._caller = None
self._cc = cc if cc is not None else DEFAULT_CC[project.arch.name](project.arch)
self._deadend_addr = project.simos.return_deadend
self._max_steps = max_steps
self.result_path_group = None
self.result_state = None
def set_base_state(self, state):
"""
Swap out the state you'd like to use to perform the call
:param state: The state to use to perform the call
"""
self._base_state = state
def __call__(self, *args):
prototype = self._cc.guess_prototype(args)
self.METHOD_NAME(*args, prototype=prototype)
if self.result_state is not None:
loc = self._cc.return_val(prototype.returnty)
return self.result_state.solver.simplify(
loc.get_value(self.result_state, stack_base=self.result_state.regs.sp - self._cc.STACKARG_SP_DIFF)
)
return None
def get_base_state(self, *args):
prototype = self._cc.guess_prototype(args)
self._base_state.ip = self._addr
state = self._project.factory.call_state(
self._addr,
*args,
prototype=prototype,
cc=self._cc,
base_state=self._base_state,
ret_addr=self._deadend_addr,
toc=self._toc,
)
return state
def METHOD_NAME(self, *args, prototype=None):
if prototype is None:
prototype = self._cc.guess_prototype(args)
self._base_state.ip = self._addr
state = self._project.factory.call_state(
self._addr,
*args,
cc=self._cc,
prototype=prototype,
base_state=self._base_state,
ret_addr=self._deadend_addr,
toc=self._toc,
)
def step_func(pg):
pg2 = pg.prune()
if len(pg2.active) > 1:
raise AngrCallableMultistateError("Execution split on symbolic condition!")
return pg2
caller = self._project.factory.simulation_manager(state)
for _ in range(self._max_steps):
if len(caller.active) == 0:
break
if caller.active[0].history.block_count > 100000:
l.debug("super long path %s", caller.active[0])
raise AngrCallableError("Super long path")
caller.step(step_func=step_func if self._concrete_only else None)
if len(caller.active) > 0:
raise AngrCallableError("didn't make it to the end of the function")
caller.unstash(from_stash="deadended")
caller.prune(filter_func=lambda pt: pt.addr == self._deadend_addr)
if len(caller.active) == 0:
raise AngrCallableError("No paths returned from function")
self.result_path_group = caller.copy()
if self._perform_merge:
caller.merge()
self.result_state = caller.active[0] |
236 | insert into table | import datetime
import random
import string
from testflows.core import *
from clickhouse_backup.tests.common import random_datetime
@TestStep
def drop_table(self, node, table_name, database="default", sync=True):
"""Helper function, drops the ClickHouse table from the given node.
"""
with When(f"dropping table {table_name} from database {database}"):
node.query(f"DROP TABLE IF EXISTS {database}.{table_name}{' SYNC' if sync else ''}")
@TestStep(Given)
def create_table(self, node, table_name, columns, database="default", engine="MergeTree", order_by=None, sign=None,
version=None, config_section="graphite_rollup_params", settings=None):
"""Helper step, creates a ClickHouse table in the given node.
"""
with When(f"creating table {table_name} with {engine} engine in database {database}"):
schema = ""
for i, j in columns.items():
schema += f"{i} {j}, "
schema = schema[:-2]
if not order_by:
order_by = str(list(columns.keys())[0])
if not sign:
sign = str(list(columns.keys())[1])
if not version:
version = str(list(columns.keys())[2])
engine_params = ""
if "VersionedCollapsingMergeTree" in engine:
engine_params = f"{sign}, {version}"
elif "CollapsingMergeTree" in engine:
engine_params = f"{sign}"
elif "GraphiteMergeTree" in engine:
engine_params = f"'{config_section}'"
if "Replicated" in engine:
zoo_path = "/clickhouse/tables/{shard}" \
f"/{table_name}"
if engine_params == "":
engine_params = f"'{zoo_path}', '{{replica}}'"
else:
engine_params = f"'{zoo_path}', '{{replica}}', {engine_params}"
query = f"CREATE TABLE {database}.{table_name} ({schema}) " \
f"Engine = {engine}({engine_params}) ORDER BY {order_by}"
if settings:
query += f" SETTINGS {settings}"
with By("execute CREATE TABLE query"):
node.query(query)
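# Illustrative sketch (hypothetical columns) of the query assembled above:
#   create_table(node=node, table_name="t1",
#                columns={"id": "Int32", "Sign": "Int8", "ver": "UInt8"},
#                engine="CollapsingMergeTree")
#   builds: CREATE TABLE default.t1 (id Int32, Sign Int8, ver UInt8)
#           Engine = CollapsingMergeTree(Sign) ORDER BY id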
@TestStep
def METHOD_NAME(self, node, table_name, values, database="default", columns="(*)"):
"""Helper function, inserts given data into a ClickHouse table in the given node.
"""
with When(f"inserting into table {table_name}"):
node.query(f"INSERT INTO {database}.{table_name} {columns} VALUES {values} ")
@TestStep
def populate_table(self, node, table_name, columns, database="default", size=10, native=True):
"""Helper function, inserts given data into a ClickHouse table in the given node.
"""
if not native:
with When(f"populate table {table_name} with random generated data"):
letters = string.ascii_letters + string.digits  # ascii_letters already covers lower- and upper-case
values = []
for i in range(size):
portion = []
for col_name, col_type in columns.items():
if col_type == "String":
portion.append((''.join(random.choice(letters) for _ in range(10))))
elif "Int" in col_type:
portion.append(random.randint(1, 51) if col_name != "Sign" else random.choice((1, -1)))
elif col_type == "DateTime":
d1 = datetime.datetime(1980, 1, 1)
d2 = datetime.datetime(2030, 12, 31)
portion.append(str(random_datetime(d1, d2)))
values.append('(' + str(portion)[1:-1] + ')')
values = str(values)[1:-1].replace("\"(", "(").replace(")\"", ")").replace("\"", "'")
column_selector = str(list(columns.keys())).replace('\'', '')[1:-1]
METHOD_NAME(
node=node, table_name=table_name, database=database, values=values, columns=f"({column_selector})"
)
else:
random_schema = []
insert_columns = []
for i, j in columns.items():
if not (
"Map" in j or "LowCardinality" in j or "Nested" in j
or (type(j) == str and j.startswith("Aggregate"))
):
insert_columns.append(i)
if "'" in j:
j_mod = j.replace("'", "\\'")
random_schema.append(f"{i} {j_mod}")
else:
random_schema.append(f"{i} {j}")
str_random_schema = ", ".join(random_schema)
str_insert_columns = ", ".join(insert_columns)
node.query(f"INSERT INTO {database}.{table_name} ({str_insert_columns}) SELECT * FROM generateRandom('{str_random_schema}', NULL, 10, 2) LIMIT {size}")
@TestStep(Given)
def create_and_populate_table(self, node, table_name, database="default", columns=None, engine="MergeTree",
size=10, settings=None, native=True):
"""Helper function combining table creation and population.
"""
if not columns:
columns = self.context.columns
try:
create_table(
node=node, table_name=f"{table_name}", database=database, engine=engine, columns=columns, settings=settings
)
populate_table(
node=node, table_name=f"{table_name}", database=database, columns=columns, size=size, native=native
)
yield
finally:
with Finally("remove created table"):
drop_table(node=node, database=database, table_name=table_name)
@TestStep(Given)
def delete_any_old_topic_and_consumer_group(self, kafka_node, bootstrap_server, topic, consumer_group):
"""Delete any old topic and consumer group.
"""
with By("deleting topic"):
command = f"kafka-topics --bootstrap-server {bootstrap_server} --delete --topic {topic}"
kafka_node.cmd(command)
with By("deleting consumer group"):
command = f"kafka-consumer-groups --bootstrap-server {bootstrap_server} --delete --group {consumer_group}"
kafka_node.cmd(command)
@TestStep(Given)
def create_topic(self, kafka_node, bootstrap_server, topic, consumer_group, replication_factor, partitions):
"""Create Kafka topic.
"""
try:
command = (f"kafka-topics --create --bootstrap-server {bootstrap_server} "
f"--replication-factor {replication_factor} --partitions {partitions} --topic {topic}")
kafka_node.cmd(command, exitcode=0)
yield topic
finally:
with Finally("I cleanup Kafka topic and consumer group"):
command = f"kafka-topics --bootstrap-server {bootstrap_server} --delete --topic {topic}"
kafka_node.cmd(command)
command = f"kafka-consumer-groups --bootstrap-server {bootstrap_server} --delete --group {consumer_group}"
kafka_node.cmd(command) |
237 | test merge importance dfs base | import numpy as np
import pandas as pd
import pytest
from numpy.core.fromnumeric import sort
from autogluon.core.utils.feature_selection import *
from autogluon.core.utils.utils import unevaluated_fi_df_template
def evaluated_fi_df_template(features, importance=None, n=None):
rng = np.random.default_rng(0)
importance_df = pd.DataFrame({"name": features})
importance_df["importance"] = rng.standard_normal(len(features)) if importance is None else importance
importance_df["stddev"] = rng.standard_normal(len(features))
importance_df["p_value"] = None
importance_df["n"] = 5 if n is None else n
importance_df.set_index("name", inplace=True)
importance_df.index.name = None
return importance_df
@pytest.fixture
def sample_features():
return ["a", "b", "c", "d", "e"]
@pytest.fixture
def sample_importance_df_1(sample_features):
return evaluated_fi_df_template(sample_features, importance=[0.2, 0.2, None, 1.0, None], n=[10, 5, 0, 5, 0])
@pytest.fixture
def sample_importance_df_2(sample_features):
return evaluated_fi_df_template(sample_features, importance=[-0.1, -0.1, 0.1, None, None], n=[5, 10, 10, 0, 0])
def test_add_noise_column_df():
# test noise columns are appended to input dataframe and feature_metadata
X = pd.DataFrame({"a": [1, 2]})
args = {"rng": np.random.default_rng(0), "count": 2}
X_noised, noise_columns = add_noise_column(X, **args)
expected_features = X.columns.tolist() + noise_columns
assert expected_features == X_noised.columns.tolist()
def METHOD_NAME(sample_features):
# test the scenario when previous feature importance df is none
prev_df, curr_df = None, unevaluated_fi_df_template(sample_features)
assert merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set()) is curr_df
def test_merge_importance_dfs_same_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from the same fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=set())
assert [score if score == score else None for score in result_df["importance"].tolist()] == [0.0, 0.1, 0.1, 1.0, None]
assert result_df["n"].tolist() == [15, 15, 10, 5, 0]
def test_merge_importance_dfs_different_model(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from a different fitted model
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set(sample_features)
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert len(using_prev_fit_fi) == 2
assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, -0.1, 0.1, 1.0, None]
assert result_df["n"].tolist() == [5, 10, 10, 5, 0]
def test_merge_importance_dfs_all(sample_features, sample_importance_df_1, sample_importance_df_2):
# test the scenario where previous feature importance df exists and its importance estimates come from both same and different fitted models
prev_df, curr_df = sample_importance_df_1, sample_importance_df_2
using_prev_fit_fi = set([sample_features[0]])
result_df = merge_importance_dfs(prev_df, curr_df, using_prev_fit_fi=using_prev_fit_fi).sort_index()
assert [score if score == score else None for score in result_df["importance"].tolist()] == [-0.1, 0.0, 0.1, 1.0, None]
assert result_df["n"].tolist() == [5, 15, 10, 5, 0]
assert using_prev_fit_fi == set()
def test_sort_features_by_priority_base(sample_features):
# test the ordering of feature importance computation when no prior feature importance computation was done
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=None, using_prev_fit_fi=set())
assert sorted_features == sample_features
def test_sort_features_by_priority_same_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from the same fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=set())
assert sorted_features == prev_importance_df.sort_values("importance").index.tolist()
def test_sort_features_by_priority_different_model(sample_features):
# test the ordering of feature importance computation when prior feature importance computation from a different fitted model was done
prev_importance_df = evaluated_fi_df_template(sample_features)
using_prev_fit_fi = sample_features[-2:]
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
sorted_prev_fit_features = prev_importance_df[prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist()
sorted_curr_fit_features = prev_importance_df[~prev_importance_df.index.isin(using_prev_fit_fi)].sort_values("importance").index.tolist()
expected_features = sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features
def test_sort_features_by_priority_all(sample_features):
# test the ordering of feature importance computation when feature importance computation comes from a mix of current and previous fit models,
# and some features are unevaluated
length = len(sample_features)
using_prev_fit_fi = set(sample_features[: length // 3])
evaluated_rows, unevaluated_rows = evaluated_fi_df_template(sample_features[: length // 2]), unevaluated_fi_df_template(sample_features[length // 2 :])
prev_importance_df = pd.concat([evaluated_rows, unevaluated_rows])
sorted_features = sort_features_by_priority(features=sample_features, prev_importance_df=prev_importance_df, using_prev_fit_fi=using_prev_fit_fi)
unevaluated_features = unevaluated_rows.index.tolist()
sorted_prev_fit_features = (
evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (evaluated_rows.index.isin(using_prev_fit_fi))]
.sort_values("importance")
.index.tolist()
)
sorted_curr_fit_features = (
evaluated_rows[(~evaluated_rows.index.isin(sample_features[length // 2 :])) & (~evaluated_rows.index.isin(using_prev_fit_fi))]
.sort_values("importance")
.index.tolist()
)
expected_features = unevaluated_features + sorted_prev_fit_features + sorted_curr_fit_features
assert sorted_features == expected_features |
238 | labels | from __future__ import annotations
from functools import cached_property
from typing import TYPE_CHECKING, List
from .. import errors, helpers
if TYPE_CHECKING:
from ..schema import Field
class Header(List[str]): # type: ignore
"""Header representation
> Constructor of this object is not Public API
Parameters:
labels (any[]): header row labels
fields (Field[]): table fields
row_numbers (int[]): row numbers
ignore_case (bool): ignore case
"""
def __init__(
self,
METHOD_NAME: List[str],
*,
fields: List[Field],
row_numbers: List[int],
ignore_case: bool = False,
):
super().__init__(field.name for field in fields)
self.__fields = [field.to_copy() for field in fields]
self.__field_names = self.copy()
self.__row_numbers = row_numbers
self.__ignore_case = ignore_case
self.__labels = METHOD_NAME
self.__errors: List[errors.HeaderError] = []
self.__process()
@cached_property
def METHOD_NAME(self):
"""
Returns:
str[]: header labels
"""
return self.__labels
@cached_property
def fields(self):
"""
Returns:
Field[]: table fields
"""
return self.__fields
@cached_property
def field_names(self):
"""
Returns:
str[]: table field names
"""
return self.__field_names
@cached_property
def field_numbers(self):
"""
Returns:
int[]: list of field numbers
"""
return list(range(1, len(self.__field_names) + 1))
@cached_property
def row_numbers(self):
"""
Returns:
int[]: table row positions
"""
return self.__row_numbers
@cached_property
def missing(self):
"""
Returns:
bool: whether the header is missing
"""
return not self.__labels
@cached_property
def errors(self):
"""
Returns:
Error[]: header errors
"""
return self.__errors
@cached_property
def valid(self):
"""
Returns:
bool: whether the header is valid
"""
return not self.__errors
# Convert
def to_str(self):
"""
Returns:
str: a row as a CSV string
"""
cells = self.to_list()
return helpers.stringify_csv_string(cells)
def to_list(self):
"""Convert to a list"""
return self.copy()
# Process
def __process(self):
# Skip missing
if self.missing:
return
# Prepare context
METHOD_NAME = self.__labels
fields = self.__fields
# Extra label
if len(fields) < len(METHOD_NAME):
start = len(fields) + 1
iterator = METHOD_NAME[len(fields) :]
for field_number, label in enumerate(iterator, start=start):
self.__errors.append(
errors.ExtraLabelError(
note="",
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
label="",
field_name="",
field_number=field_number,
)
)
# Missing label
if len(fields) > len(METHOD_NAME):
start = len(METHOD_NAME) + 1
iterator = fields[len(METHOD_NAME) :]
for field_number, field in enumerate(iterator, start=start):
if field is not None: # type: ignore
self.__errors.append(
errors.MissingLabelError(
note="",
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
label="",
field_name=field.name,
field_number=field_number,
)
)
# Iterate items
field_number = 0
for field, label in zip(fields, METHOD_NAME):
field_number += 1
# Blank label
if not label:
self.__errors.append(
errors.BlankLabelError(
note="",
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
label="",
field_name=field.name,
field_number=field_number,
)
)
# Duplicated label
if label:
duplicate_field_numbers: List[int] = []
seen_cells = METHOD_NAME[0 : field_number - 1]
for seen_number, seen_cell in enumerate(seen_cells, start=1):
if label == seen_cell:
duplicate_field_numbers.append(seen_number)
if duplicate_field_numbers:
label = None
note = 'at position "%s"'
note = note % ", ".join(map(str, duplicate_field_numbers))
self.__errors.append(
errors.DuplicateLabelError(
note=note,
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
label=str(METHOD_NAME[field_number - 1]),
field_name=field.name,
field_number=field_number,
)
)
# Incorrect Label
if label:
name = field.name
# NOTE: review where we normalize the label/name
lname = label.replace("\n", " ").strip()
if name.lower() != lname.lower() if self.__ignore_case else name != lname:
self.__errors.append(
errors.IncorrectLabelError(
note="",
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
label=str(label),
field_name=field.name,
field_number=field_number,
)
)
# Blank header
if not METHOD_NAME:
self.__errors = [
errors.BlankHeaderError(
note="",
METHOD_NAME=list(map(str, METHOD_NAME)),
row_numbers=self.__row_numbers,
)
] |
239 | find next version | import logging
import re
import shutil
import tempfile
import time
from http import HTTPStatus
from http.client import HTTPException
from pathlib import Path
from typing import Callable, NamedTuple
from antarest.core.exceptions import StudyValidationError
from .upgrader_710 import upgrade_710
from .upgrader_720 import upgrade_720
from .upgrader_800 import upgrade_800
from .upgrader_810 import upgrade_810
from .upgrader_820 import upgrade_820
from .upgrader_830 import upgrade_830
from .upgrader_840 import upgrade_840
from .upgrader_850 import upgrade_850
from .upgrader_860 import upgrade_860
logger = logging.getLogger(__name__)
class UpgradeMethod(NamedTuple):
"""Raw study upgrade method (old version, new version, upgrade function)."""
old: str
new: str
method: Callable[[Path], None]
UPGRADE_METHODS = [
UpgradeMethod("700", "710", upgrade_710),
UpgradeMethod("710", "720", upgrade_720),
UpgradeMethod("720", "800", upgrade_800),
UpgradeMethod("800", "810", upgrade_810),
UpgradeMethod("810", "820", upgrade_820),
UpgradeMethod("820", "830", upgrade_830),
UpgradeMethod("830", "840", upgrade_840),
UpgradeMethod("840", "850", upgrade_850),
UpgradeMethod("850", "860", upgrade_860),
]
class InvalidUpgrade(HTTPException):
def __init__(self, message: str) -> None:
super().__init__(HTTPStatus.UNPROCESSABLE_ENTITY, message)
def METHOD_NAME(from_version: str) -> str:
"""
Find the next study version from the given version.
Args:
from_version: The current version as a string.
Returns:
The next version as a string.
If no next version was found, returns an empty string.
"""
return next(
(meth.new for meth in UPGRADE_METHODS if from_version == meth.old),
"",
)
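# Illustrative behaviour, grounded in UPGRADE_METHODS above:
#   METHOD_NAME("700") -> "710"
#   METHOD_NAME("860") -> ""   (latest known version, no next step)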
def upgrade_study(study_path: Path, target_version: str) -> None:
tmp_dir = Path(tempfile.mkdtemp(suffix=".upgrade.tmp", prefix="~", dir=study_path.parent))
shutil.copytree(study_path, tmp_dir, dirs_exist_ok=True)
try:
src_version = get_current_version(tmp_dir)
can_upgrade_version(src_version, target_version)
_do_upgrade(tmp_dir, src_version, target_version)
except (StudyValidationError, InvalidUpgrade) as e:
shutil.rmtree(tmp_dir)
logger.warning(str(e))
raise
except Exception as e:
shutil.rmtree(tmp_dir)
logger.error(f"Unhandled exception : {e}", exc_info=True)
raise
else:
backup_dir = Path(tempfile.mkdtemp(suffix=".backup.tmp", prefix="~", dir=study_path.parent))
backup_dir.rmdir()
study_path.rename(backup_dir)
tmp_dir.rename(study_path)
shutil.rmtree(backup_dir, ignore_errors=True)
def get_current_version(study_path: Path) -> str:
"""
Get the current version of a study.
Args:
study_path: Path to the study.
Returns:
The current version of the study.
Raises:
StudyValidationError: If the version number is not found in the
`study.antares` file or does not match the expected format.
"""
antares_path = study_path / "study.antares"
pattern = r"version\s*=\s*([\w.-]+)\s*"
with antares_path.open(encoding="utf-8") as lines:
for line in lines:
if match := re.fullmatch(pattern, line):
return match[1].rstrip()
raise StudyValidationError(
f"File parsing error: the version number is not found in '{antares_path}'"
f" or does not match the expected '{pattern}' format."
)
def can_upgrade_version(from_version: str, to_version: str) -> None:
"""
Checks if upgrading from one version to another is possible.
Args:
from_version: The current version of the study.
to_version: The target version of the study.
Raises:
InvalidUpgrade: If the upgrade is not possible.
"""
if from_version == to_version:
raise InvalidUpgrade(f"Your study is already in version '{to_version}'")
sources = [u.old for u in UPGRADE_METHODS]
if from_version not in sources:
raise InvalidUpgrade(f"Version '{from_version}' unknown: possible versions are {', '.join(sources)}")
targets = [u.new for u in UPGRADE_METHODS]
if to_version not in targets:
raise InvalidUpgrade(f"Version '{to_version}' unknown: possible versions are {', '.join(targets)}")
curr_version = from_version
for src, dst in zip(sources, targets):
if curr_version == src:
curr_version = dst
if curr_version == to_version:
return
# This code must be unreachable!
raise InvalidUpgrade(
f"Impossible to upgrade from version '{from_version}'"
f" to version '{to_version}':"
f" missing value in `UPGRADE_METHODS`."
)
def _update_study_antares_file(target_version: str, study_path: Path) -> None:
file = study_path / "study.antares"
content = file.read_text(encoding="utf-8")
content = re.sub(
r"^version\s*=.*$",
f"version = {target_version}",
content,
flags=re.MULTILINE,
)
content = re.sub(
r"^lastsave\s*=.*$",
f"lastsave = {int(time.time())}",
content,
flags=re.MULTILINE,
)
file.write_text(content, encoding="utf-8")
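# Illustrative effect (hypothetical file contents): a "version = 800" line in
# study.antares becomes "version = 810" when target_version is "810", and the
# "lastsave = ..." line is rewritten with the current epoch timestamp.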
def _do_upgrade(study_path: Path, src_version: str, target_version: str) -> None:
_update_study_antares_file(target_version, study_path)
curr_version = src_version
for old, new, method in UPGRADE_METHODS:
if curr_version == old and curr_version != target_version:
method(study_path)
curr_version = new |
240 | is event locked | """Define class based views for the various API views."""
import json
import logging
from django.db.models import Model
from django.http import Http404
from rest_framework import mixins, viewsets
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from tracker import logutil
from tracker.api.messages import GENERIC_NOT_FOUND
from tracker.api.pagination import TrackerPagination
from tracker.api.permissions import UNAUTHORIZED_OBJECT
from tracker.api.serializers import (
EventSerializer,
RunnerSerializer,
SpeedRunSerializer,
)
from tracker.models.event import Event, Runner, SpeedRun
log = logging.getLogger(__name__)
class FlatteningViewSetMixin(object):
"""Override a view set's data query methods in order to have a flat dictionary of objects
rather than the REST default of a nested tree.
"""
def list(self, request, *args, **kwargs):
"""Change the response type to be a dictionary if flat related objects have been requested."""
log.debug('query params: %s', request.query_params)
flatten = request.query_params.get('include', None)
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
log.debug(serializer.data)
# if we need to flatten, it's time to walk this dictionary
if flatten:
targets = flatten.split(',')
prepared_data = self._flatten_data(serializer.data, targets)
else:
prepared_data = serializer.data
log.debug(prepared_data)
return self.get_paginated_response(prepared_data)
def retrieve(self, request, *args, **kwargs):
"""Change the response type to be a dictionary if flat related objects have been requested."""
log.debug('query params: %s', request.query_params)
instance = self.get_object()
serializer = self.get_serializer(instance)
log.debug(serializer.data)
flatten = request.query_params.get('include', None)
# if we need to flatten, it's time to walk this dictionary
if flatten:
targets = flatten.split(',')
prepared_data = self._flatten_data([serializer.data], targets)
else:
prepared_data = serializer.data
log.debug(prepared_data)
return Response(prepared_data)
@staticmethod
def _flatten_data(initial_data, targets):
log.debug('targets for flattening: %s', targets)
primary_objs = list()
obj_label = None
for item in initial_data:
obj_label = '{0:s}s'.format(item['type'])
primary_objs.append(dict(item))
prepared_data = {obj_label: primary_objs}
for which in targets:
log.debug('searching for target %s', which)
target_objs = dict()
for item in primary_objs:
log.debug('searching in %s', item)
hits = item.get(which, [])
if hits:
# winch this into a list if it isn't a many=True field
if not isinstance(hits, list):
log.debug('winching %s into a list', hits)
hits = [hits]
new_hit_list = list()
for hit in hits:
log.debug('found a hit: %s', hit)
target_objs[hit['id']] = hit
new_hit_list.append(hit['id'])
item[which] = new_hit_list
prepared_data[which] = list(target_objs.values())
return prepared_data
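# Illustrative sketch of the flattening above (hypothetical payload):
#   initial_data = [{"type": "speedrun", "id": 1, "runners": [{"id": 7, "type": "runner"}]}]
#   _flatten_data(initial_data, ["runners"]) ->
#     {"speedruns": [{"type": "speedrun", "id": 1, "runners": [7]}],
#      "runners": [{"id": 7, "type": "runner"}]}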
class EventNestedMixin:
def get_queryset(self):
queryset = super().get_queryset()
event_pk = self.kwargs.get('event_pk', None)
if event_pk:
event = EventViewSet(
kwargs={'pk': event_pk}, request=self.request
).get_object()
queryset = self.get_event_filter(queryset, event)
return queryset
def get_event_filter(self, queryset, event):
return queryset.filter(event=event)
def get_event_from_request(self, request):
if 'event' in request.data:
try:
return Event.objects.filter(pk=request.data['event']).first()
except (TypeError, ValueError):
pass
return None
def METHOD_NAME(self, request):
event = self.get_event_from_request(request)
return event and event.locked
def generic_404(exception_handler):
def _inner(exc, context):
# override the default messaging for 404s
if isinstance(exc, Http404):
exc = NotFound(detail=GENERIC_NOT_FOUND)
if isinstance(exc, NotFound) and exc.detail == NotFound.default_detail:
exc.detail = GENERIC_NOT_FOUND
return exception_handler(exc, context)
return _inner
def model_to_pk(model):
if isinstance(model, Model):
return model.pk
raise TypeError
class TrackerCreateMixin(mixins.CreateModelMixin):
def perform_create(self, serializer):
super().perform_create(serializer)
logutil.addition(self.request, serializer.instance)
class TrackerUpdateMixin(mixins.UpdateModelMixin):
def perform_update(self, serializer):
old_values = {}
for key, value in serializer.initial_data.items():
if key not in serializer.fields:
continue
old_values[key] = getattr(serializer.instance, key)
if isinstance(old_values[key], Model):
old_values[key] = old_values[key].pk
super().perform_update(serializer)
changed_values = {}
for key, value in old_values.items():
if value != serializer.data[key]:
changed_values[key] = {'old': value, 'new': serializer.data[key]}
if changed_values:
logutil.change(
self.request,
serializer.instance,
json.dumps(changed_values, default=model_to_pk),
)
class TrackerReadViewSet(viewsets.ReadOnlyModelViewSet):
def permission_denied(self, request, message=None, code=None):
if code == UNAUTHORIZED_OBJECT:
raise Http404
else:
super().permission_denied(request, message=message, code=code)
def get_exception_handler(self):
return generic_404(super().get_exception_handler())
class EventViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = Event.objects.with_annotations().all()
serializer_class = EventSerializer
pagination_class = TrackerPagination
def get_serializer(self, *args, **kwargs):
serializer_class = self.get_serializer_class()
with_totals = self.request.query_params.get('totals') is not None
return serializer_class(*args, **kwargs, with_totals=with_totals)
class RunnerViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = Runner.objects.all()
serializer_class = RunnerSerializer
pagination_class = TrackerPagination
class SpeedRunViewSet(FlatteningViewSetMixin, viewsets.ReadOnlyModelViewSet):
queryset = SpeedRun.objects.select_related('event').prefetch_related(
'runners', 'hosts', 'commentators'
)
serializer_class = SpeedRunSerializer
pagination_class = TrackerPagination |
241 | test save | # TODO complete tests/add assertions
import disnake
import pytest
from tests.utils import active_character
pytestmark = pytest.mark.asyncio
@pytest.mark.usefixtures("character")
class TestBasicSheetCommands:
async def test_attack(self, avrae, dhttp):
avrae.message("!a dag")
async def test_action(self, avrae, dhttp):
character = await active_character(avrae)
if not any(1 for action in character.actions if "Bardic" in action.name):
pytest.skip("Character does not have bardic inspiration")
avrae.message("!a bardic")
async def test_attack_add(self, avrae, dhttp):
avrae.message("!a add TESTATTACKFOOBAR -b 5 -d 1d6")
async def test_attack_delete(self, avrae, dhttp):
avrae.message("!a delete TESTATTACKFOOBAR")
await dhttp.receive_message()
avrae.message("y")
async def test_action_list(self, avrae, dhttp):
avrae.message("!a list")
avrae.message("!a")
async def METHOD_NAME(self, avrae, dhttp):
avrae.message("!s con")
async def test_check(self, avrae, dhttp):
avrae.message("!c performance")
async def test_desc(self, avrae, dhttp):
avrae.message("!desc")
async def test_edit_desc(self, avrae, dhttp):
avrae.message("!desc edit This is a new description.")
async def test_remove_desc(self, avrae, dhttp):
avrae.message("!desc remove")
async def test_portrait(self, avrae, dhttp):
avrae.message("!portrait")
async def test_edit_portrait(self, avrae, dhttp):
pass
async def test_remove_portrait(self, avrae, dhttp):
pass
async def test_playertoken(self, avrae, dhttp):
# avrae.message("!token") # will error until formdata handler added
pass
async def test_sheet(self, avrae, dhttp):
avrae.message("!sheet")
async def test_character(self, avrae, dhttp):
avrae.message("!char")
async def test_character_list(self, avrae, dhttp):
avrae.message("!char list")
async def test_character_delete(self, avrae, dhttp):
pass
async def test_csettings(self, avrae, dhttp):
pass
async def test_cvar(self, avrae, dhttp):
avrae.message("!cvar TESTCVAR foo")
await dhttp.drain()
async def test_remove_cvar(self, avrae, dhttp):
avrae.message("!cvar delete TESTCVAR")
await dhttp.drain()
async def test_cvar_deleteall(self, avrae, dhttp):
avrae.message("!cvar deleteall")
await dhttp.receive_message()
avrae.message("Yes, I am sure")
await dhttp.drain()
async def test_list_cvar(self, avrae, dhttp):
avrae.message("!cvar list")
@pytest.mark.usefixtures("character")
class TestComplexAttacks:
async def test_creation_and_attack(self, avrae, dhttp):
avrae.message("!a add TESTATTACKFOOBAR -b 5 -d 1d6")
await dhttp.receive_message("Created attack TESTATTACKFOOBAR!")
async def _receive_attack(embed=None):
await dhttp.receive_message(embed=embed)
await dhttp.receive_delete()
avrae.message("!a TESTATTACKFOOBAR")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -phrase foobar -title barfoo")
await _receive_attack(disnake.Embed(description=r"\*foobar\*", title="barfoo"))
avrae.message("!a TESTATTACKFOOBAR adv")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -ac 15")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -b 5")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -d 5 hit")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -criton 20 -c 15 hit crit")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -rr 2")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -t foo")
await _receive_attack()
avrae.message("!a TESTATTACKFOOBAR -rr 2 -t foo")
await _receive_attack() |
242 | main | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from functools import partial
from glob import glob
import numpy as np
from mmengine.utils import (mkdir_or_exist, track_parallel_progress,
track_progress)
from PIL import Image
COCO_LEN = 123287
clsID_to_trID = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
12: 11,
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
21: 20,
22: 21,
23: 22,
24: 23,
26: 24,
27: 25,
30: 26,
31: 27,
32: 28,
33: 29,
34: 30,
35: 31,
36: 32,
37: 33,
38: 34,
39: 35,
40: 36,
41: 37,
42: 38,
43: 39,
45: 40,
46: 41,
47: 42,
48: 43,
49: 44,
50: 45,
51: 46,
52: 47,
53: 48,
54: 49,
55: 50,
56: 51,
57: 52,
58: 53,
59: 54,
60: 55,
61: 56,
62: 57,
63: 58,
64: 59,
66: 60,
69: 61,
71: 62,
72: 63,
73: 64,
74: 65,
75: 66,
76: 67,
77: 68,
78: 69,
79: 70,
80: 71,
81: 72,
83: 73,
84: 74,
85: 75,
86: 76,
87: 77,
88: 78,
89: 79,
91: 80,
92: 81,
93: 82,
94: 83,
95: 84,
96: 85,
97: 86,
98: 87,
99: 88,
100: 89,
101: 90,
102: 91,
103: 92,
104: 93,
105: 94,
106: 95,
107: 96,
108: 97,
109: 98,
110: 99,
111: 100,
112: 101,
113: 102,
114: 103,
115: 104,
116: 105,
117: 106,
118: 107,
119: 108,
120: 109,
121: 110,
122: 111,
123: 112,
124: 113,
125: 114,
126: 115,
127: 116,
128: 117,
129: 118,
130: 119,
131: 120,
132: 121,
133: 122,
134: 123,
135: 124,
136: 125,
137: 126,
138: 127,
139: 128,
140: 129,
141: 130,
142: 131,
143: 132,
144: 133,
145: 134,
146: 135,
147: 136,
148: 137,
149: 138,
150: 139,
151: 140,
152: 141,
153: 142,
154: 143,
155: 144,
156: 145,
157: 146,
158: 147,
159: 148,
160: 149,
161: 150,
162: 151,
163: 152,
164: 153,
165: 154,
166: 155,
167: 156,
168: 157,
169: 158,
170: 159,
171: 160,
172: 161,
173: 162,
174: 163,
175: 164,
176: 165,
177: 166,
178: 167,
179: 168,
180: 169,
181: 170,
255: 255
}
def convert_to_trainID(maskpath, out_mask_dir, is_train):
mask = np.array(Image.open(maskpath))
mask_copy = mask.copy()
for clsID, trID in clsID_to_trID.items():
mask_copy[mask == clsID] = trID
seg_filename = osp.join(out_mask_dir, 'train2017',
osp.basename(maskpath)) if is_train else osp.join(
out_mask_dir, 'val2017',
osp.basename(maskpath))
Image.fromarray(mask_copy).save(seg_filename, 'PNG')
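# Illustrative mapping check (values follow from clsID_to_trID above): a mask
# pixel with class ID 12 is rewritten to train ID 11, while the ignore label
# 255 is left unchanged.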
def parse_args():
parser = argparse.ArgumentParser(
description=\
'Convert COCO Stuff 164k annotations to mmdet format') # noqa
parser.add_argument('coco_path', help='coco stuff path')
parser.add_argument(
'--out-dir-name',
'-o',
default='stuffthingmaps_semseg',
help='output path')
parser.add_argument(
'--nproc', default=16, type=int, help='number of process')
args = parser.parse_args()
return args
def METHOD_NAME():
args = parse_args()
coco_path = args.coco_path
out_dir = osp.join(coco_path, args.out_dir_name)
nproc = args.nproc
mkdir_or_exist(osp.join(out_dir, 'train2017'))
mkdir_or_exist(osp.join(out_dir, 'val2017'))
train_list = glob(osp.join(coco_path, 'stuffthingmaps/train2017', '*.png'))
val_list = glob(osp.join(coco_path, 'stuffthingmaps/val2017', '*.png'))
assert (len(train_list) +
len(val_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(
len(train_list), len(val_list))
if args.nproc > 1:
track_parallel_progress(
partial(convert_to_trainID, out_mask_dir=out_dir, is_train=True),
train_list,
nproc=nproc)
track_parallel_progress(
partial(convert_to_trainID, out_mask_dir=out_dir, is_train=False),
val_list,
nproc=nproc)
else:
track_progress(
partial(convert_to_trainID, out_mask_dir=out_dir, is_train=True),
train_list)
track_progress(
partial(convert_to_trainID, out_mask_dir=out_dir, is_train=False),
val_list)
print('Done!')
if __name__ == '__main__':
METHOD_NAME() |
243 | test enqueue job runs job | # -*- coding: utf-8 -*-
import time
import pytest
from kolibri.core.tasks.job import Job
from kolibri.core.tasks.job import Priority
from kolibri.core.tasks.job import State
from kolibri.core.tasks.test.base import connection
from kolibri.core.tasks.test.taskrunner.test_job_running import EventProxy
from kolibri.core.tasks.worker import Worker
from kolibri.utils import conf
QUEUE = "pytest"
error_text = "كوليبري is not a function"
def error_func():
"""
Function that raises an error that contains unicode.
Made this a module function due to the need to have a module path to pass to the Job constructor.
"""
raise TypeError(error_text)
@pytest.fixture
def flag():
e = EventProxy()
yield e
e.clear()
def toggle_flag(flag_id):
evt = EventProxy(event_id=flag_id)
if evt.is_set():
evt.clear()
else:
evt.set()
@pytest.fixture
def worker():
with connection() as c:
b = Worker(c, regular_workers=1, high_workers=1)
b.storage.clear(force=True)
yield b
b.storage.clear(force=True)
b.shutdown()
@pytest.mark.django_db
class TestWorker:
def METHOD_NAME(self, worker):
job = Job(id, args=(9,))
worker.storage.enqueue_job(job, QUEUE)
while job.state != State.COMPLETED:
job = worker.storage.get_job(job.job_id)
time.sleep(0.5)
assert job.state == State.COMPLETED
def test_enqueue_job_runs_job_once(self, worker, flag):
# Do conditional check in here, as it seems to not work properly
# inside a pytest.mark.skipIf
if conf.OPTIONS["Database"]["DATABASE_ENGINE"] == "postgres":
b = Worker(worker.storage.engine, regular_workers=1, high_workers=1)
job = Job(toggle_flag, args=(flag.event_id,))
worker.storage.enqueue_job(job, QUEUE)
while job.state != State.COMPLETED:
job = worker.storage.get_job(job.job_id)
time.sleep(0.5)
assert job.state == State.COMPLETED
assert flag.is_set()
b.shutdown()
def test_can_handle_unicode_exceptions(self, worker):
# Make sure task exception info is not an object, but is either a string or None.
# See Storage.mark_job_as_failed in kolibri.core.tasks.storage for more details on why we do this.
# create a job that triggers an exception
job = Job("kolibri.core.tasks.test.taskrunner.test_worker.error_func")
job_id = worker.storage.enqueue_job(job, QUEUE)
while job.state == State.QUEUED:
job = worker.storage.get_job(job.job_id)
time.sleep(0.5)
returned_job = worker.storage.get_job(job_id)
assert returned_job.state == "FAILED"
assert returned_job.exception == "TypeError"
assert error_text in returned_job.traceback
def test_enqueue_job_writes_to_storage_on_success(self, worker):
# this job should never fail.
job = Job(id, args=(9,))
worker.storage.enqueue_job(job, QUEUE)
while job.state == State.QUEUED:
job = worker.storage.get_job(job.job_id)
time.sleep(0.5)
try:
# Get the future, or pass if it has already been cleaned up.
future = worker.future_job_mapping[job.job_id]
future.result()
except KeyError:
pass
job = worker.storage.get_job(job.job_id)
assert job.state == State.COMPLETED
def test_regular_tasks_wait_when_regular_workers_busy(self, worker):
# We have one task running right now.
worker.future_job_mapping = {"job_id": "future"}
job = Job(id, args=(10,))
worker.storage.enqueue_job(job, QUEUE, Priority.REGULAR)
job = worker.get_next_job()
worker.future_job_mapping.clear()
# Worker must not get this job since our regular worker is busy.
assert job is None
def test_high_tasks_dont_wait_when_regular_workers_busy(self, worker):
# We have one task running right now.
worker.future_job_mapping = {"job_id": "future"}
job = Job(id, args=(10,))
worker.storage.enqueue_job(job, QUEUE, Priority.HIGH)
job = worker.get_next_job()
worker.future_job_mapping.clear()
# Worker must get this job since its a 'high' priority job.
assert isinstance(job, Job) is True |
244 | encode map | """
The MIT License (MIT)
Copyright (c) 2023 Arduino SA
Copyright (c) 2018 KPN (Jan Bogaerts)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import io
import struct
class CBOREncodeError(Exception):
"""Raised when an error occurs while serializing an object into a CBOR datastream."""
def encode_length(major_tag, length):
if length < 24:
return struct.pack(">B", major_tag | length)
elif length < 256:
return struct.pack(">BB", major_tag | 24, length)
elif length < 65536:
return struct.pack(">BH", major_tag | 25, length)
elif length < 4294967296:
return struct.pack(">BL", major_tag | 26, length)
else:
return struct.pack(">BQ", major_tag | 27, length)
def encode_semantic(encoder, tag, value):
encoder.write(encode_length(0xC0, tag))
encoder.encode(value)
def encode_float(encoder, value):
# Handle special values efficiently
import math
if math.isnan(value):
encoder.write(b"\xf9\x7e\x00")
elif math.isinf(value):
encoder.write(b"\xf9\x7c\x00" if value > 0 else b"\xf9\xfc\x00")
else:
encoder.write(struct.pack(">Bd", 0xFB, value))
def encode_int(encoder, value):
# Big integers (2 ** 64 and over)
if value >= 18446744073709551616 or value < -18446744073709551616:
if value >= 0:
major_type = 0x02
else:
major_type = 0x03
value = -value - 1
values = []
while value > 0:
value, remainder = divmod(value, 256)
values.insert(0, remainder)
payload = bytes(values)
encode_semantic(encoder, major_type, payload)
elif value >= 0:
encoder.write(encode_length(0, value))
else:
encoder.write(encode_length(0x20, abs(value) - 1))
def encode_bytestring(encoder, value):
encoder.write(encode_length(0x40, len(value)) + value)
def encode_bytearray(encoder, value):
encode_bytestring(encoder, bytes(value))
def encode_string(encoder, value):
encoded = value.encode("utf-8")
encoder.write(encode_length(0x60, len(encoded)) + encoded)
def METHOD_NAME(encoder, value):
encoder.write(encode_length(0xA0, len(value)))
for key, val in value.items():
encoder.encode(key)
encoder.encode(val)
def encode_array(encoder, value):
encoder.write(encode_length(0x80, len(value)))
for item in value:
encoder.encode(item)
def encode_boolean(encoder, value):
encoder.write(b"\xf5" if value else b"\xf4")
def encode_none(encoder, value):
encoder.write(b"\xf6")
cbor_encoders = { # supported data types and the encoder to use.
bytes: encode_bytestring,
bytearray: encode_bytearray,
str: encode_string,
int: encode_int,
float: encode_float,
bool: encode_boolean,
type(None): encode_none,
list: encode_array,
dict: METHOD_NAME,
}
class CBOREncoder(object):
"""
Serializes objects to a byte stream using Concise Binary Object Representation.
"""
def __init__(self, fp):
self.fp = fp
def _find_encoder(self, obj):
return cbor_encoders.get(type(obj))  # None signals an unsupported type to encode()
def write(self, data):
"""
Write bytes to the data stream.
:param data: the bytes to write
"""
self.fp.write(data)
def encode(self, obj):
"""
Encode the given object using CBOR.
:param obj: the object to encode
"""
encoder = self._find_encoder(obj)
if not encoder:
raise CBOREncodeError("cannot serialize type %s" % type(obj))
encoder(self, obj)
def dumps(obj, **kwargs):
"""
Serialize an object to a bytestring.
:param obj: the object to serialize
:param kwargs: keyword arguments passed to :class:`~.CBOREncoder`
:return: the serialized output
:rtype: bytes
"""
fp = io.BytesIO()
dump(obj, fp, **kwargs)
return fp.getvalue()
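# Illustrative encodings (values follow from the encoders above):
#   dumps({"k": 1}) -> b'\xa1ak\x01'
#   dumps([1, "a"]) -> b'\x82\x01aa'
#   dumps(True)     -> b'\xf5'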
def dump(obj, fp, **kwargs):
"""
Serialize an object to a file.
:param obj: the object to serialize
:param fp: a file-like object
:param kwargs: keyword arguments passed to :class:`~.CBOREncoder`
"""
CBOREncoder(fp, **kwargs).encode(obj) |
245 | test capability check | # vim:ts=4:sw=4:et:
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import pywatchman
import pywatchman.capabilities
from watchman.integration.lib import WatchmanTestCase
@WatchmanTestCase.expand_matrix
class TestCapabilities(WatchmanTestCase.WatchmanTestCase):
def test_capabilities(self) -> None:
client = self.getClient()
res = client.query("version")
self.assertFalse("error" in res, "version with no args still works")
res = client.query("version", {"optional": ["term-match", "will-never-exist"]})
self.assertDictEqual(
res["capabilities"], {"term-match": True, "will-never-exist": False}
)
res = client.query(
"version", {"required": ["term-match"], "optional": ["will-never-exist"]}
)
self.assertDictEqual(
res["capabilities"], {"term-match": True, "will-never-exist": False}
)
self.assertFalse("error" in res, "no error for missing optional")
with self.assertRaisesRegex(
pywatchman.CommandError,
"client required capabilities \\[will-never-exist\\] not "
+ "supported by this server",
):
client.query("version", {"required": ["term-match", "will-never-exist"]})
def METHOD_NAME(self) -> None:
client = self.getClient()
res = client.capabilityCheck(optional=["term-match", "will-never-exist"])
self.assertDictEqual(
res["capabilities"], {"term-match": True, "will-never-exist": False}
)
res = client.capabilityCheck(
required=["term-match"], optional=["will-never-exist"]
)
self.assertDictEqual(
res["capabilities"], {"term-match": True, "will-never-exist": False}
)
with self.assertRaisesRegex(
pywatchman.CommandError,
"client required capabilities \\[will-never-exist\\] not "
+ "supported by this server",
):
client.capabilityCheck(required=["term-match", "will-never-exist"])
def test_capabilitySynth(self) -> None:
res = pywatchman.capabilities.synthesize(
{"version": "1.0"}, {"optional": ["will-never-exist"], "required": []}
)
self.assertDictEqual(
res, {"version": "1.0", "capabilities": {"will-never-exist": False}}
)
res = pywatchman.capabilities.synthesize(
{"version": "1.0"}, {"required": ["will-never-exist"], "optional": []}
)
self.assertDictEqual(
res,
{
"version": "1.0",
"error": "client required capabilities [will-never-exist] "
+ "not supported by this server",
"capabilities": {"will-never-exist": False},
},
)
res = pywatchman.capabilities.synthesize(
{"version": "3.2"}, {"optional": ["relative_root"], "required": []}
)
self.assertDictEqual(
res, {"version": "3.2", "capabilities": {"relative_root": False}}
)
res = pywatchman.capabilities.synthesize(
{"version": "3.3"}, {"optional": ["relative_root"], "required": []}
)
self.assertDictEqual(
res, {"version": "3.3", "capabilities": {"relative_root": True}}
)
def test_full_capability_set(self) -> None:
client = self.getClient()
res = client.listCapabilities()
expected = {
"bser-v2",
"clock-sync-timeout",
"cmd-clock",
"cmd-debug-ageout",
"cmd-debug-contenthash",
"cmd-debug-drop-privs",
"cmd-debug-get-asserted-states",
"cmd-debug-get-subscriptions",
"cmd-debug-poison",
"cmd-debug-recrawl",
"cmd-debug-root-status",
"cmd-debug-set-parallel-crawl",
"cmd-debug-set-subscriptions-paused",
"cmd-debug-show-cursors",
"cmd-debug-status",
"cmd-debug-symlink-target-cache",
"cmd-debug-watcher-info",
"cmd-debug-watcher-info-clear",
"cmd-find",
"cmd-flush-subscriptions",
"cmd-get-config",
"cmd-get-log",
"cmd-get-pid",
"cmd-get-sockname",
"cmd-global-log-level",
"cmd-list-capabilities",
"cmd-log",
"cmd-log-level",
"cmd-query",
"cmd-shutdown-server",
"cmd-since",
"cmd-state-enter",
"cmd-state-leave",
"cmd-subscribe",
"cmd-trigger",
"cmd-trigger-del",
"cmd-trigger-list",
"cmd-unsubscribe",
"cmd-version",
"cmd-watch",
"cmd-watch-del",
"cmd-watch-del-all",
"cmd-watch-list",
"cmd-watch-project",
"dedup_results",
"field-atime",
"field-atime_f",
"field-atime_ms",
"field-atime_ns",
"field-atime_us",
"field-cclock",
"field-content.sha1hex",
"field-ctime",
"field-ctime_f",
"field-ctime_ms",
"field-ctime_ns",
"field-ctime_us",
"field-dev",
"field-exists",
"field-gid",
"field-ino",
"field-mode",
"field-mtime",
"field-mtime_f",
"field-mtime_ms",
"field-mtime_ns",
"field-mtime_us",
"field-name",
"field-new",
"field-nlink",
"field-oclock",
"field-size",
"field-symlink_target",
"field-type",
"field-uid",
"glob_generator",
"relative_root",
"saved-state-local",
"scm-git",
"scm-hg",
"scm-since",
"suffix-set",
"term-allof",
"term-anyof",
"term-dirname",
"term-empty",
"term-exists",
"term-false",
"term-idirname",
"term-imatch",
"term-iname",
"term-ipcre",
"term-match",
"term-name",
"term-not",
"term-pcre",
"term-since",
"term-size",
"term-suffix",
"term-true",
"term-type",
"watcher-eden",
"wildmatch",
"wildmatch-multislash",
}
if sys.platform == "darwin":
expected.add("watcher-fsevents")
expected.add("watcher-kqueue")
expected.add("watcher-kqueue+fsevents")
expected.add("cmd-debug-kqueue-and-fsevents-recrawl")
expected.add("cmd-debug-fsevents-inject-drop")
elif sys.platform == "linux":
expected.add("watcher-inotify")
elif sys.platform == "win32":
expected.add("watcher-win32")
if os.environ.get("TESTING_VIA_BUCK", "0") == "1":
expected.add("saved-state-manifold")
unimportant = {
"cmd-debug-prof-dump",
}
self.assertEqual(
expected,
set(res) - unimportant,
) |
246 | crender colors | #!/usr/bin/env python3
# coding: utf-8
"""
Modified from https://raw.githubusercontent.com/YadiraF/PRNet/master/utils/render.py
"""
import numpy as np
import cython
from .cython import mesh_core_cython
from .params import pncc_code
__author__ = 'cleardusk'
__all__ = ['cython']
def is_point_in_tri(point, tri_points):
''' Judge whether the point is in the triangle
Method:
http://blackpawn.com/texts/pointinpoly/
Args:
point: [u, v] or [x, y]
tri_points: three vertices (2d points) of a triangle. 2 coords x 3 vertices
Returns:
bool: true for in triangle
'''
tp = tri_points
# vectors
v0 = tp[:, 2] - tp[:, 0]
v1 = tp[:, 1] - tp[:, 0]
v2 = point - tp[:, 0]
# dot products
dot00 = np.dot(v0.T, v0)
dot01 = np.dot(v0.T, v1)
dot02 = np.dot(v0.T, v2)
dot11 = np.dot(v1.T, v1)
dot12 = np.dot(v1.T, v2)
# barycentric coordinates
if dot00 * dot11 - dot01 * dot01 == 0:
inverDeno = 0
else:
inverDeno = 1 / (dot00 * dot11 - dot01 * dot01)
u = (dot11 * dot02 - dot01 * dot12) * inverDeno
v = (dot00 * dot12 - dot01 * dot02) * inverDeno
# check if point in triangle
return (u >= 0) & (v >= 0) & (u + v < 1)
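# Illustrative check (hypothetical triangle): for tri_points whose columns are
# (0, 0), (0, 1) and (1, 0), the point (0.25, 0.25) gives u = v = 0.25, so the
# test above returns True; (1, 1) gives u + v = 2 and returns False.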
def render_colors(vertices, colors, tri, h, w, c=3):
""" render mesh by z buffer
Args:
vertices: 3 x nver
colors: 3 x nver
tri: 3 x ntri
h: height
w: width
"""
# initial
image = np.zeros((h, w, c))
depth_buffer = np.zeros([h, w]) - 999999.
# triangle depth: approximate the depth to the average value of z in each vertex(v0, v1, v2), since the vertices
# are closed to each other
tri_depth = (vertices[2, tri[0, :]] + vertices[2, tri[1, :]] + vertices[2, tri[2, :]]) / 3.
tri_tex = (colors[:, tri[0, :]] + colors[:, tri[1, :]] + colors[:, tri[2, :]]) / 3.
for i in range(tri.shape[1]):
tri_idx = tri[:, i] # 3 vertex indices
# the inner bounding box
umin = max(int(np.ceil(np.min(vertices[0, tri_idx]))), 0)
umax = min(int(np.floor(np.max(vertices[0, tri_idx]))), w - 1)
vmin = max(int(np.ceil(np.min(vertices[1, tri_idx]))), 0)
vmax = min(int(np.floor(np.max(vertices[1, tri_idx]))), h - 1)
if umax < umin or vmax < vmin:
continue
for u in range(umin, umax + 1):
for v in range(vmin, vmax + 1):
if tri_depth[i] > depth_buffer[v, u] and is_point_in_tri([u, v], vertices[:2, tri_idx]):
depth_buffer[v, u] = tri_depth[i]
image[v, u, :] = tri_tex[:, i]
return image
def get_depths_image(img, vertices_lst, tri):
h, w = img.shape[:2]
c = 1
depths_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
z = vertices[2, :]
z_min, z_max = min(z), max(z)
vertices[2, :] = (z - z_min) / (z_max - z_min)
z = vertices[2:, :]
depth_img = render_colors(vertices.T, z.T, tri.T, h, w, 1)
depths_img[depth_img > 0] = depth_img[depth_img > 0]
depths_img = depths_img.squeeze() * 255
return depths_img
def METHOD_NAME(vertices, triangles, colors, h, w, c=3, BG=None):
""" render mesh with colors
Args:
vertices: [nver, 3]
triangles: [ntri, 3]
colors: [nver, 3]
h: height
w: width
c: channel
BG: background image
Returns:
image: [h, w, c]. The rendered image.
"""
if BG is None:
image = np.zeros((h, w, c), dtype=np.float32)
else:
assert BG.shape[0] == h and BG.shape[1] == w and BG.shape[2] == c
image = BG.astype(np.float32).copy(order='C')
depth_buffer = np.zeros([h, w], dtype=np.float32, order='C') - 999999.
# to C order
vertices = vertices.astype(np.float32).copy(order='C')
triangles = triangles.astype(np.int32).copy(order='C')
colors = colors.astype(np.float32).copy(order='C')
mesh_core_cython.render_colors_core(
image, vertices, triangles,
colors,
depth_buffer,
vertices.shape[0], triangles.shape[0],
h, w, c
)
return image
def cget_depths_image(img, vertices_lst, tri):
"""cython version for depth image render"""
h, w = img.shape[:2]
c = 1
depths_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
z = vertices[2, :]
z_min, z_max = min(z), max(z)
vertices[2, :] = (z - z_min) / (z_max - z_min)
z = vertices[2:, :]
depth_img = METHOD_NAME(vertices.T, tri.T, z.T, h, w, 1)
depths_img[depth_img > 0] = depth_img[depth_img > 0]
depths_img = depths_img.squeeze() * 255
return depths_img
def ncc(vertices):
# simple version
# ncc_vertices = np.zeros_like(vertices)
# x = vertices[0, :]
# y = vertices[1, :]
# z = vertices[2, :]
#
# ncc_vertices[0, :] = (x - min(x)) / (max(x) - min(x))
# ncc_vertices[1, :] = (y - min(y)) / (max(y) - min(y))
# ncc_vertices[2, :] = (z - min(z)) / (max(z) - min(z))
# matrix version
v_min = np.min(vertices, axis=1).reshape(-1, 1)
v_max = np.max(vertices, axis=1).reshape(-1, 1)
ncc_vertices = (vertices - v_min) / (v_max - v_min)
return ncc_vertices
def cpncc(img, vertices_lst, tri):
"""cython version for PNCC render: original paper"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
pncc_img = METHOD_NAME(vertices.T, tri.T, pncc_code.T, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img
def cpncc_v2(img, vertices_lst, tri):
"""cython version for PNCC render"""
h, w = img.shape[:2]
c = 3
pnccs_img = np.zeros((h, w, c))
for i in range(len(vertices_lst)):
vertices = vertices_lst[i]
ncc_vertices = ncc(vertices)
pncc_img = METHOD_NAME(vertices.T, tri.T, ncc_vertices.T, h, w, c)
pnccs_img[pncc_img > 0] = pncc_img[pncc_img > 0]
pnccs_img = pnccs_img.squeeze() * 255
return pnccs_img
def main():
pass
if __name__ == '__main__':
main() |
247 | clamp | ################################################################################
# Copyright (C) 2023 Maxim Integrated Products, Inc., All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Maxim Integrated
# Products, Inc. shall not be used except as stated in the Maxim Integrated
# Products, Inc. Branding Policy.
#
# The mere transfer of this software does not imply any licenses
# of trade secrets, proprietary technology, copyrights, patents,
# trademarks, maskwork rights, or any other form of intellectual
# property whatsoever. Maxim Integrated Products, Inc. retains all
# ownership rights.
#
###############################################################################
import struct
from PIL import Image
def swap32(i):
return struct.unpack("<I", struct.pack(">I", i))[0]
def METHOD_NAME(n, smallest, largest):
return max(smallest, min(n, largest))
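# Illustrative behaviour of the clamp above:
#   METHOD_NAME(300, 0, 255) -> 255
#   METHOD_NAME(-5, 0, 255)  -> 0
#   METHOD_NAME(42, 0, 255)  -> 42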
def yuv422_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
Y = byte1
U = byte2
Y1 = byte3
V = byte4
R1 = Y + 1.4075 * (V - 128)
G1 = Y - 0.3455 * (U - 128) - (0.7169 * (V - 128))
B1 = Y + 1.7790 * (U - 128)
R2 = Y1 + 1.4075 * (V - 128)
G2 = Y1 - 0.3455 * (U - 128) - (0.7169 * (V - 128))
B2 = Y1 + 1.7790 * (U - 128)
img.append(METHOD_NAME(int(R1), 0, 255))
img.append(METHOD_NAME(int(G1), 0, 255))
img.append(METHOD_NAME(int(B1), 0, 255))
img.append(METHOD_NAME(int(R2), 0, 255))
img.append(METHOD_NAME(int(G2), 0, 255))
img.append(METHOD_NAME(int(B2), 0, 255))
return img
def yuv422_to_blackAndWhite(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
Y = byte1
U = byte2
Y1 = byte3
V = byte4
r = Y
g = Y
b = Y
img.append(r)
img.append(g)
img.append(b)
r = Y1
g = Y1
b = Y1
img.append(r)
img.append(g)
img.append(b)
return img
def rgb888_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
r = byte1
g = byte2
b = byte3
img.append(r)
img.append(g)
img.append(b)
return img
def blackAndWhite_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence)):
byte1 = bytesequence[i]
r = byte1
g = byte1
b = byte1
img.append(r)
img.append(g)
img.append(b)
return img
def rgb565_to_rgb(bytesequence):
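    # Each 4-byte group holds two RGB565 pixels stored in swapped order; the 5/6/5-bit red,
    # green and blue fields are extracted and rescaled to the 0-255 range.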
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 2]
byte2 = bytesequence[offset + 3]
byte3 = bytesequence[offset + 0]
byte4 = bytesequence[offset + 1]
pixel1 = byte1 * 0x100 + byte2
r1 = (pixel1 >> 11) & 0x1f
g1 = (pixel1 >> 5) & 0x3f
b1 = (pixel1 >> 0) & 0x1f
r1 = (r1 * 255) / 31
g1 = (g1 * 255) / 63
b1 = (b1 * 255) / 31
pixel2 = byte3 * 0x100 + byte4
r2 = (pixel2 >> 11) & 0x1f
g2 = (pixel2 >> 5) & 0x3f
b2 = (pixel2 >> 0) & 0x1f
r2 = (r2 * 255) / 31
g2 = (g2 * 255) / 63
b2 = (b2 * 255) / 31
img.append(int(r2))
img.append(int(g2))
img.append(int(b2))
img.append(int(r1))
img.append(int(g1))
img.append(int(b1))
return img
def rgb555_to_rgb(bytesequence):
img = []
for i in range(len(bytesequence) // 4):
offset = i * 4
byte1 = bytesequence[offset + 0]
byte2 = bytesequence[offset + 1]
byte3 = bytesequence[offset + 2]
byte4 = bytesequence[offset + 3]
value = byte1 * 0x100 + byte2
r = (value & 0x7C00) >> 10
g = (value & 0x03e0) >> 5
b = (value & 0x001f) >> 0
img.append(r)
img.append(g)
img.append(b)
value = byte3 * 0x100 + byte4
        r = (value & 0x7C00) >> 10  # red occupies bits 10-14, matching the first pixel above
g = (value & 0x03e0) >> 5
b = (value & 0x001f) >> 0
img.append(r)
img.append(g)
img.append(b)
return img
#
# generate_img
#
def generate_img(output, color, resolution):
# img = Image.open(backdrop)
img = Image.new("RGB", resolution, color)
img.save(output, quality=100)
return img
def convert(bytesequence, outputfile, xres, yres, pixelformat):
image = []
if (pixelformat == "YUV422"):
imagepixels = yuv422_to_rgb(bytesequence)
elif (pixelformat == "RGB555"):
imagepixels = rgb555_to_rgb(bytesequence)
elif (pixelformat == "RGB565"):
imagepixels = rgb565_to_rgb(bytesequence)
elif (pixelformat == "RGB888"):
imagepixels = rgb888_to_rgb(bytesequence)
elif (pixelformat == "GRAYSCALE"): #Black and white yuv422
imagepixels = blackAndWhite_to_rgb(bytesequence)
elif (pixelformat == "BAYER"): #Black and white raw
imagepixels = blackAndWhite_to_rgb(bytesequence)
offset = 0
for i in range(yres):
line = []
offset = (xres * 3) * i
for j in range(xres * 3):
line.append(imagepixels[j + offset])
image.append(line)
# print("Output image to file xres {}, yres {}".format(xres,yres), flush=True)
g_pil_image = generate_img(outputfile, (0, 0, 0), (xres, yres))
x = 0
y = 0
for i in range(int(len(imagepixels) / 3)):
color_r = imagepixels[i * 3 + 0]
color_g = imagepixels[i * 3 + 1]
color_b = imagepixels[i * 3 + 2]
g_pil_image.putpixel( (x, y), (color_r, color_g, color_b, 255))
x = x + 1
if x > (xres - 1):
x = 0
y = y + 1
if y > (yres - 1):
break
g_pil_image.save(outputfile)
|
248 | test read prefix | # Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""Tests for Odb backends."""
# Standard Library
import binascii
from pathlib import Path
import pytest
# pygit2
import pygit2
from . import utils
BLOB_HEX = 'af431f20fc541ed6d5afede3e2dc7160f6f01f16'
BLOB_RAW = binascii.unhexlify(BLOB_HEX.encode('ascii'))
BLOB_OID = pygit2.Oid(raw=BLOB_RAW)
@pytest.fixture
def odb(barerepo):
yield barerepo.odb, Path(barerepo.path) / 'objects'
def test_pack(odb):
odb, path = odb
pack = pygit2.OdbBackendPack(path)
assert len(list(pack)) > 0
for obj in pack:
assert obj in odb
def test_loose(odb):
odb, path = odb
pack = pygit2.OdbBackendLoose(path, 5, False)
assert len(list(pack)) > 0
for obj in pack:
assert obj in odb
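# ProxyBackend simply forwards every Odb callback (read, read_prefix, exists, refresh, iteration)
# to a wrapped backend, which is enough to exercise the custom-backend API.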
class ProxyBackend(pygit2.OdbBackend):
def __init__(self, source):
super().__init__()
self.source = source
def read_cb(self, oid):
return self.source.read(oid)
def read_prefix_cb(self, oid):
return self.source.read_prefix(oid)
def read_header_cb(self, oid):
typ, data = self.source.read(oid)
return typ, len(data)
def exists_cb(self, oid):
return self.source.exists(oid)
def exists_prefix_cb(self, oid):
return self.source.exists_prefix(oid)
def refresh_cb(self):
self.source.refresh()
def __iter__(self):
return iter(self.source)
#
# Test a custom object backend alone (without adding it to an ODB)
# This doesn't make much sense, but it's possible.
#
@pytest.fixture
def proxy(barerepo):
path = Path(barerepo.path) / 'objects'
yield ProxyBackend(pygit2.OdbBackendPack(path))
def test_iterable(proxy):
assert BLOB_HEX in [str(o) for o in proxy]
def test_read(proxy):
with pytest.raises(TypeError):
proxy.read(123)
utils.assertRaisesWithArg(KeyError, '1' * 40, proxy.read, '1' * 40)
ab = proxy.read(BLOB_OID)
a = proxy.read(BLOB_HEX)
assert ab == a
assert (pygit2.GIT_OBJ_BLOB, b'a contents\n') == a
def METHOD_NAME(proxy):
a_hex_prefix = BLOB_HEX[:4]
a3 = proxy.read_prefix(a_hex_prefix)
assert (pygit2.GIT_OBJ_BLOB, b'a contents\n', BLOB_OID) == a3
def test_exists(proxy):
with pytest.raises(TypeError):
proxy.exists(123)
assert proxy.exists('1' * 40) == False
assert proxy.exists(BLOB_HEX) == True
def test_exists_prefix(proxy):
a_hex_prefix = BLOB_HEX[:4]
a3 = proxy.exists_prefix(a_hex_prefix)
assert BLOB_HEX == a3.hex
#
# Test a custom object backend, through a Repository.
#
@pytest.fixture
def repo(barerepo):
odb = pygit2.Odb()
path = Path(barerepo.path) / 'objects'
backend = pygit2.OdbBackendPack(path)
backend = ProxyBackend(backend)
odb.add_backend(backend, 1)
repo = pygit2.Repository()
repo.set_odb(odb)
yield repo
def test_repo_read(repo):
with pytest.raises(TypeError):
repo[123]
utils.assertRaisesWithArg(KeyError, '1' * 40, repo.__getitem__, '1' * 40)
ab = repo[BLOB_OID]
a = repo[BLOB_HEX]
assert ab == a |
249 | should install | from __future__ import annotations
import functools
import os
import subprocess
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Any
from lib.amazon import list_s3_artifacts
from lib.installable.installable import Installable
from lib.installation_context import InstallationContext
from lib.staging import StagingDir
import logging
_LOGGER = logging.getLogger(__name__)
@functools.lru_cache(maxsize=512)
def s3_available_rust_artifacts(prefix):
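    # Lists the .tar.gz artifact names under dist/ in the static-rust-lang-org bucket, with the
    # dist/ prefix stripped; results are memoised via lru_cache to avoid repeated S3 listings.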
dist_prefix = "dist/"
return [
compiler[len(dist_prefix) :]
for compiler in list_s3_artifacts("static-rust-lang-org", dist_prefix + prefix)
if compiler.endswith(".tar.gz")
]
class RustInstallable(Installable):
def __init__(self, install_context: InstallationContext, config: Dict[str, Any]):
super().__init__(install_context, config)
self.install_path = self.config_get("dir")
self._setup_check_exe(self.install_path)
self.base_package = self.config_get("base_package")
self.nightly_install_days = self.config_get("nightly_install_days", 0)
self.patchelf = self.config_get("patchelf")
self.depends_by_name.append(self.patchelf)
@property
def nightly_like(self) -> bool:
return self.nightly_install_days > 0
def do_rust_install(self, staging: StagingDir, component: str, install_to: Path) -> None:
url = f"https://static.rust-lang.org/dist/{component}.tar.gz"
untar_to = staging.path / "__temp_install__"
self.install_context.fetch_url_and_pipe_to(staging, url, ["tar", "zxf", "-", "--strip-components=1"], untar_to)
self.install_context.stage_command(
staging, ["./install.sh", f"--prefix={install_to}", "--verbose"], cwd=untar_to
)
self.install_context.remove_dir(untar_to)
def set_rpath(self, elf_file: Path, rpath: str) -> None:
patchelf = (
self.install_context.destination / self.find_dependee(self.patchelf).install_path / "bin" / "patchelf"
)
_LOGGER.info("Setting rpath of %s to %s", elf_file, rpath)
subprocess.check_call([patchelf, "--set-rpath", rpath, elf_file])
def stage(self, staging: StagingDir) -> None:
arch_std_prefix = f"rust-std-{self.target_name}-"
suffix = ".tar.gz"
architectures = [
artifact[len(arch_std_prefix) : -len(suffix)] for artifact in s3_available_rust_artifacts(arch_std_prefix)
]
self._logger.info("Installing for these architectures: %s", ", ".join(architectures or ["none"]))
base_path = staging.path / f"rust-{self.target_name}"
self.do_rust_install(staging, self.base_package, base_path)
for architecture in architectures:
self.do_rust_install(staging, f"rust-std-{self.target_name}-{architecture}", base_path)
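        # Patch rpaths so the relocated toolchain resolves its bundled libraries relative to the
        # install directory rather than via absolute build-time paths.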
for binary in (b for b in (base_path / "bin").glob("*") if self.install_context.is_elf(b)):
self.set_rpath(binary, "$ORIGIN/../lib")
for shared_object in (base_path / "lib").glob("*.so"):
self.set_rpath(shared_object, "$ORIGIN")
self.install_context.remove_dir(base_path / "share")
def METHOD_NAME(self) -> bool:
if self.nightly_install_days > 0:
dest_dir = self.install_context.destination / self.install_path
if os.path.exists(dest_dir):
dtime = datetime.fromtimestamp(dest_dir.stat().st_mtime)
                # The 30-minute fudge factor roughly accounts for installation time. Without it,
                # a run starting at the same time the next day would see a build that is only
                # 23h58m old and skip the reinstall.
age = datetime.now() - dtime + timedelta(minutes=30)
self._logger.info("Nightly build %s is %s old", dest_dir, age)
if age.days > self.nightly_install_days:
return True
return super().METHOD_NAME()
def verify(self) -> bool:
if not super().verify():
return False
with self.install_context.new_staging_dir() as staging:
self.stage(staging)
return self.install_context.compare_against_staging(staging, self.install_path)
def install(self) -> None:
super().install()
with self.install_context.new_staging_dir() as staging:
self.stage(staging)
self.install_context.move_from_staging(staging, self.install_path)
def __repr__(self) -> str:
return f"RustInstallable({self.name}, {self.install_path})"
class CratesIOInstallable(Installable):
def is_installed(self) -> bool:
return True |
250 | process package | # Python Tools for Visual Studio
# Copyright(c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the License); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
from lxml import html
import requests
# How to use this script:
# Install lxml and requests.
# Run the script and type 1 to add comments in the output file noting which packages were updated, or 0 for no comments.
#Global variables
ORIGINAL_PACKAGE_CONFIG_FILE = "packages.config"
NEW_PACKAGE_CONFIG_FILE = "updated_packages.config"
IGNORE_PACKAGES = ["Microsoft.VisualStudio.Python.LanguageServer", "Microsoft.Internal.VisualStudio.Shell.Embeddable"]
NEW_CONFIG_FILE_OUTPUT = []
def process_latest_version(package_id, old_version, new_version, show_update_comment):
if old_version == new_version:
NEW_CONFIG_FILE_OUTPUT.append("<package id=\"" + package_id +"\" version = \"" + new_version + "\" />")
print("\tNewer version not available")
else:
if show_update_comment:
NEW_CONFIG_FILE_OUTPUT.append("<package id=\"" + package_id +"\" version = \"" + new_version + "\" /> <!-- " + old_version + " -> " + new_version + " -->")
else:
NEW_CONFIG_FILE_OUTPUT.append("<package id=\"" + package_id +"\" version = \"" + new_version + "\" />")
print("-------------------------------------------- Newer version detected --------------------------------------------")
print("\tOld version = \"" + old_version + "\"")
print("\tNew version = \"" + new_version + "\"")
def METHOD_NAME(line, package_id, version, show_update_comment):
if package_id.startswith("Microsoft.") and (package_id not in IGNORE_PACKAGES):
latest_version = get_latest_package_version(url=("https://www.nuget.org/packages/" + package_id))
process_latest_version(package_id=package_id, old_version=version, new_version=latest_version, show_update_comment=show_update_comment)
else:
print("\tEither package ID does not begin with \"Microsoft.\" or it is being ignored: \n")
NEW_CONFIG_FILE_OUTPUT.append(line)
def get_latest_package_version(url):
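    # Scrapes the package's nuget.org page and reads the newest entry of the version-history
    # table from its "title" attribute; this is tied to the page markup and may break if it changes.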
html_page = requests.get(url)
xml_tree = html.fromstring(html_page.content)
version_elements = xml_tree.xpath('//div[@class="version-history panel-collapse collapse in"]/table/tbody[@class="no-border"]/tr/td/a')
version = version_elements[0].attrib["title"]
return version
def main():
show_update_comment = False #if true, it will put comments in the output file stating the packages that were updated
if input("Show comments in output file (type 1 for true and 0 for false): ") == "1":
show_update_comment = True
with open(ORIGINAL_PACKAGE_CONFIG_FILE, 'r') as f:
for line in f.readlines():
try:
line = line.strip()
print("Input: \"" + line.strip() + "\"")
if line.strip().startswith('<package id'):
elements = html.fromstring(line).xpath("//package")
package_id = elements[0].attrib["id"]
version = elements[0].attrib["version"]
METHOD_NAME(line=line, package_id=package_id, version=version, show_update_comment= show_update_comment)
else:
                    print("\tResult: Line does not start with \"<package id\", so it's being ignored. Ignored packages are configured in the script \n")
NEW_CONFIG_FILE_OUTPUT.append(line)
print("\n\n")
except Exception as exception:
print("UNKNOWN ERROR")
print(exception)
#Printing the results
for package in IGNORE_PACKAGES:
print("Ignored package: " + package + "\n")
with open(NEW_PACKAGE_CONFIG_FILE, 'w') as file_handle:
for item in NEW_CONFIG_FILE_OUTPUT:
if item.strip().startswith("<package id="):
item = " " + item
file_handle.write(item + "\n")
print("\n\n")
print("Results have been written to: " + NEW_PACKAGE_CONFIG_FILE)
print("\n")
    print("-------------------------------------- Finished Executing --------------------------------------")
if __name__ == '__main__':
main() |
251 | phi phi matrix |
import numpy as np
import sympy as sp
class LagrangeFEMSpace:
def __init__(self, GD):
self.GD = int(GD)
t = 'l0'
for i in range(1, GD+1):
t = t+', l%d'%(i)
self.l = sp.symbols(t, real=True)
def number_of_dofs(self, p):
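        # Dimension of the degree-p polynomial space on a GD-simplex: binomial(p + GD, GD).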
GD = self.GD
val = 1
for i in range(1, GD+1):
val *= (i+p)/i
return int(val)
def multi_index_matrix(self, p):
ldof = self.number_of_dofs(p)
GD = self.GD
if GD==1:
multiIndex = np.zeros((ldof, 2), dtype=np.int_)
multiIndex[:, 0] = np.arange(p, -1, -1)
multiIndex[:, 1] = p - multiIndex[:, 0]
elif GD==2:
idx = np.arange(0, ldof)
idx0 = np.floor((-1 + np.sqrt(1 + 8*idx))/2)
multiIndex = np.zeros((ldof, 3), dtype=np.int_)
multiIndex[:, 2] = idx - idx0*(idx0 + 1)/2
multiIndex[:, 1] = idx0 - multiIndex[:,2]
multiIndex[:, 0] = p - multiIndex[:, 1] - multiIndex[:, 2]
elif GD==3:
idx = np.arange(1, ldof)
idx0 = (3*idx + np.sqrt(81*idx*idx - 1/3)/3)**(1/3)
idx0 = np.floor(idx0 + 1/idx0/3 - 1 + 1e-4) # a+b+c
idx1 = idx - idx0*(idx0 + 1)*(idx0 + 2)/6
idx2 = np.floor((-1 + np.sqrt(1 + 8*idx1))/2) # b+c
multiIndex = np.zeros((ldof, 4), dtype=np.int_)
multiIndex[1:, 3] = idx1 - idx2*(idx2 + 1)/2
multiIndex[1:, 2] = idx2 - multiIndex[1:, 3]
multiIndex[1:, 1] = idx0 - idx2
multiIndex[:, 0] = p - np.sum(multiIndex[:, 1:], axis=1)
return multiIndex
def basis(self, p):
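        # Degree-p Lagrange shape functions in barycentric coordinates: A[i, j] collects the
        # product (p*l_j - 0)(p*l_j - 1)...(p*l_j - (i-1))/i!, and each basis function is the
        # product of one such factor per barycentric variable, selected by its multi-index.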
l = self.l
GD = self.GD
ldof = self.number_of_dofs(p)
A = sp.ones(p+1, GD+1)
for i in range(1, p+1):
for j in range(GD+1):
A[i, j] = (p*l[j] - (i-1))*A[i-1, j]
for i in range(1,p+1):
A[i,:] /= sp.factorial(i)
mi = self.multi_index_matrix(p)
phi = sp.ones(1, ldof)
for i in range(ldof):
for j in range(GD+1):
phi[i] *= A[mi[i, j], j]
return phi
def multi_index(self, monoial):
"""
        @brief Multi-index of the exponents of a monomial in the barycentric variables
"""
l = self.l
GD = self.GD
m = monoial.as_powers_dict()
        a = np.zeros(GD+1, dtype=np.int_)  # exponent multi-index to be returned
for i in range(GD+1):
a[i] = int(m.get(l[i]) or 0)
return a
def integrate(self, f):
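        # Integrates a polynomial in the barycentric variables over the reference simplex using
        # the classical identity: the integral of l0^a0 * ... * lGD^aGD equals
        # GD! * a0! * ... * aGD! / (a0 + ... + aGD + GD)!  (the simplex measure is factored out).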
GD = self.GD
f = f.expand()
        r = 0  # value of the integral
for m in f.as_coeff_add()[1]:
            c = m.as_coeff_mul()[0]  # coefficient of the monomial
            a = self.multi_index(m)  # exponent multi-index of the monomial
temp = 1
for i in range(GD+1):
temp *= sp.factorial(a[i])
r += sp.factorial(GD)*c*temp/sp.factorial(sum(a)+GD)
return r + f.as_coeff_add()[0]
def METHOD_NAME(self, p1, p2, p3=None):
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
M = sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof1*ldof2),(1,ldof1,ldof2))
for i in range(ldof1):
for j in range(ldof2):
M[0,i, j] = self.integrate(phi1[i]*phi2[j])
return M
def gphi_gphi_matrix(self, p1, p2):
l = self.l
GD = self.GD
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
S =sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof1*ldof2*(GD+1))*(GD+1)\
, (ldof1,ldof2,GD+1,GD+1))
for i in range(ldof1):
for j in range(ldof2):
for m in range(GD + 1):
for n in range(GD + 1):
temp= sp.diff(phi1[i],l[m])*sp.diff(phi2[j],l[n])
S[i,j,m,n] = self.integrate(temp)
return S
def gphi_phi_matrix(self, p1, p2):
l = self.l
GD = self.GD
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
#S = np.zeros(shape = (ldof1, ldof2, GD+1))
S =sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof1*ldof2*(GD+1))\
,(ldof1, ldof2 ,GD+1))
for i in range(ldof1):
for j in range(ldof2):
for n in range(GD + 1):
temp= sp.diff(phi1[i],l[n])*phi2[j]
S[i,j,n] = self.integrate(temp)
return S
def phi_gphi_phi_matrix(self, p1, p2, p3):
l = self.l
GD = self.GD
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
ldof3 = self.number_of_dofs(p3)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
phi3 = self.basis(p3)
#S = np.zeros(shape = (ldof1, ldof2, ldof3, GD+1))
S =sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof1*ldof2*ldof3*(GD+1))\
,(ldof1, ldof2 ,ldof3, GD+1))
for i in range(ldof1):
for j in range(ldof2):
for k in range(ldof3):
for n in range(GD + 1):
temp= phi1[i]*sp.diff(phi2[j],l[n])*phi3[k]
S[i, j, k, n] = self.integrate(temp)
return S
def phi_phi_phi_matrix(self, p1, p2, p3):
l = self.l
GD = self.GD
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
ldof3 = self.number_of_dofs(p3)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
phi3 = self.basis(p3)
#S = np.zeros(shape = (ldof1, ldof2, ldof3))
S = sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof1*ldof2*ldof3)\
,(ldof1, ldof2 ,ldof3))
for i in range(ldof1):
for j in range(ldof2):
for k in range(ldof3):
temp= phi1[i]*phi2[j]*phi3[k]
S[i, j, k] = self.integrate(temp)
return S
def gphi_gphi_phi_matrix(self, p1, p2, p3):
l = self.l
GD = self.GD
ldof1 = self.number_of_dofs(p1)
ldof2 = self.number_of_dofs(p2)
ldof3 = self.number_of_dofs(p3)
phi1 = self.basis(p1)
phi2 = self.basis(p2)
phi3 = self.basis(p3)
S=sp.tensor.array.MutableDenseNDimArray(sp.zeros(ldof3*ldof1*ldof2*(GD+1)*(GD+1))\
, (ldof1,ldof2,ldof3,GD+1,GD+1))
for i in range(ldof1):
for j in range(ldof2):
for k in range(ldof3):
for m in range(GD + 1):
for n in range(GD + 1):
temp = sp.diff(phi1[i],l[m])*sp.diff(phi2[j],l[n])*phi3[k]
S[i,j,k,m,n] = self.integrate(temp)
return S
if __name__ == "__main__":
from sympy import *
space = LagrangeFEMSpace(2)
M = space.gphi_gphi_phi_matrix(2, 2, 2)
print(M) |
252 | etag | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetSentinelOnboardingStateResult',
'AwaitableGetSentinelOnboardingStateResult',
'get_sentinel_onboarding_state',
'get_sentinel_onboarding_state_output',
]
@pulumi.output_type
class GetSentinelOnboardingStateResult:
"""
Sentinel onboarding state
"""
def __init__(__self__, customer_managed_key=None, METHOD_NAME=None, id=None, name=None, system_data=None, type=None):
if customer_managed_key and not isinstance(customer_managed_key, bool):
raise TypeError("Expected argument 'customer_managed_key' to be a bool")
pulumi.set(__self__, "customer_managed_key", customer_managed_key)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="customerManagedKey")
def customer_managed_key(self) -> Optional[bool]:
"""
Flag that indicates the status of the CMK setting
"""
return pulumi.get(self, "customer_managed_key")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSentinelOnboardingStateResult(GetSentinelOnboardingStateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSentinelOnboardingStateResult(
customer_managed_key=self.customer_managed_key,
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
system_data=self.system_data,
type=self.type)
def get_sentinel_onboarding_state(resource_group_name: Optional[str] = None,
sentinel_onboarding_state_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSentinelOnboardingStateResult:
"""
Get Sentinel onboarding state
Azure REST API version: 2023-02-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str sentinel_onboarding_state_name: The Sentinel onboarding state name. Supports - default
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['sentinelOnboardingStateName'] = sentinel_onboarding_state_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights:getSentinelOnboardingState', __args__, opts=opts, typ=GetSentinelOnboardingStateResult).value
return AwaitableGetSentinelOnboardingStateResult(
customer_managed_key=pulumi.get(__ret__, 'customer_managed_key'),
METHOD_NAME=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_sentinel_onboarding_state)
def get_sentinel_onboarding_state_output(resource_group_name: Optional[pulumi.Input[str]] = None,
sentinel_onboarding_state_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSentinelOnboardingStateResult]:
"""
Get Sentinel onboarding state
Azure REST API version: 2023-02-01.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str sentinel_onboarding_state_name: The Sentinel onboarding state name. Supports - default
:param str workspace_name: The name of the workspace.
"""
... |
253 | test piptools works with pyscaffold | import json
import os
import sys
from functools import partial
from pathlib import Path
from subprocess import CalledProcessError
import pytest
from pyscaffold import __version__ as pyscaffold_version
from pyscaffold.api import create_project
from pyscaffold.extensions.venv import Venv
from .helpers import find_venv_bin, run
pytestmark = [pytest.mark.slow, pytest.mark.system]
@pytest.fixture(autouse=True)
def dont_load_dotenv(monkeypatch):
"""pytest-virtualenv creates a `.env` directory by default, but `.env`
entries in the file system are loaded by Pipenv as dotenv files.
    To prevent errors from happening we have to disable this feature.
    Additionally, it seems that env vars have to be changed before using
    venv, so an autouse fixture is required (this part cannot go at the
    beginning of the test function).
"""
monkeypatch.setenv("PIPENV_DONT_LOAD_ENV", "1")
monkeypatch.setenv("PIPENV_IGNORE_VIRTUALENVS", "1")
monkeypatch.setenv("PIP_IGNORE_INSTALLED", "1")
monkeypatch.setenv("PIPENV_VERBOSITY", "-1")
@pytest.mark.skipif(
os.name == "nt", reason="pipenv fails due to colors (non-utf8) under Windows 10"
)
def test_pipenv_works_with_pyscaffold(tmpfolder, monkeypatch, venv):
# Given a project is created with pyscaffold
# and it has some dependencies in setup.cfg
create_project(project_path="myproj", requirements=["platformdirs"])
if any(ch in pyscaffold_version for ch in ("b", "a", "pre", "rc")):
flags = "--pre"
else:
flags = ""
with tmpfolder.join("myproj").as_cwd():
# When we install pipenv,
venv.run("pip install -v pipenv")
try:
venv.run("pipenv --bare install certifi")
# use it to proxy setup.cfg
venv.run(f"pipenv --bare install {flags} -e .")
# and install things to the dev env,
venv.run("pipenv --bare install --dev flake8")
# Then it should be able to generate a Pipfile.lock
venv.run("pipenv lock")
except Exception:
if sys.version_info[:2] <= (3, 6):
# TODO: Remove try...except when 3.6 is no longer supported
pytest.skip("Skip Pipenv specific problem for 3.6")
else:
raise
assert Path("Pipfile.lock").exists()
# with the correct dependencies
with open("Pipfile.lock") as fp:
content = json.load(fp)
assert content["default"]["platformdirs"]
assert content["develop"]["flake8"]
# and run things from inside pipenv's venv
pipenv_path = venv.run("pipenv --venv")
assert pipenv_path in venv.run("pipenv run which flake8")
venv.run("pipenv --bare run flake8 src/myproj/skeleton.py")
@pytest.mark.xfail(
sys.version_info < (3, 7), reason="pip-compile may fail in old Python"
)
def METHOD_NAME(tmpfolder, monkeypatch):
venv_path = Path(str(tmpfolder), "myproj/.venv").resolve()
find = partial(find_venv_bin, venv_path)
# Given a project is created with pyscaffold
# and it has some dependencies in setup.cfg
create_project(
project_path="myproj", extensions=[Venv()], requirements=["platformdirs"]
)
with tmpfolder.join("myproj").as_cwd():
requirements_in = Path("requirements.in")
# When we install pip-tools
run(f"{find('python')} -m pip install -v pip-tools certifi")
# and write a requirements.in file that proxies setup.cfg
# and install other things,
requirements_in.write_text("-e file:.\nflake8")
# Then we should be able to generate a requirements.txt
run(find("pip-compile"))
requirements_txt = Path("requirements.txt")
assert requirements_txt.exists()
# with the correct dependencies
content = requirements_txt.read_text()
assert "platformdirs==" in content
assert "flake8==" in content
assert "file:." in content
# install the dependencies
        # and run things from inside the project's venv
pip_sync = find("pip-sync")
try:
# pip-tools have problems on windows inside a test env with relative paths
run(pip_sync)
run(f"{find('flake8')} src/myproj/skeleton.py")
except CalledProcessError as ex:
if "assert" in ex.output:
pytest.skip(
"pip-tools tries to assert a path is absolute, which fails "
"inside test env for some OSs"
)
else:
raise |
254 | set params | import numpy as np
__all__ = ["PointSourceParam"]
from lenstronomy.Sampling.param_group import ModelParamGroup, SingleParam, ArrayParam
class SourcePositionParam(SingleParam):
"""Source position parameter, ra_source and dec_source."""
param_names = ["ra_source", "dec_source"]
_kwargs_lower = {"ra_source": -100, "dec_source": -100}
_kwargs_upper = {"ra_source": 100, "dec_source": 100}
class LensedPosition(ArrayParam):
"""Represents lensed positions, possibly many. ra_image and dec_image.
:param num_images: integer. The number of lensed positions to model.
"""
_kwargs_lower = {
"ra_image": -100,
"dec_image": -100,
}
_kwargs_upper = {
"ra_image": 100,
"dec_image": 100,
}
def __init__(self, num_images):
ArrayParam.__init__(self, int(num_images) > 0)
self.param_names = {"ra_image": int(num_images), "dec_image": int(num_images)}
class SourceAmp(SingleParam):
"""Source amplification."""
param_names = ["source_amp"]
_kwargs_lower = {"source_amp": 0}
_kwargs_upper = {"source_amp": 100}
class ImageAmp(ArrayParam):
"""Observed amplification of lensed images of a point source. Can model arbitrarily
many magnified images.
:param num_point_sources: integer. The number of lensed images without fixed
magnification.
"""
_kwargs_lower = {"point_amp": 0}
_kwargs_upper = {"point_amp": 100}
def __init__(self, num_point_sources):
ArrayParam.__init__(self, int(num_point_sources) > 0)
self.param_names = {"point_amp": int(num_point_sources)}
class PointSourceParam(object):
"""Point source parameters."""
def __init__(
self,
model_list,
kwargs_fixed,
num_point_source_list=None,
linear_solver=True,
fixed_magnification_list=None,
kwargs_lower=None,
kwargs_upper=None,
):
"""
:param model_list: list of point source model names
:param kwargs_fixed: list of keyword arguments with parameters to be held fixed
:param num_point_source_list: list of number of point sources per point source model class
:param linear_solver: bool, if True, does not return linear parameters for the sampler
(will be solved linearly instead)
:param fixed_magnification_list: list of booleans, if entry is True, keeps one overall scaling among the
point sources in this class
"""
self.model_list = model_list
if num_point_source_list is None:
num_point_source_list = [1] * len(model_list)
self._num_point_sources_list = num_point_source_list
if fixed_magnification_list is None:
fixed_magnification_list = [False] * len(model_list)
self._fixed_magnification_list = fixed_magnification_list
self.kwargs_fixed = kwargs_fixed
if linear_solver is True:
self.kwargs_fixed = self.add_fix_linear(kwargs_fixed)
self._linear_solver = linear_solver
self.param_groups = []
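        # One parameter group per point source model: a position parameter (image-plane or
        # source-plane) plus either a single overall amplitude (fixed magnification) or one
        # amplitude per lensed image.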
for i, model in enumerate(self.model_list):
params = []
num = num_point_source_list[i]
if model in ["LENSED_POSITION", "UNLENSED"]:
params.append(LensedPosition(num))
elif model == "SOURCE_POSITION":
params.append(SourcePositionParam(True))
else:
raise ValueError("%s not a valid point source model" % model)
if fixed_magnification_list[i] and model in [
"LENSED_POSITION",
"SOURCE_POSITION",
]:
params.append(SourceAmp(True))
else:
params.append(ImageAmp(num))
self.param_groups.append(params)
if kwargs_lower is None:
kwargs_lower = []
for model_params in self.param_groups:
fixed_lower = {}
for param_group in model_params:
fixed_lower = dict(fixed_lower, **param_group.kwargs_lower)
kwargs_lower.append(fixed_lower)
if kwargs_upper is None:
kwargs_upper = []
for model_params in self.param_groups:
fixed_upper = {}
for param_group in model_params:
fixed_upper = dict(fixed_upper, **param_group.kwargs_upper)
kwargs_upper.append(fixed_upper)
self.lower_limit = kwargs_lower
self.upper_limit = kwargs_upper
def get_params(self, args, i):
"""
:param args: sorted list of floats corresponding to the parameters being sampled
:param i: int, index of first entry relevant for being managed by this class
:return: keyword argument list of point sources, index relevant for the next class
"""
kwargs_list = []
for k, param_group in enumerate(self.param_groups):
kwargs, i = ModelParamGroup.compose_get_params(
param_group, args, i, kwargs_fixed=self.kwargs_fixed[k]
)
kwargs_list.append(kwargs)
return kwargs_list, i
def METHOD_NAME(self, kwargs_list):
"""
:param kwargs_list: keyword argument list
:return: sorted list of parameters being sampled extracted from kwargs_list
"""
args = []
for k, param_group in enumerate(self.param_groups):
kwargs = kwargs_list[k]
kwargs_fixed = self.kwargs_fixed[k]
args.extend(
ModelParamGroup.compose_set_params(
param_group, kwargs, kwargs_fixed=kwargs_fixed
)
)
return args
def num_param(self):
"""Number of parameters and their names.
:return: int, list of parameter names
"""
num, name_list = 0, []
for k, param_group in enumerate(self.param_groups):
n, names = ModelParamGroup.compose_num_params(
param_group, kwargs_fixed=self.kwargs_fixed[k]
)
num += n
name_list += names
return num, name_list
def add_fix_linear(self, kwargs_fixed):
"""Updates fixed keyword argument list with linear parameters.
:param kwargs_fixed: list of keyword arguments held fixed during sampling
:return: updated keyword argument list
"""
for k, model in enumerate(self.model_list):
if self._fixed_magnification_list[k] is True and model in [
"LENSED_POSITION",
"SOURCE_POSITION",
]:
kwargs_fixed[k]["source_amp"] = 1
else:
kwargs_fixed[k]["point_amp"] = np.ones(self._num_point_sources_list[k])
return kwargs_fixed
def num_param_linear(self):
"""
:return: number of linear parameters
"""
num = 0
if self._linear_solver is True:
for k, model in enumerate(self.model_list):
if self._fixed_magnification_list[k] is True and model in [
"LENSED_POSITION",
"SOURCE_POSITION",
]:
num += 1
else:
num += self._num_point_sources_list[k]
return num |
255 | test sklearn cross validation iterators | # -*- coding: utf-8 -*-
"""Unit tests for aeon classifier compatability with sklearn interfaces."""
__author__ = ["MatthewMiddlehurst"]
__all__ = [
"test_sklearn_cross_validation",
"test_sklearn_cross_validation_iterators",
"test_sklearn_parameter_tuning",
"test_sklearn_composite_classifiers",
]
import numpy as np
import pytest
from sklearn.calibration import CalibratedClassifierCV
from sklearn.ensemble import VotingClassifier
from sklearn.experimental import enable_halving_search_cv # noqa
from sklearn.model_selection import (
GridSearchCV,
GroupKFold,
GroupShuffleSplit,
HalvingGridSearchCV,
HalvingRandomSearchCV,
KFold,
LeaveOneOut,
LeavePGroupsOut,
LeavePOut,
RandomizedSearchCV,
RepeatedKFold,
ShuffleSplit,
StratifiedKFold,
StratifiedShuffleSplit,
TimeSeriesSplit,
cross_val_score,
)
from sklearn.pipeline import Pipeline
from aeon.classification.interval_based import CanonicalIntervalForestClassifier
from aeon.transformations.collection.interpolate import TSInterpolator
from aeon.utils._testing.collection import make_3d_test_data
# StratifiedGroupKFold(n_splits=2), removed because it is not available in sklearn 0.24
CROSS_VALIDATION_METHODS = [
KFold(n_splits=2),
RepeatedKFold(n_splits=2, n_repeats=2),
LeaveOneOut(),
LeavePOut(p=5),
ShuffleSplit(n_splits=2, test_size=0.25),
StratifiedKFold(n_splits=2),
StratifiedShuffleSplit(n_splits=2, test_size=0.25),
GroupKFold(n_splits=2),
LeavePGroupsOut(n_groups=5),
GroupShuffleSplit(n_splits=2, test_size=0.25),
TimeSeriesSplit(n_splits=2),
]
PARAMETER_TUNING_METHODS = [
GridSearchCV,
RandomizedSearchCV,
HalvingGridSearchCV,
HalvingRandomSearchCV,
]
COMPOSITE_ESTIMATORS = [
Pipeline(
[
("transform", TSInterpolator(length=10)),
("clf", CanonicalIntervalForestClassifier.create_test_instance()),
]
),
VotingClassifier(
estimators=[
("clf1", CanonicalIntervalForestClassifier.create_test_instance()),
("clf2", CanonicalIntervalForestClassifier.create_test_instance()),
("clf3", CanonicalIntervalForestClassifier.create_test_instance()),
]
),
CalibratedClassifierCV(
base_estimator=CanonicalIntervalForestClassifier.create_test_instance(),
cv=3,
),
]
def test_sklearn_cross_validation():
"""Test sklearn cross-validation works with aeon time series data and
classifiers."""
clf = CanonicalIntervalForestClassifier.create_test_instance()
X, y = make_3d_test_data(n_cases=20, n_channels=2, n_timepoints=30)
scores = cross_val_score(clf, X, y=y, cv=KFold(n_splits=2))
assert isinstance(scores, np.ndarray)
@pytest.mark.parametrize("cross_validation_method", CROSS_VALIDATION_METHODS)
def METHOD_NAME(cross_validation_method):
"""Test if sklearn cross-validation iterators can handle aeon time series data."""
X, y = make_3d_test_data(n_cases=20, n_channels=2, n_timepoints=30)
groups = [1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10]
for train, test in cross_validation_method.split(X=X, y=y, groups=groups):
assert isinstance(train, np.ndarray) and isinstance(test, np.ndarray)
@pytest.mark.parametrize("parameter_tuning_method", PARAMETER_TUNING_METHODS)
def test_sklearn_parameter_tuning(parameter_tuning_method):
"""Test if sklearn parameter tuners can handle aeon data and classifiers."""
clf = CanonicalIntervalForestClassifier.create_test_instance()
param_grid = {"n_intervals": [2, 3], "att_subsample_size": [2, 3]}
X, y = make_3d_test_data(n_cases=20, n_channels=2, n_timepoints=30)
parameter_tuning_method = parameter_tuning_method(
clf, param_grid, cv=KFold(n_splits=3)
)
parameter_tuning_method.fit(X, y)
assert isinstance(
parameter_tuning_method.best_estimator_, CanonicalIntervalForestClassifier
)
@pytest.mark.parametrize("composite_classifier", COMPOSITE_ESTIMATORS)
def test_sklearn_composite_classifiers(composite_classifier):
"""Test if sklearn composite classifiers can handle aeon data and classifiers."""
X, y = make_3d_test_data(n_cases=20, n_channels=2, n_timepoints=30)
composite_classifier.fit(X, y)
preds = composite_classifier.predict(X=X)
assert isinstance(preds, np.ndarray) |
256 | findmatch | """Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except IOError:
continue
morecaps = readmailcapfile(fp)
fp.close()
for key, value in morecaps.iteritems():
if not key in caps:
caps[key] = value
else:
caps[key] = caps[key] + value
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# XXX Actually, this is Unix-specific
if 'MAILCAPS' in os.environ:
str = os.environ['MAILCAPS']
mailcaps = str.split(':')
else:
if 'HOME' in os.environ:
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if key in caps:
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
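    # Example: 'video/mpeg; xmpeg %s; test=test -n "$DISPLAY"' parses to
    # ('video/mpeg', {'view': 'xmpeg %s', 'test': 'test -n "$DISPLAY"'}).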
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fkey in fields:
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def METHOD_NAME(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""Find a match for a mailcap entry.
Return a tuple containing the command line, and the mailcap entry
used; (None, None) if no match is found. This may invoke the
'test' command of several matching entries before deciding which
entry to use.
"""
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if 'test' in e:
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if MIMEtype in caps:
entries = entries + caps[MIMEtype]
if key is not None:
entries = filter(lambda e, key=key: key in e, entries)
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
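    # Expands the RFC 1524 escapes: %s -> filename, %t -> MIME type, %{param} -> named parameter
    # from plist, %% -> a literal percent sign.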
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
import sys
caps = getcaps()
if not sys.argv[1:]:
show(caps)
return
for i in range(1, len(sys.argv), 2):
args = sys.argv[i:i+2]
if len(args) < 2:
print "usage: mailcap [MIMEtype file] ..."
return
MIMEtype = args[0]
file = args[1]
command, e = METHOD_NAME(caps, MIMEtype, 'view', file)
if not command:
            print "No viewer found for", MIMEtype
else:
print "Executing:", command
sts = os.system(command)
if sts:
print "Exit status:", sts
def show(caps):
print "Mailcap files:"
for fn in listmailcapfiles(): print "\t" + fn
print
if not caps: caps = getcaps()
print "Mailcap entries:"
print
ckeys = caps.keys()
ckeys.sort()
for type in ckeys:
print type
entries = caps[type]
for e in entries:
keys = e.keys()
keys.sort()
for k in keys:
print " %-15s" % k, e[k]
print
if __name__ == '__main__':
test() |
257 | instance | """
Test cases for salt.modules.etcd_mod
Note: No functional tests are required as of now, as this is
essentially a wrapper around salt.utils.etcd_util.
If the contents of this module were to add more logic besides
acting as a wrapper, then functional tests would be required.
:codeauthor: Jayesh Kariya <[email protected]>
"""
import pytest
import salt.modules.etcd_mod as etcd_mod
import salt.utils.etcd_util as etcd_util
from tests.support.mock import MagicMock, create_autospec, patch
@pytest.fixture
def configure_loader_modules():
return {etcd_mod: {}}
@pytest.fixture
def METHOD_NAME():
return create_autospec(etcd_util.EtcdBase)
@pytest.fixture
def etcd_client_mock(METHOD_NAME):
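    # Factory-style mock: calling it (as etcd_util.get_conn would be called) returns the
    # autospec'd EtcdBase instance, so tests can assert on that instance's methods.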
mocked_client = MagicMock()
mocked_client.return_value = METHOD_NAME
return mocked_client
# 'get_' function tests: 1
def test_get(etcd_client_mock, METHOD_NAME):
"""
Test if it get a value from etcd, by direct path
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.get.return_value = "stack"
assert etcd_mod.get_("salt") == "stack"
METHOD_NAME.get.assert_called_with("salt", recurse=False)
METHOD_NAME.get.return_value = {"salt": "stack"}
assert etcd_mod.get_("salt", recurse=True) == {"salt": "stack"}
METHOD_NAME.get.assert_called_with("salt", recurse=True)
METHOD_NAME.get.side_effect = Exception
pytest.raises(Exception, etcd_mod.get_, "err")
# 'set_' function tests: 1
def test_set(etcd_client_mock, METHOD_NAME):
"""
Test if it set a key in etcd, by direct path
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.set.return_value = "stack"
assert etcd_mod.set_("salt", "stack") == "stack"
METHOD_NAME.set.assert_called_with("salt", "stack", directory=False, ttl=None)
METHOD_NAME.set.return_value = True
assert etcd_mod.set_("salt", "", directory=True) is True
METHOD_NAME.set.assert_called_with("salt", "", directory=True, ttl=None)
assert etcd_mod.set_("salt", "", directory=True, ttl=5) is True
METHOD_NAME.set.assert_called_with("salt", "", directory=True, ttl=5)
assert etcd_mod.set_("salt", "", None, 10, True) is True
METHOD_NAME.set.assert_called_with("salt", "", directory=True, ttl=10)
METHOD_NAME.set.side_effect = Exception
pytest.raises(Exception, etcd_mod.set_, "err", "stack")
# 'update' function tests: 1
def test_update(etcd_client_mock, METHOD_NAME):
"""
Test if can set multiple keys in etcd
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
args = {
"x": {"y": {"a": "1", "b": "2"}},
"z": "4",
"d": {},
}
result = {
"/some/path/x/y/a": "1",
"/some/path/x/y/b": "2",
"/some/path/z": "4",
"/some/path/d": {},
}
METHOD_NAME.update.return_value = result
assert etcd_mod.update(args, path="/some/path") == result
METHOD_NAME.update.assert_called_with(args, "/some/path")
assert etcd_mod.update(args) == result
METHOD_NAME.update.assert_called_with(args, "")
# 'ls_' function tests: 1
def test_ls(etcd_client_mock, METHOD_NAME):
"""
Test if it return all keys and dirs inside a specific path
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.ls.return_value = {"/some-dir": {}}
assert etcd_mod.ls_("/some-dir") == {"/some-dir": {}}
METHOD_NAME.ls.assert_called_with("/some-dir")
METHOD_NAME.ls.return_value = {"/": {}}
assert etcd_mod.ls_() == {"/": {}}
METHOD_NAME.ls.assert_called_with("/")
METHOD_NAME.ls.side_effect = Exception
pytest.raises(Exception, etcd_mod.ls_, "err")
# 'rm_' function tests: 1
def test_rm(etcd_client_mock, METHOD_NAME):
"""
Test if it delete a key from etcd
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.rm.return_value = False
assert not etcd_mod.rm_("dir")
METHOD_NAME.rm.assert_called_with("dir", recurse=False)
METHOD_NAME.rm.return_value = True
assert etcd_mod.rm_("dir", recurse=True)
METHOD_NAME.rm.assert_called_with("dir", recurse=True)
METHOD_NAME.rm.side_effect = Exception
pytest.raises(Exception, etcd_mod.rm_, "err")
# 'tree' function tests: 1
def test_tree(etcd_client_mock, METHOD_NAME):
"""
Test if it recurses through etcd and return all values
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.tree.return_value = {}
assert etcd_mod.tree("/some-dir") == {}
METHOD_NAME.tree.assert_called_with("/some-dir")
assert etcd_mod.tree() == {}
METHOD_NAME.tree.assert_called_with("/")
METHOD_NAME.tree.side_effect = Exception
pytest.raises(Exception, etcd_mod.tree, "err")
# 'watch' function tests: 1
def test_watch(etcd_client_mock, METHOD_NAME):
"""
Test if watch returns the right tuples
"""
with patch.dict(etcd_mod.__utils__, {"etcd_util.get_conn": etcd_client_mock}):
METHOD_NAME.watch.return_value = {
"value": "stack",
"changed": True,
"dir": False,
"mIndex": 1,
"key": "/salt",
}
assert etcd_mod.watch("/salt") == METHOD_NAME.watch.return_value
METHOD_NAME.watch.assert_called_with("/salt", recurse=False, timeout=0, index=None)
METHOD_NAME.watch.return_value["dir"] = True
assert (
etcd_mod.watch("/some-dir", recurse=True, timeout=5, index=10)
== METHOD_NAME.watch.return_value
)
METHOD_NAME.watch.assert_called_with(
"/some-dir", recurse=True, timeout=5, index=10
)
assert (
etcd_mod.watch("/some-dir", True, None, 5, 10)
== METHOD_NAME.watch.return_value
)
METHOD_NAME.watch.assert_called_with(
"/some-dir", recurse=True, timeout=5, index=10
) |
258 | test python op exception | # Owner(s): ["oncall: jit"]
from torch.testing._internal.common_utils import TestCase
import torch
from torch import nn
r"""
Test TorchScript exception handling.
"""
class TestException(TestCase):
def test_pyop_exception_message(self):
class Foo(torch.jit.ScriptModule):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(1, 10, kernel_size=5)
@torch.jit.script_method
def forward(self, x):
return self.conv(x)
foo = Foo()
# testing that the correct error message propagates
with self.assertRaisesRegex(RuntimeError, r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d"):
foo(torch.ones([123])) # wrong size
def test_builtin_error_messsage(self):
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def close_match(x):
return x.masked_fill(True)
with self.assertRaisesRegex(RuntimeError, "This op may not exist or may not be currently "
"supported in TorchScript"):
@torch.jit.script
def unknown_op(x):
torch.set_anomaly_enabled(True)
return x
def test_exceptions(self):
cu = torch.jit.CompilationUnit('''
def foo(cond):
if bool(cond):
raise ValueError(3)
return 1
''')
cu.foo(torch.tensor(0))
with self.assertRaisesRegex(torch.jit.Error, "3"):
cu.foo(torch.tensor(1))
def foo(cond):
a = 3
if bool(cond):
raise ArbitraryError(a, "hi")
if 1 == 2:
raise ArbitraryError
return a
with self.assertRaisesRegex(RuntimeError, "undefined value ArbitraryError"):
torch.jit.script(foo)
def exception_as_value():
a = Exception()
print(a)
with self.assertRaisesRegex(RuntimeError, "cannot be used as a value"):
torch.jit.script(exception_as_value)
@torch.jit.script
def foo_no_decl_always_throws():
raise RuntimeError("Hi")
# function that has no declared type but always throws set to None
output_type = next(foo_no_decl_always_throws.graph.outputs()).type()
self.assertTrue(str(output_type) == "NoneType")
@torch.jit.script
def foo_decl_always_throws():
# type: () -> Tensor
raise Exception("Hi")
output_type = next(foo_decl_always_throws.graph.outputs()).type()
self.assertTrue(str(output_type) == "Tensor")
def foo():
raise 3 + 4
with self.assertRaisesRegex(RuntimeError, "must derive from BaseException"):
torch.jit.script(foo)
# a escapes scope
@torch.jit.script
def foo():
if 1 == 1:
a = 1
else:
if 1 == 1:
raise Exception("Hi")
else:
raise Exception("Hi")
return a
self.assertEqual(foo(), 1)
@torch.jit.script
def tuple_fn():
raise RuntimeError("hello", "goodbye")
with self.assertRaisesRegex(torch.jit.Error, "hello, goodbye"):
tuple_fn()
@torch.jit.script
def no_message():
raise RuntimeError
with self.assertRaisesRegex(torch.jit.Error, "RuntimeError"):
no_message()
def test_assertions(self):
cu = torch.jit.CompilationUnit('''
def foo(cond):
assert bool(cond), "hi"
return 0
''')
cu.foo(torch.tensor(1))
with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"):
cu.foo(torch.tensor(0))
@torch.jit.script
def foo(cond):
assert bool(cond), "hi"
foo(torch.tensor(1))
# we don't currently validate the name of the exception
with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"):
foo(torch.tensor(0))
def METHOD_NAME(self):
@torch.jit.ignore
def python_op(x):
raise Exception("bad!")
@torch.jit.script
def fn(x):
return python_op(x)
with self.assertRaisesRegex(RuntimeError, "operation failed in the TorchScript interpreter"):
fn(torch.tensor(4))
def test_dict_expansion_raises_error(self):
def fn(self):
d = {"foo": 1, "bar": 2, "baz": 3}
return {**d}
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError,
"Dict expansion "):
torch.jit.script(fn)
def test_custom_python_exception(self):
class MyValueError(ValueError):
pass
@torch.jit.script
def fn():
raise MyValueError("test custom exception")
with self.assertRaisesRegex(torch.jit.Error, "jit.test_exception.MyValueError: test custom exception"):
fn()
def test_custom_python_exception_defined_elsewhere(self):
from jit.myexception import MyKeyError
@torch.jit.script
def fn():
raise MyKeyError("This is a user defined key error")
with self.assertRaisesRegex(torch.jit.Error, "jit.myexception.MyKeyError: This is a user defined key error"):
fn() |
259 | decrypt secret | # Note: this module is imported inside settings.py! Make sure to avoid circular imports
import logging
import os
import six
from google.cloud import kms
from google.cloud.storage import Client
from google_crc32c import value as _crc32c
ENV_VARS = "ENV_VARS"
KMS_GCS = 2
SECRET_STORAGE_GCP_KMS_DEFAULT_LOCATION = "global"
SECRET_STORAGE_DEFAULT_ENVIRONMENT = "dev"
def get_secret(secret_name, secret_storage=None):
"""
Return the secret value stored in the secret key called secret_name.
The following secret storage locations are supported:
- environment variables
- Google Cloud Storage, encrypted with KMS
If secret_storage is None (the default), then it will read the SECRET_STORAGE
environment variable. If it's not defined, or the value is "ENV_VARS", then continue
reading the secret from the environment variables, returning an empty string if
that secret is not found.
If the SECRET_STORAGE env var is defined and is set to "KMS_GCS", then it will download
small files from GCS, decrypt them using GCloud KMS keys. You will need to make sure that
the VM or environment has permission to access GCS and the KMS keys. You can do that by either
running `gcloud auth login` on your local dev machine, or setting the right service account
on your GCE VM.
If you're using the KMS_GCS option, you will need to define the following additional environment variables.
SECRET_STORAGE_GCP_PROJECT_ID: the env var to point to your GCP project ID where the secrets are stored.
SECRET_STORAGE_ENVIRONMENT: Either one of dev, staging or prod. There are secrets specific to each environment
that help isolate data to each environment. See the deploy/secretmanage executable found at the root of
this repo. If not defined, this defaults to "dev".
In addition, you can define an optional SECRET_STORAGE_GCP_KMS_LOCATION to point to the datacenter
where your KMS keys live. If not defined, this defaults to 'global'. See GCloud KMS docs for more detail.
"""
secret_storage = secret_storage or os.getenv("SECRET_STORAGE")
if secret_storage in [None, "", ENV_VARS]:
return os.getenv(secret_name)
if secret_storage == "KMS_GCS":
env = os.getenv("SECRET_STORAGE_ENVIRONMENT") or SECRET_STORAGE_DEFAULT_ENVIRONMENT
project_id = os.getenv("SECRET_STORAGE_GCP_PROJECT_ID")
kms_location = (
os.getenv("SECRET_STORAGE_GCP_KMS_LOCATION")
or SECRET_STORAGE_GCP_KMS_DEFAULT_LOCATION
)
if not project_id:
raise KeyError("The env variable SECRET_STORAGE_GCP_PROJECT_ID was not defined!")
ciphertext = get_encrypted_secret(secret_name, project_id, env)
return METHOD_NAME(ciphertext, project_id, kms_location, env, secret_name)
logging.warning(
"Invalid SECRET_STORAGE value! Defaulting to reading environment variables for now."
)
return os.getenv(secret_name)
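# A minimal usage sketch (hypothetical secret name and value, not part of the
# original module): with the ENV_VARS storage backend, get_secret() simply reads
# the named environment variable.
def _example_get_secret_from_env():
    os.environ.setdefault("MY_API_KEY", "dummy-value")  # hypothetical secret
    return get_secret("MY_API_KEY", secret_storage=ENV_VARS)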
def METHOD_NAME(ciphertext, project_id, loc, env, secret_name):
"""
Decrypt the ciphertext by using the GCloud KMS keys for that secret.
"""
kms_client = kms.KeyManagementServiceClient()
key_path = kms_client.crypto_key_path(project_id, loc, env, secret_name)
# Optional, but recommended: compute ciphertext's CRC32C.
# See crc32c() function defined below.
ciphertext_crc32c = crc32c(ciphertext)
response = kms_client.decrypt(
request={'name': key_path, 'ciphertext': ciphertext, 'ciphertext_crc32c': ciphertext_crc32c})
# Optional, but recommended: perform integrity verification on decrypt_response.
# For more details on ensuring E2E in-transit integrity to and from Cloud KMS visit:
# https://cloud.google.com/kms/docs/data-integrity-guidelines
    if response.plaintext_crc32c != crc32c(response.plaintext):
raise Exception('The response received from the server was corrupted in-transit.')
return response.plaintext
def get_encrypted_secret(secret_name, project_id, env):
"""
Fetch the encrypted secret stored in project_id's secrets bucket. Return the encrypted string.
Bucket names are globally unique. The secrets bucket for a project is called "{project_id}-secrets".
Inside the bucket are folders corresponding to different environments; currently either dev, staging or
prod. Inside each folder are files that are encrypted by GCloud KMS keys that are specific to that
secret.
"""
bucket_name = "{id}-secrets".format(id=project_id)
loc = "{env}/{name}".format(env=env, name=secret_name)
bucket = Client().get_bucket(bucket_name)
    try:
        ret = bucket.blob(loc).download_as_string()
    except AttributeError:
        logging.warning(
            "Secret '{name}' in env '{env}' does not exist! Defaulting to an empty string.".format(
                env=env, name=secret_name
            )
        )
        ret = ""
return ret
def crc32c(data):
"""
Calculates the CRC32C checksum of the provided data.
Args:
data: the bytes over which the checksum should be calculated.
Returns:
An int representing the CRC32C checksum of the provided bytes.
"""
return _crc32c(six.ensure_binary(data)) |
260 | process mapping rule | """Used by the make system to generate a rules.mk
"""
from pathlib import Path
from dotty_dict import dotty
from argcomplete.completers import FilesCompleter
from milc import cli
from qmk.info import info_json
from qmk.json_schema import json_load
from qmk.keyboard import keyboard_completer, keyboard_folder
from qmk.commands import dump_lines, parse_configurator_json
from qmk.path import normpath, FileType
from qmk.constants import GPL2_HEADER_SH_LIKE, GENERATED_HEADER_SH_LIKE
def METHOD_NAME(kb_info_json, rules_key, info_dict):
"""Return the rules.mk line(s) for a mapping rule.
"""
if not info_dict.get('to_c', True):
return None
info_key = info_dict['info_key']
key_type = info_dict.get('value_type', 'raw')
try:
rules_value = kb_info_json[info_key]
except KeyError:
return None
if key_type in ['array', 'list']:
return f'{rules_key} ?= {" ".join(rules_value)}'
elif key_type == 'bool':
return f'{rules_key} ?= {"yes" if rules_value else "no"}'
elif key_type == 'mapping':
return '\n'.join([f'{key} ?= {value}' for key, value in rules_value.items()])
elif key_type == 'str':
return f'{rules_key} ?= "{rules_value}"'
return f'{rules_key} ?= {rules_value}'
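# Illustrative sketch only: a hypothetical mapping entry and info.json fragment
# (neither is taken from data/mappings/info_rules.hjson) showing how a 'bool'
# mapping is rendered into a rules.mk assignment.
def _example_mapping_rule():
    kb_info_json = dotty({"build": {"lto": True}})                 # hypothetical info.json data
    info_dict = {"info_key": "build.lto", "value_type": "bool"}    # hypothetical mapping entry
    return METHOD_NAME(kb_info_json, "LTO_ENABLE", info_dict)      # -> 'LTO_ENABLE ?= yes'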
@cli.argument('filename', nargs='?', arg_only=True, type=FileType('r'), completer=FilesCompleter('.json'), help='A configurator export JSON to generate rules.mk from.')
@cli.argument('-o', '--output', arg_only=True, type=normpath, help='File to write to')
@cli.argument('-q', '--quiet', arg_only=True, action='store_true', help="Quiet mode, only output error messages")
@cli.argument('-e', '--escape', arg_only=True, action='store_true', help="Escape spaces in quiet mode")
@cli.argument('-kb', '--keyboard', arg_only=True, type=keyboard_folder, completer=keyboard_completer, help='Keyboard to generate rules.mk for.')
@cli.subcommand('Used by the make system to generate rules.mk from info.json', hidden=True)
def generate_rules_mk(cli):
"""Generates a rules.mk file from info.json.
"""
converter = None
# Determine our keyboard/keymap
if cli.args.filename:
user_keymap = parse_configurator_json(cli.args.filename)
kb_info_json = dotty(user_keymap.get('config', {}))
converter = user_keymap.get('converter', None)
elif cli.args.keyboard:
kb_info_json = dotty(info_json(cli.args.keyboard))
else:
cli.log.error('You must supply a configurator export or `--keyboard`.')
cli.subcommands['generate-rules-mk'].print_help()
return False
info_rules_map = json_load(Path('data/mappings/info_rules.hjson'))
rules_mk_lines = [GPL2_HEADER_SH_LIKE, GENERATED_HEADER_SH_LIKE]
# Iterate through the info_rules map to generate basic rules
for rules_key, info_dict in info_rules_map.items():
new_entry = METHOD_NAME(kb_info_json, rules_key, info_dict)
if new_entry:
rules_mk_lines.append(new_entry)
# Iterate through features to enable/disable them
if 'features' in kb_info_json:
for feature, enabled in kb_info_json['features'].items():
feature = feature.upper()
enabled = 'yes' if enabled else 'no'
rules_mk_lines.append(f'{feature}_ENABLE ?= {enabled}')
# Set SPLIT_TRANSPORT, if needed
if kb_info_json.get('split', {}).get('transport', {}).get('protocol') == 'custom':
rules_mk_lines.append('SPLIT_TRANSPORT ?= custom')
# Set CUSTOM_MATRIX, if needed
if kb_info_json.get('matrix_pins', {}).get('custom'):
if kb_info_json.get('matrix_pins', {}).get('custom_lite'):
rules_mk_lines.append('CUSTOM_MATRIX ?= lite')
else:
rules_mk_lines.append('CUSTOM_MATRIX ?= yes')
if converter:
rules_mk_lines.append(f'CONVERT_TO ?= {converter}')
# Show the results
dump_lines(cli.args.output, rules_mk_lines)
if cli.args.output:
if cli.args.quiet:
if cli.args.escape:
print(cli.args.output.as_posix().replace(' ', '\\ '))
else:
print(cli.args.output)
else:
cli.log.info('Wrote rules.mk to %s.', cli.args.output) |
261 | test check params | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2023
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import numpy as np
import pytest
from art.attacks.poisoning import BadDetObjectGenerationAttack, PoisoningAttackBackdoor
from art.attacks.poisoning.perturbations import add_single_bd, add_pattern_bd, insert_image
from tests.utils import ARTTestException
logger = logging.getLogger(__name__)
@pytest.mark.framework_agnostic
@pytest.mark.parametrize("percent_poison", [0.3, 1.0])
@pytest.mark.parametrize("channels_first", [True, False])
def test_poison_single_bd(art_warning, image_batch, percent_poison, channels_first):
x, y = image_batch
backdoor = PoisoningAttackBackdoor(add_single_bd)
try:
attack = BadDetObjectGenerationAttack(
backdoor=backdoor,
bbox_height=8,
bbox_width=8,
class_target=1,
percent_poison=percent_poison,
channels_first=channels_first,
)
poison_data, poison_labels = attack.poison(x, y)
np.testing.assert_equal(poison_data.shape, x.shape)
if percent_poison == 1.0:
assert poison_labels[0]["boxes"].shape != y[0]["boxes"].shape
assert poison_labels[0]["labels"].shape != y[0]["labels"].shape
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
@pytest.mark.parametrize("percent_poison", [0.3, 1.0])
@pytest.mark.parametrize("channels_first", [True, False])
def test_poison_pattern_bd(art_warning, image_batch, percent_poison, channels_first):
x, y = image_batch
backdoor = PoisoningAttackBackdoor(add_pattern_bd)
try:
attack = BadDetObjectGenerationAttack(
backdoor=backdoor,
bbox_height=8,
bbox_width=8,
class_target=1,
percent_poison=percent_poison,
channels_first=channels_first,
)
poison_data, poison_labels = attack.poison(x, y)
np.testing.assert_equal(poison_data.shape, x.shape)
if percent_poison == 1.0:
assert poison_labels[0]["boxes"].shape != y[0]["boxes"].shape
assert poison_labels[0]["labels"].shape != y[0]["labels"].shape
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
@pytest.mark.parametrize("percent_poison", [0.3, 1.0])
@pytest.mark.parametrize("channels_first", [True, False])
def test_poison_image(art_warning, image_batch, percent_poison, channels_first):
x, y = image_batch
file_path = os.path.join(os.getcwd(), "utils/data/backdoors/alert.png")
def perturbation(x):
return insert_image(x, backdoor_path=file_path, channels_first=False, size=(2, 2), mode="RGB")
backdoor = PoisoningAttackBackdoor(perturbation)
try:
attack = BadDetObjectGenerationAttack(
backdoor=backdoor,
bbox_height=8,
bbox_width=8,
class_target=1,
percent_poison=percent_poison,
channels_first=channels_first,
)
poison_data, poison_labels = attack.poison(x, y)
np.testing.assert_equal(poison_data.shape, x.shape)
if percent_poison == 1.0:
assert poison_labels[0]["boxes"].shape != y[0]["boxes"].shape
assert poison_labels[0]["labels"].shape != y[0]["labels"].shape
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def METHOD_NAME(art_warning):
backdoor = PoisoningAttackBackdoor(add_single_bd)
try:
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(None, bbox_height=8, bbox_width=-1)
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=-1, bbox_width=8)
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=8, bbox_width=-1)
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=8, bbox_width=8, percent_poison=-0.1)
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=8, bbox_width=8, percent_poison=0)
with pytest.raises(ValueError):
_ = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=8, bbox_width=8, percent_poison=1.1)
except ARTTestException as e:
art_warning(e)
@pytest.mark.framework_agnostic
def test_non_image_data_error(art_warning, tabular_batch):
x, y = tabular_batch
backdoor = PoisoningAttackBackdoor(add_single_bd)
try:
attack = BadDetObjectGenerationAttack(backdoor=backdoor, bbox_height=8, bbox_width=8)
exc_msg = "Unrecognized input dimension. BadDet OGA can only be applied to image data."
with pytest.raises(ValueError, match=exc_msg):
_, _ = attack.poison(x, y)
except ARTTestException as e:
art_warning(e) |
262 | start | #!/usr/bin/python
"""
PyChess arena tournament script.
This script executes a tournament between the engines installed on your
system. The script is executed from a terminal with the usual environment.
"""
import os
import sys
###############################################################################
# Set up important things
from gi.repository import GLib
from gi.repository import GObject
GObject.threads_init()
mainloop = GLib.MainLoop()
from pychess.Utils.const import *
###############################################################################
# Fix environment
if "PYTHONPATH" in os.environ:
os.environ["PYTHONPATH"] = os.pathsep.join(
os.path.abspath(p) for p in os.environ["PYTHONPATH"].split(os.pathsep)
)
###############################################################################
from pychess.System import Log
Log.DEBUG = False
###############################################################################
# Do the rest of the imports
from pychess.Players.engineNest import discoverer
from pychess.Savers.pgn import save
from pychess.Utils.GameModel import GameModel
from pychess.Utils.TimeModel import TimeModel
from pychess.Variants import variants
###############################################################################
# Look up engines
def prepare():
print("Discovering engines", end=" ")
discoverer.connect("discovering_started", cb_started)
discoverer.connect("engine_discovered", cb_gotone)
discoverer.connect("all_engines_discovered", METHOD_NAME)
discoverer.discover()
def cb_started(discoverer, binnames):
print("Wait a moment while we discover %d engines" % len(binnames))
def cb_gotone(discoverer, binname, engine):
sys.stdout.write(".")
###############################################################################
# Ask the user for details
engines = []
results = []
minutes = 0
current = [0, 0]
def METHOD_NAME(discoverer):
global engines, results, minutes
engines = discoverer.getEngines()
n = len(engines)
for i in range(n):
results.append([None] * n)
print()
print("Your installed engines are:")
for i, engine in enumerate(engines):
name = discoverer.getName(engine)
print(f"[{name[:3]}] {name}")
print("The total amount of fights will be %d" % (n * (n - 1)))
print()
minutes = int(input("Please enter the clock minutes for each game [n]: "))
print("The games will last up to %d minutes." % (2 * n * (n - 1) * minutes))
print("You will be informed of the progress as the games finish.")
print()
runGame()
###############################################################################
# Run games
def runGame():
a, b = findMatch()
if a is None:
print("All games have now been played. Here are the final scores:")
printResults()
mainloop.quit()
return
current[0] = a
current[1] = b
game = GameModel(TimeModel(minutes * 60, 0))
game.connect("game_started", cb_gamestarted)
game.connect("game_ended", cb_gameended)
p0 = discoverer.initPlayerEngine(
engines[a],
WHITE,
8,
variants[NORMALCHESS],
secs=minutes * 60,
incr=0,
forcePonderOff=True,
)
p1 = discoverer.initPlayerEngine(
engines[b],
BLACK,
8,
variants[NORMALCHESS],
secs=minutes * 60,
incr=0,
forcePonderOff=True,
)
game.setPlayers([p0, p1])
game.METHOD_NAME()
def cb_gamestarted(game):
print("Starting the game between %s and %s" % tuple(game.players))
def cb_gameended(game, reason):
print(
"The game between %s and %s ended %s"
% (tuple(game.players) + (reprResult[game.status],))
)
if game.status not in (DRAW, WHITEWON, BLACKWON):
print("Something must have gone wrong. But we'll just try to continue!")
else:
i, j = current
results[i][j] = game.status
print("The current scores are:")
printScoreboard()
print()
with open("arena.pgn", "a+") as fh:
save(fh, game)
runGame()
###############################################################################
# A few helpers
def printScoreboard():
names = [discoverer.getName(e)[:3] for e in engines]
print(r"W\B", " ".join(names))
for i, nameA in enumerate(names):
print(nameA, end=" ")
for j, nameB in enumerate(names):
if i == j:
print(" # ", end=" ")
elif results[i][j] == DRAW:
print("½-½", end=" ")
elif results[i][j] == WHITEWON:
print("1-0", end=" ")
elif results[i][j] == BLACKWON:
print("0-1", end=" ")
else:
print(" . ", end=" ")
print()
def printResults():
scores = []
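    # Scoring note: a win counts 2 and a draw counts 1 internally, so the value
    # printed below is points // 2 with a trailing "½" when points is odd
    # (e.g. two wins and one draw -> 5 points -> "2 ½").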
for i in range(len(engines)):
points = (
sum(2 for j in range(len(engines)) if results[i][j] == WHITEWON)
+ sum(1 for j in range(len(engines)) if results[i][j] == DRAW)
+ sum(2 for j in range(len(engines)) if results[j][i] == BLACKWON)
+ sum(1 for j in range(len(engines)) if results[j][i] == DRAW)
)
scores.append((points, i))
scores.sort(reverse=True)
for points, i in scores:
        print(discoverer.getName(engines[i]), ":", points // 2, "½" * (points % 2))
# def findMatch():
# for i, engineA in enumerate(engines):
# for j, engineB in enumerate(engines):
# if i != j and results[i][j] == None:
# return i, j
# return None, None
import random
def findMatch():
pos = [
(i, j)
for i in range(len(engines))
for j in range(len(engines))
if i != j and results[i][j] is None
]
# pos = [(i,j) for i,j in pos if
# "pychess" in discoverer.getName(engines[i]).lower() or
# "pychess" in discoverer.getName(engines[j]).lower()]
if not pos:
return None, None
return random.choice(pos)
###############################################################################
# Push onto the mainloop and start it
# glib.idle_add(prepare)
prepare()
def do(discoverer):
game = GameModel(TimeModel(60, 0))
# game.connect('game_started', cb_gamestarted2)
game.connect("game_ended", lambda *a: mainloop.quit())
p0 = discoverer.initPlayerEngine(
discoverer.getEngines()["rybka"], WHITE, 7, variants[NORMALCHESS], 60
)
p1 = discoverer.initPlayerEngine(
discoverer.getEngines()["gnuchess"], BLACK, 7, variants[NORMALCHESS], 60
)
game.setPlayers([p0, p1])
game.METHOD_NAME()
# discoverer.connect('all_engines_discovered', do)
# discoverer.start()
mainloop.run() |
263 | zookeeper container | """
Integration tests for the zookeeper states
"""
import logging
import pytest
from saltfactories.utils import random_string
pytest.importorskip("kazoo")
pytest.importorskip("docker")
log = logging.getLogger(__name__)
pytestmark = [
    pytest.mark.slow_test,
    pytest.mark.skip_if_binaries_missing("dockerd"),
]
@pytest.fixture(scope="module")
def minion_config_overrides(zookeeper_port):
zookeeper_grains = {
"prod": {
"hosts": "localhost:{}".format(zookeeper_port),
"default_acl": [
{
"username": "daniel",
"password": "test",
"read": True,
"write": True,
"create": True,
"delete": True,
"admin": True,
}
],
"username": "daniel",
"password": "test",
},
"hosts": "localhost:{}".format(zookeeper_port),
"default_acl": [
{
"username": "daniel",
"password": "test",
"read": True,
"write": True,
"create": True,
"delete": True,
"admin": True,
}
],
"username": "daniel",
"password": "test",
}
return {"grains": {"zookeeper": zookeeper_grains}}
@pytest.fixture(scope="module")
def METHOD_NAME(salt_factories):
container = salt_factories.get_container(
random_string("zookeeper-"),
"ghcr.io/saltstack/salt-ci-containers/zookeeper",
container_run_kwargs={
"ports": {
"2181/tcp": None,
}
},
pull_before_start=True,
skip_on_pull_failure=True,
skip_if_docker_client_not_connectable=True,
)
with container.started() as factory:
yield factory
@pytest.fixture(scope="module")
def zookeeper_port(METHOD_NAME):
return METHOD_NAME.get_host_port_binding(2181, protocol="tcp", ipv6=False)
@pytest.fixture(scope="module")
def zookeeper(METHOD_NAME, states):
return states.zookeeper
def test_zookeeper_present(zookeeper):
ret = zookeeper.present(name="/test/name-1", value="testuser", makepath=True)
assert ret.result is True
def test_zookeeper_present_acls_and_profile(zookeeper):
ret = zookeeper.present(name="/test/name-2", value="testuser", makepath=True)
assert ret.result is True
ret = zookeeper.present(
name="/test/name-2",
value="daniel",
acls=[
{
"username": "daniel",
"password": "test",
"read": True,
"admin": True,
"write": True,
},
{"username": "testuser", "password": "test", "read": True},
],
profile="prod",
)
assert ret.result is True
def test_zookeeper_absent(zookeeper):
ret = zookeeper.present(name="/test/name-3", value="testuser", makepath=True)
assert ret.result is True
ret = zookeeper.absent(name="/test/name-3")
assert ret.result is True
assert ret.changes
ret = zookeeper.absent(name="/test/name-3")
assert ret.result is True
assert not ret.changes
def test_zookeeper_acls(zookeeper):
ret = zookeeper.acls(
name="/test/name-4",
acls=[
{
"username": "daniel",
"password": "test",
"read": True,
"admin": True,
"write": True,
},
{"username": "testuser", "password": "test", "read": True},
],
)
assert ret.result is False
ret = zookeeper.present(name="/test/name-4", value="testuser", makepath=True)
assert ret.result is True
ret = zookeeper.acls(
name="/test/name-4",
acls=[
{
"username": "daniel",
"password": "test",
"read": True,
"admin": True,
"write": True,
},
{"username": "testuser", "password": "test", "read": True},
],
)
assert ret.result is True |
264 | run selected tests | from __future__ import absolute_import, division, print_function
from mmtbx.validation.clashscore import clashscore
import libtbx.load_env
import iotbx.pdb
import unittest
import os
from six.moves import map
__author__ = 'Youval'
test_pdb_str = '''\
CRYST1 77.977 77.977 66.800 90.00 90.00 120.00 P 31 2 1 6
SCALE1 0.012824 0.007404 0.000000 0.00000
SCALE2 0.000000 0.014808 0.000000 0.00000
SCALE3 0.000000 0.000000 0.014970 0.00000
ATOM 489 N TRP A 96 27.616 -26.119 6.863 1.00 11.38 N
ATOM 490 CA TRP A 96 28.969 -26.184 6.322 1.00 10.73 C
ATOM 491 C TRP A 96 29.155 -25.427 5.023 1.00 10.54 C
ATOM 492 O TRP A 96 28.250 -25.321 4.198 1.00 10.74 O
ATOM 493 CB TRP A 96 29.451 -27.607 6.139 1.00 10.00 C
ATOM 494 CG TRP A 96 29.781 -28.324 7.405 1.00 9.99 C
ATOM 495 CD1 TRP A 96 29.155 -28.212 8.613 1.00 11.22 C
ATOM 496 CD2 TRP A 96 30.773 -29.342 7.558 1.00 10.57 C
ATOM 497 NE1 TRP A 96 29.720 -29.094 9.512 1.00 11.57 N
ATOM 498 CE2 TRP A 96 30.713 -29.795 8.890 1.00 10.98 C
ATOM 499 CE3 TRP A 96 31.720 -29.904 6.696 1.00 10.60 C
ATOM 500 CZ2 TRP A 96 31.547 -30.793 9.373 1.00 12.52 C
ATOM 501 CZ3 TRP A 96 32.560 -30.881 7.186 1.00 12.06 C
ATOM 502 CH2 TRP A 96 32.479 -31.313 8.512 1.00 11.72 C
ATOM 503 N THR A 97 30.383 -24.936 4.864 1.00 10.88 N
ATOM 504 CA THR A 97 30.827 -24.204 3.693 1.00 10.65 C
ATOM 505 C THR A 97 31.766 -25.066 2.858 1.00 10.84 C
ATOM 506 O THR A 97 32.223 -26.130 3.296 1.00 10.69 O
ATOM 507 CB THR A 97 31.602 -22.960 4.143 1.00 11.07 C
ATOM 508 OG1 THR A 97 32.717 -23.404 4.923 1.00 12.40 O
ATOM 509 CG2 THR A 97 30.701 -22.026 4.977 1.00 12.69 C
ATOM 510 N ILE A 98 32.069 -24.587 1.655 1.00 10.75 N
ATOM 511 CA ILE A 98 33.014 -25.241 0.774 1.00 10.93 C
ATOM 512 C ILE A 98 34.349 -25.612 1.442 1.00 11.69 C
ATOM 513 O ILE A 98 34.775 -26.756 1.341 1.00 11.92 O
ATOM 514 CB ILE A 98 33.229 -24.393 -0.515 1.00 10.59 C
ATOM 515 CG1 ILE A 98 31.994 -24.515 -1.415 1.00 10.44 C
ATOM 516 CG2 ILE A 98 34.467 -24.832 -1.258 1.00 11.99 C
ATOM 517 CD1 ILE A 98 31.865 -23.451 -2.472 1.00 11.19 C
ATOM 518 N PRO A 99 35.028 -24.654 2.103 1.00 12.86 N
ATOM 519 CA PRO A 99 36.312 -25.036 2.697 1.00 12.90 C
ATOM 520 C PRO A 99 36.208 -26.072 3.817 1.00 12.70 C
ATOM 521 O PRO A 99 37.131 -26.870 4.015 1.00 13.09 O
ATOM 522 CB PRO A 99 36.890 -23.697 3.193 1.00 13.38 C
ATOM 523 CG PRO A 99 35.777 -22.758 3.263 1.00 13.70 C
ATOM 524 CD PRO A 99 34.770 -23.207 2.227 1.00 13.18 C
ATOM 525 N GLN A 100 35.081 -26.100 4.520 1.00 12.56 N
ATOM 526 CA GLN A 100 34.892 -27.115 5.545 1.00 12.83 C
ATOM 527 C GLN A 100 34.722 -28.505 4.925 1.00 12.30 C
ATOM 528 O GLN A 100 35.279 -29.478 5.404 1.00 12.28 O
ATOM 529 CB GLN A 100 33.705 -26.765 6.421 1.00 13.53 C
ATOM 530 CG GLN A 100 33.883 -25.509 7.218 1.00 16.43 C
ATOM 531 CD GLN A 100 32.648 -25.287 8.021 1.00 19.16 C
ATOM 532 NE2 GLN A 100 31.646 -24.831 7.508 1.00 17.90 O
ATOM 533 OE1 GLN A 100 32.675 -25.714 9.280 1.00 24.40 N
'''
class MyTestCase(unittest.TestCase):
def setUp(self):
self.file_to_delete = []
# import files used in tests
self.file_name = 'test_do_flips_clashscore.pdb'
with open(self.file_name,'w') as f:
f.write(test_pdb_str)
self.file_to_delete.append(self.file_name)
def test_identifying_and_addition_of_hydrogen(self):
""" test identifying and addition of hydrogen """
has_reduce = libtbx.env.has_module(name="reduce")
if has_reduce:
pdb_inp = iotbx.pdb.input(file_name=self.file_name)
pdb_hierarchy = pdb_inp.construct_hierarchy()
# don't do flip
result = clashscore(
pdb_hierarchy=pdb_hierarchy,
keep_hydrogens=False)#verbose=True)
self.assertAlmostEqual(result.clashscore,22.9885057471,places=4)
# do flip
result = clashscore(
pdb_hierarchy=pdb_hierarchy,
keep_hydrogens=False,#verbose=True)
do_flips=True)
self.assertEqual(result.clashscore,0)
else:
# Skip test if reduce is not present
pass
def tearDown(self):
""" delete files created in during testing"""
if self.file_to_delete:
for fn in self.file_to_delete:
if os.path.isfile(fn): os.remove(fn)
def METHOD_NAME():
""" Run selected tests
1) List in "tests" the names of the particular test you want to run
2) Comment out unittest.main()
3) Un-comment unittest.TextTestRunner().run(run_selected_tests())
"""
    tests = ['test_identifying_and_addition_of_hydrogen']
suite = unittest.TestSuite(list(map(MyTestCase, tests)))
return suite
if __name__ == '__main__':
# use for individual tests
# unittest.TextTestRunner().run(run_selected_tests())
# Use to run all tests
unittest.main(verbosity=0) |
265 | set folder status | # coding=utf-8
from http import HTTPStatus
from pyramid.config import Configurator
import transaction
import typing
from tracim_backend.app_models.contents import ContentTypeSlug
from tracim_backend.config import CFG # noqa: F401
from tracim_backend.exceptions import ContentFilenameAlreadyUsedInFolder
from tracim_backend.exceptions import EmptyLabelNotAllowed
from tracim_backend.extensions import hapic
from tracim_backend.lib.core.content import ContentApi
from tracim_backend.lib.utils.authorization import ContentTypeChecker
from tracim_backend.lib.utils.authorization import check_right
from tracim_backend.lib.utils.authorization import is_contributor
from tracim_backend.lib.utils.authorization import is_reader
from tracim_backend.lib.utils.request import TracimRequest
from tracim_backend.lib.utils.utils import generate_documentation_swagger_tag
from tracim_backend.models.context_models import ContentInContext
from tracim_backend.models.context_models import RevisionInContext
from tracim_backend.models.revision_protection import new_revision
from tracim_backend.views.controllers import Controller
from tracim_backend.views.core_api.schemas import ContentSchema
from tracim_backend.views.core_api.schemas import FolderContentModifySchema
from tracim_backend.views.core_api.schemas import NoContentSchema
from tracim_backend.views.core_api.schemas import RevisionSchema
from tracim_backend.views.core_api.schemas import SetContentStatusSchema
from tracim_backend.views.core_api.schemas import WorkspaceAndContentIdPathSchema
from tracim_backend.views.swagger_generic_section import SWAGGER_TAG__CONTENT_ENDPOINTS
SWAGGER_TAG__CONTENT_FOLDER_SECTION = "Folders"
SWAGGER_TAG__CONTENT_FOLDER_ENDPOINTS = generate_documentation_swagger_tag(
SWAGGER_TAG__CONTENT_ENDPOINTS, SWAGGER_TAG__CONTENT_FOLDER_SECTION
)
is_folder_content = ContentTypeChecker([ContentTypeSlug.FOLDER.value])
class FolderController(Controller):
@hapic.with_api_doc(tags=[SWAGGER_TAG__CONTENT_FOLDER_ENDPOINTS])
@check_right(is_reader)
@check_right(is_folder_content)
@hapic.input_path(WorkspaceAndContentIdPathSchema())
@hapic.output_body(ContentSchema())
def get_folder(self, context, request: TracimRequest, hapic_data=None) -> ContentInContext:
"""
Get folder info
"""
app_config = request.registry.settings["CFG"] # type: CFG
api = ContentApi(
show_archived=True,
show_deleted=True,
current_user=request.current_user,
session=request.dbsession,
config=app_config,
)
content = api.get_one(hapic_data.path.content_id, content_type=ContentTypeSlug.ANY.value)
return api.get_content_in_context(content)
@hapic.with_api_doc(tags=[SWAGGER_TAG__CONTENT_FOLDER_ENDPOINTS])
@hapic.handle_exception(EmptyLabelNotAllowed, HTTPStatus.BAD_REQUEST)
@hapic.handle_exception(ContentFilenameAlreadyUsedInFolder, HTTPStatus.BAD_REQUEST)
@check_right(is_contributor)
@check_right(is_folder_content)
@hapic.input_path(WorkspaceAndContentIdPathSchema())
@hapic.input_body(FolderContentModifySchema())
@hapic.output_body(ContentSchema())
def update_folder(self, context, request: TracimRequest, hapic_data=None) -> ContentInContext:
"""
update folder
"""
app_config = request.registry.settings["CFG"] # type: CFG
api = ContentApi(
show_archived=True,
show_deleted=True,
current_user=request.current_user,
session=request.dbsession,
config=app_config,
)
content = api.get_one(hapic_data.path.content_id, content_type=ContentTypeSlug.ANY.value)
with new_revision(session=request.dbsession, tm=transaction.manager, content=content):
api.update_container_content(
item=content,
new_label=hapic_data.body.label,
new_description=hapic_data.body.description,
allowed_content_type_slug_list=hapic_data.body.sub_content_types,
)
api.save(content)
return api.get_content_in_context(content)
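    # A hypothetical request for the endpoint above (field names follow the
    # FolderContentModifySchema usage in update_folder; the values are made up):
    #   PUT /workspaces/1/folders/42
    #   {"label": "Reports", "description": "Quarterly reports",
    #    "sub_content_types": ["file", "html-document"]}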
@hapic.with_api_doc(tags=[SWAGGER_TAG__CONTENT_FOLDER_ENDPOINTS])
@check_right(is_reader)
@check_right(is_folder_content)
@hapic.input_path(WorkspaceAndContentIdPathSchema())
@hapic.output_body(RevisionSchema(many=True))
def get_folder_revisions(
self, context, request: TracimRequest, hapic_data=None
) -> typing.List[RevisionInContext]:
"""
get folder revisions
"""
app_config = request.registry.settings["CFG"] # type: CFG
api = ContentApi(
show_archived=True,
show_deleted=True,
current_user=request.current_user,
session=request.dbsession,
config=app_config,
)
content = api.get_one(hapic_data.path.content_id, content_type=ContentTypeSlug.ANY.value)
revisions = content.get_revisions()
return [api.get_revision_in_context(revision, number) for revision, number in revisions]
@hapic.with_api_doc(tags=[SWAGGER_TAG__CONTENT_FOLDER_ENDPOINTS])
@check_right(is_contributor)
@check_right(is_folder_content)
@hapic.input_path(WorkspaceAndContentIdPathSchema())
@hapic.input_body(SetContentStatusSchema())
@hapic.output_body(NoContentSchema(), default_http_code=HTTPStatus.NO_CONTENT)
def METHOD_NAME(self, context, request: TracimRequest, hapic_data=None) -> None:
"""
set folder status
"""
app_config = request.registry.settings["CFG"] # type: CFG
api = ContentApi(
show_archived=True,
show_deleted=True,
current_user=request.current_user,
session=request.dbsession,
config=app_config,
)
content = api.get_one(hapic_data.path.content_id, content_type=ContentTypeSlug.ANY.value)
with new_revision(session=request.dbsession, tm=transaction.manager, content=content):
api.set_status(content, hapic_data.body.status)
api.save(content)
return
def bind(self, configurator: Configurator) -> None:
# Get folder
configurator.add_route(
"folder",
"/workspaces/{workspace_id}/folders/{content_id}",
request_method="GET",
)
configurator.add_view(self.get_folder, route_name="folder")
# update folder
configurator.add_route(
"update_folder",
"/workspaces/{workspace_id}/folders/{content_id}",
request_method="PUT",
)
configurator.add_view(self.update_folder, route_name="update_folder")
# get folder revisions
configurator.add_route(
"folder_revisions",
"/workspaces/{workspace_id}/folders/{content_id}/revisions",
request_method="GET",
)
configurator.add_view(self.get_folder_revisions, route_name="folder_revisions")
        # set folder status
configurator.add_route(
"set_folder_status",
"/workspaces/{workspace_id}/folders/{content_id}/status",
request_method="PUT",
)
configurator.add_view(self.METHOD_NAME, route_name="set_folder_status") |
266 | ingest waze jsons | import datetime
import logging
import json
import requests
from pandas import json_normalize
import pandas as pd
from google.cloud import storage
from anyway.parsers.waze.waze_db_functions import (
insert_waze_alerts,
insert_waze_traffic_jams,
enrich_waze_alerts_ended_at_timestamp,
enrich_waze_traffic_jams_ended_at_timestamp,
)
from anyway.models import WazeAlert, WazeTrafficJams
ISRAEL_POLYGON = [
("33.717000", "32.547000"),
("34.722000", "33.004000"),
("35.793000", "33.331000"),
("35.914000", "32.953000"),
("35.750000", "32.723000"),
("35.395000", "31.084000"),
("34.931000", "29.473000"),
("33.717000", "32.547000"),
("33.717000", "32.547000"),
]
WAZE_ALERTS_API_PARAMS = {
"format": "JSON",
"tk": "ccp_partner",
"ccp_partner_name": "The Public Knowledge Workshop",
"types": "traffic,alerts,irregularities",
"polygon": ";".join([",".join(point) for point in ISRAEL_POLYGON]),
}
WAZE_ALERTS_API_URL = "https://il-georss.waze.com/rtserver/web/TGeoRSS"
logger = logging.getLogger("waze_data")
def list_blobs(bucket_name):
"""
Lists all the blobs in the bucket.
"""
storage_client = storage.Client()
blobs = storage_client.list_blobs(bucket_name)
return blobs
def parse_waze_alerts_data(waze_alerts, back_filled=False):
"""
parse waze alert json into a Dataframe.
param waze_alerts: waze raw alert json data
return: parsed Dataframe
"""
waze_df = json_normalize(waze_alerts)
waze_df["created_at"] = pd.to_datetime(waze_df["pubMillis"], unit="ms")
waze_df.rename(
{
"location.x": "longitude",
"location.y": "latitude",
"nThumbsUp": "number_thumbs_up",
"reportRating": "report_rating",
"reportDescription": "report_description",
"reportByMunicipalityUser": "report_by_municipality_user",
"jamUuid": "jam_uuid",
"type": "alert_type",
"subtype": "alert_subtype",
"roadType": "road_type",
},
axis=1,
inplace=True,
)
waze_df["geom"] = waze_df.apply(
lambda row: "POINT({} {})".format(row["longitude"], row["latitude"]), axis=1
)
waze_df["road_type"] = int(waze_df["road_type"].fillna(-1)[0])
waze_df["number_thumbs_up"] = int(waze_df.get("number_thumbs_up").fillna(0)[0])
waze_df["report_by_municipality_user"] = _convert_to_bool(
waze_df.get("report_by_municipality_user", False)
)
waze_df["back_filled"] = back_filled
waze_df.drop(["country", "pubMillis"], axis=1, inplace=True, errors="ignore")
for key in waze_df.keys():
if waze_df[key] is None or key not in [field.name for field in WazeAlert.__table__.columns]:
waze_df.drop([key], axis=1, inplace=True)
return waze_df.to_dict("records")
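# A truncated, made-up example of one raw alert element handled above:
#   {"pubMillis": 1609459200000, "location": {"x": 34.78, "y": 32.08},
#    "type": "ACCIDENT", "roadType": 2, "nThumbsUp": 0, "country": "IL", ...}
# json_normalize() flattens "location" into "location.x"/"location.y", which are
# renamed to longitude/latitude and combined into the "POINT(34.78 32.08)" geometry.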
def parse_waze_traffic_jams_data(waze_jams, back_filled=False):
"""
parse waze traffic jams json into a Dataframe.
param waze_jams: waze raw traffic jams json data
return: parsed Dataframe
"""
waze_df = json_normalize(waze_jams)
waze_df["created_at"] = pd.to_datetime(waze_df["pubMillis"], unit="ms")
waze_df["geom"] = waze_df["line"].apply(
lambda l: "LINESTRING({})".format(",".join(["{} {}".format(nz["x"], nz["y"]) for nz in l]))
)
waze_df["line"] = waze_df["line"].apply(str)
waze_df["segments"] = waze_df["segments"].apply(str)
waze_df["turnType"] = waze_df["roadType"].fillna(-1)
waze_df.drop(["country", "pubMillis"], axis=1, inplace=True)
waze_df.rename(
{
"speedKMH": "speed_kmh",
"turnType": "turn_type",
"roadType": "road_type",
"endNode": "end_node",
"blockingAlertUuid": "blocking_alert_uuid",
"startNode": "start_node",
},
axis=1,
inplace=True,
)
waze_df["back_filled"] = back_filled
for key in waze_df.keys():
if waze_df[key] is None or key not in [
field.name for field in WazeTrafficJams.__table__.columns
]:
waze_df.drop([key], axis=1, inplace=True)
return waze_df.to_dict("records")
def ingest_waze_from_files(bucket_name, start_date, end_date):
"""
iterate over waze files in google cloud bucket, parse them and insert them to db
param bucket_name: google cloud bucket name
param start_date: date to start fetch waze files
param end_date: date to end fetch waze files
return: parsed Dataframe
"""
blobs = []
total_ingested_alerts = 0
total_ingested_traffic = 0
dates_range = pd.date_range(start=start_date, end=end_date, freq="D")
prefixs = ["waze-api-dumps-TGeoRSS/{}/".format(d.strftime("%Y/%-m/%-d")) for d in dates_range]
storage_client = storage.Client()
for prefix in prefixs:
blobs.extend(storage_client.list_blobs(bucket_name, prefix=prefix, delimiter="/"))
bulk_size = 50
bulk_jsons = []
for waze_file in blobs:
waze_data = waze_file.download_as_string()
waze_json = json.loads(waze_data)
bulk_jsons.append(waze_json)
if len(bulk_jsons) % bulk_size == 0:
alerts_count, jams_count = METHOD_NAME(bulk_jsons, True)
total_ingested_alerts += alerts_count
total_ingested_traffic += jams_count
bulk_jsons = []
# ingest remaining
alerts_count, jams_count = METHOD_NAME(bulk_jsons, True)
total_ingested_alerts += alerts_count
total_ingested_traffic += jams_count
logger.info(f"Ingested {total_ingested_alerts} alerts, {jams_count} jams")
def ingest_waze_from_api():
"""
    Fetch the current Waze data (alerts and traffic jams) from the Waze georss API,
    parse it and insert it into the db.
"""
response = requests.get(WAZE_ALERTS_API_URL, params=WAZE_ALERTS_API_PARAMS)
response.raise_for_status()
waze_data = json.loads(response.content)
alerts_count, jams_count = METHOD_NAME([waze_data])
logger.info(f"Ingested {alerts_count} alerts, {jams_count} jams")
def METHOD_NAME(waze_jsons, back_filled=False):
waze_alerts = []
waze_traffic_jams = []
waze_file_timestamp = None
for waze_data in waze_jsons:
waze_file_timestamp = datetime.datetime.fromisoformat(waze_data["endTime"])
waze_alerts.extend(parse_waze_alerts_data(waze_data["alerts"], back_filled))
waze_traffic_jams.extend(parse_waze_traffic_jams_data(waze_data.get("jams"), back_filled))
logger.debug(f"Ingesting #{len(waze_alerts)} waze_alert records in bulk")
insert_waze_alerts(waze_alerts)
logger.debug(f"Ingesting #{len(waze_traffic_jams)} waze_traffic_jams records in bulk")
insert_waze_traffic_jams(waze_traffic_jams)
enrich_waze_alerts_ended_at_timestamp(waze_file_timestamp, waze_alerts, back_filled)
enrich_waze_traffic_jams_ended_at_timestamp(waze_file_timestamp, waze_traffic_jams, back_filled)
return len(waze_alerts), len(waze_traffic_jams)
def _convert_to_bool(value):
if isinstance(value, bool):
return value
else:
return str(value).lower() in ("yes", "true", "t", "1") |
267 | test getpeerinfo | #!/usr/bin/env python3
# Copyright (c) 2017-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC calls related to net.
Tests correspond to code in rpc/net.cpp.
"""
from decimal import Decimal
from test_framework.test_framework import MuntTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
assert_greater_than,
assert_raises_rpc_error,
connect_nodes_bi,
p2p_port,
wait_until,
)
from test_framework.mininode import P2PInterface
from test_framework.messages import CAddress, msg_addr, NODE_NETWORK, NODE_WITNESS
class NetTest(MuntTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [["-minrelaytxfee=0.00001000"],["-minrelaytxfee=0.00000500"]]
def run_test(self):
self._test_connection_count()
self._test_getnettotals()
self._test_getnetworkinginfo()
self._test_getaddednodeinfo()
self.METHOD_NAME()
#fixme: Munt doesn't have this RPC command yet
#self._test_getnodeaddresses()
def _test_connection_count(self):
# connect_nodes_bi connects each node to the other
assert_equal(self.nodes[0].getconnectioncount(), 2)
def _test_getnettotals(self):
# getnettotals totalbytesrecv and totalbytessent should be
# consistent with getpeerinfo. Since the RPC calls are not atomic,
# and messages might have been recvd or sent between RPC calls, call
# getnettotals before and after and verify that the returned values
# from getpeerinfo are bounded by those values.
net_totals_before = self.nodes[0].getnettotals()
peer_info = self.nodes[0].getpeerinfo()
net_totals_after = self.nodes[0].getnettotals()
assert_equal(len(peer_info), 2)
peers_recv = sum([peer['bytesrecv'] for peer in peer_info])
peers_sent = sum([peer['bytessent'] for peer in peer_info])
assert_greater_than_or_equal(peers_recv, net_totals_before['totalbytesrecv'])
assert_greater_than_or_equal(net_totals_after['totalbytesrecv'], peers_recv)
assert_greater_than_or_equal(peers_sent, net_totals_before['totalbytessent'])
assert_greater_than_or_equal(net_totals_after['totalbytessent'], peers_sent)
# test getnettotals and getpeerinfo by doing a ping
# the bytes sent/received should change
# note ping and pong are 32 bytes each
self.nodes[0].ping()
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytessent'] >= net_totals_after['totalbytessent'] + 32 * 2), timeout=1)
wait_until(lambda: (self.nodes[0].getnettotals()['totalbytesrecv'] >= net_totals_after['totalbytesrecv'] + 32 * 2), timeout=1)
peer_info_after_ping = self.nodes[0].getpeerinfo()
for before, after in zip(peer_info, peer_info_after_ping):
assert_greater_than_or_equal(after['bytesrecv_per_msg'].get('pong', 0), before['bytesrecv_per_msg'].get('pong', 0) + 32)
assert_greater_than_or_equal(after['bytessent_per_msg'].get('ping', 0), before['bytessent_per_msg'].get('ping', 0) + 32)
def _test_getnetworkinginfo(self):
assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
# fixme: this fails for some reason; temporarily disabled but must be fixed.
#self.nodes[0].setnetworkactive(state=False)
#assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], False)
# Wait a bit for all sockets to close
#wait_until(lambda: self.nodes[0].getnetworkinfo()['connections'] == 0, timeout=3)
#self.nodes[0].setnetworkactive(state=True)
#connect_nodes_bi(self.nodes, 0, 1)
#assert_equal(self.nodes[0].getnetworkinfo()['networkactive'], True)
#assert_equal(self.nodes[0].getnetworkinfo()['connections'], 2)
def _test_getaddednodeinfo(self):
assert_equal(self.nodes[0].getaddednodeinfo(), [])
# add a node (node2) to node0
ip_port = "127.0.0.1:{}".format(p2p_port(2))
self.nodes[0].addnode(node=ip_port, command='add')
# check that the node has indeed been added
added_nodes = self.nodes[0].getaddednodeinfo(ip_port)
assert_equal(len(added_nodes), 1)
assert_equal(added_nodes[0]['addednode'], ip_port)
# check that a non-existent node returns an error
assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1')
def METHOD_NAME(self):
peer_info = [x.getpeerinfo() for x in self.nodes]
# check both sides of bidirectional connection between nodes
# the address bound to on one side will be the source address for the other node
assert_equal(peer_info[0][0]['addrbind'], peer_info[1][0]['addr'])
assert_equal(peer_info[1][0]['addrbind'], peer_info[0][0]['addr'])
#fixme: get working and reenable
#assert_equal(peer_info[0][0]['minfeefilter'], Decimal("0.00000500"))
#assert_equal(peer_info[1][0]['minfeefilter'], Decimal("0.00001000"))
def _test_getnodeaddresses(self):
self.nodes[0].add_p2p_connection(P2PInterface())
# send some addresses to the node via the p2p message addr
msg = msg_addr()
imported_addrs = []
for i in range(256):
a = "123.123.123.{}".format(i)
imported_addrs.append(a)
addr = CAddress()
addr.time = 100000000
addr.nServices = NODE_NETWORK | NODE_WITNESS
addr.ip = a
addr.port = 8333
msg.addrs.append(addr)
self.nodes[0].p2p.send_and_ping(msg)
# obtain addresses via rpc call and check they were ones sent in before
REQUEST_COUNT = 10
node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
assert_equal(len(node_addresses), REQUEST_COUNT)
for a in node_addresses:
assert_greater_than(a["time"], 1527811200) # 1st June 2018
assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS)
assert a["address"] in imported_addrs
assert_equal(a["port"], 8333)
assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1)
# addrman's size cannot be known reliably after insertion, as hash collisions may occur
# so only test that requesting a large number of addresses returns less than that
LARGE_REQUEST_COUNT = 10000
node_addresses = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
assert_greater_than(LARGE_REQUEST_COUNT, len(node_addresses))
if __name__ == '__main__':
NetTest().main() |
268 | delete multi | '''
s3.py - this file is part of S3QL.
Copyright © 2008 Nikolaus Rath <[email protected]>
This work can be distributed under the terms of the GNU GPLv3.
'''
import hashlib
import hmac
import logging
import re
import time
import urllib.parse
from xml.sax.saxutils import escape as xml_escape
from ..logging import QuietError
from . import s3c
from .common import retry
from .s3c import get_S3Error
log = logging.getLogger(__name__)
# Maximum number of keys that can be deleted at once
MAX_KEYS = 1000
# Pylint goes berserk with false positives
# pylint: disable=E1002,E1101
class Backend(s3c.Backend):
"""A backend to store data in Amazon S3
This class uses standard HTTP connections to connect to S3.
"""
known_options = (s3c.Backend.known_options | {'sse', 'rrs', 'ia', 'oia', 'it'}) - {
'dumb-copy',
'disable-expect100',
}
def __init__(self, options):
self.region = None
self.signing_key = None
super().__init__(options)
self._set_storage_options(self._extra_put_headers)
def _parse_storage_url(self, storage_url, ssl_context):
hit = re.match(r'^s3s?://([^/]+)/([^/]+)(?:/(.*))?$', storage_url)
if not hit:
raise QuietError('Invalid storage URL', exitcode=2)
self.region = hit.group(1)
bucket_name = hit.group(2)
# http://docs.amazonwebservices.com/AmazonS3/2006-03-01/dev/BucketRestrictions.html
if not re.match('^[a-z0-9][a-z0-9.-]{1,60}[a-z0-9]$', bucket_name):
raise QuietError('Invalid bucket name.', exitcode=2)
if self.region.startswith('cn-'):
hostname = 's3.%s.amazonaws.com.cn' % self.region
else:
hostname = 's3.%s.amazonaws.com' % self.region
prefix = hit.group(3) or ''
port = 443 if ssl_context else 80
return (hostname, port, bucket_name, prefix)
def __str__(self):
return 'Amazon S3 bucket %s, prefix %s' % (self.bucket_name, self.prefix)
@property
def has_delete_multi(self):
return True
def delete_multi(self, keys):
log.debug('started with %s', keys)
while len(keys) > 0:
tmp = keys[:MAX_KEYS]
try:
self.METHOD_NAME(tmp)
finally:
keys[:MAX_KEYS] = tmp
def _set_storage_options(self, headers):
if 'sse' in self.options:
headers['x-amz-server-side-encryption'] = 'AES256'
if 'ia' in self.options:
sc = 'STANDARD_IA'
elif 'oia' in self.options:
sc = 'ONEZONE_IA'
elif 'rrs' in self.options:
sc = 'REDUCED_REDUNDANCY'
elif 'it' in self.options:
sc = 'INTELLIGENT_TIERING'
else:
sc = 'STANDARD'
headers['x-amz-storage-class'] = sc
@retry
def METHOD_NAME(self, keys):
body = ['<Delete>']
esc_prefix = xml_escape(self.prefix)
for key in keys:
body.append('<Object><Key>%s%s</Key></Object>' % (esc_prefix, xml_escape(key)))
body.append('</Delete>')
body = '\n'.join(body).encode('utf-8')
headers = {'content-type': 'text/xml; charset=utf-8'}
resp = self._do_request('POST', '/', subres='delete', body=body, headers=headers)
try:
root = self._parse_xml_response(resp)
ns_p = self.xml_ns_prefix
error_tags = root.findall(ns_p + 'Error')
if not error_tags:
# No errors occurred, everything has been deleted
del keys[:]
return
# Some errors occurred, so we need to determine what has
# been deleted and what hasn't
offset = len(self.prefix)
for tag in root.findall(ns_p + 'Deleted'):
fullkey = tag.find(ns_p + 'Key').text
assert fullkey.startswith(self.prefix)
keys.remove(fullkey[offset:])
if log.isEnabledFor(logging.DEBUG):
for errtag in error_tags:
log.debug(
'Delete %s failed with %s',
errtag.findtext(ns_p + 'Key')[offset:],
errtag.findtext(ns_p + 'Code'),
)
errcode = error_tags[0].findtext(ns_p + 'Code')
errmsg = error_tags[0].findtext(ns_p + 'Message')
errkey = error_tags[0].findtext(ns_p + 'Key')[offset:]
if errcode == 'NoSuchKeyError':
pass
else:
raise get_S3Error(errcode, 'Error deleting %s: %s' % (errkey, errmsg))
        except:
            self.conn.discard()
            raise
def _authorize_request(self, method, path, headers, subres, query_string):
'''Add authorization information to *headers*'''
# See http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html
now = time.gmtime()
# now = time.strptime('Fri, 24 May 2013 00:00:00 GMT',
# '%a, %d %b %Y %H:%M:%S GMT')
ymd = time.strftime('%Y%m%d', now)
ymdhms = time.strftime('%Y%m%dT%H%M%SZ', now)
headers['x-amz-date'] = ymdhms
headers['x-amz-content-sha256'] = 'UNSIGNED-PAYLOAD'
# headers['x-amz-content-sha256'] = hashlib.sha256(body).hexdigest()
headers.pop('Authorization', None)
auth_strs = [method]
auth_strs.append(urllib.parse.quote(path))
if query_string:
s = urllib.parse.urlencode(
query_string, doseq=True, quote_via=urllib.parse.quote
).split('&')
else:
s = []
if subres:
s.append(urllib.parse.quote(subres) + '=')
if s:
s = '&'.join(sorted(s))
else:
s = ''
auth_strs.append(s)
# Headers
sig_hdrs = sorted(x.lower() for x in headers.keys())
for hdr in sig_hdrs:
auth_strs.append('%s:%s' % (hdr, headers[hdr].strip()))
auth_strs.append('')
auth_strs.append(';'.join(sig_hdrs))
auth_strs.append(headers['x-amz-content-sha256'])
can_req = '\n'.join(auth_strs)
# log.debug('canonical request: %s', can_req)
can_req_hash = hashlib.sha256(can_req.encode()).hexdigest()
str_to_sign = (
"AWS4-HMAC-SHA256\n"
+ ymdhms
+ '\n'
+ '%s/%s/s3/aws4_request\n' % (ymd, self.region)
+ can_req_hash
)
# log.debug('string to sign: %s', str_to_sign)
if self.signing_key is None or self.signing_key[1] != ymd:
self.update_signing_key(ymd)
signing_key = self.signing_key[0]
sig = hmac_sha256(signing_key, str_to_sign.encode(), hex=True)
cred = '%s/%04d%02d%02d/%s/s3/aws4_request' % (
self.login,
now.tm_year,
now.tm_mon,
now.tm_mday,
self.region,
)
headers['Authorization'] = (
'AWS4-HMAC-SHA256 '
'Credential=%s,'
'SignedHeaders=%s,'
'Signature=%s' % (cred, ';'.join(sig_hdrs), sig)
)
def update_signing_key(self, ymd):
date_key = hmac_sha256(("AWS4" + self.password).encode(), ymd.encode())
region_key = hmac_sha256(date_key, self.region.encode())
service_key = hmac_sha256(region_key, b's3')
signing_key = hmac_sha256(service_key, b'aws4_request')
self.signing_key = (signing_key, ymd)
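# The signing-key derivation above follows AWS Signature Version 4: starting from
# "AWS4" + secret key, successive HMAC-SHA256 applications bind the date (YYYYMMDD),
# the region, the service name ("s3") and the literal "aws4_request".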
def hmac_sha256(key, msg, hex=False):
d = hmac.new(key, msg, hashlib.sha256)
if hex:
return d.hexdigest()
else:
return d.digest() |
269 | attendee content delete | """ Views for attendees to manage their own content."""
from models.cfp import PYTHON_CFP_TYPES, Proposal, Venue, AGE_RANGE_OPTIONS
from sqlalchemy import or_
from flask_login import login_required, current_user
from flask import (
current_app as app,
render_template,
redirect,
url_for,
request,
flash,
)
from wtforms import (
StringField,
TextAreaField,
SelectField,
IntegerField,
DecimalField,
TimeField,
BooleanField,
SubmitField,
)
from wtforms.validators import DataRequired, Optional, NumberRange
from datetime import date, datetime, timedelta
from main import db
from ..common.forms import Form
from ..common import feature_flag
from . import schedule
def venues_for_user(user):
venues = []
if user.village:
venues.extend(user.village.venues)
public_venues = Venue.query.filter_by(
village_id=None, scheduled_content_only=False
).all()
venues.extend(public_venues)
return venues
class ContentForm(Form):
def day_choices(self):
d = date.fromisoformat(app.config["EVENT_START"].split(" ")[0])
end_date = date.fromisoformat(app.config["EVENT_END"].split(" ")[0])
choices = []
while d <= end_date:
choices.append((d.isoformat(), d.strftime("%A - %d-%m-%Y")))
d += timedelta(days=1)
return choices
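    # For example, with a hypothetical EVENT_START of "2024-05-30 ..." and EVENT_END
    # of "2024-06-02 ...", day_choices() yields pairs such as
    # ("2024-05-30", "Thursday - 30-05-2024"), one per day of the event.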
def populate_choices(self, user):
self.day.choices = self.day_choices()
venues = venues_for_user(user)
self.venue.choices = [(v.id, v.name) for v in venues]
type = SelectField(
"Type of content",
default="workshop",
choices=[
("workshop", "Workshop"),
("youthworkshop", "Youth Workshop"),
("talk", "Talk"),
("performance", "Performance"),
],
)
venue = SelectField("Venue", [DataRequired()], coerce=int)
published_names = StringField("Name", [DataRequired()])
title = StringField("Title", [DataRequired()])
description = TextAreaField("Description", [DataRequired()])
day = SelectField(
"Day",
)
scheduled_time = TimeField("Start time", [DataRequired()])
scheduled_duration = IntegerField("Length", [DataRequired(), NumberRange(min=1)])
attendees = IntegerField("Attendees", [Optional(), NumberRange(min=0)])
cost = DecimalField("Cost per attendee", [Optional(), NumberRange(min=0)], places=2)
participant_equipment = StringField("Attendee equipment")
age_range = SelectField("Age range", choices=AGE_RANGE_OPTIONS)
acknowledge_conflicts = BooleanField("Acknowledge conflicts")
def populate(proposal, form):
proposal.type = form.type.data
proposal.scheduled_venue_id = form.venue.data
proposal.published_names = form.published_names.data
proposal.title = proposal.published_title = form.title.data
proposal.description = proposal.published_description = form.description.data
proposal.scheduled_time = datetime.fromisoformat(
"{}T{}".format(form.day.data, form.scheduled_time.data.strftime("%H:%M"))
)
proposal.length = proposal.scheduled_duration = form.scheduled_duration.data
proposal.allowed_times = "{} > {}"
proposal.attendees = form.attendees.data
proposal.cost = proposal.published_cost = form.cost.data
proposal.age_range = proposal.published_age_range = form.age_range.data
proposal.participant_equipment = (
proposal.published_participant_equipment
) = form.participant_equipment.data
@schedule.route("/attendee-content", methods=["GET", "POST"])
@login_required
@feature_flag("ATTENDEE_CONTENT")
def attendee_content():
venue_ids = [v.id for v in venues_for_user(current_user)]
content = Proposal.query.filter(
Proposal.user_id == current_user.id,
or_(
            Proposal.user_scheduled.is_(True), Proposal.scheduled_venue_id.in_(venue_ids)
),
Proposal.state.in_(["accepted", "finished"]),
).all()
form = ContentForm()
form.populate_choices(current_user)
if request.method == "POST" and form.validate():
proposal = PYTHON_CFP_TYPES[form.type.data]()
proposal.user_id = current_user.id
proposal.user_scheduled = True
proposal.state = "finished"
populate(proposal, form)
conflicts = proposal.get_conflicting_content()
if len(conflicts) > 0 and form.acknowledge_conflicts.data is not True:
return render_template(
"schedule/attendee_content/index.html",
content=content,
form=form,
conflicts=conflicts,
)
db.session.add(proposal)
db.session.commit()
return redirect(url_for("schedule.attendee_content"))
return render_template(
"schedule/attendee_content/index.html",
content=content,
form=form,
action=url_for("schedule.attendee_content"),
)
@schedule.route("/attendee-content/<int:id>/edit", methods=["GET", "POST"])
@login_required
@feature_flag("ATTENDEE_CONTENT")
def attendee_content_edit(id):
proposal = Proposal.query.filter_by(id=id).first()
if not proposal or (
proposal.user_id != current_user.id
and proposal.scheduled_venue.village_id != current_user.village.id
):
return redirect(url_for("schedule.attendee_content"))
form = ContentForm(obj=proposal)
form.populate_choices(current_user)
if request.method == "POST" and form.validate():
populate(proposal, form)
conflicts = proposal.get_conflicting_content()
if len(conflicts) > 0 and form.acknowledge_conflicts.data is not True:
return render_template(
"schedule/attendee_content/index.html",
form=form,
conflicts=conflicts,
)
db.session.add(proposal)
db.session.commit()
return redirect(url_for("schedule.attendee_content"))
return render_template(
"schedule/attendee_content/edit.html",
proposal=proposal,
form=form,
action=url_for("schedule.attendee_content_edit", id=id),
)
class DeleteAttendeeContentForm(Form):
delete = SubmitField("Delete content")
@schedule.route("/attendee-content/<int:id>/delete", methods=["GET", "POST"])
@login_required
@feature_flag("ATTENDEE_CONTENT")
def METHOD_NAME(id):
proposal = Proposal.query.get_or_404(id)
can_delete = proposal.user_id == current_user.id and proposal.user_scheduled
if not can_delete:
app.logger.warn(f"{current_user} cannot delete proposal {proposal}")
flash("You can't delete this content")
return redirect(url_for("schedule.attendee_content"))
form = DeleteAttendeeContentForm()
if form.validate_on_submit():
db.session.delete(proposal)
db.session.commit()
return redirect(url_for("schedule.attendee_content"))
return render_template(
"schedule/attendee_content/delete.html",
proposal=proposal,
form=form,
) |
270 | logistic curve | # This code is from https://github.com/automl/pybnn
# pybnn authors: Aaron Klein, Moritz Freidank
import numpy as np
# all the models that we considered at some point
all_models = {}
model_defaults = {}
display_name_mapping = {}
def pow3(x, c, a, alpha):
return c - a * x ** (-alpha)
all_models["pow3"] = pow3
model_defaults["pow3"] = {"c": 0.84, "a": 0.52, "alpha": 0.01}
display_name_mapping["pow3"] = "pow$_3$"
def linear(x, a, b):
return a * x + b
# models["linear"] = linear
all_models["linear"] = linear
"""
Source: curve expert
"""
def log_power(x, a, b, c):
# logistic power
return a / (1.0 + (x / np.exp(b)) ** c)
all_models["log_power"] = log_power
model_defaults["log_power"] = {"a": 0.77, "c": -0.51, "b": 2.98}
display_name_mapping["log_power"] = "log power"
def weibull(x, alpha, beta, kappa, delta):
"""
    Weibull model
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
alpha: upper asymptote
beta: lower asymptote
    kappa: growth rate
delta: controls the x-ordinate for the point of inflection
"""
return alpha - (alpha - beta) * np.exp(-((kappa * x) ** delta))
all_models["weibull"] = weibull
model_defaults["weibull"] = {"alpha": 0.7, "beta": 0.1, "kappa": 0.01, "delta": 1}
display_name_mapping["weibull"] = "Weibull"
def mmf(x, alpha, beta, kappa, delta):
"""
Morgan-Mercer-Flodin
description:
Nonlinear Regression page 342
http://bit.ly/1jodG17
http://www.pisces-conservation.com/growthhelp/index.html?morgan_mercer_floden.htm
alpha: upper asymptote
kappa: growth rate
beta: initial value
delta: controls the point of inflection
"""
return alpha - (alpha - beta) / (1.0 + (kappa * x) ** delta)
all_models["mmf"] = mmf
model_defaults["mmf"] = {"alpha": 0.7, "kappa": 0.01, "beta": 0.1, "delta": 5}
display_name_mapping["mmf"] = "MMF"
def janoschek(x, a, beta, k, delta):
"""
http://www.pisces-conservation.com/growthhelp/janoschek.htm
"""
return a - (a - beta) * np.exp(-k * x ** delta)
all_models["janoschek"] = janoschek
model_defaults["janoschek"] = {"a": 0.73, "beta": 0.07, "k": 0.355, "delta": 0.46}
display_name_mapping["janoschek"] = "Janoschek"
def ilog2(x, c, a):
x = 1 + x
assert np.all(x > 1)
return c - a / np.log(x)
all_models["ilog2"] = ilog2
model_defaults["ilog2"] = {"a": 0.43, "c": 0.78}
display_name_mapping["ilog2"] = "ilog$_2$"
def dr_hill_zero_background(x, theta, eta, kappa):
x_eta = x ** eta
return (theta * x_eta) / (kappa ** eta + x_eta)
all_models["dr_hill_zero_background"] = dr_hill_zero_background
model_defaults["dr_hill_zero_background"] = {
"theta": 0.772320,
"eta": 0.586449,
"kappa": 2.460843,
}
display_name_mapping["dr_hill_zero_background"] = "Hill$_3$"
def logx_linear(x, a, b):
x = np.log(x)
return a * x + b
all_models["logx_linear"] = logx_linear
model_defaults["logx_linear"] = {"a": 0.378106, "b": 0.046506}
display_name_mapping["logx_linear"] = "log x linear"
def vap(x, a, b, c):
"""Vapor pressure model"""
return np.exp(a + b / x + c * np.log(x))
all_models["vap"] = vap
model_defaults["vap"] = {"a": -0.622028, "c": 0.042322, "b": -0.470050}
display_name_mapping["vap"] = "vapor pressure"
def loglog_linear(x, a, b):
x = np.log(x)
return np.log(a * x + b)
all_models["loglog_linear"] = loglog_linear
display_name_mapping["loglog_linear"] = "log log linear"
# Models that we chose not to use in the ensembles/model combinations:
# source: http://aclweb.org/anthology//P/P12/P12-1003.pdf
def exp3(x, c, a, b):
return c - np.exp(-a * x + b)
all_models["exp3"] = exp3
model_defaults["exp3"] = {"c": 0.7, "a": 0.01, "b": -1}
display_name_mapping["exp3"] = "exp$_3$"
def exp4(x, c, a, b, alpha):
return c - np.exp(-a * (x ** alpha) + b)
all_models["exp4"] = exp4
model_defaults["exp4"] = {"c": 0.7, "a": 0.8, "b": -0.8, "alpha": 0.3}
display_name_mapping["exp4"] = "exp$_4$"
# not bounded!
# def logy_linear(x, a, b):
# return np.log(a*x + b)
# all_models["logy_linear"] = logy_linear
def pow2(x, a, alpha):
return a * x ** (-alpha)
all_models["pow2"] = pow2
model_defaults["pow2"] = {"a": 0.1, "alpha": -0.3}
display_name_mapping["pow2"] = "pow$_2$"
def pow4(x, c, a, b, alpha):
return c - (a * x + b) ** -alpha
all_models["pow4"] = pow4
model_defaults["pow4"] = {"alpha": 0.1, "a": 200, "b": 0.0, "c": 0.8}
display_name_mapping["pow4"] = "pow$_4$"
def sat_growth(x, a, b):
return a * x / (b + x)
all_models["sat_growth"] = sat_growth
model_defaults["sat_growth"] = {"a": 0.7, "b": 20}
display_name_mapping["sat_growth"] = "saturated growth rate"
def dr_hill(x, alpha, theta, eta, kappa):
return alpha + (theta * (x ** eta)) / (kappa ** eta + x ** eta)
all_models["dr_hill"] = dr_hill
model_defaults["dr_hill"] = {
"alpha": 0.1,
"theta": 0.772320,
"eta": 0.586449,
"kappa": 2.460843,
}
display_name_mapping["dr_hill"] = "Hill$_4$"
def gompertz(x, a, b, c):
"""
Gompertz growth function.
sigmoidal family
    a is the upper asymptote
    b, c are positive numbers in this parameterization (note the minus signs in the exponentials)
b sets the displacement along the x axis (translates the graph to the left or right)
c sets the growth rate (y scaling)
e.g. used to model the growth of tumors
http://en.wikipedia.org/wiki/Gompertz_function
"""
return a * np.exp(-b * np.exp(-c * x))
# return a + b * np.exp(np.exp(-k*(x-i)))
all_models["gompertz"] = gompertz
model_defaults["gompertz"] = {"a": 0.8, "b": 1000, "c": 0.05}
display_name_mapping["gompertz"] = "Gompertz"
def METHOD_NAME(x, a, k, b):
"""
a: asymptote
    k: growth rate (steepness of the curve)
b: inflection point
http://www.pisces-conservation.com/growthhelp/logistic_curve.htm
"""
return a / (1.0 + np.exp(-k * (x - b)))
all_models["logistic_curve"] = METHOD_NAME
model_defaults["logistic_curve"] = {"a": 0.8, "k": 0.01, "b": 1.0}
display_name_mapping["logistic_curve"] = "logistic curve"
def bertalanffy(x, a, k):
"""
a: asymptote
k: growth rate
http://www.pisces-conservation.com/growthhelp/von_bertalanffy.htm
"""
return a * (1.0 - np.exp(-k * x))
all_models["bertalanffy"] = bertalanffy
model_defaults["bertalanffy"] = {"a": 0.8, "k": 0.01}
display_name_mapping["bertalanffy"] = "Bertalanffy"
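# Illustrative helper (not part of the original pybnn module): shows how one of
# the parametric models above can be fitted to observed learning-curve points.
# It assumes scipy is available; the data below is synthetic.
def _example_fit_pow3():
    """Fit ``pow3`` to noisy synthetic data with scipy.optimize.curve_fit."""
    from scipy.optimize import curve_fit
    xs = np.arange(1, 21, dtype=float)
    defaults = model_defaults["pow3"]
    # generate noisy observations around the default pow3 curve
    ys = pow3(xs, **defaults) + 0.01 * np.random.randn(xs.size)
    p0 = [defaults["c"], defaults["a"], defaults["alpha"]]
    popt, _ = curve_fit(pow3, xs, ys, p0=p0)
    return dict(zip(("c", "a", "alpha"), popt))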
curve_combination_models_old = [
"vap",
"ilog2",
"weibull",
"pow3",
"pow4",
"loglog_linear",
"mmf",
"janoschek",
"dr_hill_zero_background",
"log_power",
"exp4",
]
curve_combination_models_original = [
"weibull",
"pow4",
"mmf",
"pow3",
"loglog_linear",
"janoschek",
"dr_hill_zero_background",
"log_power",
"exp4",
]
# note: removing some of the models was found to improve performance
curve_combination_models = [
"mmf",
"loglog_linear",
"dr_hill_zero_background",
"log_power",
]
curve_ensemble_models = [
"vap",
"ilog2",
"weibull",
"pow3",
"pow4",
"loglog_linear",
"mmf",
"janoschek",
"dr_hill_zero_background",
"log_power",
"exp4",
] |
271 | test should raise command error if database | # -*- coding: utf-8 -*-
from io import StringIO
from django.core.management import CommandError, call_command
from django.test import TestCase
from django.test.utils import override_settings
from unittest.mock import patch
MYSQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'dbatabase',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': '127.0.0.1',
'PORT': '3306',
}
SQLITE3_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.sqlite3',
}
POSTGRESQL_DATABASE_SETTINGS = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database',
'USER': 'foo',
'PASSWORD': 'bar',
'HOST': 'localhost',
'PORT': '5432',
}
POSTGRESQL_DATABASE_SETTINGS_SOCKET_MODE = {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'database',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
class SqlcreateExceptionsTests(TestCase):
"""Test for sqlcreate exception."""
def METHOD_NAME(self):
with self.assertRaisesRegex(CommandError, "Unknown database unknown"):
call_command('sqlcreate', '--database=unknown')
class SqlCreateTests(TestCase):
"""Tests for sqlcreate command."""
@override_settings(DATABASES={'default': MYSQL_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
@patch('django_extensions.management.commands.sqlcreate.socket')
def test_should_print_SQL_create_database_statement_for_mysql(self, m_socket, m_stdout, m_stderr):
m_socket.gethostname.return_value = 'tumbleweed'
expected_error = """-- WARNING!: https://docs.djangoproject.com/en/dev/ref/databases/#collation-settings
-- Please read this carefully! Collation will be set to utf8_bin to have case-sensitive data.
"""
expected_statement = """CREATE DATABASE dbatabase CHARACTER SET utf8 COLLATE utf8_bin;
GRANT ALL PRIVILEGES ON dbatabase.* to 'foo'@'tumbleweed' identified by 'bar';
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS_SOCKET_MODE})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_create_database_statement_only_for_postgresql_when_unix_domain_socket_mode_is_used(self, m_stdout):
expected_statement = """-- Assuming that unix domain socket connection mode is being used because
-- USER or PASSWORD are blank in Django DATABASES configuration.
CREATE DATABASE database WITH ENCODING 'UTF-8';
"""
call_command('sqlcreate')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': POSTGRESQL_DATABASE_SETTINGS})
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_SQL_drop_and_create_database_statement_for_postgresql(self, m_stdout):
expected_statement = """DROP DATABASE IF EXISTS database;
DROP USER IF EXISTS foo;
CREATE USER foo WITH ENCRYPTED PASSWORD 'bar' CREATEDB;
CREATE DATABASE database WITH ENCODING 'UTF-8' OWNER "foo";
GRANT ALL PRIVILEGES ON DATABASE database TO foo;
"""
call_command('sqlcreate', '--drop')
self.assertEqual(expected_statement, m_stdout.getvalue())
@override_settings(DATABASES={'default': SQLITE3_DATABASE_SETTINGS})
@patch('sys.stderr', new_callable=StringIO)
def test_should_print_stderr_for_sqlite3(self, m_stderr):
expected_error = "-- manage.py migrate will automatically create a sqlite3 database file.\n"
call_command('sqlcreate')
self.assertEqual(expected_error, m_stderr.getvalue())
@override_settings(DATABASES={
'unknown': {
'ENGINE': 'django.db.backends.unknown',
'NAME': 'database',
'USER': 'foo',
}
})
@patch('sys.stderr', new_callable=StringIO)
@patch('sys.stdout', new_callable=StringIO)
def test_should_print_stderr_and_standard_create_database_statement_for_unsupported_engine(self, m_stdout, m_stderr):
expected_error = "-- Don't know how to handle 'django.db.backends.unknown' falling back to SQL.\n"
expected_statement = """CREATE DATABASE database;
GRANT ALL PRIVILEGES ON DATABASE database to foo;
"""
call_command('sqlcreate', '--database=unknown')
self.assertEqual(expected_error, m_stderr.getvalue())
self.assertEqual(expected_statement, m_stdout.getvalue()) |
272 | port start | # sfputil.py
#
# Platform-specific SFP transceiver interface for SONiC
#
try:
import time
from sonic_sfp.sfputilbase import SfpUtilBase
except ImportError as e:
raise ImportError("%s - required module not found" % str(e))
class SfpUtil(SfpUtilBase):
"""Platform-specific SfpUtil class"""
PORT_START = 0
PORT_END = 55
PORTS_IN_BLOCK = 56
QSFP_PORT_START = 48
QSFP_PORT_END = 55
_port_to_eeprom_mapping = {}
port_to_i2c_mapping = {
0: 18,
1: 19,
2: 20,
3: 21,
4: 22,
5: 23,
6: 24,
7: 25,
8: 26,
9: 27,
10: 28,
11: 29,
12: 30,
13: 31,
14: 32,
15: 33,
16: 34,
17: 35,
18: 36,
19: 37,
20: 38,
21: 39,
22: 40,
23: 41,
24: 42,
25: 43,
26: 44,
27: 45,
28: 46,
29: 47,
30: 48,
31: 49,
32: 50,
33: 51,
34: 52,
35: 53,
36: 54,
37: 55,
38: 56,
39: 57,
40: 58,
41: 59,
42: 60,
43: 61,
44: 62,
45: 63,
46: 64,
47: 65,
48: 10,
49: 11,
50: 12,
51: 13,
52: 14,
53: 15,
54: 16,
55: 17
}
@property
def METHOD_NAME(self):
return self.PORT_START
@property
def port_end(self):
return self.PORT_END
@property
def qsfp_port_start(self):
return self.QSFP_PORT_START
@property
def qsfp_port_end(self):
return self.QSFP_PORT_END
@property
def qsfp_ports(self):
        return list(range(self.QSFP_PORT_START, self.QSFP_PORT_END + 1))
@property
def port_to_eeprom_mapping(self):
return self._port_to_eeprom_mapping
def __init__(self):
eeprom_path = "/sys/bus/i2c/devices/{0}-0050/eeprom"
for x in range(0, self.port_end + 1):
port_eeprom_path = eeprom_path.format(self.port_to_i2c_mapping[x])
self.port_to_eeprom_mapping[x] = port_eeprom_path
SfpUtilBase.__init__(self)
def get_presence(self, port_num):
# Check for invalid port_num
if port_num < self.METHOD_NAME or port_num > self.port_end:
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/present")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = int(reg_file.readline().rstrip())
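        # The presence signal is active low: 0 means a transceiver is inserted.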
if reg_value == 0:
return True
return False
def get_low_power_mode(self, port_num):
# Check for invalid port_num
if port_num < self.METHOD_NAME or port_num > self.port_end:
return False
if port_num < self.qsfp_port_start or port_num > self.qsfp_port_end:
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/lpmod")
except IOError as e:
            print("Error: unable to open file: %s" % str(e))
            return False
reg_value = int(reg_file.readline().rstrip())
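        # The lpmod register is active high: a non-zero value means low-power mode is on.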
if reg_value == 0:
return False
return True
def set_low_power_mode(self, port_num, lpmode):
# Check for invalid port_num
if port_num < self.METHOD_NAME or port_num > self.port_end:
return False
if port_num < self.qsfp_port_start or port_num > self.qsfp_port_end:
            print("\nError: SFPs don't support this property")
return False
try:
reg_file = open("/sys/class/swps/port"+str(port_num)+"/lpmod", "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = int(reg_file.readline().rstrip())
# LPMode is active high; set or clear the bit accordingly
if lpmode is True:
reg_value = 1
else:
reg_value = 0
reg_file.write(hex(reg_value))
reg_file.close()
return True
def reset(self, port_num):
QSFP_RESET_REGISTER_DEVICE_FILE = "/sys/class/swps/port"+str(port_num)+"/reset"
# Check for invalid port_num
if port_num < self.METHOD_NAME or port_num > self.port_end:
return False
if port_num < self.qsfp_port_start or port_num > self.qsfp_port_end:
            print("\nError: SFPs don't support this property")
return False
try:
reg_file = open(QSFP_RESET_REGISTER_DEVICE_FILE, "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = 0
reg_file.write(hex(reg_value))
reg_file.close()
        # Sleep 2 seconds to allow it to settle
        time.sleep(2)
        # Flip the value back and write it to the register to take the port out of reset
try:
reg_file = open(QSFP_RESET_REGISTER_DEVICE_FILE, "r+")
except IOError as e:
print("Error: unable to open file: %s" % str(e))
return False
reg_value = 1
reg_file.write(hex(reg_value))
reg_file.close()
return True |
273 | chunk targets | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import asyncio
import logging
from typing import Dict, List, Optional
from magma.common.job import Job
from magma.magmad.check.network_check.ping import (
PingCommandResult,
PingInterfaceCommandParams,
ping_interface_async,
)
NUM_PACKETS = 2
DEFAULT_POLLING_INTERVAL = 60
TIMEOUT_SECS = 3
CHECKIN_INTERVAL = 10
CHUNK_SIZE = 100
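# For example, with CHUNK_SIZE = 100 a list of 250 target hosts is pinged in
# chunks of 100, 100 and 50.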
def METHOD_NAME(hosts: List[str]):
"""
Yields successive n-sized chunks from target hosts.
"""
for i in range(0, len(hosts), CHUNK_SIZE):
logging.debug(
'Yielding [%s:%s] from target hosts', i, i + CHUNK_SIZE,
)
yield hosts[i:i + CHUNK_SIZE]
class ICMPJob(Job):
"""
Class that handles main loop to send ICMP ping to valid subscribers.
"""
def __init__(
self, monitoring_module, polling_interval: int, service_loop,
mtr_interface: str,
):
super().__init__(interval=CHECKIN_INTERVAL, loop=service_loop)
self._MTR_PORT = mtr_interface
logging.info("Running on interface %s...", self._MTR_PORT)
# Matching response time output to get latency
self._polling_interval = max(
polling_interval,
DEFAULT_POLLING_INTERVAL,
)
self._loop = service_loop
self._module = monitoring_module
self._sem = asyncio.BoundedSemaphore(5)
async def _ping_targets(
self, hosts: List[str],
targets: Optional[Dict] = None,
):
"""
Sends a count of ICMP pings to target IP address, returns response.
Args:
hosts: List of ip addresses to ping
targets: List of valid subscribers to ping to
        Returns: None; results are saved via _save_ping_response
"""
if targets:
for chunked_hosts in METHOD_NAME(hosts):
ping_params = [
PingInterfaceCommandParams(
host, NUM_PACKETS, self._MTR_PORT,
TIMEOUT_SECS,
) for host in chunked_hosts
]
async with self._sem:
try:
ping_results = await ping_interface_async(ping_params, self._loop)
ping_results_list = list(ping_results)
for host, sub, result in zip(hosts, targets, ping_results_list):
self._save_ping_response(sub, host, result)
except OSError:
logging.warning('Too many connections opened, sleeping while connections are closed...')
await asyncio.sleep(TIMEOUT_SECS, self._loop)
def _save_ping_response(
self, target_id: str, ip_addr: str,
ping_resp: PingCommandResult,
) -> None:
"""
Saves ping response to in-memory subscriber dict.
Args:
target_id: target ID to ping
ip_addr: IP Address to ping
ping_resp: response of ICMP ping command
"""
if ping_resp.error:
logging.debug(
'Failed to ping %s with error: %s',
target_id, ping_resp.error,
)
else:
self._module.save_ping_response(target_id, ip_addr, ping_resp)
async def _run(self) -> None:
targets, addresses = await self._module.get_ping_targets(self._loop)
if len(targets) > 0:
await self._ping_targets(addresses, targets)
else:
logging.warning('No subscribers/ping targets found')
await asyncio.sleep(self._polling_interval, self._loop) |
274 | create token response | """
oauthlib.oauth2.rfc6749.grant_types
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import json
import logging
from .. import errors, utils
from .base import GrantTypeBase
log = logging.getLogger(__name__)
class RefreshTokenGrant(GrantTypeBase):
"""`Refresh token grant`_
.. _`Refresh token grant`: https://tools.ietf.org/html/rfc6749#section-6
"""
def __init__(self, request_validator=None,
issue_new_refresh_tokens=True,
**kwargs):
super().__init__(
request_validator,
issue_new_refresh_tokens=issue_new_refresh_tokens,
**kwargs)
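    # Illustrative note (not part of the original module): in oauthlib this
    # grant is normally driven by a pre-configured endpoint such as
    # oauthlib.oauth2.WebApplicationServer, which pairs it with a BearerToken
    # token handler and passes the incoming request plus that handler into the
    # method below.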
def METHOD_NAME(self, request, token_handler):
"""Create a new access token from a refresh_token.
:param request: OAuthlib request.
:type request: oauthlib.common.Request
:param token_handler: A token handler instance, for example of type
oauthlib.oauth2.BearerToken.
If valid and authorized, the authorization server issues an access
token as described in `Section 5.1`_. If the request failed
verification or is invalid, the authorization server returns an error
response as described in `Section 5.2`_.
The authorization server MAY issue a new refresh token, in which case
the client MUST discard the old refresh token and replace it with the
new refresh token. The authorization server MAY revoke the old
refresh token after issuing a new refresh token to the client. If a
new refresh token is issued, the refresh token scope MUST be
identical to that of the refresh token included by the client in the
request.
.. _`Section 5.1`: https://tools.ietf.org/html/rfc6749#section-5.1
.. _`Section 5.2`: https://tools.ietf.org/html/rfc6749#section-5.2
"""
headers = self._get_default_headers()
try:
log.debug('Validating refresh token request, %r.', request)
self.validate_token_request(request)
except errors.OAuth2Error as e:
log.debug('Client error in token request, %s.', e)
headers.update(e.headers)
return headers, e.json, e.status_code
token = token_handler.create_token(request,
refresh_token=self.issue_new_refresh_tokens)
for modifier in self._token_modifiers:
token = modifier(token, token_handler, request)
self.request_validator.save_token(token, request)
log.debug('Issuing new token to client id %r (%r), %r.',
request.client_id, request.client, token)
headers.update(self._create_cors_headers(request))
return headers, json.dumps(token), 200
def validate_token_request(self, request):
"""
:param request: OAuthlib request.
:type request: oauthlib.common.Request
"""
# REQUIRED. Value MUST be set to "refresh_token".
if request.grant_type != 'refresh_token':
raise errors.UnsupportedGrantTypeError(request=request)
for validator in self.custom_validators.pre_token:
validator(request)
if request.refresh_token is None:
raise errors.InvalidRequestError(
description='Missing refresh token parameter.',
request=request)
# Because refresh tokens are typically long-lasting credentials used to
# request additional access tokens, the refresh token is bound to the
# client to which it was issued. If the client type is confidential or
# the client was issued client credentials (or assigned other
# authentication requirements), the client MUST authenticate with the
# authorization server as described in Section 3.2.1.
# https://tools.ietf.org/html/rfc6749#section-3.2.1
if self.request_validator.client_authentication_required(request):
log.debug('Authenticating client, %r.', request)
if not self.request_validator.authenticate_client(request):
log.debug('Invalid client (%r), denying access.', request)
raise errors.InvalidClientError(request=request)
# Ensure that request.client_id is set.
if request.client_id is None and request.client is not None:
request.client_id = request.client.client_id
elif not self.request_validator.authenticate_client_id(request.client_id, request):
log.debug('Client authentication failed, %r.', request)
raise errors.InvalidClientError(request=request)
# Ensure client is authorized use of this grant type
self.validate_grant_type(request)
# REQUIRED. The refresh token issued to the client.
log.debug('Validating refresh token %s for client %r.',
request.refresh_token, request.client)
if not self.request_validator.validate_refresh_token(
request.refresh_token, request.client, request):
log.debug('Invalid refresh token, %s, for client %r.',
request.refresh_token, request.client)
raise errors.InvalidGrantError(request=request)
original_scopes = utils.scope_to_list(
self.request_validator.get_original_scopes(
request.refresh_token, request))
if request.scope:
request.scopes = utils.scope_to_list(request.scope)
if (not all(s in original_scopes for s in request.scopes)
and not self.request_validator.is_within_original_scope(
request.scopes, request.refresh_token, request)):
log.debug('Refresh token %s lack requested scopes, %r.',
request.refresh_token, request.scopes)
raise errors.InvalidScopeError(request=request)
else:
request.scopes = original_scopes
for validator in self.custom_validators.post_token:
validator(request) |
275 | count | import math
import os.path
import sys
import textwrap
from test import support
def format_duration(seconds):
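    # Render a duration in seconds as a short human-readable string using at
    # most two units, e.g. 0.25 -> '250 ms', 3725 -> '1 hour 2 min'.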
ms = math.ceil(seconds * 1e3)
seconds, ms = divmod(ms, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parts = []
if hours:
parts.append('%s hour' % hours)
if minutes:
parts.append('%s min' % minutes)
if seconds:
if parts:
# 2 min 1 sec
parts.append('%s sec' % seconds)
else:
# 1.0 sec
parts.append('%.1f sec' % (seconds + ms / 1000))
if not parts:
return '%s ms' % ms
parts = parts[:2]
return ' '.join(parts)
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def METHOD_NAME(n, word):
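    # Simple pluralizer, e.g. (1, "file") -> "1 file", (3, "file") -> "3 files".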
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4, file=None):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks),
file=file)
def print_warning(msg):
support.print_warning(msg)
orig_unraisablehook = None
def flush_std_streams():
if sys.stdout is not None:
sys.stdout.flush()
if sys.stderr is not None:
sys.stderr.flush()
def regrtest_unraisable_hook(unraisable):
global orig_unraisablehook
support.environment_altered = True
print_warning("Unraisable exception")
old_stderr = sys.stderr
try:
flush_std_streams()
sys.stderr = sys.__stderr__
orig_unraisablehook(unraisable)
sys.stderr.flush()
finally:
sys.stderr = old_stderr
def setup_unraisable_hook():
global orig_unraisablehook
orig_unraisablehook = sys.unraisablehook
sys.unraisablehook = regrtest_unraisable_hook
orig_threading_excepthook = None
def regrtest_threading_excepthook(args):
global orig_threading_excepthook
support.environment_altered = True
print_warning(f"Uncaught thread exception: {args.exc_type.__name__}")
old_stderr = sys.stderr
try:
flush_std_streams()
sys.stderr = sys.__stderr__
orig_threading_excepthook(args)
sys.stderr.flush()
finally:
sys.stderr = old_stderr
def setup_threading_excepthook():
global orig_threading_excepthook
import threading
orig_threading_excepthook = threading.excepthook
threading.excepthook = regrtest_threading_excepthook
def clear_caches():
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
# Don't worry about resetting the cache if the module is not loaded
try:
distutils_dir_util = sys.modules['distutils.dir_util']
except KeyError:
pass
else:
distutils_dir_util._path_created.clear()
try:
re = sys.modules['re']
except KeyError:
pass
else:
re.purge()
try:
_strptime = sys.modules['_strptime']
except KeyError:
pass
else:
_strptime._regex_cache.clear()
try:
urllib_parse = sys.modules['urllib.parse']
except KeyError:
pass
else:
urllib_parse.clear_cache()
try:
urllib_request = sys.modules['urllib.request']
except KeyError:
pass
else:
urllib_request.urlcleanup()
try:
linecache = sys.modules['linecache']
except KeyError:
pass
else:
linecache.clearcache()
try:
mimetypes = sys.modules['mimetypes']
except KeyError:
pass
else:
mimetypes._default_mime_types()
try:
filecmp = sys.modules['filecmp']
except KeyError:
pass
else:
filecmp._cache.clear()
try:
struct = sys.modules['struct']
except KeyError:
pass
else:
struct._clearcache()
try:
doctest = sys.modules['doctest']
except KeyError:
pass
else:
doctest.master = None
try:
ctypes = sys.modules['ctypes']
except KeyError:
pass
else:
ctypes._reset_cache()
try:
typing = sys.modules['typing']
except KeyError:
pass
else:
for f in typing._cleanups:
f()
support.gc_collect() |
276 | configure loader modules | """
Integration tests for the rabbitmq_plugin states
"""
import logging
import pytest
import salt.modules.rabbitmq as rabbitmq
import salt.states.rabbitmq_plugin as rabbitmq_plugin
from tests.support.mock import patch
log = logging.getLogger(__name__)
pytest.importorskip("docker")
pytestmark = [
pytest.mark.slow_test,
pytest.mark.skip_if_binaries_missing(
"docker", "dockerd", reason="Docker not installed"
),
]
def mock_get_rabbitmq_plugin():
return "/opt/rabbitmq/sbin/rabbitmq-plugins"
@pytest.fixture
def METHOD_NAME(docker_cmd_run_all_wrapper):
return {
rabbitmq_plugin: {
"__salt__": {
"rabbitmq.plugin_is_enabled": rabbitmq.plugin_is_enabled,
"rabbitmq.enable_plugin": rabbitmq.enable_plugin,
"rabbitmq.disable_plugin": rabbitmq.disable_plugin,
},
"__opts__": {"test": False},
"_utils__": {},
},
rabbitmq: {
"__salt__": {"cmd.run_all": docker_cmd_run_all_wrapper},
"__opts__": {},
"_utils__": {},
},
}
def test_enabled_enabled_disabled(rabbitmq_container):
"""
    Test rabbitmq_plugin.enabled and rabbitmq_plugin.disabled
First enable the plugin.
Second try to enable the plugin again.
Third, try to disable the plugin.
"""
with patch.object(rabbitmq, "_get_rabbitmq_plugin", mock_get_rabbitmq_plugin):
# Enable the plugin
ret = rabbitmq_plugin.enabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' was enabled.",
"changes": {"old": "", "new": "rabbitmq_auth_backend_http"},
}
assert ret == expected
# Try to enable the plugin again
ret = rabbitmq_plugin.enabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' is already enabled.",
"changes": {},
}
assert ret == expected
# Disable the plugin
ret = rabbitmq_plugin.disabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' was disabled.",
"changes": {"new": "", "old": "rabbitmq_auth_backend_http"},
}
assert ret == expected
def test_disabled(rabbitmq_container):
"""
    Test rabbitmq_plugin.enabled and rabbitmq_plugin.disabled
First try to disable the plugin.
Second enable the plugin again.
Third disable the plugin.
"""
with patch.object(rabbitmq, "_get_rabbitmq_plugin", mock_get_rabbitmq_plugin):
# Try to disable the plugin
ret = rabbitmq_plugin.disabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' is already disabled.",
"changes": {},
}
assert ret == expected
# Enable the plugin
ret = rabbitmq_plugin.enabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' was enabled.",
"changes": {"old": "", "new": "rabbitmq_auth_backend_http"},
}
assert ret == expected
# Disable the plugin
ret = rabbitmq_plugin.disabled("rabbitmq_auth_backend_http")
expected = {
"name": "rabbitmq_auth_backend_http",
"result": True,
"comment": "Plugin 'rabbitmq_auth_backend_http' was disabled.",
"changes": {"new": "", "old": "rabbitmq_auth_backend_http"},
}
assert ret == expected |
277 | test filters aggregation buckets are accessible | # -*- coding: utf-8 -*-
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from pytest import raises
from opensearchpy import (
Date,
Document,
Keyword,
MultiSearch,
Q,
Search,
Text,
TransportError,
)
from opensearchpy.helpers.response import aggs
from .test_data import FLAT_DATA
class Repository(Document):
created_at = Date()
description = Text(analyzer="snowball")
tags = Keyword()
@classmethod
def search(cls):
return super(Repository, cls).search().filter("term", commit_repo="repo")
class Index:
name = "git"
class Commit(Document):
class Index:
name = "flat-git"
def METHOD_NAME(data_client):
has_tests_query = Q("term", files="test_opensearchpy/test_dsl")
s = Commit.search()[0:0]
s.aggs.bucket("top_authors", "terms", field="author.name.raw").bucket(
"has_tests", "filters", filters={"yes": has_tests_query, "no": ~has_tests_query}
).metric("lines", "stats", field="stats.lines")
response = s.execute()
assert isinstance(
response.aggregations.top_authors.buckets[0].has_tests.buckets.yes, aggs.Bucket
)
assert (
35
== response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.doc_count
)
assert (
228
== response.aggregations.top_authors.buckets[0].has_tests.buckets.yes.lines.max
)
def test_top_hits_are_wrapped_in_response(data_client):
s = Commit.search()[0:0]
s.aggs.bucket("top_authors", "terms", field="author.name.raw").metric(
"top_commits", "top_hits", size=5
)
response = s.execute()
top_commits = response.aggregations.top_authors.buckets[0].top_commits
assert isinstance(top_commits, aggs.TopHitsData)
assert 5 == len(top_commits)
hits = [h for h in top_commits]
assert 5 == len(hits)
assert isinstance(hits[0], Commit)
def test_inner_hits_are_wrapped_in_response(data_client):
s = Search(index="git")[0:1].query(
"has_parent", parent_type="repo", inner_hits={}, query=Q("match_all")
)
response = s.execute()
commit = response.hits[0]
assert isinstance(commit.meta.inner_hits.repo, response.__class__)
assert repr(commit.meta.inner_hits.repo[0]).startswith("<Hit(git/opensearch-py): ")
def test_scan_respects_doc_types(data_client):
repos = list(Repository.search().scan())
assert 1 == len(repos)
assert isinstance(repos[0], Repository)
assert repos[0].organization == "opensearch"
def test_scan_iterates_through_all_docs(data_client):
s = Search(index="flat-git")
commits = list(s.scan())
assert 52 == len(commits)
assert {d["_id"] for d in FLAT_DATA} == {c.meta.id for c in commits}
def test_response_is_cached(data_client):
s = Repository.search()
repos = list(s)
assert hasattr(s, "_response")
assert s._response.hits == repos
def test_multi_search(data_client):
s1 = Repository.search()
s2 = Search(index="flat-git")
ms = MultiSearch()
ms = ms.add(s1).add(s2)
r1, r2 = ms.execute()
assert 1 == len(r1)
assert isinstance(r1[0], Repository)
assert r1._search is s1
assert 52 == r2.hits.total.value
assert r2._search is s2
def test_multi_missing(data_client):
s1 = Repository.search()
s2 = Search(index="flat-git")
s3 = Search(index="does_not_exist")
ms = MultiSearch()
ms = ms.add(s1).add(s2).add(s3)
with raises(TransportError):
ms.execute()
r1, r2, r3 = ms.execute(raise_on_error=False)
assert 1 == len(r1)
assert isinstance(r1[0], Repository)
assert r1._search is s1
assert 52 == r2.hits.total.value
assert r2._search is s2
assert r3 is None
def test_raw_subfield_can_be_used_in_aggs(data_client):
s = Search(index="git")[0:0]
s.aggs.bucket("authors", "terms", field="author.name.raw", size=1)
r = s.execute()
authors = r.aggregations.authors
assert 1 == len(authors)
assert {"key": "Honza Král", "doc_count": 52} == authors[0] |
278 | build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(
region_id: str, subscription_id: str, *, filter: Optional[str] = None, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop("api_version", _params.pop("api-version", "2019-04-01")) # type: Literal["2019-04-01"]
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/providers/Microsoft.VMwareCloudSimple/locations/{regionId}/usages",
) # pylint: disable=line-too-long
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"regionId": _SERIALIZER.url("region_id", region_id, "str"),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
if filter is not None:
_params["$filter"] = _SERIALIZER.query("filter", filter, "str")
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class UsagesOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.vmwarecloudsimple.VMwareCloudSimple`'s
:attr:`usages` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, region_id: str, filter: Optional[str] = None, **kwargs: Any) -> Iterable["_models.Usage"]:
"""Implements Usages List method.
Returns list of usage in region.
:param region_id: The region Id (westus, eastus). Required.
:type region_id: str
:param filter: The filter to apply on the list operation. only name.value is allowed here as a
filter e.g. $filter=name.value eq 'xxxx'. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Usage or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.vmwarecloudsimple.models.Usage]
:raises ~azure.core.exceptions.HttpResponseError:
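
        Example (illustrative; attribute names on ``Usage`` are assumed)::

            for usage in client.usages.list(region_id="westus"):
                print(usage.name, usage.current_value, usage.limit)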
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
) # type: Literal["2019-04-01"]
cls = kwargs.pop("cls", None) # type: ClsType[_models.UsageListResponse]
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
region_id=region_id,
subscription_id=self._config.subscription_id,
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url) # type: ignore
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("UsageListResponse", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.CSRPError, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.VMwareCloudSimple/locations/{regionId}/usages"} # type: ignore |
279 | gen f | import pytest
from pybind11_tests import callbacks as m
from threading import Thread
def test_callbacks():
from functools import partial
def func1():
return "func1"
def func2(a, b, c, d):
return "func2", a, b, c, d
def func3(a):
return "func3({})".format(a)
assert m.test_callback1(func1) == "func1"
assert m.test_callback2(func2) == ("func2", "Hello", "x", True, 5)
assert m.test_callback1(partial(func2, 1, 2, 3, 4)) == ("func2", 1, 2, 3, 4)
assert m.test_callback1(partial(func3, "partial")) == "func3(partial)"
assert m.test_callback3(lambda i: i + 1) == "func(43) = 44"
f = m.test_callback4()
assert f(43) == 44
f = m.test_callback5()
assert f(number=43) == 44
def test_bound_method_callback():
# Bound Python method:
class MyClass:
def double(self, val):
return 2 * val
z = MyClass()
assert m.test_callback3(z.double) == "func(43) = 86"
z = m.CppBoundMethodTest()
assert m.test_callback3(z.triple) == "func(43) = 129"
def test_keyword_args_and_generalized_unpacking():
def f(*args, **kwargs):
return args, kwargs
assert m.test_tuple_unpacking(f) == (("positional", 1, 2, 3, 4, 5, 6), {})
assert m.test_dict_unpacking(f) == (("positional", 1), {"key": "value", "a": 1, "b": 2})
assert m.test_keyword_args(f) == ((), {"x": 10, "y": 20})
assert m.test_unpacking_and_keywords1(f) == ((1, 2), {"c": 3, "d": 4})
assert m.test_unpacking_and_keywords2(f) == (
("positional", 1, 2, 3, 4, 5),
{"key": "value", "a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error1(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_unpacking_error2(f)
assert "Got multiple values for keyword argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error1(f)
assert "Unable to convert call argument" in str(excinfo.value)
with pytest.raises(RuntimeError) as excinfo:
m.test_arg_conversion_error2(f)
assert "Unable to convert call argument" in str(excinfo.value)
def test_lambda_closure_cleanup():
m.test_cleanup()
cstats = m.payload_cstats()
assert cstats.alive() == 0
assert cstats.copy_constructions == 1
assert cstats.move_constructions >= 1
def test_cpp_function_roundtrip():
"""Test if passing a function pointer from C++ -> Python -> C++ yields the original pointer"""
assert m.test_dummy_function(m.dummy_function) == "matches dummy_function: eval(1) = 2"
assert (m.test_dummy_function(m.roundtrip(m.dummy_function)) ==
"matches dummy_function: eval(1) = 2")
assert m.roundtrip(None, expect_none=True) is None
assert (m.test_dummy_function(lambda x: x + 2) ==
"can't convert to function pointer: eval(1) = 3")
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(m.dummy_function2)
assert "incompatible function arguments" in str(excinfo.value)
with pytest.raises(TypeError) as excinfo:
m.test_dummy_function(lambda x, y: x + y)
assert any(s in str(excinfo.value) for s in ("missing 1 required positional argument",
"takes exactly 2 arguments"))
def test_function_signatures(doc):
assert doc(m.test_callback3) == "test_callback3(arg0: Callable[[int], int]) -> str"
assert doc(m.test_callback4) == "test_callback4() -> Callable[[int], int]"
def test_movable_object():
assert m.callback_with_movable(lambda _: None) is True
def test_async_callbacks():
# serves as state for async callback
class Item:
def __init__(self, value):
self.value = value
res = []
# generate stateful lambda that will store result in `res`
def METHOD_NAME():
s = Item(3)
return lambda j: res.append(s.value + j)
# do some work async
work = [1, 2, 3, 4]
m.test_async_callback(METHOD_NAME(), work)
# wait until work is done
from time import sleep
sleep(0.5)
assert sum(res) == sum([x + 3 for x in work])
def test_async_async_callbacks():
t = Thread(target=test_async_callbacks)
t.start()
t.join() |
280 | create entry | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import sys
from datetime import datetime
from xml.sax.saxutils import escape as xml_escape
from ._common_models import (
WindowsAzureData,
HeaderDict,
_unicode_type,
)
# TODO: check if this is used
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
def METHOD_NAME(entry_body):
''' Adds common part of entry to a given entry body and return the whole
xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'account_admin_live_email_id': 'AccountAdminLiveEmailId',
'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
'subscription_id': 'SubscriptionID',
'fqdn': 'FQDN',
'private_id': 'PrivateID',
'os_virtual_hard_disk': 'OSVirtualHardDisk',
'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
'logical_size_in_gb': 'LogicalSizeInGB',
'os': 'OS',
'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
'copy_id': 'CopyId',
'os_state': 'OSState',
'vm_image': 'VMImage',
'vm_images': 'VMImages',
'os_disk_configuration': 'OSDiskConfiguration',
'public_ips': 'PublicIPs',
'public_ip': 'PublicIP',
'supported_os': 'SupportedOS',
'reserved_ip': 'ReservedIP',
'reserved_ips': 'ReservedIPs',
'aad_tenant_id': 'AADTenantID',
'start_ip_address': 'StartIPAddress',
'end_ip_address': 'EndIPAddress',
'operation_id': 'OperationId',
'operation_object_id': 'OperationObjectId',
'client_ip': 'ClientIP',
'status_id': 'ID',
'virtual_ips': 'VirtualIPs',
'virtual_ip': 'VirtualIP',
'recommended_vm_size': 'RecommendedVMSize'
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
# TODO: check if this is used
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
def _set_continuation_from_response_headers(feeds, response):
x_ms_continuation = HeaderDict()
for name, value in response.headers:
if 'x-ms-continuation' in name:
x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
if x_ms_continuation:
setattr(feeds, 'x_ms_continuation', x_ms_continuation)
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return b''
# TODO: check if this is used
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body |
281 | test count | # -*- coding: utf-8 -*-
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
#
# Copyright (c) Pawel Jasinski
#
##
## Test str/byte equivalence for built-in string methods
##
import sys
import unittest
import itertools
from iptest import big, run_test
class ExtensibleStringClass(str):
pass
class StrBytesTest(unittest.TestCase):
def run_permutations(self, expr, inputs, str_eq, bytes_eq):
for params in itertools.product(*((x, ExtensibleStringClass(x), bytes(x, "ascii"), bytearray(x, "ascii")) for x in inputs)):
if all(not isinstance(z, str) for z in params):
self.assertEqual(expr(*params), bytes_eq)
elif any(not isinstance(z, str) for z in params):
with self.assertRaises(TypeError):
expr(*params)
else:
self.assertEqual(expr(*params), str_eq)
def test_contains(self):
self.run_permutations(lambda a1, a2: a1.__contains__(a2), ("a", "a"), True, True)
def test_format(self):
self.assertEqual("%s" % b"a", "b'a'")
# self.assertEqual(b"%s" % b"a", b"a")
# self.assertEqual("%s" % b"a", b"%s" % "a")
def METHOD_NAME(self):
self.run_permutations(lambda aa, a: aa.count(a), ("aa", "a"), 2, 2)
self.run_permutations(lambda aa, a: aa.count(a, 0), ("aa", "a"), 2, 2)
self.run_permutations(lambda aa, a: aa.count(a, 0, 1), ("aa", "a"), 1, 1)
def test_find(self):
self.run_permutations(lambda abc, b: abc.find(b), ("abc", "b"), 1, 1)
self.run_permutations(lambda abc, b: abc.find(b, 1), ("abc", "b"), 1, 1)
self.run_permutations(lambda abc, b: abc.find(b, 1, 2), ("abc", "b"), 1, 1)
self.run_permutations(lambda abc, b: abc.find(b, big(1)), ("abc", "b"), 1, 1)
self.run_permutations(lambda abc, b: abc.find(b, big(1), big(2)), ("abc", "b"), 1, 1)
def test_lstrip(self):
self.run_permutations(lambda xa, x: xa.lstrip(x), ("xa", "x"), "a", b"a")
def test_partition(self):
self.run_permutations(lambda abc, b: abc.partition(b), ("abc", "b"), ("a", "b", "c"), (b"a", b"b", b"c"))
def test_replace(self):
self.run_permutations(lambda abc, a, x: abc.replace(a, x), ("abc", "a", "x"), "xbc", b"xbc")
self.run_permutations(lambda abc, a, x: abc.replace(a, x, 1), ("abc", "a", "x"), "xbc", b"xbc")
self.assertEqual(b"abc".replace(b"a", memoryview(b"x")), b"xbc")
self.assertEqual(b"abc".replace(memoryview(b"a"), b"x"), b"xbc")
self.assertEqual(b"abc".replace(memoryview(b"a"), memoryview(b"x")), b"xbc")
# str/bytes return the original object
x = "abc"
self.assertIs(x.replace("d", "e"), x)
x = b"abc"
self.assertIs(x.replace(b"d", b"e"), x)
def test_rfind(self):
self.run_permutations(lambda abc, c: abc.rfind(c), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rfind(c, 1), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rfind(c, 1, 3), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rfind(c, big(1)), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rfind(c, big(1), big(3)), ("abc", "c"), 2, 2)
def test_rindex(self):
self.run_permutations(lambda abc, c: abc.rindex(c), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rindex(c, 1), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rindex(c, 1, 3), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rindex(c, big(1)), ("abc", "c"), 2, 2)
self.run_permutations(lambda abc, c: abc.rindex(c, big(1), big(3)), ("abc", "c"), 2, 2)
def test_rpartition(self):
self.run_permutations(lambda abc, b: abc.rpartition(b), ("abc", "b"), ("a", "b", "c"), (b"a", b"b", b"c"))
def test_rsplit(self):
self.run_permutations(lambda abc, b: abc.rsplit(b), ("abc", "b"), ["a", "c"], [b"a", b"c"])
self.run_permutations(lambda abc, b: abc.rsplit(b, 1), ("abc", "b"), ["a", "c"], [b"a", b"c"])
def test_rstrip(self):
self.run_permutations(lambda ax, x: ax.rstrip(x), ("ax", "x"), "a", b"a")
def test_split(self):
self.run_permutations(lambda abc, b: abc.split(b), ("abc", "b"), ["a", "c"], [b"a", b"c"])
self.run_permutations(lambda abc, b: abc.split(b, 1), ("abc", "b"), ["a", "c"], [b"a", b"c"])
def test_strip(self):
self.run_permutations(lambda xax, x: xax.strip(x), ("xax", "x"), "a", b"a")
def test_startswith(self):
self.run_permutations(lambda abc, a: abc.startswith(a), ("abc", "a"), True, True)
self.run_permutations(lambda abc, a: abc.startswith(a, 0), ("abc", "a"), True, True)
self.run_permutations(lambda abc, a: abc.startswith(a, 0, 1), ("abc", "a"), True, True)
def test_endswith(self):
self.run_permutations(lambda abc, c: abc.endswith(c), ("abc", "c"), True, True)
self.run_permutations(lambda abc, c: abc.endswith(c, 0), ("abc", "c"), True, True)
self.run_permutations(lambda abc, c: abc.endswith(c, 0, 3), ("abc", "c"), True, True)
def test_join(self):
self.run_permutations(lambda b, a, c: b.join([a, c]), ("b", "a", "c"), "abc", b"abc")
self.run_permutations(lambda a, b: a.join([b]), ("a", "b"), "b", b"b")
run_test(__name__) |
282 | test observation validity | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Wheel Bandit environment."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.bandits.environments import wheel_py_environment
class WheelBanditPyEnvironmentTest(tf.test.TestCase, parameterized.TestCase):
@parameterized.named_parameters(
dict(testcase_name='_delta_0', delta=0.0),
dict(testcase_name='_delta_2', delta=2.0),
)
def test_delta_out_of_bound_parameter(self, delta):
with self.assertRaisesRegexp(
ValueError, r'Delta must be in \(0, 1\)\, but saw delta: %g' % delta
):
wheel_py_environment.WheelPyEnvironment(
delta=delta,
mu_base=[1.2, 1.0, 1.0, 1.0, 1.0],
std_base=0.01 * np.ones(5),
mu_high=50.0,
std_high=0.01,
)
def test_mu_base_out_of_bound_parameter(self):
mu_base = [1.2, 1.0, 1.0, 1.0, 1.0, 1.0]
with self.assertRaisesRegexp(
ValueError, "The length of 'mu_base' must be 5, but saw 'mu_base':.*"
):
wheel_py_environment.WheelPyEnvironment(
delta=0.5,
mu_base=mu_base,
std_base=0.01 * np.ones(5),
mu_high=50.0,
std_high=0.01,
)
def test_std_base_out_of_bound_parameter(self):
with self.assertRaisesRegexp(
ValueError, r'The length of \'std_base\' must be 5\.'
):
wheel_py_environment.WheelPyEnvironment(
delta=0.5,
mu_base=[1.2, 1.0, 1.0, 1.0, 1.0],
std_base=0.01 * np.ones(6),
mu_high=50.0,
std_high=0.01,
)
def test_compute_optimal_action_and_reward(self):
observation = np.array(
[[0.1, 0.2], [0.3, -0.7], [-0.3, -0.7], [0.3, 0.7], [0.1, 0.3]]
)
actual_actions = wheel_py_environment.compute_optimal_action(
observation, 0.5
)
expected_actions = [0, 2, 4, 1, 0]
self.assertAllEqual(actual_actions, expected_actions)
actual_rewards = wheel_py_environment.compute_optimal_reward(
observation, 0.5, 1.5, 3.0
)
expected_rewards = [1.5, 3.0, 3.0, 3.0, 1.5]
self.assertAllEqual(actual_rewards, expected_rewards)
@parameterized.named_parameters(
dict(testcase_name='_batch_1', batch_size=1),
dict(testcase_name='_batch_4', batch_size=4),
)
def METHOD_NAME(self, batch_size):
"""Tests that the observations fall into the unit circle."""
env = wheel_py_environment.WheelPyEnvironment(
delta=0.5,
mu_base=[1.2, 1.0, 1.0, 1.0, 1.0],
std_base=0.01 * np.ones(5),
mu_high=50.0,
std_high=0.01,
batch_size=batch_size,
)
for _ in range(5):
observation = env.reset().observation
self.assertEqual(
list(observation.shape),
[batch_size] + list(env.observation_spec().shape),
)
for i in range(batch_size):
self.assertLessEqual(np.linalg.norm(observation[i, :]), 1)
@parameterized.named_parameters(
dict(testcase_name='_batch_1', batch_size=1),
dict(testcase_name='_batch_4', batch_size=4),
)
def test_rewards_validity(self, batch_size):
"""Tests that the rewards are valid."""
env = wheel_py_environment.WheelPyEnvironment(
delta=0.5,
mu_base=[1.2, 1.0, 1.0, 1.0, 1.0],
std_base=0.01 * np.ones(5),
mu_high=50.0,
std_high=0.01,
batch_size=batch_size,
)
time_step = env.reset()
time_step = env.step(np.arange(batch_size))
self.assertEqual(time_step.reward.shape, (batch_size,))
if __name__ == '__main__':
tf.test.main() |
283 | test immutable | #!/usr/bin/env python
# UserString is a wrapper around the native builtin string type.
# UserString instances should behave similarly to builtin string objects.
import string
from test import test_support, string_tests
from UserString import UserString, MutableString
import warnings
class UserStringTest(
string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrStringUserStringTest,
string_tests.MixinStrUserStringTest
):
type2test = UserString
# Overwrite the three testing methods, because UserString
# can't cope with arguments propagated to UserString
# (and we don't test with subclasses)
def checkequal(self, result, object, methodname, *args):
result = self.fixtype(result)
object = self.fixtype(object)
# we don't fix the arguments, because UserString can't cope with it
realresult = getattr(object, methodname)(*args)
self.assertEqual(
result,
realresult
)
def checkraises(self, exc, object, methodname, *args):
object = self.fixtype(object)
# we don't fix the arguments, because UserString can't cope with it
self.assertRaises(
exc,
getattr(object, methodname),
*args
)
def checkcall(self, object, methodname, *args):
object = self.fixtype(object)
# we don't fix the arguments, because UserString can't cope with it
getattr(object, methodname)(*args)
class MutableStringTest(UserStringTest):
type2test = MutableString
    # MutableStrings can not be hashed => deactivate test
def test_hash(self):
pass
def test_setitem(self):
s = self.type2test("foo")
self.assertRaises(IndexError, s.__setitem__, -4, "bar")
self.assertRaises(IndexError, s.__setitem__, 3, "bar")
s[-1] = "bar"
self.assertEqual(s, "fobar")
s[0] = "bar"
self.assertEqual(s, "barobar")
def test_delitem(self):
s = self.type2test("foo")
self.assertRaises(IndexError, s.__delitem__, -4)
self.assertRaises(IndexError, s.__delitem__, 3)
del s[-1]
self.assertEqual(s, "fo")
del s[0]
self.assertEqual(s, "o")
del s[0]
self.assertEqual(s, "")
def test_setslice(self):
s = self.type2test("foo")
s[:] = "bar"
self.assertEqual(s, "bar")
s[1:2] = "foo"
self.assertEqual(s, "bfoor")
s[1:-1] = UserString("a")
self.assertEqual(s, "bar")
s[0:10] = 42
self.assertEqual(s, "42")
def test_delslice(self):
s = self.type2test("foobar")
del s[3:10]
self.assertEqual(s, "foo")
del s[-1:10]
self.assertEqual(s, "fo")
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
orig = string.ascii_letters + string.digits
for start in indices:
for stop in indices:
# Use indices[1:] when MutableString can handle real
# extended slices
for step in (None, 1, -1):
s = self.type2test(orig)
L = list(orig)
# Make sure we have a slice of exactly the right length,
# but with (hopefully) different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
s[start:stop:step] = "".join(data)
self.assertEqual(s, "".join(L))
del L[start:stop:step]
del s[start:stop:step]
self.assertEqual(s, "".join(L))
def METHOD_NAME(self):
s = self.type2test("foobar")
s2 = s.immutable()
self.assertEqual(s, s2)
self.assertIsInstance(s2, UserString)
def test_iadd(self):
s = self.type2test("foo")
s += "bar"
self.assertEqual(s, "foobar")
s += UserString("baz")
self.assertEqual(s, "foobarbaz")
s += 42
self.assertEqual(s, "foobarbaz42")
def test_imul(self):
s = self.type2test("foo")
s *= 1
self.assertEqual(s, "foo")
s *= 2
self.assertEqual(s, "foofoo")
s *= -1
self.assertEqual(s, "")
def test_main():
with warnings.catch_warnings():
warnings.filterwarnings("ignore", ".*MutableString has been removed",
DeprecationWarning)
warnings.filterwarnings("ignore",
".*__(get|set|del)slice__ has been removed",
DeprecationWarning)
test_support.run_unittest(UserStringTest, MutableStringTest)
if __name__ == "__main__":
test_main() |
284 | get measurement | # coding=utf-8
import time
import copy
from flask_babel import lazy_gettext
from mycodo.inputs.base_input import AbstractInput
# Measurements
measurements_dict = {
0: {
'measurement': 'temperature',
'unit': 'C'
}
}
# Input information
INPUT_INFORMATION = {
'input_name_unique': 'DS18B20',
'input_manufacturer': 'MAXIM',
'input_name': 'DS18B20',
'input_library': 'w1thermsensor',
'measurements_name': 'Temperature',
'measurements_dict': measurements_dict,
'url_manufacturer': 'https://www.maximintegrated.com/en/products/sensors/DS18B20.html',
'url_datasheet': 'https://datasheets.maximintegrated.com/en/ds/DS18B20.pdf',
'url_product_purchase': [
'https://www.adafruit.com/product/374',
'https://www.adafruit.com/product/381',
'https://www.sparkfun.com/products/245'
],
'url_additional': 'https://github.com/cpetrich/counterfeit_DS18B20',
'message': 'Warning: Counterfeit DS18B20 sensors are common and can cause a host of issues. Review the Additional '
'URL for more information about how to determine if your sensor is authentic.',
'options_enabled': [
'location',
'resolution',
'period',
'pre_output'
],
'options_disabled': ['interface'],
'dependencies_module': [
('pip-pypi', 'w1thermsensor', 'w1thermsensor==2.0.0'),
],
'interfaces': ['1WIRE'],
'custom_commands': [
{
'type': 'message',
'default_value': """Set the resolution, precision, and response time for the sensor. This setting will be written to the EEPROM to allow persistence after power loss. The EEPROM has a limited amount of writes (>50k)."""
},
{
'id': 'resolution',
'type': 'select',
'default_value': '',
'options_select': [
('9', '9-bit, 0.5 °C, 93.75 ms'),
('10', '10-bit, 0.25 °C, 187.5 ms'),
('11', '11-bit, 0.125 °C, 375 ms'),
('12', '12-bit, 0.0625 °C, 750 ms')
],
'name': 'Resolution',
'phrase': 'Select the resolution for the sensor'
},
{
'id': 'set_resolution',
'type': 'button',
'name': 'Set Resolution'
}
],
'custom_options': [
{
'id': 'temperature_offset',
'type': 'float',
'default_value': 0.0,
'required': True,
'name': lazy_gettext("Temperature Offset"),
'phrase': "The temperature offset (degrees Celsius) to apply"
}
]
}
class InputModule(AbstractInput):
"""A sensor support class that monitors the DS18B20's temperature."""
def __init__(self, input_dev, testing=False):
super().__init__(input_dev, testing=testing, name=__name__)
self.sensor = None
self.temperature_offset = None
if not testing:
self.setup_custom_options(
INPUT_INFORMATION['custom_options'], input_dev)
self.try_initialize()
def initialize(self):
from w1thermsensor import W1ThermSensor
from w1thermsensor import Sensor
try:
self.sensor = W1ThermSensor(
Sensor.DS18B20, self.input_dev.location)
except:
self.logger.exception("Input initialization")
def METHOD_NAME(self):
"""Gets the DS18B20's temperature in Celsius."""
if not self.sensor:
self.logger.error("Error 101: Device not set up. See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.")
return
self.return_dict = copy.deepcopy(measurements_dict)
temperature = None
n = 2
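        # Try the 1-Wire read up to n times, sleeping 1 second between failed attempts.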
for i in range(n):
try:
temperature = self.sensor.get_temperature()
except Exception as e:
                if i == n - 1:
self.logger.exception(
"{cls} raised an exception when taking a reading: {err}".format(cls=type(self).__name__, err=e))
return None
time.sleep(1)
if temperature == 85:
self.logger.error("Measurement returned 85 C, indicating an issue communicating with the sensor.")
return None
elif temperature is not None and not -55 < temperature < 125:
self.logger.error(
"Measurement outside the expected range of -55 C to 125 C: {temp} C".format(temp=temperature))
return None
elif temperature is not None:
self.value_set(0, temperature + self.temperature_offset)
return self.return_dict
def set_resolution(self, args_dict):
if 'resolution' not in args_dict or not args_dict['resolution']:
self.logger.error("Resolution required")
return
try:
self.sensor.set_resolution(
int(args_dict['resolution']), persist=True)
except Exception as err:
self.logger.error(
"Error setting resolution: {}".format(err)) |
285 | apply | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A structure used to describe the network of a model."""
import sys
import types
import paddle
from paddleslim.core import GraphWrapper
from .graph import Graph, Node
from paddleslim.core.dygraph import dygraph2program
__all__ = ["GraphTracer"]
def METHOD_NAME(layer, func):
for name, child in layer.named_children():
func(child)
METHOD_NAME(child, func)
def _add_call_hook(module,
function_new,
method_name='forward',
backup_name='__forward_orig__'):
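    # Recursively replace method_name on every submodule (but not on module itself)
    # with function_new, backing up the original method under backup_name.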
def _call_hook_enable(op):
        # Do not patch the top-level module; this makes it easy to invoke via self.module(x).
if op is not module:
assert not hasattr(
op, backup_name
), f'in {op.__class__.__name__} detected an existing function {backup_name} : please double check'
# backup the original forward of op into backup_name
method_orig = getattr(op, method_name)
setattr(op, backup_name, method_orig)
# set new method
method_new = types.MethodType(function_new, op)
setattr(op, method_name, method_new)
METHOD_NAME(module, _call_hook_enable)
def _remove_call_hook(module,
method_name='forward',
backup_name='__forward_orig__'):
def _call_hook_disable(op):
if op is not module:
if hasattr(op, backup_name):
method_new = getattr(op, method_name)
method_orig = getattr(op, backup_name)
setattr(op, method_name, method_orig)
# delete the backup
setattr(op, backup_name, method_new)
delattr(op, backup_name)
METHOD_NAME(module, _call_hook_disable)
class GraphTracer(paddle.nn.Layer):
""" A tool used to trace the execution of the model.
Call the forward of the model decorated by this tracer
and it will create a graph.
Args:
model(paddle.nn.Layer): The model to be traced.
Examples:
.. code-block:: python
            from paddleslim.core.graph_tracer import GraphTracer
from paddle.vision.models import resnet18
model = resnet18()
x = paddle.rand([1, 3, 224, 224])
tracer = GraphTracer(model)
tracer(x)
print(tracer.graph)
"""
def __init__(self, model: paddle.nn.Layer):
super(GraphTracer, self).__init__()
self._model = model
self._graph = None
self._call_count = {}
self._tensor_previous = {}
@property
def graph(self) -> Graph:
assert self._graph is not None, "Please trace the graph by calling forward function of current tracer."
return self._graph
def forward(self, inputs, *args, **kwargs):
self._graph = Graph()
_add_call_hook(self._model, self._analyze_modules_op)
self._model(inputs, *args, **kwargs)
_remove_call_hook(self._model)
def _analyze_modules_op(self, op, inputs, *args, **kwargs):
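        # Record this op and its inputs as a graph node, run the original forward,
        # then map the outputs to this node so later ops can link back to it.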
node = self._trace_in(op, inputs)
#print(f"inputs: {inputs.name}")
outputs = op.__forward_orig__(inputs, *args, **kwargs)
#print(f"outputs: {outputs.name}")
self._trace_out(node, outputs)
return outputs
def _call_layer(self, layer):
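        # Count how many times this layer has been called during the current trace.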
layer_name = layer.full_name()
if layer_name not in self._call_count:
self._call_count[layer_name] = 0
self._call_count[layer_name] += 1
return self._call_count[layer_name]
def _trace_in(self, layer, inputs):
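        # Create (or reuse) the node for this layer call and add edges from the
        # nodes that produced each of its input tensors.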
inputs = self._convert_to_list(inputs)
        call_count = self._call_layer(layer)
        current_node = Node(layer, call_count)
if current_node.name not in self._graph._name2node:
self._graph._name2node[current_node.name] = current_node
current_node = self._graph._name2node[current_node.name]
for inp in inputs:
last_node = self._tensor_previous.get(inp.name, None)
if last_node is not None:
assert isinstance(last_node, Node)
if last_node not in current_node.previous_nodes:
current_node.previous_nodes.append(last_node)
if current_node not in last_node.next_nodes:
last_node.next_nodes.append(current_node)
return current_node
def _trace_out(self, current_node, outputs):
assert current_node is not None, "The current node has not been visited."
if current_node.is_leaf():
outputs = self._convert_to_list(outputs)
for out in outputs:
self._tensor_previous[out.name] = current_node
def _convert_to_list(self, tensors):
""" Convert tensor to list.
        It is important to convert the inputs to a list, because visiting
        the tensors with 'for ... in' would create new temporary variables
        and break the tracing process.
"""
if isinstance(tensors, paddle.Tensor):
return [tensors]
elif isinstance(tensors, (list, tuple)):
for _t in tensors:
assert isinstance(_t, paddle.Tensor)
return tensors
        raise TypeError(
            f"Unsupported type: {type(tensors)}; the inputs should be a paddle.Tensor or a list of paddle.Tensor."
        ) |
286 | all nodes down | import random
import socket
import time
from pathlib import Path
import requests
from flask import Response
from nucypher_core import MetadataRequest, FleetStateChecksum
from nucypher.characters.lawful import Ursula
from nucypher.network.middleware import NucypherMiddlewareClient, RestMiddleware
from tests.constants import TEST_ETH_PROVIDER_URI
from tests.utils.ursula import MOCK_KNOWN_URSULAS_CACHE
class BadTestUrsulas(RuntimeError):
crash_right_now = True
class _TestMiddlewareClient(NucypherMiddlewareClient):
timeout = None
@staticmethod
def response_cleaner(response):
response.content = response.data
return response
def _get_mock_client_by_ursula(self, ursula):
port = ursula.rest_interface.port
return self._get_mock_client_by_port(port)
def _get_mock_client_by_url(self, url):
port = int(url.split(":")[1])
return self._get_mock_client_by_port(port)
def _get_mock_client_by_port(self, port):
ursula = self._get_ursula_by_port(port)
rest_app = ursula.rest_app
rest_app.testing = True
mock_client = rest_app.test_client()
return mock_client
def _get_ursula_by_port(self, port):
mkuc = MOCK_KNOWN_URSULAS_CACHE
try:
return mkuc[port]
except KeyError:
raise BadTestUrsulas(
"Can't find an Ursula with port {} - did you spin up the right test ursulas?".format(port))
def parse_node_or_host_and_port(self, node=None, host=None, port=None):
if node:
if any((host, port)):
raise ValueError("Don't pass host and port if you are passing the node.")
mock_client = self._get_mock_client_by_ursula(node)
elif all((host, port)):
node = self._get_ursula_by_port(port)
mock_client = self._get_mock_client_by_port(port)
else:
raise ValueError("You need to pass either the node or a host and port.")
host, port = node.rest_interface.host, node.rest_interface.port
return host, port, mock_client
def invoke_method(self, method, url, *args, **kwargs):
_cert_location = kwargs.pop("verify") # TODO: Is this something that can be meaningfully tested?
kwargs.pop("timeout", None) # Just get rid of timeout; not needed for the test client.
response = super().invoke_method(method, url, *args, **kwargs)
return response
def clean_params(self, request_kwargs):
request_kwargs["query_string"] = request_kwargs.pop("params", {})
def get_certificate(self, port, *args, **kwargs):
ursula = self._get_ursula_by_port(port)
return ursula.certificate, Path()
class MockRestMiddleware(RestMiddleware):
_ursulas = None
_client_class = _TestMiddlewareClient
class NotEnoughMockUrsulas(Ursula.NotEnoughUrsulas):
pass
class MockRestMiddlewareForLargeFleetTests(MockRestMiddleware):
"""
    A MockRestMiddleware with a workaround necessary to test the conditions that arise with thousands of nodes.
"""
def get_nodes_via_rest(self,
node,
fleet_state_checksum,
announce_nodes=None):
response_bytes = node.bytestring_of_known_nodes()
r = Response(response_bytes)
r.content = r.data
return r
class SluggishLargeFleetMiddleware(MockRestMiddlewareForLargeFleetTests):
"""
Similar to above, but with added delay to simulate network latency.
"""
def put_treasure_map_on_node(self, node, *args, **kwargs):
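        # Sleep for a short random interval (50-140 ms) before and after the call to simulate network latency.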
time.sleep(random.randrange(5, 15) / 100)
result = super().put_treasure_map_on_node(node=node, *args, **kwargs)
time.sleep(random.randrange(5, 15) / 100)
return result
class _MiddlewareClientWithConnectionProblems(_TestMiddlewareClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.ports_that_are_down = set()
self.certs_are_broken = False
def _get_ursula_by_port(self, port):
if port in self.ports_that_are_down:
raise ConnectionRefusedError
else:
return super()._get_ursula_by_port(port)
def get(self, *args, **kwargs):
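        # Simulate broken certificates and downed ports before delegating to the real test client's get().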
if kwargs.get("path") == "public_information":
if self.certs_are_broken:
raise requests.exceptions.SSLError
port = kwargs.get("port")
if port in self.ports_that_are_down:
raise socket.gaierror
real_get = super(_TestMiddlewareClient, self).__getattr__("get")
return real_get(*args, **kwargs)
class NodeIsDownMiddleware(MockRestMiddleware):
"""
Modified middleware to emulate one node being down amongst many.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client = _MiddlewareClientWithConnectionProblems(
eth_provider_uri=TEST_ETH_PROVIDER_URI
)
def node_is_down(self, node):
self.client.ports_that_are_down.add(node.rest_interface.port)
def node_is_up(self, node):
self.client.ports_that_are_down.remove(node.rest_interface.port)
def all_nodes_up(self):
self.client.ports_that_are_down = set()
def METHOD_NAME(self):
self.client.ports_that_are_down = set(MOCK_KNOWN_URSULAS_CACHE)
class EvilMiddleWare(MockRestMiddleware):
"""
Middleware for assholes.
"""
def propagate_shitty_interface_id(self, ursula, shitty_metadata):
"""
Try to get Ursula to propagate a malicious (or otherwise shitty) interface ID.
"""
fleet_state_checksum = FleetStateChecksum(this_node=None, other_nodes=[])
request = MetadataRequest(fleet_state_checksum=fleet_state_checksum, announce_nodes=[shitty_metadata])
response = self.client.post(node_or_sprout=ursula,
path="node_metadata",
data=bytes(request)
)
return response
def upload_arbitrary_data(self, node, path, data):
response = self.client.post(node_or_sprout=node,
path=path,
data=data)
return response |
287 | suite | #
# @file TestCVTerms.py
# @brief CVTerms unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/annotation/test/TestCVTerms.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestCVTerms(unittest.TestCase):
def test_CVTerm_addResource(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
resource = "GO6666";
self.assertTrue( term != None )
self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
term.addResource(resource)
xa = term.getResources()
self.assertTrue( xa.getLength() == 1 )
self.assertTrue(( "rdf:resource" == xa.getName(0) ))
self.assertTrue(( "GO6666" == xa.getValue(0) ))
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_create(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
self.assertTrue( term != None )
self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_createFromNode(self):
qual_triple = libsbml.XMLTriple("is", "", "bqbiol")
bag_triple = libsbml.XMLTriple()
li_triple = libsbml.XMLTriple()
att = libsbml.XMLAttributes()
att.add( "", "This is my resource")
att1 = libsbml.XMLAttributes()
li_token = libsbml.XMLToken(li_triple,att)
bag_token = libsbml.XMLToken(bag_triple,att1)
qual_token = libsbml.XMLToken(qual_triple,att1)
li = libsbml.XMLNode(li_token)
bag = libsbml.XMLNode(bag_token)
node = libsbml.XMLNode(qual_token)
bag.addChild(li)
node.addChild(bag)
term = libsbml.CVTerm(node)
self.assertTrue( term != None )
self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_IS )
xa = term.getResources()
self.assertTrue( xa.getLength() == 1 )
self.assertTrue(( "rdf:resource" == xa.getName(0) ))
self.assertTrue(( "This is my resource" == xa.getValue(0) ))
_dummyList = [ qual_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ bag_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ li_triple ]; _dummyList[:] = []; del _dummyList
_dummyList = [ li_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ bag_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ qual_token ]; _dummyList[:] = []; del _dummyList
_dummyList = [ att ]; _dummyList[:] = []; del _dummyList
_dummyList = [ att1 ]; _dummyList[:] = []; del _dummyList
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
_dummyList = [ node ]; _dummyList[:] = []; del _dummyList
_dummyList = [ bag ]; _dummyList[:] = []; del _dummyList
_dummyList = [ li ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_getResources(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
resource = "GO6666";
resource1 = "OtherURI";
term.addResource(resource)
term.addResource(resource1)
number = term.getNumResources()
self.assertTrue( number == 2 )
self.assertTrue(( "GO6666" == term.getResourceURI(0) ))
self.assertTrue(( "OtherURI" == term.getResourceURI(1) ))
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def test_CVTerm_set_get(self):
term = libsbml.CVTerm(libsbml.MODEL_QUALIFIER)
self.assertTrue( term != None )
self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
term.setModelQualifierType(libsbml.BQM_IS)
self.assertTrue( term != None )
self.assertTrue( term.getQualifierType() == libsbml.MODEL_QUALIFIER )
self.assertTrue( term.getModelQualifierType() == libsbml.BQM_IS )
term.setQualifierType(libsbml.BIOLOGICAL_QUALIFIER)
term.setBiologicalQualifierType(libsbml.BQB_IS)
self.assertTrue( term.getQualifierType() == libsbml.BIOLOGICAL_QUALIFIER )
self.assertTrue( term.getBiologicalQualifierType() == libsbml.BQB_IS )
_dummyList = [ term ]; _dummyList[:] = []; del _dummyList
pass
def METHOD_NAME():
METHOD_NAME = unittest.TestSuite()
METHOD_NAME.addTest(unittest.makeSuite(TestCVTerms))
return METHOD_NAME
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(METHOD_NAME()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1) |
288 | get dp seed | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import paddle
import paddle.distributed as dist
from paddle.distributed import fleet
from paddle.distributed.fleet.meta_parallel import get_rng_state_tracker
from ppfleetx.distributed.apis import comm_groups
from ppfleetx.utils.log import logger
__all__ = ["init_dist_env"]
_seed = None
_dp_seed = None
_hcg = None
def set_seed(seed):
    # NOTE(shenliang03): For the parameter init seed:
    #   seed: dp / mp undistributed parameter / sharding are the same; others are different.
    # For the compute seed (dropout):
    #   global seed: only the mp group shares the same seed.
    #   local seed: all groups use different seeds.
if dist.get_world_size() > 1:
# obtain rank message of hybrid parallel
hcg = get_hcg()
mp_rank = hcg.get_model_parallel_rank()
mp_size = hcg.get_model_parallel_world_size()
pp_rank = hcg.get_stage_id()
pp_size = hcg.get_pipe_parallel_world_size()
dp_rank = hcg.get_data_parallel_rank()
dp_size = hcg.get_data_parallel_world_size()
sharding_rank = hcg.get_sharding_parallel_rank()
# sharding_size = hcg.get_sharding_parallel_world_size()
else:
mp_rank, mp_size = 0, 1
pp_rank, pp_size = 0, 1
dp_rank, dp_size = 0, 1
sharding_rank, _ = 0, 1
# NOTE: the commented seeds are set only for precision validation
# seed += 100 * pp_rank
random.seed(seed + 100 * pp_rank)
np.random.seed(seed + 100 * pp_rank)
# seed = mp_rank +
# pp_rank * (mp_size) +
# dp_rank * (mp_size * pp_size) +
# sharding_rank * (mp_size * pp_size * dp_size)
    # The seed offset is used to avoid conflicts with the parameter initialization seed.
seed_offset = seed + 1024 + paddle.distributed.get_world_size()
global_seed = (
seed_offset
+ pp_rank * (mp_size)
+ dp_rank * (mp_size * pp_size)
+ sharding_rank * (mp_size * pp_size * dp_size)
)
seed_offset += paddle.distributed.get_world_size()
local_seed = (
seed_offset
+ mp_rank
+ pp_rank * (mp_size)
+ dp_rank * (mp_size * pp_size)
+ sharding_rank * (mp_size * pp_size * dp_size)
)
tracker = get_rng_state_tracker()
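    # Register both seed streams with the hybrid-parallel RNG state tracker (used e.g. by dropout).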
tracker.add("global_seed", global_seed)
tracker.add("local_seed", local_seed)
paddle.seed(global_seed)
logger.info("The global seed is set to {} and local seed is set to {}.".format(global_seed, local_seed))
global _seed
global _dp_seed
_seed = seed
_dp_seed = global_seed
def set_hcg(hcg):
global _hcg
_hcg = hcg
def get_hcg():
global _hcg
return _hcg
def get_seed():
global _seed
return _seed
def METHOD_NAME():
global _dp_seed
return _dp_seed
def init_dist_env(config):
paddle.set_device(config.Global.device)
strategy = fleet.DistributedStrategy()
if config.Distributed.mp_degree == 1 and config.Distributed.sharding.sharding_degree == 1:
order = ["pp", "dp", "sharding", "mp"]
else:
order = ["dp", "pp", "sharding", "mp"]
strategy.hybrid_configs = {
"dp_degree": config.Distributed.dp_degree,
"mp_degree": config.Distributed.mp_degree,
"pp_degree": config.Distributed.pp_degree,
"sharding_degree": config.Distributed.sharding.sharding_degree,
"order": order,
}
if config.Distributed.pp_degree > 1:
if "sequence_parallel" in config.Model:
if config.Model.sequence_parallel:
assert config.Global.enable_partial_send_recv is False, (
"if config.Distributed.pp_degree > 1 and config.Model.sequence_parallel is True, "
"config.Global.enable_partial_send_recv should be set False."
)
strategy.pipeline_configs = {
"accumulate_steps": config.Global.local_batch_size // config.Global.micro_batch_size,
"micro_batch_size": config.Global.micro_batch_size,
"enable_partial_send_recv": config.Global.enable_partial_send_recv,
}
# set control in tensor parallel
seed = config.Global.seed
strategy.tensor_parallel_configs = {"tensor_init_seed": seed}
hcg = comm_groups.create_hcg(strategy, hcg_name=config.Distributed.hcg)
set_hcg(hcg)
def get_local_rank():
return int(os.getenv("PADDLE_RANK_IN_NODE", 0))
def get_data_world_size():
if paddle.distributed.get_world_size() == 1:
return 1
hcg = get_hcg()
dp_size = hcg.get_data_parallel_world_size()
sharding_size = hcg.get_sharding_parallel_world_size()
return dp_size * sharding_size
def get_data_world_rank():
if paddle.distributed.get_world_size() == 1:
return 0
hcg = get_hcg()
dp_rank = hcg.get_data_parallel_rank()
sharding_rank = hcg.get_sharding_parallel_rank()
sharding_size = hcg.get_sharding_parallel_world_size()
return dp_rank * sharding_size + sharding_rank
def work_at_local_rank0(func):
def wrapper(*args, **kwargs):
local_rank = 0
if paddle.fluid.core.is_compiled_with_dist() and paddle.distributed.get_world_size() > 1:
local_rank = paddle.distributed.ParallelEnv().dev_id
if local_rank == 0:
func(*args, **kwargs)
return wrapper |
289 | set env var | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
import os
import gtest_test_utils
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
COMMAND = gtest_test_utils.GetTestExecutablePath('googletest-env-var-test_')
environ = os.environ.copy()
def AssertEq(expected, actual):
if expected != actual:
print('Expected: %s' % (expected,))
print(' Actual: %s' % (actual,))
raise AssertionError
def METHOD_NAME(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def GetFlag(flag):
"""Runs googletest-env-var-test_ and returns its output."""
args = [COMMAND]
if flag is not None:
args += [flag]
return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
"""Verifies that the given flag is affected by the corresponding env var."""
env_var = 'GTEST_' + flag.upper()
METHOD_NAME(env_var, test_val)
AssertEq(test_val, GetFlag(flag))
METHOD_NAME(env_var, None)
AssertEq(default_val, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
def testEnvVarAffectsFlag(self):
"""Tests that environment variable should affect the corresponding flag."""
TestFlag('break_on_failure', '1', '0')
TestFlag('color', 'yes', 'auto')
TestFlag('filter', 'FooTest.Bar', '*')
METHOD_NAME('XML_OUTPUT_FILE', None) # For 'output' test
TestFlag('output', 'xml:tmp/foo.xml', '')
TestFlag('print_time', '0', '1')
TestFlag('repeat', '999', '1')
TestFlag('throw_on_failure', '1', '0')
TestFlag('death_test_style', 'threadsafe', 'fast')
TestFlag('catch_exceptions', '0', '1')
if IS_LINUX:
TestFlag('death_test_use_fork', '1', '0')
TestFlag('stack_trace_depth', '0', '100')
def testXmlOutputFile(self):
"""Tests that $XML_OUTPUT_FILE affects the output flag."""
METHOD_NAME('GTEST_OUTPUT', None)
METHOD_NAME('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/bar.xml', GetFlag('output'))
def testXmlOutputFileOverride(self):
"""Tests that $XML_OUTPUT_FILE is overridden by $GTEST_OUTPUT."""
METHOD_NAME('GTEST_OUTPUT', 'xml:tmp/foo.xml')
METHOD_NAME('XML_OUTPUT_FILE', 'tmp/bar.xml')
AssertEq('xml:tmp/foo.xml', GetFlag('output'))
if __name__ == '__main__':
gtest_test_utils.Main() |
290 | get user email | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
import json
from sqlalchemy import text
from sqlalchemy.orm.exc import NoResultFound
from idutils import normalize_doi
from invenio_accounts.models import User
from invenio_db import db
from invenio_oauthclient.models import UserIdentity
from inspire_schemas.utils import split_page_artid
from inspirehep.utils.normalizers import (
normalize_journal_title as _normalize_journal_title,
)
def check_book_existence(title):
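    # Return the self JSON refs of Literature records with document_type 'book' whose titles contain the given title.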
query = text("""
SELECT r.json -> 'self' ->> '$ref' AS self_jsonref
FROM
records_metadata AS r
WHERE
(r.json -> '_collections') ? 'Literature'
AND
(r.json -> 'document_type') ? 'book'
AND
(r.json -> 'titles') @> :title
""").bindparams(title=json.dumps([{
"title": title
}]))
return db.session.execute(query)
def check_journal_existence(title):
query = text("""
SELECT r.id
FROM
records_metadata AS r
WHERE
(r.json -> '_collections') ? 'Journals'
AND
(r.json -> 'journal_title' @> :title)
""").bindparams(title=json.dumps(
{
"title": title
}
))
return db.session.execute(query)
def normalize_formdata(obj, formdata):
formdata = normalize_provided_doi(obj, formdata)
formdata = get_user_orcid(obj, formdata)
formdata = METHOD_NAME(obj, formdata)
formdata = split_page_range_article_id(obj, formdata)
formdata = normalize_journal_title(obj, formdata)
formdata = remove_english_language(obj, formdata)
formdata = find_book_id(obj, formdata)
return formdata
def normalize_provided_doi(obj, formdata):
try:
doi = formdata.get('doi')
formdata['doi'] = normalize_doi(doi)
except AttributeError:
formdata['doi'] = None
return formdata
def METHOD_NAME(obj, formdata):
try:
formdata['email'] = User.query.get(obj.id_user).email
except AttributeError:
formdata['email'] = None
return formdata
def get_user_orcid(obj, formdata):
try:
formdata['orcid'] = UserIdentity.query.filter_by(
id_user=obj.id_user, method='orcid').one().id
except NoResultFound:
formdata['orcid'] = None
return formdata
def split_page_range_article_id(obj, formdata):
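    # Split the combined page range / article ID field into start page, end page, and article ID.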
page_range_article_id = formdata.get('page_range_article_id')
if page_range_article_id:
page_start, page_end, artid = split_page_artid(page_range_article_id)
formdata['start_page'] = page_start
formdata['end_page'] = page_end
formdata['artid'] = artid
return formdata
def normalize_journal_title(obj, formdata):
if formdata.get('type_of_doc') == 'book' or formdata.get('type_of_doc') == 'chapter':
result = check_journal_existence(formdata.get('series_title'))
if result.rowcount > 0:
formdata['journal_title'] = _normalize_journal_title(formdata.get('series_title'))
else:
formdata['journal_title'] = _normalize_journal_title(formdata['journal_title'])
return formdata
def find_book_id(obj, formdata):
if formdata.get('type_of_doc') == 'chapter':
if not formdata.get('parent_book'):
result = list(check_book_existence(formdata.get('book_title')))
if len(result) == 1:
formdata['parent_book'] = result[0][0]
return formdata
def remove_english_language(obj, formdata):
if formdata.get('language') == 'en':
del formdata['language']
del formdata['title_translation']
return formdata |
291 | get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
import urllib.parse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from ..._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(subscription_id: str, **kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2017-06-01"))
accept = _headers.pop("Accept", "application/json, text/json")
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus")
path_format_arguments = {
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
}
_url: str = _url.format(**path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class SkusOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.storage.v2017_06_01.StorageManagement`'s
:attr:`skus` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Sku"]:
"""Lists the available SKUs supported by Microsoft.Storage for given subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Sku or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.storage.v2017_06_01.models.Sku]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._api_version or "2017-06-01"))
cls: ClsType[_models.StorageSkuListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("StorageSkuListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(METHOD_NAME, extract_data)
list.metadata = {"url": "/subscriptions/{subscriptionId}/providers/Microsoft.Storage/skus"} |
292 | test less than 3 edges | import pytest
import networkx as nx
def test_directed_edge_swap():
graph = nx.path_graph(200, create_using=nx.DiGraph)
in_degrees = sorted((n, d) for n, d in graph.in_degree())
out_degrees = sorted((n, d) for n, d in graph.out_degree())
G = nx.directed_edge_swap(graph, nswap=40, max_tries=500, seed=1)
assert in_degrees == sorted((n, d) for n, d in G.in_degree())
assert out_degrees == sorted((n, d) for n, d in G.out_degree())
def test_edge_cases_directed_edge_swap():
# Tests cases when swaps are impossible, either too few edges exist, or self loops/cycles are unavoidable
# TODO: Rewrite function to explicitly check for impossible swaps and raise error
e = (
"Maximum number of swap attempts \\(11\\) exceeded "
"before desired swaps achieved \\(\\d\\)."
)
graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)])
with pytest.raises(nx.NetworkXAlgorithmError, match=e):
nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1)
def test_double_edge_swap():
graph = nx.barabasi_albert_graph(200, 1)
degrees = sorted(d for n, d in graph.degree())
G = nx.double_edge_swap(graph, 40)
assert degrees == sorted(d for n, d in graph.degree())
def test_double_edge_swap_seed():
graph = nx.barabasi_albert_graph(200, 1)
degrees = sorted(d for n, d in graph.degree())
G = nx.double_edge_swap(graph, 40, seed=1)
assert degrees == sorted(d for n, d in graph.degree())
def test_connected_double_edge_swap():
graph = nx.barabasi_albert_graph(200, 1)
degrees = sorted(d for n, d in graph.degree())
G = nx.connected_double_edge_swap(graph, 40, seed=1)
assert nx.is_connected(graph)
assert degrees == sorted(d for n, d in graph.degree())
def test_connected_double_edge_swap_low_window_threshold():
graph = nx.barabasi_albert_graph(200, 1)
degrees = sorted(d for n, d in graph.degree())
G = nx.connected_double_edge_swap(graph, 40, _window_threshold=0, seed=1)
assert nx.is_connected(graph)
assert degrees == sorted(d for n, d in graph.degree())
def test_connected_double_edge_swap_star():
# Testing ui==xi in connected_double_edge_swap
graph = nx.star_graph(40)
degrees = sorted(d for n, d in graph.degree())
G = nx.connected_double_edge_swap(graph, 1, seed=4)
assert nx.is_connected(graph)
assert degrees == sorted(d for n, d in graph.degree())
def test_connected_double_edge_swap_star_low_window_threshold():
# Testing ui==xi in connected_double_edge_swap with low window threshold
graph = nx.star_graph(40)
degrees = sorted(d for n, d in graph.degree())
G = nx.connected_double_edge_swap(graph, 1, _window_threshold=0, seed=4)
assert nx.is_connected(graph)
assert degrees == sorted(d for n, d in graph.degree())
def test_directed_edge_swap_small():
with pytest.raises(nx.NetworkXError):
G = nx.directed_edge_swap(nx.path_graph(3, create_using=nx.DiGraph))
def test_directed_edge_swap_tries():
with pytest.raises(nx.NetworkXError):
G = nx.directed_edge_swap(
nx.path_graph(3, create_using=nx.DiGraph), nswap=1, max_tries=0
)
def test_directed_exception_undirected():
graph = nx.Graph([(0, 1), (2, 3)])
with pytest.raises(nx.NetworkXNotImplemented):
G = nx.directed_edge_swap(graph)
def test_directed_edge_max_tries():
with pytest.raises(nx.NetworkXAlgorithmError):
G = nx.directed_edge_swap(
nx.complete_graph(4, nx.DiGraph()), nswap=1, max_tries=5
)
def test_double_edge_swap_small():
with pytest.raises(nx.NetworkXError):
G = nx.double_edge_swap(nx.path_graph(3))
def test_double_edge_swap_tries():
with pytest.raises(nx.NetworkXError):
G = nx.double_edge_swap(nx.path_graph(10), nswap=1, max_tries=0)
def test_double_edge_directed():
graph = nx.DiGraph([(0, 1), (2, 3)])
with pytest.raises(nx.NetworkXError, match="not defined for directed graphs."):
G = nx.double_edge_swap(graph)
def test_double_edge_max_tries():
with pytest.raises(nx.NetworkXAlgorithmError):
G = nx.double_edge_swap(nx.complete_graph(4), nswap=1, max_tries=5)
def test_connected_double_edge_swap_small():
with pytest.raises(nx.NetworkXError):
G = nx.connected_double_edge_swap(nx.path_graph(3))
def test_connected_double_edge_swap_not_connected():
with pytest.raises(nx.NetworkXError):
G = nx.path_graph(3)
nx.add_path(G, [10, 11, 12])
G = nx.connected_double_edge_swap(G)
def test_degree_seq_c4():
G = nx.cycle_graph(4)
degrees = sorted(d for n, d in G.degree())
G = nx.double_edge_swap(G, 1, 100)
assert degrees == sorted(d for n, d in G.degree())
def test_fewer_than_4_nodes():
G = nx.DiGraph()
G.add_nodes_from([0, 1, 2])
with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."):
nx.directed_edge_swap(G)
def METHOD_NAME():
G = nx.DiGraph([(0, 1), (1, 2)])
G.add_nodes_from([3, 4])
with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"):
nx.directed_edge_swap(G)
G = nx.Graph()
G.add_nodes_from([0, 1, 2, 3])
with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"):
nx.double_edge_swap(G) |
293 | stub list document classification jobs | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the Amazon Comprehend unit tests.
"""
import datetime
from test_tools.example_stubber import ExampleStubber
class ComprehendStubber(ExampleStubber):
"""
A class that implements stub functions used by Amazon Comprehend unit tests.
The stubbed functions expect certain parameters to be passed to them as
part of the tests, and raise errors if the parameters are not as expected.
"""
def __init__(self, client, use_stubs=True):
"""
Initializes the object with a specific client and configures it for
stubbing or AWS passthrough.
:param client: A Boto3 Comprehend client.
:param use_stubs: When True, use stubs to intercept requests. Otherwise,
pass requests through to AWS.
"""
super().__init__(client, use_stubs)
def stub_detect_dominant_language(self, text, languages, error_code=None):
expected_params = {'Text': text}
response = {'Languages': languages}
self._stub_bifurcator(
'detect_dominant_language', expected_params, response,
error_code=error_code)
def stub_detect_entities(self, text, language, entities, error_code=None):
expected_params = {'Text': text, 'LanguageCode': language}
response = {'Entities': entities}
self._stub_bifurcator(
'detect_entities', expected_params, response, error_code=error_code)
def stub_detect_key_phrases(self, text, language, phrases, error_code=None):
expected_params = {'Text': text, 'LanguageCode': language}
response = {'KeyPhrases': phrases}
self._stub_bifurcator(
'detect_key_phrases', expected_params, response, error_code=error_code)
def stub_detect_pii_entities(self, text, language, entities, error_code=None):
expected_params = {'Text': text, 'LanguageCode': language}
response = {'Entities': entities}
self._stub_bifurcator(
'detect_pii_entities', expected_params, response, error_code=error_code)
def stub_detect_sentiment(
self, text, language, sentiment, sentiment_scores, error_code=None):
expected_params = {'Text': text, 'LanguageCode': language}
response = {'Sentiment': sentiment, 'SentimentScore': sentiment_scores}
self._stub_bifurcator(
'detect_sentiment', expected_params, response, error_code=error_code)
def stub_detect_syntax(self, text, language, tokens, error_code=None):
expected_params = {'Text': text, 'LanguageCode': language}
response = {'SyntaxTokens': tokens}
self._stub_bifurcator(
'detect_syntax', expected_params, response, error_code=error_code)
def stub_create_document_classifier(
self, name, lang_code, bucket_name, training_key, data_access_role_arn,
mode, classifier_arn, error_code=None):
expected_params = {
'DocumentClassifierName': name,
'LanguageCode': lang_code,
'InputDataConfig': {'S3Uri': f's3://{bucket_name}/{training_key}'},
'DataAccessRoleArn': data_access_role_arn,
'Mode': mode}
response = {'DocumentClassifierArn': classifier_arn}
self._stub_bifurcator(
'create_document_classifier', expected_params, response,
error_code=error_code)
def stub_describe_document_classifier(
self, classifier_arn, status, error_code=None):
expected_params = {'DocumentClassifierArn': classifier_arn}
response = {'DocumentClassifierProperties': {
'DocumentClassifierArn': classifier_arn,
'Status': status}}
self._stub_bifurcator(
'describe_document_classifier', expected_params, response,
error_code=error_code)
def stub_list_document_classifiers(self, arns, statuses, error_code=None):
expected_params = {}
response = {'DocumentClassifierPropertiesList': [
{'DocumentClassifierArn': arn, 'Status': status}
for arn, status in zip(arns, statuses)]}
self._stub_bifurcator(
'list_document_classifiers', expected_params, response,
error_code=error_code)
def stub_delete_document_classifier(self, classifier_arn, error_code=None):
expected_params = {'DocumentClassifierArn': classifier_arn}
response = {}
self._stub_bifurcator(
'delete_document_classifier', expected_params, response,
error_code=error_code)
def stub_start_document_classification_job(
self, classifier_arn, job_name, input_bucket, input_key, input_format,
output_bucket, output_key, data_role_arn, job_status, error_code=None):
expected_params = {
'DocumentClassifierArn': classifier_arn,
'JobName': job_name,
'InputDataConfig': {
'S3Uri': f's3://{input_bucket}/{input_key}',
'InputFormat': input_format},
'OutputDataConfig': {'S3Uri': f's3://{output_bucket}/{output_key}'},
'DataAccessRoleArn': data_role_arn}
response = {'JobStatus': job_status}
self._stub_bifurcator(
'start_document_classification_job', expected_params, response,
error_code=error_code)
def stub_describe_document_classification_job(
self, job_id, job_name, job_status, error_code=None):
expected_params = {'JobId': job_id}
response = {'DocumentClassificationJobProperties': {
'JobId': job_id, 'JobName': job_name, 'JobStatus': job_status}}
self._stub_bifurcator(
'describe_document_classification_job', expected_params, response,
error_code=error_code)
def METHOD_NAME(self, jobs, error_code=None):
expected_params = {}
response = {'DocumentClassificationJobPropertiesList': [
{'JobId': job} for job in jobs]}
self._stub_bifurcator(
'list_document_classification_jobs', expected_params, response,
error_code=error_code)
def stub_start_topics_detection_job(
self, job_name, input_bucket, input_key, input_format, output_bucket,
output_key, data_access_role_arn, job_id, job_status, error_code=None):
expected_params = {
'JobName': job_name,
'DataAccessRoleArn': data_access_role_arn,
'InputDataConfig': {
'S3Uri': f's3://{input_bucket}/{input_key}',
'InputFormat': input_format},
'OutputDataConfig': {'S3Uri': f's3://{output_bucket}/{output_key}'}}
response = {'JobId': job_id, 'JobStatus': job_status}
self._stub_bifurcator(
'start_topics_detection_job', expected_params, response,
error_code=error_code)
def stub_describe_topics_detection_job(self, job_id, error_code=None):
expected_params = {'JobId': job_id}
response = {'TopicsDetectionJobProperties': {'JobId': job_id}}
self._stub_bifurcator(
'describe_topics_detection_job', expected_params, response,
error_code=error_code)
def stub_list_topics_detection_jobs(self, job_ids, error_code=None):
expected_params = {}
response = {'TopicsDetectionJobPropertiesList': [
{'JobId': job_id} for job_id in job_ids]}
self._stub_bifurcator(
'list_topics_detection_jobs', expected_params, response,
error_code=error_code) |
294 | fully qualified table name | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import structlog
import warnings
from typing import List, Optional, Tuple
from ...core import Query
from ...core.context import get_db
from ...core.errors import MissingDateError
from .event_table_subset import EventTableSubset
from flowmachine.utils import standardise_date
logger = structlog.get_logger("flowmachine.debug", submodule=__name__)
class EventsTablesUnion(Query):
"""
Takes a list of subtables, subsets each of them
by date and selects a specified list of columns
from the result and unions (i.e. appends) all
of these tables. This class is mostly used as an
intermediate for other classes.
Parameters
----------
start, stop : str
ISO-format date
columns :
list of columns to select
    tables : str or list of strings, default None
        Can be a string naming a single table (with the schema), or a list of
        such strings. If None (the default), all subscriber tables are used;
        the keyword 'all' is deprecated in favour of None.
subscriber_identifier : {'msisdn', 'imei'}, default 'msisdn'
Either msisdn, or imei, the column that identifies the subscriber.
    subscriber_subset : str, list, flowmachine.core.Query, flowmachine.core.Table, default None
        If provided, a string or list of strings (msisdns or imeis) to limit
        results to; or a query or table with a column whose name matches
        subscriber_identifier (typically msisdn), to limit results to.
"""
def __init__(
self,
start,
stop,
*,
columns,
tables=None,
hours: Optional[Tuple[int, int]] = None,
subscriber_subset=None,
subscriber_identifier="msisdn",
):
""""""
if isinstance(tables, str) and tables.lower() == "all":
logger.warn(
"EventsTablesUnion will soon stop accepting the argument tables='all'. Use tables=None instead."
)
tables = None
self.start = standardise_date(start)
self.stop = standardise_date(stop)
self.columns = columns
self.tables = self._parse_tables(tables)
if "*" in columns and len(self.tables) != 1:
raise ValueError(
"Must give named tables when combining multiple event type tables."
)
self.date_subsets = self._make_table_list(
hours=hours,
subscriber_subset=subscriber_subset,
subscriber_identifier=subscriber_identifier,
)
super().__init__()
@property
def column_names(self) -> List[str]:
        # Use in preference to self.columns, which might be ["*"]
        return self.date_subsets[0].column_names
def _parse_tables(self, tables):
if tables is None:
return [f"events.{t}" for t in get_db().subscriber_tables]
elif isinstance(tables, str) and len(tables) > 0:
return [tables]
elif isinstance(tables, str):
raise ValueError("Empty table name.")
elif not isinstance(tables, list) or not all(
[isinstance(tbl, str) for tbl in tables]
):
raise ValueError("Tables must be a string or list of strings.")
elif len(tables) == 0:
raise ValueError("Empty tables list.")
else:
return tables
def _make_table_list(self, *, hours, subscriber_subset, subscriber_identifier):
"""
Makes a list of EventTableSubset queries.
"""
date_subsets = []
for table in self.tables:
try:
sql = EventTableSubset(
start=self.start,
stop=self.stop,
table=table,
columns=self.columns,
hours=hours,
subscriber_subset=subscriber_subset,
subscriber_identifier=subscriber_identifier,
)
date_subsets.append(sql)
except MissingDateError:
warnings.warn(
f"No data in {table} for {self.start}–{self.stop}", stacklevel=2
)
if not date_subsets:
raise MissingDateError(self.start, self.stop)
return date_subsets
def _make_query(self):
# Get the list of tables, select the relevant columns and union
# them all
sql = "\nUNION ALL\n".join(sd.get_query() for sd in self.date_subsets)
return sql
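    # The generated SQL has roughly this shape (table names illustrative):
    #
    #     SELECT ... FROM events.calls WHERE ...
    #     UNION ALL
    #     SELECT ... FROM events.sms WHERE ...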
@property
def METHOD_NAME(self):
        # EventTableSubset queries are a simple select from the events tables, and should not be cached
raise NotImplementedError |
295 | i rule | # ___________________________________________________________________________
#
# Pyomo: Python Optimization Modeling Objects
# Copyright (c) 2008-2022
# National Technology and Engineering Solutions of Sandia, LLC
# Under the terms of Contract DE-NA0003525 with National Technology and
# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
from pyomo.common.collections import Bunch
from pyomo.core import *
import math
import random
def print_model_stats(options, model):
print("-" * 40)
if options is None:
print("DEFAULT")
else:
print(options.type)
rowc = {}
for i in model.I:
rowc[i] = 0
colc = {}
for i in model.J:
colc[i] = 0
for i, j in model.S:
rowc[i] += 1
colc[j] += 1
print("Row Counts")
s = 0.0
for i in sorted(rowc):
s += rowc[i]
print("Average: %s" % str(s / len(rowc)))
print("Col Counts")
s = 0.0
for i in sorted(colc):
s += colc[i]
print("Average: %s" % str(s / len(colc)))
print("I %d" % len(model.I))
print("J %d" % len(model.J))
print("-" * 40)
def pyomo_create_model(options=None, model_options=None):
if model_options is None:
model_options = Bunch()
if model_options.type is None:
model_options.type = 'fixed_set_size'
#
# m - number of elements
#
m = 100 if model_options.m is None else model_options.m
#
# n - number of sets
#
n = 200 if model_options.n is None else model_options.n
seed = 9090 if model_options.seed is None else model_options.seed
    random.seed(seed)
#
if model_options.type == 'fixed_set_size':
#
# p - fixed number elements per set
# rho - fixed fraction of elements per set
#
p = model_options.p
if p is None:
if model_options.rho is None:
p = int(math.ceil(m * 0.7))
else:
p = int(math.ceil(m * model_options.rho))
#
def S_rule(model):
ans = set()
for j in range(1, n + 1):
tmp = list(range(1, m + 1))
random.shuffle(tmp)
for i in range(0, p):
ans.add((tmp[i], j))
return ans
elif model_options.type == 'fixed_element_coverage':
#
# p - fixed number of sets that cover each element
# rho - fixed fraction of sets that cover each element
#
p = model_options.p
if p is None:
if model_options.rho is None:
p = int(math.ceil(n * 0.4))
else:
p = int(math.ceil(n * model_options.rho))
#
def S_rule(model):
ans = set()
for i in range(1, m + 1):
tmp = list(range(1, n + 1))
random.shuffle(tmp)
for j in range(0, p):
ans.add((i, tmp[j]))
return ans
elif model_options.type == 'fixed_probability':
#
# rho - probability of selecting element for a set
#
rho = 0.3 if model_options.rho is None else model_options.rho
#
def S_rule(model):
ans = set()
for j in range(1, n + 1):
for i in range(1, m + 1):
if random.uniform(0, 1) < rho:
ans.add((i, j))
return ans
elif model_options.type == 'fixed_fill':
#
# rho - |S|/(I*J)
#
rho = 0.3 if model_options.rho is None else model_options.rho
#
def S_rule(model):
ans = set()
for j in range(1, n + 1):
for i in range(1, m + 1):
if random.uniform(0, 1) < rho:
ans.add((i, j))
return ans
#
# CREATE MODEL
#
model = ConcreteModel()
#
# (i,j) in S if element i in set j
#
model.S = Set(dimen=2, initialize=S_rule)
#
# Dynamically create the I and J index sets, since
# some rows or columns of S may not be populated.
#
def METHOD_NAME(model):
return set((i for (i, j) in model.S))
model.I = Set(initialize=METHOD_NAME)
def J_rule(model):
return set((j for (i, j) in model.S))
model.J = Set(initialize=J_rule)
#
# Weights
#
model.w = Param(model.J, within=NonNegativeReals, initialize=1.0)
#
# Set selection binary variables
#
model.x = Var(model.J, within=Binary)
#
# Objective
#
def cost_rule(model):
return sum_product(model.w, model.x)
model.cost = Objective(rule=cost_rule)
#
# Constraint
#
def cover_rule(model, i):
expr = 0
for j in model.x:
if (i, j) in model.S:
expr += model.x[j]
#
# WEH - this check is not needed, since I is constructed dynamically
#
# if expr is 0:
# return Constraint.Skip
return expr >= 1
model.cover = Constraint(model.I, rule=cover_rule)
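    # The model above is a weighted set covering problem; a sketch of its
    # algebraic form, written directly from the rules defined above:
    #
    #     minimize    sum_j w[j] * x[j]
    #     subject to  sum_{j : (i, j) in S} x[j] >= 1   for every element i in I
    #                 x[j] in {0, 1}                    for every set j in J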
#
print_model_stats(model_options, model)
return model
def test_model(options=None):
model = pyomo_create_model(model_options=options)
# print_model_stats(options, model)
if __name__ == '__main__':
test_model()
#
options = Bunch()
options.type = 'fixed_set_size'
options.m = 11
options.n = 21
options.rho = 0.3
test_model(options)
#
options = Bunch()
options.type = 'fixed_element_coverage'
test_model(options)
#
options = Bunch()
options.m = 100
options.n = 200
options.type = 'fixed_probability'
test_model(options)
#
options = Bunch()
options.type = 'fixed_element_coverage'
options.m = 10
options.n = 100
options.rho = 0.1
test_model(options)
# |
296 | weixin oauth login | # -*- coding: utf-8 -*-
import uuid
import urllib
import logging
import requests
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.core.files.base import ContentFile
from django.utils.translation import gettext as _
from seahub.api2.utils import get_api_token
from seahub import auth
from seahub.base.accounts import User
from seahub.avatar.models import Avatar
from seahub.profile.models import Profile
from seahub.utils import render_error, get_site_scheme_and_netloc
from seahub.auth.models import SocialAuthUser
from seahub.weixin.settings import ENABLE_WEIXIN, \
WEIXIN_OAUTH_APP_ID, WEIXIN_OAUTH_APP_SECRET, \
WEIXIN_OAUTH_SCOPE, WEIXIN_OAUTH_RESPONSE_TYPE, WEIXIN_OAUTH_QR_CONNECT_URL, \
WEIXIN_OAUTH_GRANT_TYPE, WEIXIN_OAUTH_ACCESS_TOKEN_URL, \
WEIXIN_OAUTH_USER_INFO_URL
logger = logging.getLogger(__name__)
# https://developers.weixin.qq.com/doc/oplatform/Website_App/WeChat_Login/Wechat_Login.html
def METHOD_NAME(request):
if not ENABLE_WEIXIN:
return render_error(request, _('Error, please contact administrator.'))
state = str(uuid.uuid4())
request.session['weixin_oauth_login_state'] = state
request.session['weixin_oauth_login_redirect'] = request.GET.get(auth.REDIRECT_FIELD_NAME, '/')
data = {
'appid': WEIXIN_OAUTH_APP_ID,
'redirect_uri': get_site_scheme_and_netloc() + reverse('weixin_oauth_callback'),
'response_type': WEIXIN_OAUTH_RESPONSE_TYPE,
'scope': WEIXIN_OAUTH_SCOPE,
'state': state,
}
url = WEIXIN_OAUTH_QR_CONNECT_URL + '?' + urllib.parse.urlencode(data)
return HttpResponseRedirect(url)
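# The redirect URL built above has roughly this shape; the base URL comes from
# WEIXIN_OAUTH_QR_CONNECT_URL in settings, and the values shown are illustrative:
#
#     <WEIXIN_OAUTH_QR_CONNECT_URL>?appid=<app id>
#         &redirect_uri=<url of weixin_oauth_callback>
#         &response_type=<response type>&scope=<scope>&state=<uuid4>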
def weixin_oauth_callback(request):
if not ENABLE_WEIXIN:
return render_error(request, _('Error, please contact administrator.'))
state = request.GET.get('state', '')
if not state or state != request.session.get('weixin_oauth_login_state', ''):
logger.error('invalid state')
return render_error(request, _('Error, please contact administrator.'))
# get access_token and user openid
parameters = {
'appid': WEIXIN_OAUTH_APP_ID,
'secret': WEIXIN_OAUTH_APP_SECRET,
'code': request.GET.get('code'),
'grant_type': WEIXIN_OAUTH_GRANT_TYPE,
}
access_token_url = WEIXIN_OAUTH_ACCESS_TOKEN_URL + '?' + urllib.parse.urlencode(parameters)
access_token_json = requests.get(access_token_url).json()
openid = access_token_json.get('openid', '')
access_token = access_token_json.get('access_token', '')
if not access_token or not openid:
logger.error('invalid access_token or openid')
logger.error(access_token_url)
logger.error(access_token_json)
return render_error(request, _('Error, please contact administrator.'))
    # log the user in
auth_user = SocialAuthUser.objects.get_by_provider_and_uid('weixin', openid)
if auth_user:
email = auth_user.username
is_new_user = False
else:
email = None
is_new_user = True
try:
user = auth.authenticate(remote_user=email)
email = user.username
except User.DoesNotExist:
user = None
except Exception as e:
logger.error(e)
return render_error(request, _('Error, please contact administrator.'))
if not user or not user.is_active:
return render_error(request, _('User %s not found or inactive.') % email)
if is_new_user:
SocialAuthUser.objects.add(email, 'weixin', openid)
request.user = user
auth.login(request, user)
# get user profile info
parameters = {
'access_token': access_token,
'openid': openid,
}
user_info_url = WEIXIN_OAUTH_USER_INFO_URL + '?' + urllib.parse.urlencode(parameters)
user_info_resp = requests.get(user_info_url).json()
name = user_info_resp['nickname'] if 'nickname' in user_info_resp else ''
name = name.encode('raw_unicode_escape').decode('utf-8')
if name:
profile = Profile.objects.get_profile_by_user(email)
if not profile:
profile = Profile(user=email)
profile.nickname = name.strip()
profile.save()
avatar_url = user_info_resp['headimgurl'] if 'headimgurl' in user_info_resp else ''
try:
        image_name = 'weixin_avatar'
image_file = requests.get(avatar_url).content
avatar = Avatar.objects.filter(emailuser=email, primary=True).first()
avatar = avatar or Avatar(emailuser=email, primary=True)
avatar_file = ContentFile(image_file)
avatar_file.name = image_name
avatar.avatar = avatar_file
avatar.save()
except Exception as e:
logger.error(e)
# generate auth token for Seafile client
api_token = get_api_token(request)
# redirect user to home page
response = HttpResponseRedirect(request.session['weixin_oauth_login_redirect'])
response.set_cookie('seahub_auth', email + '@' + api_token.key)
return response |
297 | test cross layer scaling equalize params | #!/usr/bin/env python3.5
# -*- mode: python -*-
# =============================================================================
# @@-COPYRIGHT-START-@@
#
# Copyright (c) 2019, Qualcomm Innovation Center, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# @@-COPYRIGHT-END-@@
# =============================================================================
import unittest
import numpy as np
import aimet_common.libpymo as libpymo
class TestCrossLayerScaling(unittest.TestCase):
def METHOD_NAME(self):
print("starting python model optimization cross layer scaling test for non depthwise")
# Generating random numbers from a normal distribution for the weights and biases of the current and prev layer
np.random.seed(1)
total = 2 * 3 * 2 * 2
weight1 = np.array(np.random.randn(total))
bias1 = np.array(np.random.randn(2))
weight2 = np.array(np.random.randn(total))
weight_sz1 = np.array([2, 3, 2, 2])
weight_sz2 = np.array([3, 2, 2, 2])
# Initializing the struct EqualizationParams
prev_layer_params = libpymo.EqualizationParams()
curr_layer_params = libpymo.EqualizationParams()
prev_layer_params.weight = weight1
prev_layer_params.weightShape = weight_sz1
prev_layer_params.bias = bias1
prev_layer_params.isBiasNone = False
curr_layer_params.weight = weight2
curr_layer_params.weightShape = weight_sz2
w1, w2, b1, scale_factor = cross_layer_scaling_python_implementation(weight1.reshape(weight_sz1),
weight2.reshape(weight_sz2),
bias1)
rescaling_vector = libpymo.scaleLayerParams(prev_layer_params, curr_layer_params)
assert (np.allclose(w1.flatten(), prev_layer_params.weight))
assert (np.allclose(w2.flatten(), curr_layer_params.weight))
assert (np.allclose(b1, prev_layer_params.bias))
assert (np.allclose(scale_factor, rescaling_vector))
def test_cross_layer_scaling_equalize_params_depthwise(self):
np.random.seed(1)
weight1 = np.array(np.random.randn(2 * 2 * 3 * 2))
bias1 = np.array(np.random.randn(2))
weight2 = np.array(np.random.randn(2 * 2 * 4))
bias2 = np.array(np.random.randn(2))
weight3 = np.array(np.random.randn(2 * 2 * 4 * 5))
weight_sz1 = np.array([2, 2, 3, 2])
weight_sz2 = np.array([2, 2, 4, 1])
weight_sz3 = np.array([2, 2, 4, 5])
# Initializing the struct EqualizationParams
prev_layer_params = libpymo.EqualizationParams()
curr_layer_params = libpymo.EqualizationParams()
next_layer_params = libpymo.EqualizationParams()
prev_layer_params.weight = weight1
prev_layer_params.weightShape = weight_sz1
prev_layer_params.bias = bias1
prev_layer_params.isBiasNone = False
curr_layer_params.weight = weight2
curr_layer_params.weightShape = weight_sz2
curr_layer_params.bias = bias2
curr_layer_params.isBiasNone = False
next_layer_params.weight = weight3
next_layer_params.weightShape = weight_sz3
w1, w2, w3, b1, b2, s_12, s_23 = cross_layer_scaling_depthwise_separable_layers(weight1.reshape(weight_sz1),
weight2.reshape(weight_sz2),
weight3.reshape(weight_sz3),
bias1, bias2)
scaling_params = libpymo.scaleDepthWiseSeparableLayer(prev_layer_params, curr_layer_params, next_layer_params)
assert (np.allclose(w1.flatten(), prev_layer_params.weight))
assert (np.allclose(w2.flatten(), curr_layer_params.weight))
assert (np.allclose(w3.flatten(), next_layer_params.weight))
assert (np.allclose(b1, prev_layer_params.bias))
assert (np.allclose(b2, curr_layer_params.bias))
assert (np.allclose(scaling_params.scalingMatrix12, s_12))
assert (np.allclose(s_23, scaling_params.scalingMatrix23))
def cross_layer_scaling_python_implementation(weight1, weight2, bias1):
w1 = weight1
w2 = weight2
b1 = bias1
range1 = np.amax(np.abs(w1), axis=(1, 2, 3))
range2 = np.amax(np.abs(w2), axis=(0, 2, 3))
scale_factor = range1 / np.power(range1 * range2, 1. / 2)
for i in range(len(scale_factor)):
w1[i, :, :, :] = w1[i, :, :, :] * (1.0 / scale_factor[i])
w2[:, i, :, :] = w2[:, i, :, :] * scale_factor[i]
b1[i] = b1[i] * (1.0 / scale_factor[i])
return w1, w2, b1, scale_factor
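# In the non-depthwise case above, the per-channel scale factor is
#     s[i] = r1[i] / sqrt(r1[i] * r2[i])
# where r1[i] is the weight range of output channel i in the first layer and
# r2[i] the range of input channel i in the second. After rescaling, both
# ranges equal sqrt(r1[i] * r2[i]), which is what the equalization aims for.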
def cross_layer_scaling_depthwise_separable_layers(weight1, weight2, weight3, bias1, bias2):
w1 = weight1
w2 = weight2
w3 = weight3
b1 = bias1
b2 = bias2
range1 = np.amax(np.abs(w1), axis=(1, 2, 3))
range2 = np.amax(np.abs(w2), axis=(1, 2, 3))
range3 = np.amax(np.abs(w3), axis=(0, 2, 3))
s_12 = range1 / np.power(range1 * range2 * range3, 1.0 / 3)
s_23 = np.power(range1 * range2 * range3, 1.0 / 3) / range3
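    # Depthwise-separable case: all three ranges are equalized to their
    # geometric mean g[i] = (r1[i] * r2[i] * r3[i]) ** (1/3), so
    #     s_12[i] = r1[i] / g[i]    and    s_23[i] = g[i] / r3[i].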
for i in range(len(s_12)):
w1[i, :, :, :] = w1[i, :, :, :] * (1.0 / s_12[i])
w2[i, :, :, :] = w2[i, :, :, :] * s_12[i] * (1.0 / s_23[i])
b1[i] = b1[i] * (1.0 / s_12[i])
b2[i] = b2[i] * (1.0 / s_23[i])
w3[:, i, :, :] = w3[:, i, :, :] * s_23[i]
return w1, w2, w3, b1, b2, s_12, s_23 |
298 | test add peer | from unittest.mock import MagicMock, patch
import os
from bgpcfgd.directory import Directory
from bgpcfgd.template import TemplateFabric
from . import swsscommon_test
from .util import load_constants
from swsscommon import swsscommon
import bgpcfgd.managers_bgp
TEMPLATE_PATH = os.path.abspath('../../dockers/docker-fpm-frr/frr')
def load_constant_files():
paths = ["tests/data/constants", "../../files/image_config/constants"]
constant_files = []
for path in paths:
constant_files += [os.path.abspath(os.path.join(path, name)) for name in os.listdir(path)
if os.path.isfile(os.path.join(path, name)) and name.startswith("constants")]
return constant_files
def constructor(constants_path):
cfg_mgr = MagicMock()
constants = load_constants(constants_path)['constants']
common_objs = {
'directory': Directory(),
'cfg_mgr': cfg_mgr,
'tf': TemplateFabric(TEMPLATE_PATH),
'constants': constants
}
return_value_map = {
"['vtysh', '-c', 'show bgp vrfs json']": (0, "{\"vrfs\": {\"default\": {}}}", ""),
"['vtysh', '-c', 'show bgp vrf default neighbors json']": (0, "{\"10.10.10.1\": {}, \"20.20.20.1\": {}, \"fc00:10::1\": {}}", "")
}
bgpcfgd.managers_bgp.run_command = lambda cmd: return_value_map[str(cmd)]
m = bgpcfgd.managers_bgp.BGPPeerMgrBase(common_objs, "CONFIG_DB", swsscommon.CFG_BGP_NEIGHBOR_TABLE_NAME, "general", True)
assert m.peer_type == "general"
assert m.check_neig_meta == ('bgp' in constants and 'use_neighbors_meta' in constants['bgp'] and constants['bgp']['use_neighbors_meta'])
m.directory.put("CONFIG_DB", swsscommon.CFG_DEVICE_METADATA_TABLE_NAME, "localhost", {"bgp_asn": "65100"})
m.directory.put("CONFIG_DB", swsscommon.CFG_LOOPBACK_INTERFACE_TABLE_NAME, "Loopback0|11.11.11.11/32", {})
m.directory.put("CONFIG_DB", swsscommon.CFG_LOOPBACK_INTERFACE_TABLE_NAME, "Loopback0|FC00:1::32/128", {})
m.directory.put("LOCAL", "local_addresses", "30.30.30.30", {"interface": "Ethernet4|30.30.30.30/24"})
m.directory.put("LOCAL", "local_addresses", "fc00:20::20", {"interface": "Ethernet8|fc00:20::20/96"})
m.directory.put("LOCAL", "interfaces", "Ethernet4|30.30.30.30/24", {"anything": "anything"})
m.directory.put("LOCAL", "interfaces", "Ethernet8|fc00:20::20/96", {"anything": "anything"})
if m.check_neig_meta:
m.directory.put("CONFIG_DB", swsscommon.CFG_DEVICE_NEIGHBOR_METADATA_TABLE_NAME, "TOR", {})
return m
@patch('bgpcfgd.managers_bgp.log_info')
def test_update_peer_up(mocked_log_info):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("10.10.10.1", {"admin_status": "up"})
assert res, "Expect True return value for peer update"
mocked_log_info.assert_called_with("Peer 'default|10.10.10.1' admin state is set to 'up'")
@patch('bgpcfgd.managers_bgp.log_info')
def test_update_peer_up_ipv6(mocked_log_info):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("fc00:10::1", {"admin_status": "up"})
assert res, "Expect True return value for peer update"
mocked_log_info.assert_called_with("Peer 'default|fc00:10::1' admin state is set to 'up'")
@patch('bgpcfgd.managers_bgp.log_info')
def test_update_peer_down(mocked_log_info):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("10.10.10.1", {"admin_status": "down"})
assert res, "Expect True return value for peer update"
mocked_log_info.assert_called_with("Peer 'default|10.10.10.1' admin state is set to 'down'")
@patch('bgpcfgd.managers_bgp.log_err')
def test_update_peer_no_admin_status(mocked_log_err):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("10.10.10.1", {"anything": "anything"})
assert res, "Expect True return value for peer update"
mocked_log_err.assert_called_with("Peer '(default|10.10.10.1)': Can't update the peer. Only 'admin_status' attribute is supported")
@patch('bgpcfgd.managers_bgp.log_err')
def test_update_peer_invalid_admin_status(mocked_log_err):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("10.10.10.1", {"admin_status": "invalid"})
assert res, "Expect True return value for peer update"
mocked_log_err.assert_called_with("Peer 'default|10.10.10.1': Can't update the peer. It has wrong attribute value attr['admin_status'] = 'invalid'")
def METHOD_NAME():
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("30.30.30.1", {'asn': '65200', 'holdtime': '180', 'keepalive': '60', 'local_addr': '30.30.30.30', 'name': 'TOR', 'nhopself': '0', 'rrclient': '0'})
assert res, "Expect True return value"
def test_add_peer_ipv6():
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("fc00:20::1", {'asn': '65200', 'holdtime': '180', 'keepalive': '60', 'local_addr': 'fc00:20::20', 'name': 'TOR', 'nhopself': '0', 'rrclient': '0'})
assert res, "Expect True return value"
@patch('bgpcfgd.managers_bgp.log_warn')
def test_add_peer_no_local_addr(mocked_log_warn):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("30.30.30.1", {"admin_status": "up"})
assert res, "Expect True return value"
mocked_log_warn.assert_called_with("Peer 30.30.30.1. Missing attribute 'local_addr'")
@patch('bgpcfgd.managers_bgp.log_debug')
def test_add_peer_invalid_local_addr(mocked_log_debug):
for constant in load_constant_files():
m = constructor(constant)
res = m.set_handler("30.30.30.1", {"local_addr": "40.40.40.40", "admin_status": "up"})
assert not res, "Expect False return value"
mocked_log_debug.assert_called_with("Peer '30.30.30.1' with local address '40.40.40.40' wait for the corresponding interface to be set")
@patch('bgpcfgd.managers_bgp.log_info')
def test_del_handler(mocked_log_info):
for constant in load_constant_files():
m = constructor(constant)
m.del_handler("10.10.10.1")
mocked_log_info.assert_called_with("Peer '(default|10.10.10.1)' has been removed")
@patch('bgpcfgd.managers_bgp.log_warn')
def test_del_handler_nonexist_peer(mocked_log_warn):
for constant in load_constant_files():
m = constructor(constant)
m.del_handler("40.40.40.1")
mocked_log_warn.assert_called_with("Peer '(default|40.40.40.1)' has not been found") |
299 | wrap exceptions | # -*- coding: utf-8 -*-
import os
import sys
import traceback
from functools import wraps
from http import HTTPStatus
from xmlrpc.client import Fault
import django.core.exceptions
import django.db.models
import django.db.utils
from django.conf import settings
__filters__ = ("wrap_exceptions",)
def _validate_config():
if not hasattr(settings, "XMLRPC_METHODS"):
raise ImportError("Variable 'XMLRPC_METHODS' not set in settings.")
def _get_enable_apis():
_validate_config()
apis = list()
for value in settings.XMLRPC_METHODS.values():
for api in value:
apis.append(api[0])
return apis
def _wrap_exceptions(module_name):
"""Load api list and wrap them with decorators"""
module = __import__(module_name, {}, {}, [""])
funcs = getattr(module, "__all__", None)
if not funcs:
return
for func in funcs:
func = getattr(module, func, None)
if callable(func):
for api_filter in XMLRPC_API_FILTERS:
func = api_filter(func)
setattr(sys.modules[module.__name__], func.__name__, func)
def autowrap_xmlrpc_apis(path, package):
"""Auto load xmlrpc api, based on directory structure and XMLRPC_METHODS
setting.
It will load modules that were listed in XMLRPC_METHODS, and get __all__
attribute of each module to collect api functions.
Then wrap the apis with decorators in order(appearance order in
__filters__) and replace them.
Everything is done when import tcms.xmlrpc.* or tcms.xmlrpc automatically.
If you want to add new decorators, please append it in this module and
insert it into __filters__.
"""
enable_apis = _get_enable_apis()
for dir_path, dir_names, file_names in os.walk(path):
rel_path = os.path.relpath(dir_path, path)
if rel_path == ".":
rel_pkg = ""
else:
rel_pkg = ".%s" % ".".join(rel_path.split(os.sep))
for file_name in file_names:
root, ext = os.path.splitext(file_name)
# Skip __init__ and anything that's not .py
# FIXME maybe .pyc in prod env.
if ext != ".py" or root == "__init__":
continue
module_name = "%s%s.%s" % (package, rel_pkg, root)
if module_name in enable_apis:
_wrap_exceptions(module_name)
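# A sketch of the expected XMLRPC_METHODS shape in settings, inferred from
# _get_enable_apis() above (the key and module paths are illustrative only):
#
#     XMLRPC_METHODS = {
#         'TCMS_XML_RPC': (
#             ('tcms.xmlrpc.api.testcase', 'testcase'),
#             ('tcms.xmlrpc.api.testrun', 'testrun'),
#         ),
#     }
#
# Each pair's first element is the dotted module path; autowrap_xmlrpc_apis()
# wraps every callable listed in that module's __all__ with the filters above.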
def _format_message(msg):
return [msg] if isinstance(msg, str) else msg
# create your own filter here.
def METHOD_NAME(func):
@wraps(func)
def _decorator(*args, **kwargs):
try:
return func(*args, **kwargs)
except django.core.exceptions.PermissionDenied as e:
# 403 Forbidden
fault_code = HTTPStatus.FORBIDDEN
fault_string = str(e)
except django.db.models.ObjectDoesNotExist as e:
# 404 Not Found
fault_code = HTTPStatus.NOT_FOUND
fault_string = str(e)
except (
django.core.exceptions.FieldDoesNotExist,
django.core.exceptions.FieldError,
django.core.exceptions.ValidationError,
django.core.exceptions.MultipleObjectsReturned,
django.forms.ValidationError,
ValueError,
TypeError,
) as e:
# 400 Bad Request
fault_code = HTTPStatus.BAD_REQUEST
fault_string = str(e)
except django.db.utils.IntegrityError as e:
# 409 Duplicate
fault_code = HTTPStatus.CONFLICT
fault_string = str(e)
except NotImplementedError as e:
fault_code = HTTPStatus.NOT_IMPLEMENTED
fault_string = str(e)
except Exception as e:
# 500 Server Error
fault_code = HTTPStatus.INTERNAL_SERVER_ERROR
fault_string = str(e)
if settings.DEBUG:
stack_trace = "".join(traceback.format_exception(*sys.exc_info()))
fault_string = f"{fault_string}\n{stack_trace}"
raise Fault(faultCode=fault_code, faultString=_format_message(fault_string))
return _decorator
XMLRPC_API_FILTERS = [
getattr(sys.modules[__name__], api_filter, None) for api_filter in __filters__
] |