label (string, 1-61 chars) | code (string, 4k-8k chars) |
---|---|
count records | # Copyright (c) 2005-2012 Stephen John Machin, Lingfo Pty Ltd
# This module is part of the xlrd package, which is released under a
# BSD-style licence.
from .info import __VERSION__
import sys, zipfile, pprint
from . import timemachine
from .biffh import (
XLRDError,
biff_text_from_num,
error_text_from_code,
XL_CELL_BLANK,
XL_CELL_TEXT,
XL_CELL_BOOLEAN,
XL_CELL_ERROR,
XL_CELL_EMPTY,
XL_CELL_DATE,
XL_CELL_NUMBER
)
from .formula import * # is constrained by __all__
from .book import Book, colname
from .sheet import empty_cell
from .xldate import XLDateError, xldate_as_tuple, xldate_as_datetime
from .xlsx import X12Book
if sys.version.startswith("IronPython"):
# print >> sys.stderr, "...importing encodings"
import encodings
try:
import mmap
MMAP_AVAILABLE = 1
except ImportError:
MMAP_AVAILABLE = 0
USE_MMAP = MMAP_AVAILABLE
def open_workbook(filename=None,
logfile=sys.stdout,
verbosity=0,
use_mmap=USE_MMAP,
file_contents=None,
encoding_override=None,
formatting_info=False,
on_demand=False,
ragged_rows=False,
):
"""
Open a spreadsheet file for data extraction.
:param filename: The path to the spreadsheet file to be opened.
:param logfile: An open file to which messages and diagnostics are written.
:param verbosity: Increases the volume of trace material written to the
logfile.
:param use_mmap:
Whether to use the mmap module is determined heuristically.
Use this arg to override the result.
Current heuristic: mmap is used if it exists.
:param file_contents:
A string or an :class:`mmap.mmap` object or some other behave-alike
object. If ``file_contents`` is supplied, ``filename`` will not be used,
except (possibly) in messages.
:param encoding_override:
Used to overcome missing or bad codepage information
in older-version files. See :doc:`unicode`.
:param formatting_info:
The default is ``False``, which saves memory.
In this case, "Blank" cells, which are those with their own formatting
information but no data, are treated as empty by ignoring the file's
``BLANK`` and ``MULBLANK`` records.
This cuts off any bottom or right "margin" of rows of empty or blank
cells.
Only :meth:`~xlrd.sheet.Sheet.cell_value` and
:meth:`~xlrd.sheet.Sheet.cell_type` are available.
When ``True``, formatting information will be read from the spreadsheet
file. This provides all cells, including empty and blank cells.
Formatting information is available for each cell.
Note that this will raise a NotImplementedError when used with an
xlsx file.
:param on_demand:
Governs whether sheets are all loaded initially or when demanded
by the caller. See :doc:`on_demand`.
:param ragged_rows:
The default of ``False`` means all rows are padded out with empty cells so
that all rows have the same size as found in
:attr:`~xlrd.sheet.Sheet.ncols`.
``True`` means that there are no empty cells at the ends of rows.
This can result in substantial memory savings if rows are of widely
varying sizes. See also the :meth:`~xlrd.sheet.Sheet.row_len` method.
:returns: An instance of the :class:`~xlrd.book.Book` class.
"""
peeksz = 4
if file_contents:
peek = file_contents[:peeksz]
else:
with open(filename, "rb") as f:
peek = f.read(peeksz)
if peek == b"PK\x03\x04": # a ZIP file
if file_contents:
zf = zipfile.ZipFile(timemachine.BYTES_IO(file_contents))
else:
zf = zipfile.ZipFile(filename)
# Workaround for some third party files that use forward slashes and
# lower case names. We map the expected name in lowercase to the
# actual filename in the zip container.
component_names = dict([(X12Book.convert_filename(name), name)
for name in zf.namelist()])
if verbosity:
logfile.write('ZIP component_names:\n')
pprint.pprint(component_names, logfile)
if 'xl/workbook.xml' in component_names:
from . import xlsx
bk = xlsx.open_workbook_2007_xml(
zf,
component_names,
logfile=logfile,
verbosity=verbosity,
use_mmap=use_mmap,
formatting_info=formatting_info,
on_demand=on_demand,
ragged_rows=ragged_rows,
)
return bk
if 'xl/workbook.bin' in component_names:
raise XLRDError('Excel 2007 xlsb file; not supported')
if 'content.xml' in component_names:
raise XLRDError('Openoffice.org ODS file; not supported')
raise XLRDError('ZIP file contents not a known type of workbook')
from . import book
bk = book.open_workbook_xls(
filename=filename,
logfile=logfile,
verbosity=verbosity,
use_mmap=use_mmap,
file_contents=file_contents,
encoding_override=encoding_override,
formatting_info=formatting_info,
on_demand=on_demand,
ragged_rows=ragged_rows,
)
return bk
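# --- Editor's illustrative sketch (not part of xlrd) ---
# A minimal, hedged example of calling open_workbook() with the keyword
# arguments documented above; "example.xls" is a hypothetical path and this
# helper is not used anywhere in the package.
def _open_workbook_example(path="example.xls"):
    book = open_workbook(path, on_demand=True, ragged_rows=True)
    sheet = book.sheet_by_index(0)       # Book.sheet_by_index is standard xlrd API
    first_cell = sheet.cell_value(0, 0)  # cell_value is available even without formatting_info
    return sheet.nrows, sheet.row_len(0), first_cell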
def dump(filename, outfile=sys.stdout, unnumbered=False):
"""
For debugging: dump an XLS file's BIFF records in char & hex.
:param filename: The path to the file to be dumped.
:param outfile: An open file, to which the dump is written.
:param unnumbered: If true, omit offsets (for meaningful diffs).
"""
from .biffh import biff_dump
bk = Book()
bk.biff2_8_load(filename=filename, logfile=outfile, )
biff_dump(bk.mem, bk.base, bk.stream_len, 0, outfile, unnumbered)
def METHOD_NAME(filename, outfile=sys.stdout):
"""
For debugging and analysis: summarise the file's BIFF records.
ie: produce a sorted file of ``(record_name, count)``.
:param filename: The path to the file to be summarised.
:param outfile: An open file, to which the summary is written.
"""
from .biffh import biff_count_records
bk = Book()
bk.biff2_8_load(filename=filename, logfile=outfile, )
biff_count_records(bk.mem, bk.base, bk.stream_len, outfile) |
test two args default | # Owner(s): ["oncall: fx"]
import torch
from torch.testing._internal.common_utils import (
TestCase, run_tests)
from torch.fx.experimental.proxy_tensor import make_fx
from torch.fx.passes.dialect.common.cse_pass import CSEPass, get_CSE_banned_ops
from torch.fx import symbolic_trace
import random
banned_ops = get_CSE_banned_ops()
P_default = CSEPass(banned_ops=banned_ops)
def check(self, f, t, delta, check_val=True, graph_input=False, P=None):
"""
check that the CSE-modified graph of ``f``
1) has delta fewer nodes, and
2) is not reduced further by a second pass, and
3) returns modified=True only if the number of nodes decreases.
Args:
f: function to be checked
t: tensor to be passed to f
delta: an integer >= -1.
If delta = -1, it only checks that the new graph has no more nodes than the original
check_val: if True, check that the output of f is correct
graph_input: True if f is of type GraphModule
P: the pass to use. If None, use P_default
"""
if graph_input:
fx_g = f
else:
fx_g = make_fx(f)(t)
if P is None:
P = P_default
res = P(fx_g)
new_g = res.graph_module
new_graph = new_g.graph
modified = res.modified
# the number of nodes decreases or stays the same
old_num_nodes = len(fx_g.graph.nodes)
new_num_nodes = len(new_graph.nodes)
assert (new_num_nodes < old_num_nodes) == modified, "modified should be True iff the number of nodes decreases"
if delta == -1:
self.assertTrue(old_num_nodes >= new_num_nodes, (
f"number of nodes increased {old_num_nodes}, {new_num_nodes}"))
else:
self.assertTrue(old_num_nodes == new_num_nodes + delta, (
f"number of nodes not the same {old_num_nodes - delta}, {new_num_nodes}\n {fx_g.graph} \n {new_graph}"))
# a second pass should not reduce more nodes
res = P(new_g)
pass_2_graph = res.graph_module.graph
pass_2_num_nodes = len(pass_2_graph.nodes)
self.assertTrue(pass_2_num_nodes == new_num_nodes, (
f"second pass graph has less node {pass_2_num_nodes}, {new_num_nodes}\n {new_graph} \n {pass_2_graph}"))
# check correctness
if check_val:
true_result = fx_g(t)
our_result = new_g(t)
if true_result is None: # both return None
self.assertTrue(our_result is None, f"true result is None, CSE result is {our_result}")
else: # results returned are the same
self.assertTrue(torch.all(true_result == our_result), (
f"results are different {true_result}, {our_result}")) # check results are the same
class TestCSEPass(TestCase):
def test_nochange(self):
def f(x):
a = x + 1
b = x + a
a = x
d = x + a
return b + d
t = torch.randn(2, 2)
check(self, f, t, 0)
def test_empty(self):
def f(x):
pass
t = torch.randn(2, 2)
check(self, f, t, 0)
def test_immutable_list_type(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1)
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_immutable_list_multiple_entries(self):
def f(x):
a = x.sum(dim=[0, 1])
b = x.sum(dim=[0, 1])
c = x.sum(dim=1)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple(self):
def f(x):
a = x.cos()
b = x.cos()
c = a + a
d = b + b
return c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple_2(self):
def f(x):
a = x.cos().sin()
b = x.cos().sin()
c = a + a
d = b + b
return c + d
t = torch.randn(1)
check(self, f, t, 3)
def METHOD_NAME(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=False)
c = x.sum(dim=1, keepdim=False)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 3)
def test_two_args(self):
def f(x):
a = x.sum(dim=1)
b = x.sum(dim=1, keepdim=True)
c = x.sum(dim=1, keepdim=True)
d = x.sum(dim=1)
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 2)
def test_simple_multiple_same_ops(self):
def f(x):
a = x.sum()
b = x.sum()
c = x.sum()
d = x.sum()
return a + b + c + d
t = torch.randn(2, 2)
check(self, f, t, 3)
def test_nested_immutable_list_type(self):
def f(x):
a = torch.cat((x, x))
b = torch.cat((x, x))
return a + b
t = torch.randn(2, 2)
check(self, f, t, 1)
def test_kwarg(self):
def f(x):
a = torch.ones_like(x)
b = torch.ones_like(x)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 1)
"""
Generate a function with random ops and check that the result is the same
"""
def test_random(self):
def f(x):
vals = [x]
ops = [torch.clone, torch.cos, torch.tanh, torch.nn.functional.gelu]
for _ in range(100):
new_val = random.choice(ops)(random.choice(vals))
vals.append(new_val)
return vals[-1]
fx_g = symbolic_trace(f)
fx_g.graph.eliminate_dead_code()
fx_g.recompile()
t = torch.randn(2, 2)
for _ in range(30):
check(self, fx_g, t, -1, graph_input=True)
"""
Test that the banned-ops list bans ops as expected.
"""
def test_banned_list(self):
def f(x):
a = x + 1
b = x + 1
return a + b
t = torch.randn(2, 2)
P_ban_add = P = CSEPass(banned_ops=[torch.ops.aten.add])
check(self, f, t, 0, P=P_ban_add) # check that add is banned
check(self, f, t, 1) # check that add is not banned by default
def test_rand_like(self):
def f(x):
a = torch.rand_like(x)
b = torch.rand_like(x)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 0, check_val=False)
def test_rand_n(self):
def f(x):
a = torch.randn(4)
b = torch.randn(4)
return a + b
t = torch.randn(2, 2)
check(self, f, t, 0, check_val=False)
if __name__ == '__main__':
run_tests() |
is openbsd | """
Functions for identifying which platform a machine is running
"""
import multiprocessing
import os
import platform
import subprocess
import sys
import distro
from salt.utils.decorators import memoize as real_memoize
def linux_distribution(full_distribution_name=True):
"""
Simple function to return information about the OS distribution (id_name, version, codename).
"""
if full_distribution_name:
return distro.name(), distro.version(best=True), distro.codename()
return distro.id(), distro.version(best=True), distro.codename()
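# Editor's note: the tuple above is (name, version, codename), e.g. something
# like ("Debian GNU/Linux", "11", "bullseye"); the exact strings come from the
# distro package and these values are purely illustrative.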
@real_memoize
def is_windows():
"""
Simple function to return if a host is Windows or not
"""
return sys.platform.startswith("win")
@real_memoize
def is_proxy():
"""
Return True if this minion is a proxy minion.
Leverages the fact that is_linux() and is_windows()
both return False for proxies.
TODO: Need to extend this for proxies that might run on
other Unices
"""
import __main__ as main
# This is a hack. If a proxy minion is started by other
# means, e.g. a custom script that creates the minion objects
# then this will fail.
ret = False
try:
# Changed this from 'salt-proxy in main...' to 'proxy in main...'
# to support the testsuite's temp script that is called 'cli_salt_proxy'
#
# Add '--proxyid' or '--proxyid=...' in sys.argv so that salt-call
# is seen as a proxy minion
if "proxy" in main.__file__ or any(
arg for arg in sys.argv if arg.startswith("--proxyid")
):
ret = True
except AttributeError:
pass
return ret
@real_memoize
def is_linux():
"""
Simple function to return if a host is Linux or not.
Note for a proxy minion, we need to return something else
"""
return sys.platform.startswith("linux")
@real_memoize
def is_darwin():
"""
Simple function to return if a host is Darwin (macOS) or not
"""
return sys.platform.startswith("darwin")
@real_memoize
def is_sunos():
"""
Simple function to return if host is SunOS or not
"""
return sys.platform.startswith("sunos")
@real_memoize
def is_smartos():
"""
Simple function to return if host is SmartOS (Illumos) or not
"""
if not is_sunos():
return False
else:
return os.uname()[3].startswith("joyent_")
@real_memoize
def is_smartos_globalzone():
"""
Function to return if host is SmartOS (Illumos) global zone or not
"""
if not is_smartos():
return False
else:
try:
zonename_proc = subprocess.Popen(
["zonename"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
zonename_output = (
zonename_proc.communicate()[0].strip().decode(__salt_system_encoding__)
)
zonename_retcode = zonename_proc.poll()
except OSError:
return False
if zonename_retcode:
return False
if zonename_output == "global":
return True
return False
@real_memoize
def is_smartos_zone():
"""
Function to return if host is SmartOS (Illumos) and not the gz
"""
if not is_smartos():
return False
else:
try:
zonename_proc = subprocess.Popen(
["zonename"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
zonename_output = (
zonename_proc.communicate()[0].strip().decode(__salt_system_encoding__)
)
zonename_retcode = zonename_proc.poll()
except OSError:
return False
if zonename_retcode:
return False
if zonename_output == "global":
return False
return True
@real_memoize
def is_junos():
"""
Simple function to return if host is Junos or not
"""
return sys.platform.startswith("freebsd") and os.uname().release.startswith("JNPR")
@real_memoize
def is_freebsd():
"""
Simple function to return if host is FreeBSD or not
"""
return sys.platform.startswith("freebsd")
@real_memoize
def is_netbsd():
"""
Simple function to return if host is NetBSD or not
"""
return sys.platform.startswith("netbsd")
@real_memoize
def METHOD_NAME():
"""
Simple function to return if host is OpenBSD or not
"""
return sys.platform.startswith("openbsd")
@real_memoize
def is_aix():
"""
Simple function to return if host is AIX or not
"""
return sys.platform.startswith("aix")
@real_memoize
def is_fedora():
"""
Simple function to return if host is Fedora or not
"""
(osname, osrelease, oscodename) = (
x.strip('"').strip("'") for x in linux_distribution()
)
return osname == "Fedora"
@real_memoize
def is_photonos():
"""
Simple function to return if host is Photon OS or not
"""
(osname, osrelease, oscodename) = (
x.strip('"').strip("'") for x in linux_distribution()
)
return osname == "VMware Photon OS"
@real_memoize
def is_aarch64():
"""
Simple function to return if host is AArch64 or not
"""
return platform.machine().startswith("aarch64")
def spawning_platform():
"""
Returns True if multiprocessing.get_start_method(allow_none=False) returns "spawn"
This is the default for Windows Python >= 3.4 and macOS on Python >= 3.8.
Salt, however, will force macOS to use spawning by default on all Python versions
"""
return multiprocessing.get_start_method(allow_none=False) == "spawn" |
map language to code | """base translator class"""
from abc import ABC, abstractmethod
from typing import List, Optional, Union
from deep_translator.constants import GOOGLE_LANGUAGES_TO_CODES
from deep_translator.exceptions import (
InvalidSourceOrTargetLanguage,
LanguageNotSupportedException,
)
class BaseTranslator(ABC):
"""
Abstract class that serves as a base for the different translators
"""
def __init__(
self,
base_url: str,
languages: dict = GOOGLE_LANGUAGES_TO_CODES,
source: str = "auto",
target: str = "en",
payload_key: Optional[str] = None,
element_tag: Optional[str] = None,
element_query: Optional[dict] = None,
**url_params,
):
"""
@param source: source language to translate from
@param target: target language to translate to
"""
self._base_url = base_url
self._languages = languages
self._supported_languages = list(self._languages.keys())
if not source:
raise InvalidSourceOrTargetLanguage(source)
if not target:
raise InvalidSourceOrTargetLanguage(target)
self._source, self._target = self.METHOD_NAME(source, target)
self._url_params = url_params
self._element_tag = element_tag
self._element_query = element_query
self.payload_key = payload_key
super().__init__()
@property
def source(self):
return self._source
@source.setter
def source(self, lang):
self._source = lang
@property
def target(self):
return self._target
@target.setter
def target(self, lang):
self._target = lang
def _type(self):
return self.__class__.__name__
def METHOD_NAME(self, *languages):
"""
map a language to its corresponding code (abbreviation) if the user passed the language by its full name
@param languages: list of languages
@return: mapped value of the language or raise an exception if the language is not supported
"""
for language in languages:
if language in self._languages.values() or language == "auto":
yield language
elif language in self._languages.keys():
yield self._languages[language]
else:
raise LanguageNotSupportedException(
language,
message=f"No support for the provided language.\n"
f"Please select on of the supported languages:\n"
f"{self._languages}",
)
def _same_source_target(self) -> bool:
return self._source == self._target
def get_supported_languages(
self, as_dict: bool = False, **kwargs
) -> Union[list, dict]:
"""
return the languages supported by the translator
@param as_dict: if True, the languages will be returned as a dictionary mapping languages to their abbreviations
@return: list or dict
"""
return self._supported_languages if not as_dict else self._languages
def is_language_supported(self, language: str, **kwargs) -> bool:
"""
check if the language is supported by the translator
@param language: a string for 1 language
@return: bool or raise an Exception
"""
if (
language == "auto"
or language in self._languages.keys()
or language in self._languages.values()
):
return True
else:
return False
@abstractmethod
def translate(self, text: str, **kwargs) -> str:
"""
translate a text using a translator under the hood and return the translated text
@param text: text to translate
@param kwargs: additional arguments
@return: str
"""
raise NotImplementedError("You need to implement the translate method!")
def _translate_file(self, path: str, **kwargs) -> str:
"""
translate directly from file
@param path: path to the target file
@type path: str
@param kwargs: additional args
@return: str
"""
try:
with open(path, "r", encoding="utf-8") as f:
text = f.read().strip()
return self.translate(text)
except Exception as e:
raise e
def _translate_batch(self, batch: List[str], **kwargs) -> List[str]:
"""
translate a list of texts
@param batch: list of texts you want to translate
@return: list of translations
"""
if not batch:
raise Exception("Enter your text list that you want to translate")
arr = []
for i, text in enumerate(batch):
translated = self.translate(text, **kwargs)
arr.append(translated)
return arr |
init | #! /usr/bin/env python3
import argparse
from pathlib import Path
import shutil
import subprocess
import sys
from armory import __version__ as armory_version
script_dir = Path(__file__).parent
root_dir = script_dir.parent
armory_frameworks = ["armory", "pytorch-deepspeech", "yolo"]
# NOTE: Podman is not officially supported, but this enables
# use as a drop-in replacement for building.
container_platform = "docker" if shutil.which("docker") else "podman"
def cli_parser(argv=sys.argv[1:]):
parser = argparse.ArgumentParser("build.py")
arguments = (
(
("-f", "--framework"),
dict(
choices=armory_frameworks + ["all"],
help="Framework to build",
required=True,
),
),
(
("-b", "--base-tag"),
dict(
help="Version tag for twosixarmory/armory-base",
default="latest",
required=False,
),
),
(
("--no-cache"),
dict(
action="store_true",
help="Do not use docker cache",
),
),
(
("--no-pull"),
dict(
action="store_true",
help="Do not pull latest base",
),
),
(
("-n", "--dry-run"),
dict(
action="store_true",
help="Do not build, only print commands",
),
),
(
("-p", "--platform"),
dict(
choices=["docker", "podman"],
help="Print verbose output",
default=container_platform,
required=False,
),
),
)
for args, kwargs in arguments:
args = args if isinstance(args, tuple) else (args,)
parser.add_argument(*args, **kwargs)
parser.set_defaults(func=METHOD_NAME)
if len(argv) == 0 or argv[0] in ("usage", "help"):
parser.print_help()
sys.exit(1)
return parser.parse_args(argv)
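# Editor's note -- example invocations of this parser (flags from above,
# paths illustrative):
#   python3 docker/build.py --framework armory
#   python3 docker/build.py -f all --no-cache --dry-run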
def build_worker(framework, version, platform, base_tag, **kwargs):
"""Builds armory container for a given framework."""
# Note: The replace is used to convert the version to a valid docker tag.
version = version.replace("+", ".")
dockerfile = script_dir / f"Dockerfile-{framework}"
build_command = [
f"{platform}",
"build",
"--force-rm",
"--tag",
f"twosixarmory/{framework}:{version}",
"--build-arg",
f"base_image_tag={base_tag}",
"--file",
f"{dockerfile}",
f"{Path().cwd()}",
]
if kwargs.get("no_cache"):
build_command.insert(3, "--no-cache")
if not kwargs.get("no_pull"):
build_command.insert(3, "--pull")
if not dockerfile.exists():
sys.exit(
f"ERROR:\tError building {framework}!\n"
f"\tDockerfile not found: {dockerfile}\n"
)
print(f"EXEC\tPreparing to run:\n" f"\t\t{' '.join(build_command)}")
if not kwargs.get("dry_run"):
return subprocess.run(build_command).returncode
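# Editor's sketch of the command build_worker() assembles (version tag and
# paths are illustrative):
#   docker build --force-rm --pull --tag twosixarmory/armory:0.19.0 \
#       --build-arg base_image_tag=latest --file docker/Dockerfile-armory .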
def METHOD_NAME(*args, **kwargs):
"""Kicks off the build process."""
exit_code = 0 # 0 = success, 1 = failure
frameworks = [kwargs.get("framework", False)]
if frameworks == ["all"]:
frameworks = armory_frameworks
print(f"EXEC:\tRetrieved version {armory_version}.")
print("EXEC:\tCleaning up...")
for key in ["framework", "func"]:
del kwargs[key]
for framework in frameworks:
print(f"EXEC:\tBuilding {framework} container.")
if status := build_worker(framework, armory_version, **kwargs):
exit_code = status
sys.exit(exit_code)
if __name__ == "__main__":
# Ensure correct location
if not (root_dir / "armory").is_dir():
sys.exit(
f"ERROR:\tEnsure this script is ran from the root of the armory repo.\n"
"\tEXAMPLE:\n"
f"\t\t$ python3 {root_dir / 'build.py'}"
)
# Ensure docker/podman is installed
if not shutil.which(container_platform):
sys.exit(
"ERROR:\tCannot find compatible container on the system.\n"
"\tAsk your system administrator to install either `docker` or `podman`."
)
# Parse CLI arguments
arguments = cli_parser()
arguments.func(**vars(arguments)) |
adjust resource limits | import atexit
import faulthandler
import os
import signal
import sys
import unittest
from test import support
from test.support.os_helper import TESTFN_UNDECODABLE, FS_NONASCII
try:
import gc
except ImportError:
gc = None
from test.libregrtest.utils import (setup_unraisable_hook,
setup_threading_excepthook)
UNICODE_GUARD_ENV = "PYTHONREGRTEST_UNICODE_GUARD"
def setup_tests(ns):
try:
stderr_fd = sys.__stderr__.fileno()
except (ValueError, AttributeError):
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
#
# Catch AttributeError for stderr being None.
stderr_fd = None
else:
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True, file=stderr_fd)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True, file=stderr_fd)
METHOD_NAME()
replace_stdout()
support.record_original_stdout(sys.stdout)
if ns.testdir:
# Prepend test directory to sys.path, so runtest() will be able
# to locate tests
sys.path.insert(0, os.path.abspath(ns.testdir))
# Sometimes __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to manually absolutize the __file__ and __path__ of
# the packages to prevent later imports from failing when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
for index, path in enumerate(module.__path__):
module.__path__[index] = os.path.abspath(path)
if getattr(module, '__file__', None):
module.__file__ = os.path.abspath(module.__file__)
if ns.huntrleaks:
unittest.BaseTestSuite._cleanup = False
sys._deactivate_opcache()
if ns.memlimit is not None:
support.set_memlimit(ns.memlimit)
if ns.threshold is not None:
gc.set_threshold(ns.threshold)
support.suppress_msvcrt_asserts(ns.verbose and ns.verbose >= 2)
support.use_resources = ns.use_resources
if hasattr(sys, 'addaudithook'):
# Add an auditing hook for all tests to ensure PySys_Audit is tested
def _test_audit_hook(name, args):
pass
sys.addaudithook(_test_audit_hook)
setup_unraisable_hook()
setup_threading_excepthook()
if ns.timeout is not None:
# For a slow buildbot worker, increase SHORT_TIMEOUT and LONG_TIMEOUT
support.SHORT_TIMEOUT = max(support.SHORT_TIMEOUT, ns.timeout / 40)
support.LONG_TIMEOUT = max(support.LONG_TIMEOUT, ns.timeout / 4)
# If --timeout is short: reduce timeouts
support.LOOPBACK_TIMEOUT = min(support.LOOPBACK_TIMEOUT, ns.timeout)
support.INTERNET_TIMEOUT = min(support.INTERNET_TIMEOUT, ns.timeout)
support.SHORT_TIMEOUT = min(support.SHORT_TIMEOUT, ns.timeout)
support.LONG_TIMEOUT = min(support.LONG_TIMEOUT, ns.timeout)
if ns.xmlpath:
from test.support.testresult import RegressionTestResult
RegressionTestResult.USE_XML = True
# Ensure there's a non-ASCII character in env vars at all times to force
# tests consider this case. See BPO-44647 for details.
if TESTFN_UNDECODABLE and os.supports_bytes_environ:
os.environb.setdefault(UNICODE_GUARD_ENV.encode(), TESTFN_UNDECODABLE)
elif FS_NONASCII:
os.environ.setdefault(UNICODE_GUARD_ENV, FS_NONASCII)
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
stdout = sys.stdout
try:
fd = stdout.fileno()
except ValueError:
# On IDLE, sys.stdout has no file descriptor and is not a TextIOWrapper
# object. Leaving sys.stdout unchanged.
#
# Catch ValueError to catch io.UnsupportedOperation on TextIOBase
# and ValueError on a closed stream.
return
sys.stdout = open(fd, 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
def METHOD_NAME():
"""Adjust the system resource limits (ulimit) if needed."""
try:
import resource
from resource import RLIMIT_NOFILE, RLIM_INFINITY
except ImportError:
return
fd_limit, max_fds = resource.getrlimit(RLIMIT_NOFILE)
# On macOS the default fd limit is sometimes too low (256) for our
# test suite to succeed. Raise it to something more reasonable.
# 1024 is a common Linux default.
desired_fds = 1024
if fd_limit < desired_fds and fd_limit < max_fds:
new_fd_limit = min(desired_fds, max_fds)
try:
resource.setrlimit(RLIMIT_NOFILE, (new_fd_limit, max_fds))
print(f"Raised RLIMIT_NOFILE: {fd_limit} -> {new_fd_limit}")
except (ValueError, OSError) as err:
print(f"Unable to raise RLIMIT_NOFILE from {fd_limit} to "
f"{new_fd_limit}: {err}.") |
client secret | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListAuthorizationServerSecretsResult',
'AwaitableListAuthorizationServerSecretsResult',
'list_authorization_server_secrets',
'list_authorization_server_secrets_output',
]
@pulumi.output_type
class ListAuthorizationServerSecretsResult:
"""
OAuth Server Secrets Contract.
"""
def __init__(__self__, METHOD_NAME=None, resource_owner_password=None, resource_owner_username=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'client_secret' to be a str")
pulumi.set(__self__, "client_secret", METHOD_NAME)
if resource_owner_password and not isinstance(resource_owner_password, str):
raise TypeError("Expected argument 'resource_owner_password' to be a str")
pulumi.set(__self__, "resource_owner_password", resource_owner_password)
if resource_owner_username and not isinstance(resource_owner_username, str):
raise TypeError("Expected argument 'resource_owner_username' to be a str")
pulumi.set(__self__, "resource_owner_username", resource_owner_username)
@property
@pulumi.getter(name="clientSecret")
def METHOD_NAME(self) -> Optional[str]:
"""
oAuth Authorization Server Secrets.
"""
return pulumi.get(self, "client_secret")
@property
@pulumi.getter(name="resourceOwnerPassword")
def resource_owner_password(self) -> Optional[str]:
"""
Can be optionally specified when resource owner password grant type is supported by this authorization server. Default resource owner password.
"""
return pulumi.get(self, "resource_owner_password")
@property
@pulumi.getter(name="resourceOwnerUsername")
def resource_owner_username(self) -> Optional[str]:
"""
Can be optionally specified when resource owner password grant type is supported by this authorization server. Default resource owner username.
"""
return pulumi.get(self, "resource_owner_username")
class AwaitableListAuthorizationServerSecretsResult(ListAuthorizationServerSecretsResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListAuthorizationServerSecretsResult(
METHOD_NAME=self.METHOD_NAME,
resource_owner_password=self.resource_owner_password,
resource_owner_username=self.resource_owner_username)
def list_authorization_server_secrets(authsid: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListAuthorizationServerSecretsResult:
"""
Gets the client secret details of the authorization server.
:param str authsid: Identifier of the authorization server.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['authsid'] = authsid
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20220901preview:listAuthorizationServerSecrets', __args__, opts=opts, typ=ListAuthorizationServerSecretsResult).value
return AwaitableListAuthorizationServerSecretsResult(
METHOD_NAME=pulumi.get(__ret__, 'client_secret'),
resource_owner_password=pulumi.get(__ret__, 'resource_owner_password'),
resource_owner_username=pulumi.get(__ret__, 'resource_owner_username'))
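# Editor's sketch -- calling the plain (non-Output) form above; every resource
# name below is a hypothetical placeholder:
#   secrets = list_authorization_server_secrets(
#       authsid="my-authserver",
#       resource_group_name="my-rg",
#       service_name="my-apim-service")
#   print(secrets.resource_owner_username)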
@_utilities.lift_output_func(list_authorization_server_secrets)
def list_authorization_server_secrets_output(authsid: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListAuthorizationServerSecretsResult]:
"""
Gets the client secret details of the authorization server.
:param str authsid: Identifier of the authorization server.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
... |
test others | '''
Test cases for pyclbr.py
Nick Mathewson
'''
from test.support import run_unittest
import sys
from types import FunctionType, MethodType, BuiltinFunctionType
import pyclbr
from unittest import TestCase
StaticMethodType = type(staticmethod(lambda: None))
ClassMethodType = type(classmethod(lambda c: None))
# Here we test the python class browser code.
#
# The main function in this suite, 'checkModule', compares the output
# of pyclbr with the introspected members of a module. Because pyclbr
# is imperfect (as designed), checkModule is called with a set of
# members to ignore.
class PyclbrTest(TestCase):
def assertListEq(self, l1, l2, ignore):
''' succeed iff {l1} - {ignore} == {l2} - {ignore} '''
missing = (set(l1) ^ set(l2)) - set(ignore)
if missing:
print("l1=%r\nl2=%r\nignore=%r" % (l1, l2, ignore), file=sys.stderr)
self.fail("%r missing" % missing.pop())
def assertHasattr(self, obj, attr, ignore):
''' succeed iff hasattr(obj,attr) or attr in ignore. '''
if attr in ignore: return
if not hasattr(obj, attr): print("???", attr)
self.assertTrue(hasattr(obj, attr),
'expected hasattr(%r, %r)' % (obj, attr))
def assertHaskey(self, obj, key, ignore):
''' succeed iff key in obj or key in ignore. '''
if key in ignore: return
if key not in obj:
print("***",key, file=sys.stderr)
self.assertIn(key, obj)
def assertEqualsOrIgnored(self, a, b, ignore):
''' succeed iff a == b or a in ignore or b in ignore '''
if a not in ignore and b not in ignore:
self.assertEqual(a, b)
def checkModule(self, moduleName, module=None, ignore=()):
''' succeed iff pyclbr.readmodule_ex(modulename) corresponds
to the actual module object, module. Any identifiers in
ignore are ignored. If no module is provided, the appropriate
module is loaded with __import__.'''
ignore = set(ignore) | set(['object'])
if module is None:
# Import it.
# ('<silly>' is to work around an API silliness in __import__)
module = __import__(moduleName, globals(), {}, ['<silly>'])
dict = pyclbr.readmodule_ex(moduleName)
def ismethod(oclass, obj, name):
classdict = oclass.__dict__
if isinstance(obj, MethodType):
# could be a classmethod
if (not isinstance(classdict[name], ClassMethodType) or
obj.__self__ is not oclass):
return False
elif not isinstance(obj, FunctionType):
return False
objname = obj.__name__
if objname.startswith("__") and not objname.endswith("__"):
objname = "_%s%s" % (oclass.__name__, objname)
return objname == name
# Make sure the toplevel functions and classes are the same.
for name, value in dict.items():
if name in ignore:
continue
self.assertHasattr(module, name, ignore)
py_item = getattr(module, name)
if isinstance(value, pyclbr.Function):
self.assertIsInstance(py_item, (FunctionType, BuiltinFunctionType))
if py_item.__module__ != moduleName:
continue # skip functions that came from somewhere else
self.assertEqual(py_item.__module__, value.module)
else:
self.assertIsInstance(py_item, type)
if py_item.__module__ != moduleName:
continue # skip classes that came from somewhere else
real_bases = [base.__name__ for base in py_item.__bases__]
pyclbr_bases = [ getattr(base, 'name', base)
for base in value.super ]
try:
self.assertListEq(real_bases, pyclbr_bases, ignore)
except:
print("class=%s" % py_item, file=sys.stderr)
raise
actualMethods = []
for m in py_item.__dict__.keys():
if ismethod(py_item, getattr(py_item, m), m):
actualMethods.append(m)
foundMethods = []
for m in value.methods.keys():
if m[:2] == '__' and m[-2:] != '__':
foundMethods.append('_'+name+m)
else:
foundMethods.append(m)
try:
self.assertListEq(foundMethods, actualMethods, ignore)
self.assertEqual(py_item.__module__, value.module)
self.assertEqualsOrIgnored(py_item.__name__, value.name,
ignore)
# can't check file or lineno
except:
print("class=%s" % py_item, file=sys.stderr)
raise
# Now check for missing stuff.
def defined_in(item, module):
if isinstance(item, type):
return item.__module__ == module.__name__
if isinstance(item, FunctionType):
return item.__globals__ is module.__dict__
return False
for name in dir(module):
item = getattr(module, name)
if isinstance(item, (type, FunctionType)):
if defined_in(item, module):
self.assertHaskey(dict, name, ignore)
def test_easy(self):
self.checkModule('pyclbr')
self.checkModule('ast')
self.checkModule('doctest', ignore=("TestResults", "_SpoofOut",
"DocTestCase", '_DocTestSuite'))
self.checkModule('difflib', ignore=("Match",))
def test_decorators(self):
# XXX: See comment in pyclbr_input.py for a test that would fail
# if it were not commented out.
#
self.checkModule('test.pyclbr_input', ignore=['om'])
def METHOD_NAME(self):
cm = self.checkModule
# These were once about the 10 longest modules
cm('random', ignore=('Random',)) # from _random import Random as CoreGenerator
cm('cgi', ignore=('log',)) # set with = in module
cm('pickle')
cm('aifc', ignore=('openfp', '_aifc_params')) # set with = in module
cm('sre_parse', ignore=('dump', 'groups')) # from sre_constants import *; property
cm('pdb')
cm('pydoc')
# Tests for modules inside packages
cm('email.parser')
cm('test.test_pyclbr')
def test_issue_14798(self):
# test ImportError is raised when the first part of a dotted name is
# not a package
self.assertRaises(ImportError, pyclbr.readmodule_ex, 'asyncore.foo')
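# Editor's note -- the shape of the data checkModule() consumes, for reference
# (module name illustrative):
#   tree = pyclbr.readmodule_ex("difflib")
#   isinstance(tree["SequenceMatcher"], pyclbr.Class)   # -> True
#   isinstance(tree["unified_diff"], pyclbr.Function)   # -> True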
def test_main():
run_unittest(PyclbrTest)
if __name__ == "__main__":
test_main() |
xpath | # Copyright (C) 2023 Sartography
#
# This file is part of SpiffWorkflow.
#
# SpiffWorkflow is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# SpiffWorkflow is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
from SpiffWorkflow.bpmn.parser.ValidationException import ValidationException
from SpiffWorkflow.bpmn.specs.data_spec import TaskDataReference, BpmnIoSpecification
from .util import first
DEFAULT_NSMAP = {
'bpmn': 'http://www.omg.org/spec/BPMN/20100524/MODEL',
'bpmndi': 'http://www.omg.org/spec/BPMN/20100524/DI',
'dc': 'http://www.omg.org/spec/DD/20100524/DC',
}
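# Editor's note: DEFAULT_NSMAP supplies the prefixes used by the queries in
# this module, e.g. './/bpmn:conditionExpression' in parse_condition() or
# ".//bpmndi:BPMNShape[@bpmnElement='Task_1']//dc:Bounds" in get_position()
# (the 'Task_1' id is illustrative).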
class NodeParser:
def __init__(self, node, nsmap=None, filename=None, lane=None):
self.node = node
self.nsmap = nsmap or DEFAULT_NSMAP
self.filename = filename
self.lane = self._get_lane() or lane
@property
def bpmn_id(self):
return self.node.get('id')
@property
def bpmn_attributes(self):
return {
'description': self.get_description(),
'lane': self.lane,
'bpmn_name': self.node.get('name'),
'documentation': self.parse_documentation(),
'data_input_associations': self.parse_incoming_data_references(),
'data_output_associations': self.parse_outgoing_data_references(),
}
def get_description(self):
return self.process_parser.parser.spec_descriptions.get(self.node.tag)
def METHOD_NAME(self, METHOD_NAME, extra_ns=None):
return self._xpath(self.node, METHOD_NAME, extra_ns)
def doc_xpath(self, METHOD_NAME, extra_ns=None):
root = self.node.getroottree().getroot()
return self._xpath(root, METHOD_NAME, extra_ns)
def attribute(self, attribute, namespace=None, node=None):
if node is None:
node = self.node
prefix = '{' + self.nsmap.get(namespace or 'bpmn') + '}'
return node.attrib.get(f'{prefix}{attribute}')
def parse_condition(self, sequence_flow):
expression = first(self._xpath(sequence_flow, './/bpmn:conditionExpression'))
return expression.text if expression is not None else None
def parse_documentation(self, sequence_flow=None):
node = sequence_flow if sequence_flow is not None else self.node
documentation_node = first(self._xpath(node, './/bpmn:documentation'))
return None if documentation_node is None else documentation_node.text
def parse_incoming_data_references(self):
specs = []
for name in self.METHOD_NAME('./bpmn:dataInputAssociation/bpmn:sourceRef'):
ref = first(self.doc_xpath(f".//bpmn:dataObjectReference[@id='{name.text}']"))
if ref is not None and ref.get('dataObjectRef') in self.process_parser.spec.data_objects:
specs.append(self.process_parser.spec.data_objects[ref.get('dataObjectRef')])
else:
ref = first(self.doc_xpath(f".//bpmn:dataStoreReference[@id='{name.text}']"))
if ref is not None and ref.get('dataStoreRef') in self.process_parser.data_stores:
specs.append(self.process_parser.data_stores[ref.get('dataStoreRef')])
else:
raise ValidationException(f'Cannot resolve dataInputAssociation {name}', self.node, self.filename)
return specs
def parse_outgoing_data_references(self):
specs = []
for name in self.METHOD_NAME('./bpmn:dataOutputAssociation/bpmn:targetRef'):
ref = first(self.doc_xpath(f".//bpmn:dataObjectReference[@id='{name.text}']"))
if ref is not None and ref.get('dataObjectRef') in self.process_parser.spec.data_objects:
specs.append(self.process_parser.spec.data_objects[ref.get('dataObjectRef')])
else:
ref = first(self.doc_xpath(f".//bpmn:dataStoreReference[@id='{name.text}']"))
if ref is not None and ref.get('dataStoreRef') in self.process_parser.data_stores:
specs.append(self.process_parser.data_stores[ref.get('dataStoreRef')])
else:
raise ValidationException(f'Cannot resolve dataOutputAssociation {name}', self.node, self.filename)
return specs
def parse_io_spec(self):
data_refs = {}
for elem in self.METHOD_NAME('./bpmn:ioSpecification/bpmn:dataInput'):
ref = self.create_data_spec(elem, TaskDataReference)
data_refs[ref.bpmn_id] = ref
for elem in self.METHOD_NAME('./bpmn:ioSpecification/bpmn:dataOutput'):
ref = self.create_data_spec(elem, TaskDataReference)
data_refs[ref.bpmn_id] = ref
inputs, outputs = [], []
for ref in self.METHOD_NAME('./bpmn:ioSpecification/bpmn:inputSet/bpmn:dataInputRefs'):
if ref.text in data_refs:
inputs.append(data_refs[ref.text])
for ref in self.METHOD_NAME('./bpmn:ioSpecification/bpmn:outputSet/bpmn:dataOutputRefs'):
if ref.text in data_refs:
outputs.append(data_refs[ref.text])
return BpmnIoSpecification(inputs, outputs)
def create_data_spec(self, item, cls):
return cls(item.attrib.get('id'), item.attrib.get('name'))
def parse_extensions(self, node=None):
return {}
def get_position(self, node=None):
node = node if node is not None else self.node
nodeid = node.get('id')
if nodeid is not None:
bounds = first(self.doc_xpath(f".//bpmndi:BPMNShape[@bpmnElement='{nodeid}']//dc:Bounds"))
if bounds is not None:
return {'x': float(bounds.get('x', 0)), 'y': float(bounds.get('y', 0))}
return {'x': 0.0, 'y': 0.0}
def _get_lane(self):
noderef = first(self.doc_xpath(f".//bpmn:flowNodeRef[text()='{self.bpmn_id}']"))
if noderef is not None:
return noderef.getparent().get('name')
def _xpath(self, node, METHOD_NAME, extra_ns=None):
if extra_ns is not None:
nsmap = self.nsmap.copy()
nsmap.update(extra_ns)
else:
nsmap = self.nsmap
return node.METHOD_NAME(METHOD_NAME, namespaces=nsmap)
def raise_validation_exception(self, message):
raise ValidationException(message, self.node, self.filename) |
save config | import re
import os
import sys
from collections import OrderedDict
from spytest import st
class PoeHooks(object):
def get_vars(self, dut, phase=None):
retval = dict()
retval["mgmt_ifname"] = st.get_mgmt_ifname(dut)
retval["mgmt_ipv4"] = st.get_mgmt_ip(dut)
retval["version"] = self.show_version(dut)
retval["hwsku"] = "Poe"
return retval
def is_kdump_supported(self, dut):
return False
def pre_load_image(self, dut):
return False
def post_cli_recovery(self, scope, dut, cmd, attempt=0):
# scope is session/module/function
# return True to bail-out, False to ignore, None to retry
return True
def post_reboot(self, dut, is_upgrade=False):
return False
def post_config_reload(self, dut):
return False
def post_login(self, dut, **kwargs):
pass
def post_session(self, dut):
return False
def init_config(self, dut, type, hwsku=None, profile="na"):
return False
def extend_config(self, dut, type, ifname_type="none"):
return False
def verify_config(self, dut, type):
return True
def METHOD_NAME(self, dut, type):
return True
def apply_config(self, dut, phase):
return True
def clear_config(self, dut, **kwargs):
return True
def shutdown(self, dut, portlist):
return True
def noshutdown(self, dut, portlist):
return True
def get_status(self, dut, port_csv):
retval = []
for port in port_csv.split(","):
retval.append({"interface": port, "oper": "up", "admin": "up"})
return retval
def get_interface_status(self, dut, port_csv):
retval = self.get_status(dut, port_csv)
return retval[0]["oper"] if retval else None
def show_version(self, dut, **kwargs):
return "Poe Version"
def get_system_status(self, dut, service=None, **kwargs):
return True
def verify_topology(self, hooks, check_type, threads=True, skip_tgen=False):
return True
def set_port_defaults(self, dut, breakout, speed):
return True
def clear_logging(self, dut, **kwargs):
pass
def fetch_syslogs(self, dut, severity=None, since=None):
pass
def ifa_enable(self, dut):
pass
def ztp_disable(self, dut, **kwargs):
pass
def kdump_enable(self, dut):
return True
def upgrade_image(self, dut, url, max_time=1800, skip_error_check=False, migartion=True):
return "success"
def set_mgmt_ip_gw(self, dut, ipmask, gw, **kwargs):
pass
def get_mgmt_ip(self, dut, interface, **kwargs):
return "0.0.0.0"
def renew_mgmt_ip(self, dut, interface, **kwargs):
pass
def upgrade_libsai(self, dut, url):
pass
def get_ifname_type(self, dut):
return st.get_ifname_type(dut)
def set_ifname_type(self, dut, ifname_type):
pass
def get_physical_ifname_map(self, dut):
cmd = "show all"
output = st.show(dut, cmd, skip_tmpl=True)
output = st.parse_show(dut, cmd, output, "poe_show_all.tmpl")
retval = OrderedDict()
for ent in output:
iface = ent["interface"]
retval[iface] = iface
return retval
def debug_system_status(self, dut, log_file=None):
pass
def dut_reboot(self, dut, **kwargs):
return True
def get_onie_grub_config(self, dut, mode):
return "", []
def init_features(self, fgroup, fsupp=None, funsupp=None):
from apis.common.sonic_features import Feature
return Feature(fgroup, fsupp, funsupp)
def init_support(self, hooks, cfg, dut=None):
from apis.common.support import Support
return Support(hooks, cfg, dut)
def init_prompts(self, model=None, logger=None, normal_user_mode=None):
from apis.common.poe_prompts import Prompts
return Prompts("poe", logger, normal_user_mode)
def exec_ssh_remote_dut(self, dut, ipaddress, username, password, command=None, timeout=30, **kwargs):
pass
def verify_prompt(self, dut, prompt):
prompt = prompt.replace("\\", "")
if re.compile(r".*[#|\$|>]\s*$").match(prompt):
return True, False
return False, False
def get_base_prompt(self, dut, **kwargs):
return "Poe"
def get_hostname(self, dut, **kwargs):
return "Poe"
def set_hostname(self, dut, name):
pass
def verify_device_info(self, dut, phase):
return True
def dump_config_db(self, dut):
pass
def show_sai_profile(self, dut):
pass
def is_reboot_confirm(self, dut):
return False
def show_dut_time(self, dut):
pass
def gnmi_cert_config_ensure(self, dut):
pass
def get_mode(self, dut, which):
if which == "normal-user":
return "normal-user"
return "unknown-mode"
def get_regex(self, dut, which, *args):
if which == "sudopass":
return None
if which == "login":
return r"User:\s*$"
if which == "login_anywhere":
return r"User:\s*"
if which == "anyprompt":
if st.get_device_type(dut) in ["icos"]:
return r"[#|>|\$]\s*$"
return r"[#|>]\s*$"
return "unknown"
def get_default_pass(self, dut):
return ""
def get_templates_info(self, dut, model):
return "templates", "sonic"
def get_custom_ui(self, dut):
return "click"
def get_cli_type_record(self, dut, cli_type):
file_name = sys._getframe(5).f_code.co_filename
file_name = os.path.basename(file_name)
func_name = sys._getframe(5).f_code.co_name
return "{}::{},{}".format(file_name, func_name, cli_type)
def verify_ui_support(self, dut, cli_type, cmd):
return cli_type
def audit(self, atype, dut, *args, **kwargs):
return None
def read_syslog(self, dut, lvl, phase, name):
return ""
def read_core(self, dut, name):
return ""
def read_tech_support(self, dut, name):
return ""
def read_sysinfo(self, dut, scope, name):
return {}
def check_kdump_files(self, dut):
return False
def clear_kdump_files(self, dut):
return False
def check_core_files(self, dut):
return False
def clear_core_files(self, dut):
return False
def save_config_db(self, dut, scope, name):
return False
def save_running_config(self, dut, scope, name):
return False
def verify_config_replace(self, dut, scope, res, desc):
return res, desc
def verify_command(self, dut, cmd, cli_type):
return cmd |
breadth | # This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
import sys
import threading
import warnings
from hypothesis import HealthCheck, given, settings, strategies as st
from tests.common.debug import find_any, minimal
from tests.common.utils import flaky
def test_can_generate_with_large_branching():
def flatten(x):
if isinstance(x, list):
return sum(map(flatten, x), [])
else:
return [x]
size = 20
xs = minimal(
st.recursive(
st.integers(),
lambda x: st.lists(x, min_size=size // 2),
max_leaves=size * 2,
),
lambda x: isinstance(x, list) and len(flatten(x)) >= size,
timeout_after=None,
)
assert flatten(xs) == [0] * size
def test_can_generate_some_depth_with_large_branching():
def depth(x):
if x and isinstance(x, list):
return 1 + max(map(depth, x))
else:
return 1
xs = minimal(
st.recursive(st.integers(), st.lists),
lambda x: depth(x) > 1,
timeout_after=None,
)
assert xs in ([0], [[]])
def test_can_find_quite_broad_lists():
def METHOD_NAME(x):
if isinstance(x, list):
return sum(map(METHOD_NAME, x))
else:
return 1
target = 10
broad = minimal(
st.recursive(st.booleans(), lambda x: st.lists(x, max_size=target // 2)),
lambda x: METHOD_NAME(x) >= target,
settings=settings(max_examples=10000),
timeout_after=None,
)
assert METHOD_NAME(broad) == target
def test_drawing_many_near_boundary():
target = 4
ls = minimal(
st.lists(
st.recursive(
st.booleans(),
lambda x: st.lists(
x, min_size=2 * (target - 1), max_size=2 * target
).map(tuple),
max_leaves=2 * target - 1,
)
),
lambda x: len(set(x)) >= target,
timeout_after=None,
)
assert len(ls) == target
def test_can_use_recursive_data_in_sets():
nested_sets = st.recursive(st.booleans(), st.frozensets, max_leaves=3)
find_any(nested_sets, settings=settings(deadline=None))
def flatten(x):
if isinstance(x, bool):
return frozenset((x,))
else:
result = frozenset()
for t in x:
result |= flatten(t)
if len(result) == 2:
break
return result
x = minimal(nested_sets, lambda x: len(flatten(x)) == 2, settings(deadline=None))
assert x in (
frozenset((False, True)),
frozenset((False, frozenset((True,)))),
frozenset((frozenset((False, True)),)),
)
@flaky(max_runs=2, min_passes=1)
def test_can_form_sets_of_recursive_data():
size = 3
trees = st.sets(
st.recursive(
st.booleans(),
lambda x: st.lists(x, min_size=size).map(tuple),
max_leaves=20,
)
)
xs = minimal(trees, lambda x: len(x) >= size, timeout_after=None)
assert len(xs) == size
def test_drawing_from_recursive_strategy_is_thread_safe():
shared_strategy = st.recursive(
st.integers(), lambda s: st.lists(s, max_size=2), max_leaves=20
)
errors = []
@settings(
database=None, deadline=None, suppress_health_check=[HealthCheck.too_slow]
)
@given(data=st.data())
def test(data):
try:
data.draw(shared_strategy)
except Exception as exc:
errors.append(exc)
threads = []
original_recursionlimit = sys.getrecursionlimit()
# We may get a warning here about not resetting recursionlimit,
# since it was changed during execution; ignore it.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for _ in range(4):
threads.append(threading.Thread(target=test))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Cleanup: reset the recursion limit that was (probably) not reset
# automatically in the threaded test.
sys.setrecursionlimit(original_recursionlimit)
assert not errors
SELF_REF = st.recursive(
st.deferred(lambda: st.booleans() | SELF_REF),
lambda s: st.lists(s, min_size=1),
)
@given(SELF_REF)
def test_self_ref_regression(_):
# See https://github.com/HypothesisWorks/hypothesis/issues/2794
pass |
get property |
import copy
import os
import six
import yaml
from collections import Mapping
from geodata.address_expansions.address_dictionaries import address_phrase_dictionaries
from geodata.configs.utils import nested_get, DoesNotExist, recursive_merge, alternative_probabilities
from geodata.math.sampling import cdf, check_probability_distribution
this_dir = os.path.realpath(os.path.dirname(__file__))
ADDRESS_CONFIG_DIR = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'addresses')
DICTIONARIES_DIR = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'dictionaries')
class AddressConfig(object):
def __init__(self, config_dir=ADDRESS_CONFIG_DIR, dictionaries_dir=DICTIONARIES_DIR):
self.address_configs = {}
self.cache = {}
for filename in os.listdir(config_dir):
if not filename.endswith('.yaml'):
continue
config = yaml.load(open(os.path.join(ADDRESS_CONFIG_DIR, filename)))
countries = config.pop('countries', {})
for k in countries.keys():
country_config = countries[k]
config_copy = copy.deepcopy(config)
countries[k] = recursive_merge(config_copy, country_config)
config['countries'] = countries
lang = filename.rsplit('.yaml')[0]
self.address_configs[lang] = config
self.sample_phrases = {}
for language in address_phrase_dictionaries.languages:
for dictionary in address_phrase_dictionaries.language_dictionaries[language]:
self.sample_phrases[(language, dictionary)] = {}
for phrases in address_phrase_dictionaries.phrases[(language, dictionary)]:
self.sample_phrases[(language, dictionary)][phrases[0]] = phrases[1:]
def METHOD_NAME(self, key, language, country=None, default=None):
keys = key.split('.')
config = self.address_configs.get(language, {})
if country:
country_config = config.get('countries', {}).get(country, {})
if country_config:
config = country_config
value = nested_get(config, keys)
if value is not DoesNotExist:
return value
return default
def cache_key(self, prop, language, dictionaries=(), country=None):
return (prop, language, country, tuple(dictionaries))
def alternative_probabilities(self, prop, language, dictionaries=(), country=None):
'''Get a probability distribution over alternatives'''
key = self.cache_key(prop, language, dictionaries, country=country)
if key not in self.cache:
properties = self.METHOD_NAME(prop, language, country=country, default=None)
if properties is None:
return None, None
alternatives, probs = alternative_probabilities(properties)
if alternatives is None:
return None, None
forms = []
form_probs = []
for props, prob in zip(alternatives, probs):
phrases, phrase_probs = self.form_probabilities(props, language, dictionaries=dictionaries)
forms.extend([(p, props) for p in phrases])
form_probs.extend([prob * p for p in phrase_probs])
sample_probability = properties.get('sample_probability')
if sample_probability is not None:
sample_phrases = []
for dictionary in dictionaries:
phrases = self.sample_phrases.get((language, dictionary), [])
for canonical, surface_forms in six.iteritems(phrases):
sample_phrases.append(canonical)
sample_phrases.extend(surface_forms)
# Note: use the outer properties dictionary e.g. units.alphanumeric
forms.extend([(p, properties) for p in sample_phrases])
form_probs.extend([float(sample_probability) / len(sample_phrases)] * len(sample_phrases))
try:
check_probability_distribution(form_probs)
except AssertionError:
print 'values were: {}'.format(forms)
raise
form_probs_cdf = cdf(form_probs)
self.cache[key] = (forms, form_probs_cdf)
return self.cache[key]
def form_probabilities(self, properties, language, dictionaries=()):
probs = []
alternatives = []
canonical_prob = properties.get('canonical_probability', 1.0)
canonical = properties['canonical']
alternatives.append(canonical)
probs.append(canonical_prob)
if 'abbreviated_probability' in properties:
probs.append(properties['abbreviated_probability'])
abbreviated = properties['abbreviated']
assert isinstance(abbreviated, basestring)
alternatives.append(abbreviated)
if properties.get('sample', False) and 'sample_probability' in properties:
sample_prob = properties['sample_probability']
samples = set()
for dictionary in dictionaries:
phrases = self.sample_phrases.get((language, dictionary), {})
samples |= set(phrases.get(canonical, []))
if 'sample_exclude' in properties:
samples -= set(properties['sample_exclude'])
if samples:
for phrase in samples:
probs.append(sample_prob / float(len(samples)))
alternatives.append(phrase)
else:
total = sum(probs)
probs = [p / total for p in probs]
try:
check_probability_distribution(probs)
except AssertionError:
            print('values were: {}'.format(alternatives))
raise
return alternatives, probs
address_config = AddressConfig() |
get attr | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Primitive operators in the TVM IR."""
import tvm._ffi
from . import _ffi_api
from .expr import RelayExpr
@tvm._ffi.register_object("Op")
class Op(RelayExpr):
"""Primitive operator in the IR."""
def __init__(self):
raise RuntimeError("Cannot create op, use get instead")
def astext(self, show_meta_data=True, annotate=None):
"""Get the text format of the expression.
Parameters
----------
show_meta_data : bool
Whether to include meta data section in the text
if there is meta data.
annotate: Optional[Object->str]
Optionally annotate function to provide additional
information in the comment block.
Returns
-------
text : str
The text format of the expression.
Notes
-----
The meta data section is necessary to fully parse the text format.
        However, it can contain dumps that are big (e.g. constant weights),
so it can be helpful to skip printing the meta data section.
"""
from tvm.relay import astext # pylint: disable=import-outside-toplevel
return astext(self, show_meta_data, annotate)
@staticmethod
def get(op_name):
"""Get the Op for a given name
Parameters
----------
op_name : str
The operator name
Returns
-------
op : Op
The op of the corresponding name
"""
return _ffi_api.GetOp(op_name)
def METHOD_NAME(self, attr_name):
"""Get additional attribute about the operator.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : object
The attribute value
"""
return _ffi_api.OpGetAttr(self, attr_name)
def has_attr(self, attr_name):
"""Check whether the operator has additional attribute.
Parameters
----------
attr_name : str
The attribute name.
Returns
-------
value : bool
Whether the operator has additional attribute
"""
return _ffi_api.OpHasAttr(self, attr_name)
def set_attr(self, attr_name, value, plevel=10):
"""Set attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
value : object
The attribute value
plevel : int
The priority level
"""
_ffi_api.OpSetAttr(self, attr_name, value, plevel)
def reset_attr(self, attr_name):
"""Reset attribute about the operator.
Parameters
----------
attr_name : str
The attribute name
"""
_ffi_api.OpResetAttr(self, attr_name)
def add_type_rel(self, rel_name, type_rel_func=None):
"""Attach the type function corresponding to the return type.
Parameters
----------
rel_name : str
The type relation name to register.
type_rel_func : Optional[function (args: List[Type], attrs: Attrs) -> Type]
The backing relation function which can solve an arbitrary relation on variables.
        Differences from type_rel_func in C++:
        1) When type_rel_func is not None:
           a) OpAddTypeRel on the C++ side adapts type_rel_func with a TypeReporter to
              the calling convention of the Relay type system.
           b) type_rel_func returns the output argument's type; returning None means the
              output type cannot be inferred.
           c) Only single-output operators are supported for now; the last argument is the
              output tensor.
        2) When type_rel_func is None, a predefined type relation in Relay is called,
           looked up as ``tvm.relay.type_relation.`` + rel_name.
"""
_ffi_api.OpAddTypeRel(self, rel_name, type_rel_func)
def add_argument(self, name, type, description): # pylint: disable=redefined-builtin
"""Add arguments information to the function.
Parameters
----------
name : str
The argument name.
type : str
The argument type.
description : str
The argument description.
"""
_ffi_api.OpAddArgument(self, name, type, description)
def set_support_level(self, level):
"""Set the support level of op.
Parameters
----------
level : int
The support level.
"""
_ffi_api.OpSetSupportLevel(self, level)
def set_num_inputs(self, n):
"""Set the support level of op.
Parameters
----------
n : int
The input number.
"""
_ffi_api.OpSetNumInputs(self, n)
def set_attrs_type_key(self, key):
"""Set the attribute type key of op.
Parameters
----------
key : str
The type key.
"""
_ffi_api.OpSetAttrsTypeKey(self, key)
@staticmethod
def list_op_names():
"""List all the op names in the op registry.
Returns
-------
value : List[str]
The registered op names
"""
return _ffi_api.ListOpNames()
def register_op_attr(op_name, attr_key, value=None, level=10):
"""Register an operator property of an operator by name.
Parameters
----------
op_name : str
The name of operator
attr_key : str
The attribute name.
value : object, optional
The value to set
level : int, optional
The priority level
Returns
-------
fregister : function
Register function if value is not specified.
"""
def _register(v):
"""internal register function"""
_ffi_api.RegisterOpAttr(op_name, attr_key, v, level)
return v
return _register(value) if value is not None else _register
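# Illustrative usage (op and attribute names are hypothetical): passing a value
# registers it immediately, e.g. register_op_attr("my_op", "TMyAttr", value=42),
# while omitting the value returns a decorator:
#   @register_op_attr("my_op", "TMyAttr")
#   def _my_attr_impl(expr):
#       return expr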
def register_intrin_lowering(
op_name,
target,
*,
f=None,
level=10,
):
"""Register Op lowering function
Parameters
----------
op_name : str
The op name
target : str
The target string for given intrinsic lowering function
f : function, optional
The function to be registered.
level : int
The priority level
Returns
-------
fregister : function
Register op lowering function if f is not specified.
"""
def _register(f):
"""internal register function"""
_ffi_api.RegisterOpLowerIntrinsic(op_name, f, target, level)
return f
return _register(f) if f is not None else _register |
output | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network vnet peering wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/virtualnetworks/{}/virtualnetworkpeerings/{}", "2018-11-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self.METHOD_NAME()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.vnet_name = AAZStrArg(
options=["--vnet-name"],
help="The virtual network (VNet) name.",
required=True,
id_part="name",
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="The name of the VNet peering.",
required=True,
id_part="child_name_1",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.VirtualNetworkPeeringsGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def METHOD_NAME(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class VirtualNetworkPeeringsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/virtualNetworkPeerings/{virtualNetworkPeeringName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"virtualNetworkName", self.ctx.args.vnet_name,
required=True,
),
**self.serialize_url_param(
"virtualNetworkPeeringName", self.ctx.args.name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2018-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.etag = AAZStrType()
_schema_on_200.id = AAZStrType()
_schema_on_200.name = AAZStrType()
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
properties = cls._schema_on_200.properties
properties.allow_forwarded_traffic = AAZBoolType(
serialized_name="allowForwardedTraffic",
)
properties.allow_gateway_transit = AAZBoolType(
serialized_name="allowGatewayTransit",
)
properties.allow_virtual_network_access = AAZBoolType(
serialized_name="allowVirtualNetworkAccess",
)
properties.peering_state = AAZStrType(
serialized_name="peeringState",
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
)
properties.remote_address_space = AAZObjectType(
serialized_name="remoteAddressSpace",
)
properties.remote_virtual_network = AAZObjectType(
serialized_name="remoteVirtualNetwork",
)
properties.use_remote_gateways = AAZBoolType(
serialized_name="useRemoteGateways",
)
remote_address_space = cls._schema_on_200.properties.remote_address_space
remote_address_space.address_prefixes = AAZListType(
serialized_name="addressPrefixes",
)
address_prefixes = cls._schema_on_200.properties.remote_address_space.address_prefixes
address_prefixes.Element = AAZStrType()
remote_virtual_network = cls._schema_on_200.properties.remote_virtual_network
remote_virtual_network.id = AAZStrType()
return cls._schema_on_200
class _WaitHelper:
"""Helper class for Wait"""
__all__ = ["Wait"] |
compute am scores and lm scores | import math
from typing import List, Tuple
import torch
try:
import k2
except (ImportError, ModuleNotFoundError):
k2 = None
def remove_repeated_and_leq(tokens: List[int], blank_id: int = 0):
"""Generate valid token sequence.
Result may be used as input of transformer decoder and neural language model.
Fristly, remove repeated token from a "token alignment" seqs;
Then remove blank symbols.
This fuction may be replaced by tokenizing word_seqs with tokenizer
or composeing word_seqs_fsas with L_inv.fst
or composing token_seqs with ctc_topo.
Current method is slelected other than previous three methods
because it won't need an extra object, i.e. tokenizer, L.fst or ctc_topo.
"""
new_tokens = []
previous = None
for token in tokens:
if token != previous:
new_tokens.append(token)
previous = token
new_tokens = [token for token in new_tokens if token > blank_id]
return new_tokens
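# Worked example: with blank_id=0, the alignment [0, 1, 1, 0, 2, 2, 3] first
# collapses repeats to [0, 1, 0, 2, 3] and then drops ids <= blank_id,
# returning [1, 2, 3].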
def _intersect_device(
a_fsas: k2.Fsa,
b_fsas: k2.Fsa,
b_to_a_map: torch.Tensor,
sorted_match_a: bool,
batch_size: int = 500,
):
"""Wrap k2.intersect_device
This is a wrapper of k2.intersect_device and its purpose is to split
b_fsas into several batches and process each batch separately to avoid
CUDA OOM error.
The arguments and return value of this function are the same as
k2.intersect_device.
NOTE: You can decrease batch_size in case of CUDA out of memory error.
"""
assert k2 is not None, "please follow 'tools/installers' to install"
num_fsas = b_fsas.shape[0]
if num_fsas <= batch_size:
return k2.intersect_device(
a_fsas, b_fsas, b_to_a_map=b_to_a_map, sorted_match_a=sorted_match_a
)
num_batches = int(math.ceil(float(num_fsas) / batch_size))
splits = []
for i in range(num_batches):
start = i * batch_size
end = min(start + batch_size, num_fsas)
splits.append((start, end))
ans = []
for start, end in splits:
indexes = torch.arange(start, end).to(b_to_a_map)
fsas = k2.index_fsa(b_fsas, indexes)
b_to_a = k2.index_select(b_to_a_map, indexes)
path_lats = k2.intersect_device(
a_fsas, fsas, b_to_a_map=b_to_a, sorted_match_a=sorted_match_a
)
ans.append(path_lats)
return k2.cat(ans)
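# For example, with batch_size=500 and 1203 FSAs in b_fsas, the work is split
# into the slices [0, 500), [500, 1000) and [1000, 1203), and the per-batch
# lattices are concatenated with k2.cat.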
def METHOD_NAME(
lats: k2.Fsa,
word_fsas_with_epsilon_loops: k2.Fsa,
path_to_seq_map: torch.Tensor,
device: str = "cuda",
batch_size: int = 500,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute AM and LM scores of n-best lists (represented as word_fsas).
Args:
lats:
An FsaVec, which is the output of `k2.intersect_dense_pruned`.
It must have the attribute `lm_scores`.
word_fsas_with_epsilon_loops:
An FsaVec representing a n-best list. Note that it has been processed
by `k2.add_epsilon_self_loops`.
path_to_seq_map:
A 1-D torch.Tensor with dtype torch.int32. path_to_seq_map[i] indicates
which sequence the i-th Fsa in word_fsas_with_epsilon_loops belongs to.
path_to_seq_map.numel() == word_fsas_with_epsilon_loops.arcs.dim0().
batch_size:
Batchify the n-best list when intersecting with inverted_lats.
        You can tune this to avoid GPU OOM issues or to increase GPU utilization.
Returns:
Return a tuple of (1-D torch.Tensor, 1-D torch.Tensor) containing
the AM and LM scores of each path.
`am_scores.numel() == word_fsas_with_epsilon_loops.shape[0]`
`lm_scores.numel() == word_fsas_with_epsilon_loops.shape[0]`
"""
assert (
k2 is not None
), "k2 is not installed, please follow 'tools/installers' to install"
assert len(lats.shape) == 3
    # k2.compose() currently does not support b_to_a_map. To avoid
# replicating `lats`, we use k2.intersect_device here.
#
# lats has phone IDs as `labels` and word IDs as aux_labels, so we
# need to invert it here.
inverted_lats = k2.invert(lats)
# Now the `labels` of inverted_lats are word IDs (a 1-D torch.Tensor)
    # and its `aux_labels` are phone IDs (a k2.RaggedInt with 2 axes)
# Remove its `aux_labels` since it is not needed in the
# following computation
del inverted_lats.aux_labels
inverted_lats = k2.arc_sort(inverted_lats)
am_path_lats = _intersect_device(
inverted_lats,
word_fsas_with_epsilon_loops,
b_to_a_map=path_to_seq_map,
sorted_match_a=True,
batch_size=batch_size,
)
am_path_lats = k2.top_sort(k2.connect(am_path_lats))
# The `scores` of every arc consists of `am_scores` and `lm_scores`
tot_score_device = "cpu"
if hasattr(lats, "lm_scores"):
am_path_lats.scores = am_path_lats.scores - am_path_lats.lm_scores
am_scores = (
am_path_lats.to(tot_score_device)
.get_tot_scores(use_double_scores=True, log_semiring=False)
.to(device)
)
# Start to compute lm_scores
am_path_lats.scores = am_path_lats.lm_scores
lm_scores = (
am_path_lats.to(tot_score_device)
.get_tot_scores(use_double_scores=True, log_semiring=False)
.to(device)
)
else:
am_scores = (
am_path_lats.to(tot_score_device)
.get_tot_scores(use_double_scores=True, log_semiring=False)
.to(device)
)
lm_scores = None
return am_scores, lm_scores
def nbest_am_lm_scores(
lats: k2.Fsa,
num_paths: int,
device: str = "cuda",
batch_size: int = 500,
):
"""Compute am scores with word_seqs
Compatible with both ctc_decoding or TLG decoding.
"""
assert (
k2 is not None
), "k2 is not installed, please follow 'tools/installers' to install"
paths = k2.random_paths(lats, num_paths=num_paths, use_double_scores=True)
if isinstance(lats.aux_labels, torch.Tensor):
word_seqs = k2.ragged.index(lats.aux_labels.contiguous(), paths)
else:
# '_k2.RaggedInt' object has no attribute 'contiguous'
word_seqs = lats.aux_labels.index(paths)
word_seqs = word_seqs.remove_axis(word_seqs.num_axes - 2)
# With ctc_decoding, word_seqs stores token_ids.
# With TLG decoding, word_seqs stores word_ids.
word_seqs = word_seqs.remove_values_leq(0)
unique_word_seqs, num_repeats, new2old = word_seqs.unique(
need_num_repeats=True, need_new2old_indexes=True
)
seq_to_path_shape = unique_word_seqs.shape.get_layer(0)
path_to_seq_map = seq_to_path_shape.row_ids(1)
# used to split final computed tot_scores
seq_to_path_splits = seq_to_path_shape.row_splits(1)
unique_word_seqs = unique_word_seqs.remove_axis(0)
word_fsas = k2.linear_fsa(unique_word_seqs)
word_fsas_with_epsilon_loops = k2.add_epsilon_self_loops(word_fsas)
am_scores, lm_scores = METHOD_NAME(
lats, word_fsas_with_epsilon_loops, path_to_seq_map, device, batch_size
)
token_seqs = k2.ragged.index(lats.labels.contiguous(), paths)
token_seqs = token_seqs.remove_axis(0)
token_ids, _ = token_seqs.index(new2old, axis=0)
token_ids = token_ids.tolist()
# Now remove repeated tokens and 0s and -1s.
token_ids = [remove_repeated_and_leq(tokens) for tokens in token_ids]
return am_scores, lm_scores, token_ids, new2old, path_to_seq_map, seq_to_path_splits |
post operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkfabric acl delete",
)
class Delete(AAZCommand):
"""Delete the Access Control List resource
:example: Delete the Access Control List
az networkfabric acl delete --resource-group "example-rg" --resource-name "example-acl"
"""
_aaz_info = {
"version": "2023-06-15",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.managednetworkfabric/accesscontrollists/{}", "2023-06-15"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_name = AAZStrArg(
options=["--resource-name"],
help="Name of the Access Control List",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of the resource group",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.AccessControlListsDelete(ctx=self.ctx)()
self.METHOD_NAME()
@register_callback
def pre_operations(self):
pass
@register_callback
def METHOD_NAME(self):
pass
class AccessControlListsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedNetworkFabric/accessControlLists/{accessControlListName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"accessControlListName", self.ctx.args.resource_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-06-15",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
set low threshold | #!/usr/bin/env python
########################################################################
# DellEMC N3248PXE
#
# Module contains an implementation of SONiC Platform Base API and
# provides the Thermals' information which are available in the platform
#
########################################################################
try:
import os
from sonic_platform_base.thermal_base import ThermalBase
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
class Thermal(ThermalBase):
"""DellEMC Platform-specific Thermal class"""
# [ Sensor-Name, Sensor-ID ]
SENSOR_MAPPING = [
['Switch Near Temperature', '7-0049'],
['Switch Rear Temperature', '7-004a'],
['Front Panel PHY Temperature', '7-004b'],
['Near Front Panel Temperature', '7-004c'],
['Middle Fan Tray Temperature', '7-004f'],
]
def __init__(self, thermal_index):
ThermalBase.__init__(self)
self.index = thermal_index + 1
temp_hwmon = '/sys/bus/i2c/devices/' + self.SENSOR_MAPPING[thermal_index][1] + '/hwmon'
self.temp_file = temp_hwmon + '/' + os.listdir(temp_hwmon)[0] + '/' + 'temp1_input'
def get_name(self):
"""
Retrieves the name of the thermal
Returns:
string: The name of the thermal
"""
return self.SENSOR_MAPPING[self.index - 1][0]
def get_presence(self):
"""
Retrieves the presence of the thermal
Returns:
bool: True if thermal is present, False if not
"""
return True
def get_model(self):
"""
Retrieves the model number (or part number) of the Thermal
Returns:
string: Model/part number of Thermal
"""
return 'NA'
def get_serial(self):
"""
Retrieves the serial number of the Thermal
Returns:
string: Serial number of Thermal
"""
return 'NA'
def get_status(self):
"""
Retrieves the operational status of the thermal
Returns:
A boolean value, True if thermal is operating properly,
False if not
"""
return True
def get_temperature(self):
"""
Retrieves current temperature reading from thermal
Returns:
A float number of current temperature in Celsius up to
nearest thousandth of one degree Celsius, e.g. 30.125
"""
temperature = 0.0
        try:
            with open(self.temp_file) as f:
                temperature = float(f.read()) / 1000.0
except Exception:
pass
return float(temperature)
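    # The hwmon temp1_input file reports millidegrees Celsius, hence the
    # division by 1000.0 above (e.g. a raw reading of 30125 is 30.125 C).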
def get_high_threshold(self):
"""
Retrieves the high threshold temperature of thermal
Returns:
A float number, the high threshold temperature of thermal in
Celsius up to nearest thousandth of one degree Celsius,
e.g. 30.125
"""
return 75.0
def get_low_threshold(self):
"""
Retrieves the low threshold temperature of thermal
Returns:
A float number, the low threshold temperature of thermal in
Celsius up to nearest thousandth of one degree Celsius,
e.g. 30.125
"""
return 0.0
def set_high_threshold(self, temperature):
"""
Sets the high threshold temperature of thermal
Args :
temperature: A float number up to nearest thousandth of one
degree Celsius, e.g. 30.125
Returns:
A boolean, True if threshold is set successfully, False if
not
"""
# Thermal threshold values are pre-defined based on HW.
return False
def METHOD_NAME(self, temperature):
"""
Sets the low threshold temperature of thermal
Args :
temperature: A float number up to nearest thousandth of one
degree Celsius, e.g. 30.125
Returns:
A boolean, True if threshold is set successfully, False if
not
"""
# Thermal threshold values are pre-defined based on HW.
return False |
test mc2step symm 4o4e | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import mcscf
def setUpModule():
global mol, molsym, m, msym
b = 1.4
mol = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
)
m = scf.RHF(mol)
m.conv_tol = 1e-9
m.scf()
molsym = gto.M(
verbose = 5,
output = '/dev/null',
atom = [
['N',( 0.000000, 0.000000, -b/2)],
['N',( 0.000000, 0.000000, b/2)], ],
basis = {'N': 'ccpvdz', },
symmetry = True
)
msym = scf.RHF(molsym)
msym.conv_tol = 1e-9
msym.scf()
def tearDownModule():
global mol, molsym, m, msym
mol.stdout.close()
molsym.stdout.close()
del mol, molsym, m, msym
class KnownValues(unittest.TestCase):
def test_mc1step_4o4e(self):
mc = mcscf.CASSCF(m, 4, 4)
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc1step_4o4e_internal_rotation(self):
mc = mcscf.CASSCF(m, 4, 4)
mc.internal_rotation = True
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc2step_4o4e(self):
mc = mcscf.CASSCF(m, 4, 4)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc1step_6o6e_high_cost(self):
mc = mcscf.CASSCF(m, 6, 6)
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc2step_6o6e_high_cost(self):
mc = mcscf.CASSCF(m, 6, 6)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc1step_symm_4o4e(self):
mc = mcscf.CASSCF(msym, 4, 4)
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def METHOD_NAME(self):
mc = mcscf.CASSCF(msym, 4, 4)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.7015375913946591, 4)
def test_mc1step_symm_6o6e_high_cost(self):
mc = mcscf.CASSCF(msym, 6, 6)
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_mc2step_symm_6o6e_high_cost(self):
mc = mcscf.CASSCF(msym, 6, 6)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.980105451388, 7)
def test_casci_4o4e(self):
mc = mcscf.CASCI(m, 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.8896744464714, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910275883606078, 4)
def test_casci_symm_4o4e(self):
mc = mcscf.CASCI(msym, 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.8896744464714, 7)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910275883606078, 4)
mc.wfnsym = 'A2u'
# raised by mc.fcisolver.guess_wfnsym
self.assertRaises(RuntimeError, mc.kernel)
def test_casci_from_uhf(self):
mf = scf.UHF(mol)
mf.scf()
mc = mcscf.CASCI(mf, 4, 4)
emc = mc.casci()[0]
self.assertAlmostEqual(emc, -108.8896744464714, 6)
self.assertAlmostEqual(numpy.linalg.norm(mc.analyze()),
2.6910275883606078, 4)
def test_casci_from_uhf1(self):
mf = scf.UHF(mol)
mf.scf()
mc = mcscf.CASSCF(mf, 4, 4)
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
emc = mc.mc2step()[0]
self.assertAlmostEqual(emc, -108.913786407955, 7)
def test_frozen1s(self):
mc = mcscf.CASSCF(msym, 4, 4)
mc.frozen = 3
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.91373646206542, 7)
def test_frozenselect(self):
mc = mcscf.CASSCF(msym, 4, 4)
mc.frozen = [i-1 for i in [19, 20, 26, 27]]
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.91238513746941, 7)
def test_wfnsym(self):
mc = mcscf.CASSCF(msym, 4, (3,1))
mc.fcisolver.wfnsym = 14
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.74508322877787, 7)
mc.wfnsym = 'A2u'
emc = mc.mc1step()[0]
self.assertAlmostEqual(emc, -108.69019443475308, 7)
def test_ucasci(self):
mc = mcscf.UCASCI(msym, 4, (3,1))
emc = mc.kernel()[0]
self.assertAlmostEqual(emc, -108.77486560653847, 7)
def test_ucasscf_high_cost(self):
mc = mcscf.UCASSCF(msym, 4, (3,1))
emc = mc.kernel()[0]
self.assertAlmostEqual(emc, -108.80789718975041, 7)
def test_newton_casscf(self):
mc = mcscf.newton(mcscf.CASSCF(m, 4, 4)).run()
self.assertAlmostEqual(mc.e_tot, -108.9137864132358, 8)
def test_newton_casscf_symm(self):
mc = mcscf.newton(mcscf.CASSCF(msym, 4, 4)).run()
self.assertAlmostEqual(mc.e_tot, -108.9137864132358, 8)
if __name__ == "__main__":
print("Full Tests for N2")
unittest.main() |
test slotted | """Slot tests
Made for Jython.
"""
from test import test_support
import unittest
# The strict tests fail on PyPy (but work on CPython and Jython).
# They're questionable
strict = True
class SlottedTestCase(unittest.TestCase):
def METHOD_NAME(self):
class Foo(object):
__slots__ = 'bar'
self.assert_('__dict__' not in Foo.__dict__)
foo = Foo()
self.assert_(not hasattr(foo, '__dict__'))
foo.bar = 'hello bar'
self.assertEqual(foo.bar, 'hello bar')
self.assertRaises(AttributeError, setattr, foo, 'foo', 'hello foo')
class Baz(object):
__slots__ = ['python', 'jython']
self.assert_('__dict__' not in Baz.__dict__)
baz = Baz()
self.assert_(not hasattr(baz, '__dict__'))
baz.python = 'hello python'
baz.jython = 'hello jython'
self.assertEqual(baz.python, 'hello python')
self.assertEqual(baz.jython, 'hello jython')
self.assertRaises(AttributeError, setattr, baz, 'foo', 'hello')
class SlottedWithDictTestCase(unittest.TestCase):
def test_subclass(self):
class Base(object):
pass
class Foo(Base):
__slots__ = 'bar'
self.assert_('__dict__' not in Foo.__dict__)
foo = Foo()
self.assert_(hasattr(foo, '__dict__'))
foo.bar = 'hello bar'
foo.foo = 'hello foo'
self.assertEqual(foo.bar, 'hello bar')
self.assertEqual(foo.__dict__, {'foo': 'hello foo'})
def test_subclass_mro(self):
class Base(object):
pass
class Slotted(object):
__slots__ = 'baz'
class Foo(Slotted, Base):
__slots__ = 'bar'
if strict:
self.assert_('__dict__' in Foo.__dict__)
self.assertEqual(Foo.__dict__['__dict__'].__objclass__, Foo)
foo = Foo()
self.assert_(hasattr(foo, '__dict__'))
foo.bar = 'hello bar'
foo.baz = 'hello baz'
foo.foo = 'hello foo'
self.assertEqual(foo.bar, 'hello bar')
self.assertEqual(foo.baz, 'hello baz')
self.assertEqual(foo.__dict__, {'foo': 'hello foo'})
class Bar(Slotted, Base):
pass
if strict:
self.assert_('__dict__' in Bar.__dict__)
self.assertEqual(Bar.__dict__['__dict__'].__objclass__, Bar)
bar = Bar()
self.assert_(hasattr(bar, '__dict__'))
bar.bar = 'hello bar'
bar.baz = 'hello baz'
bar.foo = 'hello foo'
self.assertEqual(bar.bar, 'hello bar')
self.assertEqual(bar.baz, 'hello baz')
self.assertEqual(bar.__dict__, {'foo': 'hello foo', 'bar': 'hello bar'})
def test_subclass_oldstyle(self):
class OldBase:
pass
class Foo(OldBase, object):
__slots__ = 'bar'
if strict:
self.assert_('__dict__' in Foo.__dict__)
self.assertEqual(Foo.__dict__['__dict__'].__objclass__, Foo)
foo = Foo()
self.assert_(hasattr(foo, '__dict__'))
foo.bar = 'hello bar'
foo.foo = 'hello foo'
self.assertEqual(foo.bar, 'hello bar')
self.assertEqual(foo.__dict__, {'foo': 'hello foo'})
class Bar(OldBase, object):
__slots__ = '__dict__'
self.assert_('__dict__' in Bar.__dict__)
self.assertEqual(Bar.__dict__['__dict__'].__objclass__, Bar)
bar = Bar()
self.assert_(hasattr(bar, '__dict__'))
bar.bar = 'hello bar'
bar.foo = 'hello foo'
self.assertEqual(bar.bar, 'hello bar')
self.assertEqual(bar.__dict__, {'foo': 'hello foo', 'bar': 'hello bar'})
def test_mixin_oldstyle(self):
class OldBase:
pass
class NewBase(object):
pass
class Baz(NewBase, OldBase):
__slots__ = 'baz'
self.assert_('__dict__' not in Baz.__dict__)
baz = Baz()
self.assert_(hasattr(baz, '__dict__'))
baz.baz = 'hello baz'
baz.bar = 'hello bar'
self.assertEqual(baz.baz, 'hello baz')
self.assertEqual(baz.bar, 'hello bar')
self.assertEqual(baz.__dict__, {'bar': 'hello bar'})
class SlottedWithWeakrefTestCase(unittest.TestCase):
def test_subclass_oldstyle(self):
class OldBase:
pass
class Foo(OldBase, object):
__slots__ = '__dict__'
self.assert_(hasattr(Foo, '__weakref__'))
def test_main():
test_support.run_unittest(SlottedTestCase,
SlottedWithDictTestCase,
SlottedWithWeakrefTestCase)
if __name__ == '__main__':
test_main() |
on reconnect | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This provides the service to publish to the queue."""
import asyncio
import json
import logging
import random
import string
import nest_asyncio # noqa: I001
from flask import _app_ctx_stack
from nats.aio.client import Client as NATS, DEFAULT_CONNECT_TIMEOUT # noqa N814; by convention the name is NATS
from stan.aio.client import Client as STAN # noqa N814; by convention the name is STAN
class QueueService():
"""Provides services to use the Queue from Flask.
For ease of use, this follows the style of a Flask Extension
"""
def __init__(self, app=None, loop=None):
"""Initialize, supports setting the app context on instantiation."""
# Default NATS Options
self.name = 'default_api_client'
self.nats_options = {}
self.stan_options = {}
self.loop = loop
self.nats_servers = None
self.subject = None
self.logger = logging.getLogger()
if app is not None:
self.init_app(app, self.loop)
def init_app(self, app, loop=None,
nats_options=None, stan_options=None):
"""Initialize the extension.
:param app: Flask app
:return: naked
"""
nest_asyncio.apply()
self.name = app.config.get('NATS_CLIENT_NAME')
self.loop = loop or asyncio.get_event_loop()
self.nats_servers = app.config.get('NATS_SERVERS').split(',')
self.subject = app.config.get('NATS_FILER_SUBJECT')
default_nats_options = {
'name': self.name,
'io_loop': self.loop,
'servers': self.nats_servers,
'connect_timeout': app.config.get('NATS_CONNECT_TIMEOUT', DEFAULT_CONNECT_TIMEOUT),
# NATS handlers
'error_cb': self.on_error,
'closed_cb': self.on_close,
'reconnected_cb': self.METHOD_NAME,
'disconnected_cb': self.on_disconnect,
}
if not nats_options:
nats_options = {}
self.nats_options = {**default_nats_options, **nats_options}
default_stan_options = {
'cluster_id': app.config.get('NATS_CLUSTER_ID'),
'client_id':
(self.name.
lower().
strip(string.whitespace)
).translate({ord(c): '_' for c in string.punctuation})
+ '_' + str(random.SystemRandom().getrandbits(0x58))
}
if not stan_options:
stan_options = {}
self.stan_options = {**default_stan_options, **stan_options}
app.teardown_appcontext(self.teardown)
def teardown(self, exception): # pylint: disable=unused-argument; flask method signature
"""Destroy all objects created by this extension."""
try:
this_loop = self.loop or asyncio.get_event_loop()
this_loop.run_until_complete(self.close())
except RuntimeError as e:
self.logger.error(e)
async def connect(self):
"""Connect to the queueing service."""
ctx = _app_ctx_stack.top
if ctx:
if not hasattr(ctx, 'nats'):
ctx.nats = NATS()
ctx.stan = STAN()
if not ctx.nats.is_connected:
self.stan_options = {**self.stan_options, **{'nats': ctx.nats}}
await ctx.nats.connect(**self.nats_options)
await ctx.stan.connect(**self.stan_options)
async def close(self):
"""Close the connections to the queue."""
if self.nats and self.nats.is_connected:
await self.stan.close()
await self.nats.close()
def publish_json(self, payload=None, subject=None):
"""Publish the json payload to the Queue Service."""
try:
subject = subject or self.subject
self.loop.run_until_complete(self.async_publish_json(payload, subject))
except Exception as err:
self.logger.error('Error: %s', err)
raise err
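    # Illustrative usage (subject name is hypothetical):
    #   queue = QueueService(app)
    #   queue.publish_json({'filing_id': 123}, subject='entity.filings')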
async def publish_json_to_subject(self, payload=None, subject=None):
"""Publish the json payload to the specified subject."""
try:
await self.async_publish_json(payload, subject)
except Exception as err:
self.logger.error('Error: %s', err)
raise err
async def async_publish_json(self, payload=None, subject=None):
"""Publish the json payload to the Queue Service."""
if not self.is_connected:
await self.connect()
await self.stan.publish(subject=subject,
payload=json.dumps(payload).encode('utf-8'))
async def on_error(self, e):
"""Handle errors raised by the client library."""
self.logger.warning('Error: %s', e)
async def METHOD_NAME(self):
"""Invoke by the client library when attempting to reconnect to NATS."""
self.logger.warning('Reconnected to NATS at nats://%s', self.nats.connected_url.netloc if self.nats else 'none')
async def on_disconnect(self):
"""Invoke by the client library when disconnected from NATS."""
self.logger.warning('Disconnected from NATS')
async def on_close(self):
"""Invoke by the client library when the NATS connection is closed."""
self.logger.warning('Closed connection to NATS')
@property
def is_closed(self):
"""Return True if the connection toThe cluster is closed."""
if self.nats:
return self.nats.is_closed
return True
@property
def is_connected(self):
"""Return True if connected to the NATS cluster."""
if self.nats:
return self.nats.is_connected
return False
@property
def stan(self):
"""Return the STAN client for the Queue Service."""
ctx = _app_ctx_stack.top
if ctx:
if not hasattr(ctx, 'stan'):
return None
return ctx.stan
return None
@property
def nats(self):
"""Return the NATS client for the Queue Service."""
ctx = _app_ctx_stack.top
if ctx:
if not hasattr(ctx, 'nats'):
return None
return ctx.nats
return None |
typeof nb type | from collections import namedtuple
from functools import singledispatch
import ctypes
import enum
import numpy as np
from numpy.random.bit_generator import BitGenerator
from numba.core import types, utils, errors
from numba.np import numpy_support
# terminal color markup
_termcolor = errors.termcolor()
class Purpose(enum.Enum):
# Value being typed is used as an argument
argument = 1
# Value being typed is used as a constant
constant = 2
_TypeofContext = namedtuple("_TypeofContext", ("purpose",))
def typeof(val, purpose=Purpose.argument):
"""
Get the Numba type of a Python value for the given purpose.
"""
# Note the behaviour for Purpose.argument must match _typeof.c.
c = _TypeofContext(purpose)
ty = typeof_impl(val, c)
if ty is None:
msg = _termcolor.errmsg(
f"Cannot determine Numba type of {type(val)}")
raise ValueError(msg)
return ty
@singledispatch
def typeof_impl(val, c):
"""
Generic typeof() implementation.
"""
tp = _typeof_buffer(val, c)
if tp is not None:
return tp
tp = getattr(val, "_numba_type_", None)
if tp is not None:
return tp
# cffi is handled here as it does not expose a public base class
# for exported functions or CompiledFFI instances.
from numba.core.typing import cffi_utils
if cffi_utils.SUPPORTED:
if cffi_utils.is_cffi_func(val):
return cffi_utils.make_function_type(val)
if cffi_utils.is_ffi_instance(val):
return types.ffi
return None
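# Additional types can hook into this dispatcher via singledispatch, e.g.
# (with a hypothetical extension type):
#   @typeof_impl.register(MyExtensionType)
#   def _typeof_my_extension(val, c):
#       return types.int64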
def _typeof_buffer(val, c):
from numba.core.typing import bufproto
try:
m = memoryview(val)
except TypeError:
return
# Object has the buffer protocol
try:
dtype = bufproto.decode_pep3118_format(m.format, m.itemsize)
except ValueError:
return
type_class = bufproto.get_type_class(type(val))
layout = bufproto.infer_layout(m)
return type_class(dtype, m.ndim, layout=layout,
readonly=m.readonly)
@typeof_impl.register(ctypes._CFuncPtr)
def _typeof_ctypes_function(val, c):
from .ctypes_utils import is_ctypes_funcptr, make_function_type
if is_ctypes_funcptr(val):
return make_function_type(val)
@typeof_impl.register(type)
def _typeof_type(val, c):
"""
Type various specific Python types.
"""
if issubclass(val, BaseException):
return types.ExceptionClass(val)
if issubclass(val, tuple) and hasattr(val, "_asdict"):
return types.NamedTupleClass(val)
if issubclass(val, np.generic):
return types.NumberClass(numpy_support.from_dtype(val))
if issubclass(val, types.Type):
return types.TypeRef(val)
from numba.typed import Dict
if issubclass(val, Dict):
return types.TypeRef(types.DictType)
from numba.typed import List
if issubclass(val, List):
return types.TypeRef(types.ListType)
@typeof_impl.register(bool)
def _typeof_bool(val, c):
return types.boolean
@typeof_impl.register(float)
def _typeof_float(val, c):
return types.float64
@typeof_impl.register(complex)
def _typeof_complex(val, c):
return types.complex128
@typeof_impl.register(int)
def _typeof_int(val, c):
# As in _typeof.c
nbits = utils.bit_length(val)
if nbits < 32:
typ = types.intp
elif nbits < 64:
typ = types.int64
elif nbits == 64 and val >= 0:
typ = types.uint64
else:
raise ValueError("Int value is too large: %s" % val)
return typ
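# For example, 10 (bit length 4) types as types.intp, 2**40 (41 bits) as
# types.int64, and 2**63 (64 bits, non-negative) as types.uint64.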
@typeof_impl.register(np.generic)
def _typeof_numpy_scalar(val, c):
try:
return numpy_support.map_arrayscalar_type(val)
except NotImplementedError:
pass
@typeof_impl.register(str)
def _typeof_str(val, c):
return types.string
@typeof_impl.register(type((lambda a: a).__code__))
def _typeof_code(val, c):
return types.code_type
@typeof_impl.register(type(None))
def _typeof_none(val, c):
return types.none
@typeof_impl.register(type(Ellipsis))
def _typeof_ellipsis(val, c):
return types.ellipsis
@typeof_impl.register(tuple)
def _typeof_tuple(val, c):
tys = [typeof_impl(v, c) for v in val]
if any(ty is None for ty in tys):
return
return types.BaseTuple.from_types(tys, type(val))
@typeof_impl.register(list)
def _typeof_list(val, c):
if len(val) == 0:
raise ValueError("Cannot type empty list")
ty = typeof_impl(val[0], c)
if ty is None:
raise ValueError(
f"Cannot type list element type {type(val[0])}")
return types.List(ty, reflected=True)
@typeof_impl.register(set)
def _typeof_set(val, c):
if len(val) == 0:
raise ValueError("Cannot type empty set")
item = next(iter(val))
ty = typeof_impl(item, c)
if ty is None:
raise ValueError(
f"Cannot type set element type {type(item)}")
return types.Set(ty, reflected=True)
@typeof_impl.register(slice)
def _typeof_slice(val, c):
return types.slice2_type if val.step in (None, 1) else types.slice3_type
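# e.g. slice(1, 5) and slice(1, 5, 1) map to types.slice2_type, while
# slice(1, 5, 2) maps to types.slice3_type.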
@typeof_impl.register(enum.Enum)
@typeof_impl.register(enum.IntEnum)
def _typeof_enum(val, c):
clsty = typeof_impl(type(val), c)
return clsty.member_type
@typeof_impl.register(enum.EnumMeta)
def _typeof_enum_class(val, c):
cls = val
members = list(cls.__members__.values())
if len(members) == 0:
raise ValueError("Cannot type enum with no members")
dtypes = {typeof_impl(mem.value, c) for mem in members}
if len(dtypes) > 1:
raise ValueError("Cannot type heterogeneous enum: "
"got value types %s"
% ", ".join(sorted(str(ty) for ty in dtypes)))
if issubclass(val, enum.IntEnum):
typecls = types.IntEnumClass
else:
typecls = types.EnumClass
return typecls(cls, dtypes.pop())
@typeof_impl.register(np.dtype)
def _typeof_dtype(val, c):
tp = numpy_support.from_dtype(val)
return types.DType(tp)
@typeof_impl.register(np.ndarray)
def _typeof_ndarray(val, c):
if isinstance(val, np.ma.MaskedArray):
msg = "Unsupported array type: numpy.ma.MaskedArray."
raise errors.NumbaTypeError(msg)
try:
dtype = numpy_support.from_dtype(val.dtype)
except errors.NumbaNotImplementedError:
raise errors.NumbaValueError(f"Unsupported array dtype: {val.dtype}")
layout = numpy_support.map_layout(val)
readonly = not val.flags.writeable
return types.Array(dtype, val.ndim, layout, readonly=readonly)
@typeof_impl.register(types.NumberClass)
def _typeof_number_class(val, c):
return val
@typeof_impl.register(types.Literal)
def _typeof_literal(val, c):
return val
@typeof_impl.register(types.TypeRef)
def _typeof_typeref(val, c):
return val
@typeof_impl.register(types.Type)
def METHOD_NAME(val, c):
if isinstance(val, types.BaseFunction):
return val
elif isinstance(val, (types.Number, types.Boolean)):
return types.NumberClass(val)
else:
return types.TypeRef(val)
@typeof_impl.register(BitGenerator)
def typeof_numpy_random_bitgen(val, c):
return types.NumPyRandomBitGeneratorType(val)
@typeof_impl.register(np.random.Generator)
def typeof_random_generator(val, c):
return types.NumPyRandomGeneratorType(val) |
test compatible strict optimade field | from typing import Callable, List
import pytest
from pydantic import BaseModel, Field, ValidationError
from optimade.models.utils import OptimadeField, StrictField, SupportLevel
def make_bad_models(field: Callable):
"""Check that models using `field` to replace `Field` provide
appropriate warnings and errors.
"""
with pytest.raises(RuntimeError, match="with forbidden keywords"):
class BadModel(BaseModel):
bad_field: int = field(..., random_key="disallowed")
with pytest.warns(UserWarning, match="No description"):
class AnotherBadModel(BaseModel):
bad_field: int = field(...)
def test_strict_field():
"""Test `StrictField` creation for failure on bad keys, and
warnings with no description.
"""
make_bad_models(StrictField)
def test_optimade_field():
"""Test `OptimadeField` creation for failure on bad keys, and
warnings with no description.
"""
make_bad_models(OptimadeField)
def METHOD_NAME() -> None:
"""This test checks that OptimadeField and StrictField
produce the same schemas when given the same arguments.
"""
class CorrectModelWithStrictField(BaseModel):
# check that unit and uniqueItems are passed through
good_field: List[str] = StrictField(
...,
support=SupportLevel.MUST,
queryable=SupportLevel.OPTIONAL,
description="Unit test to make sure that StrictField allows through OptimadeField keys",
pattern="^structures$",
unit="stringiness",
uniqueItems=True,
sortable=True,
)
class CorrectModelWithOptimadeField(BaseModel):
good_field: List[str] = OptimadeField(
...,
# Only difference here is that OptimadeField allows case-insensitive
# strings to be passed instead of support levels directly
support="must",
queryable="optional",
description="Unit test to make sure that StrictField allows through OptimadeField keys",
pattern="^structures$",
uniqueItems=True,
unit="stringiness",
sortable=True,
)
optimade_schema = CorrectModelWithOptimadeField.schema()
strict_schema = CorrectModelWithStrictField.schema()
strict_schema["title"] = optimade_schema["title"]
assert strict_schema == optimade_schema
def test_formula_regexp() -> None:
"""This test checks some simple chemical formulae with the
`CHEMICAL_FORMULA_REGEXP`.
"""
import re
from optimade.models.utils import CHEMICAL_FORMULA_REGEXP
class DummyModel(BaseModel):
formula: str = Field(regex=CHEMICAL_FORMULA_REGEXP)
good_formulae = (
"AgCl",
"H5F",
"LiP5",
"Jn7Qb4", # Regexp does not care about the actual existence of elements
"A5B213CeD3E65F12G",
"",
)
bad_formulae = (
"Ag...Cl",
"123123",
"Ag Cl",
"abcd",
"6F7G",
"A0Be2",
"A1Be2",
"A0B1",
)
for formula in good_formulae:
assert re.match(CHEMICAL_FORMULA_REGEXP, formula)
assert DummyModel(formula=formula)
for formula in bad_formulae:
with pytest.raises(ValidationError):
assert DummyModel(formula=formula)
def test_reduce_formula():
from optimade.models.utils import reduce_formula
assert reduce_formula("Si1O2") == "O2Si"
assert reduce_formula("Si11O2") == "O2Si11"
assert reduce_formula("Si10O2C4") == "C2OSi5"
assert reduce_formula("Li1") == "Li"
assert reduce_formula("Li1Ge1") == "GeLi"
def test_anonymize_formula():
from optimade.models.utils import anonymize_formula
assert anonymize_formula("Si1O2") == "A2B"
assert anonymize_formula("Si11O2") == "A11B2"
assert anonymize_formula("Si10O2C4") == "A5B2C"
assert anonymize_formula("Si1 O2") == "A2B"
assert anonymize_formula("Si11 O2") == "A11B2"
assert anonymize_formula("Si10 O2C4") == "A5B2C" |
abstractmethod | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class
class _C: pass
_InstanceType = type(_C())
def METHOD_NAME(funcobj):
"""A decorator indicating abstract methods.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract methods are overridden.
The abstract methods can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractmethod
def my_abstract_method(self, ...):
...
"""
funcobj.__isabstractmethod__ = True
return funcobj
class abstractproperty(property):
"""A decorator indicating abstract properties.
Requires that the metaclass is ABCMeta or derived from it. A
class that has a metaclass derived from ABCMeta cannot be
instantiated unless all of its abstract properties are overridden.
The abstract properties can be called using any of the normal
'super' call mechanisms.
Usage:
class C:
__metaclass__ = ABCMeta
@abstractproperty
def my_abstract_property(self):
...
This defines a read-only property; you can also define a read-write
abstract property using the 'long' form of property declaration:
class C:
__metaclass__ = ABCMeta
def getx(self): ...
def setx(self, value): ...
x = abstractproperty(getx, setx)
"""
__isabstractmethod__ = True
class ABCMeta(type):
"""Metaclass for defining Abstract Base Classes (ABCs).
Use this metaclass to create an ABC. An ABC can be subclassed
directly, and then acts as a mix-in class. You can also register
unrelated concrete classes (even built-in classes) and unrelated
ABCs as 'virtual subclasses' -- these and their descendants will
be considered subclasses of the registering ABC by the built-in
issubclass() function, but the registering ABC won't show up in
their MRO (Method Resolution Order) nor will method
implementations defined by the registering ABC be callable (not
even via super()).
"""
# A global counter that is incremented each time a class is
# registered as a virtual subclass of anything. It forces the
# negative cache to be cleared before its next use.
_abc_invalidation_counter = 0
def __new__(mcls, name, bases, namespace):
cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
# Compute set of abstract method names
abstracts = set(name
for name, value in namespace.items()
if getattr(value, "__isabstractmethod__", False))
for base in bases:
for name in getattr(base, "__abstractmethods__", set()):
value = getattr(cls, name, None)
if getattr(value, "__isabstractmethod__", False):
abstracts.add(name)
cls.__abstractmethods__ = frozenset(abstracts)
# Set up inheritance registry
cls._abc_registry = WeakSet()
cls._abc_cache = WeakSet()
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
return cls
def register(cls, subclass):
"""Register a virtual subclass of an ABC."""
if not isinstance(subclass, (type, types.ClassType)):
raise TypeError("Can only register classes")
if issubclass(subclass, cls):
return # Already a subclass
# Subtle: test for cycles *after* testing for "already a subclass";
# this means we allow X.register(X) and interpret it as a no-op.
if issubclass(cls, subclass):
# This would create a cycle, which is bad for the algorithm below
raise RuntimeError("Refusing to create an inheritance cycle")
cls._abc_registry.add(subclass)
ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache
def _dump_registry(cls, file=None):
"""Debug helper to print the ABC registry."""
print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
for name in sorted(cls.__dict__.keys()):
if name.startswith("_abc_"):
value = getattr(cls, name)
print >> file, "%s: %r" % (name, value)
def __instancecheck__(cls, instance):
"""Override for isinstance(instance, cls)."""
# Inline the cache checking when it's simple.
subclass = getattr(instance, '__class__', None)
if subclass is not None and subclass in cls._abc_cache:
return True
subtype = type(instance)
# Old-style instances
if subtype is _InstanceType:
subtype = subclass
if subtype is subclass or subclass is None:
if (cls._abc_negative_cache_version ==
ABCMeta._abc_invalidation_counter and
subtype in cls._abc_negative_cache):
return False
# Fall back to the subclass check.
return cls.__subclasscheck__(subtype)
return (cls.__subclasscheck__(subclass) or
cls.__subclasscheck__(subtype))
def __subclasscheck__(cls, subclass):
"""Override for issubclass(subclass, cls)."""
# Check cache
if subclass in cls._abc_cache:
return True
# Check negative cache; may have to invalidate
if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
# Invalidate the negative cache
cls._abc_negative_cache = WeakSet()
cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
elif subclass in cls._abc_negative_cache:
return False
# Check the subclass hook
ok = cls.__subclasshook__(subclass)
if ok is not NotImplemented:
assert isinstance(ok, bool)
if ok:
cls._abc_cache.add(subclass)
else:
cls._abc_negative_cache.add(subclass)
return ok
# Check if it's a direct subclass
if cls in getattr(subclass, '__mro__', ()):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a registered class (recursive)
for rcls in cls._abc_registry:
if issubclass(subclass, rcls):
cls._abc_cache.add(subclass)
return True
# Check if it's a subclass of a subclass (recursive)
for scls in cls.__subclasses__():
if issubclass(subclass, scls):
cls._abc_cache.add(subclass)
return True
# No dice; update negative cache
cls._abc_negative_cache.add(subclass)
return False |
create dummy report | import contextlib
import os.path
import sys
import traceback
# Encapsulates test result reporting. The interface is probably not optimal,
# but at least it's a start.
#
# The interface is loosely modelled off of python's file API.
class DummyReport:
"""
Can be used in place of TestReport to print results to terminal (without
    some of the unnecessary formatting).
"""
def write(self, msg, flush = False):
print(msg)
sys.stdout.flush()
def passing(self,msg):
print('pass: {}'.format(msg))
def fail(self, msg):
print('FAIL: {}'.format(msg))
def incomplete(self, msg):
print('incomplete: {}'.format(msg))
class TestReport:
"""
    Represents the report for the current test(s).
DO NOT CONSTRUCT THIS DIRECTLY. USE create_test_report INSTEAD.
Note
----
The underlying testing infrastructure is a little confusing. After looking
    through things, test results are read from individual files.
The test file must include a line that says "UNIT TEST BEGIN". If it
doesn't include "END CELLO" and "UNIT TEST END", anywhere, the test is
counted as crashed (this count seems to happen in 2 different places).
This doesn't actually affect the end result of the test suite.
Each test can report multiple test cases that are failures, are incomplete,
or pass. The test suite (and CircleCi) only fails if at least one failure
    is reported.
The underlying testing infrastructure requires that this is at least
somewhat eager about writing information to file. For example, if there is
    a test failure, but some (related or unrelated) problem causes the program
    to exit before writing that to file, the test infrastructure will
currently indicate success.
"""
def __init__(self, f):
self._f = f
self._complete = False
self.write('UNIT TEST BEGIN\n',flush = True)
def write(self, msg, flush = False):
self._f.write(msg)
if flush:
self._f.flush()
def _test_case_status(self, prefix, msg = None):
# it's EXTREMELY important that there is a space between the newline
# and the prefix
prefix = '\n ' + prefix
if msg is not None:
msg = ': '.join([prefix,msg])
else:
msg = prefix
self.write(msg, flush = True)
def fail(self, msg = None):
# don't touch the prefix string unless it changes in build.sh
self._test_case_status(prefix = "FAIL", msg = msg)
def incomplete(self, msg = None):
        # If the reason you are invoking this method should prevent continuous
        # integration from passing, you should use `fail` instead of this method
# don't touch the prefix string unless it changes in build.sh
self._test_case_status(prefix = "incomplete", msg = msg)
def passing(self,msg = None):
# don't touch the prefix string unless it changes in build.sh
self._test_case_status(prefix = "pass", msg = msg)
def complete(self):
# At the time of writing, test results reported after complete will
# still be counted. It's also currently OK for test reports to call
# this method more than once. However, it's unclear whether that could
# change in the future
self._complete = True
self.write('\nUNIT TEST END'
'\nEND CELLO\n',
flush = True)
def is_complete(self):
return self._complete
@contextlib.contextmanager
def METHOD_NAME(*args, **kwargs):
try:
yield DummyReport()
finally:
pass
@contextlib.contextmanager
def create_test_report(test_file, clobber = True):
"""
This is a context manager used to construct an instance of TestReport.
    If the TestReport has not already completed at the end of the `with`
    statement, this will call its `complete` method unless there is an
    exception - in that case, `fail` will be called and the report will be left
    incomplete. If an exception occurs but the test report is already
    `complete`, the exception will just be printed.
"""
if os.path.isfile(test_file):
if clobber:
print("Overwriting {}".format(test_file))
else:
raise ValueError(
"The test report file, {}, already exists".format(test_file)
)
else:
print("Creating {}".format(test_file))
f = open(test_file,'w')
test_report = TestReport(f)
err = None
err_str = None
try:
yield test_report
except:
exc_info = sys.exc_info()
err = exc_info[1]
err_str = ''.join(traceback.format_exception(*exc_info))
finally:
if not test_report.is_complete():
if err_str is not None:
test_report.fail(
"Unexpected python exception occured before the test "
"completed. Exception information is provided below:\n"
)
test_report.write(err_str)
test_report.write('\n')
# leave the report incomplete
else:
test_report.complete()
elif err_str is not None:
test_report.write(
"Unexpected python exception occured after the test "
"completed. Exception information is provided below:\n"
)
test_report.write(err_str)
test_report.write('\n')
f.close()
    # if an error occurred, let's pass it through
if err is not None:
raise err |
submit | import os
from . import logfiles
from . import helpers
from . import chunky_parts
from . import workflow
def METHOD_NAME(config):
if config["general"]["verbose"]:
print("\n", 40 * "+ ")
print("Submitting jobscript to batch system...")
print()
print(f"Output written by {config['computer']['batch_system']}:")
if config["general"]["verbose"]:
print("\n", 40 * "+ ")
for command in config["general"]["submit_command"]:
print(command)
for command in config["general"]["submit_command"]:
os.system(command)
return config
def resubmit_batch_or_shell(config, batch_or_shell, cluster=None):
config = config["general"]["batch"].write_simple_runscript(
config, cluster, batch_or_shell
)
if not check_if_check(config):
config = METHOD_NAME(config)
return config
def resubmit_SimulationSetup(config, cluster=None):
monitor_file = logfiles.logfile_handle
# Jobs that should be started directly from the compute job:
jobtype = config["general"]["jobtype"]
monitor_file.write(f"{cluster} for this run:\n")
command_line_config = config["general"]["command_line_config"]
command_line_config["jobtype"] = cluster
monitor_file.write(f"Initializing {cluster} object with:\n")
monitor_file.write(str(command_line_config))
# NOTE(PG) Non top level import to avoid circular dependency:
os.chdir(config["general"]["started_from"])
from .sim_objects import SimulationSetup
cluster_obj = SimulationSetup(command_line_config)
monitor_file.write(f"{cluster} object built....\n")
if f"{cluster}_update_{jobtype}_config_before_resubmit" in cluster_obj.config:
monitor_file.write(
f"{cluster} object needs to update the calling job config:\n"
)
# FIXME(PG): This might need to be a deep update...?
config.update(
cluster_obj.config[f"{cluster}_update_{jobtype}_config_before_resubmit"]
)
if not check_if_check(config):
monitor_file.write(f"Calling {cluster} job:\n")
config["general"]["experiment_over"] = cluster_obj(kill_after_submit=False)
return config
def get_submission_type(cluster, config):
# Figure out if next job is resubmitted to batch system,
# just executed in shell or invoked as new SimulationSetup
# object
clusterconf = config["general"]["workflow"]["subjob_clusters"][cluster]
if clusterconf.get("submit_to_batch_system", False):
submission_type = "batch"
elif cluster in ["newrun", "prepcompute", "tidy", "inspect", "viz"]:
submission_type = "SimulationSetup"
else:
submission_type = "shell"
return submission_type
def end_of_experiment(config):
if config["general"]["next_date"] >= config["general"]["final_date"]:
monitor_file = logfiles.logfile_handle
monitor_file.write("Reached the end of the simulation, quitting...\n")
config["general"]["experiment_over"] = True
helpers.write_to_log(config, ["# Experiment over"], message_sep="")
return True
return False
def end_of_experiment_all_models(config):
index = 1
expid = config["general"]["expid"]
while "model" + str(index) in config["general"]["original_config"]:
if (
not config["model" + str(index)]["setup_name"]
== config["general"]["setup_name"]
):
experiment_done = False
setup_name = config["model" + str(index)]["setup_name"]
print(f"Testing if {setup_name} is already done...")
logfile = (
config["general"]["experiment_log_dir"]
+ "/"
+ expid
+ "_"
+ setup_name
+ ".log"
)
if os.path.isfile(logfile):
with open(logfile, "r") as open_logfile:
logfile_array = open_logfile.readlines()
for line in logfile_array:
if "# Experiment over" in line:
print(f" ...{setup_name} is done.")
experiment_done = True
break
if not experiment_done:
print("Still something left to do...")
return False
index += 1
print("Nothing left to do...")
return True
def check_if_check(config):
if config["general"]["check"]:
print(
"Actually not submitting anything, this job preparation was launched in 'check' mode (-c)."
)
print()
return True
else:
return False
def maybe_resubmit(config):
jobtype = config["general"]["jobtype"]
nextrun = resubmit_recursively(config, jobtype=jobtype)
if nextrun: # submit list contains stuff from next run
config = _increment_date_and_run_number(config)
config = _write_date_file(config)
if end_of_experiment(config):
if config["general"].get("iterative_coupling", False):
if end_of_experiment_all_models(config):
return config
else:
# config = chunky_parts._update_chunk_date_file(config)
return config
cluster = config["general"]["workflow"]["first_task_in_queue"]
nextrun = resubmit_recursively(
config, list_of_clusters=[cluster], nextrun_in=True
)
return config
def resubmit_recursively(config, jobtype=None, list_of_clusters=None, nextrun_in=False):
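    # Walks the workflow graph: starting from ``jobtype`` (or an explicit
    # ``list_of_clusters``), each follow-up cluster is either skipped or
    # submitted via batch, shell, or a new SimulationSetup; returns True once
    # the first task of the *next* run is encountered.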
nextrun = False
if not list_of_clusters:
list_of_clusters = config["general"]["workflow"]["subjob_clusters"][
jobtype
].get("next_submit", [])
for cluster in list_of_clusters:
if (
cluster == config["general"]["workflow"]["first_task_in_queue"]
and not nextrun_in
):
nextrun = True
else:
if not workflow.skip_cluster(cluster, config):
submission_type = get_submission_type(cluster, config)
if submission_type == "SimulationSetup":
resubmit_SimulationSetup(config, cluster)
elif submission_type in ["batch", "shell"]:
resubmit_batch_or_shell(config, submission_type, cluster)
else:
print(f"Skipping {cluster}")
nextrun = (
resubmit_recursively(config, jobtype=cluster, nextrun_in=nextrun_in)
or nextrun
)
return nextrun
def _increment_date_and_run_number(config):
config["general"]["run_number"] += 1
config["general"]["current_date"] += config["general"]["delta_date"]
config["general"]["command_line_config"]["current_date"] = config["general"][
"current_date"
].format(form=9, givenph=False, givenpm=False, givenps=False)
config["general"]["command_line_config"]["run_number"] = config["general"][
"run_number"
]
config = chunky_parts.update_command_line_config(config)
return config
def _write_date_file(config): # self, date_file=None):
# monitor_file = config["general"]["logfile"]
monitor_file = logfiles.logfile_handle
# if not date_file:
date_file = (
f"{config['general']['experiment_scripts_dir']}"
f"/{config['general']['expid']}_{config['general']['setup_name']}.date"
)
with open(date_file, "w") as date_file:
date_file.write(
config["general"]["current_date"].output()
+ " "
+ str(config["general"]["run_number"])
)
monitor_file.write("writing date file \n")
return config |
test format output type | from textwrap import dedent
import pytest
from graphql import GraphQLArgument as Argument
from graphql import GraphQLEnumType, GraphQLEnumValue, GraphQLID
from graphql import GraphQLField as Field
from graphql import GraphQLInputField as Input
from graphql import GraphQLInputField as InputField
from graphql import GraphQLInputObjectType as InputObject
from graphql import GraphQLInt as Int
from graphql import GraphQLList as List
from graphql import GraphQLNonNull as NonNull
from graphql import GraphQLObjectType as Object
from graphql import GraphQLScalarType as Scalar
from graphql import GraphQLString as String
from dagger._codegen.generator import (
Context,
_InputField,
format_input_type,
format_name,
format_output_type,
)
from dagger._codegen.generator import Enum as EnumHandler
from dagger._codegen.generator import Scalar as ScalarHandler
@pytest.fixture()
def ctx():
return Context(
id_map={
"CacheID": "CacheVolume",
"FileID": "File",
"SecretID": "Secret",
},
id_query_map={
"ContainerID": "container",
"DirectoryID": "directory",
},
simple_objects_map={},
remaining={"Secret"},
)
@pytest.mark.parametrize(
("graphql", "expected"),
[
("stdout", "stdout"),
("envVariable", "env_variable"), # casing
("from", "from_"), # reserved keyword
("type", "type"), # builtin
("withFS", "with_fs"), # initialism
],
)
def test_format_name(graphql, expected):
assert format_name(graphql) == expected
opts = InputObject(
"Options",
fields={
"key": InputField(NonNull(Scalar("CacheID"))),
"name": InputField(String),
},
)
@pytest.mark.parametrize(
("graphql", "expected"),
[
(NonNull(List(NonNull(String))), "list[str]"),
(List(String), "Optional[list[Optional[str]]]"),
(List(NonNull(String)), "Optional[list[str]]"),
(NonNull(Scalar("FileID")), "File"),
(Scalar("FileID"), "Optional[File]"),
(NonNull(opts), "Options"),
(opts, "Optional[Options]"),
(NonNull(List(NonNull(opts))), "list[Options]"),
(NonNull(List(opts)), "list[Optional[Options]]"),
(List(NonNull(opts)), "Optional[list[Options]]"),
(List(opts), "Optional[list[Optional[Options]]]"),
],
)
def test_format_input_type(graphql, expected, ctx: Context):
assert format_input_type(graphql, ctx.id_map) == expected
cache_volume = Object(
"CacheVolume",
fields={
"id": Field(
NonNull(Scalar("CacheID")),
{},
),
},
)
@pytest.mark.parametrize(
("graphql", "expected"),
[
(NonNull(List(NonNull(String))), "list[str]"),
(List(String), "Optional[list[Optional[str]]]"),
(List(NonNull(String)), "Optional[list[str]]"),
(NonNull(Scalar("FileID")), "FileID"),
(Scalar("FileID"), "Optional[FileID]"),
(NonNull(cache_volume), "CacheVolume"),
(cache_volume, "CacheVolume"),
(List(NonNull(cache_volume)), "list[CacheVolume]"),
(List(cache_volume), "list[Optional[CacheVolume]]"),
],
)
def METHOD_NAME(graphql, expected):
assert format_output_type(graphql) == expected
@pytest.mark.parametrize(
("name", "args", "expected"),
[
("args", (NonNull(List(String)),), "args: Sequence[Optional[str]]"),
("secret", (NonNull(Scalar("SecretID")),), "secret: Secret"),
("secret", (Scalar("SecretID"),), "secret: Optional[Secret] = None"),
("from", (String, None), "from_: Optional[str] = None"),
("lines", (Int, 1), "lines: Optional[int] = 1"),
(
"configPath",
(NonNull(String), "/dagger.json"),
'config_path: str = "/dagger.json"',
),
],
)
@pytest.mark.parametrize("cls", [Argument, Input])
def test_input_field_param(cls, name, args, expected, ctx: Context):
assert _InputField(ctx, name, cls(*args)).as_param() == expected
@pytest.mark.parametrize(
("name", "args", "expected"),
[
(
"context",
(NonNull(Scalar("DirectoryID")),),
'Arg("context", context),',
),
(
"secret",
(Scalar("SecretID"),),
'Arg("secret", secret, None),',
),
(
"lines",
(Int, 1),
'Arg("lines", lines, 1),',
),
(
"from",
(String, None),
'Arg("from", from_, None),',
),
(
"configPath",
(NonNull(String), "/dagger.json"),
'Arg("configPath", config_path, "/dagger.json"),',
),
],
)
@pytest.mark.parametrize("cls", [Argument, Input])
def test_input_field_arg(cls, name, args, expected, ctx: Context):
assert _InputField(ctx, name, cls(*args)).as_arg() == expected
@pytest.mark.parametrize(
("type_", "expected"),
[
(GraphQLID, False),
(String, False),
(Int, False),
(Scalar("FileID"), True),
(Object("Container", {}), False),
],
)
def test_scalar_predicate(type_, expected, ctx: Context):
assert ScalarHandler(ctx).predicate(type_) is expected
@pytest.mark.parametrize(
("type_", "expected"),
[
# with doc
(
Scalar("SecretID", description="A unique identifier for a secret."),
dedent(
'''
class SecretID(Scalar):
"""A unique identifier for a secret."""
''',
),
),
# without doc
(
Scalar("FileID"),
dedent(
"""
class FileID(Scalar):
...
""",
),
),
],
)
def test_scalar_render(type_, expected, ctx: Context):
handler = ScalarHandler(ctx)
assert handler.render(type_) == expected
@pytest.mark.parametrize(
("type_", "expected"),
[
# with doc
(
GraphQLEnumType(
"Enumeration",
{
"ONE": GraphQLEnumValue("ONE", description="First value."),
"TWO": GraphQLEnumValue("TWO", description="Second value."),
"THREE": GraphQLEnumValue("THREE", description="Third value."),
},
description="Example of an enumeration.",
),
dedent(
'''
class Enumeration(Enum):
"""Example of an enumeration."""
ONE = "ONE"
"""First value."""
THREE = "THREE"
"""Third value."""
TWO = "TWO"
"""Second value."""
''',
),
),
# without doc
(
GraphQLEnumType(
"Enumeration",
{
"ONE": GraphQLEnumValue("ONE"),
"TWO": GraphQLEnumValue("TWO"),
"THREE": GraphQLEnumValue("THREE"),
},
),
dedent(
"""
class Enumeration(Enum):
ONE = "ONE"
THREE = "THREE"
TWO = "TWO"
""",
),
),
],
)
def test_enum_render(type_, expected, ctx: Context):
handler = EnumHandler(ctx)
assert handler.render(type_) == expected |
handle | # Taken from
# https://github.com/django-extensions/django-extensions/blob/master/django_extensions/management/commands/shell_plus.py
# django_extensions/management/commands/shell_plus.py
# pylint: skip-file
from __future__ import print_function
import os
import time
from django.core.management.base import BaseCommand
from django_extensions.management.shells import import_objects
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument(
'--plain',
action='store_true',
dest='plain',
help='Tells Django to use plain Python, not BPython nor IPython.',
)
parser.add_argument(
'--bpython',
action='store_true',
dest='bpython',
help='Tells Django to use BPython, not IPython.',
)
parser.add_argument(
'--ipython',
action='store_true',
dest='ipython',
help='Tells Django to use IPython, not BPython.',
)
parser.add_argument(
'--notebook',
action='store_true',
dest='notebook',
help='Tells Django to use IPython args.',
)
parser.add_argument(
'--no-pythonrc',
action='store_true',
dest='no_pythonrc',
help='Tells Django not to execute PYTHONSTARTUP file',
)
parser.add_argument(
'--print-sql',
action='store_true',
default=False,
help="Print SQL queries as they're executed",
)
parser.add_argument(
'--dont-load',
action='append',
dest='dont_load',
default=[],
help='Ignore autoloading of some apps/models. Can be used several times.',
)
parser.add_argument(
'--quiet-load',
action='store_true',
default=False,
dest='quiet_load',
help='Do not display loaded models messages',
)
help = "Like the 'shell' command but autoloads the models of all installed Django apps."
requires_model_validation = True
def METHOD_NAME(self, *args, **options):
use_notebook = options.get('notebook', False)
use_ipython = options.get('ipython', False)
use_bpython = options.get('bpython', False)
use_plain = options.get('plain', False)
use_pythonrc = not options.get('no_pythonrc', True)
if options.get("print_sql", False):
# Code from http://gist.github.com/118990
from django.db.backends import util
sqlparse = None
try:
import sqlparse
except ImportError:
pass
class PrintQueryWrapper(util.CursorDebugWrapper):
def execute(self, sql, params=()):
starttime = time.time()
try:
return self.cursor.execute(sql, params)
finally:
execution_time = time.time() - starttime
raw_sql = self.db.ops.last_executed_query(
self.cursor, sql, params
)
if sqlparse:
print(sqlparse.format(raw_sql, reindent=True))
else:
print(raw_sql)
print()
print(
'Execution time: %.6fs [Database: %s]'
% (execution_time, self.db.alias)
)
print()
util.CursorDebugWrapper = PrintQueryWrapper
def run_notebook():
from django.conf import settings
from IPython.frontend.html.notebook import notebookapp
app = notebookapp.NotebookApp.instance()
ipython_arguments = getattr(
settings,
'IPYTHON_ARGUMENTS',
['--ext', 'django_extensions.management.notebook_extension'],
)
app.initialize(ipython_arguments)
app.start()
def run_plain():
# Using normal Python shell
import code
imported_objects = import_objects(options, self.style)
try:
# Try activating rlcompleter, because it's handy.
import readline
except ImportError:
pass
else:
# We don't have to wrap the following import in a 'try', because
# we already know 'readline' was imported successfully.
import rlcompleter
readline.set_completer(rlcompleter.Completer(imported_objects).complete)
readline.parse_and_bind("tab:complete")
# We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
# conventions and get $PYTHONSTARTUP first then import user.
if use_pythonrc:
pythonrc = os.environ.get("PYTHONSTARTUP")
if pythonrc and os.path.isfile(pythonrc):
try:
exec(
compile(open(pythonrc).read(), pythonrc, 'exec'),
globals(),
locals(),
)
except NameError:
pass
code.interact(local=imported_objects)
def run_bpython():
from bpython import embed
imported_objects = import_objects(options, self.style)
embed(imported_objects)
def run_ipython():
try:
from IPython import embed
imported_objects = import_objects(options, self.style)
embed(user_ns=imported_objects)
except ImportError:
# IPython < 0.11
# Explicitly pass an empty list as arguments, because otherwise
# IPython would use sys.argv from this script.
# Notebook not supported for IPython < 0.11.
from IPython.Shell import IPShell
imported_objects = import_objects(options, self.style)
shell = IPShell(argv=[], user_ns=imported_objects)
shell.mainloop()
if use_notebook:
run_notebook()
elif use_plain:
run_plain()
elif use_ipython:
run_ipython()
elif use_bpython:
run_bpython()
else:
for func in (run_bpython, run_ipython, run_plain):
try:
func()
except ImportError:
continue
else:
break
else:
import traceback
traceback.print_exc()
print(
self.style.ERROR(
"Could not load any interactive Python environment."
)
) |
test mark absent | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors
# See license.txt
import frappe
from frappe.tests.utils import FrappeTestCase
from frappe.utils import (
add_days,
add_months,
get_first_day,
get_last_day,
get_year_ending,
get_year_start,
getdate,
nowdate,
)
from erpnext.setup.doctype.employee.test_employee import make_employee
from hrms.hr.doctype.attendance.attendance import (
DuplicateAttendanceError,
OverlappingShiftAttendanceError,
get_unmarked_days,
mark_attendance,
)
from hrms.tests.test_utils import get_first_sunday
test_records = frappe.get_test_records("Attendance")
class TestAttendance(FrappeTestCase):
def setUp(self):
from hrms.payroll.doctype.salary_slip.test_salary_slip import make_holiday_list
from_date = get_year_start(add_months(getdate(), -1))
to_date = get_year_ending(getdate())
self.holiday_list = make_holiday_list(from_date=from_date, to_date=to_date)
frappe.db.delete("Attendance")
def test_duplicate_attendance(self):
employee = make_employee("[email protected]", company="_Test Company")
date = nowdate()
mark_attendance(employee, date, "Present")
attendance = frappe.get_doc(
{
"doctype": "Attendance",
"employee": employee,
"attendance_date": date,
"status": "Absent",
"company": "_Test Company",
}
)
self.assertRaises(DuplicateAttendanceError, attendance.insert)
def test_duplicate_attendance_with_shift(self):
from hrms.hr.doctype.shift_type.test_shift_type import setup_shift_type
employee = make_employee("[email protected]", company="_Test Company")
date = nowdate()
shift_1 = setup_shift_type(shift_type="Shift 1", start_time="08:00:00", end_time="10:00:00")
mark_attendance(employee, date, "Present", shift=shift_1.name)
# attendance record with shift
attendance = frappe.get_doc(
{
"doctype": "Attendance",
"employee": employee,
"attendance_date": date,
"status": "Absent",
"company": "_Test Company",
"shift": shift_1.name,
}
)
self.assertRaises(DuplicateAttendanceError, attendance.insert)
# attendance record without any shift
attendance = frappe.get_doc(
{
"doctype": "Attendance",
"employee": employee,
"attendance_date": date,
"status": "Absent",
"company": "_Test Company",
}
)
self.assertRaises(DuplicateAttendanceError, attendance.insert)
def test_overlapping_shift_attendance_validation(self):
from hrms.hr.doctype.shift_type.test_shift_type import setup_shift_type
employee = make_employee("[email protected]", company="_Test Company")
date = nowdate()
shift_1 = setup_shift_type(shift_type="Shift 1", start_time="08:00:00", end_time="10:00:00")
shift_2 = setup_shift_type(shift_type="Shift 2", start_time="09:30:00", end_time="11:00:00")
mark_attendance(employee, date, "Present", shift=shift_1.name)
# attendance record with overlapping shift
attendance = frappe.get_doc(
{
"doctype": "Attendance",
"employee": employee,
"attendance_date": date,
"status": "Absent",
"company": "_Test Company",
"shift": shift_2.name,
}
)
self.assertRaises(OverlappingShiftAttendanceError, attendance.insert)
def test_allow_attendance_with_different_shifts(self):
# allows attendance with 2 different non-overlapping shifts
from hrms.hr.doctype.shift_type.test_shift_type import setup_shift_type
employee = make_employee("[email protected]", company="_Test Company")
date = nowdate()
shift_1 = setup_shift_type(shift_type="Shift 1", start_time="08:00:00", end_time="10:00:00")
shift_2 = setup_shift_type(shift_type="Shift 2", start_time="11:00:00", end_time="12:00:00")
mark_attendance(employee, date, "Present", shift_1.name)
frappe.get_doc(
{
"doctype": "Attendance",
"employee": employee,
"attendance_date": date,
"status": "Absent",
"company": "_Test Company",
"shift": shift_2.name,
}
).insert()
def METHOD_NAME(self):
employee = make_employee("[email protected]")
date = nowdate()
attendance = mark_attendance(employee, date, "Absent")
fetch_attendance = frappe.get_value(
"Attendance", {"employee": employee, "attendance_date": date, "status": "Absent"}
)
self.assertEqual(attendance, fetch_attendance)
def test_unmarked_days(self):
first_sunday = get_first_sunday(
self.holiday_list, for_date=get_last_day(add_months(getdate(), -1))
)
attendance_date = add_days(first_sunday, 1)
employee = make_employee(
"[email protected]", date_of_joining=add_days(attendance_date, -1)
)
frappe.db.set_value("Employee", employee, "holiday_list", self.holiday_list)
mark_attendance(employee, attendance_date, "Present")
unmarked_days = get_unmarked_days(
employee, get_first_day(attendance_date), get_last_day(attendance_date)
)
unmarked_days = [getdate(date) for date in unmarked_days]
# attendance already marked for the day
self.assertNotIn(attendance_date, unmarked_days)
# attendance unmarked
self.assertIn(getdate(add_days(attendance_date, 1)), unmarked_days)
# holiday considered in unmarked days
self.assertIn(first_sunday, unmarked_days)
def test_unmarked_days_excluding_holidays(self):
first_sunday = get_first_sunday(
self.holiday_list, for_date=get_last_day(add_months(getdate(), -1))
)
attendance_date = add_days(first_sunday, 1)
employee = make_employee(
"[email protected]", date_of_joining=add_days(attendance_date, -1)
)
frappe.db.set_value("Employee", employee, "holiday_list", self.holiday_list)
mark_attendance(employee, attendance_date, "Present")
unmarked_days = get_unmarked_days(
employee, get_first_day(attendance_date), get_last_day(attendance_date), exclude_holidays=True
)
unmarked_days = [getdate(date) for date in unmarked_days]
# attendance already marked for the day
self.assertNotIn(attendance_date, unmarked_days)
# attendance unmarked
self.assertIn(getdate(add_days(attendance_date, 1)), unmarked_days)
# holidays not considered in unmarked days
self.assertNotIn(first_sunday, unmarked_days)
def test_unmarked_days_as_per_joining_and_relieving_dates(self):
first_sunday = get_first_sunday(
self.holiday_list, for_date=get_last_day(add_months(getdate(), -1))
)
date = add_days(first_sunday, 1)
doj = add_days(date, 1)
relieving_date = add_days(date, 5)
employee = make_employee(
"[email protected]", date_of_joining=doj, relieving_date=relieving_date
)
frappe.db.set_value("Employee", employee, "holiday_list", self.holiday_list)
attendance_date = add_days(date, 2)
mark_attendance(employee, attendance_date, "Present")
unmarked_days = get_unmarked_days(
employee, get_first_day(attendance_date), get_last_day(attendance_date)
)
unmarked_days = [getdate(date) for date in unmarked_days]
# attendance already marked for the day
self.assertNotIn(attendance_date, unmarked_days)
# date before doj not in unmarked days
self.assertNotIn(add_days(doj, -1), unmarked_days)
# date after relieving not in unmarked days
self.assertNotIn(add_days(relieving_date, 1), unmarked_days)
def tearDown(self):
frappe.db.rollback() |
flip coin | import re
###################
#### CONSTANTS ####
###################
## Change these strings to whatever you choose to name your piles from the XML
deck = "Deck"
discard = "Discard"
## Change this HEX string value to customize the highlight color
highlight = "#ff0000"
# Change this positive integer value to customize the default Draw Many count
drawManyDefault = 5
# Change this tuple if you want to create a specific default marker (not recommended)
StandardMarker = ("Marker", "d9851c6f-2ed7-4ca9-82d2-f22e4e12114c")
######################
#### PILE ACTIONS ####
######################
def shuffle(group, x = 0, y = 0):
mute()
group.shuffle()
notify("{} shuffles their {}.".format(me, group.name))
def draw(group, x = 0, y = 0):
mute()
if len(group) < 1:
return
card = group.top()
card.moveTo(card.owner.hand)
notify("{} draws a card from {}.".format(me, group.name))
def drawMany(group, x = 0, y = 0):
if len(group) < 1:
return
mute()
global drawManyDefault
count = askInteger("Draw how many cards?", drawManyDefault)
drawManyDefault = count
for card in group.top(count):
card.moveTo(card.owner.hand)
notify("{} draws {} cards from {}.".format(me, count, group.name))
def discardMany(group, x = 0, y = 0):
if len(group) < 1:
return
mute()
count = askInteger("Discard how many cards?", 1)
for card in group.top(count):
card.moveTo(card.owner.piles[discard])
notify("{} discards {} cards from {}.".format(me, count, group.name))
def allToDeck(group, x = 0, y = 0):
mute()
for card in group:
card.moveTo(card.owner.piles[deck])
notify("{} moves all cards from {} to {}.".format(me, group.name, me.piles[deck].name))
######################
#### HAND ACTIONS ####
######################
def randomDiscard(group, x = 0, y = 0):
mute()
card = group.random()
if card == None:
return
card.moveTo(me.piles[discard])
notify("{} randomly discards {} from {}.".format(me, card, group.name))
def randomPick(group, x = 0, y = 0):
mute()
card = group.random()
if card == None:
return
if confirm("Reveal randomly-picked {}?".format(card.Name)):
index = card.index
card.moveTo(me.piles[discard])
notify("{} randomly picks {} from their {}.".format(me, card, group.name))
card.moveTo(me.hand, index)
else:
notify("{} randomly picks {} (hidden) from their {}.".format(me, card, group.name))
card.select()
card.target(True)
#######################
#### TABLE ACTIONS ####
#######################
def rollDice(group, x = 0, y = 0):
mute()
n = rnd(1, 6)
notify("{} rolls {} on a 6-sided die.".format(me, n))
def METHOD_NAME(group, x = 0, y = 0):
mute()
n = rnd(1, 2)
if n == 1:
notify("{} flips heads.".format(me))
else:
notify("{} flips tails.".format(me))
def interrupt(group, x = 0, y = 0):
notify('{} interrupts the game.'.format(me))
def passTurn(group, x = 0, y = 0):
notify('{} passes.'.format(me))
def addAnyMarker(cards, x = 0, y = 0):
mute()
marker, quantity = askMarker()
if quantity == 0: return
for card in cards:
card.markers[marker] += quantity
notify("{} adds {} {} marker(s) to {}.".format(me, quantity, marker[0], card))
def addMarker(cards, x = 0, y = 0):
mute()
for card in cards:
card.markers[StandardMarker] += 1
notify("{} adds a marker to {}.".format(me, card))
def removeMarker(cards, x = 0, y = 0):
mute()
for card in cards:
if card.markers[StandardMarker] < 1:
return
card.markers[StandardMarker] -= 1
notify("{} removes a marker from {}.".format(me, card))
def rotate(cards, x = 0, y = 0):
mute()
for card in cards:
card.orientation ^= Rot90
if card.orientation & Rot90 == Rot90:
notify('{} turns {} sideways'.format(me, card))
else:
notify('{} turns {} upright'.format(me, card))
def flip(cards, x = 0, y = 0):
mute()
for card in cards:
if card.isFaceUp == True:
notify("{} flips {} face down.".format(me, card))
card.isFaceUp = False
else:
card.isFaceUp = True
notify("{} flips {} face up.".format(me, card))
def highlightCard(cards, x = 0, y = 0):
mute()
for card in cards:
if card.highlight == highlight:
card.highlight = None
notify('{} removes highlight from {}'.format(me, card))
else:
card.highlight = highlight
notify('{} highlights {}'.format(me, card))
|
main | """
An attempt at a user friendly Cart3d GUI
"""
# -*- coding: utf-8 -*-
import sys
import os.path
# kills the program when you hit Cntl+C from the command line
# doesn't save the current state as presumably there's been an error
import signal
signal.signal(signal.SIGINT, signal.SIG_DFL)
from qtpy import QtCore, QtGui
from qtpy.QtWidgets import QApplication, QMessageBox
# 3rd party
import vtk
# pyNastran
import pyNastran
from pyNastran.gui.formats import Cart3dIO#, CLASS_MAP
from pyNastran.gui.arg_handling import get_inputs
#from pyNastran.gui.qt_files.gui_qt_common import GuiQtCommon
from pyNastran.gui.gui_common import GuiCommon2
try:
PKG_PATH = sys._MEIPASS #@UndefinedVariable
SCRIPT_PATH = os.path.join(PKG_PATH, 'scripts')
ICON_PATH = os.path.join(PKG_PATH, 'icons')
except Exception:
PKG_PATH = pyNastran.__path__[0]
SCRIPT_PATH = os.path.join(PKG_PATH, 'gui', 'scripts')
ICON_PATH = os.path.join(PKG_PATH, 'gui', 'icons')
class MainWindow(GuiCommon2):
def __init__(self, inputs, **kwds):
html_logging = True
fmt_order = ['cart3d']
kwds['inputs'] = inputs
kwds['fmt_order'] = fmt_order
kwds['html_logging'] = html_logging
super(MainWindow, self).__init__(**kwds)
self.base_window_title = "pyCart3d v%s" % pyNastran.__version__
self.build_fmts(fmt_order, stop_on_failure=True)
logo = os.path.join(ICON_PATH, 'logo.png')
self.logo = logo
self.set_script_path(SCRIPT_PATH)
self.set_icon_path(ICON_PATH)
self.setup_gui()
self.setup_post(inputs)
def _cart3d_remap_bcs(self):
pass
def _create_cart3d_tools_and_menu_items(self):
tools = [
('about_cart3d', 'About Cart3d GUI', 'tabout.png', 'CTRL+H', 'About Cart3d GUI and help on shortcuts', self.about_dialog),
#('about', 'About Orig GUI', 'tabout.png', 'CTRL+H', 'About Nastran GUI and help on shortcuts', self.about_dialog),
]
self.menu_edit = self.menubar.addMenu('Edit Cart3d')
self.menu_help_cart3d = self.menubar.addMenu('&Help')
self.menu_help.menuAction().setVisible(False)
#self.file.menuAction().setVisible(False)
#self.menu_help.
#self.actions['about'].Disable()
menu_items = [
(self.menu_help_cart3d, ('about_cart3d',)),
#(self.menu_help, ('load_geometry', 'load_results', 'script', '', 'exit')),
#(self.menu_help2, ('load_geometry', 'load_results', 'script', '', 'exit')),
]
return tools, menu_items
def _cleanup_cart3d_tools_and_menu_items(self):
self.menu_help.menuAction().setVisible(True)
self.menu_help2.menuAction().setVisible(False)
self.menu_edit.menuAction().setVisible(False)
def about_dialog(self):
""" Display about dialog """
copyright = pyNastran.__pyqt_copyright__
about = [
'pyCart3d Qt GUI',
'',
'pyCart3d v%s' % pyNastran.__version__,
copyright,
pyNastran.__author__,
'',
'%s' % pyNastran.__website__,
'',
'Mouse',
'Left Click - Rotate',
'Middle Click - Pan/Recenter Rotation Point',
'Shift + Left Click - Pan/Recenter Rotation Point',
'Right Mouse / Wheel - Zoom',
'',
'Keyboard Controls',
#'r - reset camera view',
#'X/x - snap to x axis',
#'Y/y - snap to y axis',
#'Z/z - snap to z axis',
#'',
#'h - show/hide legend & info',
'CTRL+I - take a screenshot (image)',
'CTRL+L - cycle results',
#'m/M - scale up/scale down by 1.1 times',
#'o/O - rotate counter-clockwise/clockwise 5 degrees',
's - view model as a surface',
'w - view model as a wireframe',
'',
'Reload Model: using the same filename reload the model',
]
        QMessageBox.about(self, "About pyCart3d GUI", "\n".join(about))
def on_reload(self):
case = self.icase
if self.format == 'usm3d':
self.step_results_usm3d()
else:
self.on_load_geometry(self.infile_name, self.format)
msg = '%s - %s - %s' % (self.format, self.infile_name, self.out_filename)
self.window_title = msg
self.log_command('on_reload()')
self.cycle_results(case)
def closeEvent(self, event):
"""
Handling saving state before application when application is
being closed.
"""
settings = QtCore.QSettings()
settings.setValue("main_WindowGeometry", self.saveGeometry())
settings.setValue("mainWindowState", self.saveState())
self.settings.save(settings)
#screen_shape = QtGui.QDesktopWidget().screenGeometry()
main_window = self.window()
width = main_window.frameGeometry().width()
height = main_window.frameGeometry().height()
settings.setValue('screen_shape', (width, height))
qpos = self.pos()
pos = qpos.x(), qpos.y()
settings.setValue('pos', pos)
q_app = QApplication.instance()
if q_app is None:
sys.exit()
q_app.quit()
def METHOD_NAME():
app = QApplication(sys.argv)
QApplication.setOrganizationName("pyCart3d")
QApplication.setOrganizationDomain(pyNastran.__website__)
QApplication.setApplicationName("pyCart3d")
QApplication.setApplicationVersion(pyNastran.__version__)
inputs = get_inputs()
window = MainWindow(inputs)
sys.exit(app.exec_())
if __name__ == '__main__': # pragma: no cover
METHOD_NAME() |
delete keys from containers | # -*- coding: utf-8 -*-
from contextlib import contextmanager
from datetime import datetime
from os import sep
from typing import Optional, Any, List, Tuple, Union
import dateutil.parser
import deprecation
import logging
from sceptre.exceptions import PathConversionError
from sceptre import __version__
def logging_level():
"""
Return the logging level.
"""
logger = logging.getLogger(__name__)
return logger.getEffectiveLevel()
def get_external_stack_name(project_code, stack_name):
"""
Returns the name given to a stack in CloudFormation.
:param project_code: The project code, as defined in config.yaml.
:type project_code: str
:param stack_name: The name of the stack.
:type stack_name: str
:returns: The name given to the stack in CloudFormation.
:rtype: str
"""
return "-".join([project_code, stack_name.replace("/", "-")])
def mask_key(key):
"""
    Returns a masked version of ``key``.
    The returned version has all but the last four characters replaced with
    the character "*".
    :param key: The string to mask.
    :type key: str
    :returns: A masked version of the key
:rtype: str
"""
num_mask_chars = len(key) - 4
return "".join(["*" if i < num_mask_chars else c for i, c in enumerate(key)])
def _call_func_on_values(func, attr, cls):
"""
Searches through dictionary or list for objects of type `cls` and calls the
supplied function `func`. Supports nested dictionaries and lists.
Does not detect objects used as keys in dictionaries.
:param attr: A dictionary or list to search through.
:type attr: dict or list
:return: The dictionary or list structure.
:rtype: dict or list
"""
def func_on_instance(key):
if isinstance(value, cls):
func(attr, key, value)
elif isinstance(value, list) or isinstance(value, dict):
_call_func_on_values(func, value, cls)
if isinstance(attr, dict):
for key, value in attr.items():
func_on_instance(key)
elif isinstance(attr, list):
for index, value in enumerate(attr):
func_on_instance(index)
return attr
Container = Union[list, dict]
Key = Union[str, int]
def METHOD_NAME(keys_to_delete: List[Tuple[Container, Key]]):
"""Removes the indicated keys/indexes from their paired containers."""
list_items_to_delete = []
for container, key in keys_to_delete:
if isinstance(container, list):
# If it's a list, we want to gather up the items to remove from the list.
# We don't want to modify the list length yet, since removals will change all the other
# list indexes. Instead, we'll get the actual items at those indexes to remove later.
list_items_to_delete.append((container, container[key]))
else:
del container[key]
# Finally, now that we have all the items we want to remove the lists, we'll remove those
# items specifically from the lists.
for containing_list, item in list_items_to_delete:
containing_list.remove(item)
def normalise_path(path):
"""
Converts a path to use correct path separator.
Raises an PathConversionError if the path has a
trailing slash.
:param path: A directory path
:type path: str
:raises: sceptre.exceptions.PathConversionError
:returns: A normalised path with forward slashes.
:returns: string
"""
if sep == "/":
path = path.replace("\\", "/")
elif sep == "\\":
path = path.replace("/", "\\")
if path.endswith("/") or path.endswith("\\"):
raise PathConversionError(
"'{0}' is an invalid path string. Paths should "
"not have trailing slashes.".format(path)
)
return path
def sceptreise_path(path):
"""
Converts a path to use correct sceptre path separator.
Raises an PathConversionError if the path has a
trailing slash.
:param path: A directory path
:type path: str
:raises: sceptre.exceptions.PathConversionError
:returns: A normalised path with forward slashes.
:returns: string
"""
path = path.replace("\\", "/")
if path.endswith("/") or path.endswith("\\"):
raise PathConversionError(
"'{0}' is an invalid path string. Paths should "
"not have trailing slashes.".format(path)
)
return path
@contextmanager
def null_context():
"""A context manager that does nothing. This is identical to the nullcontext in py3.7+, but isn't
available in py3.6, so providing it here instead.
"""
yield
def extract_datetime_from_aws_response_headers(
boto_response: dict,
) -> Optional[datetime]:
"""Returns a datetime.datetime extracted from the response metadata in a
boto response or None if it's unable to find or parse one.
:param boto_response: A dictionary returned from a boto client call
:returns a datetime.datetime or None
"""
if boto_response is None:
return None
try:
return dateutil.parser.parse(
boto_response["ResponseMetadata"]["HTTPHeaders"]["date"]
)
except (KeyError, dateutil.parser.ParserError):
# We expect a KeyError if the date isn't present in the response. We
# expect a ParserError if it's not well-formed. Any other error we want
# to pass along.
return None
def gen_repr(instance: Any, class_label: str = None, attributes: List[str] = []) -> str:
"""
Returns a standard __repr__ based on instance attributes.
:param instance: The instance to represent (`self`).
:param class_label: Override the name of the class found through introspection.
    :param attributes: List the attributes to include in the representation.
:returns: A string representation of `instance`
"""
if not class_label:
class_label = instance.__class__.__name__
attr_str = ", ".join(
[f"{a}={repr(instance.__getattribute__(a))}" for a in attributes]
)
return f"{class_label}({attr_str})"
def create_deprecated_alias_property(
alias_from: str, alias_to: str, deprecated_in: str, removed_in: Optional[str]
) -> property:
"""Creates a property object with a deprecated getter and a deprecated setter that emit warnings
when used, aliasing to their renamed property names.
:param alias_from: The name of the attribute that is deprecated and that needs to be aliased
:param alias_to: The name of the attribute to alias the deprecated field to.
:param deprecated_in: The version in which the property is deprecated.
:param removed_in: The version when it will be removed, after which the alias will no longer work.
This value can be None, indicating that removal is not yet planned.
:return: A property object to be assigned directly onto a class.
"""
def getter(self):
return getattr(self, alias_to)
getter.__name__ = alias_from
def setter(self, value):
setattr(self, alias_to, value)
setter.__name__ = alias_from
deprecation_kwargs = dict(
deprecated_in=deprecated_in,
removed_in=removed_in,
current_version=__version__,
details=(
f'It is being renamed to "{alias_to}". You should migrate all uses of "{alias_from}" to '
f"that in order to avoid future breakage."
),
)
deprecated_getter = deprecation.deprecated(**deprecation_kwargs)(getter)
deprecated_setter = deprecation.deprecated(**deprecation_kwargs)(setter)
deprecated_property = property(deprecated_getter, deprecated_setter)
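    # Hedged usage sketch (class and attribute names are hypothetical):
    #
    #   class Stack:
    #       template_path = create_deprecated_alias_property(
    #           "template_path", "template_handler_config", "2.7.0", None
    #       )
    #
    # Reading or assigning stack.template_path then proxies to
    # stack.template_handler_config while emitting a deprecation warning.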
return deprecated_property |
decode special data | ###############################################################
# Copyright 2023 Lawrence Livermore National Security, LLC
# (c.f. AUTHORS, NOTICE.LLNS, COPYING)
#
# This file is part of the Flux resource manager framework.
# For details, see https://github.com/flux-framework.
#
# SPDX-License-Identifier: LGPL-3.0
###############################################################
import errno
import json
from _flux._core import ffi, lib
from flux.future import WaitAllFuture
from flux.job import JobID
from flux.rpc import RPC
def _decode_field(data, key):
try:
tmp = json.loads(data[key])
data[key] = tmp
except json.decoder.JSONDecodeError:
# Ignore if can't be decoded
pass
# a few keys are special, decode them into dicts if you can
def METHOD_NAME(data):
for key in ("jobspec", "R"):
if key in data:
_decode_field(data, key)
class JobInfoLookupRPC(RPC):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.jobid = None
def get(self):
return super().get()
def get_decode(self):
data = super().get()
METHOD_NAME(data)
return data
def job_info_lookup(flux_handle, jobid, keys=["jobspec"]):
payload = {"id": int(jobid), "keys": keys, "flags": 0}
rpc = JobInfoLookupRPC(flux_handle, "job-info.lookup", payload)
rpc.jobid = jobid
return rpc
def _original_setup(keys, original):
jobspec_original = False
J_appended = False
if original and "jobspec" in keys:
keys.remove("jobspec")
jobspec_original = True
if "J" not in keys:
keys.append("J")
J_appended = True
return jobspec_original, J_appended
def _get_original_jobspec(job_data):
J = bytes(job_data["J"], encoding="utf8")
val = lib.flux_unwrap_string(J, False, ffi.NULL, ffi.NULL)
result = ffi.string(val)
lib.free(val)
return result.decode("utf-8")
def _original_update(job_data, decode, jobspec_original, J_appended):
if jobspec_original:
job_data["jobspec"] = _get_original_jobspec(job_data)
if decode:
_decode_field(job_data, "jobspec")
if J_appended:
job_data.pop("J")
# jobs_kvs_lookup simple variant for one jobid
def job_kvs_lookup(flux_handle, jobid, keys=["jobspec"], decode=True, original=False):
"""
Lookup job kvs data based on a jobid
:flux_handle: A Flux handle obtained from flux.Flux()
:jobid: jobid to lookup info for
:keys: Optional list of keys to fetch. (default is "jobspec")
:decode: Optional flag to decode special data into Python data structures
currently decodes "jobspec" and "R" into dicts
(default True)
:original: For 'jobspec', return the original submitted jobspec
"""
keyscpy = list(keys)
jobspec_original, J_appended = _original_setup(keyscpy, original)
payload = {"id": int(jobid), "keys": keyscpy, "flags": 0}
rpc = JobInfoLookupRPC(flux_handle, "job-info.lookup", payload)
try:
if decode:
rsp = rpc.get_decode()
else:
rsp = rpc.get()
_original_update(rsp, decode, jobspec_original, J_appended)
# The job does not exist!
except FileNotFoundError:
return None
return rsp
class JobKVSLookupFuture(WaitAllFuture):
"""Wrapper Future for multiple jobids"""
def __init__(self):
super(JobKVSLookupFuture, self).__init__()
self.errors = []
def _get(self, decode=True):
jobs = []
# Wait for all RPCs to complete
self.wait_for()
# Get all successful jobs, accumulate errors in self.errors
for child in self.children:
try:
if decode:
rsp = child.get_decode()
else:
rsp = child.get()
jobs.append(rsp)
except EnvironmentError as err:
if err.errno == errno.ENOENT:
msg = f"JobID {child.jobid.orig} unknown"
else:
msg = f"rpc: {err.strerror}"
self.errors.append(msg)
return jobs
def get(self):
"""get all successful results, appending errors into self.errors"""
return self._get(False)
def get_decode(self):
"""
get all successful results, appending errors into self.errors. Decode
special data into Python data structures
"""
return self._get(True)
class JobKVSLookup:
"""User friendly class to lookup job KVS data
:flux_handle: A Flux handle obtained from flux.Flux()
:ids: List of jobids to get data for
:keys: Optional list of keys to fetch. (default is "jobspec")
:decode: Optional flag to decode special data into Python data structures
currently decodes "jobspec" and "R" into dicts
(default True)
:original: For 'jobspec', return the original submitted jobspec
"""
def __init__(
self,
flux_handle,
ids=[],
keys=["jobspec"],
decode=True,
original=False,
):
self.handle = flux_handle
self.keys = list(keys)
self.ids = list(map(JobID, ids)) if ids else []
self.decode = decode
self.errors = []
self.jobspec_original, self.J_appended = _original_setup(self.keys, original)
def fetch_data(self):
"""Initiate the job info lookup to the Flux job-info module
JobKVSLookup.fetch_data() returns a JobKVSLookupFuture,
which will be fulfilled when the job data is available.
Once the Future has been fulfilled, a list of objects
can be obtained via JobKVSLookup.data(). If
JobKVSLookupFuture.errors is non-empty, then it will contain a
list of errors returned via the query.
"""
listids = JobKVSLookupFuture()
for jobid in self.ids:
listids.push(job_info_lookup(self.handle, jobid, self.keys))
return listids
def data(self):
"""Synchronously fetch a list of data responses
If the Future object returned by JobKVSLookup.fetch_data has
not yet been fulfilled (e.g. is_ready() returns False), then this call
may block. Otherwise, returns a list of responses for all job ids
returned.
"""
rpc = self.fetch_data()
if self.decode:
data = rpc.get_decode()
else:
data = rpc.get()
if hasattr(rpc, "errors"):
self.errors = rpc.errors
for job_data in data:
_original_update(
job_data, self.decode, self.jobspec_original, self.J_appended
)
return data |
field | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.util.enum import RichIntEnum
from indico.util.i18n import _
from indico.util.locators import locator_property
from indico.util.string import format_repr, text_to_repr
def _get_next_position(context):
"""Get the next contribution field position for the event."""
event_id = context.current_parameters['event_id']
res = db.session.query(db.func.max(ContributionField.position)).filter(ContributionField.event_id == event_id).one()
return (res[0] or 0) + 1
class ContributionFieldVisibility(RichIntEnum):
__titles__ = [None, _('Everyone'), _('Managers and submitters'), _('Only managers')]
__css_classes__ = [None, 'public', 'submitters', 'managers']
public = 1
managers_and_submitters = 2
managers_only = 3
class ContributionField(db.Model):
__tablename__ = 'contribution_fields'
__table_args__ = (db.UniqueConstraint('event_id', 'legacy_id'),
{'schema': 'events'})
id = db.Column(
db.Integer,
primary_key=True
)
event_id = db.Column(
db.Integer,
db.ForeignKey('events.events.id'),
index=True,
nullable=False
)
legacy_id = db.Column(
db.String,
nullable=True
)
position = db.Column(
db.Integer,
nullable=False,
default=_get_next_position
)
title = db.Column(
db.String,
nullable=False
)
description = db.Column(
db.Text,
nullable=False,
default=''
)
is_required = db.Column(
db.Boolean,
nullable=False,
default=False
)
is_active = db.Column(
db.Boolean,
nullable=False,
default=True
)
is_user_editable = db.Column(
db.Boolean,
nullable=False,
default=True
)
visibility = db.Column(
PyIntEnum(ContributionFieldVisibility),
nullable=False,
default=ContributionFieldVisibility.public
)
field_type = db.Column(
db.String,
nullable=True
)
field_data = db.Column(
JSONB,
nullable=False,
default={}
)
event = db.relationship(
'Event',
lazy=True,
backref=db.backref(
'contribution_fields',
order_by=position,
cascade='all, delete-orphan',
lazy='dynamic'
)
)
# relationship backrefs:
# - abstract_values (AbstractFieldValue.contribution_field)
# - contribution_values (ContributionFieldValue.contribution_field)
def _get_field(self, management=False):
from indico.modules.events.contributions import get_contrib_field_types
try:
impl = get_contrib_field_types()[self.field_type]
except KeyError:
return None
return impl(self, management=management)
@property
def METHOD_NAME(self):
return self._get_field()
@property
def mgmt_field(self):
return self._get_field(management=True)
@property
def is_public(self):
return self.visibility == ContributionFieldVisibility.public
@property
def filter_choices(self):
no_value = {None: _('No value')}
return no_value | {x['id']: x['option'] for x in self.field_data.get('options', {})}
def __repr__(self):
return format_repr(self, 'id', 'field_type', is_required=False, is_active=True, _text=self.title)
@locator_property
def locator(self):
return dict(self.event.locator, contrib_field_id=self.id)
class ContributionFieldValueBase(db.Model):
__abstract__ = True
#: The name of the backref on the `ContributionField`
contribution_field_backref_name = None
data = db.Column(
JSONB,
nullable=False
)
@declared_attr
def contribution_field_id(cls):
return db.Column(
db.Integer,
db.ForeignKey('events.contribution_fields.id', name=f'fk_{cls.__tablename__}_contribution_field'),
primary_key=True,
index=True
)
@declared_attr
def contribution_field(cls):
return db.relationship(
'ContributionField',
lazy=False,
backref=db.backref(
cls.contribution_field_backref_name,
cascade='all, delete-orphan',
lazy=True
)
)
@property
def friendly_data(self):
return self.contribution_field.METHOD_NAME.get_friendly_value(self.data)
class ContributionFieldValue(ContributionFieldValueBase):
__tablename__ = 'contribution_field_values'
__table_args__ = {'schema': 'events'}
contribution_field_backref_name = 'contribution_values'
contribution_id = db.Column(
db.Integer,
db.ForeignKey('events.contributions.id'),
index=True,
nullable=False,
primary_key=True
)
# relationship backrefs:
# - contribution (Contribution.field_values)
def __repr__(self):
text = text_to_repr(self.data) if isinstance(self.data, str) else self.data
return format_repr(self, 'contribution_id', 'contribution_field_id', _text=text) |
set value | # -*- coding: utf-8 -*-
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# this file implements the ServerWrapper class, which takes care
# of all the load and save functions for misc tables associated
# with a server (such as packages, hardware, history)
#
# the server.Server class inherits this ServerWrapper class
#
from .server_hardware import Hardware
from .server_packages import Packages
from .server_history import History
from .server_suse import SuseData
from rhn.UserDictCase import UserDictCase
from spacewalk.server import rhnSQL
class ServerWrapper(Packages, Hardware, History, SuseData):
""" This is a middle class that ties all the subclasses together, plus it
provides a cleaner way to keep all the wrapper functions in one place.
The main Server class is based on this one and it looks a little bit
cleaner that way.
"""
def __init__(self):
self.server = UserDictCase()
Packages.__init__(self)
History.__init__(self)
Hardware.__init__(self)
SuseData.__init__(self)
def __repr__(self):
return "<%s instance>" % (self.__class__,)
def METHOD_NAME(self, name, value):
""" update a value in self.server """
if name is None or value is None:
return -1
self.server[name] = value
return 0
###
# PACKAGES
###
def add_package(self, entry):
""" Wrappers for the similar functions from Packages class that supplementaly
require a valid sysid.
"""
return Packages.add_package(self, self.server.get("id"), entry)
def delete_package(self, entry):
return Packages.delete_package(self, self.server.get("id"), entry)
def dispose_packages(self):
return Packages.dispose_packages(self, self.server["id"])
def save_packages(self, schedule=1):
""" wrapper for the Packages.save_packages_byid() which requires the sysid """
ret = self.save_packages_byid(self.server["id"], schedule=schedule)
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
###
# HARDWARE
###
def delete_hardware(self):
""" Wrappers for the similar functions from Hardware class """
return Hardware.delete_hardware(self, self.server.get("id"))
def save_hardware(self):
""" wrapper for the Hardware.save_hardware_byid() which requires the sysid """
ret = self.save_hardware_byid(self.server["id"])
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
def reload_hardware(self):
""" wrapper for the Hardware.reload_hardware_byid() which requires the sysid """
ret = self.reload_hardware_byid(self.server["id"])
return ret
###
# HISTORY
###
def save_history(self):
ret = self.save_history_byid(self.server["id"])
# this function is primarily called from outside
# so we have to commit here
rhnSQL.commit()
return ret
###
### SUSE PRODUCT DATA
###
def save_suse_products(self):
ret = self.save_suse_products_byid(self.server["id"])
rhnSQL.commit()
return ret
def update_suse_products(self, products):
self.add_suse_products(products)
return self.save_suse_products() |
dump anchor |
# Copyright 2008 Johannes Reinhardt <[email protected]>
# Copyright 2012-2020 Jaap Karssenberg <[email protected]>
'''This modules handles export of LaTeX Code'''
import os
import re
import string
import logging
from zim.newfs import FilePath
from zim.formats import *
from zim.formats.plain import Dumper as TextDumper
from zim.parsing import url_encode, URL_ENCODE_READABLE
from zim.config.dicts import Choice
logger = logging.getLogger('zim.formats.latex')
info = {
'name': 'latex',
'desc': 'LaTeX',
'mimetype': 'application/x-tex',
'extension': 'tex',
'native': False,
'import': False,
'export': True,
'usebase': True,
}
encode_re = re.compile(r'([&$^%#_\\<>\n])')
encode_dict = {
'\\': '$\\backslash$',
    '&': '\\& ',
'$': '\\$ ',
'^': '\\^{}',
'%': '\\%',
'#': '\\# ',
'_': '\\_',
'>': '\\textgreater{}',
'<': '\\textless{}',
'\n': '\n\n',
}
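# Illustrative escaping result: substituting via encode_re/encode_dict turns
# "#1" into "\# 1" (note the trailing space some replacements carry).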
class Dumper(TextDumper):
BULLETS = {
UNCHECKED_BOX: '\\item[\\Square]',
XCHECKED_BOX: '\\item[\\XBox]',
CHECKED_BOX: '\\item[\\CheckedBox]',
MIGRATED_BOX: '\\item[\\RIGHTarrow]',
TRANSMIGRATED_BOX: '\\item[\\LEFTarrow]',
BULLET: '\\item',
}
SECTIONING = {
'report': {
1: '\\chapter{%s}',
2: '\\section{%s}',
3: '\\subsection{%s}',
4: '\\subsubsection{%s}',
5: '\\paragraph{%s}'
},
'article': {
1: '\\section{%s}',
2: '\\subsection{%s}',
3: '\\subsubsection{%s}',
4: '\\paragraph{%s}',
5: '\\subparagraph{%s}'
},
'book': {
1: '\\part{%s}',
2: '\\chapter{%s}',
3: '\\section{%s}',
4: '\\subsection{%s}',
5: '\\subsubsection{%s}'
}
}
TAGS = {
EMPHASIS: ('\\emph{', '}'),
STRONG: ('\\textbf{', '}'),
MARK: ('\\uline{', '}'),
STRIKE: ('\\sout{', '}'),
TAG: ('', ''), # No additional annotation (apart from the visible @)
SUBSCRIPT: ('$_{', '}$'),
SUPERSCRIPT: ('$^{', '}$'),
}
TEMPLATE_OPTIONS = {
'document_type': Choice('report', ('report', 'article', 'book'))
}
def dump(self, tree):
assert isinstance(tree, ParseTree)
assert self.linker, 'LaTeX dumper needs a linker object'
self.document_type = self.template_options['document_type']
logger.info('used document type: %s' % self.document_type)
return TextDumper.dump(self, tree)
@staticmethod
def encode_text(tag, text):
if tag not in (VERBATIM_BLOCK, VERBATIM, OBJECT):
return encode_re.sub(lambda m: encode_dict[m.group(1)], text)
else:
return text
def dump_pre(self, tag, attrib, strings):
indent = int(attrib.get('indent', 0))
text = ''.join(strings)
text = text.replace('\n\n', '\n') # remove newlines introduced by encode_text()
strings = text.splitlines(True)
if indent:
strings = self.prefix_lines(' ' * indent, strings)
strings.insert(0, '\n\\begin{lstlisting}\n')
strings.append('\n\\end{lstlisting}\n')
return strings
def dump_h(self, tag, attrib, strings):
level = int(attrib['level'])
if level < 1:
level = 1
elif level > 5:
level = 5
text = ''.join(strings).strip('\n')
return [self.SECTIONING[self.document_type][level] % text, '\n']
def _start_list(self):
if self.context[-1].tag == LISTITEM and \
self.context[-1].text and self.context[-1].text[-1].endswith('\n\n'):
# strip '\n' introduced by encode_text()
self.context[-1].text[-1] = self.context[-1].text[-1][:-1]
def dump_ul(self, tag, attrib, strings):
self._start_list()
strings.insert(0, '\\begin{itemize}\n')
strings.append('\\end{itemize}\n')
return TextDumper.dump_ul(self, tag, attrib, strings)
def dump_ol(self, tag, attrib, strings):
self._start_list()
start = attrib.get('start', 1)
if start in string.ascii_lowercase:
type = 'a'
start = string.ascii_lowercase.index(start) + 1
elif start in string.ascii_uppercase:
type = 'A'
start = string.ascii_uppercase.index(start) + 1
else:
type = '1'
start = int(start)
strings.insert(0, '\\begin{enumerate}[%s]\n' % type)
if start > 1:
strings.insert(1, '\\setcounter{enumi}{%i}\n' % (start - 1))
strings.append('\\end{enumerate}\n')
return TextDumper.dump_ol(self, tag, attrib, strings)
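# Illustrative example: a numbered list whose 'start' attribute is 'c' is
# emitted as '\begin{enumerate}[a]' followed by '\setcounter{enumi}{2}', i.e.
# an alphabetic counter advanced past the first two letters.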
def dump_li(self, tag, attrib, strings):
# Always return "\item" for numbered lists
if self.context[-1].tag == BULLETLIST:
if 'bullet' in attrib \
and attrib['bullet'] in self.BULLETS:
bullet = self.BULLETS[attrib['bullet']]
else:
bullet = self.BULLETS[BULLET]
elif self.context[-1].tag == NUMBEREDLIST:
bullet = self.BULLETS[BULLET]
else:
assert False, 'Unnested li element'
if strings[-1].endswith('\n\n'):
strings[-1] = strings[-1][:-1] # strip '\n' introduced by encode_text()
return (bullet, ' ') + tuple(strings)
def is_supported_image(self, path):
# Latex only supports limited image formats by default
# Whitelist pdf, png, jpg & eps -- all else should become a link
if '.' in path:
_, ext = path.rsplit('.', 1)
return ext.lower() in ('png', 'jpg', 'jpeg', 'eps', 'pdf')
else:
return False
def dump_img(self, tag, attrib, strings=None):
imagepath = self.linker.img(attrib['src'])
if not self.is_supported_image(imagepath):
attrib.setdefault('href', attrib['src'])
return self.dump_link(tag, attrib, strings)
# We try to keep images at about the same visual size, therefore we need
# to specify the dot density; 96 dpi seems to be common for computer monitors
dpi = 96
if 'width' in attrib and not 'height' in attrib:
options = 'width=%fin, keepaspectratio=true' \
% (float(attrib['width']) / dpi)
elif 'height' in attrib and not 'width' in attrib:
options = 'height=%fin, keepaspectratio=true' \
% (float(attrib['height']) / dpi)
elif 'height' in attrib and 'width' in attrib:
options = 'height=%fin, width=%fin' \
% (float(attrib['height']) / dpi, float(attrib['width']) / dpi)
else:
options = ''
if imagepath.startswith('file://'):
try:
imagepath = FilePath(imagepath).path # avoid URIs here
except Exception:
pass # e.g. non-local uri, malformed path
image = '\\includegraphics[%s]{%s}' % (options, imagepath)
if 'href' in attrib:
href = self.linker.link(attrib['href'])
return ['\\href{%s}{%s}' % (href, image)]
else:
return [image]
def METHOD_NAME(self, tag, attrib, strings=None):
return ("\\label{", attrib['name'], "}")
def dump_link(self, tag, attrib, strings=None):
# TODO: how do you do page links within an exported document
# use \label{} per page start and \ref to link it ??
href = self.linker.link(attrib['href'])
if href.startswith('#'):
return ['\\ref{%s}' % href.lstrip('#')]
else:
href = url_encode(href, URL_ENCODE_READABLE)
if strings:
text = ''.join(strings)
else:
text = href
return ['\\href{%s}{%s}' % (href, text)]
def dump_code(self, tag, attrib, strings):
# Here we try several possible delimiters for the inline verb
# command of LaTeX
text = ''.join(strings)
for delim in '+*|$&%!-_':
if not delim in text:
return ['\\lstinline' + delim + text + delim]
else:
assert False, 'Found no suitable delimiter for verbatim text: %s' % text
dump_object_fallback = dump_pre
def dump_table(self, tag, attrib, strings):
table = [] # result table
rows = strings
aligns, _wraps = TableParser.get_options(attrib)
rowline = lambda row: '&'.join([' ' + cell + ' ' for cell in row]) + '\\tabularnewline\n\\hline'
aligns = ['l' if a == 'left' else 'r' if a == 'right' else 'c' if a == 'center' else 'l' for a in aligns]
for i, row in enumerate(rows):
for j, (cell, align) in enumerate(zip(row, aligns)):
if '\n' in cell:
# use "\\" as the LaTeX row separator inside \shortstack
rows[i][j] = '\\shortstack[' + align + ']{' + cell.replace("\n", "\\\\") + '}'
# print table
table.append('\\begin{tabular}{ |' + '|'.join(aligns) + '| }')
table.append('\\hline')
table += [rowline(rows[0])]
table.append('\\hline')
table += [rowline(row) for row in rows[1:]]
table.append('\\end{tabular}')
return [line + "\n" for line in table]
def dump_line(self, tag, attrib, strings=None):
return '\n\\hrule\n' |
resolve products | from django.db.models import Exists, OuterRef, Sum
from ...channel.models import Channel
from ...order import OrderStatus
from ...order.models import Order
from ...permission.utils import has_one_of_permissions
from ...product import models
from ...product.models import ALL_PRODUCTS_PERMISSIONS
from ..channel import ChannelQsContext
from ..core import ResolveInfo
from ..core.context import get_database_connection_name
from ..core.tracing import traced_resolver
from ..core.utils import from_global_id_or_error
from ..utils import get_user_or_app_from_context
from ..utils.filters import filter_by_period
def resolve_category_by_id(id):
return models.Category.objects.filter(pk=id).first()
def resolve_category_by_slug(slug):
return models.Category.objects.filter(slug=slug).first()
def resolve_categories(_info: ResolveInfo, level=None):
qs = models.Category.objects.prefetch_related("children")
if level is not None:
qs = qs.filter(level=level)
return qs.distinct()
def resolve_collection_by_id(_info: ResolveInfo, id, channel_slug, requestor):
return (
models.Collection.objects.visible_to_user(requestor, channel_slug=channel_slug)
.filter(id=id)
.first()
)
def resolve_collection_by_slug(_info: ResolveInfo, slug, channel_slug, requestor):
return (
models.Collection.objects.visible_to_user(requestor, channel_slug)
.filter(slug=slug)
.first()
)
def resolve_collections(info: ResolveInfo, channel_slug):
requestor = get_user_or_app_from_context(info.context)
qs = models.Collection.objects.visible_to_user(requestor, channel_slug)
return ChannelQsContext(qs=qs, channel_slug=channel_slug)
def resolve_digital_content_by_id(id):
return models.DigitalContent.objects.filter(pk=id).first()
def resolve_digital_contents(_info: ResolveInfo):
return models.DigitalContent.objects.all()
def resolve_product(
info: ResolveInfo, id, slug, external_reference, channel_slug, requestor
):
database_connection_name = get_database_connection_name(info.context)
qs = models.Product.objects.using(database_connection_name).visible_to_user(
requestor, channel_slug=channel_slug
)
if id:
_type, id = from_global_id_or_error(id, "Product")
return qs.filter(id=id).first()
elif slug:
return qs.filter(slug=slug).first()
else:
return qs.filter(external_reference=external_reference).first()
@traced_resolver
def METHOD_NAME(
info: ResolveInfo, requestor, channel_slug=None
) -> ChannelQsContext:
database_connection_name = get_database_connection_name(info.context)
qs = (
models.Product.objects.all()
.using(database_connection_name)
.visible_to_user(requestor, channel_slug)
)
if not has_one_of_permissions(requestor, ALL_PRODUCTS_PERMISSIONS):
channels = Channel.objects.filter(slug=str(channel_slug))
product_channel_listings = models.ProductChannelListing.objects.filter(
Exists(channels.filter(pk=OuterRef("channel_id"))),
visible_in_listings=True,
)
qs = qs.filter(
Exists(product_channel_listings.filter(product_id=OuterRef("pk")))
)
return ChannelQsContext(qs=qs, channel_slug=channel_slug)
def resolve_product_type_by_id(id):
return models.ProductType.objects.filter(pk=id).first()
def resolve_product_types(_info: ResolveInfo):
return models.ProductType.objects.all()
@traced_resolver
def resolve_variant(
_info: ResolveInfo,
id,
sku,
external_reference,
*,
channel_slug,
requestor,
requestor_has_access_to_all
):
visible_products = models.Product.objects.visible_to_user(
requestor, channel_slug
).values_list("pk", flat=True)
qs = models.ProductVariant.objects.filter(product__id__in=visible_products)
if not requestor_has_access_to_all:
qs = qs.available_in_channel(channel_slug)
if id:
_, id = from_global_id_or_error(id, "ProductVariant")
return qs.filter(pk=id).first()
elif sku:
return qs.filter(sku=sku).first()
else:
return qs.filter(external_reference=external_reference).first()
@traced_resolver
def resolve_product_variants(
_info: ResolveInfo,
requestor_has_access_to_all,
requestor,
ids=None,
channel_slug=None,
) -> ChannelQsContext:
visible_products = models.Product.objects.visible_to_user(requestor, channel_slug)
qs = models.ProductVariant.objects.filter(product__id__in=visible_products)
if not requestor_has_access_to_all:
visible_products = visible_products.annotate_visible_in_listings(
channel_slug
).exclude(visible_in_listings=False)
qs = qs.filter(product__in=visible_products).available_in_channel(channel_slug)
if ids:
db_ids = [
from_global_id_or_error(node_id, "ProductVariant")[1] for node_id in ids
]
qs = qs.filter(pk__in=db_ids)
return ChannelQsContext(qs=qs, channel_slug=channel_slug)
def resolve_report_product_sales(period, channel_slug) -> ChannelQsContext:
qs = models.ProductVariant.objects.all()
# filter by period
qs = filter_by_period(qs, period, "order_lines__order__created_at")
# annotate quantity
qs = qs.annotate(quantity_ordered=Sum("order_lines__quantity"))
# filter by channel and order status
channels = Channel.objects.filter(slug=channel_slug).values("pk")
exclude_status = [OrderStatus.DRAFT, OrderStatus.CANCELED, OrderStatus.EXPIRED]
orders = Order.objects.exclude(status__in=exclude_status).filter(
Exists(channels.filter(pk=OuterRef("channel_id")).values("pk"))
)
qs = qs.filter(
Exists(orders.filter(pk=OuterRef("order_lines__order_id"))),
quantity_ordered__isnull=False,
)
# order by quantity ordered
qs = qs.order_by("-quantity_ordered")
return ChannelQsContext(qs=qs, channel_slug=channel_slug) |
test fixed policy batched on nested observations | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test for tf_agents.policies.fixed_policy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.policies import fixed_policy
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.utils import test_utils
class FixedPolicyTest(test_utils.TestCase):
def setUp(self):
super(FixedPolicyTest, self).setUp()
# Creates an MDP with:
# - dim(observation) = 2
# - number of actions = 4
self._obs_spec = tensor_spec.TensorSpec([2], tf.float32)
self._time_step_spec = ts.time_step_spec(self._obs_spec)
self._num_actions = 4
self._action_spec = tensor_spec.BoundedTensorSpec(
shape=(1,), dtype=tf.int32, minimum=0, maximum=self._num_actions - 1
)
# The policy always outputs the same action.
self._fixed_action = 1
self._policy = fixed_policy.FixedPolicy(
np.asarray([self._fixed_action], dtype=np.int32),
self._time_step_spec,
self._action_spec,
)
def testFixedPolicySingle(self):
observations = tf.constant([1, 2], dtype=tf.float32)
time_step = ts.restart(observations)
action_step = self._policy.action(time_step)
distribution_step = self._policy.distribution(time_step)
mode = distribution_step.action.mode()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(self.evaluate(action_step.action), [self._fixed_action])
self.assertAllEqual(self.evaluate(mode), [self._fixed_action])
def testFixedPolicyBatched(self):
batch_size = 2
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=batch_size)
action_step = self._policy.action(time_step)
distribution_step = self._policy.distribution(time_step)
mode = distribution_step.action.mode()
self.evaluate(tf.compat.v1.global_variables_initializer())
self.assertAllEqual(
self.evaluate(action_step.action), [[self._fixed_action]] * batch_size
)
self.assertAllEqual(
self.evaluate(mode), [[self._fixed_action]] * batch_size
)
def METHOD_NAME(self):
batch_size = 2
observations = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
time_step = ts.restart(observations, batch_size=batch_size)
action_spec = (
tensor_spec.TensorSpec(shape=(2,), dtype=tf.float32),
(
tensor_spec.TensorSpec(shape=(1,), dtype=tf.int64),
{'dict': tensor_spec.TensorSpec(shape=(), dtype=tf.int32)},
),
)
fixed_action = (
np.array([100, 200], dtype=np.float32),
(np.array([300], dtype=np.int64), {'dict': 400}),
)
policy = fixed_policy.FixedPolicy(
fixed_action, self._time_step_spec, action_spec
) # pytype: disable=wrong-arg-types
action = policy.action(time_step).action
distribution_mode = tf.nest.map_structure(
lambda t: t.mode(), policy.distribution(time_step).action
)
self.evaluate(tf.compat.v1.global_variables_initializer())
expected = (
tf.constant([[100, 200]] * batch_size, dtype=tf.float32),
(
tf.constant([[300]] * batch_size, dtype=tf.int64),
{'dict': tf.constant([400] * batch_size, dtype=tf.int32)},
),
)
tf.nest.map_structure(self.assertAllEqual, action, expected)
tf.nest.map_structure(self.assertAllEqual, distribution_mode, expected)
if __name__ == '__main__':
tf.test.main() |
get data module | import json
import math
import random
from functools import partial
from typing import List
import sentencepiece as spm
import torch
import torchaudio
from data_module import LibriSpeechDataModule
from lightning import Batch
_decibel = 2 * 20 * math.log10(torch.iinfo(torch.int16).max)
_gain = pow(10, 0.05 * _decibel)
_spectrogram_transform = torchaudio.transforms.MelSpectrogram(sample_rate=16000, n_fft=400, n_mels=80, hop_length=160)
random.seed(999)
def _piecewise_linear_log(x):
x = x * _gain
x[x > math.e] = torch.log(x[x > math.e])
x[x <= math.e] = x[x <= math.e] / math.e
return x
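# Illustrative behaviour: after scaling by _gain the transform is linear for
# values up to e (x / e) and logarithmic above it (log(x)); an input equal to
# e / _gain therefore maps to exactly 1.0.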
class FunctionalModule(torch.nn.Module):
def __init__(self, functional):
super().__init__()
self.functional = functional
def forward(self, input):
return self.functional(input)
class GlobalStatsNormalization(torch.nn.Module):
def __init__(self, global_stats_path):
super().__init__()
with open(global_stats_path) as f:
blob = json.loads(f.read())
self.mean = torch.tensor(blob["mean"])
self.invstddev = torch.tensor(blob["invstddev"])
def forward(self, input):
return (input - self.mean) * self.invstddev
def _extract_labels(sp_model, samples: List):
targets = [sp_model.encode(sample[2].lower()) for sample in samples]
biasingwords = []
for sample in samples:
for word in sample[6]:
if word not in biasingwords:
biasingwords.append(word)
lengths = torch.tensor([len(elem) for elem in targets]).to(dtype=torch.int32)
targets = torch.nn.utils.rnn.pad_sequence(
[torch.tensor(elem) for elem in targets],
batch_first=True,
padding_value=1.0,
).to(dtype=torch.int32)
return targets, lengths, biasingwords
def _extract_features(data_pipeline, samples: List):
mel_features = [_spectrogram_transform(sample[0].squeeze()).transpose(1, 0) for sample in samples]
features = torch.nn.utils.rnn.pad_sequence(mel_features, batch_first=True)
features = data_pipeline(features)
lengths = torch.tensor([elem.shape[0] for elem in mel_features], dtype=torch.int32)
return features, lengths
def _extract_tries(sp_model, biasingwords, blist, droprate, maxsize):
if len(biasingwords) > 0 and droprate > 0:
biasingwords = random.sample(biasingwords, k=int(len(biasingwords) * (1 - droprate)))
if len(biasingwords) < maxsize:
distractors = random.sample(blist, k=max(0, maxsize - len(biasingwords)))
for word in distractors:
if word not in biasingwords:
biasingwords.append(word)
biasingwords = [sp_model.encode(word.lower()) for word in biasingwords]
biasingwords = sorted(biasingwords)
worddict = {tuple(word): i + 1 for i, word in enumerate(biasingwords)}
lextree = make_lexical_tree(worddict, -1)
return lextree, biasingwords
def make_lexical_tree(word_dict, word_unk):
"""Make a prefix tree"""
# node [dict(subword_id -> node), word_id, word_set[start-1, end]]
root = [{}, -1, None]
for w, wid in word_dict.items():
if wid > 0 and wid != word_unk:
succ = root[0]
for i, cid in enumerate(w):
if cid not in succ:
succ[cid] = [{}, -1, (wid - 1, wid)]
else:
prev = succ[cid][2]
succ[cid][2] = (min(prev[0], wid - 1), max(prev[1], wid))
if i == len(w) - 1:
succ[cid][1] = wid
succ = succ[cid][0]
return root
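# Illustrative sketch (not part of the original module): a tiny word
# dictionary showing the node layout produced by make_lexical_tree().
# The subword ids used below are made up.
def _example_lexical_tree():
    """Build a two-word prefix tree; each node is [children, word_id, (start - 1, end)]."""
    word_dict = {(3, 5): 1, (3, 7): 2}
    tree = make_lexical_tree(word_dict, word_unk=-1)
    # Both words share the prefix node for subword id 3; the leaf nodes
    # carry the word ids 1 and 2 respectively.
    assert tree[0][3][0][5][1] == 1
    assert tree[0][3][0][7][1] == 2
    return tree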
class TrainTransform:
def __init__(self, global_stats_path: str, sp_model_path: str, blist: List[str], droprate: float, maxsize: int):
self.sp_model = spm.SentencePieceProcessor(model_file=sp_model_path)
self.train_data_pipeline = torch.nn.Sequential(
FunctionalModule(_piecewise_linear_log),
GlobalStatsNormalization(global_stats_path),
FunctionalModule(partial(torch.transpose, dim0=1, dim1=2)),
torchaudio.transforms.FrequencyMasking(27),
torchaudio.transforms.FrequencyMasking(27),
torchaudio.transforms.TimeMasking(100, p=0.2),
torchaudio.transforms.TimeMasking(100, p=0.2),
FunctionalModule(partial(torch.transpose, dim0=1, dim1=2)),
)
self.blist = blist
self.droprate = droprate
self.maxsize = maxsize
def __call__(self, samples: List):
features, feature_lengths = _extract_features(self.train_data_pipeline, samples)
targets, target_lengths, biasingwords = _extract_labels(self.sp_model, samples)
tries, biasingwords = _extract_tries(self.sp_model, biasingwords, self.blist, self.droprate, self.maxsize)
return Batch(features, feature_lengths, targets, target_lengths, tries)
class ValTransform:
def __init__(self, global_stats_path: str, sp_model_path: str, blist: List[str], droprate: float, maxsize: int):
self.sp_model = spm.SentencePieceProcessor(model_file=sp_model_path)
self.valid_data_pipeline = torch.nn.Sequential(
FunctionalModule(_piecewise_linear_log),
GlobalStatsNormalization(global_stats_path),
)
self.blist = blist
self.droprate = droprate
self.maxsize = maxsize
def __call__(self, samples: List):
features, feature_lengths = _extract_features(self.valid_data_pipeline, samples)
targets, target_lengths, biasingwords = _extract_labels(self.sp_model, samples)
if self.blist:
tries, biasingwords = _extract_tries(self.sp_model, biasingwords, self.blist, self.droprate, self.maxsize)
else:
tries = []
return Batch(features, feature_lengths, targets, target_lengths, tries)
class TestTransform:
def __init__(self, global_stats_path: str, sp_model_path: str, blist: List[str], droprate: float, maxsize: int):
self.val_transforms = ValTransform(global_stats_path, sp_model_path, blist, droprate, maxsize)
def __call__(self, sample):
return self.val_transforms([sample]), [sample]
def METHOD_NAME(
librispeech_path, global_stats_path, sp_model_path, subset=None, biasinglist=None, droprate=0.0, maxsize=1000
):
fullbiasinglist = []
if biasinglist:
with open(biasinglist) as fin:
fullbiasinglist = [line.strip() for line in fin]
train_transform = TrainTransform(
global_stats_path=global_stats_path,
sp_model_path=sp_model_path,
blist=fullbiasinglist,
droprate=droprate,
maxsize=maxsize,
)
val_transform = ValTransform(
global_stats_path=global_stats_path,
sp_model_path=sp_model_path,
blist=fullbiasinglist,
droprate=droprate,
maxsize=maxsize,
)
test_transform = TestTransform(
global_stats_path=global_stats_path,
sp_model_path=sp_model_path,
blist=fullbiasinglist,
droprate=droprate,
maxsize=maxsize,
)
return LibriSpeechDataModule(
librispeech_path=librispeech_path,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
subset=subset,
fullbiasinglist=fullbiasinglist,
) |
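# Usage sketch (illustrative; all paths and file names below are placeholders):
#   data_module = METHOD_NAME(
#       "/datasets/LibriSpeech",
#       "global_stats.json",
#       "spm_unigram.model",
#       subset="train-clean-100",
#       biasinglist="all_rare_words.txt",
#       droprate=0.1,
#       maxsize=500,
#   )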
guess content type | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def METHOD_NAME(filename, default='application/octet-stream'):
"""
Guess the "Content-Type" of a file.
:param filename:
The filename to guess the "Content-Type" of using :mod:`mimetypes`.
:param default:
If no "Content-Type" can be guessed, default to `default`.
"""
if filename:
return mimetypes.guess_type(filename)[0] or default
return default
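# Illustrative behaviour (file names are made up):
#   METHOD_NAME('photo.jpg')       # -> 'image/jpeg'
#   METHOD_NAME('blob.unknownext') # -> 'application/octet-stream' (fallback)
#   METHOD_NAME(None)              # -> 'application/octet-stream'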
def format_header_param(name, value):
"""
Helper function to format and quote a single header parameter.
Particularly useful for header parameters which might contain
non-ASCII values, like file names. This follows RFC 2231, as
suggested by RFC 2388 Section 4.4.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
if not any(ch in value for ch in '"\\\r\n'):
result = '%s="%s"' % (name, value)
try:
result.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
else:
return result
if not six.PY3 and isinstance(value, six.text_type): # Python 2:
value = value.encode('utf-8')
value = email.utils.encode_rfc2231(value, 'utf-8')
value = '%s*=%s' % (name, value)
return value
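# Illustrative behaviour (values are made up): plain ASCII values keep the
# simple quoted form, while values that cannot be encoded as ASCII fall back
# to the RFC 2231 form.
#   format_header_param('filename', 'report.csv')
#   # -> 'filename="report.csv"'
#   format_header_param('filename', u'r\u00e9sum\u00e9.txt')
#   # -> "filename*=utf-8''r%C3%A9sum%C3%A9.txt"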
class RequestField(object):
"""
A data container for request body parameters.
:param name:
The name of this request field.
:param data:
The data/value body.
:param filename:
An optional filename of the request field.
:param headers:
An optional dict-like object of headers to initially use for the field.
"""
def __init__(self, name, data, filename=None, headers=None):
self._name = name
self._filename = filename
self.data = data
self.headers = {}
if headers:
self.headers = dict(headers)
@classmethod
def from_tuples(cls, fieldname, value):
"""
A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
Supports constructing :class:`~urllib3.fields.RequestField` from
parameter of key/value strings AND key/filetuple. A filetuple is a
(filename, data, MIME type) tuple where the MIME type is optional.
For example::
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
Field names and filenames must be unicode.
"""
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = METHOD_NAME(filename)
else:
filename = None
content_type = None
data = value
request_param = cls(fieldname, data, filename=filename)
request_param.make_multipart(content_type=content_type)
return request_param
def _render_part(self, name, value):
"""
Overridable helper function to format a single header parameter.
:param name:
The name of the parameter, a string expected to be ASCII only.
:param value:
The value of the parameter, provided as a unicode string.
"""
return format_header_param(name, value)
def _render_parts(self, header_parts):
"""
Helper function to format and quote a single header.
Useful for single headers that are composed of multiple items. E.g.,
'Content-Disposition' fields.
:param header_parts:
A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
as `k1="v1"; k2="v2"; ...`.
"""
parts = []
iterable = header_parts
if isinstance(header_parts, dict):
iterable = header_parts.items()
for name, value in iterable:
if value is not None:
parts.append(self._render_part(name, value))
return '; '.join(parts)
def render_headers(self):
"""
Renders the headers for this request field.
"""
lines = []
sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
for sort_key in sort_keys:
if self.headers.get(sort_key, False):
lines.append('%s: %s' % (sort_key, self.headers[sort_key]))
for header_name, header_value in self.headers.items():
if header_name not in sort_keys:
if header_value:
lines.append('%s: %s' % (header_name, header_value))
lines.append('\r\n')
return '\r\n'.join(lines)
def make_multipart(self, content_disposition=None, content_type=None,
content_location=None):
"""
Makes this request field into a multipart request field.
This method overrides "Content-Disposition", "Content-Type" and
"Content-Location" headers to the request parameter.
:param content_type:
The 'Content-Type' of the request body.
:param content_location:
The 'Content-Location' of the request body.
"""
self.headers['Content-Disposition'] = content_disposition or 'form-data'
self.headers['Content-Disposition'] += '; '.join([
'', self._render_parts(
(('name', self._name), ('filename', self._filename))
)
])
self.headers['Content-Type'] = content_type
self.headers['Content-Location'] = content_location |
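# Illustrative sketch (not part of urllib3): building a multipart field from
# the old-style tuple form and rendering its headers. The field name and file
# contents below are made up.
def _example_multipart_field():
    field = RequestField.from_tuples(
        'attachment', ('report.txt', 'file contents', 'text/plain'))
    # render_headers() yields the Content-Disposition line with name/filename
    # parameters, the Content-Type header and a trailing blank line.
    return field.render_headers()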
get historic instrument order from order id | import datetime
from syscore.constants import arg_not_supplied
from sysexecution.orders.named_order_objects import missing_order, no_parent
from sysdata.mongodb.mongo_order_stack import (
mongoInstrumentOrderStackData,
mongoContractOrderStackData,
mongoBrokerOrderStackData,
)
from sysdata.mongodb.mongo_historic_orders import (
mongoStrategyHistoricOrdersData,
mongoContractHistoricOrdersData,
mongoBrokerHistoricOrdersData,
)
from sysdata.production.historic_orders import (
brokerHistoricOrdersData,
contractHistoricOrdersData,
strategyHistoricOrdersData,
)
from sysdata.data_blob import dataBlob
from sysobjects.fills import ListOfFills
from sysexecution.order_stacks.broker_order_stack import brokerOrderStackData
from sysexecution.order_stacks.contract_order_stack import contractOrderStackData
from sysexecution.order_stacks.instrument_order_stack import instrumentOrderStackData
from sysexecution.orders.contract_orders import contractOrder
from sysexecution.orders.broker_orders import (
brokerOrder,
brokerOrderWithParentInformation,
)
from sysexecution.orders.instrument_orders import instrumentOrder
from sysexecution.orders.list_of_orders import listOfOrders
from sysobjects.production.tradeable_object import instrumentStrategy, futuresContract
class dataOrders(object):
def __init__(self, data: dataBlob = arg_not_supplied):
# Check data has the right elements to do this
if data is arg_not_supplied:
data = dataBlob()
data.add_class_list(
[
mongoInstrumentOrderStackData,
mongoContractOrderStackData,
mongoBrokerOrderStackData,
mongoContractHistoricOrdersData,
mongoStrategyHistoricOrdersData,
mongoBrokerHistoricOrdersData,
]
)
self._data = data
@property
def data(self) -> dataBlob:
return self._data
@property
def db_strategy_historic_orders_data(self) -> strategyHistoricOrdersData:
return self.data.db_strategy_historic_orders
@property
def db_contract_historic_orders_data(self) -> contractHistoricOrdersData:
return self.data.db_contract_historic_orders
@property
def db_broker_historic_orders_data(self) -> brokerHistoricOrdersData:
return self.data.db_broker_historic_orders
@property
def db_instrument_stack_data(self) -> instrumentOrderStackData:
return self.data.db_instrument_order_stack
@property
def db_contract_stack_data(self) -> contractOrderStackData:
return self.data.db_contract_order_stack
@property
def db_broker_stack_data(self) -> brokerOrderStackData:
return self.data.db_broker_order_stack
def add_historic_orders_to_data(
self,
instrument_order: instrumentOrder,
list_of_contract_orders: listOfOrders,
list_of_broker_orders: listOfOrders,
):
self.add_historic_instrument_order_to_data(instrument_order)
for contract_order in list_of_contract_orders:
self.add_historic_contract_order_to_data(contract_order)
for broker_order in list_of_broker_orders:
self.add_historic_broker_order_to_data(broker_order)
def add_historic_instrument_order_to_data(self, instrument_order: instrumentOrder):
self.db_strategy_historic_orders_data.add_order_to_data(instrument_order)
def add_historic_contract_order_to_data(self, contract_order: contractOrder):
self.db_contract_historic_orders_data.add_order_to_data(contract_order)
def add_historic_broker_order_to_data(self, broker_order: brokerOrder):
self.db_broker_historic_orders_data.add_order_to_data(broker_order)
def get_historic_broker_order_ids_in_date_range(
self,
period_start: datetime.datetime,
period_end: datetime.datetime = arg_not_supplied,
) -> list:
# remove split orders
order_id_list = (
self.db_broker_historic_orders_data.get_list_of_order_ids_in_date_range(
period_start, period_end=period_end
)
)
return order_id_list
def get_historic_contract_order_ids_in_date_range(
self, period_start: datetime.datetime, period_end: datetime.datetime
) -> list:
order_id_list = (
self.db_contract_historic_orders_data.get_list_of_order_ids_in_date_range(
period_start, period_end
)
)
return order_id_list
def get_historic_instrument_order_ids_in_date_range(
self, period_start: datetime.datetime, period_end: datetime.datetime
) -> list:
order_id_list = (
self.db_strategy_historic_orders_data.get_list_of_order_ids_in_date_range(
period_start, period_end
)
)
return order_id_list
def METHOD_NAME(
self, order_id: int
) -> instrumentOrder:
order = self.db_strategy_historic_orders_data.get_order_with_orderid(order_id)
return order
def get_historic_contract_order_from_order_id(self, order_id: int) -> contractOrder:
order = self.db_contract_historic_orders_data.get_order_with_orderid(order_id)
return order
def get_historic_broker_order_from_order_id(self, order_id: int) -> brokerOrder:
order = self.db_broker_historic_orders_data.get_order_with_orderid(order_id)
return order
def get_fills_history_for_contract(
self, futures_contract: futuresContract
) -> ListOfFills:
## We get this from broker fills, as they have leg by leg information
list_of_fills = (
self.db_broker_historic_orders_data.get_fills_history_for_contract(
futures_contract
)
)
return list_of_fills
def get_fills_history_for_instrument_strategy(
self, instrument_strategy: instrumentStrategy
) -> ListOfFills:
list_of_fills = self.db_strategy_historic_orders_data.get_fills_history_for_instrument_strategy(
instrument_strategy
)
return list_of_fills
def get_historic_broker_order_from_order_id_with_execution_data(
self, order_id: int
) -> brokerOrderWithParentInformation:
order = self.get_historic_broker_order_from_order_id(order_id)
contract_order = self.get_parent_contract_order_for_historic_broker_order_id(
order_id
)
instrument_order = (
self.get_parent_instrument_order_for_historic_broker_order_id(order_id)
)
augmented_order = brokerOrderWithParentInformation.create_augemented_order(
order, contract_order=contract_order, instrument_order=instrument_order
)
return augmented_order
def get_parent_contract_order_for_historic_broker_order_id(
self, order_id: int
) -> contractOrder:
broker_order = self.get_historic_broker_order_from_order_id(order_id)
contract_order_id = broker_order.parent
if contract_order_id is no_parent:
return missing_order
contract_order = self.get_historic_contract_order_from_order_id(
contract_order_id
)
return contract_order
def get_parent_instrument_order_for_historic_broker_order_id(
self, order_id: int
) -> instrumentOrder:
contract_order = self.get_parent_contract_order_for_historic_broker_order_id(
order_id
)
if contract_order is missing_order:
return missing_order
instrument_order_id = contract_order.parent
if instrument_order_id is no_parent:
return missing_order
instrument_order = self.METHOD_NAME(
instrument_order_id
)
return instrument_order |
set location | import asyncio
import json
import logging
from homematicip.aio.class_maps import (
TYPE_CLASS_MAP,
TYPE_GROUP_MAP,
TYPE_RULE_MAP,
TYPE_SECURITY_EVENT_MAP,
)
from homematicip.aio.connection import AsyncConnection
from homematicip.aio.securityEvent import AsyncSecurityEvent
from homematicip.base.enums import *
from homematicip.home import Home, OAuthOTK
LOGGER = logging.getLogger(__name__)
class AsyncHome(Home):
"""this class represents the 'Async Home' of the homematic ip"""
_typeClassMap = TYPE_CLASS_MAP
_typeGroupMap = TYPE_GROUP_MAP
_typeSecurityEventMap = TYPE_SECURITY_EVENT_MAP
_typeRuleMap = TYPE_RULE_MAP
def __init__(self, loop, websession=None):
super().__init__(connection=AsyncConnection(loop, websession))
async def init(self, access_point_id, lookup=True):
await self._connection.init(access_point_id, lookup)
async def get_current_state(self, clearConfig: bool = False):
"""downloads the current configuration and parses it into self
Args:
clearConfig(bool): if set to true, this function will remove all old objects
from self.devices, self.client, ... to have a fresh config instead of reparsing them
"""
LOGGER.debug("get_current_state")
json_state = await self.download_configuration()
return self.update_home(json_state, clearConfig)
async def download_configuration(self):
return await self._connection.api_call(*super().download_configuration())
async def enable_events(self) -> asyncio.Task:
"""Connects to the websocket. Returns a listening task."""
return await self._connection.ws_connect(
on_message=self._ws_on_message, on_error=self._ws_on_error
)
async def disable_events(self):
await self._connection.close_websocket_connection()
async def get_OAuth_OTK(self):
token = OAuthOTK(self._connection)
token.from_json(await self._connection.api_call("home/getOAuthOTK"))
return token
async def activate_absence_with_duration(self, duration):
return await self._connection.api_call(
*super().activate_absence_with_duration(duration)
)
async def set_powermeter_unit_price(self, price):
return await self._connection.api_call(
*super().set_powermeter_unit_price(price)
)
async def set_intrusion_alert_through_smoke_detectors(self, activate=True):
return await self._connection.api_call(
*super().set_intrusion_alert_through_smoke_detectors(activate)
)
async def set_timezone(self, timezone):
return await self._connection.api_call(*super().set_timezone(timezone))
async def set_zones_device_assignment(self, internal_devices, external_devices):
return await self._connection.api_call(
*super().set_zones_device_assignment(internal_devices, external_devices)
)
async def set_pin(self, newPin, oldPin=None):
if newPin is None:
newPin = ""
data = {"pin": newPin}
if oldPin:
self._connection.headers["PIN"] = str(oldPin)
result = await self._connection.api_call("home/setPin", body=json.dumps(data))
if oldPin:
del self._connection.headers["PIN"]
return result
async def get_security_journal(self):
journal = await self._connection.api_call(
"home/security/getSecurityJournal",
json.dumps(self._connection.clientCharacteristics),
)
if journal is None or "errorCode" in journal:
LOGGER.error(
"Could not get the security journal. Error: %s", journal["errorCode"]
)
return None
ret = []
for entry in journal["entries"]:
try:
eventType = SecurityEventType(entry["eventType"])
if eventType in self._typeSecurityEventMap:
j = self._typeSecurityEventMap[eventType](self._connection)
else:
# known event type without a dedicated class: fall back to the base event
j = AsyncSecurityEvent(self._connection)
except Exception:
j = AsyncSecurityEvent(self._connection)
LOGGER.warning("There is no class for %s yet", entry["eventType"])
j.from_json(entry)
ret.append(j)
return ret
async def activate_absence_with_period(self, endtime):
return await self._connection.api_call(
*super().activate_absence_with_period(endtime)
)
async def activate_absence_permanent(self):
return await self._connection.api_call(*super().activate_absence_permanent())
async def deactivate_absence(self):
return await self._connection.api_call(*super().deactivate_absence())
async def activate_vacation(self, endtime, temperature):
return await self._connection.api_call(
*super().activate_vacation(endtime, temperature)
)
async def deactivate_vacation(self):
return await self._connection.api_call(*super().deactivate_vacation())
async def set_zone_activation_delay(self, delay):
return await self._connection.api_call(
*super().set_zone_activation_delay(delay)
)
async def set_security_zones_activation(self, internal=True, external=True):
return await self._connection.api_call(
*super().set_security_zones_activation(internal, external)
)
async def delete_group(self, group):
return await group.delete()
async def METHOD_NAME(self, city, latitude, longitude):
return await self._connection.api_call(
*super().METHOD_NAME(city, latitude, longitude)
) |
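# Usage sketch (illustrative; the access point id and coordinates are made up
# and the calls must run inside a coroutine):
#   home = AsyncHome(loop)
#   await home.init("3014F711A000000000000000")
#   await home.get_current_state()
#   await home.METHOD_NAME("Berlin", "52.530644", "13.383068")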
loss | # Copyright The Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from lightning_utilities import module_available
from torch.utils.data import Dataset
if module_available("lightning"):
from lightning.pytorch import LightningModule
else:
from pytorch_lightning import LightningModule
class RandomDictStringDataset(Dataset):
"""Class for creating a dictionary of random strings."""
def __init__(self, size, length) -> None:
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index) -> dict:
"""Get datapoint."""
return {"id": str(index), "x": self.data[index]}
def __len__(self) -> int:
"""Return length of dataset."""
return self.len
class RandomDataset(Dataset):
"""Random dataset for testing PL Module."""
def __init__(self, size, length) -> None:
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index) -> torch.Tensor:
"""Get datapoint."""
return self.data[index]
def __len__(self) -> int:
"""Get length of dataset."""
return self.len
class BoringModel(LightningModule):
"""Testing PL Module.
Use as follows:
- subclass
- modify the behavior for what you want
class TestModel(BaseTestModel):
def training_step(...):
# do your own thing
or:
model = BaseTestModel()
model.training_epoch_end = None
"""
def __init__(self) -> None:
super().__init__()
self.layer = torch.nn.Linear(32, 2)
def forward(self, x):
"""Forward pass of x through model."""
return self.layer(x)
@staticmethod
def METHOD_NAME(_, prediction) -> torch.Tensor:
"""Arbitrary loss."""
return torch.nn.functional.mse_loss(prediction, torch.ones_like(prediction))
def step(self, x):
"""Single step in model."""
x = self(x)
return torch.nn.functional.mse_loss(x, torch.ones_like(x))
def training_step(self, batch, batch_idx):
"""Single training step in model."""
output = self.layer(batch)
METHOD_NAME = self.METHOD_NAME(batch, output)
return {"loss": METHOD_NAME}
def validation_step(self, batch, batch_idx):
"""Single validation step in the model."""
output = self.layer(batch)
METHOD_NAME = self.METHOD_NAME(batch, output)
return {"x": METHOD_NAME}
def test_step(self, batch, batch_idx):
"""Single test step in the model."""
output = self.layer(batch)
METHOD_NAME = self.METHOD_NAME(batch, output)
return {"y": METHOD_NAME}
def configure_optimizers(self):
"""Configure which optimizer to use when training the model."""
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
return {"optimizer": optimizer, "scheduler": lr_scheduler}
def train_dataloader(self):
"""Define train dataloader used for training the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def val_dataloader(self):
"""Define validation dataloader used for validating the model."""
return torch.utils.data.DataLoader(RandomDataset(32, 64))
def test_dataloader(self):
"""Define test dataloader used for testing the mdoel."""
return torch.utils.data.DataLoader(RandomDataset(32, 64)) |
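# Illustrative sketch (not part of the original helpers): drive a single
# training step by hand. The batch size of 1 comes from the default
# DataLoader settings used above.
def _example_manual_step():
    model = BoringModel()
    batch = next(iter(model.train_dataloader()))
    out = model.training_step(batch, 0)
    # The returned dict carries the scalar loss the trainer would minimize.
    out["loss"].backward()
    return out["loss"]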
append | # ../config/cvar.py
"""Provides ConVar functionality in configuration files."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Source.Python Imports
# Cvars
from cvars import ConVar
# Hooks
from hooks.exceptions import except_hooks
# Translations
from translations.strings import TranslationStrings
# =============================================================================
# >> CLASSES
# =============================================================================
class _CvarManager(dict):
"""Class used to store a cvar instance."""
def __init__(
self, name, default, description, flags, min_value, max_value):
"""Called on instantiation.
:param str name:
Name of the console variable.
:param object default:
A default value for the console variable. It will be converted to
a string.
:param str/TranslationStrings description:
A description of the console variable.
:param ConVarFlags flags:
Flags that should be used for the console variable.
:param float min_value:
Minimum value.
:param float max_value:
Maximum value.
"""
# Initialize the dictionary
super().__init__()
# Is the given description a TranslationStrings instance?
if isinstance(description, TranslationStrings):
# Store the description as the proper language string
description = description.get_string()
# Store the base attributes for the cvar
self._name = name
self._default = default
self._description = description
# Get the cvar instance
self._cvar = ConVar(
name, str(default), description, flags, min_value, max_value)
# Set the attribute to show the default value
self.show_default = True
# Store a list to iterate over description fields and text
self._order = list()
def __enter__(self):
"""Used when using "with" context management to create the cvar."""
return self
def __getattr__(self, attr):
"""Return cvar attributes or items in the instance."""
# Does the cvar instance have the given attribute?
if hasattr(self.cvar, attr):
# Return the attribute for the cvar instance
return getattr(self.cvar, attr)
# Return the item
return self.__getitem__(attr)
def __setattr__(self, attr, value):
"""Set cvar attributes."""
# Does the cvar instance have the given attribute?
if not attr.startswith('_') and hasattr(self.cvar, attr):
# Set the cvar's attribute and return
setattr(self.cvar, attr, value)
return
# Set the instance's attribute
super().__setattr__(attr, value)
def __missing__(self, item):
"""Create the item as a _ListManager instance.
:rtype: _ListManager
"""
# Get the _ListManager instance for the given item
value = self[item] = _ListManager(item)
# Add the _ListManager instance to the ordered list
self._order.METHOD_NAME(value)
# Return the _ListManager instance
return value
def __iter__(self):
"""Iterate over items in the ordered list."""
# Loop through items in the ordered list
for item in self._order:
# Is the current item a _ListManager instance?
if isinstance(item, _ListManager):
# Yield the item's name
yield item.name, 0
# Loop through each line in the _ListManager instance
for line in item:
# Yield the line
yield item.start + line, item.indent
# Is the current item not a _ListManager instance?
else:
# Yield the item
yield item, 0
def __exit__(self, exctype, value, trace_back):
"""Used when exiting "with" context management to create the cvar."""
# Was an exception raised?
if trace_back:
# Print the exception
except_hooks.print_exception(exctype, value, trace_back)
# Return
return False
# Return
return True
def __bool__(self):
"""Return the boolean value of the ConVar.
:rtype: bool
"""
return bool(self.cvar)
def __int__(self):
"""Return the integer value of the ConVar.
:rtype: int
"""
return int(self.cvar)
def __float__(self):
"""Return the floating point value of the ConVar.
:rtype: float
"""
return float(self.cvar)
def __str__(self):
"""Return the string value of the ConVar.
:rtype: str
"""
return str(self.cvar)
def text(self, text):
"""Add a text to the list.
:param str/TranslationStrings text:
The text to add.
"""
# Is the item a TranslationStrings instance?
if isinstance(text, TranslationStrings):
# Get the proper text for the given item
text = text.get_string()
# Add the text to the ordered list
self._order.METHOD_NAME(text)
@property
def name(self):
"""Return the cvar's name.
:rtype: str
"""
return self._name
@property
def default(self):
"""Return the cvar's default value.
:rtype: str
"""
return self._default
@property
def description(self):
"""Return the cvar's description.
:rtype: str
"""
return self._description
@property
def cvar(self):
"""Return the cvar's :class:`cvars.ConVar` instance.
:rtype: ConVar
"""
return self._cvar
class _ListManager(list):
"""List class used to store text for a specific descriptor of a cvar."""
def __init__(self, name):
"""Called on instantiation."""
# Initialize the list
super().__init__()
# Is the given name a TranslationStrings instance?
if isinstance(name, TranslationStrings):
# Get the proper text for the given name
name = name.get_string()
# Store the base attributes for the list
self._name = name
self.start = ' * '
self.indent = 9
@property
def name(self):
"""Return the name of the list.
:rtype: str
"""
return self._name
def METHOD_NAME(self, text):
"""Override append to add the proper text."""
# Is the item a TranslationStrings instance?
if isinstance(text, TranslationStrings):
# Get the proper text for the given item
text = text.get_string()
# Add the item to the list
super().METHOD_NAME(text) |
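# Usage sketch (illustrative; the cvar name, values and the plain 0 flags
# value are assumptions, not taken from this module):
#   with _CvarManager('myaddon_enabled', 1, 'Enable the addon', 0, None, None) as manager:
#       manager.text('Set to 1 to enable, 0 to disable.')
#       manager['modes'].append('0 = disabled')
#       manager['modes'].append('1 = enabled')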
poisson residual | import pytest
from firedrake import *
from pyadjoint.tape import get_working_tape, pause_annotation
try:
from firedrake.ml.pytorch import *
import torch
import torch.nn.functional as torch_func
from torch.nn import Module, Flatten, Linear
class EncoderDecoder(Module):
"""Build a simple toy model"""
def __init__(self, n):
super(EncoderDecoder, self).__init__()
self.n = n
self.m = int(n/2)
self.flatten = Flatten()
self.linear_encoder = Linear(self.n, self.m)
self.linear_decoder = Linear(self.m, self.n)
def encode(self, x):
return torch_func.relu(self.linear_encoder(x))
def decode(self, x):
return torch_func.relu(self.linear_decoder(x))
def forward(self, x):
# [batch_size, n]
x = self.flatten(x)
# [batch_size, m]
hidden = self.encode(x)
# [batch_size, n]
return self.decode(hidden)
except ImportError:
# PyTorch is not installed
pass
@pytest.fixture(autouse=True)
def handle_taping():
yield
tape = get_working_tape()
tape.clear_tape()
@pytest.fixture(autouse=True, scope="module")
def handle_annotation():
from firedrake.adjoint import annotate_tape, continue_annotation
if not annotate_tape():
continue_annotation()
yield
# Ensure annotation is paused when we finish.
annotate = annotate_tape()
if annotate:
pause_annotation()
@pytest.fixture(scope="module")
def mesh():
return UnitSquareMesh(10, 10)
@pytest.fixture(scope="module")
def V(mesh):
return FunctionSpace(mesh, "CG", 1)
@pytest.fixture
def f_exact(V, mesh):
x, y = SpatialCoordinate(mesh)
return Function(V).interpolate(sin(pi * x) * sin(pi * y))
# Set of Firedrake operations that will be composed with PyTorch operations
def METHOD_NAME(u, f, V):
"""Assemble the residual of a Poisson problem"""
v = TestFunction(V)
F = (inner(grad(u), grad(v)) + inner(u, v) - inner(f, v)) * dx
return assemble(F)
# Set of Firedrake operations that will be composed with PyTorch operations
def solve_poisson(f, V):
"""Solve Poisson problem with homogeneous Dirichlet boundary conditions"""
u = Function(V)
v = TestFunction(V)
F = (inner(grad(u), grad(v)) + inner(u, v) - inner(f, v)) * dx
bcs = [DirichletBC(V, Constant(1.0), "on_boundary")]
# Solve PDE
solve(F == 0, u, bcs=bcs)
# Assemble Firedrake loss
return assemble(u ** 2 * dx)
@pytest.fixture(params=["poisson_residual", "solve_poisson"])
def firedrake_operator(request, f_exact, V):
# Return firedrake operator and the corresponding non-control arguments
if request.param == "poisson_residual":
return METHOD_NAME, (f_exact, V)
elif request.param == "solve_poisson":
return solve_poisson, (V,)
@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done
@pytest.mark.skiptorch # Skip if PyTorch is not installed
def test_pytorch_loss_backward(V, f_exact):
"""Test backpropagation through a vector-valued Firedrake operator"""
from firedrake.adjoint import ReducedFunctional, Control
# Instantiate model
model = EncoderDecoder(V.dim())
# Set double precision
model.double()
# Check that gradients are initially set to None
assert all([pi.grad is None for pi in model.parameters()])
# Convert f_exact to torch.Tensor
f_P = to_torch(f_exact)
# Forward pass
u_P = model(f_P)
# Set control
u = Function(V)
c = Control(u)
# Set reduced functional which expresses the Firedrake operations in terms of the control
Jhat = ReducedFunctional(METHOD_NAME(u, f_exact, V), c)
# Construct the torch operator that takes a callable representing the Firedrake operations
G = torch_operator(Jhat)
# Compute Poisson residual in Firedrake using the torch operator: `residual_P` is a torch.Tensor
residual_P = G(u_P)
# Compute PyTorch loss
loss = (residual_P ** 2).sum()
# -- Check backpropagation API -- #
loss.backward()
# Check that gradients were propagated to model parameters
# This test doesn't check the correctness of these gradients
# -> This is checked in `test_taylor_torch_operator`
assert all([pi.grad is not None for pi in model.parameters()])
# -- Check forward operator -- #
u = from_torch(u_P, V)
residual = METHOD_NAME(u, f_exact, V)
residual_P_exact = to_torch(residual)
assert (residual_P - residual_P_exact).detach().norm() < 1e-10
@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done
@pytest.mark.skiptorch # Skip if PyTorch is not installed
def test_firedrake_loss_backward(V):
"""Test backpropagation through a scalar-valued Firedrake operator"""
from firedrake.adjoint import ReducedFunctional, Control
# Instantiate model
model = EncoderDecoder(V.dim())
# Set double precision
model.double()
# Check that gradients are initially set to None
assert all([pi.grad is None for pi in model.parameters()])
# Model input
u = Function(V)
# Convert f to torch.Tensor
u_P = to_torch(u)
# Forward pass
f_P = model(u_P)
# Set control
f = Function(V)
c = Control(f)
# Set reduced functional which expresses the Firedrake operations in terms of the control
Jhat = ReducedFunctional(solve_poisson(f, V), c)
# Construct the torch operator that takes a callable representing the Firedrake operations
G = torch_operator(Jhat)
# Solve Poisson problem and compute the loss defined as the L2-norm of the solution
# -> `loss_P` is a torch.Tensor
loss_P = G(f_P)
# -- Check backpropagation API -- #
loss_P.backward()
# Check that gradients were propagated to model parameters
# This test doesn't check the correctness of these gradients
# -> This is checked in `test_taylor_torch_operator`
assert all([pi.grad is not None for pi in model.parameters()])
# -- Check forward operator -- #
f = from_torch(f_P, V)
loss = solve_poisson(f, V)
loss_P_exact = to_torch(loss)
assert (loss_P - loss_P_exact).detach().norm() < 1e-10
@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done
@pytest.mark.skiptorch # Skip if PyTorch is not installed
def test_taylor_torch_operator(firedrake_operator, V):
"""Taylor test for the torch operator"""
from firedrake.adjoint import ReducedFunctional, Control
# Control value
w = Function(V)
# Get Firedrake operator and other operator arguments
fd_op, args = firedrake_operator
# Set reduced functional
Jhat = ReducedFunctional(fd_op(w, *args), Control(w))
# Define the torch operator
G = torch_operator(Jhat)
# `gradcheck` is likely to fail if the inputs are not double precision (cf. https://pytorch.org/docs/stable/generated/torch.autograd.gradcheck.html)
x_P = torch.rand(V.dim(), dtype=torch.double, requires_grad=True)
# Taylor test (`eps` is the perturbation)
assert torch.autograd.gradcheck(G, x_P, eps=1e-6) |
fixed pooling monotonic attention | from functools import partial
import torch
from torch import Tensor
import math
import torch.nn.functional as F
from . import register_monotonic_attention
from .monotonic_multihead_attention import (
MonotonicAttention,
MonotonicInfiniteLookbackAttention,
WaitKAttention
)
from typing import Dict, Optional
def METHOD_NAME(monotonic_attention):
def create_model(monotonic_attention, klass):
class FixedStrideMonotonicAttention(monotonic_attention):
def __init__(self, args):
self.waitk_lagging = 0
self.num_heads = 0
self.noise_mean = 0.0
self.noise_var = 0.0
super().__init__(args)
self.pre_decision_type = args.fixed_pre_decision_type
self.pre_decision_ratio = args.fixed_pre_decision_ratio
self.pre_decision_pad_threshold = args.fixed_pre_decision_pad_threshold
assert self.pre_decision_ratio > 1
if args.fixed_pre_decision_type == "average":
self.pooling_layer = torch.nn.AvgPool1d(
kernel_size=self.pre_decision_ratio,
stride=self.pre_decision_ratio,
ceil_mode=True,
)
elif args.fixed_pre_decision_type == "last":
def last(key):
if key.size(2) < self.pre_decision_ratio:
return key
else:
k = key[
:,
:,
self.pre_decision_ratio - 1:: self.pre_decision_ratio,
].contiguous()
if key.size(-1) % self.pre_decision_ratio != 0:
k = torch.cat([k, key[:, :, -1:]], dim=-1).contiguous()
return k
self.pooling_layer = last
else:
raise NotImplementedError
@staticmethod
def add_args(parser):
super(
FixedStrideMonotonicAttention, FixedStrideMonotonicAttention
).add_args(parser)
parser.add_argument(
"--fixed-pre-decision-ratio",
type=int,
required=True,
help=(
"Ratio for the fixed pre-decision,"
"indicating how many encoder steps will start"
"simultaneous decision making process."
),
)
parser.add_argument(
"--fixed-pre-decision-type",
default="average",
choices=["average", "last"],
help="Pooling type",
)
parser.add_argument(
"--fixed-pre-decision-pad-threshold",
type=float,
default=0.3,
help="If a part of the sequence has pad"
",the threshold the pooled part is a pad.",
)
def insert_zeros(self, x):
bsz_num_heads, tgt_len, src_len = x.size()
stride = self.pre_decision_ratio
weight = F.pad(torch.ones(1, 1, 1).to(x), (stride - 1, 0))
x_upsample = F.conv_transpose1d(
x.view(-1, src_len).unsqueeze(1),
weight,
stride=stride,
padding=0,
)
return x_upsample.squeeze(1).view(bsz_num_heads, tgt_len, -1)
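# Illustrative example: with pre_decision_ratio = 3 a pooled row [a, b] is
# upsampled to [0, 0, a, 0, 0, b], i.e. each pooled probability is placed on
# the last source step of its pooled block.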
def p_choose(
self,
query: Optional[Tensor],
key: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
):
assert key is not None
assert query is not None
src_len = key.size(0)
tgt_len = query.size(0)
batch_size = query.size(1)
key_pool = self.pooling_layer(key.transpose(0, 2)).transpose(0, 2)
if key_padding_mask is not None:
key_padding_mask_pool = (
self.pooling_layer(key_padding_mask.unsqueeze(0).float())
.squeeze(0)
.gt(self.pre_decision_pad_threshold)
)
# Make sure at least one element is not pad
key_padding_mask_pool[:, 0] = 0
else:
key_padding_mask_pool = None
if incremental_state is not None:
# The floor instead of ceil is used for inference,
# but make sure the length of key_pool is at least 1
if (
max(1, math.floor(key.size(0) / self.pre_decision_ratio))
) < key_pool.size(0):
key_pool = key_pool[:-1]
if key_padding_mask_pool is not None:
key_padding_mask_pool = key_padding_mask_pool[:-1]
p_choose_pooled = self.p_choose_from_qk(
query,
key_pool,
key_padding_mask_pool,
incremental_state=incremental_state,
)
# Upsample, interpolate zeros
p_choose = self.insert_zeros(p_choose_pooled)
if p_choose.size(-1) < src_len:
# Append zeros if the upsampled p_choose is shorter than src_len
p_choose = torch.cat(
[
p_choose,
torch.zeros(
p_choose.size(0),
tgt_len,
src_len - p_choose.size(-1)
).to(p_choose)
],
dim=2
)
else:
# can be larger than src_len because we used ceil before
p_choose = p_choose[:, :, :src_len]
p_choose[:, :, -1] = p_choose_pooled[:, :, -1]
assert list(p_choose.size()) == [
batch_size * self.num_heads,
tgt_len,
src_len,
]
return p_choose
FixedStrideMonotonicAttention.__name__ = klass.__name__
return FixedStrideMonotonicAttention
return partial(create_model, monotonic_attention)
@register_monotonic_attention("waitk_fixed_pre_decision")
@METHOD_NAME(WaitKAttention)
class WaitKAttentionFixedStride:
pass
@register_monotonic_attention("hard_aligned_fixed_pre_decision")
@METHOD_NAME(MonotonicAttention)
class MonotonicAttentionFixedStride:
pass
@register_monotonic_attention("infinite_lookback_fixed_pre_decision")
@METHOD_NAME(MonotonicInfiniteLookbackAttention)
class MonotonicInfiniteLookbackAttentionFixedStride:
pass |
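# --- Editor's illustrative sketch (hypothetical, not part of the original module) ---
# With fixed_pre_decision_type="average" and pre_decision_ratio=4, the AvgPool1d
# built in __init__ shrinks a source of length 10 to ceil(10 / 4) = 3 pooled steps;
# p_choose() runs the wrapped monotonic attention on those 3 steps and
# insert_zeros() then stretches the probabilities back over the 10 source positions:
#
#     pooling = torch.nn.AvgPool1d(kernel_size=4, stride=4, ceil_mode=True)
#     key = torch.randn(8, 16, 10)   # (batch, channels, src_len)
#     pooling(key).shape             # -> torch.Size([8, 16, 3])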
find server exp | from abc import ABC, abstractmethod
from asyncio import Lock
from io import BytesIO
from logging import Logger
from typing import List
from aiohttp import ClientSession
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorDatabase
from redbot.core import Config, commands
from redbot.core.bot import Red
class MixinMeta(ABC):
"""
Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
bot: Red
log: Logger
config: Config
_db_ready: bool
_db_lock: Lock
client: AsyncIOMotorClient
db: AsyncIOMotorDatabase
session: ClientSession
@abstractmethod
async def _connect_to_mongo(self):
raise NotImplementedError
@abstractmethod
async def _create_user(self, user, server):
raise NotImplementedError
@abstractmethod
async def _hex_to_rgb(self, hex_num: str, a: int) -> tuple:
raise NotImplementedError
@abstractmethod
async def _badge_convert_dict(self, userinfo):
raise NotImplementedError
@abstractmethod
async def _process_purchase(self, ctx) -> bool:
raise NotImplementedError
@abstractmethod
def _truncate_text(self, text, max_length) -> str:
raise NotImplementedError
@abstractmethod
async def asyncify(self, func, *args, **kwargs):
raise NotImplementedError
@abstractmethod
async def asyncify_thread(self, func, *args, **kwargs):
raise NotImplementedError
@abstractmethod
async def asyncify_process(self, func, *args, **kwargs):
raise NotImplementedError
@abstractmethod
async def hash_with_md5(self, string):
raise NotImplementedError
@abstractmethod
async def _handle_levelup(self, user, userinfo, server, channel):
raise NotImplementedError
@abstractmethod
async def _required_exp(self, level: int) -> int:
raise NotImplementedError
@abstractmethod
async def _level_exp(self, level: int) -> int:
raise NotImplementedError
@abstractmethod
async def _find_level(self, total_exp) -> int:
raise NotImplementedError
@abstractmethod
async def _find_server_rank(self, user, server) -> int:
raise NotImplementedError
@abstractmethod
async def _find_global_rank(self, user) -> int:
raise NotImplementedError
@abstractmethod
async def METHOD_NAME(self, user, server) -> int:
raise NotImplementedError
@abstractmethod
async def _find_server_rep_rank(self, user, server) -> int:
raise NotImplementedError
@abstractmethod
async def _find_global_rep_rank(self, user) -> int:
raise NotImplementedError
@abstractmethod
async def draw_profile(self, user, server) -> BytesIO:
raise NotImplementedError
@abstractmethod
async def draw_rank(self, user, server) -> BytesIO:
raise NotImplementedError
@abstractmethod
async def draw_levelup(self, user, server) -> BytesIO:
raise NotImplementedError
@abstractmethod
async def _process_exp(self, message, userinfo, exp: int):
raise NotImplementedError
@abstractmethod
async def _give_chat_credit(self, user, server):
raise NotImplementedError
@abstractmethod
async def _valid_image_url(self, url):
raise NotImplementedError
@abstractmethod
async def _auto_color(self, ctx, url: str, ranks) -> List[str]:
raise NotImplementedError
@abstractmethod
def _center(self, start, end, text, font) -> int:
raise NotImplementedError
@abstractmethod
def char_in_font(self, unicode_char, font) -> bool:
raise NotImplementedError
@abstractmethod
def _contrast(self, bg_color, color1, color2):
raise NotImplementedError
@abstractmethod
def _luminance(self, color):
raise NotImplementedError
@abstractmethod
def _contrast_ratio(self, bgcolor, foreground):
raise NotImplementedError
@abstractmethod
def _name(self, user, max_length) -> str:
raise NotImplementedError
@abstractmethod
def _add_corners(self, im, rad, multiplier=6):
raise NotImplementedError
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""
This allows the metaclass used for proper type detection to
coexist with discord.py's metaclass
"""
pass |
test should be able to click element | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import MoveTargetOutOfBoundsException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def test_clicking_on_anchor_scrolls_page(driver, pages):
scrollScript = """var pageY;
if (typeof(window.pageYOffset) == 'number') {
pageY = window.pageYOffset;
} else {
pageY = document.documentElement.scrollTop;
}
return pageY;"""
pages.load("macbeth.html")
driver.find_element(By.PARTIAL_LINK_TEXT, "last speech").click()
yOffset = driver.execute_script(scrollScript)
    # Focusing on the link in order to click it (without actually following it)
    # scrolls it into view, which leaves the page a few pixels further than 0
assert yOffset > 300
def test_should_scroll_to_click_on_an_element_hidden_by_overflow(driver, pages):
pages.load("click_out_of_bounds_overflow.html")
link = driver.find_element(By.ID, "link")
try:
link.click()
except MoveTargetOutOfBoundsException as e:
AssertionError("Should not be out of bounds: %s" % e.msg)
def test_should_be_able_to_click_on_an_element_hidden_by_overflow(driver, pages):
pages.load("scroll.html")
link = driver.find_element(By.ID, "line8")
# This used to throw a MoveTargetOutOfBoundsException - we don't expect it to
link.click()
assert "line8" == driver.find_element(By.ID, "clicked").text
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
def test_should_be_able_to_click_on_an_element_hidden_by_double_overflow(driver, pages):
pages.load("scrolling_tests/page_with_double_overflow_auto.html")
driver.find_element(By.ID, "link").click()
WebDriverWait(driver, 3).until(EC.title_is("Clicked Successfully!"))
def test_should_be_able_to_click_on_an_element_hidden_by_yoverflow(driver, pages):
pages.load("scrolling_tests/page_with_y_overflow_auto.html")
driver.find_element(By.ID, "link").click()
WebDriverWait(driver, 3).until(EC.title_is("Clicked Successfully!"))
def test_should_not_scroll_overflow_elements_which_are_visible(driver, pages):
pages.load("scroll2.html")
    scroll_list = driver.find_element(By.TAG_NAME, "ul")
    item = scroll_list.find_element(By.ID, "desired")
    item.click()
    yOffset = driver.execute_script("return arguments[0].scrollTop", scroll_list)
assert 0 == yOffset, "Should not have scrolled"
@pytest.mark.xfail_firefox
@pytest.mark.xfail_remote
def test_should_not_scroll_if_already_scrolled_and_element_is_in_view(driver, pages):
pages.load("scroll3.html")
driver.find_element(By.ID, "button2").click()
scrollTop = get_scroll_top(driver)
driver.find_element(By.ID, "button1").click()
assert scrollTop == get_scroll_top(driver)
def test_should_be_able_to_click_radio_button_scrolled_into_view(driver, pages):
pages.load("scroll4.html")
driver.find_element(By.ID, "radio").click()
# If we don't throw, we're good
@pytest.mark.xfail_safari
def test_should_scroll_overflow_elements_if_click_point_is_out_of_view_but_element_is_in_view(driver, pages):
pages.load("scroll5.html")
driver.find_element(By.ID, "inner").click()
assert "clicked" == driver.find_element(By.ID, "clicked").text
@pytest.mark.xfail_firefox(reason="https://github.com/w3c/webdriver/issues/408")
@pytest.mark.xfail_remote(reason="https://github.com/w3c/webdriver/issues/408")
@pytest.mark.xfail_safari
def METHOD_NAME(driver, pages):
pages.load("scrolling_tests/page_with_frame_out_of_view.html")
driver.switch_to.frame(driver.find_element(By.NAME, "frame"))
element = driver.find_element(By.NAME, "checkbox")
element.click()
assert element.is_selected()
def test_should_be_able_to_click_element_that_is_out_of_view_in_aframe(driver, pages):
pages.load("scrolling_tests/page_with_scrolling_frame.html")
driver.switch_to.frame(driver.find_element(By.NAME, "scrolling_frame"))
element = driver.find_element(By.NAME, "scroll_checkbox")
element.click()
assert element.is_selected()
def test_should_not_be_able_to_click_element_that_is_out_of_view_in_anon_scrollable_frame(driver, pages):
pages.load("scrolling_tests/page_with_non_scrolling_frame.html")
driver.switch_to.frame("scrolling_frame")
element = driver.find_element(By.NAME, "scroll_checkbox")
element.click()
# TODO we should assert that the click was unsuccessful
@pytest.mark.xfail_safari
def test_should_be_able_to_click_element_that_is_out_of_view_in_aframe_that_is_out_of_view(driver, pages):
pages.load("scrolling_tests/page_with_scrolling_frame_out_of_view.html")
driver.switch_to.frame(driver.find_element(By.NAME, "scrolling_frame"))
element = driver.find_element(By.NAME, "scroll_checkbox")
element.click()
assert element.is_selected()
@pytest.mark.xfail_firefox
@pytest.mark.xfail_chrome
@pytest.mark.xfail_remote
def test_should_be_able_to_click_element_that_is_out_of_view_in_anested_frame(driver, pages):
pages.load("scrolling_tests/page_with_nested_scrolling_frames.html")
driver.switch_to.frame(driver.find_element(By.NAME, "scrolling_frame"))
driver.switch_to.frame(driver.find_element(By.NAME, "nested_scrolling_frame"))
element = driver.find_element(By.NAME, "scroll_checkbox")
element.click()
assert element.is_selected()
@pytest.mark.xfail_firefox
@pytest.mark.xfail_safari
@pytest.mark.xfail_chrome
@pytest.mark.xfail_remote
def test_should_be_able_to_click_element_that_is_out_of_view_in_anested_frame_that_is_out_of_view(driver, pages):
pages.load("scrolling_tests/page_with_nested_scrolling_frames_out_of_view.html")
driver.switch_to.frame(driver.find_element(By.NAME, "scrolling_frame"))
driver.switch_to.frame(driver.find_element(By.NAME, "nested_scrolling_frame"))
element = driver.find_element(By.NAME, "scroll_checkbox")
element.click()
assert element.is_selected()
def test_should_not_scroll_when_getting_element_size(driver, pages):
pages.load("scroll3.html")
scrollTop = get_scroll_top(driver)
driver.find_element(By.ID, "button1").size
assert scrollTop == get_scroll_top(driver)
def get_scroll_top(driver):
return driver.execute_script("return document.body.scrollTop") |
test valid split sms to with extra | from functools import cached_property
from urllib.parse import parse_qs
import responses
from sentry.models import Rule
from sentry.plugins.base import Notification
from sentry.testutils.cases import PluginTestCase, TestCase
from sentry_plugins.twilio.plugin import TwilioConfigurationForm, TwilioPlugin, split_sms_to
class TwilioPluginSMSSplitTest(TestCase):
def test_valid_split_sms_to(self):
to = "330-509-3095, (330)-509-3095, +13305093095, 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_extra_spaces(self):
to = "330-509-3095 , (330)-509-3095, +13305093095, 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_just_spaces(self):
to = "330-509-3095 (330)-509-3095 +13305093095 4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_no_whitespace(self):
to = "330-509-3095,(330)-509-3095,+13305093095,4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_split_sms_to_with_single_number(self):
to = "555-555-5555"
expected = {"555-555-5555"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_newline(self):
to = "330-509-3095,\n(330)-509-3095\n,+13305093095\n,\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def test_valid_split_sms_to_with_just_newlines(self):
to = "330-509-3095\n(330)-509-3095\n+13305093095\n\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
def METHOD_NAME(self):
to = "330-509-3095\n\n\n\n\n,\n\n\n\n\n\n\n\n\n(330)-509-3095,\n\n\n\n+13305093095,\n\n4045550144"
expected = {"330-509-3095", "(330)-509-3095", "+13305093095", "4045550144"}
actual = split_sms_to(to)
assert expected == actual
class TwilioConfigurationFormTest(TestCase):
def test_valid_form(self):
form = TwilioConfigurationForm(
data={
"sms_from": "3305093095",
"sms_to": "330-509-3095, (330)-509-3095, +13305093095, 4045550144",
"auth_token": "foo",
"account_sid": "bar",
}
)
self.assertTrue(form.is_valid())
cleaned = form.clean()
assert cleaned is not None
self.assertDictEqual(
cleaned,
{
"auth_token": "foo",
"sms_to": "+13305093095,+14045550144",
"sms_from": "+13305093095",
"account_sid": "bar",
},
)
def test_invalid_form(self):
form = TwilioConfigurationForm(data={"sms_from": "foobar", "sms_to": "911"})
self.assertFalse(form.is_valid())
errors = form.errors.as_data()
error_msgs = {k: [e.message for e in v] for k, v in errors.items()}
self.assertDictEqual(
error_msgs,
{
"auth_token": ["This field is required."],
"account_sid": ["This field is required."],
"sms_from": ["foobar is not a valid phone number."],
"sms_to": ["911 is not a valid phone number."],
},
)
class TwilioPluginTest(PluginTestCase):
@cached_property
def plugin(self):
return TwilioPlugin()
def test_conf_key(self):
assert self.plugin.conf_key == "twilio"
def test_entry_point(self):
self.assertPluginInstalled("twilio", self.plugin)
def test_is_configured(self):
for o in ("account_sid", "auth_token", "sms_from", "sms_to"):
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option(o, "foo", self.project)
assert self.plugin.is_configured(self.project) is True
@responses.activate
def test_simple_notification(self):
responses.add("POST", "https://api.twilio.com/2010-04-01/Accounts/abcdef/Messages.json")
self.plugin.set_option("account_sid", "abcdef", self.project)
self.plugin.set_option("auth_token", "abcd", self.project)
self.plugin.set_option("sms_from", "4158675309", self.project)
self.plugin.set_option("sms_to", "4154444444", self.project)
event = self.store_event(
data={
"message": "Hello world",
"level": "warning",
"platform": "python",
"culprit": "foo.bar",
},
project_id=self.project.id,
)
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}):
self.plugin.notify(notification)
request = responses.calls[0].request
payload = parse_qs(request.body)
assert payload == {
"To": ["+14154444444"],
"From": ["+14158675309"],
"Body": ["Sentry [%s] WARNING: Hello world" % self.project.slug.title()],
} |
test right option is selected on language | # encoding: utf-8
import pytest
from ckan.lib.helpers import url_for
from bs4 import BeautifulSoup
from ckan.tests import factories
class TestHome(object):
def test_home_renders(self, app):
response = app.get(url_for("home.index"))
assert "Welcome to CKAN" in response.body
@pytest.mark.usefixtures("non_clean_db")
def test_email_address_nag(self, app):
# before CKAN 1.6, users were allowed to have no email addresses
# can't use factory to create user as without email it fails validation
from ckan import model
user = model.User(name="has-no-email", password="correct123")
model.Session.add(user)
model.Session.commit()
user_token = factories.APIToken(user=user.id)
headers = {"Authorization": user_token["token"]}
response = app.get(url=url_for("home.index"), headers=headers)
assert "update your profile" in response.body
assert str(url_for("user.edit")) in response.body
assert " and add your email address." in response.body
@pytest.mark.usefixtures("non_clean_db")
def test_email_address_no_nag(self, app):
user = factories.User(email="[email protected]")
user_token = factories.APIToken(user=user["name"])
headers = {"Authorization": user_token["token"]}
response = app.get(url=url_for("home.index"), headers=headers)
assert "add your email address" not in response
@pytest.mark.ckan_config(
"ckan.legacy_route_mappings", '{"my_home_route": "home.index"}'
)
def test_map_pylons_to_flask_route(self, app):
response = app.get(url_for("my_home_route"))
assert "Welcome to CKAN" in response.body
response = app.get(url_for("home"))
assert "Welcome to CKAN" in response.body
@pytest.mark.ckan_config(
"ckan.legacy_route_mappings", {"my_home_route": "home.index"}
)
def test_map_pylons_to_flask_route_using_dict(self, app):
response = app.get(url_for("my_home_route"))
assert "Welcome to CKAN" in response.body
response = app.get(url_for("home"))
assert "Welcome to CKAN" in response.body
class TestI18nURLs(object):
def test_right_urls_are_rendered_on_language_selector(self, app):
response = app.get(url_for("home.index"))
html = BeautifulSoup(response.body)
select = html.find(id="field-lang-select")
for option in select.find_all("option"):
if option.text.strip() == u"English":
assert option["value"] == "/en/"
elif option.text.strip() == u"čeština (Česká republika)":
assert option["value"] == "/cs_CZ/"
elif option.text.strip() == u"português (Brasil)":
assert option["value"] == "/pt_BR/"
elif option.text.strip() == u"srpski (latinica)":
assert option["value"] == "/sr_Latn/"
def test_default_english_option_is_selected_on_language_selector(
self, app
):
response = app.get(url_for("home.index"))
html = BeautifulSoup(response.body)
select = html.find(id="field-lang-select")
for option in select.find_all("option"):
if option["value"] == "/en/":
assert option["selected"] == "selected"
else:
assert not option.has_attr("selected")
def METHOD_NAME(self, app):
response = app.get(url_for("home.index", locale="ca"))
html = BeautifulSoup(response.body)
select = html.find(id="field-lang-select")
for option in select.find_all("option"):
if option["value"] == "/ca/":
assert option["selected"] == "selected"
else:
assert not option.has_attr("selected")
def test_redirects_legacy_locales(self, app):
locales_mapping = [
('zh_TW', 'zh_Hant_TW'),
('zh_CN', 'zh_Hans_CN'),
]
for locale in locales_mapping:
legacy_locale = locale[0]
new_locale = locale[1]
response = app.get(f'/{legacy_locale}/', follow_redirects=False)
assert response.status_code == 308
assert (
response.headers['Location'] ==
f'http://test.ckan.net/{new_locale}'
)
response = app.get(f'/{legacy_locale}/dataset?some=param', follow_redirects=False)
assert response.status_code == 308
assert (
response.headers['Location'] ==
f'http://test.ckan.net/{new_locale}/dataset?some=param'
) |
distance indicators | """
BDS test for IID time series
References
----------
Broock, W. A., J. A. Scheinkman, W. D. Dechert, and B. LeBaron. 1996.
"A Test for Independence Based on the Correlation Dimension."
Econometric Reviews 15 (3): 197-235.
Kanzler, Ludwig. 1999.
"Very Fast and Correctly Sized Estimation of the BDS Statistic".
SSRN Scholarly Paper ID 151669. Rochester, NY: Social Science Research Network.
LeBaron, Blake. 1997.
"A Fast Algorithm for the BDS Statistic."
Studies in Nonlinear Dynamics & Econometrics 2 (2) (January 1).
"""
import numpy as np
from scipy import stats
from statsmodels.tools.validation import array_like
def METHOD_NAME(x, epsilon=None, distance=1.5):
"""
Calculate all pairwise threshold distance indicators for a time series
Parameters
----------
x : 1d array
observations of time series for which heaviside distance indicators
are calculated
epsilon : scalar, optional
the threshold distance to use in calculating the heaviside indicators
distance : scalar, optional
if epsilon is omitted, specifies the distance multiplier to use when
computing it
Returns
-------
indicators : 2d array
matrix of distance threshold indicators
Notes
-----
Since this can be a very large matrix, use np.int8 to save some space.
"""
x = array_like(x, 'x')
if epsilon is not None and epsilon <= 0:
raise ValueError("Threshold distance must be positive if specified."
" Got epsilon of %f" % epsilon)
if distance <= 0:
raise ValueError("Threshold distance must be positive."
" Got distance multiplier %f" % distance)
# TODO: add functionality to select epsilon optimally
# TODO: and/or compute for a range of epsilons in [0.5*s, 2.0*s]?
# or [1.5*s, 2.0*s]?
if epsilon is None:
epsilon = distance * x.std(ddof=1)
return np.abs(x[:, None] - x) < epsilon
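# Worked example (editor's addition, values chosen for illustration): for
# x = [0.0, 1.0, 3.0] and epsilon = 1.5 the pairwise test |x_i - x_j| < 1.5 gives
# the symmetric indicator matrix
#     [[ True,  True, False],
#      [ True,  True, False],
#      [False, False,  True]]
# which is the quantity that correlation_sum() below averages over.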
def correlation_sum(indicators, embedding_dim):
"""
Calculate a correlation sum
Useful as an estimator of a correlation integral
Parameters
----------
indicators : ndarray
2d array of distance threshold indicators
embedding_dim : int
embedding dimension
Returns
-------
corrsum : float
Correlation sum
indicators_joint
matrix of joint-distance-threshold indicators
"""
if not indicators.ndim == 2:
raise ValueError('Indicators must be a matrix')
if not indicators.shape[0] == indicators.shape[1]:
raise ValueError('Indicator matrix must be symmetric (square)')
if embedding_dim == 1:
indicators_joint = indicators
else:
corrsum, indicators = correlation_sum(indicators, embedding_dim - 1)
indicators_joint = indicators[1:, 1:]*indicators[:-1, :-1]
nobs = len(indicators_joint)
corrsum = np.mean(indicators_joint[np.triu_indices(nobs, 1)])
return corrsum, indicators_joint
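# Continuing the worked example above (editor's addition): for embedding_dim=1 the
# correlation sum is the mean of the upper triangle of that indicator matrix,
# i.e. mean([True, False, False]) = 1/3.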
def correlation_sums(indicators, max_dim):
"""
Calculate all correlation sums for embedding dimensions 1:max_dim
Parameters
----------
indicators : 2d array
matrix of distance threshold indicators
max_dim : int
maximum embedding dimension
Returns
-------
corrsums : ndarray
Correlation sums
"""
corrsums = np.zeros((1, max_dim))
corrsums[0, 0], indicators = correlation_sum(indicators, 1)
for i in range(1, max_dim):
corrsums[0, i], indicators = correlation_sum(indicators, 2)
return corrsums
def _var(indicators, max_dim):
"""
Calculate the variance of a BDS effect
Parameters
----------
indicators : ndarray
2d array of distance threshold indicators
max_dim : int
maximum embedding dimension
Returns
-------
variances : float
Variance of BDS effect
"""
nobs = len(indicators)
corrsum_1dim, _ = correlation_sum(indicators, 1)
k = ((indicators.sum(1)**2).sum() - 3*indicators.sum() +
2*nobs) / (nobs * (nobs - 1) * (nobs - 2))
variances = np.zeros((1, max_dim - 1))
for embedding_dim in range(2, max_dim + 1):
tmp = 0
for j in range(1, embedding_dim):
tmp += (k**(embedding_dim - j))*(corrsum_1dim**(2 * j))
variances[0, embedding_dim-2] = 4 * (
k**embedding_dim +
2 * tmp +
((embedding_dim - 1)**2) * (corrsum_1dim**(2 * embedding_dim)) -
(embedding_dim**2) * k * (corrsum_1dim**(2 * embedding_dim - 2)))
return variances, k
def bds(x, max_dim=2, epsilon=None, distance=1.5):
"""
BDS Test Statistic for Independence of a Time Series
Parameters
----------
x : ndarray
Observations of time series for which bds statistics is calculated.
max_dim : int
The maximum embedding dimension.
epsilon : {float, None}, optional
The threshold distance to use in calculating the correlation sum.
distance : float, optional
Specifies the distance multiplier to use when computing the test
statistic if epsilon is omitted.
Returns
-------
bds_stat : float
The BDS statistic.
pvalue : float
The p-values associated with the BDS statistic.
Notes
-----
The null hypothesis of the test statistic is for an independent and
identically distributed (i.i.d.) time series, and an unspecified
alternative hypothesis.
This test is often used as a residual diagnostic.
The calculation involves matrices of size (nobs, nobs), so this test
will not work with very long datasets.
Implementation conditions on the first m-1 initial values, which are
required to calculate the m-histories:
x_t^m = (x_t, x_{t-1}, ... x_{t-(m-1)})
"""
x = array_like(x, 'x', ndim=1)
nobs_full = len(x)
if max_dim < 2 or max_dim >= nobs_full:
raise ValueError("Maximum embedding dimension must be in the range"
" [2,len(x)-1]. Got %d." % max_dim)
# Cache the indicators
indicators = METHOD_NAME(x, epsilon, distance)
# Get estimates of m-dimensional correlation integrals
corrsum_mdims = correlation_sums(indicators, max_dim)
# Get variance of effect
variances, k = _var(indicators, max_dim)
stddevs = np.sqrt(variances)
bds_stats = np.zeros((1, max_dim - 1))
pvalues = np.zeros((1, max_dim - 1))
for embedding_dim in range(2, max_dim+1):
ninitial = (embedding_dim - 1)
nobs = nobs_full - ninitial
# Get estimates of 1-dimensional correlation integrals
# (see Kanzler footnote 10 for why indicators are truncated)
corrsum_1dim, _ = correlation_sum(indicators[ninitial:, ninitial:], 1)
corrsum_mdim = corrsum_mdims[0, embedding_dim - 1]
# Get the intermediate values for the statistic
effect = corrsum_mdim - (corrsum_1dim**embedding_dim)
sd = stddevs[0, embedding_dim - 2]
# Calculate the statistic: bds_stat ~ N(0,1)
bds_stats[0, embedding_dim - 2] = np.sqrt(nobs) * effect / sd
# Calculate the p-value (two-tailed test)
pvalue = 2*stats.norm.sf(np.abs(bds_stats[0, embedding_dim - 2]))
pvalues[0, embedding_dim - 2] = pvalue
return np.squeeze(bds_stats), np.squeeze(pvalues) |
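# Hedged usage sketch (editor's addition, not part of statsmodels): running the BDS
# test on a synthetic i.i.d. series; under the null the statistics are roughly
# N(0, 1), so the p-values should usually be large.
if __name__ == "__main__":
    rng = np.random.RandomState(12345)
    iid_series = rng.standard_normal(500)
    bds_statistics, bds_pvalues = bds(iid_series, max_dim=3)
    print("BDS statistics:", bds_statistics)
    print("p-values:", bds_pvalues)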
test should set scripts sources property | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from os.path import join
from pybuilder.core import Project
from pybuilder.plugins.python.core_plugin import (DISTRIBUTION_PROPERTY,
PYTHON_SOURCES_PROPERTY,
SCRIPTS_SOURCES_PROPERTY,
SCRIPTS_TARGET_PROPERTY)
from pybuilder.plugins.python.core_plugin import init_python_directories
from test_utils import patch
class InitPythonDirectoriesTest(unittest.TestCase):
def greedy(self, generator):
return list(generator)
def setUp(self):
self.project = Project(".")
@patch("pybuilder.plugins.python.core_plugin.walk")
def test_should_set_list_modules_function_with_project_modules(self, walk):
self.project.set_property("dir_source_main_python",
"src/main/python")
init_python_directories(self.project)
src_path = self.project.expand_path("$dir_source_main_python")
walk.return_value = [
(src_path, ["pybuilder"], ("foo.py", "bar.py")),
(join(src_path, "pybuilder"), ["pluginhelper", "plugins"], ["__init__.py", "foo.py", "foo.txt"]),
(join(src_path, "pybuilder", "pluginhelper"), [], ["__init__.py"]),
(join(src_path, "pybuilder", "plugins"), [], ["__init__.py"])
]
self.assertEqual(
["bar", "foo"],
self.greedy(self.project.list_modules())
)
@patch("pybuilder.plugins.python.core_plugin.walk")
def test_should_set_list_packages_function_with_project_packages(self, walk):
self.project.set_property("dir_source_main_python",
"src/main/python")
init_python_directories(self.project)
src_path = self.project.expand_path("$dir_source_main_python")
walk.return_value = [
(join(src_path, "pybuilder"), ["pluginhelper", "plugins"], ["__init__.py", "foo.py", "foo.txt"]),
(join(src_path, "pybuilder", "pluginhelper"), [], ["__init__.py"]),
(join(src_path, "pybuilder", "plugins"), [], ["__init__.py"])
]
self.assertEqual(
["pybuilder",
"pybuilder.pluginhelper",
"pybuilder.plugins"],
self.greedy(self.project.list_packages())
)
@patch("pybuilder.plugins.python.core_plugin.walk")
def test_should_not_cut_off_packages_when_path_ends_with_trailing_slash(self, walk):
self.project.set_property("dir_source_main_python",
"src/main/python/")
init_python_directories(self.project)
src_path = self.project.expand_path("$dir_source_main_python")
walk.return_value = [
(join(src_path, "pybuilder"), ["pluginhelper", "plugins"], ("__init__.py", "foo.py", "foo.txt")),
(join(src_path, "pybuilder", "pluginhelper"), [], ["__init__.py"]),
(join(src_path, "pybuilder", "plugins"), [], ["__init__.py"])
]
self.assertEqual(
["pybuilder",
"pybuilder.pluginhelper",
"pybuilder.plugins"],
self.greedy(self.project.list_packages())
)
@patch("pybuilder.plugins.python.core_plugin.walk")
@patch("pybuilder.plugins.python.core_plugin.exists")
def test_should_set_list_scripts_function_with_project_scripts(self, exists, walk):
self.project.set_property("dir_source_main_scripts",
"src/main/scripts")
init_python_directories(self.project)
src_path = self.project.expand_path("$dir_source_main_scripts")
exists.return_value = True
walk.return_value = [
(src_path, ["pybuilder"], ("boo.py", "baz.py")),
(join(src_path, "pybuilder"), ["pluginhelper", "plugins"], ["__init__.py", "foo.py", "foo.txt"]),
(join(src_path, "pybuilder", "pluginhelper"), [], ["__init__.py"]),
(join(src_path, "pybuilder", "plugins"), [], ["__init__.py"])
]
self.assertEqual(
["baz.py", "boo.py"],
self.greedy(self.project.list_scripts())
)
def test_should_set_python_sources_property(self):
init_python_directories(self.project)
self.assertEqual(
"src/main/python", self.project.get_property(PYTHON_SOURCES_PROPERTY, "caboom"))
def METHOD_NAME(self):
init_python_directories(self.project)
self.assertEqual(
"src/main/scripts", self.project.get_property(SCRIPTS_SOURCES_PROPERTY, "caboom"))
def test_should_set_dist_scripts_property(self):
init_python_directories(self.project)
self.assertEqual(
"scripts", self.project.get_property(SCRIPTS_TARGET_PROPERTY))
def test_should_set_dist_property(self):
init_python_directories(self.project)
self.assertEqual("$dir_target/dist/.-1.0.dev0",
self.project.get_property(DISTRIBUTION_PROPERTY, "caboom")) |
add plugin | import warnings
from cms.api import METHOD_NAME
from cms.utils.permissions import get_current_user
from cms.wizards.wizard_base import Wizard
from cms.wizards.wizard_pool import AlreadyRegisteredException, wizard_pool
from django import forms
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from .cms_appconfig import BlogConfig
from .fields import slugify
from .forms import PostAdminFormBase
from .models import Post
from .settings import get_setting
class PostWizardForm(PostAdminFormBase):
default_appconfig = None
slug = forms.SlugField(
label=_("Slug"),
max_length=752,
required=False,
help_text=_("Leave empty for automatic slug, or override as required."),
)
def __init__(self, *args, **kwargs):
if "initial" not in kwargs or not kwargs.get("initial", False):
kwargs["initial"] = {}
kwargs["initial"]["app_config"] = self.default_appconfig
if "data" in kwargs and kwargs["data"] is not None:
data = kwargs["data"].copy()
data["1-app_config"] = self.default_appconfig
kwargs["data"] = data
super().__init__(*args, **kwargs)
self.fields["app_config"].widget = forms.Select(
attrs=self.fields["app_config"].widget.attrs,
choices=self.fields["app_config"].widget.choices,
)
self.fields["app_config"].widget.attrs["disabled"] = True
if "categories" in self.fields:
self.fields["categories"].queryset = self.available_categories
class Meta:
model = Post
fields = ["app_config", "title", "slug", "abstract", "categories"]
class Media:
js = (
"admin/js/vendor/jquery/jquery.js",
"admin/js/jquery.init.js",
)
def save(self, commit=True):
self.instance._set_default_author(get_current_user())
instance = super().save(commit)
self.METHOD_NAME()
return instance
def clean_slug(self):
"""
Generate a valid slug, in case the given one is taken
"""
source = self.cleaned_data.get("slug", "")
lang_choice = self.language_code
if not source:
source = slugify(self.cleaned_data.get("title", ""))
qs = Post._default_manager.active_translations(lang_choice).language(lang_choice)
used = list(qs.values_list("translations__slug", flat=True))
slug = source
i = 1
while slug in used:
slug = "{}-{}".format(source, i)
i += 1
return slug
def METHOD_NAME(self):
"""
Add text field content as text plugin to the blog post.
"""
text = self.cleaned_data.get("post_text", "")
app_config = self.cleaned_data.get("app_config", None)
plugin_type = get_setting("WIZARD_CONTENT_PLUGIN")
plugin_body = get_setting("WIZARD_CONTENT_PLUGIN_BODY")
if text and app_config.use_placeholder:
opts = {
"placeholder": self.instance.content,
"plugin_type": plugin_type,
"language": self.language_code,
plugin_body: text,
}
METHOD_NAME(**opts)
class PostWizard(Wizard):
pass
for config in BlogConfig.objects.all().order_by("namespace"):
seed = slugify("{}.{}".format(config.app_title, config.namespace))
new_wizard = type(str(seed), (PostWizard,), {})
new_form = type("{}Form".format(seed), (PostWizardForm,), {"default_appconfig": config.pk})
post_wizard = new_wizard(
title=_("New {0}").format(config.object_name),
weight=200,
form=new_form,
model=Post,
description=_("Create a new {0} in {1}").format(config.object_name, config.app_title),
)
try:
wizard_pool.register(post_wizard)
except AlreadyRegisteredException: # pragma: no cover
if settings.DEBUG:
raise
else:
warnings.warn(
f"Wizard {seed} cannot be registered. Please make sure that "
f"BlogConfig.namespace {config.namespace} and BlogConfig.app_title {config.app_title} are"
"unique together",
stacklevel=2,
) |
test can change images | # Copyright 2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
from io import BytesIO
from mutagen import asf
from tests import TestCase, get_data_path
from quodlibet.formats.wma import WMAFile, unpack_image, pack_image
from quodlibet.formats._image import APICType, EmbeddedImage
from .helper import get_temp_copy
class TWMAFile(TestCase):
def setUp(self):
self.f = get_temp_copy(get_data_path('test.wma'))
self.song = WMAFile(self.f)
self.f2 = get_temp_copy(get_data_path('test-2.wma'))
self.song2 = WMAFile(self.f2)
self.f3 = get_temp_copy(get_data_path('test.asf'))
self.song3 = WMAFile(self.f3)
def tearDown(self):
os.unlink(self.f)
os.unlink(self.f2)
os.unlink(self.f3)
def test_basic(self):
self.song["title"] = u"SomeTestValue"
self.song.write()
self.song.reload()
self.assertEqual(self.song("title"), u"SomeTestValue")
def test_multi(self):
self.song["genre"] = u"Rock\nPop"
self.song.write()
self.song.reload()
# XXX: mutagen doesn't preserve order.. fix it!
self.assertEqual(set(self.song.list("genre")), {u"Rock", u"Pop"})
def test_length(self):
self.assertAlmostEqual(self.song("~#length"), 3.7120, 3)
self.assertAlmostEqual(self.song2("~#length"), 3.684, 3)
self.assertAlmostEqual(self.song3("~#length"), 11.38, 2)
def test_channels(self):
assert self.song("~#channels") == 2
assert self.song2("~#channels") == 2
assert self.song3("~#channels") == 1
def test_bitrate(self):
self.assertEqual(self.song("~#bitrate"), 64)
self.assertEqual(self.song2("~#bitrate"), 38)
self.assertEqual(self.song3("~#bitrate"), 5)
def test_sample_rate(self):
assert self.song("~#samplerate") == 48000
assert self.song2("~#samplerate") == 44100
assert self.song3("~#samplerate") == 8000
def test_write(self):
self.song.write()
self.song2.write()
self.song3.write()
def test_can_change(self):
self.assertTrue(self.song.can_change("title"))
self.assertFalse(self.song.can_change("foobar"))
self.assertTrue("albumartist" in self.song.can_change())
def test_format(self):
self.assertEqual(self.song("~format"), "ASF")
self.assertEqual(self.song2("~format"), "ASF")
self.assertEqual(self.song3("~format"), "ASF")
def test_codec(self):
self.assertEqual(self.song("~codec"),
u"Windows Media Audio 9 Standard")
self.assertEqual(self.song2("~codec"),
u"Windows Media Audio 9 Professional")
self.assertEqual(self.song3("~codec"),
u"Intel G.723")
def test_encoding(self):
self.assertEqual(
self.song("~encoding"),
u"Windows Media Audio 9.1\n64 kbps, 48 kHz, stereo 2-pass CBR")
self.assertEqual(
self.song2("~encoding"),
(u"Windows Media Audio 9.1 Professional\n192 kbps, 44 kHz, "
"2 channel 24 bit 2-pass VBR"))
self.assertEqual(self.song3("~encoding"),
u"Microsoft G.723.1\n8 kHz Mono, 5333 Bit/s")
def test_mb_release_track_id(self):
tag = asf.ASF(self.f)
tag["MusicBrainz/Release Track Id"] = [u"foo"]
tag.save()
song = WMAFile(self.f)
self.assertEqual(song("musicbrainz_releasetrackid"), u"foo")
song["musicbrainz_releasetrackid"] = u"bla"
song.write()
tag = asf.ASF(self.f)
self.assertEqual(tag["MusicBrainz/Release Track Id"], [u"bla"])
def test_invalid(self):
path = get_data_path('empty.xm')
self.assertTrue(os.path.exists(path))
self.assertRaises(Exception, WMAFile, path)
def test_get_images(self):
tag = asf.ASF(self.f2)
tag["WM/Picture"] = [tag["WM/Picture"][0], tag["WM/Picture"][0]]
tag.save()
self.song2.reload()
images = self.song2.get_images()
self.assertTrue(images and len(images) == 2)
def test_get_image(self):
self.assertFalse(self.song.get_primary_image())
image = self.song2.get_primary_image()
self.assertTrue(image)
self.assertEqual(image.mime_type, "image/jpeg")
self.assertTrue(image.read())
def test_get_image_invalid_data(self):
tag = asf.ASF(self.f)
tag["WM/Picture"] = [asf.ASFValue(b"nope", asf.BYTEARRAY)]
tag.save()
self.assertFalse(self.song.has_images)
self.song.reload()
self.assertTrue(self.song.has_images)
image = self.song.get_primary_image()
self.assertFalse(image)
def test_unpack_image_min(self):
data = b"\x03" + b"\x00" * 4 + b"\x00" * 4
mime, desc, data, type_ = unpack_image(data)
self.assertEqual(mime, u"")
self.assertEqual(desc, u"")
self.assertEqual(data, b"")
self.assertEqual(type_, 3)
def test_unpack_image_invalid(self):
self.assertRaises(ValueError, unpack_image, b"")
self.assertRaises(ValueError, unpack_image, b"\x00" * 6)
self.assertRaises(ValueError, unpack_image, b"\x00" * 8)
self.assertRaises(ValueError, unpack_image, b"\x00" * 100)
def test_pack_image(self):
d = pack_image(
u"image/jpeg", u"Description", b"foo", APICType.COVER_FRONT)
mime, desc, data, type_ = unpack_image(d)
self.assertEqual(mime, u"image/jpeg")
self.assertEqual(desc, u"Description")
self.assertEqual(data, b"foo")
self.assertEqual(type_, APICType.COVER_FRONT)
def test_clear_images(self):
# cover case
image = self.song2.get_primary_image()
self.assertTrue(image)
self.song2.clear_images()
self.assertFalse(self.song2.has_images)
self.song2.reload()
image = self.song2.get_primary_image()
self.assertFalse(image)
# no cover case
self.song.clear_images()
def test_set_image(self):
fileobj = BytesIO(b"foo")
image = EmbeddedImage(fileobj, "image/jpeg", 10, 10, 8)
self.assertFalse(self.song.has_images)
self.song.set_image(image)
self.assertTrue(self.song.has_images)
image = self.song.get_primary_image()
self.assertEqual(image.mime_type, "image/jpeg")
self.assertEqual(image.read(), b"foo")
def METHOD_NAME(self):
self.assertTrue(self.song.can_change_images)
def test_can_multiple_values(self):
self.assertTrue("artist" in self.song.can_multiple_values())
self.assertTrue(self.song.can_multiple_values("genre")) |
test check on clean dataset | # ----------------------------------------------------------------------------
# Copyright (C) 2021-2023 Deepchecks (https://www.deepchecks.com)
#
# This file is part of Deepchecks.
# Deepchecks is distributed under the terms of the GNU Affero General
# Public License (version 3 or later).
# You should have received a copy of the GNU Affero General Public License
# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
#
"""Test for the NLP SpecialCharacters check."""
import pytest
from hamcrest import *
from deepchecks.nlp.checks.data_integrity.special_characters import SpecialCharacters
from deepchecks.nlp.text_data import TextData
from tests.base.utils import equal_condition_result
@pytest.fixture
def clean_dataset():
return TextData(
raw_text=[
"Hello world",
"Do not worry be happy",
"Weather is fine"
]
)
@pytest.fixture
def dataset_with_special_characters():
return TextData(
raw_text=[
"Hello world¶¶",
"Do not worry¸ be happy·",
"Weather is fine",
"Readability counts·",
"Errors should never pass silently·",
]
)
def METHOD_NAME(clean_dataset):
# Arrange
check = SpecialCharacters().add_condition_samples_ratio_w_special_characters_less_or_equal(0)
# Act
result = check.run(dataset=clean_dataset)
conditions_decision = check.conditions_decision(result)
# Assert
assert_that(result.value, has_entries({
"samples_per_special_char": has_length(0),
"percent_of_samples_with_special_chars": equal_to(0),
'percent_special_chars_per_sample': has_length(3),
}))
assert_that(result.display, has_length(0))
assert_that(conditions_decision[0], equal_condition_result(
is_pass=True,
details="Found 0 samples with special char ratio above threshold",
name='Ratio of samples containing more than 20% special characters is below 0%'
)) # type: ignore
def test_check_on_dataset_with_empty_sample():
# Arrange
data = TextData(raw_text=['', 'aa'])
check = SpecialCharacters().add_condition_samples_ratio_w_special_characters_less_or_equal(0)
# Act
result = check.run(dataset=data)
# Assert
assert_that(result.value, has_entries({
"samples_per_special_char": has_length(0),
"percent_of_samples_with_special_chars": equal_to(0),
'percent_special_chars_per_sample': has_length(2),
}))
def test_check_on_samples_with_special_characters(dataset_with_special_characters):
# Arrange
check = SpecialCharacters().add_condition_samples_ratio_w_special_characters_less_or_equal(
threshold_ratio_per_sample=0.1, max_ratio=0.15)
# Act
result = check.run(dataset=dataset_with_special_characters)
conditions_decision = check.conditions_decision(result)
# Assert
assert_that(result.value, has_entries({
"samples_per_special_char": has_entries({'¶': [0], '·': [1, 3, 4], '¸': [1]}),
"percent_of_samples_with_special_chars": equal_to(0.8),
'percent_special_chars_per_sample': has_length(5),
}))
assert_that(result.display, has_length(3))
assert_that(conditions_decision[0], equal_condition_result(
is_pass=False,
details="Found 1 samples with special char ratio above threshold",
name='Ratio of samples containing more than 10% special characters is below 15%'
)) # type: ignore
def test_tweet_dataset(tweet_emotion_train_test_textdata_sampled):
# Arrange
_, text_data = tweet_emotion_train_test_textdata_sampled
check = SpecialCharacters().add_condition_samples_ratio_w_special_characters_less_or_equal()
# Act
result = check.run(dataset=text_data)
conditions_decision = check.conditions_decision(result)
# Assert
assert_that(result.value, has_entries({
"samples_per_special_char": has_entries({'😍': [71, 614, 1813, 1901]}),
"percent_of_samples_with_special_chars": equal_to(0.168),
'percent_special_chars_per_sample': has_length(500),
}))
assert_that(result.display, has_length(3))
assert_that(conditions_decision[0], equal_condition_result(
is_pass=True,
details="Found 1 samples with special char ratio above threshold",
name='Ratio of samples containing more than 20% special characters is below 5%'
)) # type: ignore |
printed assert equal | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
import os
import warnings
import platform
import unittest
import inspect
import contextlib
from pathlib import Path
import typing as tp
import numpy as np
from . import errors
try:
import pytest
except ImportError:
pass # makes most of this module usable without pytest
@contextlib.contextmanager
def suppress_nevergrad_warnings() -> tp.Iterator[None]:
with warnings.catch_warnings():
# tests do not need to be efficient
warnings.simplefilter("ignore", category=errors.NevergradWarning)
yield
def assert_set_equal(
estimate: tp.Iterable[tp.Any], reference: tp.Iterable[tp.Any], err_msg: str = ""
) -> None:
"""Asserts that both sets are equals, with comprehensive error message.
This function should only be used in tests.
Parameters
----------
estimate: iterable
sequence of elements to compare with the reference set of elements
reference: iterable
reference sequence of elements
"""
estimate, reference = (set(x) for x in [estimate, reference])
elements = [("additional", estimate - reference), ("missing", reference - estimate)]
messages = [" - {} element(s): {}.".format(name, s) for (name, s) in elements if s]
if messages:
messages = ([err_msg] if err_msg else []) + ["Sets are not equal:"] + messages
raise AssertionError("\n".join(messages))
def METHOD_NAME(actual: tp.Any, desired: tp.Any, err_msg: str = "") -> None:
try:
np.testing.assert_equal(actual, desired, err_msg=err_msg)
except AssertionError as e:
print("\n" + "# " * 12 + "DEBUG MESSAGE " + "# " * 12)
print(f"Expected: {desired}\nbut got: {actual}")
raise e
def assert_markdown_links_not_broken(folder: tp.Union[str, Path]) -> None:
"""Asserts that all relative hyperlinks are valid in markdown files of the folder
and its subfolders.
Note
----
http hyperlinks are not tested.
"""
links = _get_all_markdown_links(folder)
broken = [l for l in links if not l.exists()]
if broken:
text = "\n - ".join([str(l) for l in broken])
raise AssertionError(f"Broken markdown links:\n - {text}")
class _MarkdownLink:
"""Handle to a markdown link, for easy existence test and printing
(external links are not tested)
"""
def __init__(self, folder: Path, filepath: Path, string: str, link: str) -> None:
self._folder = folder
self._filepath = filepath
self._string = string
self._link = link
def exists(self) -> bool:
if self._link.startswith("http"): # consider it exists
return True
fullpath = self._folder / self._filepath.parent / self._link
return fullpath.exists()
def __repr__(self) -> str:
return f"{self._link} ({self._string}) from file {self._filepath}"
def _get_all_markdown_links(folder: tp.Union[str, Path]) -> tp.List[_MarkdownLink]:
"""Returns a list of all existing markdown links"""
pattern = re.compile(r"\[(?P<string>.+?)\]\((?P<link>\S+?)\)")
folder = Path(folder).expanduser().absolute()
links = []
for rfilepath in folder.glob("**/*.md"):
if ("/site-packages/" if os.name != "nt" else "\\site-packages\\") not in str(rfilepath):
filepath = folder / rfilepath
with filepath.open("r") as f:
text = f.read()
for match in pattern.finditer(text):
links.append(_MarkdownLink(folder, rfilepath, match.group("string"), match.group("link")))
return links
class parametrized:
"""Simplified decorator API for specifying named parametrized test with pytests
(like with old "genty" package)
See example of use in test_testing
Parameters
----------
**kwargs:
name of the argument is converted as id of the experiments, and the provided tuple
contains a value for each of the arguments of the underlying function (in the definition order).
"""
def __init__(self, **kwargs: tp.Tuple[tp.Any, ...]):
self.ids = sorted(kwargs)
self.params = tuple(kwargs[name] for name in self.ids)
assert self.params
self.num_params = len(self.params[0])
assert all(isinstance(p, (tuple, list)) for p in self.params)
assert all(self.num_params == len(p) for p in self.params[1:])
def __call__(self, func: tp.Callable[..., None]) -> tp.Any: # type is lost here :(
names = list(inspect.signature(func).parameters.keys())
assert len(names) == self.num_params, f"Parameter names: {names}"
return pytest.mark.parametrize(
",".join(names), self.params if self.num_params > 1 else [p[0] for p in self.params], ids=self.ids
)(func)
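# Hypothetical usage sketch (editor's addition; the real usage lives in test_testing,
# as the docstring notes): each keyword becomes a test id and its tuple is unpacked
# into the decorated function's parameters in definition order.
#
#     @parametrized(
#         zeros=(0, 0),
#         mixed=(1, -1),
#     )
#     def test_sum_is_zero(a: int, b: int) -> None:
#         assert a + b == 0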
@contextlib.contextmanager
def skip_error_on_systems(error_type: tp.Type[Exception], systems: tp.Iterable[str]) -> tp.Iterator[None]:
"""Context manager for skipping a test upon a specific error on specific systems
This is mostly used to skip some tests for features which are incompatible with Windows
Eg. of systems (mind the capitalized first letter): Darwin, Windows
"""
try:
yield
except error_type as e:
system = platform.system()
if system in systems:
raise unittest.SkipTest(f"Skipping on system {system}")
if systems: # only print if the context is actually active for some system
print(f'This is system "{system}" (should it be skipped for the test?)')
raise e |
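# Hypothetical usage sketch (editor's addition; skip_error_on_systems is the helper
# defined above, while test_symlink_behaviour and the pytest tmp_path fixture are
# assumed names): a PermissionError raised only on Windows skips the test there and
# still fails on every other platform.
#
#     def test_symlink_behaviour(tmp_path) -> None:
#         with skip_error_on_systems(PermissionError, systems=("Windows",)):
#             (tmp_path / "link").symlink_to(tmp_path / "target")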
etcd client mock | """
Test case for the etcd SDB module
"""
import logging
import pytest
import salt.sdb.etcd_db as etcd_db
import salt.utils.etcd_util as etcd_util
from tests.support.mock import MagicMock, create_autospec, patch
log = logging.getLogger(__name__)
@pytest.fixture
def configure_loader_modules():
return {
etcd_db: {
"__opts__": {
"myetcd": {
"url": "http://127.0.0.1",
"auth": {"token": "test", "method": "token"},
}
}
}
}
@pytest.fixture
def instance():
return create_autospec(etcd_util.EtcdBase)
@pytest.fixture
def METHOD_NAME(instance):
mocked_client = MagicMock()
mocked_client.return_value = instance
return mocked_client
def test_set(METHOD_NAME, instance):
"""
Test salt.sdb.etcd_db.set function
"""
with patch("salt.sdb.etcd_db._get_conn", METHOD_NAME):
instance.get.return_value = "super awesome"
assert (
etcd_db.set_("sdb://myetcd/path/to/foo/bar", "super awesome")
== "super awesome"
)
instance.set.assert_called_with("sdb://myetcd/path/to/foo/bar", "super awesome")
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert (
etcd_db.set_(
"sdb://myetcd/path/to/foo/bar", "super awesome", service="Pablo"
)
== "super awesome"
)
instance.set.assert_called_with("sdb://myetcd/path/to/foo/bar", "super awesome")
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert (
etcd_db.set_(
"sdb://myetcd/path/to/foo/bar", "super awesome", profile="Picasso"
)
== "super awesome"
)
instance.set.assert_called_with("sdb://myetcd/path/to/foo/bar", "super awesome")
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
instance.get.side_effect = Exception
pytest.raises(Exception, etcd_db.set_, "bad key", "bad value")
def test_get(METHOD_NAME, instance):
"""
Test salt.sdb.etcd_db.get function
"""
with patch("salt.sdb.etcd_db._get_conn", METHOD_NAME):
instance.get.return_value = "super awesome"
assert etcd_db.get("sdb://myetcd/path/to/foo/bar") == "super awesome"
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert (
etcd_db.get("sdb://myetcd/path/to/foo/bar", service="salt")
== "super awesome"
)
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert (
etcd_db.get("sdb://myetcd/path/to/foo/bar", profile="stack")
== "super awesome"
)
instance.get.assert_called_with("sdb://myetcd/path/to/foo/bar")
instance.get.side_effect = Exception
pytest.raises(Exception, etcd_db.get, "bad key")
def test_delete(METHOD_NAME, instance):
"""
Test salt.sdb.etcd_db.delete function
"""
with patch("salt.sdb.etcd_db._get_conn", METHOD_NAME):
instance.delete.return_value = True
assert etcd_db.delete("sdb://myetcd/path/to/foo/bar")
instance.delete.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert etcd_db.delete("sdb://myetcd/path/to/foo/bar", service="salt")
instance.delete.assert_called_with("sdb://myetcd/path/to/foo/bar")
assert etcd_db.delete("sdb://myetcd/path/to/foo/bar", profile="stack")
instance.delete.assert_called_with("sdb://myetcd/path/to/foo/bar")
instance.delete.side_effect = Exception
assert not etcd_db.delete("sdb://myetcd/path/to/foo/bar")
def test__get_conn(METHOD_NAME):
"""
Test salt.sdb.etcd_db._get_conn function
"""
with patch("salt.utils.etcd_util.get_conn", METHOD_NAME):
conn = etcd_db._get_conn("random profile")
# Checking for EtcdClient methods since we autospec'd
assert hasattr(conn, "set")
assert hasattr(conn, "get") |
get form kwargs | # Copyright © Michal Čihař <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import annotations
from django.core.exceptions import PermissionDenied
from django.forms import inlineformset_factory
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import redirect
from django.urls import reverse
from django.utils.translation import gettext
from django.views.generic import DetailView, UpdateView
from weblate.auth.forms import ProjectTeamForm, SitewideTeamForm
from weblate.auth.models import AutoGroup, Group, Invitation, User
from weblate.trans.forms import UserAddTeamForm, UserManageForm
from weblate.trans.util import redirect_next
from weblate.utils import messages
from weblate.utils.views import get_paginator, show_form_errors
from weblate.wladmin.forms import ChangedCharField
class TeamUpdateView(UpdateView):
model = Group
template_name = "auth/team.html"
auto_formset = inlineformset_factory(
Group,
AutoGroup,
fields=("match",),
extra=0,
field_classes={"match": ChangedCharField},
)
def get_form_class(self):
if self.object.defining_project:
return ProjectTeamForm
return SitewideTeamForm
def get_form(self, form_class=None):
if not self.request.user.has_perm("meta:team.edit", self.object):
return None
return super().get_form(form_class)
def METHOD_NAME(self):
kwargs = super().METHOD_NAME()
if self.object.defining_project:
kwargs["project"] = self.object.defining_project
return kwargs
def get_object(self, queryset=None):
result = super().get_object(queryset=queryset)
user = self.request.user
if (
not user.has_perm("meta:team.edit", result)
and not user.has_perm("meta:team.users", result)
and not user.groups.filter(pk=result.pk).exists()
):
raise PermissionDenied
return result
def get_context_data(self, **kwargs):
result = super().get_context_data(**kwargs)
if "auto_formset" not in result:
result["auto_formset"] = self.auto_formset(instance=self.object)
if self.request.user.has_perm("meta:team.users", self.object):
result["users"] = get_paginator(
self.request,
self.object.user_set.filter(is_active=True, is_bot=False).order(),
)
result["add_user_form"] = UserAddTeamForm()
result["admins"] = self.object.admins.all()
return result
def handle_add_user(self, request):
form = UserAddTeamForm(request.POST)
if form.is_valid():
if form.cleaned_data["make_admin"]:
self.object.admins.add(form.cleaned_data["user"])
else:
self.object.admins.remove(form.cleaned_data["user"])
form.cleaned_data["user"].groups.add(self.object)
else:
show_form_errors(request, form)
return HttpResponseRedirect(self.get_success_url())
def handle_remove_user(self, request):
form = UserManageForm(request.POST)
if form.is_valid():
form.cleaned_data["user"].groups.remove(self.object)
else:
show_form_errors(request, form)
return HttpResponseRedirect(self.get_success_url())
def handle_delete(self, request):
if self.object.defining_project:
fallback = (
reverse(
"manage-access",
kwargs={"project": self.object.defining_project.slug},
)
+ "#teams"
)
elif request.user.is_superuser:
fallback = reverse("manage-teams")
else:
fallback = reverse("manage_access") + "#teams"
if self.object.internal and not self.object.defining_project:
messages.error(request, gettext("Cannot remove built-in team!"))
else:
self.object.delete()
return redirect_next(request.POST.get("next"), fallback)
def post(self, request, **kwargs):
self.object = self.get_object()
if self.request.user.has_perm("meta:team.users", self.object):
if "add_user" in request.POST:
return self.handle_add_user(request)
if "remove_user" in request.POST:
return self.handle_remove_user(request)
form = self.get_form()
if form is None:
return self.form_invalid(form, None)
if "delete" in request.POST:
return self.handle_delete(request)
formset = self.auto_formset(instance=self.object, data=request.POST)
if form.is_valid() and formset.is_valid():
formset.save()
return self.form_valid(form)
return self.form_invalid(form, formset)
def form_invalid(self, form, formset):
"""If the form is invalid, render the invalid form."""
return self.render_to_response(
self.get_context_data(form=form, auto_formset=formset)
)
class InvitationView(DetailView):
model = Invitation
def check_access(self):
invitation = self.object
user = self.request.user
if invitation.user:
if not user.is_authenticated:
raise PermissionDenied
if invitation.user != user:
raise Http404
def get(self, request, *args, **kwargs):
self.object = self.get_object()
self.check_access()
if not self.object.user:
            # When inviting a new user, go through registration
request.session["invitation_link"] = str(self.object.pk)
return redirect("register")
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
def post(self, request, **kwargs):
self.object = invitation = self.get_object()
# Handle admin actions first
action = request.POST.get("action", "")
if action in ("resend", "remove"):
project = invitation.group.defining_project
# Permission check
if not request.user.has_perm(
"project.permissions" if project else "user.edit", project
):
raise PermissionDenied
# Perform admin action
if action == "resend":
invitation.send_email()
messages.success(request, gettext("User invitation e-mail was sent."))
else:
invitation.delete()
messages.success(request, gettext("User invitation was removed."))
# Redirect
if project:
return redirect("manage-access", project=project.slug)
return redirect("manage-users")
# Accept invitation
self.check_access()
invitation.accept(request, request.user)
if invitation.group.defining_project:
return redirect(invitation.group.defining_project)
return redirect("home")
def accept_invitation(request, invitation: Invitation, user: User | None):
if user is None:
user = invitation.user
if user is None:
raise Http404
user.groups.add(invitation.group)
messages.success(
request, gettext("Accepted invitation to the %s team.") % invitation.group
)
invitation.delete() |
pytest cmdline main | import asyncio
import gc
import logging
import platform
import sys
from datetime import datetime
from typing import Optional
import human_readable
import pytest
from _pytest.config import Config
from _pytest.python import Function
from aiohttp.web_app import Application
from tribler.core.components.restapi.rest.rest_endpoint import RESTEndpoint
from tribler.core.components.restapi.rest.rest_manager import error_middleware
from tribler.core.utilities.network_utils import default_network_utils
# Enable origin tracking for coroutine objects in the current thread, so when a test does not handle
# some coroutine properly, we can see a traceback with the name of the test which created the coroutine.
# Note that the error can happen in an unrelated test where the unhandled task from the previous test
# was garbage collected. Without the origin tracking, it may be hard to see the test that created the task.
sys.set_coroutine_origin_tracking_depth(10)
enable_extended_logging = False
pytest_start_time: Optional[datetime] = None # a time when the test suite started
# pylint: disable=unused-argument, redefined-outer-name
def pytest_configure(config):
# Disable logging from faker for all tests
logging.getLogger('faker.factory').propagate = False
@pytest.hookimpl
def METHOD_NAME(config: Config):
""" Enable extended logging if the verbose option is used """
# Called for performing the main command line action.
global enable_extended_logging # pylint: disable=global-statement
enable_extended_logging = config.option.verbose > 0
@pytest.hookimpl
def pytest_collection_finish(session):
""" Save the start time of the test suite execution"""
# Called after collection has been performed and modified.
global pytest_start_time # pylint: disable=global-statement
pytest_start_time = datetime.now()
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_protocol(item: Function, log=True, nextitem=None):
""" Modify the pytest output to include the execution duration for all tests """
# Perform the runtest protocol for a single test item.
if enable_extended_logging and pytest_start_time:
start_time = datetime.now()
print(f'\n{start_time.strftime("%H:%M:%S.%f")[:-3]} Starting "{item.name}"...', end='', flush=True)
yield
now = datetime.now()
duration = (now - start_time).total_seconds()
total = now - pytest_start_time
print(f' in {duration:.3f}s ({human_readable.time_delta(total)} in total)', end='')
else:
yield
@pytest.fixture(autouse=True)
def ensure_gc():
""" Ensure that the garbage collector runs after each test.
This is critical for test stability as we use Libtorrent and need to ensure all its destructors are called. """
    # The gc.collect() below has to run as late as possible within the current test's scope.
    # Therefore this fixture should be declared first among the fixtures in the "function" scope.
    # If there are two or more autouse fixtures within this scope, the order should be set
    # explicitly by using this fixture as a dependency.
# See the discussion in https://github.com/Tribler/tribler/pull/7542 for more information.
yield
# Without "yield" the fixture triggers the garbage collection at the beginning of the (next) test.
# For that reason, the errors triggered during the garbage collection phase will take place not in the erroneous
# test but in the randomly scheduled next test. Usually, these errors are silently suppressed, as any exception in
# __del__ methods is silently suppressed, but they still can somehow affect the test.
#
# By adding the yield we move the garbage collection phase to the end of the current test, to not affect the next
# test.
gc.collect()
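# Ordering sketch (the fixture name below is hypothetical, not part of this conftest): another
# autouse fixture can pin its order relative to ensure_gc simply by requesting it,
#
#     @pytest.fixture(autouse=True)
#     def my_autouse_fixture(ensure_gc):
#         yield
#
# so its teardown finishes before the gc.collect() above runs.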
@pytest.fixture
def free_port():
return default_network_utils.get_random_free_port(start=1024, stop=50000)
@pytest.fixture
def event_loop():
if platform.system() == 'Windows':
# to prevent the "Loop is closed" error
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
policy = asyncio.get_event_loop_policy()
loop = policy.new_event_loop()
yield loop
loop.close()
@pytest.fixture
async def rest_api(event_loop, aiohttp_client, endpoint: RESTEndpoint):
# In each test file that requires the use of this fixture, the endpoint fixture needs to be specified.
client_max_size: int = endpoint.app._client_max_size # pylint:disable=protected-access
app = Application(middlewares=[error_middleware], client_max_size=client_max_size)
app.add_subapp(endpoint.path, endpoint.app)
yield await aiohttp_client(app)
await endpoint.shutdown()
await app.shutdown() |
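# Usage sketch (the endpoint class below is illustrative): a test module provides the
# ``endpoint`` fixture this depends on, e.g.
#
#     @pytest.fixture
#     async def endpoint():
#         return MyRESTEndpoint()  # any RESTEndpoint subclass under test
#
# and then requests ``rest_api`` to get an aiohttp test client bound to that endpoint.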
test image processor from dict with kwargs | # coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_vision_available
from ...test_image_processing_common import ImageProcessingTestMixin, prepare_image_inputs
if is_vision_available():
from transformers import EfficientNetImageProcessor
class EfficientNetImageProcessorTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 18, "width": 18}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
class EfficientNetImageProcessorTest(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = EfficientNetImageProcessor if is_vision_available() else None
def setUp(self):
self.image_processor_tester = EfficientNetImageProcessorTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
image_processing = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
def METHOD_NAME(self):
image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_rescale(self):
# EfficientNet optionally rescales between -1 and 1 instead of the usual 0 and 1
image = np.arange(0, 256, 1, dtype=np.uint8).reshape(1, 8, 32)
image_processor = self.image_processing_class(**self.image_processor_dict)
rescaled_image = image_processor.rescale(image, scale=1 / 127.5)
expected_image = (image * (1 / 127.5)).astype(np.float32) - 1
self.assertTrue(np.allclose(rescaled_image, expected_image))
rescaled_image = image_processor.rescale(image, scale=1 / 255, offset=False)
expected_image = (image / 255.0).astype(np.float32)
self.assertTrue(np.allclose(rescaled_image, expected_image)) |
real extract | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
parse_iso8601,
)
class TV4IE(InfoExtractor):
IE_DESC = 'tv4.se and tv4play.se'
_VALID_URL = r'''(?x)https?://(?:www\.)?
(?:
tv4\.se/(?:[^/]+)/klipp/(?:.*)-|
tv4play\.se/
(?:
(?:program|barn)/(?:(?:[^/]+/){1,2}|(?:[^\?]+)\?video_id=)|
iframe/video/|
film/|
sport/|
)
)(?P<id>[0-9]+)'''
_GEO_COUNTRIES = ['SE']
_TESTS = [
{
'url': 'http://www.tv4.se/kalla-fakta/klipp/kalla-fakta-5-english-subtitles-2491650',
'md5': 'cb837212f342d77cec06e6dad190e96d',
'info_dict': {
'id': '2491650',
'ext': 'mp4',
'title': 'Kalla Fakta 5 (english subtitles)',
'thumbnail': r're:^https?://.*\.jpg$',
'timestamp': int,
'upload_date': '20131125',
},
},
{
'url': 'http://www.tv4play.se/iframe/video/3054113',
'md5': 'cb837212f342d77cec06e6dad190e96d',
'info_dict': {
'id': '3054113',
'ext': 'mp4',
'title': 'Så här jobbar ficktjuvarna - se avslöjande bilder',
'thumbnail': r're:^https?://.*\.jpg$',
'description': 'Unika bilder avslöjar hur turisternas fickor vittjas mitt på Stockholms central. Två experter på ficktjuvarna avslöjar knepen du ska se upp för.',
'timestamp': int,
'upload_date': '20150130',
},
},
{
'url': 'http://www.tv4play.se/sport/3060959',
'only_matching': True,
},
{
'url': 'http://www.tv4play.se/film/2378136',
'only_matching': True,
},
{
'url': 'http://www.tv4play.se/barn/looney-tunes?video_id=3062412',
'only_matching': True,
},
{
'url': 'http://www.tv4play.se/program/farang/3922081',
'only_matching': True,
},
{
'url': 'https://www.tv4play.se/program/nyheterna/avsnitt/13315940',
'only_matching': True,
}
]
def METHOD_NAME(self, url):
video_id = self._match_id(url)
info = self._download_json(
'https://playback-api.b17g.net/asset/%s' % video_id,
video_id, 'Downloading video info JSON', query={
'service': 'tv4',
'device': 'browser',
'protocol': 'hls,dash',
'drm': 'widevine',
})['metadata']
title = info['title']
manifest_url = self._download_json(
'https://playback-api.b17g.net/media/' + video_id,
video_id, query={
'service': 'tv4',
'device': 'browser',
'protocol': 'hls',
})['playbackItem']['manifestUrl']
formats = self._extract_m3u8_formats(
manifest_url, video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False)
formats.extend(self._extract_mpd_formats(
manifest_url.replace('.m3u8', '.mpd'),
video_id, mpd_id='dash', fatal=False))
formats.extend(self._extract_f4m_formats(
manifest_url.replace('.m3u8', '.f4m'),
video_id, f4m_id='hds', fatal=False))
formats.extend(self._extract_ism_formats(
re.sub(r'\.ism/.*?\.m3u8', r'.ism/Manifest', manifest_url),
video_id, ism_id='mss', fatal=False))
if not formats and info.get('is_geo_restricted'):
self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
self._sort_formats(formats)
return {
'id': video_id,
'title': title,
'formats': formats,
# 'subtitles': subtitles,
'description': info.get('description'),
'timestamp': parse_iso8601(info.get('broadcast_date_time')),
'duration': int_or_none(info.get('duration')),
'thumbnail': info.get('image'),
'is_live': info.get('isLive') is True,
'series': info.get('seriesTitle'),
'season_number': int_or_none(info.get('seasonNumber')),
'episode': info.get('episodeTitle'),
'episode_number': int_or_none(info.get('episodeNumber')),
} |
parse args | #!/usr/bin/env python3
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
"""
Script to scan Zephyr include directories and emit system call and subsystem metadata
System calls require a great deal of boilerplate code in order to implement
completely. This script is the first step in the build system's process of
auto-generating this code by doing a text scan of directories containing
C or header files, and building up a database of system calls and their
function call prototypes. This information is emitted to a generated
JSON file for further processing.
This script also scans for struct definitions such as __subsystem and
__net_socket, emitting a JSON dictionary mapping tags to all the struct
declarations found that were tagged with them.
If the output JSON file already exists, its contents are checked against
what information this script would have outputted; if the result is that the
file would be unchanged, it is not modified to prevent unnecessary
incremental builds.
"""
import sys
import re
import argparse
import os
import json
from pathlib import PurePath
regex_flags = re.MULTILINE | re.VERBOSE
syscall_regex = re.compile(r'''
(?:__syscall|__syscall_always_inline)\s+ # __syscall attribute, must be first
([^(]+) # type and name of system call (split later)
[(] # Function opening parenthesis
([^)]*) # Arg list (split later)
[)] # Closing parenthesis
''', regex_flags)
struct_tags = ["__subsystem", "__net_socket"]
tagged_struct_decl_template = r'''
%s\s+ # tag, must be first
struct\s+ # struct keyword is next
([^{]+) # name of subsystem
[{] # Open curly bracket
'''
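# Illustration only (the declaration below is a made-up sample, not read from any file):
#   syscall_regex applied to "__syscall int k_sem_take(struct k_sem *sem, k_timeout_t timeout);"
# captures the groups ("int k_sem_take", "struct k_sem *sem, k_timeout_t timeout"),
# which later processing splits into return type, function name and argument list.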
def tagged_struct_update(target_list, tag, contents):
regex = re.compile(tagged_struct_decl_template % tag, regex_flags)
items = [mo.groups()[0].strip() for mo in regex.finditer(contents)]
target_list.extend(items)
def analyze_headers(include_dir, scan_dir, file_list):
syscall_ret = []
tagged_ret = {}
for tag in struct_tags:
tagged_ret[tag] = []
syscall_files = dict()
# Get the list of header files which contains syscalls to be emitted.
# If file_list does not exist, we emit all syscalls.
if file_list:
with open(file_list, "r", encoding="utf-8") as fp:
contents = fp.read()
for one_file in contents.split(";"):
if os.path.isfile(one_file):
syscall_files[one_file] = {"emit": True}
else:
sys.stderr.write(f"{one_file} does not exists!\n")
sys.exit(1)
multiple_directories = set()
if include_dir:
multiple_directories |= set(include_dir)
if scan_dir:
multiple_directories |= set(scan_dir)
# Look for source files under various directories.
# Due to "syscalls/*.h" being included unconditionally in various
# other header files. We must generate the associated syscall
# header files (e.g. for function stubs).
for base_path in multiple_directories:
for root, dirs, files in os.walk(base_path, topdown=True):
dirs.sort()
files.sort()
for fn in files:
# toolchain/common.h has the definitions of these tags which we
# don't want to trip over
path = os.path.join(root, fn)
if (not (path.endswith(".h") or path.endswith(".c")) or
path.endswith(os.path.join(os.sep, 'toolchain',
'common.h'))):
continue
path = PurePath(os.path.normpath(path)).as_posix()
if path not in syscall_files:
if include_dir and base_path in include_dir:
syscall_files[path] = {"emit" : True}
else:
syscall_files[path] = {"emit" : False}
# Parse files to extract syscall functions
for one_file in syscall_files:
with open(one_file, "r", encoding="utf-8") as fp:
try:
contents = fp.read()
except Exception:
sys.stderr.write("Error decoding %s\n" % path)
raise
fn = os.path.basename(one_file)
try:
to_emit = syscall_files[one_file]["emit"] | args.emit_all_syscalls
syscall_result = [(mo.groups(), fn, to_emit)
for mo in syscall_regex.finditer(contents)]
for tag in struct_tags:
tagged_struct_update(tagged_ret[tag], tag, contents)
except Exception:
sys.stderr.write("While parsing %s\n" % fn)
raise
syscall_ret.extend(syscall_result)
return syscall_ret, tagged_ret
def update_file_if_changed(path, new):
if os.path.exists(path):
with open(path, 'r') as fp:
old = fp.read()
if new != old:
with open(path, 'w') as fp:
fp.write(new)
else:
with open(path, 'w') as fp:
fp.write(new)
def METHOD_NAME():
global args
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter, allow_abbrev=False)
parser.add_argument(
"-i", "--include", required=False, action="append",
help="Include directories recursively scanned for .h files "
"containing syscalls that must be present in final binary. "
"Can be specified multiple times: -i topdir1 -i topdir2 ...")
parser.add_argument(
"--scan", required=False, action="append",
help="Scan directories recursively for .h files containing "
"syscalls that need stubs generated but may not need to "
"be present in final binary. Can be specified multiple "
"times.")
parser.add_argument(
"-j", "--json-file", required=True,
help="Write system call prototype information as json to file")
parser.add_argument(
"-t", "--tag-struct-file", required=True,
help="Write tagged struct name information as json to file")
parser.add_argument(
"--file-list", required=False,
help="Text file containing semi-colon separated list of "
"header file where only syscalls in these files "
"are emitted.")
parser.add_argument(
"--emit-all-syscalls", required=False, action="store_true",
help="Emit all potential syscalls in the tree")
args = parser.METHOD_NAME()
def main():
METHOD_NAME()
syscalls, tagged = analyze_headers(args.include, args.scan,
args.file_list)
# Only write json files if they don't exist or have changes since
# they will force an incremental rebuild.
syscalls_in_json = json.dumps(
syscalls,
indent=4,
sort_keys=True
)
update_file_if_changed(args.json_file, syscalls_in_json)
tagged_struct_in_json = json.dumps(
tagged,
indent=4,
sort_keys=True
)
update_file_if_changed(args.tag_struct_file, tagged_struct_in_json)
if __name__ == "__main__":
main() |
test pretty print with full us phone | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/phone-number/canonical-data.json
# File last updated on 2023-07-19
import unittest
from phone_number import (
PhoneNumber,
)
class PhoneNumberTest(unittest.TestCase):
def test_cleans_the_number(self):
number = PhoneNumber("(223) 456-7890").number
self.assertEqual(number, "2234567890")
def test_cleans_numbers_with_dots(self):
number = PhoneNumber("223.456.7890").number
self.assertEqual(number, "2234567890")
def test_cleans_numbers_with_multiple_spaces(self):
number = PhoneNumber("223 456 7890 ").number
self.assertEqual(number, "2234567890")
def test_invalid_when_9_digits(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("123456789")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "must not be fewer than 10 digits")
def test_invalid_when_11_digits_does_not_start_with_a_1(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("22234567890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "11 digits must start with 1")
def test_valid_when_11_digits_and_starting_with_1(self):
number = PhoneNumber("12234567890").number
self.assertEqual(number, "2234567890")
def test_valid_when_11_digits_and_starting_with_1_even_with_punctuation(self):
number = PhoneNumber("+1 (223) 456-7890").number
self.assertEqual(number, "2234567890")
def test_invalid_when_more_than_11_digits(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("321234567890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "must not be greater than 11 digits")
def test_invalid_with_letters(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("523-abc-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "letters not permitted")
def test_invalid_with_punctuations(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("523-@:!-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "punctuations not permitted")
def test_invalid_if_area_code_starts_with_0(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("(023) 456-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "area code cannot start with zero")
def test_invalid_if_area_code_starts_with_1(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("(123) 456-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "area code cannot start with one")
def test_invalid_if_exchange_code_starts_with_0(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("(223) 056-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "exchange code cannot start with zero")
def test_invalid_if_exchange_code_starts_with_1(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("(223) 156-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "exchange code cannot start with one")
def test_invalid_if_area_code_starts_with_0_on_valid_11_digit_number(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("1 (023) 456-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "area code cannot start with zero")
def test_invalid_if_area_code_starts_with_1_on_valid_11_digit_number(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("1 (123) 456-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "area code cannot start with one")
def test_invalid_if_exchange_code_starts_with_0_on_valid_11_digit_number(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("1 (223) 056-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "exchange code cannot start with zero")
def test_invalid_if_exchange_code_starts_with_1_on_valid_11_digit_number(self):
with self.assertRaises(ValueError) as err:
PhoneNumber("1 (223) 156-7890")
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "exchange code cannot start with one")
# Additional tests for this track
def test_area_code(self):
number = PhoneNumber("2234567890")
self.assertEqual(number.area_code, "223")
def test_pretty_print(self):
number = PhoneNumber("2234567890")
self.assertEqual(number.pretty(), "(223)-456-7890")
def METHOD_NAME(self):
number = PhoneNumber("12234567890")
self.assertEqual(number.pretty(), "(223)-456-7890") |
id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebAppDomainOwnershipIdentifierSlotResult',
'AwaitableGetWebAppDomainOwnershipIdentifierSlotResult',
'get_web_app_domain_ownership_identifier_slot',
'get_web_app_domain_ownership_identifier_slot_output',
]
@pulumi.output_type
class GetWebAppDomainOwnershipIdentifierSlotResult:
"""
A domain specific resource identifier.
"""
def __init__(__self__, METHOD_NAME=None, kind=None, name=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetWebAppDomainOwnershipIdentifierSlotResult(GetWebAppDomainOwnershipIdentifierSlotResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebAppDomainOwnershipIdentifierSlotResult(
METHOD_NAME=self.METHOD_NAME,
kind=self.kind,
name=self.name,
type=self.type)
def get_web_app_domain_ownership_identifier_slot(domain_ownership_identifier_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
slot: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebAppDomainOwnershipIdentifierSlotResult:
"""
Get domain ownership identifier for web app.
:param str domain_ownership_identifier_name: Name of domain ownership identifier.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get the binding for the production slot.
"""
__args__ = dict()
__args__['domainOwnershipIdentifierName'] = domain_ownership_identifier_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['slot'] = slot
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web/v20181101:getWebAppDomainOwnershipIdentifierSlot', __args__, opts=opts, typ=GetWebAppDomainOwnershipIdentifierSlotResult).value
return AwaitableGetWebAppDomainOwnershipIdentifierSlotResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_web_app_domain_ownership_identifier_slot)
def get_web_app_domain_ownership_identifier_slot_output(domain_ownership_identifier_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
slot: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebAppDomainOwnershipIdentifierSlotResult]:
"""
Get domain ownership identifier for web app.
:param str domain_ownership_identifier_name: Name of domain ownership identifier.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
    :param str slot: Name of the deployment slot. If a slot is not specified, the API will get the binding for the production slot.
"""
... |
has set active index | #
# auto-pts - The Bluetooth PTS Automation Framework
#
# Copyright (c) 2023, Oticon.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
"""Wrapper around btp messages. The functions are added as needed."""
import binascii
import logging
import struct
from autopts.pybtp import defs
from autopts.pybtp.btp.btp import CONTROLLER_INDEX, get_iut_method as get_iut,\
btp_hdr_check, pts_addr_get, pts_addr_type_get
from autopts.pybtp.types import addr2btp_ba, BTPError
HAS = {
'read_supported_cmds': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_READ_SUPPORTED_COMMANDS,
CONTROLLER_INDEX),
'set_active_index': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_SET_ACTIVE_INDEX,
CONTROLLER_INDEX),
'set_preset_name': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_SET_PRESET_NAME,
CONTROLLER_INDEX),
'remove_preset': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_REMOVE_PRESET,
CONTROLLER_INDEX),
'add_preset': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_ADD_PRESET,
CONTROLLER_INDEX),
'set_properties': ( defs.BTP_SERVICE_ID_HAS,
defs.HAS_SET_PROPERTIES,
CONTROLLER_INDEX)
}
def has_command_rsp_succ(timeout=20.0):
logging.debug("%s", has_command_rsp_succ.__name__)
iutctl = get_iut()
tuple_hdr, tuple_data = iutctl.btp_socket.read(timeout)
logging.debug("received %r %r", tuple_hdr, tuple_data)
btp_hdr_check(tuple_hdr, defs.BTP_SERVICE_ID_HAS)
return tuple_data
def address_to_ba(bd_addr_type=None, bd_addr=None):
data = bytearray()
bd_addr_ba = addr2btp_ba(pts_addr_get(bd_addr))
bd_addr_type_ba = chr(pts_addr_type_get(bd_addr_type)).encode('utf-8')
data.extend(bd_addr_type_ba)
data.extend(bd_addr_ba)
return data
def METHOD_NAME(index, bd_addr_type=None, bd_addr=None):
logging.debug(f"{METHOD_NAME.__name__}")
data = address_to_ba(bd_addr_type, bd_addr)
data += struct.pack('B', index)
iutctl = get_iut()
iutctl.btp_socket.send(*HAS['set_active_index'], data=data)
has_command_rsp_succ()
def has_set_preset_name(index, name, bd_addr_type=None, bd_addr=None):
logging.debug(f"{has_set_preset_name.__name__}")
data = address_to_ba(bd_addr_type, bd_addr)
size = len(name.encode())
data += struct.pack('BB' + str(size) + 's', index, size, name.encode())
iutctl = get_iut()
iutctl.btp_socket.send(*HAS['set_preset_name'], data=data)
has_command_rsp_succ()
def has_remove_preset(index, bd_addr_type=None, bd_addr=None):
logging.debug(f"{has_remove_preset.__name__}")
data = address_to_ba(bd_addr_type, bd_addr)
data += struct.pack('B', index)
iutctl = get_iut()
iutctl.btp_socket.send(*HAS['remove_preset'], data=data)
has_command_rsp_succ()
def has_add_preset(index, properties, name, bd_addr_type=None, bd_addr=None):
logging.debug(f"{has_add_preset.__name__}")
data = address_to_ba(bd_addr_type, bd_addr)
size = len(name.encode())
data += struct.pack('BBB' + str(size) + 's', index, properties, size, name.encode())
iutctl = get_iut()
iutctl.btp_socket.send(*HAS['add_preset'], data=data)
has_command_rsp_succ()
def has_set_properties(index, properties, bd_addr_type=None, bd_addr=None):
logging.debug(f"{has_add_preset.__name__}")
data = address_to_ba(bd_addr_type, bd_addr)
data += struct.pack('BB', index, properties)
iutctl = get_iut()
iutctl.btp_socket.send(*HAS['set_properties'], data=data)
has_command_rsp_succ() |
tear down | #
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import os
import random
import faiss
import shutil
import unittest
import pathlib as pl
class TestIndexFaiss(unittest.TestCase):
@staticmethod
def assertIsFile(path):
if not pl.Path(path).resolve().is_file():
raise AssertionError("File does not exist: %s" % str(path))
def setUp(self):
self.docids = []
self.texts = []
self.test_file = 'tests/resources/simple_cacm_corpus.json'
self.tmp_dir = f'tmp_{self.__class__.__name__}_{str(random.randint(0, 1000))}'
with open(self.test_file) as f:
for line in f:
line = json.loads(line)
self.docids.append(line['id'])
self.texts.append(line['contents'])
def prepare_encoded_collection(self):
encoded_corpus_dir = f'{self.tmp_dir}/temp_index'
cmd = f'python -m pyserini.encode \
input --corpus {self.test_file} \
--fields text \
output --embeddings {encoded_corpus_dir} \
--to-faiss \
encoder --encoder castorini/tct_colbert-v2-hnp-msmarco \
--fields text \
--max-length 512 \
--batch 1 \
--device cpu'
status = os.system(cmd)
self.assertEqual(status, 0)
self.assertIsFile(os.path.join(encoded_corpus_dir, 'docid'))
self.assertIsFile(os.path.join(encoded_corpus_dir, 'index'))
return encoded_corpus_dir
def test_faiss_hnsw(self):
index_dir = f'{self.tmp_dir}/temp_hnsw'
encoded_corpus_dir = self.prepare_encoded_collection()
cmd = f'python -m pyserini.index.faiss \
--input {encoded_corpus_dir} \
--output {index_dir} \
--M 3 \
--hnsw'
status = os.system(cmd)
self.assertEqual(status, 0)
docid_fn = os.path.join(index_dir, 'docid')
index_fn = os.path.join(index_dir, 'index')
self.assertIsFile(docid_fn)
self.assertIsFile(index_fn)
index = faiss.read_index(index_fn)
vectors = index.reconstruct_n(0, index.ntotal)
with open(docid_fn) as f:
self.assertListEqual([docid.strip() for docid in f], self.docids)
self.assertAlmostEqual(vectors[0][0], 0.12679848074913025, places=4)
self.assertAlmostEqual(vectors[0][-1], -0.0037349488120526075, places=4)
self.assertAlmostEqual(vectors[2][0], 0.03678430616855621, places=4)
self.assertAlmostEqual(vectors[2][-1], 0.13209162652492523, places=4)
def test_faiss_pq(self):
index_dir = f'{self.tmp_dir}/temp_pq'
encoded_corpus_dir = self.prepare_encoded_collection()
cmd = f'python -m pyserini.index.faiss \
--input {encoded_corpus_dir} \
--output {index_dir} \
--pq-m 3 \
--efC 1 \
--pq-nbits 128 \
--pq'
status = os.system(cmd)
self.assertEqual(status, 0)
docid_fn = os.path.join(index_dir, 'docid')
index_fn = os.path.join(index_dir, 'index')
self.assertIsFile(docid_fn)
self.assertIsFile(index_fn)
index = faiss.read_index(index_fn)
vectors = index.reconstruct_n(0, index.ntotal)
with open(docid_fn) as f:
self.assertListEqual([docid.strip() for docid in f], self.docids)
self.assertAlmostEqual(vectors[0][0], 0.04343192, places=4)
self.assertAlmostEqual(vectors[0][-1], 0.075478144, places=4)
self.assertAlmostEqual(vectors[2][0], 0.04343192, places=4)
self.assertAlmostEqual(vectors[2][-1], 0.075478144, places=4)
def METHOD_NAME(self):
shutil.rmtree(self.tmp_dir) |
generate unix entry | from __future__ import annotations
import logging
import re
from base64 import b64decode
from contextlib import suppress
from pathlib import Path
from tempfile import NamedTemporaryFile
from docker.types import Mount
from analysis.PluginBase import AnalysisBasePlugin
from helperFunctions.docker import run_docker_container
from helperFunctions.fileSystem import get_src_dir
from helperFunctions.tag import TagColor
from plugins.mime_blacklists import MIME_BLACKLIST_NON_EXECUTABLE
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from objects.file import FileObject
from collections.abc import Callable
JOHN_PATH = Path(__file__).parent.parent / 'bin' / 'john'
JOHN_POT = Path(__file__).parent.parent / 'bin' / 'john.pot'
WORDLIST_PATH = Path(get_src_dir()) / 'bin' / 'passwords.txt'
USER_NAME_REGEX = br'[a-zA-Z][a-zA-Z0-9_-]{2,15}'
UNIX_REGEXES = [
USER_NAME_REGEX + br':[^:]?:\d+:\d*:[^:]*:[^:]*:[^\n ]*',
USER_NAME_REGEX + br':\$[1256][ay]?\$[a-zA-Z0-9\./+]*\$[a-zA-Z0-9\./+]{16,128}={0,2}', # MD5 / Blowfish / SHA
USER_NAME_REGEX + br':[a-zA-Z0-9\./=]{13}:\d*:\d*:', # DES
]
HTPASSWD_REGEXES = [
USER_NAME_REGEX + br':\$apr1\$[a-zA-Z0-9\./+=]+\$[a-zA-Z0-9\./+]{22}', # MD5 apr1
USER_NAME_REGEX + br':\{SHA\}[a-zA-Z0-9\./+]{27}=', # SHA-1
]
MOSQUITTO_REGEXES = [br'[a-zA-Z][a-zA-Z0-9_-]{2,15}\:\$6\$[a-zA-Z0-9+/=]+\$[a-zA-Z0-9+/]{86}==']
RESULTS_DELIMITER = '=== Results: ==='
class AnalysisPlugin(AnalysisBasePlugin):
"""
This plug-in tries to find and crack passwords
"""
NAME = 'users_and_passwords'
DEPENDENCIES = [] # noqa: RUF012
MIME_BLACKLIST = MIME_BLACKLIST_NON_EXECUTABLE
DESCRIPTION = 'search for UNIX, httpd, and mosquitto password files, parse them and try to crack the passwords'
VERSION = '0.5.4'
FILE = __file__
def process_object(self, file_object: FileObject) -> FileObject:
if self.NAME not in file_object.processed_analysis:
file_object.processed_analysis[self.NAME] = {}
file_object.processed_analysis[self.NAME]['summary'] = []
self.find_password_entries(file_object, UNIX_REGEXES, METHOD_NAME)
self.find_password_entries(file_object, HTPASSWD_REGEXES, generate_htpasswd_entry)
self.find_password_entries(file_object, MOSQUITTO_REGEXES, generate_mosquitto_entry)
return file_object
def find_password_entries(self, file_object: FileObject, regex_list: list[bytes], entry_gen_function: Callable):
for passwd_regex in regex_list:
passwd_entries = re.findall(passwd_regex, file_object.binary)
for entry in passwd_entries:
self.update_file_object(file_object, entry_gen_function(entry))
def _add_found_password_tag(self, file_object: FileObject, result: dict):
for password_entry in result:
if 'password' in result[password_entry]:
username = password_entry.split(':', 1)[0]
password = result[password_entry]['password']
self.add_analysis_tag(
file_object, f'{username}_{password}', f'Password: {username}:{password}', TagColor.RED, True
)
def update_file_object(self, file_object: FileObject, result_entry: dict):
file_object.processed_analysis[self.NAME].update(result_entry)
file_object.processed_analysis[self.NAME]['summary'].extend(list(result_entry))
self._add_found_password_tag(file_object, result_entry)
def METHOD_NAME(entry: bytes) -> dict:
user_name, pw_hash, *_ = entry.split(b':')
result_entry = {'type': 'unix', 'entry': _to_str(entry)}
try:
if pw_hash.startswith(b'$') or _is_des_hash(pw_hash):
result_entry['password-hash'] = _to_str(pw_hash)
result_entry['cracked'] = crack_hash(b':'.join((user_name, pw_hash)), result_entry)
except (IndexError, AttributeError, TypeError):
logging.warning(f'Unsupported password format: {entry}', exc_info=True)
return {f'{_to_str(user_name)}:unix': result_entry}
def generate_htpasswd_entry(entry: bytes) -> dict:
user_name, pw_hash = entry.split(b':')
result_entry = {'type': 'htpasswd', 'entry': _to_str(entry), 'password-hash': _to_str(pw_hash)}
result_entry['cracked'] = crack_hash(entry, result_entry)
return {f'{_to_str(user_name)}:htpasswd': result_entry}
def generate_mosquitto_entry(entry: bytes) -> dict:
entry_decoded = _to_str(entry)
user, _, _, salt_hash, passwd_hash, *_ = re.split(r'[:$]', entry_decoded)
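    # Rebuild the entry in john's dynamic_82 (salted SHA-512) form, e.g. (values illustrative)
    # "bob:$6$<b64 salt>$<b64 hash>" becomes "bob:$dynamic_82$<hex hash>$HEX$<hex salt>"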
passwd_entry = f'{user}:$dynamic_82${b64decode(passwd_hash).hex()}$HEX${b64decode(salt_hash).hex()}'
result_entry = {'type': 'mosquitto', 'entry': entry_decoded, 'password-hash': passwd_hash}
result_entry['cracked'] = crack_hash(passwd_entry.encode(), result_entry, '--format=dynamic_82')
return {f'{user}:mosquitto': result_entry}
def _is_des_hash(pw_hash: str) -> bool:
return len(pw_hash) == 13 # noqa: PLR2004
def crack_hash(passwd_entry: bytes, result_entry: dict, format_term: str = '') -> bool:
with NamedTemporaryFile() as fp:
fp.write(passwd_entry)
fp.seek(0)
john_process = run_docker_container(
'fact/john:alpine-3.18',
command=f'/work/input_file {format_term}',
mounts=[
Mount('/work/input_file', fp.name, type='bind'),
Mount('/root/.john/john.pot', str(JOHN_POT), type='bind'),
],
logging_label='users_and_passwords',
)
result_entry['log'] = john_process.stdout
if 'No password hashes loaded' in john_process.stdout:
result_entry['ERROR'] = 'hash type is not supported'
return False
output = parse_john_output(john_process.stdout)
if output:
if any('0 password hashes cracked' in line for line in output):
result_entry['ERROR'] = 'password cracking not successful'
return False
with suppress(IndexError):
result_entry['password'] = output[0].split(':')[1]
return True
return False
def parse_john_output(john_output: str) -> list[str]:
if RESULTS_DELIMITER in john_output:
start_offset = john_output.find(RESULTS_DELIMITER) + len(RESULTS_DELIMITER) + 1 # +1 is '\n' after delimiter
return [line for line in john_output[start_offset:].split('\n') if line]
return []
def _to_str(byte_str: bytes) -> str:
"""result entries must be converted from `bytes` to `str` in order to be saved as JSON"""
return byte_str.decode(errors='replace') |
test api key live mode | from copy import deepcopy
from unittest.mock import patch
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from djstripe import models
from djstripe.enums import APIKeyType
from djstripe.settings import djstripe_settings
from . import FAKE_ACCOUNT, FAKE_FILEUPLOAD_LOGO
class TestCheckApiKeySettings(TestCase):
@override_settings(
STRIPE_LIVE_SECRET_KEY="sk_live_foo",
STRIPE_LIVE_PUBLIC_KEY="sk_live_foo",
STRIPE_LIVE_MODE=True,
)
@patch("stripe.Account.retrieve")
@patch(
"stripe.File.retrieve",
return_value=deepcopy(FAKE_FILEUPLOAD_LOGO),
autospec=True,
)
def test_global_api_keys_live_mode(
self,
fileupload_retrieve_mock,
account_retrieve_mock,
):
fake_account = deepcopy(FAKE_ACCOUNT)
fake_account["settings"]["branding"]["icon"] = None
account_retrieve_mock.return_value = fake_account
with patch.object(
models.api,
"get_api_key_details_by_prefix",
return_value=(APIKeyType.secret, True),
):
account = models.Account.sync_from_stripe_data(
fake_account, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
self.assertEqual(account.default_api_key, "sk_live_foo")
self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, True)
self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_live_foo")
self.assertEqual(djstripe_settings.LIVE_API_KEY, "sk_live_foo")
@override_settings(
STRIPE_TEST_SECRET_KEY="sk_test_foo",
STRIPE_TEST_PUBLIC_KEY="pk_test_foo",
STRIPE_LIVE_MODE=False,
)
@patch("stripe.Account.retrieve")
@patch(
"stripe.File.retrieve",
return_value=deepcopy(FAKE_FILEUPLOAD_LOGO),
autospec=True,
)
def test_global_api_keys_test_mode(
self,
fileupload_retrieve_mock,
account_retrieve_mock,
):
fake_account = deepcopy(FAKE_ACCOUNT)
fake_account["settings"]["branding"]["icon"] = None
account_retrieve_mock.return_value = fake_account
with patch.object(
models.api,
"get_api_key_details_by_prefix",
return_value=(APIKeyType.secret, False),
):
account = models.Account.sync_from_stripe_data(
fake_account, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
self.assertEqual(account.default_api_key, "sk_test_foo")
self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, False)
self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_test_foo")
self.assertEqual(djstripe_settings.TEST_API_KEY, "sk_test_foo")
@override_settings(
STRIPE_TEST_SECRET_KEY="sk_test_foo",
STRIPE_LIVE_SECRET_KEY="sk_live_foo",
STRIPE_TEST_PUBLIC_KEY="pk_test_foo",
STRIPE_LIVE_PUBLIC_KEY="pk_live_foo",
STRIPE_LIVE_MODE=True,
)
@patch("stripe.Account.retrieve")
@patch(
"stripe.File.retrieve",
return_value=deepcopy(FAKE_FILEUPLOAD_LOGO),
autospec=True,
)
def METHOD_NAME(
self,
fileupload_retrieve_mock,
account_retrieve_mock,
):
fake_account = deepcopy(FAKE_ACCOUNT)
fake_account["settings"]["branding"]["icon"] = None
account_retrieve_mock.return_value = fake_account
with patch.object(
models.api,
"get_api_key_details_by_prefix",
return_value=(APIKeyType.secret, True),
):
account = models.Account.sync_from_stripe_data(
fake_account, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
self.assertEqual(account.default_api_key, "sk_live_foo")
del settings.STRIPE_SECRET_KEY, settings.STRIPE_TEST_SECRET_KEY
del settings.STRIPE_PUBLIC_KEY, settings.STRIPE_TEST_PUBLIC_KEY
self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, True)
self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_live_foo")
self.assertEqual(djstripe_settings.STRIPE_PUBLIC_KEY, "pk_live_foo")
@override_settings(
STRIPE_TEST_SECRET_KEY="sk_test_foo",
STRIPE_LIVE_SECRET_KEY="sk_live_foo",
STRIPE_TEST_PUBLIC_KEY="pk_test_foo",
STRIPE_LIVE_PUBLIC_KEY="pk_live_foo",
STRIPE_LIVE_MODE=False,
)
@patch("stripe.Account.retrieve")
@patch(
"stripe.File.retrieve",
return_value=deepcopy(FAKE_FILEUPLOAD_LOGO),
autospec=True,
)
def test_secret_key_test_mode(
self,
fileupload_retrieve_mock,
account_retrieve_mock,
):
fake_account = deepcopy(FAKE_ACCOUNT)
fake_account["settings"]["branding"]["icon"] = None
account_retrieve_mock.return_value = fake_account
with patch.object(
models.api,
"get_api_key_details_by_prefix",
return_value=(APIKeyType.secret, False),
):
account = models.Account.sync_from_stripe_data(
fake_account, api_key=djstripe_settings.STRIPE_SECRET_KEY
)
self.assertEqual(account.default_api_key, "sk_test_foo")
del settings.STRIPE_SECRET_KEY
del settings.STRIPE_PUBLIC_KEY
self.assertEqual(djstripe_settings.STRIPE_LIVE_MODE, False)
self.assertEqual(djstripe_settings.STRIPE_SECRET_KEY, "sk_test_foo")
self.assertEqual(djstripe_settings.STRIPE_PUBLIC_KEY, "pk_test_foo")
self.assertEqual(djstripe_settings.TEST_API_KEY, "sk_test_foo") |
get boot2docker status | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import sys
from semantic_version import Version
from cement.utils.misc import minimal_logger
from ebcli.containers import commands
from ebcli.core import fileoperations
from ebcli.lib import heuristics, utils
from ebcli.resources.strings import strings
from ebcli.objects.exceptions import CommandError
LOG = minimal_logger(__name__)
SUPPORTED_DOCKER_V = '1.6.0'
SUPPORTED_BOOT2DOCKER_V = '1.6.0'
LOCALHOST = '127.0.0.1'
EXPORT = 'export'
BOOT2DOCKER_RUNNING = 'running'
DOCKER_HOST = 'DOCKER_HOST'
DOCKER_CERT_PATH = 'DOCKER_CERT_PATH'
DOCKER_TLS_VERIFY = 'DOCKER_TLS_VERIFY'
def supported_docker_installed():
"""
Return whether proper Docker version is installed.
:return: bool
"""
try:
clean_version = remove_leading_zeros_from_version(commands.version())
return Version(clean_version) >= Version(SUPPORTED_DOCKER_V)
except (OSError, CommandError):
return False
def validate_docker_installed():
_validate_docker_installed(supported_docker_installed())
def _validate_docker_installed(supported_docker_installed):
versions = {'boot2docker-version': SUPPORTED_BOOT2DOCKER_V,
'docker-version': SUPPORTED_DOCKER_V}
err = strings['local.dockernotpresent'].format(**versions)
if not supported_docker_installed:
raise CommandError(err)
def container_ip():
"""
Return the ip address that local containers are or will be running on.
:return str
"""
try:
return _boot2docker_ip()
except OSError:
return LOCALHOST
def _boot2docker_ip():
args = ['boot2docker', 'ip']
return utils.exec_cmd_quiet(args).strip()
def setup(env=os.environ):
validate_docker_installed()
boot2docker_setup(env)
def boot2docker_setup(env=os.environ):
if not heuristics.is_boot2docker_installed():
return
LOG.debug('Ensuring boot2docker VM has initialized, started and the client is set up...')
_init_boot2docker()
if not _is_boot2docker_running():
_start_boot2docker()
boot2docker_certs_path = os.path.sep.join(['.boot2docker', 'certs',
'boot2docker-vm'])
if DOCKER_HOST not in env:
env[DOCKER_HOST] = 'tcp://{}:2376'.format(_boot2docker_ip())
if DOCKER_CERT_PATH not in env:
env[DOCKER_CERT_PATH] = os.path.join(fileoperations.get_home(),
boot2docker_certs_path)
if DOCKER_TLS_VERIFY not in env:
env[DOCKER_TLS_VERIFY] = '1'
LOG.debug('DOCKER_HOST is set to ' + env[DOCKER_HOST])
LOG.debug('DOCKER_CERT_PATH is set to ' + env[DOCKER_CERT_PATH])
LOG.debug('DOCKER_TLS_VERIFY is set to ' + env[DOCKER_TLS_VERIFY])
LOG.debug('PATH is set to ' + env.get('PATH', ''))
def is_windows():
return 'win32' in str(sys.platform).lower()
def _is_boot2docker_running():
return METHOD_NAME() == BOOT2DOCKER_RUNNING
def METHOD_NAME():
return utils.exec_cmd_quiet(['boot2docker', 'status']).strip()
def _start_boot2docker():
utils.exec_cmd_quiet(['boot2docker', 'start'])
def _init_boot2docker():
utils.exec_cmd_quiet(['boot2docker', 'init'])
def remove_leading_zeros_from_version(version_string):
    # regex explanation: remove zeroes if both:
# 1. the start of string (major version) or following a '.'
# 2. followed by some other digit
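    # e.g. "1.06.02" -> "1.6.2"; a lone zero component such as in "0.9.1" is kept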
return re.sub(r'((?<=\.)|^)[0]+(?=\d+)', r'', version_string) |
test vert pol | #!/usr/bin/env python
# coding: utf-8
#
# Project: Azimuthal integration
# https://github.com/silx-kit/pyFAI
#
# Copyright (C) 2015-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Principal author: Jérôme Kieffer ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"test suite for polarization corrections"
__author__ = "Jérôme Kieffer"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__date__ = "22/04/2022"
import unittest
import numpy
import logging
logger = logging.getLogger(__name__)
from ..azimuthalIntegrator import AzimuthalIntegrator
class TestPolarization(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.shape = (13, 13)
Y, X = numpy.ogrid[-6:7, -6:7]
self.rotY = numpy.radians(30.0 * Y)
self.rotX = numpy.radians(30.0 * X)
self.tth = numpy.sqrt(self.rotY ** 2 + self.rotX ** 2)
chi = numpy.arctan2(self.rotY, self.rotX)
self.ai = AzimuthalIntegrator(dist=1, pixel1=0.1, pixel2=0.1)
self.ai._cached_array["2th_center"] = self.tth
self.ai._cached_array["chi_center"] = chi
self.epsilon = 1e-15
def tearDown(self):
unittest.TestCase.tearDown(self)
self.shape = self.rotY = self.rotX = self.tth = self.ai = None
def testNoPol(self):
"without polarization correction should be 1"
self.assertTrue(abs(self.ai.polarization(factor=None) - numpy.ones(self.shape)).max() == 0, "without polarization correction should be 1")
def testCircularPol(self):
"Circular polarization should decay in (1+(cos2θ)^2)/2"
pol = ((1.0 + numpy.cos(self.tth) ** 2) / 2.0).astype("float32")
self.assertTrue(abs(self.ai.polarization(factor=0) - pol).max() == 0, "with circular polarization correction is independent of chi")
self.assertTrue(abs(self.ai.polarization(factor=0, axis_offset=1) - pol).max() == 0, "with circular polarization correction is independent of chi, 1")
self.assertTrue(abs(self.ai.polarization(factor=0, axis_offset=2) - pol).max() == 0, "with circular polarization correction is independent of chi, 2")
self.assertTrue(abs(self.ai.polarization(factor=0, axis_offset=3) - pol).max() == 0, "with circular polarization correction is independent of chi, 3")
def testHorizPol(self):
"horizontal polarization should decay in (cos2θ)**2 in horizontal plane and no correction in vertical one"
self.assertTrue(abs(self.ai.polarization(factor=1)[:, 6] - numpy.ones(13)).max() == 0, "No correction in the vertical plane")
self.assertTrue(abs(self.ai.polarization(factor=1)[6] - numpy.cos(self.rotX) ** 2).max() < self.epsilon, "cos(2th)^2 like in the horizontal plane")
def METHOD_NAME(self):
"Vertical polarization should decay in (cos2θ)**2 in vertical plane and no correction in horizontal one"
self.assertTrue(abs(self.ai.polarization(factor=-1)[6] - numpy.ones(13)).max() == 0, "No correction in the horizontal plane")
        self.assertTrue(abs(self.ai.polarization(factor=-1)[:, 6] - (numpy.cos((2 * self.rotX)) + 1) / 2).max() < self.epsilon, "cos(2th)^2 like in the vertical plane")
def testoffsetPol(self):
"test for the rotation of the polarization axis"
self.assertTrue(abs(self.ai.polarization(factor=1, axis_offset=numpy.pi / 2)[6] - numpy.ones(13)).max() == 0, "No correction in the horizontal plane")
        self.assertTrue(abs(self.ai.polarization(factor=1, axis_offset=numpy.pi / 2)[:, 6] - (numpy.cos((2 * self.rotX)) + 1) / 2).max() < self.epsilon, "cos(2th)^2 like in the vertical plane")
def testNumExpr(self):
for _ in range(10):
p = 2.0 * numpy.random.random() - 1.0
offset = 10.0 * numpy.random.random() - 3.0
self.assertTrue(abs(
self.ai.polarization(factor=p, axis_offset=offset, path="numpy") -
self.ai.polarization(factor=p, axis_offset=offset, path="numexpr")).max() == 0,
f"Numexpr validation with p={p}, offset={offset}")
def suite():
loader = unittest.defaultTestLoader.loadTestsFromTestCase
testsuite = unittest.TestSuite()
testsuite.addTest(loader(TestPolarization))
return testsuite
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite()) |
get default prefix | # License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
class PosConfig(models.Model):
_inherit = "pos.config"
@api.depends(
"l10n_es_simplified_invoice_sequence_id.number_next_actual",
"l10n_es_simplified_invoice_sequence_id.prefix",
"l10n_es_simplified_invoice_sequence_id.padding",
)
def _compute_simplified_invoice_sequence(self):
for pos in self:
seq = pos.l10n_es_simplified_invoice_sequence_id
pos.l10n_es_simplified_invoice_number = (
seq._get_current_sequence().number_next_actual
)
pos.l10n_es_simplified_invoice_prefix = seq._get_prefix_suffix()[0]
pos.l10n_es_simplified_invoice_padding = seq.padding
iface_l10n_es_simplified_invoice = fields.Boolean(
string="Use simplified invoices for this POS",
)
is_simplified_config = fields.Boolean(
store=False, compute="_compute_simplified_config"
)
l10n_es_simplified_invoice_sequence_id = fields.Many2one(
"ir.sequence",
string="Simplified Invoice IDs Sequence",
help="Autogenerate for each POS created",
copy=False,
readonly=True,
)
l10n_es_simplified_invoice_limit = fields.Float(
string="Sim.Inv limit amount",
digits="Account",
help="Over this amount is not legally posible to create "
"a simplified invoice",
default=3000, # Spanish legal limit
)
l10n_es_simplified_invoice_prefix = fields.Char(
"Simplified Invoice prefix",
readonly=True,
compute="_compute_simplified_invoice_sequence",
)
l10n_es_simplified_invoice_padding = fields.Integer(
"Simplified Invoice padding",
readonly=True,
compute="_compute_simplified_invoice_sequence",
)
l10n_es_simplified_invoice_number = fields.Integer(
"Sim.Inv number",
readonly=True,
compute="_compute_simplified_invoice_sequence",
)
@api.depends("iface_l10n_es_simplified_invoice")
def _compute_simplified_config(self):
for pos in self:
pos.is_simplified_config = pos.iface_l10n_es_simplified_invoice
@api.model_create_multi
def create(self, vals_list):
for vals in vals_list:
# Auto create simp. inv. sequence
prefix = initial_prefix = "{}{}".format(
vals["name"], self.METHOD_NAME()
)
ith = 0
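            # Keep appending "_<n>" until the prefix is unique, e.g. (assuming an empty
            # default prefix) a second POS named "Shop" gets "Shop_1", a third "Shop_2"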
while self.env["ir.sequence"].search_count([("prefix", "=", prefix)]):
ith += 1
prefix = "{}_{}".format(initial_prefix, ith)
simp_inv_seq_id = self.env["ir.sequence"].create(
{
"name": _("Simplified Invoice %s") % vals["name"],
"implementation": "standard",
"padding": self._get_default_padding(),
"prefix": prefix,
"code": "pos.config.simplified_invoice",
"company_id": vals.get("company_id", False),
}
)
vals["l10n_es_simplified_invoice_sequence_id"] = simp_inv_seq_id.id
return super().create(vals_list)
@api.onchange("iface_l10n_es_simplified_invoice")
def _onchange_l10n_iface_l10n_es_simplified_invoice(self):
if self.iface_l10n_es_simplified_invoice and not self.invoice_journal_id:
self.invoice_journal_id = self._default_invoice_journal()
def copy(self, default=None):
return super(
PosConfig,
self.with_context(copy_pos_config=True),
).copy(default)
def write(self, vals):
if not self._context.get("copy_pos_config") and "name" not in vals:
for pos in self:
sequence = pos.l10n_es_simplified_invoice_sequence_id
sequence.check_simplified_invoice_unique_prefix()
if "name" in vals:
prefix = self.l10n_es_simplified_invoice_prefix.replace(
self.name, vals["name"]
)
if prefix != self.l10n_es_simplified_invoice_prefix:
self.l10n_es_simplified_invoice_sequence_id.update(
{
"prefix": prefix,
"name": (
self.l10n_es_simplified_invoice_sequence_id.name.replace(
self.name, vals["name"]
)
),
}
)
return super().write(vals)
def unlink(self):
self.mapped("l10n_es_simplified_invoice_sequence_id").unlink()
return super().unlink()
def _get_default_padding(self):
return self.env["ir.config_parameter"].get_param(
"l10n_es_pos.simplified_invoice_sequence.padding", 4
)
def METHOD_NAME(self):
return self.env["ir.config_parameter"].get_param(
"l10n_es_pos.simplified_invoice_sequence.prefix", ""
)
def _get_l10n_es_sequence_name(self):
"""HACK: This is done for getting the proper translation."""
return _("Simplified Invoice %s") |
test query | """
:codeauthor: Rahul Handay <[email protected]>
"""
import pytest
import salt.states.http as http
from tests.support.mock import MagicMock, patch
@pytest.fixture
def configure_loader_modules():
return {http: {}}
def METHOD_NAME():
"""
Test to perform an HTTP query and statefully return the result
"""
ret = [
{
"changes": {},
"comment": (
" Either match text (match) or a status code (status) is required."
),
"data": {},
"name": "salt",
"result": False,
},
{
"changes": {},
"comment": " (TEST MODE)",
"data": True,
"name": "salt",
"result": None,
},
]
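# Without match/status the state fails outright; in test mode it returns result=None.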
assert http.query("salt") == ret[0]
with patch.dict(http.__opts__, {"test": True}):
mock = MagicMock(return_value=True)
with patch.dict(http.__salt__, {"http.query": mock}):
assert http.query("salt", "Dude", "stack") == ret[1]
def test_query_pcre_statustype():
"""
Test to perform an HTTP query with a regex used to match the status code and statefully return the result
"""
testurl = "salturl"
http_result = {"text": "This page returned a 201 status code", "status": "201"}
state_return = {
"changes": {},
"comment": (
'Match text "This page returned" was found. Status pattern "200|201" was'
" found."
),
"data": {"status": "201", "text": "This page returned a 201 status code"},
"name": testurl,
"result": True,
}
with patch.dict(http.__opts__, {"test": False}):
mock = MagicMock(return_value=http_result)
with patch.dict(http.__salt__, {"http.query": mock}):
assert (
http.query(
testurl,
match="This page returned",
status="200|201",
status_type="pcre",
)
== state_return
)
def test_query_stringstatustype():
"""
Test to perform an HTTP query with a string status code and statefully return the result
"""
testurl = "salturl"
http_result = {"text": "This page returned a 201 status code", "status": "201"}
state_return = {
"changes": {},
"comment": 'Match text "This page returned" was found. Status 201 was found.',
"data": {"status": "201", "text": "This page returned a 201 status code"},
"name": testurl,
"result": True,
}
with patch.dict(http.__opts__, {"test": False}):
mock = MagicMock(return_value=http_result)
with patch.dict(http.__salt__, {"http.query": mock}):
assert (
http.query(
testurl,
match="This page returned",
status="201",
status_type="string",
)
== state_return
)
def test_query_liststatustype():
"""
Test to perform an HTTP query with a list of status codes and statefully return the result
"""
testurl = "salturl"
http_result = {"text": "This page returned a 201 status code", "status": "201"}
state_return = {
"changes": {},
"comment": 'Match text "This page returned" was found. Status 201 was found.',
"data": {"status": "201", "text": "This page returned a 201 status code"},
"name": testurl,
"result": True,
}
with patch.dict(http.__opts__, {"test": False}):
mock = MagicMock(return_value=http_result)
with patch.dict(http.__salt__, {"http.query": mock}):
assert (
http.query(
testurl,
match="This page returned",
status=["200", "201"],
status_type="list",
)
== state_return
)
def test_wait_for_with_interval():
"""
Test that wait_for_successful_query sleeps for request_interval between attempts
"""
query_mock = MagicMock(side_effect=[{"error": "error"}, {"result": True}])
with patch.object(http, "query", query_mock):
with patch("time.sleep", MagicMock()) as sleep_mock:
assert http.wait_for_successful_query(
"url", request_interval=1, status=200
) == {"result": True}
sleep_mock.assert_called_once_with(1)
def test_wait_for_without_interval():
"""
Test that wait_for_successful_query does not sleep when no request_interval is given
"""
query_mock = MagicMock(side_effect=[{"error": "error"}, {"result": True}])
with patch.object(http, "query", query_mock):
with patch("time.sleep", MagicMock()) as sleep_mock:
assert http.wait_for_successful_query("url", status=200) == {"result": True}
sleep_mock.assert_not_called() |
get subset name | """Create workflow moved from avalon-core repository.
Renamed classes and functions
- 'Creator' -> 'LegacyCreator'
- 'create' -> 'legacy_create'
"""
import os
import logging
import collections
from openpype.client import get_asset_by_id
from .subset_name import METHOD_NAME
class LegacyCreator(object):
"""Determine how assets are created"""
label = None
family = None
defaults = None
maintain_selection = True
enabled = True
dynamic_subset_keys = []
log = logging.getLogger("LegacyCreator")
log.propagate = True
def __init__(self, name, asset, options=None, data=None):
self.name = name # For backwards compatibility
self.options = options
# Default data
self.data = collections.OrderedDict()
self.data["id"] = "pyblish.avalon.instance"
self.data["family"] = self.family
self.data["asset"] = asset
self.data["subset"] = name
self.data["active"] = True
self.data.update(data or {})
@classmethod
def apply_settings(cls, project_settings, system_settings):
"""Apply OpenPype settings to a plugin class."""
host_name = os.environ.get("AVALON_APP")
plugin_type = "create"
plugin_type_settings = (
project_settings
.get(host_name, {})
.get(plugin_type, {})
)
global_type_settings = (
project_settings
.get("global", {})
.get(plugin_type, {})
)
if not global_type_settings and not plugin_type_settings:
return
plugin_name = cls.__name__
plugin_settings = None
# Look for plugin settings in host specific settings
if plugin_name in plugin_type_settings:
plugin_settings = plugin_type_settings[plugin_name]
# Look for plugin settings in global settings
elif plugin_name in global_type_settings:
plugin_settings = global_type_settings[plugin_name]
if not plugin_settings:
return
cls.log.debug(">>> We have preset for {}".format(plugin_name))
for option, value in plugin_settings.items():
if option == "enabled" and value is False:
cls.log.debug(" - is disabled by preset")
else:
cls.log.debug(" - setting `{}`: `{}`".format(option, value))
setattr(cls, option, value)
def process(self):
pass
@classmethod
def get_dynamic_data(
cls, variant, task_name, asset_id, project_name, host_name
):
"""Return dynamic data for current Creator plugin.
By default, returns the keys from the `dynamic_subset_keys` attribute mapped
to themselves, which keeps the formatted template unchanged.
```
dynamic_subset_keys = ["my_key"]
---
output = {
"my_key": "{my_key}"
}
```
Dynamic keys may override default Creator keys (family, task, asset,
...) but do it wisely if you need.
All keys will be converted into 3 variants: unchanged, capitalized and
all upper case. Because of that, all keys are lowered first.
This method can be modified to prefill some values; just keep in mind it
is a class method.
Returns:
dict: Fill data for subset name template.
"""
dynamic_data = {}
for key in cls.dynamic_subset_keys:
key = key.lower()
dynamic_data[key] = "{" + key + "}"
return dynamic_data
@classmethod
def METHOD_NAME(
cls, variant, task_name, asset_id, project_name, host_name=None
):
"""Return subset name created with entered arguments.
Logic extracted from the Creator tool. This method makes it possible to
get the subset name without the tool.
TODO: Maybe change `variant` variable.
By default the output is the family concatenated with the user-entered text.
Args:
variant (str): What is entered by user in creator tool.
task_name (str): Context's task name.
asset_id (ObjectId): Mongo ID of context's asset.
project_name (str): Context's project name.
host_name (str): Name of host.
Returns:
str: Formatted subset name with entered arguments. Should match
config's logic.
"""
dynamic_data = cls.get_dynamic_data(
variant, task_name, asset_id, project_name, host_name
)
asset_doc = get_asset_by_id(
project_name, asset_id, fields=["data.tasks"]
)
return METHOD_NAME(
cls.family,
variant,
task_name,
asset_doc,
project_name,
host_name,
dynamic_data=dynamic_data
)
def legacy_create(Creator, name, asset, options=None, data=None):
"""Create a new instance
Associate nodes with a subset and family. These nodes are later
validated, according to their `family`, and integrated into the
shared environment, relative to their `subset`.
Data relative to each family, along with default data, are imprinted
into the resulting objectSet. This data is later used by extractors
and finally asset browsers to help identify the origin of the asset.
Arguments:
Creator (Creator): Class of creator
name (str): Name of subset
asset (str): Name of asset
options (dict, optional): Additional options from GUI
data (dict, optional): Additional data from GUI
Raises:
NameError if the `subset` already exists
KeyError on invalid dynamic property
RuntimeError on host error
Returns:
Name of instance
"""
from openpype.pipeline import registered_host
host = registered_host()
plugin = Creator(name, asset, options, data)
if plugin.maintain_selection is True:
with host.maintained_selection():
print("Running %s with maintained selection" % plugin)
instance = plugin.process()
return instance
print("Running %s" % plugin)
instance = plugin.process()
return instance |
wipe | import time
from contextlib import contextmanager
import numpy
import pytest
import stbt_core as stbt
def test_motionresult_repr():
assert repr(stbt.MotionResult(
time=1466002032.335607, motion=True,
region=stbt.Region(x=321, y=32, right=334, bottom=42),
frame=stbt.Frame(numpy.zeros((720, 1280, 3)),
time=1466002032.335607))) \
== ("MotionResult("
"time=1466002032.336, motion=True, "
"region=Region(x=321, y=32, right=334, bottom=42), "
"frame=<Frame(time=1466002032.336)>)")
def test_wait_for_motion_half_motion_str_2of4():
with MockTime().patch():
res = stbt.wait_for_motion(
consecutive_frames='2/4', frames=fake_frames())
print(res)
assert res.time == 1466084606.
def test_wait_for_motion_half_motion_str_2of3():
with MockTime().patch():
res = stbt.wait_for_motion(
consecutive_frames='2/3', frames=fake_frames())
print(res)
assert res.time == 1466084606.
def test_wait_for_motion_half_motion_str_4of10():
with MockTime().patch():
# Time is not affected by consecutive_frames parameter
res = stbt.wait_for_motion(
consecutive_frames='4/10', timeout_secs=20, frames=fake_frames())
assert res.time == 1466084606.
def test_wait_for_motion_half_motion_str_3of4():
try:
with MockTime().patch():
stbt.wait_for_motion(consecutive_frames='3/4', frames=fake_frames())
assert False, "wait_for_motion succeeded unexpectedly"
except stbt.MotionTimeout:
pass
def test_wait_for_motion_half_motion_int():
with pytest.raises(stbt.MotionTimeout), MockTime().patch():
stbt.wait_for_motion(consecutive_frames=2, frames=fake_frames())
def test_that_wait_for_motion_detects_a_wipe():
stbt.wait_for_motion(consecutive_frames="10/30", frames=METHOD_NAME())
stbt.wait_for_motion(frames=gradient_wipe())
def test_detect_motion_region_and_mask():
def dm(**kwargs):
return next(stbt.detect_motion(frames=METHOD_NAME(), **kwargs))
r = stbt.Region(0, 0, right=640, bottom=1280)
# Just check no exceptions
dm()
dm(mask="mask-out-left-half-720p.png")
dm(mask=numpy.full((720, 1280), 255, dtype=numpy.uint8))
dm(mask=r)
dm(region=r)
with pytest.raises(ValueError,
match="Cannot specify mask and region at the same time"):
dm(region=r, mask=numpy.zeros((720, 1280), dtype=numpy.uint8))
with pytest.raises(ValueError,
match=r"Mask\(<Image>\) doesn't overlap with the frame"):
dm(mask=numpy.zeros((720, 1280), dtype=numpy.uint8))
with pytest.raises(ValueError,
match=r"~Region.ALL doesn't overlap with the frame"):
dm(mask=~stbt.Region.ALL)
def fake_frames():
a = numpy.zeros((2, 2, 3), dtype=numpy.uint8)
a.flags.writeable = False
b = numpy.ones((2, 2, 3), dtype=numpy.uint8) * 255
b.flags.writeable = False
# Motion: v v v v v v v v v
data = [a, a, a, a, a, a, b, b, a, a, b, b, a, a, b, b, a, a, b, b, a, a, b]
# ^ ^
# | L Motion starts here at timestamp 1466084606.
# L Video starts here at timestamp 1466084600
start_time = time.time()
for n, x in enumerate(data):
t = start_time + n
time.sleep(t - time.time())
yield stbt.Frame(x, time=t)
def METHOD_NAME():
frame = numpy.zeros((720, 1280, 3), dtype=numpy.uint8)
for x in range(0, 720, 2):
frame[x:x + 2, :, :] = 255
yield stbt.Frame(frame.copy(), time=x / 30.)
def clamp(x, bottom, top):
return min(top, max(bottom, x))
def gradient_wipe(min_=100, max_=200, swipe_height=40):
"""Use write_video(gradient_wipe()) to see what this looks like."""
frame = min_ * numpy.ones(
(720 + swipe_height * 4, 1280, 3), dtype=numpy.uint8)
diff = max_ - min_
# detect_motion ignores differences of under 25, so what's the fastest we
# can wipe while making sure the inter-frame differences are always under
# 25?:
speed = 24 * swipe_height / diff
print("pixel difference: %f" % (diff / swipe_height))
print("max_speed: %f" % speed)
edge = numpy.ones((swipe_height * 3, 1280, 3), dtype=numpy.uint8) * min_
for n in range(swipe_height * 3):
edge[n, :, :] = clamp(max_ - (n - swipe_height) * diff / swipe_height,
min_, max_)
for x in range(0, frame.shape[0] - swipe_height * 3, int(speed)):
frame[x:x + swipe_height * 3, :, :] = edge
yield stbt.Frame(frame[swipe_height * 2:swipe_height * 2 + 720].copy(),
time=x / 30.)
def write_video(g):
"""This was useful during the development of wipe and gradient_wipe.
Usage: write_video(gradient_wipe())"""
import cv2
vw = cv2.VideoWriter("test.avi", cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
30, (1280, 720))
for frame in g:
vw.write(frame)
vw.release()
class MockTime():
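"""Fake clock patched over time.time()/time.sleep() so these tests run without real delays."""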
def __init__(self, start_time=1466084600.):
self._time = start_time
self._functions = []
def time(self):
t = self._time
return t
def sleep(self, seconds):
while self._functions and self._functions[0][0] <= self._time + seconds:
_, fn = self._functions.pop(0)
fn()
self._time += seconds
def interrupt(self, exception):
def raise_exception():
raise exception
self.at(0, raise_exception)
def at(self, offset, func):
self._functions.append((self._time + offset, func))
self._functions.sort()
@contextmanager
def assert_duration(self, seconds):
start_time = self._time
yield self
assert self._time - start_time == seconds
@contextmanager
def patch(self):
from unittest.mock import patch
with patch("time.time", self.time), \
patch("time.sleep", self.sleep):
yield self |
test split token to subtokens | # Copyright 2018 MLBenchmark Group. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test Subtokenizer and string helper methods."""
import collections
import tempfile
import unittest
import tokenizer
class SubtokenizerTest(unittest.TestCase):
def _init_subtokenizer(self, vocab_list):
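# Write a throwaway vocab file (one quoted subtoken per line) and build a Subtokenizer from it.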
w = tempfile.NamedTemporaryFile(mode="w", delete=False)  # text mode so string subtokens can be written
for subtoken in vocab_list:
w.write("'%s'" % subtoken)
w.write("\n")
w.close()
return tokenizer.Subtokenizer(w.name, reserved_tokens=[])
def test_encode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
s = "testing 123"
encoded_list = subtokenizer.encode(s)
self.assertEqual([1, 2, 0], encoded_list)
def test_decode(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
decoded_str = subtokenizer.decode(encoded_list)
self.assertEqual("testing 123", decoded_str)
def test_subtoken_ids_to_tokens(self):
vocab_list = ["123_", "test", "ing_"]
subtokenizer = self._init_subtokenizer(vocab_list)
encoded_list = [1, 2, 0] # testing 123
token_list = subtokenizer._subtoken_ids_to_tokens(encoded_list)
self.assertEqual([u"testing", u"123"], token_list)
class StringHelperTest(unittest.TestCase):
def test_split_string_to_tokens(self):
text = "test? testing 123."
tokens = tokenizer._split_string_to_tokens(text)
self.assertEqual(["test", "? ", "testing", "123", "."], tokens)
def test_join_tokens_to_string(self):
tokens = ["test", "? ", "testing", "123", "."]
s = tokenizer._join_tokens_to_string(tokens)
self.assertEqual("test? testing 123.", s)
def test_escape_token(self):
token = u"abc_\\4"
alphabet = set("abc_\\u;")
escaped_token = tokenizer._escape_token(token, alphabet)
self.assertEqual("abc\\u\\\\\\52;_", escaped_token)
def test_unescape_token(self):
escaped_token = u"Underline: \\u, Backslash: \\\\, Unicode: \\52;"
unescaped_token = tokenizer._unescape_token(escaped_token)
self.assertEqual(
"Underline: _, Backslash: \\, Unicode: 4", unescaped_token)
def test_list_to_index_dict(self):
lst = ["test", "strings"]
d = tokenizer._list_to_index_dict(lst)
self.assertDictEqual({"test": 0, "strings": 1}, d)
def METHOD_NAME(self):
token = "abc"
subtoken_dict = {"a": 0, "b": 1, "c": 2, "ab": 3}
max_subtoken_length = 2
subtokens = tokenizer._split_token_to_subtokens(
token, subtoken_dict, max_subtoken_length)
self.assertEqual(["ab", "c"], subtokens)
def test_generate_alphabet_dict(self):
s = ["testing", "123"]
reserved_tokens = ["???"]
alphabet = tokenizer._generate_alphabet_dict(s, reserved_tokens)
self.assertIn("?", alphabet)
self.assertIn("t", alphabet)
self.assertIn("e", alphabet)
self.assertIn("s", alphabet)
self.assertIn("i", alphabet)
self.assertIn("n", alphabet)
self.assertIn("g", alphabet)
self.assertIn("1", alphabet)
self.assertIn("2", alphabet)
self.assertIn("3", alphabet)
def test_count_and_gen_subtokens(self):
token_counts = {"abc": 5}
alphabet = set("abc_")
subtoken_dict = {"a": 0, "b": 1, "c": 2, "_": 3}
max_subtoken_length = 2
subtoken_counts = tokenizer._count_and_gen_subtokens(
token_counts, alphabet, subtoken_dict, max_subtoken_length)
self.assertIsInstance(subtoken_counts, collections.defaultdict)
self.assertDictEqual(
{"a": 5, "b": 5, "c": 5, "_": 5, "ab": 5, "bc": 5, "c_": 5,
"abc": 5, "bc_": 5, "abc_": 5}, subtoken_counts)
def test_filter_and_bucket_subtokens(self):
subtoken_counts = collections.defaultdict(
int, {"a": 2, "b": 4, "c": 1, "ab": 6, "ac": 3, "abbc": 5})
min_count = 3
subtoken_buckets = tokenizer._filter_and_bucket_subtokens(
subtoken_counts, min_count)
self.assertEqual(len(subtoken_buckets[0]), 0)
self.assertEqual(set("b"), subtoken_buckets[1])
self.assertEqual(set(["ab", "ac"]), subtoken_buckets[2])
self.assertEqual(len(subtoken_buckets[3]), 0)
self.assertEqual(set(["abbc"]), subtoken_buckets[4])
def test_gen_new_subtoken_list(self):
subtoken_counts = collections.defaultdict(
int, {"translate": 10, "t": 40, "tr": 16, "tra": 12})
min_count = 5
alphabet = set("translate")
reserved_tokens = ["reserved", "tokens"]
subtoken_list, max_token_length = tokenizer._gen_new_subtoken_list(
subtoken_counts, min_count, alphabet, reserved_tokens)
# Check that "tra" isn"t in the list (its count should be decremented to 2,
# so it should not be added to the canddiate list).
self.assertNotIn("tra", subtoken_list)
self.assertIn("tr", subtoken_list)
self.assertIn("t", subtoken_list)
self.assertEqual(len("translate"), max_token_length)
def test_generate_subtokens(self):
token_counts = {"ab": 1, "bc": 3, "abc": 5}
alphabet = set("abc_")
min_count = 100
num_iterations = 1
reserved_tokens = ["reserved", "tokens"]
vocab_list = tokenizer._generate_subtokens(
token_counts, alphabet, min_count, num_iterations, reserved_tokens)
# Check that reserved tokens are at the front of the list
self.assertEqual(vocab_list[:2], reserved_tokens)
# Check that each character in alphabet is in the vocab list
for c in alphabet:
self.assertIn(c, vocab_list)
if __name__ == "__main__":
unittest.main() |
test iter2 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from paddlenlp.data import SamplerHelper
from paddlenlp.datasets import load_dataset
from tests.common_test import CpuCommonTest
from tests.testing_utils import assert_raises, get_tests_dir
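# Three-way comparison helper (Python 3 has no built-in cmp); used by the sort samplers below.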
def cmp(x, y):
return -1 if x < y else 1 if x > y else 0
class TestSampler(CpuCommonTest):
@classmethod
def setUpClass(cls):
fixture_path = get_tests_dir(os.path.join("fixtures", "dummy"))
cls.train_ds = load_dataset("clue", "tnews", data_files=[os.path.join(fixture_path, "tnews", "train.json")])
def test_length(self):
train_batch_sampler = SamplerHelper(self.train_ds)
self.check_output_equal(len(train_batch_sampler), 10)
self.check_output_equal(len(train_batch_sampler), train_batch_sampler.length)
train_batch_sampler.length = 5
self.check_output_equal(len(train_batch_sampler), 5)
def test_iter1(self):
train_ds_len = len(self.train_ds)
ds_iter = iter(range(train_ds_len - 1, -1, -1))
train_batch_sampler = SamplerHelper(self.train_ds, ds_iter)
for i, sample in enumerate(train_batch_sampler):
self.check_output_equal(i, train_ds_len - 1 - sample)
def METHOD_NAME(self):
train_batch_sampler = SamplerHelper(self.train_ds)
for i, sample in enumerate(train_batch_sampler):
self.check_output_equal(i, sample)
def test_list(self):
train_batch_sampler = SamplerHelper(self.train_ds)
list_sampler = train_batch_sampler.list()
self.check_output_equal(type(iter(list_sampler)).__name__, "list_iterator")
for i, sample in enumerate(list_sampler):
self.check_output_equal(i, sample)
def test_shuffle_no_buffer_size(self):
train_batch_sampler = SamplerHelper(self.train_ds)
shuffle_sampler = train_batch_sampler.shuffle(seed=102)
expected_result = {0: 4, 1: 9}
for i, sample in enumerate(shuffle_sampler):
if i in expected_result.keys():
self.check_output_equal(sample, expected_result[i])
def test_shuffle_buffer_size(self):
train_batch_sampler = SamplerHelper(self.train_ds)
shuffle_sampler = train_batch_sampler.shuffle(buffer_size=10, seed=102)
expected_result = {0: 4, 1: 9}
for i, sample in enumerate(shuffle_sampler):
if i in expected_result.keys():
self.check_output_equal(sample, expected_result[i])
def test_sort_buffer_size(self):
train_ds_len = len(self.train_ds)
ds_iter = iter(range(train_ds_len - 1, -1, -1))
train_batch_sampler = SamplerHelper(self.train_ds, ds_iter)
sort_sampler = train_batch_sampler.sort(cmp=lambda x, y, dataset: cmp(x, y), buffer_size=5)
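# With buffer_size=5, each half of the reversed range is sorted independently,
# so the first five samples come out as 5..9 and the last five as 0..4.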
for i, sample in enumerate(sort_sampler):
if i < 5:
self.check_output_equal(i + 5, sample)
else:
self.check_output_equal(i - 5, sample)
def test_sort_no_buffer_size(self):
train_ds_len = len(self.train_ds)
ds_iter = iter(range(train_ds_len - 1, -1, -1))
train_batch_sampler = SamplerHelper(self.train_ds, ds_iter)
sort_sampler = train_batch_sampler.sort(cmp=lambda x, y, dataset: cmp(x, y))
for i, sample in enumerate(sort_sampler):
self.check_output_equal(i, sample)
def test_batch(self):
train_batch_sampler = SamplerHelper(self.train_ds)
batch_size = 3
batch_sampler = train_batch_sampler.batch(batch_size)
for i, sample in enumerate(batch_sampler):
for j, minibatch in enumerate(sample):
self.check_output_equal(i * batch_size + j, minibatch)
@assert_raises(ValueError)
def test_batch_oversize(self):
train_batch_sampler = SamplerHelper(self.train_ds)
batch_size = 3
batch_sampler = train_batch_sampler.batch(
batch_size,
key=lambda size_so_far, minibatch_len: max(size_so_far, minibatch_len),
batch_size_fn=lambda new, count, sofar, data_source: len(data_source),
)
for i, sample in enumerate(batch_sampler):
for j, minibatch in enumerate(sample):
self.check_output_equal(i * batch_size + j, minibatch)
def test_shard(self):
train_batch_sampler = SamplerHelper(self.train_ds)
shard_sampler1 = train_batch_sampler.shard(2, 0)
shard_sampler2 = train_batch_sampler.shard(2, 1)
for i, sample in enumerate(shard_sampler1):
self.check_output_equal(i * 2, sample)
for i, sample in enumerate(shard_sampler2):
self.check_output_equal(i * 2 + 1, sample)
def test_shard_default(self):
train_batch_sampler = SamplerHelper(self.train_ds)
shard_sampler1 = train_batch_sampler.shard()
for i, sample in enumerate(shard_sampler1):
self.check_output_equal(i, sample)
def test_apply(self):
train_ds_len = len(self.train_ds)
ds_iter = iter(range(train_ds_len - 1, -1, -1))
train_batch_sampler = SamplerHelper(self.train_ds, ds_iter)
apply_sampler = train_batch_sampler.apply(
lambda sampler: SamplerHelper.sort(sampler, cmp=lambda x, y, dataset: cmp(x, y))
)
for i, sample in enumerate(apply_sampler):
self.check_output_equal(i, sample)
if __name__ == "__main__":
unittest.main() |
tear down module | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import tempfile
import numpy
from pyscf import lib
from pyscf import gto
from pyscf import ao2mo
def setUpModule():
global mol, eri0
mol = gto.Mole()
mol.verbose = 5
mol.output = '/dev/null'
mol.atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
eri0 = mol.intor('int2e_spinor')
def METHOD_NAME():
global mol, eri0
mol.stdout.close()
del mol, eri0
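# Reference AO->MO transformation: contract each index of the two-electron integral tensor with the matching MO coefficients.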
def trans(eri, mos):
eriref = lib.einsum('pjkl,pi->ijkl', eri , mos[0].conj())
eriref = lib.einsum('ipkl,pj->ijkl', eriref, mos[1])
eriref = lib.einsum('ijpl,pk->ijkl', eriref, mos[2].conj())
eriref = lib.einsum('ijkp,pl->ijkl', eriref, mos[3])
return eriref
class KnownValues(unittest.TestCase):
def test_r_outcore_eri(self):
n2c = mol.nao_2c()
numpy.random.seed(1)
mo = numpy.random.random((n2c,n2c)) + numpy.random.random((n2c,n2c))*1j
eriref = trans(eri0, [mo]*4)
ftmp = tempfile.NamedTemporaryFile()
ao2mo.kernel(mol, mo, erifile=ftmp.name, intor='int2e_spinor', max_memory=10, ioblk_size=5)
with ao2mo.load(ftmp) as eri1:
self.assertAlmostEqual(lib.fp(eri1), -550.72966498073129-1149.3561026721848j, 8)
self.assertAlmostEqual(abs(eri1-eriref.reshape(n2c**2,n2c**2)).max(), 0, 9)
eri1 = ao2mo.kernel(mol, (mo[:,:2], mo[:,:4], mo[:,:2], mo[:,:4]),
erifile=ftmp.name, intor='int2e_spinor')
with ao2mo.load(ftmp) as eri1:
self.assertAlmostEqual(abs(eri1-eriref[:2,:4,:2,:4].reshape(8,8)).max(), 0, 9)
ftmp = lib.H5TmpFile()
ao2mo.kernel(mol, (mo[:,:2], mo[:,:4], mo[:,:2], mo[:,:4]),
erifile=ftmp, intor='int2e_spinor', aosym='s1')
with ao2mo.load(ftmp) as eri1:
self.assertAlmostEqual(abs(eri1-eriref[:2,:4,:2,:4].reshape(8,8)).max(), 0, 9)
eri1 = ao2mo.kernel(mol, (mo[:,:2], mo[:,:4], mo[:,:4], mo[:,:2]),
intor='int2e_spinor', aosym='s2ij')
self.assertAlmostEqual(abs(eri1-eriref[:2,:4,:4,:2].reshape(8,8)).max(), 0, 9)
eri1 = ao2mo.kernel(mol, (mo[:,:2], mo[:,:4], mo[:,:2], mo[:,:4]),
intor='int2e_spinor', aosym='s2kl')
self.assertAlmostEqual(abs(eri1-eriref[:2,:4,:2,:4].reshape(8,8)).max(), 0, 9)
eri1 = ao2mo.kernel(mol, mo[:,:0], intor='int2e_spinor')
self.assertTrue(eri1.size == 0)
def test_r_outcore_eri_grad(self):
n2c = mol.nao_2c()
numpy.random.seed(1)
mo = numpy.random.random((n2c,4)) + numpy.random.random((n2c,4))*1j
eri1 = ao2mo.kernel(mol, mo, intor='int2e_ip1_spinor')
self.assertAlmostEqual(lib.fp(eri1), -696.47505768925771-265.10054236197817j, 8)
def test_ao2mo_r_e2(self):
n2c = mol.nao_2c()
numpy.random.seed(1)
mo = numpy.random.random((n2c,n2c)) + numpy.random.random((n2c,n2c))*1j
tao = numpy.asarray(mol.tmap(), dtype=numpy.int32)
buf = ao2mo._ao2mo.r_e1('int2e_spinor', mo, (0,4,0,3), (0, 2, 8),
mol._atm, mol._bas, mol._env, tao, 's1')
buf = buf.reshape(8,12).T
ref = lib.einsum('pqkl,pi,qj->ijkl', eri0, mo[:,:4].conj(), mo[:,:3])
self.assertAlmostEqual(lib.fp(buf), 0.30769732102451997-0.58664393190628461j, 8)
self.assertAlmostEqual(abs(buf[:,:4]-ref[:,:,:2,:2].reshape(12,4)).max(), 0, 9)
self.assertAlmostEqual(abs(buf[:,4:]-ref[:,:,:2,2:4].reshape(12,4)).max(), 0, 9)
buf = ao2mo._ao2mo.r_e2(eri0.reshape(n2c**2,n2c,n2c), mo, (0,2,0,4), tao, None, 's1')
ref = lib.einsum('xpq,pk,ql->xkl', eri0.reshape(n2c**2,n2c,n2c),
mo[:,:2].conj(), mo[:,:4])
self.assertAlmostEqual(lib.fp(buf), 14.183520455200011+10.179224253811057j, 8)
self.assertAlmostEqual(abs(buf.reshape(n2c**2,2,4)-ref).max(), 0, 9)
buf = ao2mo._ao2mo.r_e2(eri0.reshape(n2c**2,n2c,n2c), mo, (0,0,4,4), tao, None, 's1')
self.assertEqual(buf.size, 0)
if __name__ == '__main__':
print('Full Tests for ao2mo.r_outcore')
unittest.main()
#
#if __name__ == '__main__':
# from pyscf import scf
# from pyscf import gto
# from pyscf.ao2mo import addons
# mol = gto.M(
# verbose = 0,
# atom = [
# ["O" , (0. , 0. , 0.)],
# [1 , (0. , -0.757 , 0.587)],
# [1 , (0. , 0.757 , 0.587)]],
# basis = 'ccpvdz')
#
# mf = scf.RHF(mol)
# mf.scf()
#
# eri0 = full(mf._eri, mf.mo_coeff)
# mos = (mf.mo_coeff,)*4
# print(numpy.allclose(eri0, full(mol, mf.mo_coeff)))
# print(numpy.allclose(eri0, general(mf._eri, mos)))
# print(numpy.allclose(eri0, general(mol, mos)))
# with load(full(mol, mf.mo_coeff, 'h2oeri.h5', dataname='dat1'), 'dat1') as eri1:
# print(numpy.allclose(eri0, eri1))
# with load(general(mol, mos, 'h2oeri.h5', dataname='dat1'), 'dat1') as eri1:
# print(numpy.allclose(eri0, eri1))
# |
add weight | """Defines common layers."""
import tensorflow as tf
from opennmt.utils.misc import shape_list
def dropout(x, rate, training=None):
"""Simple dropout layer."""
if not training or rate == 0:
return x
return tf.nn.dropout(x, rate)
def gelu(x):
"""Gaussian Error Linear Unit activation function described in
https://arxiv.org/abs/1606.08415.
"""
return tf.nn.gelu(x, approximate=True)
class Dense(tf.keras.layers.Dense):
"""Small ``tf.keras.layers.Dense`` extension to possibly reuse an existing weight
matrix.
"""
def __init__(self, units, weight=None, transpose=False, **kwargs):
"""Initializes the layer.
Args:
units: Positive integer, dimensionality of the output space.
weight: The weight to reuse.
transpose: Whether :obj:`weight` should be transposed or not.
kwargs: Additional layers arguments.
"""
super().__init__(units, **kwargs)
self.set_kernel(weight, transpose=transpose)
def set_kernel(self, weight, transpose=False):
"""Use :obj:`weight` as the kernel weights matrix.
Args:
weight: The weight to use.
transpose: Whether :obj:`weight` should be transposed or not.
Raises:
ValueError: if the layer is already built.
"""
if self.built:
raise ValueError("The layer is already built")
self.weight = weight
self.transpose = transpose
def METHOD_NAME(self, name, *args, **kwargs):
if self.weight is not None and name == "kernel":
return self.weight
return super().METHOD_NAME(name, *args, **kwargs)
def call(self, inputs):
shape = shape_list(inputs)
rank = len(shape)
if rank > 2:
inputs = tf.reshape(inputs, [-1, shape[-1]])
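# In float16, pad the (possibly transposed) kernel so the output dimension is a
# multiple of 8 (presumably for tensor-core friendly shapes), then slice the padding off.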
if inputs.dtype is tf.float16 and self.units % 8 != 0:
padding_size = 8 - self.units % 8
paddings = (
[[0, padding_size], [0, 0]]
if self.transpose
else [[0, 0], [0, padding_size]]
)
kernel = tf.pad(self.kernel, paddings)
outputs = tf.matmul(inputs, kernel, transpose_b=self.transpose)
outputs = outputs[:, : self.units]
else:
outputs = tf.matmul(inputs, self.kernel, transpose_b=self.transpose)
if self.use_bias:
outputs = tf.nn.bias_add(outputs, self.bias)
if self.activation is not None:
outputs = self.activation(outputs)
if rank > 2:
outputs = tf.reshape(outputs, shape[:-1] + [self.units])
return outputs
def map_v1_weights(self, weights):
m = [(self.kernel, weights["kernel"])]
if self.use_bias:
m.append((self.bias, weights["bias"]))
return m
class LayerNorm(tf.keras.layers.LayerNormalization):
"""Layer normalization."""
def map_v1_weights(self, weights):
return [(self.beta, weights["beta"]), (self.gamma, weights["gamma"])]
class LayerWrapper(tf.keras.layers.Layer):
"""Layer wrapper for input/output normalization, input/output dropout and
residual connection.
"""
def __init__(
self,
layer,
normalize_input=False,
normalize_output=False,
input_dropout=0,
output_dropout=0,
residual_connection=False,
**kwargs
):
"""Initializes the layer.
Args:
layer: The layer to wrap.
normalize_input: Apply layer normalization on the input.
normalize_output: Apply layer normalization on the output.
input_dropout: The probability to drop units in the layer input.
output_dropout: The probability to drop units in the layer output.
residual_connection: Add the inputs to layer outputs (if their shape are
compatible).
kwargs: Additional layer arguments.
"""
super().__init__(**kwargs)
self.layer = layer
self.input_layer_norm = LayerNorm() if normalize_input else None
self.output_layer_norm = LayerNorm() if normalize_output else None
self.input_dropout = input_dropout
self.output_dropout = output_dropout
self.residual_connection = residual_connection
def call(self, inputs, *args, **kwargs):
"""Runs the wrapper."""
training = kwargs.get("training")
x = inputs
if self.input_layer_norm is not None:
x = self.input_layer_norm(x)
x = dropout(x, self.input_dropout, training=training)
all_outputs = self.layer(x, *args, **kwargs)
if isinstance(all_outputs, tuple):
outputs = all_outputs[0]
extra_outputs = list(all_outputs)[1:]
else:
outputs = all_outputs
extra_outputs = None
outputs = dropout(outputs, self.output_dropout, training=training)
if self.residual_connection and outputs.shape[-1] == inputs.shape[-1]:
outputs += inputs
if self.output_layer_norm is not None:
outputs = self.output_layer_norm(outputs)
if extra_outputs:
return tuple([outputs] + extra_outputs)
return outputs
# The wrapper should be serializable to be used in tf.keras.layers.Bidirectional.
def get_config(self):
"""Returns the layer wrapper configuration."""
config = {
"layer": tf.keras.layers.serialize(self.layer),
"normalize_input": self.input_layer_norm is not None,
"normalize_output": self.output_layer_norm is not None,
"input_dropout": self.input_dropout,
"output_dropout": self.output_dropout,
"residual_connection": self.residual_connection,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
@classmethod
def from_config(cls, config):
"""Creates a layer wrapper from its configuration."""
layer = tf.keras.layers.deserialize(config.pop("layer"))
return cls(layer, **config) |
unregister | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.python import log
from buildbot.process.measured_service import MeasuredBuildbotServiceManager
from buildbot.util import misc
from buildbot.worker.protocols import msgpack as bbmsgpack
from buildbot.worker.protocols import pb as bbpb
class WorkerRegistration:
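"""Tracks a single worker's registration on the master's PB and msgpack listeners."""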
__slots__ = ['master', 'worker', 'pbReg', 'msgpack_reg']
def __init__(self, master, worker):
self.master = master
self.worker = worker
self.pbReg = None
self.msgpack_reg = None
def __repr__(self):
return f"<{self.__class__.__name__} for {repr(self.worker.workername)}>"
@defer.inlineCallbacks
def unregister(self):
bs = self.worker
# update with portStr=None to remove any registration in place
if self.pbReg is not None:
yield self.master.workers.pb.updateRegistration(
bs.workername, bs.password, None)
if self.msgpack_reg is not None:
yield self.master.workers.msgpack.updateRegistration(
bs.workername, bs.password, None)
yield self.master.workers.METHOD_NAME(self)
@defer.inlineCallbacks
def update(self, worker_config, global_config):
# For most protocols, there's nothing to do, but for PB we must
# update the registration in case the port or password has changed.
if 'pb' in global_config.protocols:
self.pbReg = yield self.master.workers.pb.updateRegistration(
worker_config.workername, worker_config.password,
global_config.protocols['pb']['port'])
if 'msgpack_experimental_v7' in global_config.protocols:
self.msgpack_reg = yield self.master.workers.msgpack.updateRegistration(
worker_config.workername, worker_config.password,
global_config.protocols['msgpack_experimental_v7']['port'])
def getPBPort(self):
return self.pbReg.getPort()
def get_msgpack_port(self):
return self.msgpack_reg.getPort()
class WorkerManager(MeasuredBuildbotServiceManager):
name = "WorkerManager"
managed_services_name = "workers"
config_attr = "workers"
PING_TIMEOUT = 10
reconfig_priority = 127
def __init__(self, master):
super().__init__()
self.pb = bbpb.Listener(master)
self.msgpack = bbmsgpack.Listener(master)
# WorkerRegistration instances keyed by worker name
self.registrations = {}
# connection objects keyed by worker name
self.connections = {}
@property
def workers(self):
# self.workers contains a ready Worker instance for each
# potential worker, i.e. all the ones listed in the config file.
# If the worker is connected, self.workers[workername].worker will
# contain a RemoteReference to their Bot instance. If it is not
# connected, that attribute will hold None.
# workers attribute is actually just an alias to multiService's
# namedService
return self.namedServices
def getWorkerByName(self, workerName):
return self.registrations[workerName].worker
def register(self, worker):
# TODO: doc that reg.update must be called, too
workerName = worker.workername
reg = WorkerRegistration(self.master, worker)
self.registrations[workerName] = reg
return defer.succeed(reg)
def METHOD_NAME(self, registration):
del self.registrations[registration.worker.workername]
@defer.inlineCallbacks
def newConnection(self, conn, workerName):
if workerName in self.connections:
log.msg(f"Got duplication connection from '{workerName}'"
" starting arbitration procedure")
old_conn = self.connections[workerName]
try:
yield misc.cancelAfter(self.PING_TIMEOUT,
old_conn.remotePrint("master got a duplicate connection"),
self.master.reactor)
# if we get here then old connection is still alive, and new
# should be rejected
raise RuntimeError("rejecting duplicate worker")
except defer.CancelledError:
old_conn.loseConnection()
log.msg(f"Connected worker '{workerName}' ping timed out after {self.PING_TIMEOUT} "
"seconds")
except RuntimeError:
raise
except Exception as e:
old_conn.loseConnection()
log.msg(f"Got error while trying to ping connected worker {workerName}:{e}")
log.msg(f"Old connection for '{workerName}' was lost, accepting new")
try:
yield conn.remotePrint(message="attached")
info = yield conn.remoteGetWorkerInfo()
log.msg(f"Got workerinfo from '{workerName}'")
except Exception as e:
log.msg(f"Failed to communicate with worker '{workerName}'\n{e}".format(workerName, e))
raise
conn.info = info
self.connections[workerName] = conn
def remove():
del self.connections[workerName]
conn.notifyOnDisconnect(remove)
# accept the connection
return True |
create subscription in enrollment account initial | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import uuid
from msrest.pipeline import ClientRawResponse
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling
from .. import models
class SubscriptionFactoryOperations(object):
"""SubscriptionFactoryOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
:ivar api_version: Version of the API to be used with the client request. Constant value: "2018-03-01-preview".
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.api_version = "2018-03-01-preview"
self.config = config
def METHOD_NAME(
self, enrollment_account_name, body, custom_headers=None, raw=False, **operation_config):
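# Internal helper: issues the initial POST; the public create_subscription_in_enrollment_account below wraps it in an LROPoller.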
# Construct URL
url = self.create_subscription_in_enrollment_account.metadata['url']
path_format_arguments = {
'enrollmentAccountName': self._serialize.url("enrollment_account_name", enrollment_account_name, 'str')
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if self.config.generate_client_request_id:
header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
if custom_headers:
header_parameters.update(custom_headers)
if self.config.accept_language is not None:
header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')
# Construct body
body_content = self._serialize.body(body, 'SubscriptionCreationParameters')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200, 202]:
raise models.ErrorResponseException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 200:
deserialized = self._deserialize('SubscriptionCreationResult', response)
header_dict = {
'Location': 'str',
'Retry-After': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
def create_subscription_in_enrollment_account(
self, enrollment_account_name, body, custom_headers=None, raw=False, polling=True, **operation_config):
"""Creates an Azure subscription.
:param enrollment_account_name: The name of the enrollment account to
which the subscription will be billed.
:type enrollment_account_name: str
:param body: The subscription creation parameters.
:type body:
~azure.mgmt.subscription.models.SubscriptionCreationParameters
:param dict custom_headers: headers that will be added to the request
:param bool raw: The poller return type is ClientRawResponse, the
direct response alongside the deserialized response
:param polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:return: An instance of LROPoller that returns
SubscriptionCreationResult or
ClientRawResponse<SubscriptionCreationResult> if raw==True
:rtype:
~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.subscription.models.SubscriptionCreationResult]
or
~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.subscription.models.SubscriptionCreationResult]]
:raises:
:class:`ErrorResponseException<azure.mgmt.subscription.models.ErrorResponseException>`
"""
raw_result = self.METHOD_NAME(
enrollment_account_name=enrollment_account_name,
body=body,
custom_headers=custom_headers,
raw=True,
**operation_config
)
def get_long_running_output(response):
header_dict = {
'Location': 'str',
'Retry-After': 'str',
}
deserialized = self._deserialize('SubscriptionCreationResult', response)
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
lro_delay = operation_config.get(
'long_running_operation_timeout',
self.config.long_running_operation_timeout)
if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
create_subscription_in_enrollment_account.metadata = {'url': '/providers/Microsoft.Billing/enrollmentAccounts/{enrollmentAccountName}/providers/Microsoft.Subscription/createSubscription'} |
build command | import subprocess
from unittest.mock import MagicMock, call
import pytest
from briefcase.console import Console, Log
from briefcase.exceptions import BriefcaseCommandError
from briefcase.platforms.linux.system import LinuxSystemBuildCommand
@pytest.fixture
def METHOD_NAME(tmp_path, first_app):
command = LinuxSystemBuildCommand(
logger=Log(),
console=Console(),
base_path=tmp_path / "base_path",
data_path=tmp_path / "briefcase",
apps={"first": first_app},
)
command.tools.host_os = "Linux"
command.tools.host_arch = "wonky"
# Mock subprocess
command.tools.subprocess = MagicMock()
return command
def test_deb_requirements(METHOD_NAME, first_app_config):
"""Debian requirements can be verified."""
first_app_config.target_vendor_base = "debian"
METHOD_NAME.verify_system_packages(first_app_config)
# The packages were verified
assert METHOD_NAME.tools.subprocess.check_output.mock_calls == [
call(["dpkg", "-s", "python3-dev"]),
call(["dpkg", "-s", "build-essential"]),
]
def test_rpm_requirements(METHOD_NAME, first_app_config):
"""RHEL requirements can be verified."""
first_app_config.target_vendor_base = "rhel"
METHOD_NAME.verify_system_packages(first_app_config)
assert METHOD_NAME.tools.subprocess.check_output.mock_calls == [
call(["rpm", "-q", "python3-devel"]),
call(["rpm", "-q", "gcc"]),
call(["rpm", "-q", "make"]),
call(["rpm", "-q", "pkgconf-pkg-config"]),
]
def test_suse_requirements(METHOD_NAME, first_app_config):
"""SUSE requirements can be verified."""
first_app_config.target_vendor_base = "suse"
METHOD_NAME.verify_system_packages(first_app_config)
assert METHOD_NAME.tools.subprocess.check_output.mock_calls == [
call(["rpm", "-q", "--whatprovides", "python3-devel"]),
call(["rpm", "-q", "--whatprovides", "patterns-devel-base-devel_basis"]),
]
def test_arch_requirements(METHOD_NAME, first_app_config, capsys):
"""Arch requirements can be verified."""
first_app_config.target_vendor_base = "arch"
METHOD_NAME.verify_system_packages(first_app_config)
assert METHOD_NAME.tools.subprocess.check_output.mock_calls == [
call(["pacman", "-Q", "python3"]),
call(["pacman", "-Q", "base-devel"]),
]
def test_unknown_requirements(METHOD_NAME, first_app_config, capsys):
"""An unknown system can't be verified."""
first_app_config.target_vendor_base = "somevendor"
METHOD_NAME.verify_system_packages(first_app_config)
# No packages verified
METHOD_NAME.tools.subprocess.check_output.assert_not_called()
# A warning was logged.
output = capsys.readouterr().out
assert "WARNING: Can't verify system packages" in output
def test_missing_packages(METHOD_NAME, first_app_config, capsys):
"""If there are missing system packages, an error is raised."""
# Mock the system requirement tools; there's a base requirement of
# a packaged called "compiler", verified using "check <pkg>", and
# installed using "system <pkg>"
METHOD_NAME._system_requirement_tools = MagicMock(
return_value=(
["compiler"],
["check"],
["system", "install_flag"],
)
)
# Add some system requirements.
first_app_config.system_requires = ["first", "second", "third"]
# Mock the side effect of checking those requirements.
METHOD_NAME.tools.subprocess.check_output.side_effect = [
subprocess.CalledProcessError(cmd="check", returncode=1),
"installed",
subprocess.CalledProcessError(cmd="check", returncode=1),
"installed",
]
# Verify the requirements. This will raise an error, but the error
# message will tell you how to install the system packages.
with pytest.raises(
BriefcaseCommandError,
match=r" sudo system install_flag compiler second",
):
METHOD_NAME.verify_system_packages(first_app_config)
def test_packages_installed(METHOD_NAME, first_app_config, capsys):
"""If all required packages are installed, no error is raised."""
# Mock the system requirement tools; there's a base requirement of
# a packaged called "compiler", verified using "check <pkg>", and
# installed using "system <pkg>"
METHOD_NAME._system_requirement_tools = MagicMock(
return_value=(
["compiler"],
["check"],
["system", "install_flag"],
)
)
# Add some system requirements.
first_app_config.system_requires = ["first", "second", "third"]
# Mock the effect of checking requirements that are all present
METHOD_NAME.tools.subprocess.check_output.return_value = "installed"
# Verify the requirements. This should not raise an error.
METHOD_NAME.verify_system_packages(first_app_config)
assert METHOD_NAME.tools.subprocess.check_output.mock_calls == [
call(["check", "compiler"]),
call(["check", "first"]),
call(["check", "second"]),
call(["check", "third"]),
] |
close | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from . import models as _models
from .._serialization import Deserializer, Serializer
from ._configuration import KeyVaultManagementClientConfiguration
from .operations import (
Operations,
PrivateEndpointConnectionsOperations,
PrivateLinkResourcesOperations,
VaultsOperations,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class KeyVaultManagementClient: # pylint: disable=client-accepts-api-version-keyword
"""The Azure management API provides a RESTful set of web services that interact with Azure Key
Vault.
:ivar vaults: VaultsOperations operations
:vartype vaults: azure.mgmt.keyvault.v2018_02_14.operations.VaultsOperations
:ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
:vartype private_endpoint_connections:
azure.mgmt.keyvault.v2018_02_14.operations.PrivateEndpointConnectionsOperations
:ivar private_link_resources: PrivateLinkResourcesOperations operations
:vartype private_link_resources:
azure.mgmt.keyvault.v2018_02_14.operations.PrivateLinkResourcesOperations
:ivar operations: Operations operations
:vartype operations: azure.mgmt.keyvault.v2018_02_14.operations.Operations
:param credential: Credential needed for the client to connect to Azure. Required.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: Subscription credentials which uniquely identify Microsoft Azure
subscription. The subscription ID forms part of the URI for every service call. Required.
:type subscription_id: str
:param base_url: Service URL. Default value is "https://management.azure.com".
:type base_url: str
:keyword api_version: Api Version. Default value is "2018-02-14". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = KeyVaultManagementClientConfiguration(
credential=credential, subscription_id=subscription_id, **kwargs
)
self._client: ARMPipelineClient = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in _models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.vaults = VaultsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.private_link_resources = PrivateLinkResourcesOperations(
self._client, self._config, self._serialize, self._deserialize
)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(self, request: HttpRequest, **kwargs: Any) -> HttpResponse:
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/dpcodegen/python/send_request
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def METHOD_NAME(self) -> None:
self._client.METHOD_NAME()
def __enter__(self) -> "KeyVaultManagementClient":
self._client.__enter__()
return self
def __exit__(self, *exc_details: Any) -> None:
self._client.__exit__(*exc_details) |
test broken basepath removal | # SPDX-License-Identifier: MIT
# SPDX-FileCopyrightText: © 2004 Tristan Seligmann and Jonathan Jacobs
# SPDX-FileCopyrightText: © 2012 Bastian Kleineidam
# SPDX-FileCopyrightText: © 2015 Tobias Gruetzmacher
import json
import os
import re
import pytest
import responses
import dosagelib.cmd
import httpmocks
def cmd(*options):
"""'Fake' run dosage with given options."""
return dosagelib.cmd.main(('--allow-multiple',) + options)
def cmd_ok(*options):
assert cmd(*options) == 0
def cmd_err(*options):
assert cmd(*options) == 1
@pytest.mark.usefixtures('_nosleep', '_noappdirs')
class TestDosage:
"""Test the dosage commandline client."""
# This shouldn't hit the network at all, so add responses without mocks to
# make sure it doesn't do that
@responses.activate
@pytest.mark.parametrize(('option'), [
('-l'),
('--list'),
('--singlelist'),
])
def test_list_comics(self, option, capfd):
cmd_ok(option)
out = capfd.readouterr().out
assert 'ADummyTestScraper' in out
@responses.activate
def test_display_version(self):
cmd_ok("--version")
@responses.activate
def test_update_available(self, capfd):
responses.add(responses.GET, re.compile(r'https://api\.github\.com/'),
json={'tag_name': '9999.0', 'assets': [
{'browser_download_url': 'TEST.whl'},
{'browser_download_url': 'TEST.exe'},
]})
cmd_ok('--version', '-v')
out = capfd.readouterr().out
best = 'TEST.exe' if os.name == 'nt' else 'TEST.whl'
assert best in out
assert 'A new version' in out
@responses.activate
def test_no_update_available(self, capfd):
responses.add(responses.GET, re.compile(r'https://api\.github\.com/'),
json={'tag_name': '1.0'})
cmd_ok('--version', '-v')
out = capfd.readouterr().out
assert 'Detected local or development' in out
@responses.activate
def test_current(self, capfd):
responses.add(responses.GET, re.compile(r'https://api\.github\.com/'),
json={'tag_name': dosagelib.__version__})
cmd_ok('--version', '-v')
out = capfd.readouterr().out
assert out.endswith('issues\n')
@responses.activate
def test_update_broken(self, capfd):
responses.add(responses.GET, re.compile(r'https://api\.github\.com/'),
json={})
cmd_ok('--version', '-v')
out = capfd.readouterr().out
assert 'invalid update file' in out
def test_display_help(self):
for option in ("-h", "--help"):
with pytest.raises(SystemExit):
cmd(option)
def test_module_help(self, capfd):
cmd_ok("-m", "-t", "xkcd")
out = capfd.readouterr().out
assert re.match(r'([0-9][0-9]:){2}.. xkcd>', out)
def METHOD_NAME(self):
assert cmd('-m', 'Comicsxkcd') == 2
def test_working_basepath_removal(self):
cmd_ok('-m', 'Comics/xkcd')
cmd_ok('-m', 'Comics\\xkcd')
def test_no_comics_specified(self):
cmd_err()
def test_unknown_option(self):
with pytest.raises(SystemExit):
cmd('--imadoofus')
def test_multiple_comics_match(self):
cmd_err('Garfield')
@responses.activate
def test_fetch_html_and_rss_json(self, tmpdir):
httpmocks.xkcd()
cmd_ok("-n", "2", "-v", "-b", str(tmpdir), "-o", "html", "-o", "rss",
"-o", "json", "--no-downscale", "xkcd")
@responses.activate
def test_fetch_html_and_rss_2(self, tmp_path):
httpmocks.page('http://www.bloomingfaeries.com/', 'bf-home')
httpmocks.page(re.compile('http://www.*faeries-405/'), 'bf-405')
httpmocks.png(re.compile(r'http://www\.blooming.*405.*jpg'))
httpmocks.png(re.compile(r'http://www\.blooming.*406.*jpg'), 'tall')
cmd_ok("--numstrips", "2", "--baseurl", "bla", "--basepath",
str(tmp_path), "--output", "rss", "--output", "html", "--adult",
"BloomingFaeries")
html = next((tmp_path / 'html').glob('*.html')).read_text()
assert "width=" in html
@responses.activate
def test_fetch_html_broken_img(self, tmp_path):
httpmocks.page('http://www.bloomingfaeries.com/', 'bf-home')
httpmocks.page(re.compile('http://www.*faeries-405/'), 'bf-405')
responses.add(responses.GET, re.compile(r'.*\.jpg'), body=b'\377\330',
content_type='image/jpeg')
cmd_ok("--numstrips", "2", "--baseurl", "bla", "--basepath",
str(tmp_path), "--output", "html", "--adult", "BloomingFaeries")
html = next((tmp_path / 'html').glob('*.html')).read_text()
assert "width=" not in html
@responses.activate
def test_fetch_indexed(self, tmpdir):
httpmocks.xkcd()
cmd_ok("-n", "2", "-v", "-b", str(tmpdir), "xkcd:303")
@responses.activate
def test_fetch_all_existing(self, tmp_path):
httpmocks.xkcd()
xkcd = tmp_path / 'xkcd'
xkcd.mkdir()
other = tmp_path / 'randomdir'
other.mkdir()
cmd_ok('-v', '-b', str(tmp_path), '@')
assert len(list(xkcd.glob('*'))) == 2
assert len(list(other.glob('*'))) == 0
@responses.activate
def test_json_page_key_bounce_and_multi_image(self, tmpdir):
httpmocks.page(re.compile(r'.*com/$'), 'zp-home')
httpmocks.page(re.compile(r'.*com/comic/missing/$'), 'zp-223')
httpmocks.page(re.compile(r'.*com/comic/lifejacket/$'), 'zp-222')
httpmocks.jpeg(re.compile(r'https://cdn-.*\.jpg'))
cmd_ok("-v", "-b", str(tmpdir), "-o", "json", "ZenPencils")
directory = tmpdir.join('ZenPencils')
f = directory.join('dosage.json').open(encoding='utf-8')
data = json.load(f)
f.close()
pages = data['pages']
assert len(pages) == 1
page = list(pages.keys())[0]
assert page == 'https://zenpencils.com/comic/missing/'
images = data['pages'][page]['images']
assert len(images) == 2
for imgfile in images.values():
assert directory.join(imgfile).check(file=1) |
get next | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._access_review_schedule_definitions_assigned_for_my_approval_operations import build_list_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class AccessReviewScheduleDefinitionsAssignedForMyApprovalOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.authorization.v2021_03_01_preview.aio.AuthorizationManagementClient`'s
:attr:`access_review_schedule_definitions_assigned_for_my_approval` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
self._api_version = input_args.pop(0) if input_args else kwargs.pop("api_version")
@distributed_trace
def list(
self, filter: Optional[str] = None, **kwargs: Any
) -> AsyncIterable["_models.AccessReviewScheduleDefinition"]:
"""Get access review instances assigned for my approval.
:param filter: The filter to apply on the operation. Other than standard filters, one custom
filter option is supported : 'assignedToMeToReview()'. When one specified
$filter=assignedToMeToReview(), only items that are assigned to the calling user to review are
returned. Default value is None.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AccessReviewScheduleDefinition or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.authorization.v2021_03_01_preview.models.AccessReviewScheduleDefinition]
:raises ~azure.core.exceptions.HttpResponseError:
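        Example (sketch; assumes an ``AuthorizationManagementClient`` instance named ``client``)::
            definitions = client.access_review_schedule_definitions_assigned_for_my_approval.list(
                filter="assignedToMeToReview()"
            )
            async for definition in definitions:
                print(definition)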
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop(
"api_version", _params.pop("api-version", self._api_version or "2021-03-01-preview")
)
cls: ClsType[_models.AccessReviewScheduleDefinitionListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
filter=filter,
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
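        # Deserialize one page of results and return the next-link token together with the page's items.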
async def extract_data(pipeline_response):
deserialized = self._deserialize("AccessReviewScheduleDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
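        # Fetch a single page (the initial request or a next link) and raise on any non-200 response.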
async def METHOD_NAME(next_link=None):
request = prepare_request(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDefinition, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(METHOD_NAME, extract_data)
list.metadata = {"url": "/providers/Microsoft.Authorization/accessReviewScheduleDefinitions"} |
main | #!/usr/bin/env python3
import find_clang
import sys
import clang.cindex
import time
import os
structs = []
extrastructs = []
def valid_spelling(spelling):
return spelling and not spelling.startswith("(")
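# Emit a templated C++ copy<Name>(dest, src) function for a struct declaration; with anonymousUnion=True
# only the per-field copy statements are printed and the caller supplies the surrounding function.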
def build_struct(cursor, anonymousUnion=False):
if not anonymousUnion:
structs.append(cursor.spelling)
print("template <class A, class B>\nvoid copy%s(A * dest, B * src)\n{" % cursor.spelling)
for c in cursor.get_children():
if c.kind == clang.cindex.CursorKind.UNION_DECL:
if valid_spelling(c.spelling):
raise Exception("Cannot handle non anonymous unions")
copied_union_member = False
for uc in c.get_children():
if not valid_spelling(uc.spelling) or uc.kind == clang.cindex.CursorKind.PACKED_ATTR:
# Ignore
pass
else:
                    # By default we copy only the first member of a union and warn if there are more
                    # members (declare the other members NOBACKUP)
if copied_union_member:
print("Warning more than one union member (%s) in anynomous union inside struct %s, consider NOBACKUP statements" % (uc.spelling, cursor.spelling), file=sys.stderr)
else:
copy_decl(uc, uc.spelling)
copied_union_member = True
elif c.kind == clang.cindex.CursorKind.FIELD_DECL:
copy_decl(c, c.spelling)
if not anonymousUnion:
print("}\n")
def build(cursor):
result = []
for c in cursor.get_children():
if c.location.file.name == sys.argv[1]:
if c.kind == clang.cindex.CursorKind.STRUCT_DECL:
build_struct(c)
for c, spelling in extrastructs:
print("template <class A, class B>\nvoid copy%s(A * dest, B * src)\n{" % spelling)
build_struct(c, True)
print("}\n")
return result
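# Emit the copy statement for one field: a loop of copyX() calls for arrays of known structs, memcpy for
# other arrays, a generated helper for inline anonymous structs, copyX() for nested known structs, and a
# plain assignment otherwise.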
def copy_decl(c, spelling):
children = [ch for ch in c.get_children()]
if c.type.get_array_size() > 0:
if c.type.get_array_element_type().spelling in structs:
print(" for (int i=0; i<%d; i++) {" % c.type.get_array_size())
print(" copy%s(&dest->%s[i], &src->%s[i]);" % (c.type.get_array_element_type().spelling, spelling, spelling))
print(" }")
else:
print(" memcpy(dest->%s, src->%s, sizeof(dest->%s));" % (spelling, spelling, spelling))
elif len(children) == 1 and children[0].kind == clang.cindex.CursorKind.STRUCT_DECL and not valid_spelling(children[0].spelling):
# inline declared structs
if valid_spelling(c.semantic_parent.spelling):
spelling_func = c.semantic_parent.spelling + "_" + spelling
else:
spelling_func = c.semantic_parent.semantic_parent.spelling + "_" + spelling
extrastructs.append((children[0], spelling_func))
print(" copy%s(&dest->%s, &src->%s);" % (spelling_func, spelling, spelling))
elif c.type.get_declaration().spelling in structs:
print(" copy%s(&dest->%s, &src->%s);" % (c.type.get_declaration().spelling, spelling, spelling))
else:
print(" dest->%s = src->%s;" % (spelling, spelling))
def header():
print("// This file was auto-generated by %s script on %s. Do not edit this file!\n\n\n" % (os.path.basename(sys.argv[0]), time.asctime()))
def print_translation_unit_diags(diags, prefix=''):
for diag in diags:
print(prefix + str(diag), file=sys.stderr)
print_translation_unit_diags(diag.children, ' ' + prefix)
def METHOD_NAME():
if not find_clang.initLibClang():
sys.exit(-1)
index = find_clang.index
args = ['-x', 'c++', '-std=c++11'] + sys.argv[2:]
if find_clang.builtin_hdr_path:
args.append("-I" + find_clang.builtin_hdr_path)
translation_unit = index.parse(sys.argv[1], args)
if translation_unit.diagnostics:
print_translation_unit_diags(translation_unit.diagnostics)
sys.exit(-1)
header()
build(translation_unit.cursor)
if __name__ == "__main__":
METHOD_NAME() |
test clshxma | from pathlib import Path
from larch.io import read_ascii, guess_beamline, guess_filereader, read_fdmnes
base_dir = Path(__file__).parent.parent.resolve()
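# Shared helper: read a beamline example file, guess its beamline class from the header, and return the
# instantiated beamline object plus the derived array labels (and optionally the raw data group).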
def _tester(fname, return_group=False):
fname = base_dir / 'examples' / 'xafsdata' / 'beamlines' / fname
group = read_ascii(fname)
cls = guess_beamline(group.header)
bldat = cls(group.header)
labels = bldat.get_array_labels()
print(fname, cls.__name__, len(labels), group.data.shape, labels)
if return_group:
return bldat, labels, group
else:
return bldat, labels
def test_apsxsd_new(fname='APS9BM_2019.dat'):
bldat, labels = _tester(fname)
assert('aps xsd' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(labels[0] == 'mono_energy')
assert(labels[1].startswith('scaler_pre'))
assert(labels[4].startswith('i0'))
def test_apsxsd20id_newer(fname='APS20ID_2022.dat'):
bldat, labels = _tester(fname)
assert('aps xsd' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(labels[0].startswith('mono_energy'))
assert(labels[1].startswith('scaler_pre'))
assert(labels[-1] == "xsp3_4_total")
assert('it' in labels)
def test_apsxsd20id_new(fname='APS20ID_2018.dat'):
bldat, labels = _tester(fname)
assert('aps xsd' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(labels[0].startswith('mono_energy'))
assert(labels[1].startswith('scaler_pre'))
assert('it' in labels)
def test_apsxsd_old(fname='APS20BM_2001.dat'):
bldat, labels = _tester(fname)
assert('aps xsd' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(labels[0] == 'mono_energy')
assert(labels[1].startswith('scaler_pre'))
assert(labels[2] == 'i0')
def test_apsmrcat(fname='APS10BM_2019.dat'):
bldat, labels = _tester(fname)
assert('aps mrcat' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) == 5)
assert(labels[0] == 'energy')
assert(labels[1] == 'io')
assert(labels[2] == 'it')
assert(labels[3] == 'iref')
def test_apsgse(fname='APS13ID_2019.dat'):
bldat, labels = _tester(fname)
assert('gse epicsscan' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) == 21)
assert(labels[0] == 'energy')
assert(labels[1] == 'tscaler')
assert(labels[2] == 'i0')
assert(labels[3] == 'i1')
assert(labels[4] == 'i2')
assert(labels[5] == 'mn_ka_mca1')
def test_apsgse_old(fname='APS13ID_2008.dat'):
bldat, labels = _tester(fname)
assert('gse epicsscan' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) == 4)
assert(labels[0] == 'energy')
assert(labels[1] == 'scaler_count_time')
assert(labels[2] == 'i0')
assert(labels[3] == 'i1')
def test_aps12bm(fname='APS12BM_2019.dat'):
bldat, labels = _tester(fname)
assert('12bm' in bldat.name.lower())
assert(1 == bldat.energy_column)
assert('keV' == bldat.energy_units)
assert(len(labels) == 26)
assert(labels[0] == 'energy')
assert(labels[1] == 'sec')
assert(labels[2] == 'mononrg')
assert(labels[3] == 'i0')
assert(labels[4] == 'i1')
assert(labels[5] == 'i2')
def test_aps9bm2006(fname='APS9BM_2006.dat'):
bldat, labels = _tester(fname)
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 5)
assert(labels[0] == 'energy')
assert('i0' in labels)
def test_esrfsnbl(fname='ESRF_SNBL_2013.dat'):
bldat, labels = _tester(fname)
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 5)
assert(labels[0] == 'mon')
assert(labels[1] == 'det1')
assert(labels[2] == 'det2')
def test_nsls2_6bm(fname='NSLS6BM_2019.dat'):
bldat, labels = _tester(fname)
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 5)
assert(labels[0] == 'energy')
assert(labels[1] == 'requested_energy')
assert(labels[2] == 'measurement_time')
assert(labels[3] == 'xmu')
assert(labels[4] == 'i0')
def test_nsls2_8id(fname='NSLS8ID_2019.dat'):
bldat, labels = _tester(fname)
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 5)
assert(labels[0] == 'energy')
assert(labels[1] == 'i0')
assert(labels[2] == 'ir')
assert(labels[3] == 'it')
assert(labels[4] == 'iff')
def test_ssrl1(fname='SSRL1_2006.dat'):
bldat, labels = _tester(fname)
assert(3 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) == 6)
assert(labels[0] == 'real_time_clock')
assert(labels[1] == 'requested_energy')
assert(labels[2] == 'achieved_energy')
assert(labels[3] == 'i0')
assert(labels[4] == 'i1')
assert(labels[5] == 'i2')
def test_ssrl2(fname='SSRLmicro_2008.dat'):
bldat, labels = _tester(fname)
assert(2 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 65)
assert(labels[0] == 'real_time_clock')
assert(labels[1] == 'requested_energy')
assert(labels[2] == 'i0')
assert(labels[3] == 'i1')
assert(labels[4] == 'i2')
assert(labels[5] == 'sca1_1')
def test_nslsxdac(fname='NSLS_XDAC_2011.dat'):
bldat, labels = _tester(fname)
assert(1 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 15)
assert(labels[0] == 'energy')
assert(labels[1] == 'i0')
assert(labels[2] == 'it')
assert(labels[3] == 'ifch1')
assert(labels[4] == 'ifch2')
def METHOD_NAME(fname='CLSHXMA.dat'):
bldat, labels = _tester(fname)
assert(4 == bldat.energy_column)
assert('eV' == bldat.energy_units)
assert(len(labels) > 10)
assert(labels[0] == 'event_id')
assert(labels[1] == 'absenergyname')
assert(labels[2] == 'energyfeedback')
assert(labels[3] == 'energyachieved')
assert(labels[4] == 'detector1')
def test_kekpf12c(fname='PFBL12C_2005.dat'):
bldat, labels = _tester(fname)
assert(2 == bldat.energy_column)
assert('deg' == bldat.energy_units)
assert(bldat.mono_dspace > 3)
assert(len(labels) == 5)
def test_one_line_header(fname='ESRF_BM08_LISA_2021.dat'):
bldat, labels = _tester(fname)
assert(labels == ['ebraggenergy', 'i0_eh1', 'i1_eh1', 'mu', 'i1_eh2', 'ir_eh2', 'mu_ref'])
def test_zero_line_header(fname='generic_columns_no_header.dat'):
bldat, labels, group = _tester(fname, return_group=True)
assert(group.array_labels == ['col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7'])
def test_fdmnes(fnames=['FDMNES_2022_Mo2C_out.dat', 'FDMNES_2022_Mo2C_out_conv.dat']):
for fname in fnames:
fname = base_dir / 'examples' / 'xafsdata' / 'beamlines' / fname
assert(guess_filereader(fname) == 'read_fdmnes')
group = read_fdmnes(fname)
assert(group.array_labels == ['energy', 'xanes'])
assert(group.header_dict['E_edge'] == 20000.0)
if __name__ == '__main__':
test_apsxsd_new()
test_apsxsd_old()
test_apsgse()
test_apsgse_old()
test_apsmrcat()
test_aps12bm()
test_aps9bm2006()
test_esrfsnbl()
test_nsls2_8id()
test_nsls2_6bm()
test_ssrl1()
test_ssrl2()
test_nslsxdac()
METHOD_NAME()
test_kekpf12c()
test_one_line_header()
test_zero_line_header()
test_fdmnes() |
test profile has builtin blacklist | #
# Copyright (c) 2016 Hewlett-Packard Development Company, L.P.
#
# SPDX-License-Identifier: Apache-2.0
from unittest import mock
import testtools
from stevedore import extension
from bandit.blacklists import utils
from bandit.core import extension_loader
from bandit.core import issue
from bandit.core import test_properties as test
from bandit.core import test_set
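# Fake plugin used by these tests: it registers blacklist entries for telnet imports (B401) and marshal
# deserialization (B302) under the Import, ImportFrom and Call node types.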
@test.checks("Str")
@test.test_id("B000")
def test_plugin():
sets = []
sets.append(
utils.build_conf_dict(
"telnet",
"B401",
issue.Cwe.CLEARTEXT_TRANSMISSION,
["telnetlib"],
"A telnet-related module is being imported. Telnet is "
"considered insecure. Use SSH or some other encrypted protocol.",
"HIGH",
)
)
sets.append(
utils.build_conf_dict(
"marshal",
"B302",
issue.Cwe.DESERIALIZATION_OF_UNTRUSTED_DATA,
["marshal.load", "marshal.loads"],
"Deserialization with the marshal module is possibly dangerous.",
)
)
return {"Import": sets, "ImportFrom": sets, "Call": sets}
class BanditTestSetTests(testtools.TestCase):
def _make_test_manager(self, plugin):
return extension.ExtensionManager.make_test_instance(
[extension.Extension("test_plugin", None, test_plugin, None)]
)
def setUp(self):
super().setUp()
mngr = self._make_test_manager(mock.Mock)
self.patchExtMan = mock.patch("stevedore.extension.ExtensionManager")
self.mockExtMan = self.patchExtMan.start()
self.mockExtMan.return_value = mngr
self.old_ext_man = extension_loader.MANAGER
extension_loader.MANAGER = extension_loader.Manager()
self.config = mock.MagicMock()
self.config.get_setting.return_value = None
def tearDown(self):
self.patchExtMan.stop()
super().tearDown()
extension_loader.MANAGER = self.old_ext_man
def test_has_defaults(self):
ts = test_set.BanditTestSet(self.config)
self.assertEqual(1, len(ts.get_tests("Str")))
def test_profile_include_id(self):
profile = {"include": ["B000"]}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(1, len(ts.get_tests("Str")))
def test_profile_exclude_id(self):
profile = {"exclude": ["B000"]}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(0, len(ts.get_tests("Str")))
def test_profile_include_none(self):
profile = {"include": []} # same as no include
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(1, len(ts.get_tests("Str")))
def test_profile_exclude_none(self):
profile = {"exclude": []} # same as no exclude
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(1, len(ts.get_tests("Str")))
def METHOD_NAME(self):
ts = test_set.BanditTestSet(self.config)
self.assertEqual(1, len(ts.get_tests("Import")))
self.assertEqual(1, len(ts.get_tests("ImportFrom")))
self.assertEqual(1, len(ts.get_tests("Call")))
def test_profile_exclude_builtin_blacklist(self):
profile = {"exclude": ["B001"]}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(0, len(ts.get_tests("Import")))
self.assertEqual(0, len(ts.get_tests("ImportFrom")))
self.assertEqual(0, len(ts.get_tests("Call")))
def test_profile_exclude_builtin_blacklist_specific(self):
profile = {"exclude": ["B302", "B401"]}
ts = test_set.BanditTestSet(self.config, profile)
self.assertEqual(0, len(ts.get_tests("Import")))
self.assertEqual(0, len(ts.get_tests("ImportFrom")))
self.assertEqual(0, len(ts.get_tests("Call")))
def test_profile_filter_blacklist_none(self):
ts = test_set.BanditTestSet(self.config)
blacklist = ts.get_tests("Import")[0]
self.assertEqual(2, len(blacklist._config["Import"]))
self.assertEqual(2, len(blacklist._config["ImportFrom"]))
self.assertEqual(2, len(blacklist._config["Call"]))
def test_profile_filter_blacklist_one(self):
profile = {"exclude": ["B401"]}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests("Import")[0]
self.assertEqual(1, len(blacklist._config["Import"]))
self.assertEqual(1, len(blacklist._config["ImportFrom"]))
self.assertEqual(1, len(blacklist._config["Call"]))
def test_profile_filter_blacklist_include(self):
profile = {"include": ["B001", "B401"]}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests("Import")[0]
self.assertEqual(1, len(blacklist._config["Import"]))
self.assertEqual(1, len(blacklist._config["ImportFrom"]))
self.assertEqual(1, len(blacklist._config["Call"]))
def test_profile_filter_blacklist_all(self):
profile = {"exclude": ["B401", "B302"]}
ts = test_set.BanditTestSet(self.config, profile)
        # If there is no blacklist data for a node type then we won't add a
        # blacklist test to it, as this would be pointless.
self.assertEqual(0, len(ts.get_tests("Import")))
self.assertEqual(0, len(ts.get_tests("ImportFrom")))
self.assertEqual(0, len(ts.get_tests("Call")))
def test_profile_blacklist_compat(self):
data = [
utils.build_conf_dict(
"marshal",
"B302",
issue.Cwe.DESERIALIZATION_OF_UNTRUSTED_DATA,
["marshal.load", "marshal.loads"],
(
"Deserialization with the marshal module is possibly "
"dangerous."
),
)
]
profile = {"include": ["B001"], "blacklist": {"Call": data}}
ts = test_set.BanditTestSet(self.config, profile)
blacklist = ts.get_tests("Call")[0]
self.assertNotIn("Import", blacklist._config)
self.assertNotIn("ImportFrom", blacklist._config)
self.assertEqual(1, len(blacklist._config["Call"])) |
test get segment to oid mapping with | from mock import *
from .gp_unittest import *
from gpcheckcat_modules.repair_missing_extraneous import RepairMissingExtraneous
class RepairMissingExtraneousTestCase(GpTestCase):
def setUp(self):
self.all_seg_ids = [-1,0,1,2,3]
self.table_name = 'pg_attribut"e'
self.catalog_table_obj = Mock(spec=['getTableName',
'tableHasConsistentOids',
'getPrimaryKey'])
self.catalog_table_obj.getTableName.return_value = self.table_name
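    # Each issue tuple carries the catalog row identifier(s) first, then the issue type ('missing' or
    # 'extra'), then the list of segment ids reporting that issue.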
def test_get_segment_to_oid_mapping_with_both_extra_and_missing(self):
issues = [(49401, "extra", [1,2]),
(49401, "extra", [1,2]),
(49402, "missing", [2,3]),
(49403, "extra", [2,3]),
(49404, "missing", [1]),
(49405, "extra", [2,3]),
(49406, "missing", [2])]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, "attrelid")
repair_sql_contents = self.subject.get_segment_to_oid_mapping(self.all_seg_ids)
self.assertEqual(len(repair_sql_contents), 5)
self.assertEqual(repair_sql_contents[-1], set([49402, 49404, 49406]))
self.assertEqual(repair_sql_contents[0], set([49402, 49404, 49406]))
self.assertEqual(repair_sql_contents[1], set([49401, 49402, 49406]))
self.assertEqual(repair_sql_contents[2], set([49401, 49403, 49404, 49405]))
self.assertEqual(repair_sql_contents[3], set([49403, 49404, 49405, 49406]))
def METHOD_NAME(self):
issues = [(49401, 'cmax', "extra", [1,2]),
(49401, 'cmax', "extra", [1,2]),
(49403, 'cmax', "extra", [2,3]),
(49405, 'cmax', "extra", [2,3])]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, "attrelid")
repair_sql_contents = self.subject.get_segment_to_oid_mapping(self.all_seg_ids)
self.assertEqual(len(repair_sql_contents), 3)
self.assertEqual(repair_sql_contents[1], set([49401]))
self.assertEqual(repair_sql_contents[2], set([49401, 49403, 49405]))
self.assertEqual(repair_sql_contents[3], set([49403, 49405]))
def test_get_segment_to_oid_mapping_with_only_missing(self):
issues = [(49401, 'cmax', "missing", [1,2]),
(49401, 'cmax', "missing", [1,2]),
(49403, 'cmax', "missing", [2,3]),
(49405, 'cmax', "missing", [2,3])]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, "attrelid")
repair_sql_contents = self.subject.get_segment_to_oid_mapping(self.all_seg_ids)
self.assertEqual(len(repair_sql_contents), 4)
self.assertEqual(repair_sql_contents[-1], set([49401, 49403, 49405]))
self.assertEqual(repair_sql_contents[0], set([49401, 49403, 49405]))
self.assertEqual(repair_sql_contents[1], set([49403, 49405]))
self.assertEqual(repair_sql_contents[3], set([49401]))
def test_get_delete_sql__with_multiple_oids(self):
self.subject = RepairMissingExtraneous(self.catalog_table_obj, None, "attrelid")
oids = [1,3,4]
delete_sql = self.subject.get_delete_sql(oids)
self.assertEqual(delete_sql, 'BEGIN;set allow_system_table_mods=true;'
'delete from "pg_attribut""e" where "attrelid" in (1,3,4);COMMIT;')
def test_get_delete_sql__with_one_oid(self):
self.subject = RepairMissingExtraneous(self.catalog_table_obj, None, "attrelid")
oids = [5]
delete_sql = self.subject.get_delete_sql(oids)
self.assertEqual(delete_sql, 'BEGIN;set allow_system_table_mods=true;'
'delete from "pg_attribut""e" where "attrelid" in (5);COMMIT;')
def test_get_delete_sql__with_one_pkey_one_issue(self):
issues = [('!!', 'cmax', "extra", [1,2]),]
self.catalog_table_obj.tableHasConsistentOids.return_value = False
self.catalog_table_obj.getPrimaryKey.return_value = ["oprname"]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, None)
oids = None
delete_sql = self.subject.get_delete_sql(oids)
self.assertEqual(delete_sql, 'BEGIN;set allow_system_table_mods=true;'
'delete from "pg_attribut""e" where oprname = \'!!\';COMMIT;')
def test_get_delete_sql__with_one_pkey_mult_issues(self):
issues = [('!!', 'cmax', "missing", [1,2]),
('8!', 'cmax', "extra", [1,2]),
('*!', 'cmax', "missing", [2,3]),
('!!', 'cmax', "extra", [2,3])]
self.catalog_table_obj.tableHasConsistentOids.return_value = False
self.catalog_table_obj.getPrimaryKey.return_value = ["oprname"]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, None)
oids = None
delete_sql = self.subject.get_delete_sql(oids)
self.assertEqual(delete_sql, 'BEGIN;set allow_system_table_mods=true;'
'delete from "pg_attribut""e" where oprname = \'!!\';'
'delete from "pg_attribut""e" where oprname = \'8!\';'
'delete from "pg_attribut""e" where oprname = \'*!\';'
'delete from "pg_attribut""e" where oprname = \'!!\';COMMIT;')
def test_get_delete_sql__with_multiple_pkey_mult_issue(self):
issues = [('!!', 48920, 0, 1, 'cmax', "missing", [1,2]),
('8!', 15, 1, 3, 'cmax', "extra", [1,2]),
('*!', 48920, 2, 3, 'cmax', "missing", [2,3]),
('!!', 11, 2, 3, 'cmax', "extra", [2,3])]
self.catalog_table_obj.tableHasConsistentOids.return_value = False
self.catalog_table_obj.getPrimaryKey.return_value = ["oprname",
"oprnamespace",
"oprleft",
"oprright"]
self.subject = RepairMissingExtraneous(self.catalog_table_obj, issues, None)
oids = None
delete_sql = self.subject.get_delete_sql(oids)
self.assertEqual(delete_sql, 'BEGIN;set allow_system_table_mods=true;'
'delete from "pg_attribut""e" where oprname = \'!!\' and oprnamespace = \'48920\''
' and oprleft = \'0\' and oprright = \'1\';'
'delete from "pg_attribut""e" where oprname = \'8!\' and oprnamespace = \'15\''
' and oprleft = \'1\' and oprright = \'3\';'
'delete from "pg_attribut""e" where oprname = \'*!\' and oprnamespace = \'48920\''
' and oprleft = \'2\' and oprright = \'3\';'
'delete from "pg_attribut""e" where oprname = \'!!\' and oprnamespace = \'11\''
' and oprleft = \'2\' and oprright = \'3\';'
'COMMIT;')
if __name__ == '__main__':
run_tests() |
test missing config | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import hashlib
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import util
from buildbot.config import ConfigErrors
from buildbot.interfaces import LatentWorkerFailedToSubstantiate
from buildbot.test.fake import fakemaster
from buildbot.test.fake import httpclientservice as fakehttpclientservice
from buildbot.test.fake.fakebuild import FakeBuildForRendering as FakeBuild
from buildbot.test.fake.fakeprotocol import FakeTrivialConnection as FakeBot
from buildbot.test.reactor import TestReactorMixin
from buildbot.worker import upcloud
# Please see https://developers.upcloud.com/ for details
upcloudStorageTemplatePayload = {
'storages': {
'storage': [
{
'access': 'public',
'title': 'rendered:test-image',
'uuid': '8b47d21b-b4c3-445d-b75c-5a723ff39681'
}
]
}
}
upcloudServerCreatePayload = {
'server': {
'hostname': 'worker',
'password': 'supersecret',
'state': 'maintenance',
'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
'title': 'buildbot-worker-87de7e',
'plan': '1xCPU-1GB'
}
}
upcloudServerStartedPayload = {
'server': {
'hostname': 'worker',
'password': 'supersecret',
'state': 'started',
'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
'title': 'buildbot-worker-87de7e',
'plan': '1xCPU-1GB'
}
}
upcloudServerStoppedPayload = {
'server': {
'hostname': 'worker',
'password': 'supersecret',
'state': 'stopped',
'uuid': '438b5b08-4147-4193-bf64-a5318f51d3bd',
'title': 'buildbot-worker-87de7e',
'plan': '1xCPU-1GB'
}
}
class TestUpcloudWorker(TestReactorMixin, unittest.TestCase):
worker = None
def setUp(self):
self.setup_test_reactor()
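    # Build a latent worker wired to a fake HTTP client service so no real UpCloud API calls are made.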
@defer.inlineCallbacks
def setupWorker(self, *args, **kwargs):
worker = upcloud.UpcloudLatentWorker(
*args, api_username='test-api-user', api_password='test-api-password', **kwargs)
master = fakemaster.make_master(self, wantData=True)
self._http = worker.client = yield fakehttpclientservice.HTTPClientService.getService(
master, self, upcloud.DEFAULT_BASE_URL, auth=('test-api-user', 'test-api-password'),
debug=False)
worker.setServiceParent(master)
yield master.startService()
self.masterhash = hashlib.sha1(util.unicode2bytes(master.name)).hexdigest()[:6]
self.addCleanup(master.stopService)
self.worker = worker
return worker
def test_instantiate(self):
worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image',
api_username='test-api-user',
api_password='test-api-password')
self.failUnlessIsInstance(worker, upcloud.UpcloudLatentWorker)
def METHOD_NAME(self):
worker = None
with self.assertRaises(ConfigErrors):
worker = upcloud.UpcloudLatentWorker('test-worker')
with self.assertRaises(ConfigErrors):
worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image')
with self.assertRaises(ConfigErrors):
worker = upcloud.UpcloudLatentWorker('test-worker', image='test-image',
api_username='test-api-user')
self.assertTrue(worker is None)
@defer.inlineCallbacks
def test_missing_image(self):
worker = yield self.setupWorker('worker', image='no-such-image')
self._http.expect(method='get', ep='/storage/template',
content_json=upcloudStorageTemplatePayload)
with self.assertRaises(LatentWorkerFailedToSubstantiate):
yield worker.substantiate(None, FakeBuild())
@defer.inlineCallbacks
def test_start_worker(self):
worker = yield self.setupWorker('worker', image='test-image')
# resolve image to storage uuid
self._http.expect(method='get', ep='/storage/template',
content_json=upcloudStorageTemplatePayload)
# actually start server
self._http.expect(method='post', ep='/server', params=None, data=None, json={'server':
{'zone': 'de-fra1', 'title': 'buildbot-worker-87de7e', 'hostname': 'worker',
'user_data': '',
'login_user': {'username': 'root', 'ssh_keys': {'ssh_key': []}},
'password_delivery': 'none',
'storage_devices': {'storage_device': [
{'action': 'clone', 'storage': '8b47d21b-b4c3-445d-b75c-5a723ff39681', 'title':
f'buildbot-worker-{self.masterhash}', 'size': 10, 'tier': 'maxiops'}]},
'plan': '1xCPU-1GB'}},
content_json=upcloudServerCreatePayload, code=202)
# determine it's up & running
self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
content_json=upcloudServerStartedPayload)
# get root password
self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
content_json=upcloudServerStartedPayload)
# stop server
self._http.expect(method='post', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd/stop',
json={'stop_server': {'stop_type': 'hard', 'timeout': '1'}},
content_json=upcloudServerStartedPayload)
# now it's stopped
self._http.expect(method='get', ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd',
content_json=upcloudServerStoppedPayload)
# then delete it
self._http.expect(method='delete',
ep='/server/438b5b08-4147-4193-bf64-a5318f51d3bd?storages=1', code=204)
d = worker.substantiate(None, FakeBuild())
yield worker.attached(FakeBot())
yield d |
test tno at lon lat | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Unit tests for the TotalNetworkObjects and
AggregateNetworkObjects network features.
"""
import pytest
from flowmachine.core import make_spatial_unit
from flowmachine.core.errors import InvalidSpatialUnitError
from flowmachine.features import TotalNetworkObjects, AggregateNetworkObjects
def METHOD_NAME(get_dataframe):
"""
Regression test for #108. TNO should work at lon-lat level.
"""
tno = TotalNetworkObjects(
start="2016-01-01",
stop="2016-01-07",
network_object=make_spatial_unit("versioned-cell"),
spatial_unit=make_spatial_unit("lon-lat"),
)
assert tno.get_dataframe().value.sum() == 330
@pytest.mark.parametrize(
"stat, expected",
[
("avg", 30.541666666666668),
("max", 38),
("min", 21),
("median", 31.0),
("mode", 27),
("stddev", 4.096437122848253),
("variance", 16.780797101449277),
],
)
def test_aggregate_returns_correct_values(stat, expected, get_dataframe):
"""
AggregateNetworkObjects returns correct values.
"""
instance = AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", table="calls", total_by="hour"
),
statistic=stat,
)
df = get_dataframe(instance)
#
# This will compare the very first
# value with an independently
# computed value.
#
assert pytest.approx(df.value[0]) == expected
def test_count_returns_correct_values(get_dataframe):
"""
TotalNetworkObjects returns correct values.
"""
instance = TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", table="calls", total_by="hour"
)
df = get_dataframe(instance)
#
# This will compare the very first
# value with an independently
# computed value.
#
assert df.value[34] == 31
def test_bad_total_by():
"""Test value errors are raised for bad 'total_by' param"""
with pytest.raises(ValueError):
TotalNetworkObjects(
start="2016-01-01",
stop="2016-12-30",
table="calls",
total_by="BAD_TOTAL_BY",
)
@pytest.mark.parametrize(
"bad_arg, spatial_unit_type",
[("spatial_unit", "cell"), ("network_object", "lon-lat")],
)
def test_bad_spatial_units(bad_arg, spatial_unit_type):
"""
Test InvalidSpatialUnitErrors are raised for bad 'network_object' or
'spatial_unit' params.
"""
su = make_spatial_unit(spatial_unit_type)
with pytest.raises(InvalidSpatialUnitError):
TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", table="calls", **{bad_arg: su}
)
def test_bad_aggregate_by():
"""Test that invalid 'aggregate_by' param raises value error."""
with pytest.raises(ValueError):
AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", table="calls"
),
aggregate_by="BAD_AGGREGATE_BY",
)
def test_bad_statistic():
"""Test that invalid stat for aggregate raises value error."""
with pytest.raises(ValueError):
AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", table="calls"
),
statistic="BAD STAT",
)
def test_median_returns_correct_values(get_dataframe):
"""
features.network.TotalNetworkObjects median aggregate returns correct values.
"""
instance = AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
table="calls",
total_by="hour",
network_object=make_spatial_unit("versioned-site"),
),
aggregate_by="day",
statistic="median",
)
#
# This will compare the very first
# value with an independently
# computed value.
#
assert get_dataframe(instance).head(1)["value"][0] == 25
def test_mean_returns_correct_values(get_dataframe):
"""
features.network.TotalNetworkObjects aggregation returns correct values.
"""
instance = AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
start="2016-01-01",
stop="2016-12-30",
total_by="hour",
network_object=make_spatial_unit("versioned-site"),
),
aggregate_by="day",
)
#
# This will compare the very first
# value with an independently
# computed value.
#
assert get_dataframe(instance).head(1)["value"][0] == pytest.approx(28.7916666666)
@pytest.mark.parametrize(
"total_by, aggregate_by_expected",
[
("second", "minute"),
("minute", "hour"),
("hour", "day"),
("day", "month"),
("month", "year"),
("year", "century"),
],
)
def test_period_agg_default(total_by, aggregate_by_expected):
"""Correct aggregation period is deduced."""
inst = AggregateNetworkObjects(
total_network_objects=TotalNetworkObjects(
start="2016-01-01", stop="2016-12-30", total_by=total_by
)
)
assert inst.aggregate_by == aggregate_by_expected |
strip | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests the node stripping tool."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.framework import graph_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
from tensorflow.python.tools import strip_unused_lib
class StripUnusedTest(test_util.TensorFlowTestCase):
def testStripUnused(self):
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that has a single constant containing 1.0,
# and that then multiplies it by 2.
with ops.Graph().as_default():
constant_node = constant_op.constant(1.0, name="constant_node")
wanted_input_node = math_ops.subtract(constant_node,
3.0,
name="wanted_input_node")
output_node = math_ops.multiply(
wanted_input_node, 2.0, name="output_node")
math_ops.add(output_node, 2.0, name="later_node")
sess = session.Session()
output = self.evaluate(output_node)
self.assertNear(-4.0, output, 0.00001)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
    # We save out the graph to disk, and then call the node stripping
    # routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_binary = False
output_binary = True
output_node_names = "output_node"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
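    # Local helper: strip the saved graph down to the given input node names, reusing the fixed paths,
    # flags and a float32 placeholder type.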
def METHOD_NAME(input_node_names):
strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
output_graph_path, output_binary,
input_node_names,
output_node_names,
dtypes.float32.as_datatype_enum)
with self.assertRaises(KeyError):
METHOD_NAME("does_not_exist")
with self.assertRaises(ValueError):
METHOD_NAME("wanted_input_node:0")
input_node_names = "wanted_input_node"
METHOD_NAME(input_node_names)
    # Now we make sure the unused nodes are gone, and that the graph still
    # produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(3, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("Add", node.op)
self.assertNotEqual("Sub", node.op)
if node.name == input_node_names:
self.assertTrue("shape" in node.attr)
with session.Session() as sess:
input_node = sess.graph.get_tensor_by_name("wanted_input_node:0")
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node, feed_dict={input_node: [10.0]})
self.assertNear(20.0, output, 0.00001)
def testStripUnusedMultipleInputs(self):
input_graph_name = "input_graph.pb"
output_graph_name = "output_graph.pb"
# We'll create an input graph that multiplies two input nodes.
with ops.Graph().as_default():
constant_node1 = constant_op.constant(1.0, name="constant_node1")
constant_node2 = constant_op.constant(2.0, name="constant_node2")
input_node1 = math_ops.subtract(constant_node1, 3.0, name="input_node1")
input_node2 = math_ops.subtract(constant_node2, 5.0, name="input_node2")
output_node = math_ops.multiply(
input_node1, input_node2, name="output_node")
math_ops.add(output_node, 2.0, name="later_node")
sess = session.Session()
output = self.evaluate(output_node)
self.assertNear(6.0, output, 0.00001)
graph_io.write_graph(sess.graph, self.get_temp_dir(), input_graph_name)
    # We save out the graph to disk, and then call the node stripping
    # routine.
input_graph_path = os.path.join(self.get_temp_dir(), input_graph_name)
input_binary = False
input_node_names = "input_node1,input_node2"
input_node_types = [
dtypes.float32.as_datatype_enum, dtypes.float32.as_datatype_enum
]
output_binary = True
output_node_names = "output_node"
output_graph_path = os.path.join(self.get_temp_dir(), output_graph_name)
strip_unused_lib.strip_unused_from_files(input_graph_path, input_binary,
output_graph_path, output_binary,
input_node_names,
output_node_names,
input_node_types)
    # Now we make sure the unused nodes are gone, and that the graph still
    # produces the expected result.
with ops.Graph().as_default():
output_graph_def = graph_pb2.GraphDef()
with open(output_graph_path, "rb") as f:
output_graph_def.ParseFromString(f.read())
_ = importer.import_graph_def(output_graph_def, name="")
self.assertEqual(3, len(output_graph_def.node))
for node in output_graph_def.node:
self.assertNotEqual("Add", node.op)
self.assertNotEqual("Sub", node.op)
if node.name == input_node_names:
self.assertTrue("shape" in node.attr)
with session.Session() as sess:
input_node1 = sess.graph.get_tensor_by_name("input_node1:0")
input_node2 = sess.graph.get_tensor_by_name("input_node2:0")
output_node = sess.graph.get_tensor_by_name("output_node:0")
output = sess.run(output_node,
feed_dict={input_node1: [10.0],
input_node2: [-5.0]})
self.assertNear(-50.0, output, 0.00001)
if __name__ == "__main__":
test.main() |
test adversarial trainer fbf pytorch fit and | # MIT License
#
# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest
import logging
import numpy as np
from art.defences.trainer import AdversarialTrainerFBFPyTorch
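# The FBF trainer is only wired up for PyTorch here; for other frameworks the fixture returns None and the
# tests log a warning and return early.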
@pytest.fixture()
def get_adv_trainer(framework, image_dl_estimator):
def _get_adv_trainer():
if framework == "keras":
trainer = None
if framework in ["tensorflow", "tensorflow2v1"]:
trainer = None
if framework == "pytorch":
classifier, _ = image_dl_estimator()
trainer = AdversarialTrainerFBFPyTorch(classifier, eps=0.05)
if framework == "scikitlearn":
trainer = None
return trainer
return _get_adv_trainer
@pytest.fixture()
def fix_get_mnist_subset(get_mnist_dataset):
(x_train_mnist, y_train_mnist), (x_test_mnist, y_test_mnist) = get_mnist_dataset
n_train = 100
n_test = 100
yield x_train_mnist[:n_train], y_train_mnist[:n_train], x_test_mnist[:n_test], y_test_mnist[:n_test]
@pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf")
def METHOD_NAME(get_adv_trainer, fix_get_mnist_subset):
(x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
x_test_mnist_original = x_test_mnist.copy()
trainer = get_adv_trainer()
if trainer is None:
logging.warning("Couldn't perform this test because no trainer is defined for this framework configuration")
return
predictions = np.argmax(trainer.predict(x_test_mnist), axis=1)
accuracy = np.sum(predictions == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0]
trainer.fit(x_train_mnist, y_train_mnist, nb_epochs=20)
predictions_new = np.argmax(trainer.predict(x_test_mnist), axis=1)
accuracy_new = np.sum(predictions_new == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0]
np.testing.assert_array_almost_equal(
float(np.mean(x_test_mnist_original - x_test_mnist)),
0.0,
decimal=4,
)
assert accuracy == 0.32
assert accuracy_new == 0.63
trainer.fit(x_train_mnist, y_train_mnist, nb_epochs=20, validation_data=(x_train_mnist, y_train_mnist))
@pytest.mark.skip_framework("tensorflow", "keras", "scikitlearn", "mxnet", "kerastf")
def test_adversarial_trainer_fbf_pytorch_fit_generator_and_predict(
get_adv_trainer, fix_get_mnist_subset, image_data_generator
):
(x_train_mnist, y_train_mnist, x_test_mnist, y_test_mnist) = fix_get_mnist_subset
x_test_mnist_original = x_test_mnist.copy()
generator = image_data_generator()
trainer = get_adv_trainer()
if trainer is None:
logging.warning("Couldn't perform this test because no trainer is defined for this framework configuration")
return
predictions = np.argmax(trainer.predict(x_test_mnist), axis=1)
accuracy = np.sum(predictions == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0]
trainer.fit_generator(generator=generator, nb_epochs=20)
predictions_new = np.argmax(trainer.predict(x_test_mnist), axis=1)
accuracy_new = np.sum(predictions_new == np.argmax(y_test_mnist, axis=1)) / x_test_mnist.shape[0]
np.testing.assert_array_almost_equal(
float(np.mean(x_test_mnist_original - x_test_mnist)),
0.0,
decimal=4,
)
assert accuracy == 0.32
assert accuracy_new > 0.2 |
restore tilt configurations | from app.models import BrewPiDevice, Beer, FermentationProfile
from gravity.models import GravitySensor, GravityLog, TiltTempCalibrationPoint, TiltGravityCalibrationPoint, \
TiltConfiguration, TiltBridge, IspindelConfiguration, IspindelGravityCalibrationPoint
from constance import config
def restore_brewpi_devices(obj_list:list, update:bool) -> list:
"""Loop through a list of BrewPiDevice object dicts, call each one's from_dict() method, and then save the object"""
restore_status = []
for obj_dict in obj_list:
device = BrewPiDevice.from_dict(obj_dict, update=update)
device.save()
restore_status.append({'uuid': device.uuid, 'success': True})
return restore_status
def restore_beers(obj_list:list, update:bool) -> list:
"""Loop through a list of Beer object dicts, call each one's from_dict() method, and then save the object"""
restore_status = []
for obj_dict in obj_list:
beer = Beer.from_dict(obj_dict, update=update)
beer.save()
restore_status.append({'uuid': beer.uuid, 'success': True})
return restore_status
def restore_fermentation_profiles(obj_list:list, update:bool) -> list:
"""Loop through a list of FermentationProfile object dicts, call each one's from_dict() method, and then save the
object. This also implicitly restores all associated FermentationProfilePoint objects."""
restore_status = []
for obj_dict in obj_list:
profile = FermentationProfile.load_from_dict(obj_dict, update=update)
restore_status.append({'uuid': profile.uuid, 'success': True})
return restore_status
# Gravity functions
def restore_gravity_sensors(obj_list:list, update:bool) -> list:
"""Loop through a list of GravitySensor object dicts, call each one's from_dict() method, and then save the object"""
restore_status = []
for obj_dict in obj_list:
sensor = GravitySensor.from_dict(obj_dict, update=update)
sensor.save()
restore_status.append({'uuid': sensor.uuid, 'success': True})
return restore_status
def restore_gravity_logs(obj_list:list, update:bool) -> list:
"""Loop through a list of GravityLog object dicts, call each one's from_dict() method, and then save the object"""
restore_status = []
for obj_dict in obj_list:
log = GravityLog.from_dict(obj_dict, update=update)
log.save()
restore_status.append({'uuid': log.uuid, 'success': True})
return restore_status
def restore_tilt_temp_calibration_points(obj_list:list, update:bool) -> list:
"""Loop through a list of TiltTempCalibrationPoint object dicts, call each one's from_dict() method, and then save
the object"""
restore_status = []
for obj_dict in obj_list:
point = TiltTempCalibrationPoint.from_dict(obj_dict, update=update)
point.save()
restore_status.append({'uuid': point.uuid, 'success': True})
return restore_status
def restore_tilt_gravity_calibration_points(obj_list:list, update:bool) -> list:
"""Loop through a list of TiltGravityCalibrationPoint object dicts, call each one's from_dict() method, and then
save the object"""
restore_status = []
for obj_dict in obj_list:
point = TiltGravityCalibrationPoint.from_dict(obj_dict, update=update)
point.save()
restore_status.append({'uuid': point.uuid, 'success': True})
return restore_status
def METHOD_NAME(obj_list:list, update:bool) -> list:
"""Loop through a list of TiltConfiguration object dicts, call each one's from_dict() method, and then save the
object"""
restore_status = []
for obj_dict in obj_list:
tilt_config = TiltConfiguration.from_dict(obj_dict, update=update)
tilt_config.save()
restore_status.append({'uuid': tilt_config.uuid, 'success': True})
return restore_status
def restore_tiltbridges(obj_list:list, update:bool) -> list:
"""Loop through a list of TiltBridge object dicts, call each one's from_dict() method, and then save the object"""
restore_status = []
for obj_dict in obj_list:
bridge = TiltBridge.from_dict(obj_dict, update=update)
bridge.save()
restore_status.append({'uuid': bridge.uuid, 'success': True})
return restore_status
def restore_ispindel_configurations(obj_list:list, update:bool) -> list:
"""Loop through a list of IspindelConfiguration object dicts, call each one's from_dict() method, and then save the
object"""
restore_status = []
for obj_dict in obj_list:
ispindel_config = IspindelConfiguration.from_dict(obj_dict, update=update)
ispindel_config.save()
restore_status.append({'uuid': ispindel_config.uuid, 'success': True})
return restore_status
def restore_ispindel_gravity_calibration_points(obj_list:list, update:bool) -> list:
"""Loop through a list of IspindelGravityCalibrationPoint object dicts, call each one's from_dict() method, and
then save the object"""
restore_status = []
for obj_dict in obj_list:
point = IspindelGravityCalibrationPoint.from_dict(obj_dict, update=update)
point.save()
restore_status.append({'uuid': point.uuid, 'success': True})
return restore_status
def restore_fermentrack_configuration_options(obj_dict:dict):
"""Work through a dict containing all the Constance options, updating each setting to match what we were passed"""
if 'BREWERY_NAME' in obj_dict:
config.BREWERY_NAME = obj_dict['BREWERY_NAME']
if 'DATE_TIME_FORMAT_DISPLAY' in obj_dict:
config.DATE_TIME_FORMAT_DISPLAY = obj_dict['DATE_TIME_FORMAT_DISPLAY']
if 'REQUIRE_LOGIN_FOR_DASHBOARD' in obj_dict:
config.REQUIRE_LOGIN_FOR_DASHBOARD = obj_dict['REQUIRE_LOGIN_FOR_DASHBOARD']
if 'TEMPERATURE_FORMAT' in obj_dict:
config.TEMPERATURE_FORMAT = obj_dict['TEMPERATURE_FORMAT']
if 'GRAVITY_DISPLAY_FORMAT' in obj_dict:
config.GRAVITY_DISPLAY_FORMAT = obj_dict['GRAVITY_DISPLAY_FORMAT']
if 'USER_HAS_COMPLETED_CONFIGURATION' in obj_dict:
config.USER_HAS_COMPLETED_CONFIGURATION = obj_dict['USER_HAS_COMPLETED_CONFIGURATION']
if 'TEMP_CONTROL_SUPPORT_ENABLED' in obj_dict:
config.TEMP_CONTROL_SUPPORT_ENABLED = obj_dict['TEMP_CONTROL_SUPPORT_ENABLED']
if 'GRAVITY_SUPPORT_ENABLED' in obj_dict:
config.GRAVITY_SUPPORT_ENABLED = obj_dict['GRAVITY_SUPPORT_ENABLED']
# if 'LAST_GIT_CHECK' in obj_dict:
# config.LAST_GIT_CHECK = obj_dict['LAST_GIT_CHECK']
# if 'GIT_UPDATE_TYPE' in obj_dict:
# config.GIT_UPDATE_TYPE = obj_dict['GIT_UPDATE_TYPE']
if 'ALLOW_GIT_BRANCH_SWITCHING' in obj_dict:
config.ALLOW_GIT_BRANCH_SWITCHING = obj_dict['ALLOW_GIT_BRANCH_SWITCHING']
if 'PREFERRED_TIMEZONE' in obj_dict:
config.PREFERRED_TIMEZONE = obj_dict['PREFERRED_TIMEZONE']
if 'GRAPH_BEER_TEMP_COLOR' in obj_dict:
config.GRAPH_BEER_TEMP_COLOR = obj_dict['GRAPH_BEER_TEMP_COLOR']
if 'GRAPH_BEER_SET_COLOR' in obj_dict:
config.GRAPH_BEER_SET_COLOR = obj_dict['GRAPH_BEER_SET_COLOR']
if 'GRAPH_FRIDGE_TEMP_COLOR' in obj_dict:
config.GRAPH_FRIDGE_TEMP_COLOR = obj_dict['GRAPH_FRIDGE_TEMP_COLOR']
if 'GRAPH_FRIDGE_SET_COLOR' in obj_dict:
config.GRAPH_FRIDGE_SET_COLOR = obj_dict['GRAPH_FRIDGE_SET_COLOR']
if 'GRAPH_ROOM_TEMP_COLOR' in obj_dict:
config.GRAPH_ROOM_TEMP_COLOR = obj_dict['GRAPH_ROOM_TEMP_COLOR']
if 'GRAPH_GRAVITY_COLOR' in obj_dict:
config.GRAPH_GRAVITY_COLOR = obj_dict['GRAPH_GRAVITY_COLOR']
if 'GRAPH_GRAVITY_TEMP_COLOR' in obj_dict:
config.GRAPH_GRAVITY_TEMP_COLOR = obj_dict['GRAPH_GRAVITY_TEMP_COLOR']
if 'CUSTOM_THEME' in obj_dict:
config.CUSTOM_THEME = obj_dict['CUSTOM_THEME'] |
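# A minimal, hypothetical sketch (not part of the original module): the per-key
# assignments above could equally be expressed data-driven with setattr(). The
# helper name and the tuple of keys are illustrative only.
def _restore_config_options_from_dict(obj_dict: dict, keys: tuple):
    """Copy each named Constance option present in obj_dict onto config"""
    for key in keys:
        if key in obj_dict:
            setattr(config, key, obj_dict[key])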
test read coinc eventtable | # -*- coding: utf-8 -*-
# Copyright (C) California Institute of Technology (2022)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for :mod:`gwpy.table.io.gstlal`
"""
import pytest
from numpy import testing as nptest
from numpy import float32
from ..io import gstlal as gstlalio
from gwpy.table import EventTable
__author__ = 'Derek Davis <[email protected]>'
# -- gstlal file fixture -----------------------------------------------------
GSTLAL_FILE = """<?xml version='1.0' encoding='utf-8'?>
<!DOCTYPE LIGO_LW SYSTEM "http://ldas-sw.ligo.caltech.edu/doc/ligolwAPI/html/ligolw_dtd.txt">
<LIGO_LW>
<Table Name="coinc_inspiral:table">
<Column Name="coinc_event:coinc_event_id" Type="int_8s"/>
<Column Name="combined_far" Type="real_8"/>
<Column Name="end_time" Type="int_4s"/>
<Column Name="end_time_ns" Type="int_4s"/>
<Column Name="false_alarm_rate" Type="real_8"/>
<Column Name="ifos" Type="lstring"/>
<Column Name="mass" Type="real_8"/>
<Column Name="mchirp" Type="real_8"/>
<Column Name="minimum_duration" Type="real_8"/>
<Column Name="snr" Type="real_8"/>
<Stream Name="coinc_inspiral:table" Delimiter="," Type="Local">
1,1,100,0,1,"H1,L1",1,1,1,1,
2,1,100,0,1,"H1,L1",1,1,1,1,
</Stream>
</Table>
<Table Name="coinc_event:table">
<Column Name="coinc_definer:coinc_def_id" Type="int_8s"/>
<Column Name="coinc_event_id" Type="int_8s"/>
<Column Name="instruments" Type="lstring"/>
<Column Name="likelihood" Type="real_8"/>
<Column Name="nevents" Type="int_4u"/>
<Column Name="process:process_id" Type="int_8s"/>
<Column Name="time_slide:time_slide_id" Type="int_8s"/>
<Stream Name="coinc_event:table" Delimiter="," Type="Local">
0,1,"H1,L1",1,1,0,0,
1,1,"H1,L1",1,1,0,0
</Stream>
</Table>
<Table Name="sngl_inspiral:table">
<Column Name="process:process_id" Type="int_8s"/>
<Column Name="ifo" Type="lstring"/>
<Column Name="end_time" Type="int_4s"/>
<Column Name="end_time_ns" Type="int_4s"/>
<Column Name="eff_distance" Type="real_4"/>
<Column Name="coa_phase" Type="real_4"/>
<Column Name="mass1" Type="real_4"/>
<Column Name="mass2" Type="real_4"/>
<Column Name="snr" Type="real_4"/>
<Column Name="chisq" Type="real_4"/>
<Column Name="chisq_dof" Type="int_4s"/>
<Column Name="bank_chisq" Type="real_4"/>
<Column Name="bank_chisq_dof" Type="int_4s"/>
<Column Name="sigmasq" Type="real_8"/>
<Column Name="spin1x" Type="real_4"/>
<Column Name="spin1y" Type="real_4"/>
<Column Name="spin1z" Type="real_4"/>
<Column Name="spin2x" Type="real_4"/>
<Column Name="spin2y" Type="real_4"/>
<Column Name="spin2z" Type="real_4"/>
<Column Name="template_duration" Type="real_8"/>
<Column Name="event_id" Type="int_8s"/>
<Column Name="Gamma0" Type="real_4"/>
<Column Name="Gamma1" Type="real_4"/>
<Column Name="Gamma2" Type="real_4"/>
<Stream Name="sngl_inspiral:table" Delimiter="," Type="Local">
1,"L1",100,0,nan,0,10,10,3,2,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1,
1,"H1",100,0,nan,0,10,10,3,2,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1,
1,"V1",100,0,nan,0,10,10,3,2,1,1,1,1,0,0,0,0,0,0,1,1,1,1,1
</Stream>
</Table>
</LIGO_LW>
""" # noqa: E501
@pytest.fixture
def gstlal_table(tmp_path):
tmp = tmp_path / "H1L1V1-LLOID-1-1.xml.gz"
tmp.write_text(GSTLAL_FILE)
return tmp
# -- test data ----------------------------------------------------------------
SNGL_LEN = 3
COINC_LEN = 2
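# SNGL_LEN and COINC_LEN match the number of rows in the sngl_inspiral and
# coinc_inspiral streams of GSTLAL_FILE above.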
@pytest.mark.requires("ligo.lw.lsctables")
def test_sngl_function(gstlal_table):
table = gstlalio.read_gstlal_sngl(gstlal_table)
assert len(table) == SNGL_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def test_read_sngl_eventtable(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal',
triggers='sngl')
assert len(table) == SNGL_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def test_read_sngl_format(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal.sngl')
assert len(table) == SNGL_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def test_read_sngl_columns(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal.sngl',
columns=['snr', 'end_time'])
assert list(table.keys()) == ['snr', 'end_time']
@pytest.mark.requires("ligo.lw.lsctables")
def test_coinc_function(gstlal_table):
table = gstlalio.read_gstlal_coinc(gstlal_table)
assert len(table) == COINC_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def METHOD_NAME(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal',
triggers='coinc')
assert len(table) == COINC_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def test_read_coinc_format(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal.coinc')
assert len(table) == COINC_LEN
@pytest.mark.requires("ligo.lw.lsctables")
def test_read_coinc_columns(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal.coinc',
columns=['snr', 'end_time'])
assert list(table.keys()) == ['snr', 'end_time']
@pytest.mark.requires("ligo.lw.lsctables")
def test_derived_values(gstlal_table):
table = EventTable.read(gstlal_table, format='ligolw.gstlal',
triggers='sngl',
columns=['snr_chi', 'chi_snr', 'mchirp'])
nptest.assert_almost_equal(
table['snr_chi'][0], 4.5)
nptest.assert_almost_equal(
table['chi_snr'][0], 1./4.5)
nptest.assert_almost_equal(
table['mchirp'][0], float32(8.705506))
@pytest.mark.requires("ligo.lw.lsctables")
def test_incorrect_sngl_column(gstlal_table):
with pytest.raises(
ValueError,
match="is not a valid column name",
):
EventTable.read(gstlal_table, format='ligolw.gstlal.sngl',
columns=['nan'])
@pytest.mark.requires("ligo.lw.lsctables")
def test_incorrect_coinc_column(gstlal_table):
with pytest.raises(
ValueError,
match="is not a valid column name",
):
EventTable.read(gstlal_table, format='ligolw.gstlal.coinc',
columns=['nan'])
@pytest.mark.requires("ligo.lw.lsctables")
def test_incorrect_trigger_name(gstlal_table):
with pytest.raises(
ValueError,
match="^The 'triggers' argument",
):
EventTable.read(gstlal_table, format='ligolw.gstlal',
triggers='nan') |
test pyunit skip | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys
import traceback
import unittest as pyunit
from unittest import skipIf
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.trial.itrial import IReporter, ITestCase
from twisted.trial.test import pyunitcases
from twisted.trial.unittest import PyUnitResultAdapter, SynchronousTestCase
class PyUnitTestTests(SynchronousTestCase):
def setUp(self):
self.original = pyunitcases.PyUnitTest("test_pass")
self.test = ITestCase(self.original)
def test_callable(self):
"""
Tests must be callable in order to be used with Python's unittest.py.
"""
self.assertTrue(callable(self.test), f"{self.test!r} is not callable.")
class PyUnitResultTests(SynchronousTestCase):
"""
Tests to show that PyUnitResultAdapter wraps TestResult objects from the
standard library 'unittest' module in such a way as to make them usable and
useful from Trial.
"""
# Once erroneous is ported to Python 3 this can be replaced with
# erroneous.ErrorTest:
class ErrorTest(SynchronousTestCase):
"""
A test case which has a L{test_foo} which will raise an error.
@ivar ran: boolean indicating whether L{test_foo} has been run.
"""
ran = False
def test_foo(self):
"""
Set C{self.ran} to True and raise a C{ZeroDivisionError}
"""
self.ran = True
1 / 0
def test_dontUseAdapterWhenReporterProvidesIReporter(self):
"""
The L{PyUnitResultAdapter} is only used when the result passed to
C{run} does *not* provide L{IReporter}.
"""
@implementer(IReporter)
class StubReporter:
"""
A reporter which records data about calls made to it.
@ivar errors: Errors passed to L{addError}.
@ivar failures: Failures passed to L{addFailure}.
"""
def __init__(self):
self.errors = []
self.failures = []
def startTest(self, test):
"""
Do nothing.
"""
def stopTest(self, test):
"""
Do nothing.
"""
def addError(self, test, error):
"""
Record the error.
"""
self.errors.append(error)
test = self.ErrorTest("test_foo")
result = StubReporter()
test.run(result)
self.assertIsInstance(result.errors[0], Failure)
def test_success(self):
class SuccessTest(SynchronousTestCase):
ran = False
def test_foo(s):
s.ran = True
test = SuccessTest("test_foo")
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertTrue(result.wasSuccessful())
def test_failure(self):
class FailureTest(SynchronousTestCase):
ran = False
def test_foo(s):
s.ran = True
s.fail("boom!")
test = FailureTest("test_foo")
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.failures))
self.assertFalse(result.wasSuccessful())
def test_error(self):
test = self.ErrorTest("test_foo")
result = pyunit.TestResult()
test.run(result)
self.assertTrue(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.errors))
self.assertFalse(result.wasSuccessful())
def test_setUpError(self):
class ErrorTest(SynchronousTestCase):
ran = False
def setUp(self):
1 / 0
def test_foo(s):
s.ran = True
test = ErrorTest("test_foo")
result = pyunit.TestResult()
test.run(result)
self.assertFalse(test.ran)
self.assertEqual(1, result.testsRun)
self.assertEqual(1, len(result.errors))
self.assertFalse(result.wasSuccessful())
def test_tracebackFromFailure(self):
"""
Errors added through the L{PyUnitResultAdapter} have the same traceback
information as if there were no adapter at all.
"""
try:
1 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
f = Failure()
pyresult = pyunit.TestResult()
result = PyUnitResultAdapter(pyresult)
result.addError(self, f)
self.assertEqual(
pyresult.errors[0][1], "".join(traceback.format_exception(*exc_info))
)
def test_traceback(self):
"""
As test_tracebackFromFailure, but covering more code.
"""
class ErrorTest(SynchronousTestCase):
exc_info = None
def test_foo(self):
try:
1 / 0
except ZeroDivisionError:
self.exc_info = sys.exc_info()
raise
test = ErrorTest("test_foo")
result = pyunit.TestResult()
test.run(result)
# We can't test that the tracebacks are equal, because Trial's
# machinery inserts a few extra frames on the top and we don't really
# want to trim them off without an extremely good reason.
#
# So, we just test that the result's stack ends with the
# exception's stack.
expected_stack = "".join(traceback.format_tb(test.exc_info[2]))
observed_stack = "\n".join(result.errors[0][1].splitlines()[:-1])
self.assertEqual(
expected_stack.strip(), observed_stack[-len(expected_stack) :].strip()
)
def test_tracebackFromCleanFailure(self):
"""
Errors added through the L{PyUnitResultAdapter} have the same
traceback information as if there were no adapter at all, even
if the Failure that held the information has been cleaned.
"""
try:
1 / 0
except ZeroDivisionError:
exc_info = sys.exc_info()
f = Failure()
f.cleanFailure()
pyresult = pyunit.TestResult()
result = PyUnitResultAdapter(pyresult)
result.addError(self, f)
tback = "".join(traceback.format_exception(*exc_info))
self.assertEqual(
pyresult.errors[0][1].endswith("ZeroDivisionError: division by zero\n"),
tback.endswith("ZeroDivisionError: division by zero\n"),
)
def test_trialSkip(self):
"""
Skips using trial's skipping functionality are reported as skips in
the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
@skipIf(True, "Let's skip!")
def test_skip(self):
1 / 0
test = SkipTest("test_skip")
result = pyunit.TestResult()
test.run(result)
self.assertEqual(result.skipped, [(test, "Let's skip!")])
def METHOD_NAME(self):
"""
Skips using pyunit's skipping functionality are reported as skips in
the L{pyunit.TestResult}.
"""
class SkipTest(SynchronousTestCase):
@pyunit.skip("skippy")
def test_skip(self):
1 / 0
test = SkipTest("test_skip")
result = pyunit.TestResult()
test.run(result)
self.assertEqual(result.skipped, [(test, "skippy")]) |
ttest finish | # Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import numpy as np
from scipy import __version__ as sp_version
from scipy.stats import (
ttest_ind as sp_ttest_ind,
ttest_ind_from_stats as sp_ttest_ind_from_stats,
ttest_rel as sp_ttest_rel,
ttest_1samp as sp_ttest_1samp,
)
from scipy.stats import distributions as sp_distributions
from ...core import ExecutableTuple
from ...lib.version import parse as parse_version
from ..arithmetic import (
divide as mt_divide,
sqrt as mt_sqrt,
absolute as mt_abs,
isnan as mt_isnan,
)
from ..base import where as mt_where
from ..reduction import (
var as mt_var,
mean as mt_mean,
)
from ..utils import implement_scipy
def _equal_var_ttest_denom(v1, n1, v2, n2):
df = n1 + n2 - 2.0
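    # Pooled variance estimate used by the classic (equal-variance) Student's t-test.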
svar = ((n1 - 1) * v1 + (n2 - 1) * v2) / df
denom = mt_sqrt(svar * (1.0 / n1 + 1.0 / n2)) # XXX: np -> da
return df, denom
def _unequal_var_ttest_denom(v1, n1, v2, n2):
vn1 = v1 / n1
vn2 = v2 / n2
with np.errstate(divide="ignore", invalid="ignore"):
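        # Welch-Satterthwaite approximation of the effective degrees of freedom:
        #   df = (v1/n1 + v2/n2)**2 / ((v1/n1)**2/(n1-1) + (v2/n2)**2/(n2-1))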
df = (vn1 + vn2) ** 2 / (vn1**2 / (n1 - 1) + vn2**2 / (n2 - 1))
# If df is undefined, variances are zero (assumes n1 > 0 & n2 > 0).
# Hence it doesn't matter what df is as long as it's not NaN.
df = mt_where(mt_isnan(df), 1, df)
denom = mt_sqrt(vn1 + vn2)
return df, denom
def _ttest_ind_from_stats(mean1, mean2, denom, df, alternative):
d = mean1 - mean2
with np.errstate(divide="ignore", invalid="ignore"):
t = mt_divide(d, denom)
t, prob = METHOD_NAME(df, t, alternative)
return t, prob
def METHOD_NAME(df, t, alternative):
"""Common code between all 3 t-test functions."""
if alternative != "two-sided" and parse_version(sp_version) < parse_version(
"1.6.0"
): # pragma: no cover
raise ValueError("alternative must be 'two-sided' with scipy prior to 1.6.0")
if alternative == "less":
prob = t.map_chunk(sp_distributions.t.cdf, args=(df,))
elif alternative == "greater":
prob = t.map_chunk(sp_distributions.t.sf, args=(df,))
elif alternative == "two-sided":
prob = mt_abs(t).map_chunk(sp_distributions.t.sf, args=(df,)) * 2
else:
raise ValueError("alternative must be 'less', 'greater' or 'two-sided'")
if t.ndim == 0:
t = t[()]
return t, prob
Ttest_1sampResult = namedtuple("Ttest_1sampResult", ("statistic", "pvalue"))
@implement_scipy(sp_ttest_1samp)
def ttest_1samp(a, popmean, axis=0, nan_policy="propagate", alternative="two-sided"):
if nan_policy != "propagate":
raise NotImplementedError(
"`nan_policy` other than 'propagate' have not been implemented."
)
n = a.shape[axis]
df = n - 1
d = a.mean(axis=axis) - popmean
v = a.var(axis=axis, ddof=1)
denom = mt_sqrt(v / float(n))
with np.errstate(divide="ignore", invalid="ignore"):
t = mt_divide(d, denom)
t, prob = METHOD_NAME(df, t, alternative)
return ExecutableTuple(Ttest_1sampResult(t, prob))
Ttest_indResult = namedtuple("Ttest_indResult", ("statistic", "pvalue"))
@implement_scipy(sp_ttest_ind)
def ttest_ind(a, b, axis=0, equal_var=True, alternative="two-sided"):
v1 = mt_var(a, axis, ddof=1)
v2 = mt_var(b, axis, ddof=1)
n1 = a.shape[axis]
n2 = b.shape[axis]
if equal_var:
df, denom = _equal_var_ttest_denom(v1, n1, v2, n2)
else:
df, denom = _unequal_var_ttest_denom(v1, n1, v2, n2)
res = _ttest_ind_from_stats(
mt_mean(a, axis), mt_mean(b, axis), denom, df, alternative
)
return ExecutableTuple(Ttest_indResult(*res))
@implement_scipy(sp_ttest_ind_from_stats)
def ttest_ind_from_stats(
mean1, std1, nobs1, mean2, std2, nobs2, equal_var=True, alternative="two-sided"
):
if equal_var:
df, denom = _equal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
else:
df, denom = _unequal_var_ttest_denom(std1**2, nobs1, std2**2, nobs2)
res = _ttest_ind_from_stats(mean1, mean2, denom, df, alternative)
return ExecutableTuple(Ttest_indResult(*res))
Ttest_relResult = namedtuple("Ttest_relResult", ("statistic", "pvalue"))
@implement_scipy(sp_ttest_rel)
def ttest_rel(a, b, axis=0, nan_policy="propagate", alternative="two-sided"):
if nan_policy != "propagate":
raise NotImplementedError(
"`nan_policy` other than 'propagate' have not been implemented."
)
n = a.shape[axis]
df = float(n - 1)
d = (a - b).astype(np.float64)
v = mt_var(d, axis, ddof=1)
dm = mt_mean(d, axis)
denom = mt_sqrt(v / float(n))
with np.errstate(divide="ignore", invalid="ignore"):
t = mt_divide(dm, denom)
t, prob = METHOD_NAME(df, t, alternative)
return ExecutableTuple(Ttest_relResult(t, prob)) |
location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
'get_account_output',
]
@pulumi.output_type
class GetAccountResult:
"""
The EngagementFabric account
"""
def __init__(__self__, id=None, METHOD_NAME=None, name=None, sku=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The location of the resource
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sku(self) -> 'outputs.SKUResponse':
"""
The SKU of the resource
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The fully qualified type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
The EngagementFabric account
Azure REST API version: 2018-09-01-preview.
:param str account_name: Account Name
:param str resource_group_name: Resource Group Name
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:engagementfabric:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
sku=pulumi.get(__ret__, 'sku'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_account)
def get_account_output(account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAccountResult]:
"""
The EngagementFabric account
Azure REST API version: 2018-09-01-preview.
:param str account_name: Account Name
:param str resource_group_name: Resource Group Name
"""
... |
get logging health | # -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge-iot.readthedocs.io/
# FLEDGE_END
import asyncio
import json
from aiohttp import web
from fledge.common.common import _FLEDGE_DATA, _FLEDGE_ROOT
from fledge.common.logger import FLCoreLogger
__author__ = "Deepanshu Yadav"
__copyright__ = "Copyright (c) 2022, Dianomic Systems Inc."
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_help = """
----------------------------------------------------------
| GET | /fledge/health/storage |
| GET | /fledge/health/logging |
----------------------------------------------------------
"""
_LOGGER = FLCoreLogger().get_logger(__name__)
async def get_disk_usage(given_dir):
"""
    Helper function that calculates used, available, and usage (in %) for a given directory in the file system.
    Returns a tuple of used (in KB, integer), available (in KB, integer), and usage (in %).
"""
disk_check_process = await asyncio.create_subprocess_shell('df -k ' + given_dir,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
stdout, stderr = await disk_check_process.communicate()
if disk_check_process.returncode != 0:
stderr = stderr.decode("utf-8")
msg = "Failed to get disk stats of {} directory. {}".format(given_dir, str(stderr))
_LOGGER.error(msg)
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg}))
# Following output is parsed.
"""
Filesystem 1K-blocks Used Available Use% Mounted on
/dev/sda5 122473072 95449760 20755872 83% /
"""
disk_stats = stdout.decode("utf-8")
required_stats = disk_stats.split('\n')[1].split()
used = int(required_stats[2])
available = int(required_stats[3])
usage = int(required_stats[4].replace("%", ''))
return used, available, usage
async def METHOD_NAME(request: web.Request) -> web.Response:
"""
Return the health of logging.
Args:
request: None
Returns:
Return the health of logging.
Sample Response :
{
"disk": {
"usage": 63,
"used": 42936800,
"available": 25229400
},
"levels": [
{
"name" : "Sine",
"level" : "info"
},
{
"name" : "OMF",
"level" : "debug"
}
]
}
:Example:
curl -X GET http://localhost:8081/fledge/health/logging
"""
response = {}
try:
from fledge.common.storage_client.payload_builder import PayloadBuilder
from fledge.services.core import connect
payload = PayloadBuilder().SELECT("key", "value").payload()
_storage_client = connect.get_storage_async()
excluded_log_levels = ["error", "warning"]
results = await _storage_client.query_tbl_with_payload('configuration', payload)
log_levels = []
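        # Keep only services whose configured logLevel is outside the excluded error/warning levels.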
for row in results["rows"]:
for item_name, item_info in row["value"].items():
if item_name == "logLevel" and item_info['value'] not in excluded_log_levels:
service_name = row["key"].replace("Advanced", "").strip()
log_level = item_info['value']
log_levels.append({"name": service_name, "level": log_level})
response["levels"] = log_levels
except Exception as ex:
msg = "Could not fetch service information."
_LOGGER.error(ex, msg)
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))}))
try:
response['disk'] = {}
used, available, usage = await get_disk_usage('/var/log')
# fill all the fields after values are retrieved
response['disk']['used'] = used
response['disk']['usage'] = usage
response['disk']['available'] = available
except Exception as ex:
msg = "Failed to get disk stats for /var/log."
_LOGGER.error(ex, msg)
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))}))
else:
return web.json_response(response)
async def get_storage_health(request: web.Request) -> web.Response:
"""
    Return the health of the Storage service & data directory.
Args:
request: None
Returns:
        Return the health of the Storage service & data directory.
Sample Response :
{
"uptime": 33,
"name": "Fledge Storage",
"statistics": {
"commonInsert": 30,
"commonSimpleQuery": 3,
"commonQuery": 91,
"commonUpdate": 2,
"commonDelete": 1,
"readingAppend": 0,
"readingFetch": 0,
"readingQuery": 1,
"readingPurge": 0
},
"disk": {
"used": 95287524,
"usage": 82,
"available": 20918108,
"status": "green"
}
}
:Example:
curl -X GET http://localhost:8081/fledge/health/storage
"""
# Find the address and management host for the Storage service.
from fledge.services.core.service_registry.service_registry import ServiceRegistry
from fledge.services.core.service_registry.exceptions import DoesNotExist
try:
services = ServiceRegistry.get(name="Fledge Storage")
service = services[0]
except DoesNotExist:
msg = "Cannot ping the storage service. It does not exist in service registry."
_LOGGER.error(msg)
raise web.HTTPNotFound(reason=msg, body=json.dumps({"message": msg}))
try:
from fledge.common.service_record import ServiceRecord
if service._status != ServiceRecord.Status.Running:
msg = "The Storage service is not in Running state."
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg}))
from fledge.common.microservice_management_client.microservice_management_client import \
MicroserviceManagementClient
mgt_client = MicroserviceManagementClient(service._address,
service._management_port)
response = await mgt_client.ping_service()
except Exception as ex:
msg = str(ex)
_LOGGER.error(ex, "Could not ping the Storage service.")
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": msg}))
try:
response['disk'] = {}
data_dir_path = _FLEDGE_DATA if _FLEDGE_DATA else _FLEDGE_ROOT + '/data'
used, available, usage = await get_disk_usage(data_dir_path)
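        # Traffic-light thresholds: usage <= 90% is green, 90-95% yellow, above 95% red.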
status = 'green'
if usage > 95:
status = 'red'
elif 90 < usage <= 95:
status = 'yellow'
# fill all the fields after values are retrieved
response['disk']['used'] = used
response['disk']['usage'] = usage
response['disk']['available'] = available
response['disk']['status'] = status
except Exception as ex:
msg = "Failed to get disk stats for Storage service."
_LOGGER.error(ex, msg)
raise web.HTTPInternalServerError(reason=msg, body=json.dumps({"message": "{} {}".format(msg, str(ex))}))
else:
return web.json_response(response) |
kind | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTagInheritanceSettingResult',
'AwaitableGetTagInheritanceSettingResult',
'get_tag_inheritance_setting',
'get_tag_inheritance_setting_output',
]
@pulumi.output_type
class GetTagInheritanceSettingResult:
"""
Tag Inheritance Setting definition.
"""
def __init__(__self__, id=None, METHOD_NAME=None, name=None, properties=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Specifies the kind of settings.
Expected value is 'taginheritance'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.TagInheritancePropertiesResponse':
"""
The properties of the tag inheritance setting.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetTagInheritanceSettingResult(GetTagInheritanceSettingResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTagInheritanceSettingResult(
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
properties=self.properties,
type=self.type)
def get_tag_inheritance_setting(scope: Optional[str] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTagInheritanceSettingResult:
"""
Get the setting from the given scope by name.
:param str scope: The scope associated with this setting. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for billing profile scope.
:param str type: Setting type.
"""
__args__ = dict()
__args__['scope'] = scope
__args__['type'] = type
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:costmanagement/v20230801:getTagInheritanceSetting', __args__, opts=opts, typ=GetTagInheritanceSettingResult).value
return AwaitableGetTagInheritanceSettingResult(
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_tag_inheritance_setting)
def get_tag_inheritance_setting_output(scope: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTagInheritanceSettingResult]:
"""
Get the setting from the given scope by name.
:param str scope: The scope associated with this setting. This includes 'subscriptions/{subscriptionId}' for subscription scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}' for Billing Account scope, 'providers/Microsoft.Billing/billingAccounts/{billingAccountId}/billingProfiles/{billingProfileId}' for billing profile scope.
:param str type: Setting type.
"""
... |
is agent enabled | """
For Dynatrace, we have two different ingestion methods:
1. via Dynatrace OneAgent, which is downloaded and injected into the Java
runtime.
2. via telegraf, which ingests custom runtime metrics using the Dynatrace
output plugin.
"""
import logging
import os
import json
from functools import lru_cache
from urllib.parse import urljoin
from buildpack import util
INGEST_ENDPOINT = "api/v2/metrics/ingest"
NAMESPACE = "dynatrace"
BUILD_PATH = os.path.join(".local", NAMESPACE)
# Environment variables for Dynatrace OneAgent
# Only passed to the agent if set as environment variable
default_env = {
# -- Environment variables for the integration
# "DT_PAAS_TOKEN": required, also used for telegraf integration
# "DT_SAAS_URL": required, also used for telegraf integration
"DT_TENANT": None, # required for agent integration, dynatrace envID
    # optional, default value is taken from manifest.json, which is downloaded
    # along with the agent installer
"DT_TENANTTOKEN": None,
# -- Environment variables for orchestration
"DT_CLUSTER_ID": None, # optional, default not set
# optional metadata e.g. Department=Acceptance Stage=Sprint
"DT_CUSTOM_PROP": None,
# -- Environment variables for troubleshooting
"DT_LOGSTREAM": "stdout", # optional
# Use this environment variable to define the console log level.
# Valid options are: NONE, SEVERE, and INFO.
"DT_LOGLEVELCON": None,
# Set to true or false to enable or disable OneAgent.
"DT_AGENTACTIVE": None,
}
def stage(buildpack_dir, root_dir, cache_path):
"""
Downloads and unzips necessary OneAgent components
"""
if METHOD_NAME():
try:
util.resolve_dependency(
dependency="dynatrace.agent",
destination=os.path.join(root_dir, NAMESPACE),
buildpack_dir=buildpack_dir,
cache_dir=cache_path, # CACHE_DIR,
unpack=True,
overrides={
                    # need to use rstrip because otherwise the download link is
                    # formed with double slashes and it doesn't work
"url": os.environ.get("DT_SAAS_URL").rstrip("/"),
"environment": os.environ.get("DT_TENANT"),
"token": os.environ.get("DT_PAAS_TOKEN"),
},
# cache is not working properly, so ignoring for now.
# Can be debugged later, a stack trace exists in the PR:
# https://github.com/mendix/cf-mendix-buildpack/pull/562
ignore_cache=True,
)
except Exception:
logging.warning("Dynatrace agent download and unpack failed", exc_info=True)
def update_config(m2ee):
"""
    Injects Dynatrace configuration into the Java runtime
"""
if not METHOD_NAME():
logging.debug(
"Skipping Dynatrace OneAgent setup, required env vars are not set"
)
return
logging.info("Enabling Dynatrace OneAgent")
try:
manifest = get_manifest()
except Exception:
logging.warning("Failed to parse Dynatrace manifest file", exc_info=True)
return
agent_path = get_agent_path()
logging.debug("Agent path: [%s]", agent_path)
if not os.path.exists(agent_path):
raise Exception(f"Dynatrace Agent not found: {agent_path}")
# dynamic default
default_env.update({"DT_TENANTTOKEN": manifest.get("tenantToken")})
for key, dv in default_env.items():
value = os.environ.get(key, dv)
if value is not None:
util.upsert_custom_environment_variable(m2ee, key, value)
util.upsert_custom_environment_variable(
m2ee, "DT_CONNECTION_POINT", get_connection_endpoint()
)
util.upsert_javaopts(
m2ee,
[
f"-agentpath:{os.path.abspath(agent_path)}",
"-Xshare:off",
],
)
@lru_cache(maxsize=None)
def get_manifest():
manifest_path = os.path.join(BUILD_PATH, "manifest.json")
with open(manifest_path, "r") as file_handler:
return json.load(file_handler)
def get_connection_endpoint():
manifest = get_manifest()
endpoints = manifest.get("communicationEndpoints", [])
# prepend the DT_SAAS_URL because the communication endpoints might not be correct
endpoints.insert(0, _join_url(os.environ.get("DT_SAAS_URL"), "communication"))
return ";".join(endpoints)
def get_agent_path():
manifest = get_manifest()
technologies = manifest.get("technologies")
java_binaries = technologies.get("java").get("linux-x86-64")
for file in java_binaries:
binary_type = file.get("binarytype")
if binary_type == "loader":
return os.path.join(BUILD_PATH, file.get("path"))
def is_telegraf_enabled():
return "DT_PAAS_TOKEN" in os.environ.keys() and "DT_SAAS_URL" in os.environ.keys()
def METHOD_NAME():
return is_telegraf_enabled() and ("DT_TENANT" in os.environ.keys())
def get_ingestion_info():
if not is_telegraf_enabled():
return None, None
logging.info("Metrics ingestion to Dynatrace via telegraf is configured")
token = os.getenv("DT_PAAS_TOKEN")
base_url = os.getenv("DT_SAAS_URL")
tenant_id = os.getenv("DT_TENANT")
if os.getenv("DT_IS_MANAGED", "false").lower() == "true":
base_url = _join_url(base_url, f"e/{tenant_id}")
ingest_url = _join_url(base_url, INGEST_ENDPOINT)
return token, ingest_url
def _join_url(saas_url, endpoint):
"""
    Basic URL join, purposefully isolated so it can be unit-tested easily.
    When merging a URL and an additional endpoint, Python's urljoin method
    has many little details. See:
    https://stackoverflow.com/questions/10893374/python-confusions-with-urljoin
    So, basically, we need to make sure that the URL ends with '/' and
    the endpoint does not start with '/'.
"""
saas_url = f"{saas_url}/"
endpoint = endpoint.lstrip("/")
return urljoin(saas_url, endpoint) |
batch predict | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import os
from functools import partial
import numpy as np
import paddle
from data import convert_example, create_dataloader, load_vocab
from model.dep import BiAffineParser
from utils import decode, flat_words
from paddlenlp.datasets import load_dataset
from paddlenlp.transformers import AutoModel
# fmt: off
parser = argparse.ArgumentParser()
# Predict
parser.add_argument("--params_path", type=str, default='model_file/best.pdparams', required=True, help="Directory to load model parameters.")
parser.add_argument("--task_name", choices=["nlpcc13_evsam05_thu", "nlpcc13_evsam05_hit"], type=str, default="nlpcc13_evsam05_thu", help="Select the task.")
parser.add_argument("--device", choices=["cpu", "gpu"], default="gpu", help="Select which device to train model, defaults to gpu.")
parser.add_argument("--encoding_model", choices=["lstm", "lstm-pe", "ernie-3.0-medium-zh", "ernie-1.0", "ernie-tiny", "ernie-gram-zh"], type=str, default="ernie-3.0-medium-zh", help="Select the encoding model.")
parser.add_argument("--batch_size", type=int, default=1000, help="Numbers of examples a batch for training.")
parser.add_argument("--infer_output_file", type=str, default='infer_output.conll', help="The path to save infer results.")
# Preprocess
parser.add_argument("--n_buckets", type=int, default=15, help="Number of buckets to devide the dataset.")
# Postprocess
parser.add_argument("--tree", type=bool, default=True, help="Ensure the output conforms to the tree structure.")
# Lstm
parser.add_argument("--feat", choices=["char", "pos"], type=str, default=None, help="The feature representation to use.")
args = parser.parse_args()
# fmt: on
@paddle.no_grad()
def METHOD_NAME(
model,
data_loader,
rel_vocab,
word_pad_index,
word_bos_index,
word_eos_index,
):
model.eval()
arcs, rels = [], []
for inputs in data_loader():
if args.encoding_model.startswith("ernie") or args.encoding_model == "lstm-pe":
words = inputs[0]
words, feats = flat_words(words)
s_arc, s_rel, words = model(words, feats)
else:
words, feats = inputs
s_arc, s_rel, words = model(words, feats)
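        # Exclude padding, BOS and EOS positions so only real word tokens are decoded.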
mask = paddle.logical_and(
paddle.logical_and(words != word_pad_index, words != word_bos_index),
words != word_eos_index,
)
lens = paddle.sum(paddle.cast(mask, "int32"), axis=-1)
arc_preds, rel_preds = decode(s_arc, s_rel, mask)
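        # masked_select flattens the predictions; split them back into per-sentence sequences by length.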
arcs.extend(paddle.split(paddle.masked_select(arc_preds, mask), lens.numpy().tolist()))
rels.extend(paddle.split(paddle.masked_select(rel_preds, mask), lens.numpy().tolist()))
arcs = [[str(s) for s in seq.numpy().tolist()] for seq in arcs]
rels = [rel_vocab.to_tokens(seq.numpy().tolist()) for seq in rels]
return arcs, rels
def do_predict(args):
paddle.set_device(args.device)
# if args.encoding_model.startswith("ernie"):
# tokenizer = AutoTokenizer.from_pretrained(args.encoding_model)
# elif args.encoding_model == "lstm-pe":
# tokenizer = AutoTokenizer.from_pretrained("ernie-3.0-medium-zh")
# else:
# tokenizer = None
# Load vocabs from model file path
vocab_dir = os.path.split(args.params_path)[0]
word_vocab, feat_vocab, rel_vocab = load_vocab(vocab_dir)
n_rels, n_words = len(rel_vocab), len(word_vocab)
if args.encoding_model == "lstm":
n_feats = len(feat_vocab)
word_pad_index = word_vocab.to_indices("[PAD]")
word_bos_index = word_vocab.to_indices("[BOS]")
word_eos_index = word_vocab.to_indices("[EOS]")
else:
n_feats = None
word_pad_index = word_vocab.to_indices("[PAD]")
word_bos_index = word_vocab.to_indices("[CLS]")
word_eos_index = word_vocab.to_indices("[SEP]")
test_ds = load_dataset(args.task_name, splits=["test"])
test_ds_copy = copy.deepcopy(test_ds)
trans_fn = partial(
convert_example,
vocabs=[word_vocab, feat_vocab, rel_vocab],
encoding_model=args.encoding_model,
feat=args.feat,
mode="test",
)
test_data_loader, buckets = create_dataloader(
test_ds,
batch_size=args.batch_size,
mode="test",
n_buckets=args.n_buckets,
trans_fn=trans_fn,
)
# Load pretrained model if encoding model is ernie-3.0-medium-zh, ernie-1.0, ernie-tiny or ernie-gram-zh
if args.encoding_model in ["ernie-3.0-medium-zh", "ernie-1.0", "ernie-tiny"]:
pretrained_model = AutoModel.from_pretrained(args.encoding_model)
elif args.encoding_model == "ernie-gram-zh":
pretrained_model = AutoModel.from_pretrained(args.encoding_model)
else:
pretrained_model = None
# Load model
model = BiAffineParser(
encoding_model=args.encoding_model,
feat=args.feat,
n_rels=n_rels,
n_feats=n_feats,
n_words=n_words,
pad_index=word_pad_index,
eos_index=word_eos_index,
pretrained_model=pretrained_model,
)
# Load saved model parameters
if os.path.isfile(args.params_path):
state_dict = paddle.load(args.params_path)
model.set_dict(state_dict)
print("Loaded parameters from %s" % args.params_path)
else:
raise ValueError("The parameters path is incorrect or not specified.")
# Start predict
pred_arcs, pred_rels = METHOD_NAME(
model,
test_data_loader,
rel_vocab,
word_pad_index,
word_bos_index,
word_eos_index,
)
# Restore the order of sentences in the buckets
if buckets:
indices = np.argsort(np.array([i for bucket in buckets.values() for i in bucket]))
else:
indices = range(len(pred_arcs))
pred_heads = [pred_arcs[i] for i in indices]
pred_deprels = [pred_rels[i] for i in indices]
with open(args.infer_output_file, "w", encoding="utf-8") as out_file:
for res, head, rel in zip(test_ds_copy, pred_heads, pred_deprels):
res["HEAD"] = tuple(head)
res["DEPREL"] = tuple(rel)
res = "\n".join("\t".join(map(str, line)) for line in zip(*res.values())) + "\n"
out_file.write("{}\n".format(res))
out_file.close()
print("Results saved!")
if __name__ == "__main__":
do_predict(args) |