id (int64, 0-6k) | code (stringlengths 4k-8k) | code_compressed (null) |
---|---|---|
5,800 | # -*- coding: utf-8 -*-
import pandas as pd
from PySide6 import QtCore, QtWidgets
from ..dialogs.messagebox import MessageBox
from ..ui import resource_rc
from ..ui.tabular_filter import Ui_TabularFilter
class TabularFilter(Ui_TabularFilter, QtWidgets.QWidget):
def __init__(self, signals, int_format, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setupUi(self)
self._target = None
self.names = [item[0] for item in signals]
self.dtype_kind = [item[1] for item in signals]
self.is_bytearray = [item[2] for item in signals]
self.int_format = int_format
self.relation.addItems(["AND", "OR"])
self.column.addItems(self.names)
self.op.addItems([">", ">=", "<", "<=", "==", "!="])
self.target.editingFinished.connect(self.METHOD_NAME)
self.column.currentIndexChanged.connect(self.column_changed)
def column_changed(self, index):
self.target.setText("")
self._target = None
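# Target parsing below (illustrative inputs, not an exhaustive list): integer
# columns accept "0x"/"0b" prefixed literals or plain numbers interpreted per
# int_format; bytearray columns expect a hex string such as "DE AD BE EF".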
def METHOD_NAME(self):
idx = self.column.currentIndex()
column_name = self.column.currentText()
kind = self.dtype_kind[idx]
target = self.target.text().strip()
if target:
if kind in "ui":
if target.startswith("0x"):
try:
self._target = int(target, 16)
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires an integer target value",
)
elif target.startswith("0b"):
try:
self._target = int(target, 2)
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires an integer target value",
)
else:
if self.int_format == "hex":
try:
self._target = int(target, 16)
self.target.setText(f"0x{self._target:X}")
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires a hex-format integer target value",
)
elif self.int_format == "bin":
try:
self._target = int(target, 2)
self.target.setText(f"0b{self._target:b}")
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires a bin-format integer target value",
)
else:
try:
self._target = int(target)
except:
try:
self._target = int(target, 16)
self.target.setText(f"0x{self._target:X}")
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires an integer target value",
)
elif kind == "f":
try:
self._target = float(target)
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires a float target value",
)
elif kind == "O":
is_bytearray = self.is_bytearray[idx]
if is_bytearray:
try:
bytes.fromhex(target.replace(" ", ""))
except:
MessageBox.warning(
None,
"Wrong target value",
f"{column_name} requires a correct hexstring",
)
else:
target = target.strip().replace(" ", "")
target = [target[i : i + 2] for i in range(0, len(target), 2)]
target = " ".join(target).upper()
if self._target is None:
self._target = f'"{target}"'
self.target.setText(target)
elif self._target.strip('"') != target:
self._target = f'"{target}"'
self.target.setText(target)
else:
self._target = f'"{target}"'
elif kind == "S":
self._target = f'b"{target}"'
elif kind == "U":
self._target = f'"{target}"'
elif kind == "M":
try:
pd.Timestamp(target)
except:
MessageBox.warning(
None,
"Wrong target value",
f"Datetime {column_name} requires a correct pandas Timestamp literal",
)
else:
self._target = target
def to_config(self):
info = {
"enabled": self.enabled.checkState() == QtCore.Qt.Checked,
"relation": self.relation.currentText(),
"column": self.column.currentText(),
"op": self.op.currentText(),
"target": str(self._target),
}
return info | null |
5,801 | #!/usr/bin/env python
import argparse
import datetime
import os
import shutil
import subprocess
import sys
import re
import time
import yaml
MAINDIR = os.getcwd()
TIMING_RE = re.compile(r'Psi4 exiting successfully. Buy a developer a beer!')
TEST_LEVELS = {
'short': ['short'],
'medium': ['medium'],
'long': ['long'],
'standard': ['short', 'medium'],
'all': ['short', 'medium', 'long']
}
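# The test list file is expected to map test groups to level lists, e.g.
# (layout inferred from the loops in main(); the names below are assumed):
#
# casscf:
#   short:
#     - casscf-1
#     - casscf-2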
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
def METHOD_NAME(jobdir, psi4command, test_results, test_time):
"""Run a test in jobdir using the psi4command"""
start = time.time()
os.chdir(jobdir)
successful = True
# Run Psi4
try:
out = subprocess.check_output([psi4command, "-n2"])
except:
# something went wrong
successful = False
test_results[jobdir] = 'FAILED'
# check if Forte ended successfully
if successful:
timing = open('output.dat').read()
m = TIMING_RE.search(timing)
if m:
test_results[jobdir] = 'PASSED'
else:
test_results[jobdir] = 'FAILED'
successful = False
print(out.decode('utf-8'))
os.chdir(MAINDIR)
end = time.time()
test_time[jobdir] = end - start
return successful
def prepare_summary(jobdir, test_results, test_time, summary, color):
"""Append the result of a computation to a summary"""
if test_results[jobdir] == 'PASSED':
if color:
msg = bcolors.OKGREEN + 'PASSED' + bcolors.ENDC
else:
msg = 'PASSED'
elif test_results[jobdir] == 'FAILED':
if color:
msg = bcolors.FAIL + 'FAILED' + bcolors.ENDC
else:
msg = 'FAILED'
duration = test_time[jobdir]
if color:
filler = '.' * max(0, 76 - len(jobdir + msg))
else:
filler = '.' * max(0, 67 - len(jobdir + msg))
summary.append(' {0}{1}{2}{3:7.1f}'.format(jobdir.upper(), filler, msg,
duration))
return duration
def setup_argument_parser():
"""Setup an ArgumentParser object to deal with user input."""
parser = argparse.ArgumentParser(description='Run Forte tests.')
parser.add_argument('--psi4_exec',
help='the location of the psi4 executable')
parser.add_argument('--file',
help='the yaml file containing the list of tests (default: tests.yaml)',
default='tests.yaml')
parser.add_argument('--failed',
help='run only failed tests (listed in the file failed_tests)',
action='store_true')
parser.add_argument('--bw',
help='print the summary in black and white? (default: color)',
action='store_true')
parser.add_argument('--failed_dump',
help='dump the output of the failed tests to stdout?',
action='store_true')
parser.add_argument('--type',
help='which type of test to run? (default: standard)',
choices={'short', 'medium','long','standard', 'all'},
default='standard')
parser.add_argument('--group',
help='which group of tests to run? (default: None)',
default=None)
return parser.parse_args()
def find_psi4(args):
"""Find the psi4 executable or use value provided by the user."""
psi4command = None
# if not provided try to detect psi4
if args.psi4_exec is None:
psi4command = shutil.which('psi4')
else:
psi4command = args.psi4_exec
if psi4command is None:
print(
'Could not detect your PSI4 executable. Please specify its location.'
)
exit(1)
return psi4command
def main():
psi4command = ''
total_time = 0.0
summary = []
test_results = {}
test_time = {}
failed_tests = {}
args = setup_argument_parser()
psi4command = find_psi4(args)
print('Running forte tests using the psi4 executable found in:\n %s\n' %
psi4command)
# default is to run tests listed in tests.yaml
test_dict_file = args.file
# optionally, run only tests that previously failed
if args.failed:
print('Running only failed tests')
test_dict_file = 'failed_tests.yaml'
# read the yaml file
with open(test_dict_file, 'rt') as infile:
test_dict = yaml.load(infile, Loader=yaml.FullLoader)
tested_groups = test_dict.keys()
if args.group is not None:
tested_groups = [args.group]
ntests = 0
nfailed = 0
# loop over group tests
for test_group, test_levels in test_dict.items():
if test_group in tested_groups:
print('Test group {}'.format(test_group.upper()))
group_failed_tests = {} # test that failed in this group
for test_level, tests in test_levels.items():
local_failed_tests = []
if test_level in TEST_LEVELS[args.type]:
for test in tests:
print(' Running test {}'.format(test.upper()))
successful = METHOD_NAME(test, psi4command, test_results,
test_time)
if not successful:
local_failed_tests.append(test)
nfailed += 1
total_time += prepare_summary(test, test_results,
test_time, summary,
not args.bw)
ntests += len(tests)
if len(local_failed_tests) > 0:
group_failed_tests[test_level] = local_failed_tests
if len(group_failed_tests) > 0:
failed_tests[test_group] = group_failed_tests
# print a summary of the tests
summary_str = 'Summary:\n'
summary_str += ' ' * 4 + '=' * 76 + '\n'
summary_str += ' TEST' + ' ' * 57 + 'RESULT TIME (s)\n'
summary_str += ' ' * 4 + '-' * 76 + '\n'
summary_str += '\n'.join(summary) + '\n'
summary_str += ' ' * 4 + '=' * 76
print(summary_str)
print('\nTotal time: %6.1f s\n' % total_time)
now = datetime.datetime.now()
file_name = 'test_results_%s.txt' % now.strftime("%Y-%m-%d-%H%M")
with open(file_name, 'w') as outfile:
outfile.write(summary_str)
outfile.write('\nTotal time: %6.1f s\n' % total_time)
# save the list of failed tests
with open('failed_tests.yaml', 'w') as outfile:
yaml.dump(failed_tests, outfile, default_flow_style=False)
if nfailed == 0:
print('Tests: All passed ({} tests)\n'.format(ntests))
else:
print('Tests: {} passed and {} failed\n'.format(
ntests - nfailed, nfailed))
# Get the current date and time
dt = datetime.datetime.now()
now = dt.strftime('%Y-%m-%d-%H:%M')
print('The following tests failed:')
for test_group, test_levels in failed_tests.items():
print('Test group {}'.format(test_group.upper()))
for test_level, tests in test_levels.items():
for test in tests:
print(' {}'.format(test.upper()))
if args.failed_dump:
for test_group, test_levels in failed_tests.items():
for test_level, tests in test_levels.items():
for test in tests:
print('\n\n==> %s TEST OUTPUT <==\n' % test.upper())
subprocess.call('cat %s/output.dat' % test, shell=True)
print('\n')
exit(1)
if __name__ == '__main__':
main() | null |
5,802 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import fontTools.pens.boundsPen
from fontTools.misc.transform import Transform
from ufo2ft.filters import BaseFilter
logger = logging.getLogger(__name__)
class PropagateAnchorsFilter(BaseFilter):
def set_context(self, font, glyphSet):
ctx = super().set_context(font, glyphSet)
ctx.processed = set()
return ctx
def __call__(self, font, glyphSet=None):
if super().__call__(font, glyphSet):
modified = self.context.modified
if modified:
logger.info("Glyphs with propagated anchors: %i" % len(modified))
return modified
def filter(self, glyph):
if not glyph.components:
return False
before = len(glyph.anchors)
_propagate_glyph_anchors(
self.context.glyphSet,
glyph,
self.context.processed,
self.context.modified,
)
return len(glyph.anchors) > before
def _propagate_glyph_anchors(glyphSet, composite, processed, modified):
"""
Propagate anchors from base glyphs to a given composite
glyph, and to all composite glyphs used in between.
"""
if composite.name in processed:
return
processed.add(composite.name)
if not composite.components:
return
base_components = []
mark_components = []
anchor_names = set()
to_add = {}
for component in composite.components:
try:
glyph = glyphSet[component.baseGlyph]
except KeyError:
logger.warning(
"Anchors not propagated for inexistent component {} "
"in glyph {}".format(component.baseGlyph, composite.name)
)
else:
_propagate_glyph_anchors(glyphSet, glyph, processed, modified)
if any(a.name.startswith("_") for a in glyph.anchors):
mark_components.append(component)
else:
base_components.append(component)
anchor_names |= {a.name for a in glyph.anchors}
if mark_components and not base_components and _is_ligature_mark(composite):
# The composite is a mark that is composed of other marks (E.g.
# "circumflexcomb_tildecomb"). Promote the mark that is positioned closest
# to the origin to a base.
try:
component = METHOD_NAME(mark_components, glyphSet)
except Exception as e:
raise Exception(
"Error while determining which component of composite "
"'{}' is the lowest: {}".format(composite.name, str(e))
) from e
mark_components.remove(component)
base_components.append(component)
glyph = glyphSet[component.baseGlyph]
anchor_names |= {a.name for a in glyph.anchors}
for anchor_name in anchor_names:
# don't add if composite glyph already contains this anchor OR any
# associated ligature anchors (e.g. "top_1, top_2" for "top")
if not any(a.name.startswith(anchor_name) for a in composite.anchors):
_get_anchor_data(to_add, glyphSet, base_components, anchor_name)
for component in mark_components:
_adjust_anchors(to_add, glyphSet, component)
# we sort propagated anchors to append in a deterministic order
for name, (x, y) in sorted(to_add.items()):
anchor_dict = {"name": name, "x": x, "y": y}
try:
composite.appendAnchor(anchor_dict)
except TypeError: # pragma: no cover
# fontParts API
composite.appendAnchor(name, (x, y))
if to_add:
modified.add(composite.name)
def _get_anchor_data(anchor_data, glyphSet, components, anchor_name):
"""Get data for an anchor from a list of components."""
anchors = []
for component in components:
for anchor in glyphSet[component.baseGlyph].anchors:
if anchor.name == anchor_name:
anchors.append((anchor, component))
break
if len(anchors) > 1:
for i, (anchor, component) in enumerate(anchors):
t = Transform(*component.transformation)
name = "%s_%d" % (anchor.name, i + 1)
anchor_data[name] = t.transformPoint((anchor.x, anchor.y))
elif anchors:
anchor, component = anchors[0]
t = Transform(*component.transformation)
anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
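# Illustrative outcome (hypothetical glyphs): when two base components of a
# ligature such as "f_i" both carry a "top" anchor, the propagated names
# become "top_1" and "top_2"; a single match keeps the plain "top" name.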
def _adjust_anchors(anchor_data, glyphSet, component):
"""
Adjust base anchors to which a mark component may have been attached, by
moving the base anchor attached to a mark anchor to the position of
the mark component's base anchor.
"""
glyph = glyphSet[component.baseGlyph]
t = Transform(*component.transformation)
for anchor in glyph.anchors:
# only adjust if this anchor has data and the component also contains
# the associated mark anchor (e.g. "_top" for "top")
if anchor.name in anchor_data and any(
a.name == "_" + anchor.name for a in glyph.anchors
):
anchor_data[anchor.name] = t.transformPoint((anchor.x, anchor.y))
def METHOD_NAME(components, glyph_set):
"""Return the component whose (xmin, ymin) bounds are closest to origin.
This ensures that a component that is moved below another is
actually recognized as such. Looking only at the transformation
offset can be misleading.
"""
return min(components, key=lambda comp: _distance((0, 0), _bounds(comp, glyph_set)))
def _distance(pos1, pos2):
x1, y1 = pos1
x2, y2 = pos2
return (x1 - x2) ** 2 + (y1 - y2) ** 2
def _is_ligature_mark(glyph):
return not glyph.name.startswith("_") and "_" in glyph.name
def _bounds(component, glyph_set):
"""Return the (xmin, ymin) of the bounds of `component`."""
if hasattr(component, "bounds"): # e.g. defcon
return component.bounds[:2]
elif hasattr(component, "draw"): # e.g. ufoLib2
pen = fontTools.pens.boundsPen.BoundsPen(glyphSet=glyph_set)
component.draw(pen)
return pen.bounds[:2]
else:
raise ValueError(
f"Don't know to to compute the bounds of component '{component}' "
) | null |
5,803 | """pytest configuration
Extends output capture as needed by pybind11: ignore constructors, optional unordered lines.
Adds docstring and exceptions message sanitizers.
"""
import contextlib
import difflib
import gc
import re
import textwrap
import pytest
# Early diagnostic for failed imports
import pybind11_tests
_long_marker = re.compile(r"([0-9])L")
_hexadecimal = re.compile(r"0x[0-9a-fA-F]+")
# Avoid collecting Python3 only files
collect_ignore = []
def _strip_and_dedent(s):
"""For triple-quote strings"""
return textwrap.dedent(s.lstrip("\n").rstrip())
def _split_and_sort(s):
"""For output which does not require specific line order"""
return sorted(_strip_and_dedent(s).splitlines())
def _make_explanation(a, b):
"""Explanation for a failed assert -- the a and b arguments are List[str]"""
return ["--- actual / +++ expected"] + [
line.strip("\n") for line in difflib.ndiff(a, b)
]
class Output:
"""Basic output post-processing and comparison"""
def __init__(self, string):
self.string = string
self.explanation = []
def __str__(self):
return self.string
def __eq__(self, other):
# Ignore constructor/destructor output which is prefixed with "###"
a = [
line
for line in self.string.strip().splitlines()
if not line.startswith("###")
]
b = _strip_and_dedent(other).splitlines()
if a == b:
return True
else:
self.explanation = _make_explanation(a, b)
return False
class Unordered(Output):
"""Custom comparison for output without strict line ordering"""
def __eq__(self, other):
a = _split_and_sort(self.string)
b = _split_and_sort(other)
if a == b:
return True
else:
self.explanation = _make_explanation(a, b)
return False
class Capture:
def __init__(self, capfd):
self.capfd = capfd
self.out = ""
self.err = ""
def __enter__(self):
self.capfd.readouterr()
return self
def __exit__(self, *args):
self.out, self.err = self.capfd.readouterr()
def __eq__(self, other):
a = Output(self.out)
b = other
if a == b:
return True
else:
self.explanation = a.explanation
return False
def __str__(self):
return self.out
def __contains__(self, item):
return item in self.out
@property
def unordered(self):
return Unordered(self.out)
@property
def stderr(self):
return Output(self.err)
@pytest.fixture
def capture(capsys):
"""Extended `capsys` with context manager and custom equality operators"""
return Capture(capsys)
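# Illustrative use of the fixture in a test (example assumed, not from this file):
#
#     def test_print(capture):
#         with capture:
#             print("hello")
#         assert capture == "hello"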
class SanitizedString:
def __init__(self, sanitizer):
self.sanitizer = sanitizer
self.string = ""
self.explanation = []
def __call__(self, thing):
self.string = self.sanitizer(thing)
return self
def __eq__(self, other):
a = self.string
b = _strip_and_dedent(other)
if a == b:
return True
else:
self.explanation = _make_explanation(a.splitlines(), b.splitlines())
return False
def _sanitize_general(s):
s = s.strip()
s = s.replace("pybind11_tests.", "m.")
s = _long_marker.sub(r"\1", s)
return s
def _sanitize_docstring(thing):
s = thing.__doc__
s = _sanitize_general(s)
return s
@pytest.fixture
def METHOD_NAME():
"""Sanitize docstrings and add custom failure explanation"""
return SanitizedString(_sanitize_docstring)
def _sanitize_message(thing):
s = str(thing)
s = _sanitize_general(s)
s = _hexadecimal.sub("0", s)
return s
@pytest.fixture
def msg():
"""Sanitize messages and add custom failure explanation"""
return SanitizedString(_sanitize_message)
# noinspection PyUnusedLocal
def pytest_assertrepr_compare(op, left, right):
"""Hook to insert custom failure explanation"""
if hasattr(left, "explanation"):
return left.explanation
@contextlib.contextmanager
def suppress(exception):
"""Suppress the desired exception"""
try:
yield
except exception:
pass
def gc_collect():
"""Run the garbage collector twice (needed when running
reference counting tests with PyPy)"""
gc.collect()
gc.collect()
def pytest_configure():
pytest.suppress = suppress
pytest.gc_collect = gc_collect
def pytest_report_header(config):
del config # Unused.
assert (
pybind11_tests.compiler_info is not None
), "Please update pybind11_tests.cpp if this assert fails."
return (
"C++ Info:"
f" {pybind11_tests.compiler_info}"
f" {pybind11_tests.cpp_std}"
f" {pybind11_tests.PYBIND11_INTERNALS_ID}"
) | null |
5,804 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import torch
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.spectral_ops as P
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.common.api import jit
from mindspore.ops import functional as F
class BartlettWindowNet(nn.Cell):
def __init__(self, periodic=True, dtype=mstype.float32):
super(BartlettWindowNet, self).__init__()
self.bartlettwindow = P.BartlettWindow(periodic=periodic, dtype=dtype)
@jit
def construct(self, input_x):
return self.bartlettwindow(input_x)
def get_dtype(dtype="float16"):
if dtype == "float16":
nptype = np.float16
msptype = mstype.float16
pttype = torch.float32
elif dtype == "float32":
nptype = np.float32
msptype = mstype.float32
pttype = torch.float32
elif dtype == "float64":
nptype = np.float64
msptype = mstype.float64
pttype = torch.float64
else:
print("The attr 'dtype' must in [float16, float32, float64]")
return nptype, msptype, pttype
def bartlett_window(periodic, dtype, loss):
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
nptype, msptype, pttype = get_dtype(dtype)
input_x_np = np.array(200, dtype=np.int32)
input_x_ms = Tensor(input_x_np)
input_x_torch = torch.tensor(input_x_np)
bartlett_window_net = BartlettWindowNet(periodic, msptype)
bartlett_window_output = bartlett_window_net(input_x_ms)
bartlett_window_expect = torch.bartlett_window(input_x_torch, periodic=periodic, dtype=pttype)
assert np.allclose(bartlett_window_output.asnumpy(), bartlett_window_expect.numpy().astype(nptype), loss, loss)
def bartlett_window_pynative(periodic, dtype, loss):
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
nptype, msptype, pttype = get_dtype(dtype)
input_x_np = np.array(200, dtype=np.int64)
input_x_ms = Tensor(input_x_np)
input_x_torch = torch.tensor(input_x_np)
bartlett_window_net = BartlettWindowNet(periodic, msptype)
bartlett_window_output = bartlett_window_net(input_x_ms)
bartlett_window_expect = torch.bartlett_window(input_x_torch, periodic=periodic, dtype=pttype)
assert np.allclose(bartlett_window_output.asnumpy(), bartlett_window_expect.numpy().astype(nptype), loss, loss)
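# For reference (formula assumed from the torch definition): the periodic
# Bartlett window of length N is w[n] = 1 - |2n/N - 1| for n = 0..N-1,
# e.g. N = 5 gives [0, 0.4, 0.8, 0.8, 0.4] as checked in the functional test below.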
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bartlett_window_graph_int32_true_float32():
"""
Feature: ALL To ALL
Description: test cases for BartlettWindow
Expectation: the result match to torch
"""
bartlett_window(periodic=True, dtype="float32", loss=1.0e-4)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_bartlett_window_pynative_int64_false_float64():
"""
Feature: ALL To ALL
Description: test cases for BartlettWindow
Expectation: the result match to torch
"""
bartlett_window_pynative(periodic=False, dtype="float64", loss=1.0e-5)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def METHOD_NAME(mode):
"""
Feature: test bartlett_window functional api for PyNative and Graph modes.
Description: test bartlett_window functional api and compare with expected output.
Expectation: the result match with expected result.
"""
context.set_context(mode=mode, device_target="GPU")
window_length = Tensor(5, mstype.int32)
output = F.bartlett_window(window_length, periodic=True, dtype=mstype.float32)
expected = np.array([0, 0.4, 0.8, 0.8, 0.4], np.float32)
np.testing.assert_array_equal(output.asnumpy(), expected) | null |
5,805 | from unittest import TestCase
from aioquic.buffer import Buffer, BufferReadError, BufferWriteError, size_uint_var
class BufferTest(TestCase):
def test_data_slice(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.data_slice(0, 8), b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.data_slice(1, 3), b"\x07\x06")
with self.assertRaises(BufferReadError):
buf.data_slice(-1, 3)
with self.assertRaises(BufferReadError):
buf.data_slice(0, 9)
with self.assertRaises(BufferReadError):
buf.data_slice(1, 0)
def test_pull_bytes(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_bytes(3), b"\x08\x07\x06")
def METHOD_NAME(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
with self.assertRaises(BufferReadError):
buf.pull_bytes(-1)
def test_pull_bytes_truncated(self):
buf = Buffer(capacity=0)
with self.assertRaises(BufferReadError):
buf.pull_bytes(2)
self.assertEqual(buf.tell(), 0)
def test_pull_bytes_zero(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_bytes(0), b"")
def test_pull_uint8(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_uint8(), 0x08)
self.assertEqual(buf.tell(), 1)
def test_pull_uint8_truncated(self):
buf = Buffer(capacity=0)
with self.assertRaises(BufferReadError):
buf.pull_uint8()
self.assertEqual(buf.tell(), 0)
def test_pull_uint16(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_uint16(), 0x0807)
self.assertEqual(buf.tell(), 2)
def test_pull_uint16_truncated(self):
buf = Buffer(capacity=1)
with self.assertRaises(BufferReadError):
buf.pull_uint16()
self.assertEqual(buf.tell(), 0)
def test_pull_uint32(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_uint32(), 0x08070605)
self.assertEqual(buf.tell(), 4)
def test_pull_uint32_truncated(self):
buf = Buffer(capacity=3)
with self.assertRaises(BufferReadError):
buf.pull_uint32()
self.assertEqual(buf.tell(), 0)
def test_pull_uint64(self):
buf = Buffer(data=b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.pull_uint64(), 0x0807060504030201)
self.assertEqual(buf.tell(), 8)
def test_pull_uint64_truncated(self):
buf = Buffer(capacity=7)
with self.assertRaises(BufferReadError):
buf.pull_uint64()
self.assertEqual(buf.tell(), 0)
def test_push_bytes(self):
buf = Buffer(capacity=3)
buf.push_bytes(b"\x08\x07\x06")
self.assertEqual(buf.data, b"\x08\x07\x06")
self.assertEqual(buf.tell(), 3)
def test_push_bytes_truncated(self):
buf = Buffer(capacity=3)
with self.assertRaises(BufferWriteError):
buf.push_bytes(b"\x08\x07\x06\x05")
self.assertEqual(buf.tell(), 0)
def test_push_bytes_zero(self):
buf = Buffer(capacity=3)
buf.push_bytes(b"")
self.assertEqual(buf.data, b"")
self.assertEqual(buf.tell(), 0)
def test_push_uint8(self):
buf = Buffer(capacity=1)
buf.push_uint8(0x08)
self.assertEqual(buf.data, b"\x08")
self.assertEqual(buf.tell(), 1)
def test_push_uint16(self):
buf = Buffer(capacity=2)
buf.push_uint16(0x0807)
self.assertEqual(buf.data, b"\x08\x07")
self.assertEqual(buf.tell(), 2)
def test_push_uint32(self):
buf = Buffer(capacity=4)
buf.push_uint32(0x08070605)
self.assertEqual(buf.data, b"\x08\x07\x06\x05")
self.assertEqual(buf.tell(), 4)
def test_push_uint64(self):
buf = Buffer(capacity=8)
buf.push_uint64(0x0807060504030201)
self.assertEqual(buf.data, b"\x08\x07\x06\x05\x04\x03\x02\x01")
self.assertEqual(buf.tell(), 8)
def test_seek(self):
buf = Buffer(data=b"01234567")
self.assertFalse(buf.eof())
self.assertEqual(buf.tell(), 0)
buf.seek(4)
self.assertFalse(buf.eof())
self.assertEqual(buf.tell(), 4)
buf.seek(8)
self.assertTrue(buf.eof())
self.assertEqual(buf.tell(), 8)
with self.assertRaises(BufferReadError):
buf.seek(-1)
self.assertEqual(buf.tell(), 8)
with self.assertRaises(BufferReadError):
buf.seek(9)
self.assertEqual(buf.tell(), 8)
class UintVarTest(TestCase):
def roundtrip(self, data, value):
buf = Buffer(data=data)
self.assertEqual(buf.pull_uint_var(), value)
self.assertEqual(buf.tell(), len(data))
buf = Buffer(capacity=8)
buf.push_uint_var(value)
self.assertEqual(buf.data, data)
def test_uint_var(self):
# 1 byte
self.roundtrip(b"\x00", 0)
self.roundtrip(b"\x01", 1)
self.roundtrip(b"\x25", 37)
self.roundtrip(b"\x3f", 63)
# 2 bytes
self.roundtrip(b"\x7b\xbd", 15293)
self.roundtrip(b"\x7f\xff", 16383)
# 4 bytes
self.roundtrip(b"\x9d\x7f\x3e\x7d", 494878333)
self.roundtrip(b"\xbf\xff\xff\xff", 1073741823)
# 8 bytes
self.roundtrip(b"\xc2\x19\x7c\x5e\xff\x14\xe8\x8c", 151288809941952652)
self.roundtrip(b"\xff\xff\xff\xff\xff\xff\xff\xff", 4611686018427387903)
def test_pull_uint_var_truncated(self):
buf = Buffer(capacity=0)
with self.assertRaises(BufferReadError):
buf.pull_uint_var()
buf = Buffer(data=b"\xff")
with self.assertRaises(BufferReadError):
buf.pull_uint_var()
def test_push_uint_var_too_big(self):
buf = Buffer(capacity=8)
with self.assertRaises(ValueError) as cm:
buf.push_uint_var(4611686018427387904)
self.assertEqual(
str(cm.exception), "Integer is too big for a variable-length integer"
)
def test_size_uint_var(self):
self.assertEqual(size_uint_var(63), 1)
self.assertEqual(size_uint_var(16383), 2)
self.assertEqual(size_uint_var(1073741823), 4)
self.assertEqual(size_uint_var(4611686018427387903), 8)
with self.assertRaises(ValueError) as cm:
size_uint_var(4611686018427387904)
self.assertEqual(
str(cm.exception), "Integer is too big for a variable-length integer"
) | null |
5,806 | #!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -----------------------------------------------------------------------------
# Copyright 2019-2020 Arm Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -----------------------------------------------------------------------------
"""
The ``astc_test_image_dl`` utility provides a means to programmatically download
test images that are available online, avoiding the need to duplicate them in
the git repository.
"""
import os
import sys
import urllib.request
from PIL import Image
TEST_IMAGE_DIR = os.path.join("Test", "Images")
def download(testSet, index, srcUrl, dstPath):
"""
Download a single image.
Args:
testSet (str): The test set name.
index (int): The download index.
srcUrl (str): The download URL.
dstPath (str): The destination path.
"""
dirName = os.path.dirname(dstPath)
if not os.path.exists(dirName):
os.makedirs(dirName)
# Skip downloads if the file already exists
if not os.path.exists(dstPath):
print("%s image %u: Downloading" % (testSet, index))
urllib.request.urlretrieve(srcUrl, dstPath)
else:
print("%s image %u: Skipping" % (testSet, index))
def make_landscape(imgPath):
"""
Make an image on disk landscape aspect (edit in place)
Args:
imgPath: The path of the image on disk.
"""
img = Image.open(imgPath)
if img.size[0] < img.size[1]:
img = img.rotate(90, expand=True)
img.save(imgPath)
def make_mixed_image(imgPathA, imgPathB, dstPath):
"""
Make image consisting of RGB from A's RGB, and alpha from B's luminance.
Args:
imgPathA: The path of input A on disk.
imgPathB: The path of input B on disk.
dstPath: The path of the destination.
"""
imgA = Image.open(imgPathA)
imgB = Image.open(imgPathB).convert("L")
imgA.putalpha(imgB)
dirs = os.path.dirname(dstPath)
if not os.path.exists(dirs):
os.makedirs(dirs)
imgA.save(dstPath)
def make_montage(imageDir, dstPath):
"""
Make a single mosaic montage consisting of all of the Kodak images.
Args:
imageDir: The directory path of the Kodak images on disk.
dstPath: The file path of the resulting montage.
"""
cols = 6
rows = 4
width = 768
height = 512
images = os.listdir(imageDir)
images.sort()
montage = Image.new('RGB', (width * cols, height * rows))
for i, src in enumerate(images):
im = Image.open(os.path.join(imageDir, src))
col = i % cols
row = i // cols
montage.paste(im, (width * col, height * row))
dirs = os.path.dirname(dstPath)
if not os.path.exists(dirs):
os.makedirs(dirs)
montage.save(dstPath)
def retrieve_kodak_set():
"""
Download the public domain Kodak image set.
To make test set mosaics easier to build we rotate images to make
everything landscape.
"""
testSet = "Kodak"
# Download the original RGB images
for i in range(1, 25):
fle = "ldr-rgb-kodak%02u.png" % i
dst = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB", fle)
src = "http://r0k.us/graphics/kodak/kodak/kodim%02u.png" % i
download(testSet, i, src, dst)
# Canonicalize image aspect
make_landscape(dst)
# Make some correlated alpha RGBA images
fle = "ldr-rgb-kodak%02u.png" # Expand later
pattern = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB", fle)
for i in (22, 23):
imgA = pattern % i
fle = "ldr-rgba-kodak%02u+ca.png" % i
dst = os.path.join(TEST_IMAGE_DIR, "KodakSim", "LDR-RGBA", fle)
make_mixed_image(imgA, imgA, dst)
# Make some non-correlated alpha RGBA images
for i, j in ((22, 24), (23, 20)):
imgA = pattern % i
imgB = pattern % j
fle = "ldr-rgba-kodak%02u+%02u+nca.png" % (i, j)
dst = os.path.join(TEST_IMAGE_DIR, "KodakSim", "LDR-RGBA", fle)
make_mixed_image(imgA, imgB, dst)
# Make a large montage
srcDir = os.path.join(TEST_IMAGE_DIR, "Kodak", "LDR-RGB")
fle = "ldr-rgb-montage.png"
dst = os.path.join(TEST_IMAGE_DIR, "KodakMnt", "LDR-RGB", fle)
make_montage(srcDir, dst)
def METHOD_NAME():
"""
The main function.
Returns:
int: The process return code.
"""
retrieve_kodak_set()
return 0
if __name__ == "__main__":
sys.exit(METHOD_NAME()) | null |
5,807 | import logging
from functools import update_wrapper
from pathlib import Path
from urllib.error import URLError
import click
from pymobiledevice3.cli.cli_common import Command, print_json
from pymobiledevice3.exceptions import AlreadyMountedError, DeveloperDiskImageNotFoundError, NotMountedError, \
UnsupportedCommandError
from pymobiledevice3.lockdown import LockdownClient
from pymobiledevice3.services.mobile_image_mounter import DeveloperDiskImageMounter, MobileImageMounterService, \
PersonalizedImageMounter, auto_mount
logger = logging.getLogger(__name__)
def catch_errors(func):
def catch_function(*args, **kwargs):
try:
return func(*args, **kwargs)
except AlreadyMountedError:
logger.error('Given image was already mounted')
except UnsupportedCommandError:
logger.error('Your iOS version doesn\'t support this command')
return update_wrapper(catch_function, func)
@click.group()
def cli():
""" mounter cli """
pass
@cli.group()
def mounter():
""" mounter options """
pass
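# Typical invocation through the pymobiledevice3 CLI (command prefix assumed):
#
#     pymobiledevice3 mounter auto-mount
#     pymobiledevice3 mounter list --no-color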
@mounter.command('list', cls=Command)
@click.option('--color/--no-color', default=True)
def mounter_list(service_provider: LockdownClient, color):
""" list all mounted images """
output = []
images = MobileImageMounterService(lockdown=service_provider).copy_devices()
for image in images:
image_signature = image.get('ImageSignature')
if image_signature is not None:
image['ImageSignature'] = image_signature.hex()
output.append(image)
print_json(output, colored=color)
@mounter.command('lookup', cls=Command)
@click.option('--color/--no-color', default=True)
@click.argument('image_type')
def mounter_lookup(service_provider: LockdownClient, color, image_type):
""" lookup mounter image type """
try:
signature = MobileImageMounterService(lockdown=service_provider).lookup_image(image_type)
print_json(signature, colored=color)
except NotMountedError:
logger.error(f'Disk image of type: {image_type} is not mounted')
@mounter.command('umount-developer', cls=Command)
@catch_errors
def METHOD_NAME(service_provider: LockdownClient):
""" unmount Developer image """
try:
DeveloperDiskImageMounter(lockdown=service_provider).umount()
logger.info('Developer image unmounted successfully')
except NotMountedError:
logger.error('Developer image isn\'t currently mounted')
@mounter.command('umount-personalized', cls=Command)
@catch_errors
def mounter_umount_personalized(service_provider: LockdownClient):
""" unmount Personalized image """
try:
PersonalizedImageMounter(lockdown=service_provider).umount()
logger.info('Personalized image unmounted successfully')
except NotMountedError:
logger.error('Personalized image isn\'t currently mounted')
@mounter.command('mount-developer', cls=Command)
@click.argument('image', type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument('signature', type=click.Path(exists=True, file_okay=True, dir_okay=False))
@catch_errors
def mounter_mount_developer(service_provider: LockdownClient, image: str, signature: str):
""" mount developer image """
DeveloperDiskImageMounter(lockdown=service_provider).mount(Path(image), Path(signature))
logger.info('Developer image mounted successfully')
@mounter.command('mount-personalized', cls=Command)
@click.argument('image', type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument('trust-cache', type=click.Path(exists=True, file_okay=True, dir_okay=False))
@click.argument('build-manifest', type=click.Path(exists=True, file_okay=True, dir_okay=False))
@catch_errors
def mounter_mount_personalized(service_provider: LockdownClient, image: str, trust_cache: str, build_manifest: str):
""" mount personalized image """
PersonalizedImageMounter(lockdown=service_provider).mount(Path(image), Path(build_manifest), Path(trust_cache))
logger.info('Personalized image mounted successfully')
@mounter.command('auto-mount', cls=Command)
@click.option('-x', '--xcode', type=click.Path(exists=True, dir_okay=True, file_okay=False),
help='Xcode application path used to figure out automatically the DeveloperDiskImage path')
@click.option('-v', '--version', help='use a different DeveloperDiskImage version from the one retrieved by the '
'lockdown connection')
def mounter_auto_mount(service_provider: LockdownClient, xcode: str, version: str):
""" auto-detect correct DeveloperDiskImage and mount it """
try:
auto_mount(service_provider, xcode=xcode, version=version)
logger.info('DeveloperDiskImage mounted successfully')
except URLError:
logger.warning('failed to query DeveloperDiskImage versions')
except DeveloperDiskImageNotFoundError:
logger.error('Unable to find the correct DeveloperDiskImage')
except AlreadyMountedError:
logger.error('DeveloperDiskImage already mounted')
except PermissionError as e:
logger.error(
f'DeveloperDiskImage could not be saved to Xcode default path ({e.filename}). '
f'Please make sure your user has the necessary permissions')
@mounter.command('query-developer-mode-status', cls=Command)
@click.option('--color/--no-color', default=True)
def mounter_query_developer_mode_status(service_provider: LockdownClient, color):
""" Query developer mode status """
print_json(MobileImageMounterService(lockdown=service_provider).query_developer_mode_status(), colored=color)
@mounter.command('query-nonce', cls=Command)
@click.option('--image-type')
@click.option('--color/--no-color', default=True)
def mounter_query_nonce(service_provider: LockdownClient, image_type: str, color: bool):
""" Query nonce """
print_json(MobileImageMounterService(lockdown=service_provider).query_nonce(image_type), colored=color)
@mounter.command('query-personalization-identifiers', cls=Command)
@click.option('--color/--no-color', default=True)
def mounter_query_personalization_identifiers(service_provider: LockdownClient, color):
""" Query personalization identifiers """
print_json(MobileImageMounterService(lockdown=service_provider).query_personalization_identifiers(), colored=color)
@mounter.command('query-personalization-manifest', cls=Command)
@click.option('--color/--no-color', default=True)
def mounter_query_personalization_manifest(service_provider: LockdownClient, color):
""" Query personalization manifest """
result = []
mounter = MobileImageMounterService(lockdown=service_provider)
for device in mounter.copy_devices():
result.append(mounter.query_personalization_manifest(device['PersonalizedImageType'], device['ImageSignature']))
print_json(result, colored=color)
@mounter.command('roll-personalization-nonce', cls=Command)
def mounter_roll_personalization_nonce(service_provider: LockdownClient):
MobileImageMounterService(lockdown=service_provider).roll_personalization_nonce()
@mounter.command('roll-cryptex-nonce', cls=Command)
def mounter_roll_cryptex_nonce(service_provider: LockdownClient):
""" Roll cryptex nonce (will reboot) """
MobileImageMounterService(lockdown=service_provider).roll_cryptex_nonce() | null |
5,808 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2018 Stephan Thiele <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
class LinFsm:
class State:
WaitForBreak = 'WAIT_FOR_BREAK'
Sync = 'SYNC'
Pid = 'PID'
Data = 'DATA'
Checksum = 'CHECKSUM'
Error = 'ERROR'
def transit(self, target_state):
if not self._transition_allowed(target_state):
return False
self.state = target_state
return True
def _transition_allowed(self, target_state):
if target_state == LinFsm.State.Error:
return True
return target_state in self.allowed_state[self.state]
def reset(self):
self.state = LinFsm.State.WaitForBreak
def __init__(self):
a = dict()
a[LinFsm.State.WaitForBreak] = (LinFsm.State.Sync,)
a[LinFsm.State.Sync] = (LinFsm.State.Pid,)
a[LinFsm.State.Pid] = (LinFsm.State.Data,)
a[LinFsm.State.Data] = (LinFsm.State.Data, LinFsm.State.Checksum)
a[LinFsm.State.Checksum] = (LinFsm.State.WaitForBreak,)
a[LinFsm.State.Error] = (LinFsm.State.Sync,)
self.allowed_state = a
self.state = None
self.reset()
class Decoder(srd.Decoder):
api_version = 3
id = 'lin'
name = 'LIN'
longname = 'Local Interconnect Network'
desc = 'Local Interconnect Network (LIN) protocol.'
license = 'gplv2+'
inputs = ['uart']
outputs = []
tags = ['Automotive']
options = (
{'id': 'version', 'desc': 'Protocol version', 'default': 2, 'values': (1, 2), 'idn':'dec_lin_opt_version'},
)
annotations = (
('data', 'LIN data'),
('control', 'Protocol info'),
('error', 'Error descriptions'),
('inline_error', 'Protocol violations and errors'),
)
annotation_rows = (
('data', 'Data', (0, 1, 3)),
('error', 'Error', (2,)),
)
def __init__(self):
self.reset()
def reset(self):
self.fsm = LinFsm()
self.lin_header = []
self.lin_rsp = []
self.lin_version = None
self.out_ann = None
self.ss_block = None
self.es_block = None
self.done_break = False
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.lin_version = self.options['version']
def putx(self, data):
self.put(self.ss_block, self.es_block, self.out_ann, data)
def wipe_break_null_byte(self, value):
# Upon a break condition a null byte is received which must be ignored.
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if len(self.lin_rsp):
value = self.lin_rsp.pop()[2]
else:
self.lin_header.pop()
if value != 0:
self.fsm.transit(LinFsm.State.Error)
self.handle_error(None)
return False
return True
def handle_wait_for_break(self, value):
self.wipe_break_null_byte(value)
def handle_break(self, value):
if self.fsm.state not in (LinFsm.State.WaitForBreak, LinFsm.State.Error):
if self.wipe_break_null_byte(value):
self.fsm.transit(LinFsm.State.Checksum)
self.handle_checksum()
self.fsm.reset()
self.fsm.transit(LinFsm.State.Sync)
self.done_break = True
self.putx([1, ['Break condition', 'Break', 'Brk', 'B']])
def handle_sync(self, value):
self.fsm.transit(LinFsm.State.Pid)
self.lin_header.append((self.ss_block, self.es_block, value))
def handle_pid(self, value):
self.fsm.transit(LinFsm.State.Data)
self.lin_header.append((self.ss_block, self.es_block, value))
def METHOD_NAME(self, value):
self.lin_rsp.append((self.ss_block, self.es_block, value))
def handle_checksum(self):
sync = self.lin_header.pop(0) if len(self.lin_header) else None
self.put(sync[0], sync[1], self.out_ann, [0, ['Sync', 'S']])
if sync[2] != 0x55:
self.put(sync[0], sync[1], self.out_ann,
[2, ['Sync is not 0x55', 'Not 0x55', '!= 0x55']])
pid = self.lin_header.pop(0) if len(self.lin_header) else None
checksum = self.lin_rsp.pop() if len(self.lin_rsp) else None
if pid:
id_ = pid[2] & 0x3F
parity = pid[2] >> 6
expected_parity = self.calc_parity(pid[2])
parity_valid = parity == expected_parity
if not parity_valid:
self.put(pid[0], pid[1], self.out_ann, [2, ['P != %d' % expected_parity]])
ann_class = 0 if parity_valid else 3
self.put(pid[0], pid[1], self.out_ann, [ann_class, [
'ID: %02X Parity: %d (%s)' % (id_, parity, 'ok' if parity_valid else 'bad'),
'ID: 0x%02X' % id_, 'I: %d' % id_
]])
if len(self.lin_rsp):
checksum_valid = self.checksum_is_valid(pid[2], self.lin_rsp, checksum[2])
for b in self.lin_rsp:
self.put(b[0], b[1], self.out_ann, [0, ['Data: 0x%02X' % b[2], 'D: 0x%02X' % b[2]]])
ann_class = 0 if checksum_valid else 3
self.put(checksum[0], checksum[1], self.out_ann,
[ann_class, ['Checksum: 0x%02X' % checksum[2], 'Checksum', 'Chk', 'C']])
if not checksum_valid:
self.put(checksum[0], checksum[1], self.out_ann, [2, ['Checksum invalid']])
else:
pass # No response.
self.lin_header.clear()
self.lin_rsp.clear()
def handle_error(self, dummy):
self.putx([3, ['Error', 'Err', 'E']])
def checksum_is_valid(self, pid, data, checksum):
if self.lin_version == 2:
id_ = pid & 0x3F
if id_ != 60 and id_ != 61:
checksum += pid
for d in data:
checksum += d[2]
carry_bits = int(checksum / 256)
checksum += carry_bits
return checksum & 0xFF == 0xFF
@staticmethod
def calc_parity(pid):
id_ = [((pid & 0x3F) >> i) & 1 for i in range(8)]
p0 = id_[0] ^ id_[1] ^ id_[2] ^ id_[4]
p1 = not (id_[1] ^ id_[3] ^ id_[4] ^ id_[5])
return (p0 << 0) | (p1 << 1)
def end(self):
if self.done_break and len(self.lin_rsp):
self.handle_checksum()
def decode(self, ss, es, data):
ptype, rxtx, pdata = data
self.ss_block, self.es_block = ss, es
# Ignore all UART packets except the actual data packets or BREAK.
if ptype == 'BREAK':
self.handle_break(pdata)
if ptype != 'DATA':
return
# We're only interested in the byte value (not individual bits).
pdata = pdata[0]
# Short LIN overview:
# - Message begins with a BREAK (0x00) for at least 13 bittimes.
# - Break is always followed by a SYNC byte (0x55).
# - Sync byte is followed by a PID byte (Protected Identifier).
# - PID byte is followed by 1 - 8 data bytes and a final checksum byte.
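# Worked example (frame values assumed, not taken from a capture): for ID 0x10
# the parity bits are P0=1, P1=0, so the PID byte on the wire is 0x50; with one
# data byte 0x20 the LIN 2.x enhanced checksum is 0xFF - (0x50 + 0x20) = 0x8F,
# which checksum_is_valid() accepts because the summed bytes wrap to 0xFF.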
handler = getattr(self, 'handle_%s' % self.fsm.state.lower())
handler(pdata) | null |
5,809 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Generate vm_impl function for math ops"""
import copy
import numpy as np
from mindspore.common.dtype import dtype_to_nptype
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.ops.vm_impl_registry import vm_impl_registry as vm_impl_getters
from .vm_interface import vm
# pylint: disable=unused-argument
@vm_impl_getters.register(P.ZerosLike)
def vm_impl_zeroslike(self):
def vm_impl(x):
x = x.asnumpy()
out = np.zeros_like(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Zeros)
def vm_impl_zeros(self):
def vm_impl(x, y):
out = np.zeros(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Ones)
def METHOD_NAME(self):
def vm_impl(x, y):
out = np.ones(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Log)
def vm_impl_log(self):
def vm_impl(x):
x = x.asnumpy()
out = np.log(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Add)
def vm_impl_tensor_add(self):
"""Generate vm_impl function for TensorAdd."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
return Tensor(x + y)
return vm_impl
# pylint: disable=used-before-assignment
@vm_impl_getters.register(P.LogicalNot)
def vm_impl_logical_not(self):
def vm_impl(x):
x = x.asnumpy()
out = vm.logical_not(x)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.MatMul)
def vm_impl_mat_mul(self):
"""Generate vm_impl function for MatMul."""
def vm_impl(x, w):
x = x.asnumpy()
w = w.asnumpy()
if self.transpose_a:
x = x.transpose()
if self.transpose_b:
w = w.transpose()
z = x @ w
return Tensor(z)
return vm_impl
@vm_impl_getters.register(P.AddN)
def vm_impl_addn(self):
"""Generate vm_impl function for AddN."""
def vm_impl(inputs):
added = copy.deepcopy(inputs[0].asnumpy())
for x in inputs[1:]:
added += x.asnumpy()
return Tensor(added)
return vm_impl
@vm_impl_getters.register(P.Neg)
def vm_impl_neg(self):
"""Generate vm_impl function for Neg."""
def vm_impl(x):
x = x.asnumpy()
return Tensor(-x)
return vm_impl
@vm_impl_getters.register(P.Sub)
def vm_impl_Sub(self):
"""Generate vm_impl function for Sub."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
return Tensor(x - y)
return vm_impl
@vm_impl_getters.register(P.Mul)
def vm_impl_mul(self):
"""Generate vm_impl function for Mul."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
return Tensor(x * y)
return vm_impl
@vm_impl_getters.register(P.Conj)
def vm_impl_conj(self):
"""Generate vm_impl function for Conj."""
def vm_impl(x):
x = x.asnumpy()
t = np.conj(x)
return Tensor(t)
return vm_impl
@vm_impl_getters.register(P.Square)
def vm_impl_square(self):
"""Generate vm_impl function for Square."""
def vm_impl(x):
x = x.asnumpy()
return Tensor(x * x)
return vm_impl
@vm_impl_getters.register(P.Sqrt)
def vm_impl_sqrt(self):
"""Generate vm_impl function for Sqrt."""
def vm_impl(x):
x = x.asnumpy()
res = vm.sqrt(x)
return Tensor(res)
return vm_impl
@vm_impl_getters.register(P.Pow)
def vm_impl_pow(self):
"""Generate vm_impl function for Pow."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
res = vm.power(x, y)
return Tensor(res)
return vm_impl
@vm_impl_getters.register(P.Exp)
def vm_impl_exp(self):
"""Generate vm_impl function for Exp."""
def vm_impl(x):
x = x.asnumpy()
res = vm.exp(x)
return Tensor(res)
return vm_impl
@vm_impl_getters.register(P.RealDiv)
def vm_impl_real_div(self):
"""Generate vm_impl function for RealDiv."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = x / y
out = np.array(out, x.dtype)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Div)
def vm_impl_div(self):
"""Generate vm_impl function for Div."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
return Tensor(x / y)
return vm_impl
@vm_impl_getters.register(P.ReduceMean)
def vm_impl_reduce_mean(self):
"""Generate vm_impl function for ReduceMean."""
def vm_impl(x, axis):
x = x.asnumpy()
out = vm.mean(x, axis)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.ReduceMax)
def vm_impl_reduce_max(self):
"""Generate vm_impl function for ReduceMean."""
def vm_impl(x, axis):
x = x.asnumpy()
if axis == ():
axis = None
out = np.amax(x, axis)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Equal)
def vm_impl_equal(self):
"""Generate vm_impl function for Equal."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.equal(x, y)
return Tensor(np.array(out))
return vm_impl
@vm_impl_getters.register(P.NotEqual)
def vm_impl_not_equal(self):
"""Generate vm_impl function for NotEqual."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.not_equal(x, y)
return Tensor(np.array(out))
return vm_impl
@vm_impl_getters.register(P.Greater)
def vm_impl_greater(self):
"""Generate vm_impl function for Greater."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.greater(x, y)
return Tensor(np.array(out))
return vm_impl
@vm_impl_getters.register(P.Maximum)
def vm_impl_maximum(self):
"""Generate vm_impl function for Maximum."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.maximum(x, y)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Minimum)
def vm_impl_minimum(self):
"""Generate vm_impl function for Minimum."""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.minimum(x, y)
return Tensor(out)
return vm_impl
@vm_impl_getters.register(P.Less)
def vm_impl_less(self):
"""Generate vm_impl function for Less"""
def vm_impl(x, y):
x = x.asnumpy()
y = y.asnumpy()
out = vm.less(x, y)
return Tensor(np.array(out))
return vm_impl
@vm_impl_getters.register(P.ScalarCast)
def vm_impl_scalar_cast(self):
"""Generate vm_impl function for ScalarCast"""
def vm_impl(x, t):
np_type = dtype_to_nptype(t)
value = np_type(x)
cast_value = value.item()
return cast_value
return vm_impl | null |
5,810 | from typing import Collection, Dict, Optional, Tuple, Union, cast
from ..language import DirectiveLocation
from ..pyutils import inspect, merge_kwargs, natural_comparison_key
from ..type import (
GraphQLArgument,
GraphQLDirective,
GraphQLEnumType,
GraphQLEnumValue,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInputType,
GraphQLInterfaceType,
GraphQLList,
GraphQLNamedType,
GraphQLNonNull,
GraphQLObjectType,
GraphQLSchema,
GraphQLUnionType,
is_enum_type,
is_input_object_type,
is_interface_type,
is_introspection_type,
is_list_type,
is_non_null_type,
is_object_type,
is_scalar_type,
is_union_type,
)
__all__ = ["lexicographic_sort_schema"]
def lexicographic_sort_schema(schema: GraphQLSchema) -> GraphQLSchema:
"""Sort GraphQLSchema.
This function returns a sorted copy of the given GraphQLSchema.
"""
def replace_type(
type_: Union[GraphQLList, GraphQLNonNull, GraphQLNamedType]
) -> Union[GraphQLList, GraphQLNonNull, GraphQLNamedType]:
if is_list_type(type_):
return GraphQLList(replace_type(type_.of_type))
if is_non_null_type(type_):
return GraphQLNonNull(replace_type(type_.of_type))
return replace_named_type(cast(GraphQLNamedType, type_))
def replace_named_type(type_: GraphQLNamedType) -> GraphQLNamedType:
return type_map[type_.name]
def replace_maybe_type(
maybe_type: Optional[GraphQLNamedType],
) -> Optional[GraphQLNamedType]:
return maybe_type and replace_named_type(maybe_type)
def sort_directive(directive: GraphQLDirective) -> GraphQLDirective:
return GraphQLDirective(
**merge_kwargs(
directive.to_kwargs(),
locations=sorted(directive.locations, key=sort_by_name_key),
args=sort_args(directive.args),
)
)
def sort_args(args_map: Dict[str, GraphQLArgument]) -> Dict[str, GraphQLArgument]:
args = {}
for name, arg in sorted(args_map.items()):
args[name] = GraphQLArgument(
**merge_kwargs(
arg.to_kwargs(),
type_=replace_type(cast(GraphQLNamedType, arg.type)),
)
)
return args
def sort_fields(fields_map: Dict[str, GraphQLField]) -> Dict[str, GraphQLField]:
fields = {}
for name, field in sorted(fields_map.items()):
fields[name] = GraphQLField(
**merge_kwargs(
field.to_kwargs(),
type_=replace_type(cast(GraphQLNamedType, field.type)),
args=sort_args(field.args),
)
)
return fields
def sort_input_fields(
fields_map: Dict[str, GraphQLInputField]
) -> Dict[str, GraphQLInputField]:
return {
name: GraphQLInputField(
cast(
GraphQLInputType, replace_type(cast(GraphQLNamedType, field.type))
),
description=field.description,
default_value=field.default_value,
ast_node=field.ast_node,
)
for name, field in sorted(fields_map.items())
}
def sort_types(array: Collection[GraphQLNamedType]) -> Tuple[GraphQLNamedType, ...]:
return tuple(
replace_named_type(type_) for type_ in sorted(array, key=sort_by_name_key)
)
def METHOD_NAME(type_: GraphQLNamedType) -> GraphQLNamedType:
if is_scalar_type(type_) or is_introspection_type(type_):
return type_
if is_object_type(type_):
return GraphQLObjectType(
**merge_kwargs(
type_.to_kwargs(),
interfaces=lambda: sort_types(type_.interfaces),
fields=lambda: sort_fields(type_.fields),
)
)
if is_interface_type(type_):
return GraphQLInterfaceType(
**merge_kwargs(
type_.to_kwargs(),
interfaces=lambda: sort_types(type_.interfaces),
fields=lambda: sort_fields(type_.fields),
)
)
if is_union_type(type_):
return GraphQLUnionType(
**merge_kwargs(type_.to_kwargs(), types=lambda: sort_types(type_.types))
)
if is_enum_type(type_):
return GraphQLEnumType(
**merge_kwargs(
type_.to_kwargs(),
values={
name: GraphQLEnumValue(
val.value,
description=val.description,
deprecation_reason=val.deprecation_reason,
ast_node=val.ast_node,
)
for name, val in sorted(type_.values.items())
},
)
)
if is_input_object_type(type_):
return GraphQLInputObjectType(
**merge_kwargs(
type_.to_kwargs(),
fields=lambda: sort_input_fields(type_.fields),
)
)
# Not reachable. All possible types have been considered.
raise TypeError(f"Unexpected type: {inspect(type_)}.")
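    # Build the sorted type map eagerly; the replace_* helpers above resolve named-type
    # references against it (lazily for fields/interfaces, via the lambdas).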
type_map: Dict[str, GraphQLNamedType] = {
type_.name: METHOD_NAME(type_)
for type_ in sorted(schema.type_map.values(), key=sort_by_name_key)
}
return GraphQLSchema(
types=type_map.values(),
directives=[
sort_directive(directive)
for directive in sorted(schema.directives, key=sort_by_name_key)
],
query=cast(Optional[GraphQLObjectType], replace_maybe_type(schema.query_type)),
mutation=cast(
Optional[GraphQLObjectType], replace_maybe_type(schema.mutation_type)
),
subscription=cast(
Optional[GraphQLObjectType], replace_maybe_type(schema.subscription_type)
),
ast_node=schema.ast_node,
)
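# Usage sketch (illustrative only; build_schema/print_schema come from the public
# graphql package and are not imported in this module):
#     schema = build_schema("type Query { b: Int a: Int }")
#     print(print_schema(lexicographic_sort_schema(schema)))  # types/fields print in name order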
def sort_by_name_key(
type_: Union[GraphQLNamedType, GraphQLDirective, DirectiveLocation]
) -> Tuple:
return natural_comparison_key(type_.name) | null |
5,811 | """
pyexcel.book
~~~~~~~~~~~~~~~~~~~
Excel book
:copyright: (c) 2014-2022 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
from pyexcel import _compact as compact
from pyexcel.sheet import Sheet
from pyexcel.internal.meta import BookMeta
from pyexcel.internal.common import SheetIterator
LOCAL_UUID = 0
class Book(BookMeta):
"""
Read an excel book that has one or more sheets
For csv file, there will be just one sheet
"""
def __init__(self, sheets=None, filename="memory", path=None):
"""
Book constructor
Selecting a specific book according to filename extension
:param sheets: a dictionary of data
:param filename: the physical file
:param path: the relative path or absolute path
:param keywords: additional parameters to be passed on
"""
self.__path = None
self.__name_array = []
self.filename = None
self.__sheets = compact.OrderedDict()
self.init(sheets=sheets, filename=filename, path=path)
    def init(self, sheets=None, filename="memory", path=None):
        """Independent function so that it can be called multiple times."""
self.__path = path
self.filename = filename
self.load_from_sheets(sheets)
def load_from_sheets(self, sheets):
"""
Load content from existing sheets
:param dict sheets: a dictionary of sheets. Each sheet is
a list of lists
"""
if sheets is None:
return
keys = sheets.keys()
for name in keys:
value = sheets[name]
if isinstance(value, Sheet):
sheet = value
sheet.name = name
else:
# array
sheet = Sheet(value, name)
            # the ordered dict keeps the sheet order
self.__sheets.update({name: sheet})
            # this provides convenient attribute access to the sheet
self.__dict__[name.replace(" ", "_")] = sheet
self.__name_array = list(self.__sheets.keys())
def __iter__(self):
return SheetIterator(self)
def __len__(self):
return len(self.__name_array)
def METHOD_NAME(self, key=None, reverse=False):
self.__name_array = sorted(self.__name_array, key=key, reverse=reverse)
def number_of_sheets(self):
"""
Return the number of sheets
"""
return len(self)
def sheet_names(self):
"""
Return all sheet names
"""
return self.__name_array
def sheet_by_name(self, name):
"""
Get the sheet with the specified name
"""
return self.__sheets[name]
def sheet_by_index(self, index):
"""
Get the sheet with the specified index
"""
if index < len(self.__name_array):
sheet_name = self.__name_array[index]
return self.sheet_by_name(sheet_name)
def remove_sheet(self, sheet):
"""
Remove a sheet
"""
if isinstance(sheet, int):
if sheet < len(self.__name_array):
sheet_name = self.__name_array[sheet]
del self.__sheets[sheet_name]
self.__name_array = list(self.__sheets.keys())
else:
raise IndexError
elif isinstance(sheet, str):
if sheet in self.__name_array:
del self.__sheets[sheet]
self.__name_array = list(self.__sheets.keys())
else:
raise KeyError
else:
raise TypeError
def __getitem__(self, key):
"""Override operator[]"""
if isinstance(key, int):
return self.sheet_by_index(key)
return self.sheet_by_name(key)
def __delitem__(self, other):
"""
Override del book[index]
"""
self.remove_sheet(other)
return self
def __add__(self, other):
"""
Override operator +
example::
book3 = book1 + book2
book3 = book1 + book2["Sheet 1"]
"""
content = {}
current_dict = self.to_dict()
for k in current_dict.keys():
new_key = k
if len(current_dict.keys()) == 1:
new_key = "%s_%s" % (self.filename, k)
content[new_key] = current_dict[k]
if isinstance(other, Book):
other_dict = other.to_dict()
for key in other_dict.keys():
new_key = key
if len(other_dict.keys()) == 1:
new_key = other.filename
if new_key in content:
uid = local_uuid()
new_key = "%s_%s" % (key, uid)
content[new_key] = other_dict[key]
elif isinstance(other, Sheet):
new_key = other.name
if new_key in content:
uid = local_uuid()
new_key = "%s_%s" % (other.name, uid)
content[new_key] = other.array
else:
raise TypeError
output = Book()
output.load_from_sheets(content)
return output
def __iadd__(self, other):
"""
Operator overloading +=
example::
book += book2
book += book2["Sheet1"]
"""
if isinstance(other, Book):
names = other.sheet_names()
for name in names:
new_key = name
if len(names) == 1:
new_key = other.filename
if new_key in self.__name_array:
uid = local_uuid()
new_key = "%s_%s" % (name, uid)
self.__sheets[new_key] = Sheet(other[name].array, new_key)
elif isinstance(other, Sheet):
new_key = other.name
if new_key in self.__name_array:
uid = local_uuid()
new_key = "%s_%s" % (other.name, uid)
self.__sheets[new_key] = Sheet(other.array, new_key)
else:
raise TypeError
self.__name_array = list(self.__sheets.keys())
return self
def to_dict(self):
"""Convert the book to a dictionary"""
the_dict = compact.OrderedDict()
for sheet in self:
the_dict.update({sheet.name: sheet.array})
return the_dict
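# Minimal usage sketch (hypothetical in-memory data, for illustration only):
#     book = Book(sheets={"Sheet 1": [[1, 2], [3, 4]], "Sheet 2": [[5, 6]]})
#     book.sheet_names()       # -> ['Sheet 1', 'Sheet 2']
#     book.number_of_sheets()  # -> 2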
def to_book(bookstream):
"""Convert a bookstream to Book"""
if isinstance(bookstream, Book):
return bookstream
return Book(
bookstream.to_dict(),
filename=bookstream.filename,
path=bookstream.path,
)
def local_uuid():
"""create home made uuid"""
global LOCAL_UUID
LOCAL_UUID = LOCAL_UUID + 1
return LOCAL_UUID | null |
5,812 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest.mock import call
from unittest import mock
from sans.common.enums import OutputMode, RowState
from sans.gui_logic.models.async_workers.sans_run_tab_async import SansRunTabAsync
class SansRunTabAsyncTest(unittest.TestCase):
def setUp(self):
self.notify_progress = mock.MagicMock()
self.notify_done = mock.MagicMock()
self.notify_error = mock.MagicMock()
self._mock_rows = [(mock.Mock(), i) for i in range(3)]
self.async_worker = SansRunTabAsync(self.notify_progress, self.notify_done, self.notify_error)
worker = self.async_worker
worker.set_unit_test_mode(True)
# Mock out various expensive methods
worker._notify_progress_signal = mock.create_autospec(worker._notify_progress_signal)
worker.batch_processor = mock.create_autospec(worker.batch_processor)
def test_that_notify_done_method_set_correctly(self):
self.async_worker.success_cb_slot(mock.NonCallableMock())
self.notify_done.assert_called_once_with()
def METHOD_NAME(self):
get_states_mock = mock.MagicMock()
states = {0: mock.MagicMock()}
errors = {}
get_states_mock.return_value = states, errors
expected_shift_scale_factors = (1.1, 2.2)
self.async_worker.batch_processor.return_value = expected_shift_scale_factors
self.async_worker.process_states_on_thread(
row_index_pairs=self._mock_rows,
get_states_func=get_states_mock,
use_optimizations=False,
output_mode=OutputMode.BOTH,
plot_results=False,
output_graph="",
)
for row, _ in self._mock_rows:
self.assertEqual(RowState.PROCESSED, row.state)
self.assertIsNone(row.tool_tip)
self.assertEqual(self.async_worker.batch_processor.call_count, 3)
expected_emit_calls = [call(i, [], []) for i in range(len(self._mock_rows))]
self.async_worker._notify_progress_signal.signal.emit.assert_has_calls(expected_emit_calls, any_order=True)
def test_that_process_states_emits_row_failed_information(self):
self.async_worker.batch_processor.side_effect = Exception("failure")
get_states_mock = mock.MagicMock()
states = {0: mock.MagicMock()}
errors = {}
get_states_mock.return_value = states, errors
self.async_worker.process_states_on_thread(
row_index_pairs=self._mock_rows,
get_states_func=get_states_mock,
use_optimizations=False,
output_mode=OutputMode.BOTH,
plot_results=False,
output_graph="",
)
for row, _ in self._mock_rows:
self.assertEqual(RowState.ERROR, row.state)
self.assertEqual("failure", row.tool_tip)
def test_that_process_states_emits_row_failed_information_when_get_states_returns_error(self):
get_states_mock = mock.MagicMock()
states = {}
errors = {row[0]: "error message" for row in self._mock_rows}
get_states_mock.return_value = states, errors
self.async_worker.process_states_on_thread(
row_index_pairs=self._mock_rows,
get_states_func=get_states_mock,
use_optimizations=False,
output_mode=OutputMode.BOTH,
plot_results=False,
output_graph="",
)
for row, _ in self._mock_rows:
self.assertEqual(RowState.ERROR, row.state)
self.assertEqual("error message", row.tool_tip)
def test_that_process_states_emits_row_failed_information_when_get_states_throws(self):
get_states_mock = mock.MagicMock()
get_states_mock.side_effect = Exception("failure")
self.async_worker.process_states_on_thread(
row_index_pairs=self._mock_rows,
get_states_func=get_states_mock,
use_optimizations=False,
output_mode=OutputMode.BOTH,
plot_results=False,
output_graph="",
)
for row, _ in self._mock_rows:
self.assertEqual(RowState.ERROR, row.state)
self.assertEqual("failure", row.tool_tip)
@mock.patch("sans.gui_logic.models.async_workers.sans_run_tab_async.load_workspaces_from_states")
def test_that_load_workspaces_sets_row_to_processed(self, mocked_loader):
states = {0: mock.MagicMock()}
errors = {}
get_states_mock = mock.MagicMock()
get_states_mock.return_value = states, errors
self.async_worker.load_workspaces_on_thread(row_index_pairs=self._mock_rows, get_states_func=get_states_mock)
self.assertEqual(len(self._mock_rows), mocked_loader.call_count)
for row, _ in self._mock_rows:
self.assertEqual(RowState.PROCESSED, row.state)
self.assertIsNone(row.tool_tip)
@mock.patch("sans.gui_logic.models.async_workers.sans_run_tab_async.load_workspaces_from_states")
def test_that_load_workspaces_sets_rows_to_error(self, mocked_loader):
mocked_loader.side_effect = Exception("failure")
states = {0: mock.MagicMock()}
errors = {}
get_states_mock = mock.MagicMock()
get_states_mock.return_value = states, errors
self.async_worker.load_workspaces_on_thread(row_index_pairs=self._mock_rows, get_states_func=get_states_mock)
self.assertEqual(len(self._mock_rows), mocked_loader.call_count)
for row, _ in self._mock_rows:
self.assertEqual(RowState.ERROR, row.state)
self.assertEqual("failure", row.tool_tip)
def test_success_cb_triggers_notify_done(self):
self.async_worker.success_cb_slot(mock.NonCallableMock())
self.notify_done.assert_called_once()
def test_error_cb_triggers_with_stack_trace(self):
expected = mock.NonCallableMagicMock()
self.async_worker.error_cb_slot(expected)
self.notify_error.assert_called_once_with(str(expected))
if __name__ == "__main__":
unittest.main() | null |
5,813 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops import operations as P
class Net(nn.Cell):
def __init__(self, reduction):
super(Net, self).__init__()
self.loss = P.NLLLoss(reduction=reduction)
def construct(self, predict, target, weight):
return self.loss(predict, target, weight)
class NLLLossGradNet(nn.Cell):
def __init__(self, reduction):
super(NLLLossGradNet, self).__init__()
self.grad = G.NLLLossGrad(reduction=reduction)
def construct(self, x, dout_x, target, weight, total_weight):
gout = self.grad(x, dout_x, target, weight, total_weight)
return gout
def nll_loss_template(nptype_input, nptype_weight, reduction):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
nll_loss_net = Net(reduction)
predict = Tensor(
np.array([[0.53, 0.74, -2.12], [1.29, -0.34, -1.13]]).astype(nptype_input))
target = Tensor(np.array([0, 1]).astype(np.int32))
weight = Tensor(np.array([0.45, -0.32, 1.21]).astype(nptype_weight))
loss, total_weight = nll_loss_net(predict, target, weight)
loss_np = loss.asnumpy()
total_weight_np = total_weight.asnumpy()
expected_tot_weight = np.array(0.129999995)
if reduction == 'none':
expected_loss = np.array([-0.238499984, -0.108800001])
elif reduction == 'mean':
expected_loss = np.array(-2.67153859)
elif reduction == 'sum':
expected_loss = np.array(-0.347299993)
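    # Hand-computed check of the expected values above:
    #   per-sample loss_i = -weight[target_i] * predict[i, target_i]
    #   -> [-0.45 * 0.53, -(-0.32) * (-0.34)] = [-0.2385, -0.1088]
    #   sum = -0.3473; total_weight = 0.45 + (-0.32) = 0.13; mean = sum / total_weight ≈ -2.6715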
if nptype_input == np.float32 and nptype_weight == np.float32:
ertol_loss = 1e-06
elif nptype_input == np.float16 or nptype_weight == np.float16:
ertol_loss = 1e-03
if nptype_weight == np.float32:
ertol_weight = 1e-06
elif nptype_weight == np.float16:
ertol_weight = 1e-03
np.testing.assert_allclose(loss_np, expected_loss, ertol_loss)
np.testing.assert_allclose(
total_weight_np, expected_tot_weight, ertol_weight)
def nll_loss_grad_template(nptype_input, nptype_weight, reduction):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
nll_loss_grad_net = NLLLossGradNet(reduction)
x = Tensor(
np.array([[0.53, 0.74, -2.12], [1.29, -0.34, -1.13]]).astype(nptype_input))
if reduction == "none":
dloss = Tensor(
np.array([3.24, -2.13]).astype(nptype_input))
else:
dloss = Tensor(np.array(1.23).astype(nptype_input))
target = Tensor(np.array([0, 1]).astype(np.int32))
weight = Tensor(np.array([0.45, -0.32, 1.21]).astype(nptype_weight))
total_weight = Tensor(np.array(0.13).astype(nptype_weight))
dx = nll_loss_grad_net(x, dloss, target, weight, total_weight)
dx_np = dx.asnumpy()
print(dx)
if reduction == "none":
dx_expected = np.array([[-1.45799994, 0, 0], [0, -0.681600034, 0]])
elif reduction == "mean":
dx_expected = np.array([[-4.25769234, 0, 0], [0, 3.02769232, 0]])
else:
dx_expected = np.array([[-0.553499997, 0, 0], [0, 0.393599987, 0]])
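    # Hand-computed check: dloss/dx[i, target_i] = -weight[target_i] * scale, where scale is
    # dloss[i] for 'none', dloss / total_weight for 'mean', and dloss for 'sum';
    # e.g. 'mean': [-0.45 * 1.23 / 0.13, 0.32 * 1.23 / 0.13] ≈ [-4.2577, 3.0277]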
if nptype_input == np.float32 and nptype_weight == np.float32:
ertol_loss = 1e-06
else:
ertol_loss = 1e-02
np.testing.assert_allclose(dx_np, dx_expected, ertol_loss)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_no_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "none")
nll_loss_template(np.float32, np.float16, "none")
nll_loss_template(np.float16, np.float32, "none")
nll_loss_template(np.float16, np.float16, "none")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_mean_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "mean")
nll_loss_template(np.float32, np.float16, "mean")
nll_loss_template(np.float16, np.float32, "mean")
nll_loss_template(np.float16, np.float16, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_template(np.float32, np.float32, "sum")
nll_loss_template(np.float32, np.float16, "sum")
nll_loss_template(np.float16, np.float32, "sum")
nll_loss_template(np.float16, np.float16, "sum")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_grad_mean_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "mean")
nll_loss_grad_template(np.float32, np.float16, "mean")
nll_loss_grad_template(np.float16, np.float32, "mean")
nll_loss_grad_template(np.float16, np.float16, "mean")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_grad_sum_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "sum")
nll_loss_grad_template(np.float32, np.float16, "sum")
nll_loss_grad_template(np.float16, np.float32, "sum")
nll_loss_grad_template(np.float16, np.float16, "sum")
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_nll_loss_grad_no_reduction():
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "none")
nll_loss_grad_template(np.float32, np.float16, "none")
nll_loss_grad_template(np.float16, np.float32, "none")
nll_loss_grad_template(np.float16, np.float16, "none") | null |
5,814 | import functools
from typing import List
from sympy.core.basic import Basic
from sympy.core.containers import Tuple
from sympy.core.singleton import S
from sympy.core.sympify import _sympify
from sympy.tensor.array.mutable_ndim_array import MutableNDimArray
from sympy.tensor.array.ndim_array import NDimArray, ImmutableNDimArray, ArrayKind
from sympy.utilities.iterables import flatten
class DenseNDimArray(NDimArray):
_array: List[Basic]
def __new__(self, *args, **kwargs):
return ImmutableDenseNDimArray(*args, **kwargs)
@property
def kind(self) -> ArrayKind:
return ArrayKind._union(self._array)
def __getitem__(self, index):
"""
Allows to get items from N-dim array.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([0, 1, 2, 3], (2, 2))
>>> a
[[0, 1], [2, 3]]
>>> a[0, 0]
0
>>> a[1, 1]
3
>>> a[0]
[0, 1]
>>> a[1]
[2, 3]
Symbolic index:
>>> from sympy.abc import i, j
>>> a[i, j]
[[0, 1], [2, 3]][i, j]
Replace `i` and `j` to get element `(1, 1)`:
>>> a[i, j].subs({i: 1, j: 1})
3
"""
syindex = self._check_symbolic_index(index)
if syindex is not None:
return syindex
index = self._check_index_for_getitem(index)
if isinstance(index, tuple) and any(isinstance(i, slice) for i in index):
sl_factors, eindices = self._get_slice_data_for_array_access(index)
array = [self._array[self._parse_index(i)] for i in eindices]
nshape = [len(el) for i, el in enumerate(sl_factors) if isinstance(index[i], slice)]
return type(self)(array, nshape)
else:
index = self._parse_index(index)
return self._array[index]
@classmethod
def zeros(cls, *shape):
list_length = functools.reduce(lambda x, y: x*y, shape, S.One)
return cls._new(([0]*list_length,), shape)
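    # e.g. MutableDenseNDimArray.zeros(2, 3) -> [[0, 0, 0], [0, 0, 0]]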
def tomatrix(self):
"""
Converts MutableDenseNDimArray to Matrix. Can convert only 2-dim array, else will raise error.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1 for i in range(9)], (3, 3))
>>> b = a.tomatrix()
>>> b
Matrix([
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]])
"""
from sympy.matrices import Matrix
if self.rank() != 2:
raise ValueError('Dimensions must be of size of 2')
return Matrix(self.shape[0], self.shape[1], self._array)
def reshape(self, *newshape):
"""
Returns MutableDenseNDimArray instance with new shape. Elements number
must be suitable to new shape. The only argument of method sets
new shape.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray([1, 2, 3, 4, 5, 6], (2, 3))
>>> a.shape
(2, 3)
>>> a
[[1, 2, 3], [4, 5, 6]]
>>> b = a.reshape(3, 2)
>>> b.shape
(3, 2)
>>> b
[[1, 2], [3, 4], [5, 6]]
"""
new_total_size = functools.reduce(lambda x,y: x*y, newshape)
if new_total_size != self._loop_size:
raise ValueError('Expecting reshape size to %d but got prod(%s) = %d' % (
self._loop_size, str(newshape), new_total_size))
# there is no `.func` as this class does not subtype `Basic`:
return type(self)(self._array, newshape)
class ImmutableDenseNDimArray(DenseNDimArray, ImmutableNDimArray): # type: ignore
def __new__(cls, iterable, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
shape = Tuple(*map(_sympify, shape))
cls._check_special_bounds(flat_list, shape)
flat_list = flatten(flat_list)
flat_list = Tuple(*flat_list)
self = Basic.__new__(cls, flat_list, shape, **kwargs)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape, 1)
return self
def __setitem__(self, index, value):
raise TypeError('immutable N-dim array')
def METHOD_NAME(self):
return MutableDenseNDimArray(self)
def _eval_simplify(self, **kwargs):
from sympy.simplify.simplify import simplify
return self.applyfunc(simplify)
class MutableDenseNDimArray(DenseNDimArray, MutableNDimArray):
def __new__(cls, iterable=None, shape=None, **kwargs):
return cls._new(iterable, shape, **kwargs)
@classmethod
def _new(cls, iterable, shape, **kwargs):
shape, flat_list = cls._handle_ndarray_creation_inputs(iterable, shape, **kwargs)
flat_list = flatten(flat_list)
self = object.__new__(cls)
self._shape = shape
self._array = list(flat_list)
self._rank = len(shape)
self._loop_size = functools.reduce(lambda x,y: x*y, shape) if shape else len(flat_list)
return self
def __setitem__(self, index, value):
"""Allows to set items to MutableDenseNDimArray.
Examples
========
>>> from sympy import MutableDenseNDimArray
>>> a = MutableDenseNDimArray.zeros(2, 2)
>>> a[0,0] = 1
>>> a[1,1] = 1
>>> a
[[1, 0], [0, 1]]
"""
if isinstance(index, tuple) and any(isinstance(i, slice) for i in index):
value, eindices, slice_offsets = self._get_slice_data_for_array_assignment(index, value)
for i in eindices:
other_i = [ind - j for ind, j in zip(i, slice_offsets) if j is not None]
self._array[self._parse_index(i)] = value[other_i]
else:
index = self._parse_index(index)
self._setter_iterable_check(value)
value = _sympify(value)
self._array[index] = value
def as_immutable(self):
return ImmutableDenseNDimArray(self)
@property
def free_symbols(self):
return {i for j in self._array for i in j.free_symbols} | null |
5,815 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test data sink"""
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import ops as P
import mindspore.dataset as ds
from mindspore import Tensor, context
from mindspore.train.data_sink import data_sink
def fixed_dataset_generator():
for _ in range(1, 10):
yield (
np.ones((3, 2048, 7, 7), dtype=np.float32),
np.ones((3, 1000), dtype=np.float32))
def dynamic_dataset_generator_cell():
for i in range(1, 10):
yield (
np.ones((i, 2048, 7, 7), dtype=np.float32),
np.ones((i, 1000), dtype=np.float32))
def dynamic_dataset_generator_func():
for i in range(1, 10):
yield (
np.ones((i), dtype=np.float32),
np.ones((i), dtype=np.float32))
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.dense = nn.Dense()
self.relu = nn.ReLU()
def construct(self, x):
x = self.dense(x)
x = self.relu(x)
return x
class ReluReduceMeanDenseRelu(nn.Cell):
def __init__(self, kernel, bias, in_channel, num_class):
super().__init__()
self.relu = P.ReLU()
self.mean = P.ReduceMean(keep_dims=False)
self.dense = nn.Dense(in_channel, num_class, kernel, bias)
def construct(self, x_):
x_ = self.relu(x_)
x_ = self.mean(x_, (2, 3))
x_ = self.dense(x_)
x_ = self.relu(x_)
return x_
def METHOD_NAME(model, dataset, loss_fn, opt, input_signature=None):
def forward_fn(data, label):
logits = model(data)
loss = loss_fn(logits, label)
return loss, logits
grad_fn = P.value_and_grad(forward_fn, None, opt.parameters, has_aux=True)
model.set_train()
def train_step(data, label):
(loss, _), grads = grad_fn(data, label)
loss = P.depend(loss, opt(grads))
return loss
data_size = dataset.get_dataset_size()
epochs = 5
steps = data_size * epochs
sink_size = data_size
jit = ms.JitConfig()
sink_process = data_sink(train_step, dataset, sink_size=sink_size, jit_config=jit, input_signature=input_signature)
for _ in range(steps):
loss = sink_process()
print("loss: ", loss)
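# Note (informal): the function returned by data_sink feeds `sink_size` dataset steps to the
# device per call, so with sink_size == dataset size each call of sink_process() is roughly
# one pass over the data.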
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_data_sink_fixed_shape(mode):
"""
Feature: mindspore.train.data_sink
Description: test data_sink with fixed-shape dataset.
Expectation: Success.
"""
context.set_context(mode=mode)
weight = Tensor(np.ones((1000, 2048)).astype(np.float32))
bias = Tensor(np.ones((1000,)).astype(np.float32))
network = ReluReduceMeanDenseRelu(weight, bias, 2048, 1000)
dataset = ds.GeneratorDataset(
fixed_dataset_generator, ["data", "label"])
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
METHOD_NAME(network, dataset, loss_fn, opt)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.skip(reason='Have ops issue, not support yet')
def test_data_sink_dynamic_shape(mode):
"""
Feature: mindspore.train.data_sink
Description: test data_sink with dynamic shape dataset.
Expectation: Success.
"""
context.set_context(mode=mode)
weight = Tensor(np.ones((1000, 2048)).astype(np.float32))
bias = Tensor(np.ones((1000,)).astype(np.float32))
network = ReluReduceMeanDenseRelu(weight, bias, 2048, 1000)
dataset = ds.GeneratorDataset(dynamic_dataset_generator_cell, ["data", "label"])
loss_fn = nn.SoftmaxCrossEntropyWithLogits(sparse=False)
opt = nn.Momentum(network.trainable_params(), 0.01, 0.9)
input_signature = (Tensor(shape=[None, 2048, 7, 7], dtype=ms.float32),
Tensor(shape=[None, 1000], dtype=ms.float32))
METHOD_NAME(network, dataset, loss_fn, opt, input_signature)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_function_data_sink_dynamic_shape(mode):
"""
Feature: mindspore.train.data_sink
Description: test data_sink with dynamic shape dataset.
Expectation: Success.
"""
context.set_context(mode=mode)
dataset = ds.GeneratorDataset(dynamic_dataset_generator_func, ["data", "label"])
def func_net(x, y):
out = x + y
return out
data_size = dataset.get_dataset_size()
epochs = 5
steps = data_size * epochs
sink_size = data_size
jit = ms.JitConfig()
input_signature = (Tensor(shape=[None,], dtype=ms.float32), Tensor(shape=[None,], dtype=ms.float32))
sink_process = data_sink(func_net, dataset, sink_size=sink_size, jit_config=jit, input_signature=input_signature)
for _ in range(steps):
out = sink_process()
print("out: ", out) | null |
5,816 | #!/usr/bin/env python3
# Script for erasing all data about a user from the database.
# Intended for GDPR erasure requests.
#
# NOTE: We recommend implementing a "GDPR Erasure Ban" on the user's last IP/HWID before erasing their data, to prevent abuse.
# This is acceptable under the GDPR as a "legitimate interest" to prevent GDPR erasure being used to avoid moderation/bans.
# You would need to do this *before* running this script, to avoid losing the IP/HWID of the user entirely.
import argparse
import os
import psycopg2
from uuid import UUID
LATEST_DB_MIGRATION = "20230725193102_AdminNotesImprovementsForeignKeys"
def main():
parser = argparse.ArgumentParser()
# Yes we need both to reliably pseudonymize the admin_log table.
parser.add_argument("user_id", help="User ID to erase data for")
parser.add_argument("user_name", help="User name to erase data for")
parser.add_argument("--ignore-schema-mismatch", action="store_true")
parser.add_argument("--connection-string", required=True, help="Database connection string to use. See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING")
args = parser.parse_args()
conn = psycopg2.connect(args.connection_string)
cur = conn.cursor()
check_schema_version(cur, args.ignore_schema_mismatch)
user_id = args.user_id
user_name = args.user_name
clear_admin(cur, user_id)
pseudonymize_admin_log(cur, user_name, user_id)
clear_assigned_user_id(cur, user_id)
METHOD_NAME(cur, user_id)
clear_play_time(cur, user_id)
clear_player(cur, user_id)
clear_preference(cur, user_id)
clear_server_ban(cur, user_id)
clear_server_ban_exemption(cur, user_id)
clear_server_role_ban(cur, user_id)
clear_uploaded_resource_log(cur, user_id)
clear_whitelist(cur, user_id)
print("Committing...")
conn.commit()
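# Example invocation (script name and connection string are illustrative only):
#   python3 erase_user_data.py <user_id-guid> <user_name> \
#       --connection-string "host=localhost dbname=ss14 user=postgres"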
def check_schema_version(cur: "psycopg2.cursor", ignore_mismatch: bool):
cur.execute('SELECT "MigrationId" FROM "__EFMigrationsHistory" ORDER BY "__EFMigrationsHistory" DESC LIMIT 1')
schema_version = cur.fetchone()
    if schema_version is None:
print("Unable to read database schema version.")
exit(1)
if schema_version[0] != LATEST_DB_MIGRATION:
        print(f"Unsupported schema version of DB: '{schema_version[0]}'. Supported: {LATEST_DB_MIGRATION}")
if ignore_mismatch:
return
exit(1)
def clear_admin(cur: "psycopg2.cursor", user_id: str):
print("Clearing admin...")
cur.execute("""
DELETE FROM
admin
WHERE
user_id = %s
""", (user_id,))
def pseudonymize_admin_log(cur: "psycopg2.cursor", user_name: str, user_id: str):
print("Pseudonymizing admin_log...")
cur.execute("""
UPDATE
admin_log l
SET
message = replace(message, %s, %s)
FROM
admin_log_player lp
WHERE
lp.round_id = l.round_id AND lp.log_id = l.admin_log_id AND player_user_id = %s;
""", (user_name, user_id, user_id,))
def clear_assigned_user_id(cur: "psycopg2.cursor", user_id: str):
print("Clearing assigned_user_id...")
cur.execute("""
DELETE FROM
assigned_user_id
WHERE
user_id = %s
""", (user_id,))
def METHOD_NAME(cur: "psycopg2.cursor", user_id: str):
print("Clearing connection_log...")
cur.execute("""
DELETE FROM
connection_log
WHERE
user_id = %s
""", (user_id,))
def clear_play_time(cur: "psycopg2.cursor", user_id: str):
print("Clearing play_time...")
cur.execute("""
DELETE FROM
play_time
WHERE
player_id = %s
""", (user_id,))
def clear_player(cur: "psycopg2.cursor", user_id: str):
print("Clearing player...")
cur.execute("""
DELETE FROM
player
WHERE
user_id = %s
""", (user_id,))
def clear_preference(cur: "psycopg2.cursor", user_id: str):
print("Clearing preference...")
cur.execute("""
DELETE FROM
preference
WHERE
user_id = %s
""", (user_id,))
def clear_server_ban(cur: "psycopg2.cursor", user_id: str):
print("Clearing server_ban...")
cur.execute("""
DELETE FROM
server_ban
WHERE
player_user_id = %s
""", (user_id,))
def clear_server_ban_exemption(cur: "psycopg2.cursor", user_id: str):
print("Clearing server_ban_exemption...")
cur.execute("""
DELETE FROM
server_ban_exemption
WHERE
user_id = %s
""", (user_id,))
def clear_server_role_ban(cur: "psycopg2.cursor", user_id: str):
print("Clearing server_role_ban...")
cur.execute("""
DELETE FROM
server_role_ban
WHERE
player_user_id = %s
""", (user_id,))
def clear_uploaded_resource_log(cur: "psycopg2.cursor", user_id: str):
print("Clearing uploaded_resource_log...")
cur.execute("""
DELETE FROM
uploaded_resource_log
WHERE
user_id = %s
""", (user_id,))
def clear_whitelist(cur: "psycopg2.cursor", user_id: str):
print("Clearing whitelist...")
cur.execute("""
DELETE FROM
whitelist
WHERE
user_id = %s
""", (user_id,))
main()
# "I'm surprised you managed to write this entire Python file without spamming the word 'sus' everywhere." - Remie
| null |
5,817 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Environment for component execution."""
from collections.abc import MutableSequence, Sequence
import contextlib
import inspect
from typing import Any, List, Type, TypeVar, get_args, get_origin, Optional, Union
from tfx.orchestration.portable import data_types
from tfx.proto.orchestration import execution_result_pb2
from tfx.types import artifact as artifact_lib
from tfx.types import artifact_utils
from tfx.utils import typing_utils
from ml_metadata.proto import metadata_store_pb2
_TAny = TypeVar('_TAny')
_TArtifact = TypeVar('_TArtifact', bound=artifact_lib.Artifact)
class Environ(contextlib.ExitStack):
"""Tflex component execution environment."""
def __init__(
self,
*,
execution_info: data_types.ExecutionInfo,
executor_output: Optional[execution_result_pb2.ExecutorOutput] = None,
):
super().__init__()
self._execution_info = execution_info
self._executor_output = executor_output
def _get_updated_output_artifacts(
self, key: str
) -> Optional[List[metadata_store_pb2.Artifact]]:
if (
self._executor_output is None
or key not in self._executor_output.output_artifacts
):
return None
return list(self._executor_output.output_artifacts[key].artifacts)
def _get_updated_exec_properties(self, key) -> Optional[Any]:
if self._executor_output is None:
return None
return self._executor_output.execution_properties.get(key, default=None)
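  # strict_get resolution order (as implemented below): input artifacts, then output
  # artifacts (preferring executor-output updates), then exec_properties, then the
  # well-known execution attributes (execution_id, stateful_working_dir, tmp_dir,
  # pipeline_id, pipeline_run_id).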
def strict_get(self, name: str, type_hint: Type[_TAny]) -> _TAny:
"""Get environment value with name and type hint."""
def assert_type_hint(expected):
if type_hint != expected:
raise TypeError(f'Expected {type_hint} for {name} but got {expected}.')
def try_deserialize_artifact(
artifact: Union[metadata_store_pb2.Artifact, _TArtifact],
artifact_type: Type[_TArtifact],
) -> _TArtifact:
if isinstance(artifact, metadata_store_pb2.Artifact):
return artifact_utils.deserialize_artifact(
artifact_type.artifact_type,
artifact,
)
return artifact
def METHOD_NAME(
artifact_list: Sequence[
Union[artifact_lib.Artifact, metadata_store_pb2.Artifact]
],
*,
is_output: bool,
):
debug_target = (
f'output_dict[{name}]' if is_output else f'input_dict[{name}]'
)
if inspect.isclass(type_hint):
if issubclass(type_hint, artifact_lib.Artifact):
if len(artifact_list) != 1:
raise TypeError(
f'Expected 1 artifact for {debug_target} but got'
f' {len(artifact_list)}.'
)
result = artifact_list[0]
if isinstance(result, metadata_store_pb2.Artifact):
result = artifact_utils.deserialize_artifact(
type_hint.artifact_type, result
)
if not isinstance(result, type_hint):
raise TypeError(
f'Expected {type_hint} for {debug_target} but got'
f' {result.__class__.__name__}.'
)
return result
else:
raise TypeError(
f'Expected {type_hint} for {debug_target} but got'
f' {type_hint.__name__}.'
)
# TODO(jjong): Add PreOutputArtifact and AsyncOutputArtifact support.
if origin := get_origin(type_hint):
if origin in (list, Sequence, MutableSequence):
if args := get_args(type_hint):
artifact_type = args[0]
if inspect.isclass(artifact_type) and issubclass(
artifact_type, artifact_lib.Artifact
):
artifact_list = [
try_deserialize_artifact(a, artifact_type)
for a in artifact_list
]
if any(not isinstance(a, artifact_type) for a in artifact_list):
raise TypeError(
f'Expected {type_hint} for {debug_target} but got'
f' {artifact_list}'
)
return artifact_list
raise TypeError(
f'Invalid type hint {type_hint} for {debug_target}. Must be one of'
' `YourArtifactType`, `list[YourArtifactType]`,'
)
if name in self._execution_info.input_dict:
return METHOD_NAME(
self._execution_info.input_dict[name], is_output=False
)
if artifact_list := (
self._get_updated_output_artifacts(name)
or self._execution_info.output_dict.get(name)
):
return METHOD_NAME(
artifact_list, is_output=True
)
if result := (
self._get_updated_exec_properties(name)
or self._execution_info.exec_properties.get(name)
):
if not typing_utils.is_compatible(result, type_hint):
raise TypeError(
f'Expected {type_hint} for exec_properties[{name}] but got'
f' {result}.'
)
return result
if name == 'execution_id':
assert_type_hint(int)
return self._execution_info.execution_id
if name == 'stateful_working_dir':
assert_type_hint(str)
return self._execution_info.stateful_working_dir
if name == 'tmp_dir':
assert_type_hint(str)
return self._execution_info.tmp_dir
if name == 'pipeline_id':
assert_type_hint(str)
if self._execution_info.pipeline_info is None:
raise RuntimeError('There is no pipeline_info to get pipeline_id')
return self._execution_info.pipeline_info.id
if name == 'pipeline_run_id':
assert_type_hint(str)
return self._execution_info.pipeline_run_id
valid_names: set[str] = {
*self._execution_info.input_dict,
*self._execution_info.output_dict,
*self._execution_info.exec_properties,
'execution_id',
'stateful_working_dir',
'tmp_dir',
'pipeline_id',
'pipeline_run_id',
}
if self._executor_output is not None:
valid_names.update({
*self._executor_output.output_artifacts,
*self._executor_output.execution_properties,
})
raise AttributeError(
f'Unknown attribute {name}. Valid names: {valid_names}'
) | null |
5,818 | from rest_framework import status
from rest_framework.reverse import reverse
from .utils import make_assignment, make_doc
from api.tests.utils import CRUDMixin
from examples.models import Assignment
from projects.models import Member
from projects.tests.utils import prepare_project
from users.tests.utils import make_user
class TestAssignmentList(CRUDMixin):
def setUp(self):
self.project = prepare_project()
self.non_member = make_user()
self.example = make_doc(self.project.item)
make_assignment(self.project.item, self.example, self.project.admin)
self.data = {"example": self.example.id, "assignee": self.project.staffs[0].id}
self.url = reverse(viewname="assignment_list", args=[self.project.item.id])
def test_allow_project_member_to_list_assignments(self):
for member in self.project.members:
self.assert_fetch(member, status.HTTP_200_OK)
def test_denies_non_project_member_to_list_assignments(self):
self.assert_fetch(self.non_member, status.HTTP_403_FORBIDDEN)
def test_denies_unauthenticated_user_to_list_assignments(self):
self.assert_fetch(expected=status.HTTP_403_FORBIDDEN)
def test_allows_project_admin_to_assign(self):
response = self.assert_create(self.project.admin, status.HTTP_201_CREATED)
self.assertEqual(response.data["example"], self.data["example"])
self.assertEqual(response.data["assignee"], self.data["assignee"])
def test_denies_non_admin_to_assign(self):
for member in self.project.staffs:
self.assert_create(member, status.HTTP_403_FORBIDDEN)
def test_denies_non_project_member_to_assign(self):
self.assert_create(self.non_member, status.HTTP_403_FORBIDDEN)
def test_denies_unauthenticated_user_to_assign(self):
self.assert_create(expected=status.HTTP_403_FORBIDDEN)
class TestAssignmentDetail(CRUDMixin):
def setUp(self):
self.project = prepare_project()
self.non_member = make_user()
example = make_doc(self.project.item)
assignment = make_assignment(self.project.item, example, self.project.admin)
self.data = {"assignee": self.project.staffs[0].id}
self.url = reverse(viewname="assignment_detail", args=[self.project.item.id, assignment.id])
def test_allows_project_member_to_get_assignment(self):
for member in self.project.members:
self.assert_fetch(member, status.HTTP_200_OK)
def test_denies_non_project_member_to_get_assignment(self):
self.assert_fetch(self.non_member, status.HTTP_403_FORBIDDEN)
def METHOD_NAME(self):
self.assert_fetch(expected=status.HTTP_403_FORBIDDEN)
def test_allows_project_admin_to_reassign(self):
response = self.assert_update(self.project.admin, status.HTTP_200_OK)
self.assertEqual(response.data["assignee"], self.data["assignee"])
def test_denies_non_admin_to_reassign(self):
for member in self.project.staffs:
self.assert_update(member, status.HTTP_403_FORBIDDEN)
def test_denies_non_project_member_to_reassign(self):
self.assert_update(self.non_member, status.HTTP_403_FORBIDDEN)
def test_denies_unauthenticated_user_to_reassign(self):
self.assert_update(expected=status.HTTP_403_FORBIDDEN)
def test_allows_project_admin_to_unassign(self):
self.assert_delete(self.project.admin, status.HTTP_204_NO_CONTENT)
def test_denies_non_admin_to_unassign(self):
for member in self.project.staffs:
self.assert_delete(member, status.HTTP_403_FORBIDDEN)
def test_denies_non_project_member_to_unassign(self):
self.assert_delete(self.non_member, status.HTTP_403_FORBIDDEN)
def test_denies_unauthenticated_user_to_unassign(self):
self.assert_delete(expected=status.HTTP_403_FORBIDDEN)
class TestAssignmentBulk(CRUDMixin):
def setUp(self):
self.project = prepare_project()
self.non_member = make_user()
self.example = make_doc(self.project.item)
members = Member.objects.filter(project=self.project.item)
workloads = [{"member_id": member.id, "weight": 100} for member in members]
self.data = {"strategy_name": "sampling_without_replacement", "workloads": workloads}
self.url = reverse(viewname="bulk_assignment", args=[self.project.item.id])
def test_denies_non_admin_to_bulk_assign(self):
for member in self.project.staffs:
self.assert_create(member, status.HTTP_403_FORBIDDEN)
def test_denies_non_project_member_to_bulk_assign(self):
self.assert_create(self.non_member, status.HTTP_403_FORBIDDEN)
def test_denies_unauthenticated_user_to_bulk_assign(self):
self.assert_create(expected=status.HTTP_403_FORBIDDEN)
def test_allows_project_admin_to_bulk_assign(self):
self.assert_create(self.project.admin, status.HTTP_201_CREATED)
expected = self.project.item.examples.count() * len(self.project.members)
self.assertEqual(Assignment.objects.count(), expected) | null |
5,819 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDatabaseResult',
'AwaitableGetDatabaseResult',
'get_database',
'get_database_output',
]
@pulumi.output_type
class GetDatabaseResult:
"""
A collection of values returned by getDatabase.
"""
def __init__(__self__, cluster_name=None, hot_cache_period=None, METHOD_NAME=None, location=None, name=None, resource_group_name=None, size=None, soft_delete_period=None):
if cluster_name and not isinstance(cluster_name, str):
raise TypeError("Expected argument 'cluster_name' to be a str")
pulumi.set(__self__, "cluster_name", cluster_name)
if hot_cache_period and not isinstance(hot_cache_period, str):
raise TypeError("Expected argument 'hot_cache_period' to be a str")
pulumi.set(__self__, "hot_cache_period", hot_cache_period)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if size and not isinstance(size, float):
raise TypeError("Expected argument 'size' to be a float")
pulumi.set(__self__, "size", size)
if soft_delete_period and not isinstance(soft_delete_period, str):
raise TypeError("Expected argument 'soft_delete_period' to be a str")
pulumi.set(__self__, "soft_delete_period", soft_delete_period)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> str:
return pulumi.get(self, "cluster_name")
@property
@pulumi.getter(name="hotCachePeriod")
def hot_cache_period(self) -> str:
"""
The time the data that should be kept in cache for fast queries as ISO 8601 timespan.
"""
return pulumi.get(self, "hot_cache_period")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region in which the managed Kusto Database exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def size(self) -> float:
"""
The size of the database in bytes.
"""
return pulumi.get(self, "size")
@property
@pulumi.getter(name="softDeletePeriod")
def soft_delete_period(self) -> str:
"""
The time the data should be kept before it stops being accessible to queries as ISO 8601 timespan.
"""
return pulumi.get(self, "soft_delete_period")
class AwaitableGetDatabaseResult(GetDatabaseResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDatabaseResult(
cluster_name=self.cluster_name,
hot_cache_period=self.hot_cache_period,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
size=self.size,
soft_delete_period=self.soft_delete_period)
def get_database(cluster_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDatabaseResult:
"""
Use this data source to access information about an existing Kusto Database
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.kusto.get_database(cluster_name="test_cluster",
name="my-kusto-database",
resource_group_name="test_resource_group")
```
:param str cluster_name: The name of the Kusto Cluster this database is added to.
:param str name: The name of the Kusto Database.
:param str resource_group_name: The Resource Group where the Kusto Database exists.
"""
__args__ = dict()
__args__['clusterName'] = cluster_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:kusto/getDatabase:getDatabase', __args__, opts=opts, typ=GetDatabaseResult).value
return AwaitableGetDatabaseResult(
cluster_name=pulumi.get(__ret__, 'cluster_name'),
hot_cache_period=pulumi.get(__ret__, 'hot_cache_period'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
size=pulumi.get(__ret__, 'size'),
soft_delete_period=pulumi.get(__ret__, 'soft_delete_period'))
@_utilities.lift_output_func(get_database)
def get_database_output(cluster_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDatabaseResult]:
"""
Use this data source to access information about an existing Kusto Database
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.kusto.get_database(cluster_name="test_cluster",
name="my-kusto-database",
resource_group_name="test_resource_group")
```
:param str cluster_name: The name of the Kusto Cluster this database is added to.
:param str name: The name of the Kusto Database.
:param str resource_group_name: The Resource Group where the Kusto Database exists.
"""
... | null |
5,820 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor, context
class Net(nn.Cell):
def construct(self, x, diagonal=0):
return x.tril(diagonal)
class TrilNet(nn.Cell):
def __init__(self):
super(TrilNet, self).__init__()
self.tril = nn.Tril()
def construct(self, value, k):
return self.tril(value, k)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_tril(mode):
"""
Feature: tril
Description: Verify the result of tril
Expectation: success
"""
ms.set_context(mode=mode)
x = Tensor([[-1.8297, -0.8474, 1.0292], [-1.2167, 0.5574, -0.6753], [-0.6702, 0.2276, 1.2421]])
net = Net()
output = net(x)
expect_output = np.array([[-1.8297, 0., 0.], [-1.2167, 0.5574, 0.], [-0.6702, 0.2276, 1.2421]], dtype=np.float32)
assert np.allclose(output.asnumpy(), expect_output)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def test_tril_0(mode):
"""
Feature: test_tril
Description: Verify the result of test_tril
Expectation: success
"""
value = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
net = TrilNet()
out = net(value, 0)
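    # tril with k=0 keeps the lower triangle: [[1, 0, 0], [4, 5, 0], [7, 8, 9]], whose sum is 34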
assert np.sum(out.asnumpy()) == 34
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def test_tril_1(mode):
"""
Feature: test_tril_1
Description: Verify the result of test_tril_1
Expectation: success
"""
value = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
net = TrilNet()
out = net(value, 1)
assert np.sum(out.asnumpy()) == 42
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def test_tril_2(mode):
"""
Feature: test_tril_2
Description: Verify the result of test_tril_2
Expectation: success
"""
value = Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
net = TrilNet()
out = net(value, -1)
assert np.sum(out.asnumpy()) == 19
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def METHOD_NAME(mode):
"""
Feature: test_tril_parameter
Description: Verify the result of test_tril_parameter
Expectation: success
"""
net = TrilNet()
net(Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 0)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def test_tril_parameter_1(mode):
"""
Feature: test_tril_parameter_1
Description: Verify the result of test_tril_parameter_1
Expectation: success
"""
net = TrilNet()
net(Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 0)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE,])
def test_tril_parameter_2(mode):
"""
Feature: test_tril_parameter_2
Description: Verify the result of test_tril_parameter_2
Expectation: success
"""
net = TrilNet()
net(Tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]]), 0) | null |
5,821 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import shapely
from shapely.geometry import Polygon
import numpy as np
from collections import defaultdict
import operator
import editdistance
def strQ2B(ustring):
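    """Convert full-width characters in ``ustring`` to their half-width ASCII
    equivalents, so ground-truth and predicted strings compare consistently."""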
rstring = ""
for uchar in ustring:
inside_code = ord(uchar)
if inside_code == 12288:
inside_code = 32
elif (inside_code >= 65281 and inside_code <= 65374):
inside_code -= 65248
rstring += chr(inside_code)
return rstring
def polygon_from_str(polygon_points):
"""
Create a shapely polygon object from gt or dt line.
"""
polygon_points = np.array(polygon_points).reshape(4, 2)
polygon = Polygon(polygon_points).convex_hull
return polygon
def polygon_iou(poly1, poly2):
"""
Intersection over union between two shapely polygons.
"""
if not poly1.intersects(
poly2): # this test is fast and can accelerate calculation
iou = 0
else:
try:
inter_area = poly1.intersection(poly2).area
union_area = poly1.area + poly2.area - inter_area
iou = float(inter_area) / union_area
except shapely.geos.TopologicalError:
# except Exception as e:
# print(e)
print('shapely.geos.TopologicalError occurred, iou set to 0')
iou = 0
return iou
def METHOD_NAME(str1, str2):
return editdistance.eval(str1, str2)
def e2e_eval(gt_dir, res_dir, ignore_blank=False):
print('start testing...')
iou_thresh = 0.5
val_names = os.listdir(gt_dir)
num_gt_chars = 0
gt_count = 0
dt_count = 0
hit = 0
ed_sum = 0
for i, val_name in enumerate(val_names):
with open(os.path.join(gt_dir, val_name), encoding='utf-8') as f:
gt_lines = [o.strip() for o in f.readlines()]
gts = []
ignore_masks = []
for line in gt_lines:
parts = line.strip().split('\t')
# ignore illegal data
if len(parts) < 9:
continue
assert (len(parts) < 11)
if len(parts) == 9:
gts.append(parts[:8] + [''])
else:
gts.append(parts[:8] + [parts[-1]])
ignore_masks.append(parts[8])
val_path = os.path.join(res_dir, val_name)
if not os.path.exists(val_path):
dt_lines = []
else:
with open(val_path, encoding='utf-8') as f:
dt_lines = [o.strip() for o in f.readlines()]
dts = []
for line in dt_lines:
# print(line)
parts = line.strip().split("\t")
assert (len(parts) < 10), "line error: {}".format(line)
if len(parts) == 8:
dts.append(parts + [''])
else:
dts.append(parts)
dt_match = [False] * len(dts)
gt_match = [False] * len(gts)
all_ious = defaultdict(tuple)
for index_gt, gt in enumerate(gts):
gt_coors = [float(gt_coor) for gt_coor in gt[0:8]]
gt_poly = polygon_from_str(gt_coors)
for index_dt, dt in enumerate(dts):
dt_coors = [float(dt_coor) for dt_coor in dt[0:8]]
dt_poly = polygon_from_str(dt_coors)
iou = polygon_iou(dt_poly, gt_poly)
if iou >= iou_thresh:
all_ious[(index_gt, index_dt)] = iou
sorted_ious = sorted(
all_ious.items(), key=operator.itemgetter(1), reverse=True)
sorted_gt_dt_pairs = [item[0] for item in sorted_ious]
# matched gt and dt
for gt_dt_pair in sorted_gt_dt_pairs:
index_gt, index_dt = gt_dt_pair
if gt_match[index_gt] == False and dt_match[index_dt] == False:
gt_match[index_gt] = True
dt_match[index_dt] = True
if ignore_blank:
gt_str = strQ2B(gts[index_gt][8]).replace(" ", "")
dt_str = strQ2B(dts[index_dt][8]).replace(" ", "")
else:
gt_str = strQ2B(gts[index_gt][8])
dt_str = strQ2B(dts[index_dt][8])
if ignore_masks[index_gt] == '0':
ed_sum += METHOD_NAME(gt_str, dt_str)
num_gt_chars += len(gt_str)
if gt_str == dt_str:
hit += 1
gt_count += 1
dt_count += 1
# unmatched dt
for tindex, dt_match_flag in enumerate(dt_match):
if dt_match_flag == False:
dt_str = dts[tindex][8]
gt_str = ''
ed_sum += METHOD_NAME(dt_str, gt_str)
dt_count += 1
# unmatched gt
for tindex, gt_match_flag in enumerate(gt_match):
if gt_match_flag == False and ignore_masks[tindex] == '0':
dt_str = ''
gt_str = gts[tindex][8]
ed_sum += METHOD_NAME(gt_str, dt_str)
num_gt_chars += len(gt_str)
gt_count += 1
eps = 1e-9
print('hit, dt_count, gt_count', hit, dt_count, gt_count)
precision = hit / (dt_count + eps)
recall = hit / (gt_count + eps)
fmeasure = 2.0 * precision * recall / (precision + recall + eps)
avg_edit_dist_img = ed_sum / len(val_names)
avg_edit_dist_field = ed_sum / (gt_count + eps)
character_acc = 1 - ed_sum / (num_gt_chars + eps)
print('character_acc: %.2f' % (character_acc * 100) + "%")
print('avg_edit_dist_field: %.2f' % (avg_edit_dist_field))
print('avg_edit_dist_img: %.2f' % (avg_edit_dist_img))
print('precision: %.2f' % (precision * 100) + "%")
print('recall: %.2f' % (recall * 100) + "%")
print('fmeasure: %.2f' % (fmeasure * 100) + "%")
if __name__ == '__main__':
# if len(sys.argv) != 3:
# print("python3 ocr_e2e_eval.py gt_dir res_dir")
# exit(-1)
# gt_folder = sys.argv[1]
# pred_folder = sys.argv[2]
gt_folder = sys.argv[1]
pred_folder = sys.argv[2]
e2e_eval(gt_folder, pred_folder) | null |
5,822 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class MaskedFillNet(nn.Cell):
def __init__(self):
super(MaskedFillNet, self).__init__()
self.maskedfill = P.MaskedFill()
def construct(self, inputs, mask, value):
return self.maskedfill(inputs, mask, value)
def maskedfill_fun(ntype):
maskedfill_net = MaskedFillNet()
inputs = Tensor(np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(ntype))
mask = Tensor(np.array([[True, True, False, True], [False, False, True, False]]).astype(np.bool))
value = Tensor(np.array(22).astype(ntype))
expect = np.array([[22, 22, 3, 22], [5, 6, 22, 8]]).astype(ntype)
output = maskedfill_net(inputs, mask, value)
assert (output.asnumpy() == expect).all()
mask = Tensor(np.array([[True, True, True, True], [True, True, True, True]]).astype(np.bool))
value = Tensor(np.array(1).astype(ntype))
expect = np.array([[1, 1, 1, 1], [1, 1, 1, 1]]).astype(ntype)
output = maskedfill_net(inputs, mask, value)
assert (output.asnumpy() == expect).all()
mask = Tensor(np.array([[False, False, False, False], [False, False, False, False]]).astype(np.bool))
value = Tensor(np.array(22).astype(ntype))
expect = np.array([[1, 2, 3, 4], [5, 6, 7, 8]]).astype(ntype)
output = maskedfill_net(inputs, mask, value)
assert (output.asnumpy() == expect).all()
# Broadcast
mask = Tensor(np.array([True, True, False, True]).astype(np.bool))
value = Tensor(np.array(22).astype(ntype))
expect = np.array([[22, 22, 3, 22], [22, 22, 7, 22]]).astype(ntype)
output = maskedfill_net(inputs, mask, value)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maskedfill_float():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with float input.
Expectation: The result matches the expected output.
"""
maskedfill_fun(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maskedfill_float16():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with float16 input.
Expectation: The result matches the expected output.
"""
maskedfill_fun(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maskedfill_int():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with int input.
Expectation: The result matches the expected output.
"""
maskedfill_fun(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maskedfill_int8():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with int8 input.
Expectation: The result matches the expected output.
"""
maskedfill_fun(np.int8)
def maskedfill_value(value):
maskedfill_net = MaskedFillNet()
inputs = Tensor(np.array([1, 2, 3, 4]).astype(np.float32))
mask = Tensor(np.array([True, True, False, True]).astype(np.bool))
expect = np.array([0.5, 0.5, 3, 0.5]).astype(np.float32)
output = maskedfill_net(inputs, mask, value)
assert (output.asnumpy() == expect).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_maskedfill_float_value():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with float value.
Expectation: The result matches the expected output.
"""
maskedfill_value(0.5)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Test MaskedFill op.
Description: Test MaskedFill with tensor input.
Expectation: The result matches the expected output.
"""
maskedfill_value(Tensor(0.5)) | null |
5,823 | import json
import os
import textwrap
from conan.api.conan_api import ConanAPI
from conans.model.conf import BUILT_IN_CONFS
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient
from conans.util.env import environment_update
def test_missing_subarguments():
""" config MUST run with a subcommand. Otherwise, it MUST exits with error.
"""
client = TestClient()
client.run("config", assert_error=True)
assert "ERROR: Exiting with code: 2" in client.out
class TestConfigHome:
""" The test framework cannot test the CONAN_HOME env-var because it is not using it
(it will break tests for maintainers that have the env-var defined)
"""
def test_config_home_default(self):
client = TestClient()
client.run("config home")
assert f"{client.cache.cache_folder}\n" == client.stdout
client.run("config home --format=text", assert_error=True)
# It is not possible to use --format=text explicitly
assert "--format=text" in client.out
def test_api_uses_env_var_home(self):
cache_folder = os.path.join(temp_folder(), "custom")
with environment_update({"CONAN_HOME": cache_folder}):
api = ConanAPI()
assert api.cache_folder == cache_folder
def METHOD_NAME():
"""
'conan config list' shows all the built-in Conan configurations
"""
client = TestClient()
client.run("config list")
for k, v in BUILT_IN_CONFS.items():
assert f"{k}: {v}" in client.out
client.run("config list --format=json")
assert f"{json.dumps(BUILT_IN_CONFS, indent=4)}\n" == client.stdout
client.run("config list unexpectedarg", assert_error=True)
assert "unrecognized arguments: unexpectedarg" in client.out
def test_config_install():
tc = TestClient()
tc.save({'config/foo': ''})
# This should not fail (insecure flag exists)
tc.run("config install config --insecure")
assert "foo" in os.listdir(tc.cache_folder)
# Negative test, ensure we would be catching a missing arg if it did not exist
tc.run("config install config --superinsecure", assert_error=True)
def test_config_install_conanignore():
tc = TestClient()
conanignore = textwrap.dedent("""
a/*
b/c/*
d/*
""")
tc.save({
'config_folder/.conanignore': conanignore,
"config_folder/a/test": '',
'config_folder/abracadabra': '',
'config_folder/b/bison': '',
'config_folder/b/a/test2': '',
'config_folder/b/c/helmet': '',
'config_folder/d/prix': '',
'config_folder/d/foo/bar': '',
'config_folder/foo': ''
})
def _assert_config_exists(path):
assert os.path.exists(os.path.join(tc.cache_folder, path))
def _assert_config_not_exists(path):
assert not os.path.exists(os.path.join(tc.cache_folder, path))
tc.run('config install config_folder')
_assert_config_not_exists(".conanignore")
_assert_config_not_exists("a")
_assert_config_not_exists("a/test")
_assert_config_exists("abracadabra")
_assert_config_exists("b")
_assert_config_exists("b/bison")
_assert_config_exists("b/a/test2")
_assert_config_not_exists("b/c/helmet")
_assert_config_not_exists("b/c")
_assert_config_not_exists("d/prix")
_assert_config_not_exists("d/foo/bar")
_assert_config_not_exists("d")
_assert_config_exists("foo")
os.listdir(tc.current_folder)
def test_config_show():
globalconf = textwrap.dedent("""
tools.build:jobs=42
tools.files.download:retry_wait=10
tools.files.download:retry=7
core.net.http:timeout=30
core.net.http:max_retries=5
zlib/*:user.mycategory:retry=True
zlib/*:user.mycategory:foo=0
zlib/*:user.myothercategory:foo=0
""")
tc = TestClient()
tc.save_home({"global.conf": globalconf})
tc.run("config show tools.build:jobs")
assert "42" in tc.out
tc.run("config show core*")
assert "core.net.http:timeout" in tc.out
assert "30" in tc.out
assert "core.net.http:max_retries" in tc.out
assert "5" in tc.out
tc.run("config show *retr*")
assert "tools.files.download:retry_wait" in tc.out
assert "tools.files.download:retry" in tc.out
assert "core.net.http:max_retries" in tc.out
assert "zlib/*:user.mycategory:retry" in tc.out
tc.run("config show zlib*")
assert "zlib/*:user.mycategory:retry" in tc.out
assert "zlib/*:user.mycategory:foo" in tc.out
assert "zlib/*:user.myothercategory:foo" in tc.out
tc.run("config show zlib/*")
assert "zlib/*:user.mycategory:retry" in tc.out
assert "zlib/*:user.mycategory:foo" in tc.out
assert "zlib/*:user.myothercategory:foo" in tc.out
tc.run("config show zlib/*:foo")
assert "zlib/*:user.mycategory:foo" in tc.out
assert "zlib/*:user.myothercategory:foo" in tc.out | null |
5,824 | """
ID list class for Jaseci
Generalized functions for managing '_ids' convention for lists of Jaseci
objects
parent_obj is the instance that the list belongs to
"""
from jaseci.utils.utils import logger
class IdList(list):
"""
ID list class for tracking lists of objects in Jaseci
in_list is an optional list of ids used to pre-populate the list on construction.
"""
def __init__(self, parent_obj, auto_save=True, in_list=None):
self.parent_obj = parent_obj
self.cached_objects = []
self.heal_list = []
self.auto_save = auto_save
if in_list:
self.extend(in_list)
def cache_reset(self):
self.cached_objects = []
def add_obj(
self, obj, push_front=False, allow_dups=False, silent=False, bypass=False
):
"""Adds a obj obj to Jaseci object"""
self.parent_obj.check_hooks_match(obj)
if not allow_dups and obj.jid in self:
if not silent:
logger.warning(str(f"{obj} is already in {self.parent_obj}'s list"))
else:
self.cache_reset()
if push_front:
self.insert(0, obj.jid)
else:
self.append(obj.jid)
if not bypass:
if not obj.j_parent:
obj.j_parent = self.parent_obj.jid
self.save(obj)
self.save()
def add_obj_list(self, obj_list, push_front=False, allow_dups=False, silent=False):
self.cache_reset()
if push_front:
obj_list.reverse()
for i in obj_list:
self.add_obj(i, push_front=push_front, allow_dups=allow_dups, silent=silent)
def remove_obj(self, obj):
"""Remove a Jaseci obj from list"""
self.cache_reset()
self.remove(obj.jid)
self.save()
def heal(self):
for i in self.heal_list:
self.remove(i)
if len(self.heal_list) and hasattr(self.parent_obj, "save"):
self.save()
self.heal_list = []
def destroy_obj(self, obj):
"""Completely destroys a Jaseci obj obj by it's name"""
self.remove_obj(obj)
obj.destroy()
def obj_for_id_not_exist_error(self, item_id):
self.heal_list.append(item_id)
my_name = "id_list"
for k, v in self.parent_obj.__dict__.items():
if id(v) == id(self):
my_name = k
return f"{item_id} not found in {my_name} of {self.parent_obj}!"
def get_obj_by_name(self, name, kind=None, silent=False):
"""Returns a Jaseci obj obj by it's name"""
ret = None
for i in self:
obj = self.parent_obj._h.get_obj(self.parent_obj._m_id, i)
if not obj:
logger.critical(self.obj_for_id_not_exist_error(i))
continue
if obj.name == name:
if kind and obj.kind != kind:
continue
ret = obj
break
if not ret and not silent:
logger.error(str(f"object for '{name}' not found in '{self.parent_obj}'!"))
self.heal()
return ret
def has_obj_by_name(self, name, kind=None):
"""Returns whether a Jaseci obj exists by it's name"""
return self.get_obj_by_name(name, kind, silent=True) is not None
def remove_obj_by_name(self, name, kind=None):
"""Remove a Jaseci obj by it's name"""
self.remove_obj(self.get_obj_by_name(name, kind))
def destroy_obj_by_name(self, name, kind=None):
"""Destroy a Jaseci obj by it's name"""
self.destroy_obj(self.get_obj_by_name(name, kind))
def obj_list(self):
"""Return list of objects from ids"""
if not len(self.cached_objects):
for i in self:
obj = self.parent_obj._h.get_obj(self.parent_obj._m_id, i)
if not obj:
logger.critical(self.obj_for_id_not_exist_error(i))
else:
self.cached_objects.append(obj)
self.heal()
return self.cached_objects.copy()
def METHOD_NAME(self):
"""Remove a Jaseci obj obj by it's name"""
for i in self.obj_list():
self.remove_obj(i)
if len(self):
logger.critical(
str(
f"Remove all failed in id_list of {self.parent_obj} - "
+ f"still has {self}!"
)
)
def destroy_all(self):
"""Remove a Jaseci obj obj by it's name"""
for i in self.obj_list():
self.destroy_obj(i)
if len(self):
logger.critical(
str(
f"Destroy all failed in id_list of {self.parent_obj} - "
+ f"still has {self}!"
)
)
def first_obj(self):
"""Get first object in list"""
if not self:
logger.error(str(f"List in '{self.parent_obj}' is empty!"))
return None
return self.parent_obj._h.get_obj(self.parent_obj._m_id, self[0])
def pop_first_obj(self):
"""Get first object in list"""
ret = self.first_obj()
if ret:
self.remove_obj(ret)
return ret
def save(self, obj=None):
if self.auto_save:
self.parent_obj.save()
if obj:
obj.save() | null |
5,825 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2020] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
from hpeOneView import HPEOneViewValueError
from hpeOneView.connection import connection
from hpeOneView.resources.resource import ResourceClient
from hpeOneView.resources.servers.id_pools_ranges import IdPoolsRanges
import unittest
class TestIdPoolsRanges(unittest.TestCase):
resource_info = {'type': 'Range',
'name': 'No name'}
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host, 800)
self.id_pool_name = 'vsn'
self.client = IdPoolsRanges(self.id_pool_name, self.connection)
self.example_uri = "/rest/id-pools/" + self.id_pool_name + "/ranges/f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
@mock.patch.object(ResourceClient, '__init__')
def test_id_pools_ranges_constructor_with_type_vsn(self, mock_rclient):
mock_rclient.return_value = None
IdPoolsRanges('vsn', self.connection)
mock_rclient.assert_called_once_with(self.connection, '/rest/id-pools/vsn/ranges')
@mock.patch.object(ResourceClient, '__init__')
def test_id_pools_ranges_constructor_with_type_vwwn(self, mock_rclient):
mock_rclient.return_value = None
IdPoolsRanges('vwwn', self.connection)
mock_rclient.assert_called_once_with(self.connection, '/rest/id-pools/vwwn/ranges')
@mock.patch.object(ResourceClient, '__init__')
def test_id_pools_ranges_constructor_with_type_vmac(self, mock_rclient):
mock_rclient.return_value = None
IdPoolsRanges('vmac', self.connection)
mock_rclient.assert_called_once_with(self.connection, '/rest/id-pools/vmac/ranges')
@mock.patch.object(ResourceClient, '__init__')
def test_id_pools_ranges_constructor_with_invalid_type(self, mock_rclient):
mock_rclient.return_value = None
self.assertRaises(HPEOneViewValueError, IdPoolsRanges, 'invalid', self.connection)
@mock.patch.object(ResourceClient, 'create')
def test_create_called_once(self, mock_create):
self.client.create(self.resource_info)
mock_create.assert_called_once_with(self.resource_info, timeout=-1)
@mock.patch.object(ResourceClient, 'get')
def test_get_by_id_called_once(self, mock_get):
id_pools_range_id = "f0a0a113-ec97-41b4-83ce-d7c92b900e7c"
self.client.get(id_pools_range_id)
mock_get.assert_called_once_with(id_pools_range_id)
@mock.patch.object(ResourceClient, 'get')
def test_get_by_uri_called_once(self, mock_get):
self.client.get(self.example_uri)
mock_get.assert_called_once_with(self.example_uri)
@mock.patch.object(ResourceClient, 'update')
def test_enable_called_once(self, update):
self.client.enable(self.resource_info.copy(), self.example_uri)
update.assert_called_once_with(self.resource_info.copy(), self.example_uri, timeout=-1)
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_allocated_fragments_called_once_with_defaults(self, mock_get):
self.client.get_allocated_fragments(self.example_uri)
mock_get.assert_called_once_with(self.example_uri + "/allocated-fragments?start=0&count=-1")
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_allocated_fragments_called_once(self, mock_get):
self.client.get_allocated_fragments(self.example_uri, 5, 2)
mock_get.assert_called_once_with(self.example_uri + "/allocated-fragments?start=2&count=5")
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_free_fragments_called_once_with_defaults(self, mock_get):
self.client.get_free_fragments(self.example_uri)
mock_get.assert_called_once_with(self.example_uri + "/free-fragments?start=0&count=-1")
@mock.patch.object(ResourceClient, 'get_collection')
def test_get_free_fragments_called_once(self, mock_get):
self.client.get_free_fragments(self.example_uri, 5, 3)
mock_get.assert_called_once_with(self.example_uri + "/free-fragments?start=3&count=5")
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once(self, mock_delete):
self.client.delete({'uri': '/rest/uri'}, force=True, timeout=50)
mock_delete.assert_called_once_with({'uri': '/rest/uri'}, force=True, timeout=50)
@mock.patch.object(ResourceClient, 'delete')
def test_delete_called_once_with_defaults(self, mock_delete):
self.client.delete({'uri': '/rest/uri'})
mock_delete.assert_called_once_with({'uri': '/rest/uri'}, force=False, timeout=-1)
@mock.patch.object(ResourceClient, 'update')
def test_allocate_called_once(self, mock_update):
self.client.allocate(self.resource_info.copy(), self.example_uri)
mock_update.assert_called_once_with(self.resource_info.copy(), self.example_uri + "/allocator", timeout=-1)
@mock.patch.object(ResourceClient, 'update')
def METHOD_NAME(self, update):
self.client.collect(self.resource_info.copy(), self.example_uri)
update.assert_called_once_with(self.resource_info.copy(), self.example_uri + "/collector", timeout=-1) | null |
5,826 | import torch
import copy
from torch.nn.utils.rnn import PackedSequence
class _LSTM(torch.nn.LSTM):
# This is a solution to swap the lstm module with the ipex counterpart,
# and this operator will be upstreamed to PyTorch once oneDNN supports
# bias and src_iter_c in bf16 inference. Keep this for better support of
# blocked-format weights, e.g. for training.
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# port from torch/nn/modules/rnn.py
# replace the _VF.lstm with torch.ops.torch_ipex.lstm when the input is not PackedSequence
def forward(self, input, hx=None): # noqa: F811
orig_input = input
# xxx: isinstance check needs to be in conditional for TorchScript to compile
if isinstance(orig_input, PackedSequence):
# fallback to PyTorch LSTM since PackedSequence unsupported in oneDNN
return super(_LSTM, self).forward(input, hx)
else:
batch_sizes = None
max_batch_size = input.size(0) if self.batch_first else input.size(1)
sorted_indices = None
unsorted_indices = None
if hx is None:
num_directions = 2 if self.bidirectional else 1
real_hidden_size = (
self.proj_size if self.proj_size > 0 else self.hidden_size
)
h_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
real_hidden_size,
dtype=input.dtype,
device=input.device,
)
c_zeros = torch.zeros(
self.num_layers * num_directions,
max_batch_size,
self.hidden_size,
dtype=input.dtype,
device=input.device,
)
hx = (h_zeros, c_zeros)
else:
# Each batch of the hidden state should match the input sequence that
# the user believes he/she is passing in.
hx = self.permute_hidden(hx, sorted_indices)
self.check_forward_args(input, hx, batch_sizes)
result = torch.ops.torch_ipex.ipex_lstm(
input,
hx,
self._flat_weights,
self.bias,
self.num_layers,
self.dropout,
self.training,
self.bidirectional,
self.batch_first,
)
output = result[0]
hidden = result[1:]
return output, self.permute_hidden(hidden, unsorted_indices)
def METHOD_NAME(optimizer, param_dict):
if optimizer is None:
return
for group in optimizer.param_groups:
for i, p in enumerate(group["params"]):
if p in param_dict:
new_param = param_dict[p]
group["params"][i] = new_param
if p in optimizer.state:
optimizer.state[new_param] = optimizer.state.pop(p)
def replace_lstm_with_ipex_lstm(model, optimizer):
# replace lstm with ipex lstm during inference
# does not support the case where model itself is torch.nn.LSTM
for child_name, child in model.named_children():
if isinstance(child, torch.nn.LSTM):
assert hasattr(
child, "weight_ih_l0"
), "torch.nn.LSTM should have weight_ih_l0"
ipex_lstm = _LSTM(
child.input_size,
child.hidden_size,
child.num_layers,
child.bias,
child.batch_first,
child.dropout,
child.bidirectional,
child.proj_size,
child.weight_ih_l0.device,
child.weight_ih_l0.dtype,
)
ipex_lstm.__dict__ = copy.deepcopy(child.__dict__)
setattr(model, child_name, ipex_lstm)
param_dict = {}
original_params = dict(child.named_parameters())
for name, para in ipex_lstm.named_parameters():
param_dict.update({original_params[name]: para})
METHOD_NAME(optimizer, param_dict)
else:
replace_lstm_with_ipex_lstm(child, optimizer) | null |
5,827 | # Copyright (c) 2022 Tulir Asokan
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import annotations
from mautrix.api import Method, Path
from mautrix.errors import MatrixResponseError
from mautrix.types import (
DeviceID,
LoginFlowList,
LoginResponse,
LoginType,
MatrixUserIdentifier,
UserID,
UserIdentifier,
WhoamiResponse,
)
from .base import BaseClientAPI
class ClientAuthenticationMethods(BaseClientAPI):
"""
Methods in section 5 Authentication of the spec. These methods are used for logging in and out
and for querying information about the current account.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.6.1.html#client-authentication>`__
"""
# region 5.5 Login
# API reference: https://matrix.org/docs/spec/client_server/r0.6.1.html#login
async def get_login_flows(self) -> LoginFlowList:
"""
Get login flows supported by the homeserver.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.6.1#get-matrix-client-r0-login>`__
Returns:
The list of login flows that the homeserver supports.
"""
resp = await self.api.request(Method.GET, Path.v3.login)
try:
return LoginFlowList.deserialize(resp)
except KeyError:
raise MatrixResponseError("`flows` not in response.")
async def login(
self,
identifier: UserIdentifier | UserID | None = None,
login_type: LoginType = LoginType.PASSWORD,
device_name: str | None = None,
device_id: str | None = None,
password: str | None = None,
store_access_token: bool = True,
update_hs_url: bool = False,
**kwargs: str,
) -> LoginResponse:
"""
Authenticates the user, and issues an access token they can use to authorize themself in
subsequent requests.
See also: `API reference <https://matrix.org/docs/spec/client_server/r0.6.1#post-matrix-client-r0-login>`__
Args:
login_type: The login type being used.
identifier: Identification information for the user.
device_name: A display name to assign to the newly-created device.
Ignored if ``device_id`` corresponds to a known device.
device_id: ID of the client device. If this does not correspond to a known client
device, a new device will be created. The server will auto-generate a device_id
if this is not specified.
password: The user's password. Required when `type` is `m.login.password`.
store_access_token: Whether or not mautrix-python should store the returned access token
in this ClientAPI instance for future requests.
update_hs_url: Whether or not mautrix-python should use the returned homeserver URL
in this ClientAPI instance for future requests.
**kwargs: Additional arguments for other login types.
Returns:
The login response.
"""
if identifier is None or isinstance(identifier, str):
identifier = MatrixUserIdentifier(identifier or self.mxid)
if password is not None:
kwargs["password"] = password
if device_name is not None:
kwargs["initial_device_display_name"] = device_name
if device_id:
kwargs["device_id"] = device_id
elif self.device_id:
kwargs["device_id"] = self.device_id
resp = await self.api.request(
Method.POST,
Path.v3.login,
{
"type": str(login_type),
"identifier": identifier.serialize(),
**kwargs,
},
sensitive="password" in kwargs or "token" in kwargs,
)
resp_data = LoginResponse.deserialize(resp)
if store_access_token:
self.mxid = resp_data.user_id
self.device_id = resp_data.device_id
self.api.token = resp_data.access_token
if update_hs_url:
base_url = resp_data.well_known.homeserver.base_url
if base_url and base_url != self.api.base_url:
self.log.debug(
"Login response contained new base URL, switching from "
f"{self.api.base_url} to {base_url}"
)
self.api.base_url = base_url.rstrip("/")
return resp_data
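# Hedged usage sketch (the client object, credentials and device name below
# are assumed placeholders, not values defined in this module):
#
#   resp = await client.login(password="hunter2",
#                             device_name="example device")
#
# With store_access_token=True (the default), the returned access token,
# user id and device id are kept on this instance for subsequent requests.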
async def logout(self, clear_access_token: bool = True) -> None:
"""
Invalidates an existing access token, so that it can no longer be used for authorization.
The device associated with the access token is also deleted.
`Device keys <https://matrix.org/docs/spec/client_server/latest#device-keys>`__ for the
device are deleted alongside the device.
See also: `API reference <https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-logout>`__
Args:
clear_access_token: Whether or not mautrix-python should forget the stored access token.
"""
await self.api.request(Method.POST, Path.v3.logout)
if clear_access_token:
self.api.token = ""
self.device_id = DeviceID("")
async def METHOD_NAME(self, clear_access_token: bool = True) -> None:
"""
Invalidates all access tokens for a user, so that they can no longer be used for
authorization. This includes the access token that made this request. All devices for the
user are also deleted.
`Device keys <https://matrix.org/docs/spec/client_server/latest#device-keys>`__ for the
device are deleted alongside the device.
This endpoint does not require UI (user-interactive) authorization because UI authorization
is designed to protect against attacks where the someone gets hold of a single access token
then takes over the account. This endpoint invalidates all access tokens for the user,
including the token used in the request, and therefore the attacker is unable to take over
the account in this way.
See also: `API reference <https://matrix.org/docs/spec/client_server/latest#post-matrix-client-r0-logout-all>`__
Args:
clear_access_token: Whether or not mautrix-python should forget the stored access token.
"""
await self.api.request(Method.POST, Path.v3.logout.all)
if clear_access_token:
self.api.token = ""
self.device_id = DeviceID("")
# endregion
# TODO other sections
# region 5.7 Current account information
# API reference: https://matrix.org/docs/spec/client_server/r0.6.1.html#current-account-information
async def whoami(self) -> WhoamiResponse:
"""
Get information about the current user.
Returns:
The user ID and device ID of the current user.
"""
resp = await self.api.request(Method.GET, Path.v3.account.whoami)
return WhoamiResponse.deserialize(resp)
# endregion | null |
5,828 | # Copyright (c) 2021, EleutherAI
# This file is based on code by the authors denoted below and has been modified from its original version.
#
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import enum
from ..fused_kernels import load_fused_kernels
class ScaledUpperTriangMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs the following three operations in sequence:
1. Scale the tensor.
2. Apply upper triangular mask (typically used in gpt models).
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, scale):
import scaled_upper_triang_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_upper_triang_masked_softmax_cuda.forward(
inputs, scale_t[0]
)
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_upper_triang_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_upper_triang_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None
class ScaledMaskedSoftmax(torch.autograd.Function):
"""
Fused operation which performs the following three operations in sequence:
1. Scale the tensor.
2. Apply the mask.
3. Perform softmax.
"""
@staticmethod
def forward(ctx, inputs, mask, scale):
import scaled_masked_softmax_cuda
scale_t = torch.tensor([scale])
softmax_results = scaled_masked_softmax_cuda.forward(inputs, mask, scale_t[0])
ctx.save_for_backward(softmax_results, scale_t)
return softmax_results
@staticmethod
def backward(ctx, output_grads):
import scaled_masked_softmax_cuda
softmax_results, scale_t = ctx.saved_tensors
input_grads = scaled_masked_softmax_cuda.backward(
output_grads, softmax_results, scale_t[0]
)
return input_grads, None, None
class SoftmaxFusionTypes(enum.Enum):
upper_triang = 1 # causal mask
general = 2 # general mask
none = 3 # no fusion
class FusedScaleMaskSoftmax(nn.Module):
"""
fused operation: scaling + mask + softmax
Arguments:
input_in_fp16: flag to indicate if input in fp16 data format.
input_in_bf16: flag to indicate if input in bf16 data format.
fusion_type: type of fusion to perform, should be either upper_triang, general or none. None will perform a regular torch softmax.
mask_func: mask function to be applied.
softmax_in_fp32: if true, softmax is performed at fp32 precision.
scale: scaling factor used in input tensor scaling.
"""
def __init__(
self,
input_in_fp16,
input_in_bf16,
fusion_type,
mask_func,
softmax_in_fp32,
scale,
):
super().__init__()
self.input_in_fp16 = input_in_fp16
self.input_in_bf16 = input_in_bf16
self.input_in_float16 = self.input_in_fp16 or self.input_in_bf16
assert fusion_type in [
SoftmaxFusionTypes.upper_triang,
SoftmaxFusionTypes.general,
SoftmaxFusionTypes.none,
], f"Invalid fusion type {fusion_type}"
if fusion_type != SoftmaxFusionTypes.none:
load_fused_kernels() # check fused kernels are installed
self.upper_triang_mask_fusion = fusion_type == SoftmaxFusionTypes.upper_triang
self.general_mask_fusion = fusion_type == SoftmaxFusionTypes.general
self.fusion = fusion_type != SoftmaxFusionTypes.none
self.mask_func = mask_func
self.softmax_in_fp32 = softmax_in_fp32
self.scale = scale
assert (
self.scale is None or softmax_in_fp32
), "softmax should be in fp32 when scaled"
def forward(self, input, mask):
# [b, np, sq, sk]
assert input.dim() == 4
if self.is_kernel_available(mask, *input.size()):
return self.forward_fused_softmax(input, mask)
else:
return self.METHOD_NAME(input, mask)
def is_kernel_available(self, mask, b, np, sq, sk):
attn_batches = b * np
if (
self.fusion # user wants to fuse
and self.input_in_float16 # input must be fp16
and mask is not None # mask tensor must not be None
and 16 < sk <= 2048 # sk must be 16 ~ 2048
and sq % 4 == 0  # sq must be divisible by 4
and attn_batches % 4 == 0  # np * b must be divisible by 4
):
if 0 <= sk <= 2048:
batch_per_block = self.get_batch_per_block(sq, sk, b, np)
if self.upper_triang_mask_fusion:
if attn_batches % batch_per_block == 0:
return True
else:
if sq % batch_per_block == 0:
return True
return False
def forward_fused_softmax(self, input, mask):
b, np, sq, sk = input.size()
scale = self.scale if self.scale is not None else 1.0
if self.upper_triang_mask_fusion:
assert sq == sk, "causal mask is only for self attention"
# input is 3D tensor (attn_batches, sq, sk)
input = input.view(-1, sq, sk)
probs = ScaledUpperTriangMaskedSoftmax.apply(input, scale)
return probs.view(b, np, sq, sk)
else:
# input is 4D tensor (b, np, sq, sk)
return ScaledMaskedSoftmax.apply(input, mask, scale)
def METHOD_NAME(self, input, mask):
if self.input_in_float16 and self.softmax_in_fp32:
input = input.float()
if self.scale is not None:
input = input * self.scale
mask_output = self.mask_func(input, mask) if mask is not None else input
probs = torch.nn.Softmax(dim=-1)(mask_output)
if self.input_in_float16 and self.softmax_in_fp32:
if self.input_in_fp16:
probs = probs.half()
else:
probs = probs.bfloat16()
return probs
@staticmethod
def get_batch_per_block(sq, sk, b, np):
import scaled_masked_softmax_cuda
return scaled_masked_softmax_cuda.get_batch_per_block(sq, sk, b, np) | null |
5,829 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import numpy as np
from matplotlib.cm import gray
from .. import Data, DataCollection, VisualAttributes
from ..util import facet_subsets, colorize_subsets, sample_colormap
class TestRelim(object):
pass
class TestFacetSubsets(object):
def setup_method(self, method):
from .. import Data, DataCollection
self.data = Data(label='data', x=[1, 2, 3, 4, 5, 6, 7])
self.collect = DataCollection([self.data])
def test_facet_fully_specified(self):
grps = facet_subsets(self.collect, self.data.id['x'],
lo=3, hi=6, steps=3)
assert len(grps) == 3
np.testing.assert_array_equal(grps[0].subsets[0].to_mask(),
[False, False, True,
False, False, False, False])
np.testing.assert_array_equal(grps[1].subsets[0].to_mask(),
[False, False, False,
True, False, False, False])
np.testing.assert_array_equal(grps[2].subsets[0].to_mask(),
[False, False, False,
False, True, True, False])
def test_default_lo_value(self):
grps = facet_subsets(self.collect, self.data.id['x'],
hi=7, steps=2)
assert len(grps) == 2
np.testing.assert_array_equal(grps[0].subsets[0].to_mask(),
[True, True, True, False,
False, False, False])
np.testing.assert_array_equal(grps[1].subsets[0].to_mask(),
[False, False, False, True,
True, True, True])
def test_default_hi_value(self):
grps = facet_subsets(self.collect, self.data.id['x'],
lo=3, steps=2)
assert len(grps) == 2
np.testing.assert_array_equal(grps[0].subsets[0].to_mask(),
[False, False, True, True, False,
False, False])
np.testing.assert_array_equal(grps[1].subsets[0].to_mask(),
[False, False, False, False, True,
True, True])
def METHOD_NAME(self):
grps = facet_subsets(self.collect, self.data.id['x'])
assert len(grps) == 5
def test_label(self):
grps = facet_subsets(self.collect, self.data.id['x'])
lbls = ['1.0<=x<2.2', '2.2<=x<3.4', '3.4<=x<4.6', '4.6<=x<5.8',
'5.8<=x<=7.0', None]
for s, lbl in zip(grps, lbls):
assert s.label == lbl
grps = facet_subsets(self.collect, self.data.id['x'], prefix='test_')
for i, s in enumerate(grps, start=1):
assert s.label.startswith('test_')
def test_facet_reversed(self):
grps = facet_subsets(self.collect, self.data.id['x'],
lo=3, hi=1, steps=2)
assert len(grps) == 2
# ranges should be (2, 3] and (1, 2]
np.testing.assert_array_equal(grps[0].subsets[0].to_mask(),
[False, False, True, False, False,
False, False])
np.testing.assert_array_equal(grps[1].subsets[0].to_mask(),
[True, True, False, False, False,
False, False])
def test_facet_styling(self):
visual_attrs = dict(alpha=0.7, markersize=8, marker='o',
linewidth=2, linestyle='dashed')
style = VisualAttributes(**visual_attrs)
lo, hi, steps = 3, 6, 3
grps = facet_subsets(self.collect, self.data.id['x'],
lo=lo, hi=hi, steps=steps,
style=visual_attrs, cmap=gray)
colors = sample_colormap(steps, gray)
for sg, color in zip(grps, colors):
style.color = color
assert sg.style == style
def test_colorize_subsets():
data = Data(label='test', x=[1, 2, 3])
dc = DataCollection(data)
grps = facet_subsets(dc, data.id['x'], steps=2)
colorize_subsets(grps, gray)
assert grps[0].style.color == '#000000'
assert grps[1].style.color == '#ffffff'
def test_colorize_subsets_clip():
data = Data(label='test', x=[1, 2, 3])
grps = facet_subsets(DataCollection(data), data.id['x'], steps=2)
colorize_subsets(grps, gray, hi=0.5)
assert grps[0].style.color == '#000000'
assert grps[1].style.color == '#808080'
colorize_subsets(grps, gray, lo=0.5)
assert grps[0].style.color == '#808080'
assert grps[1].style.color == '#ffffff' | null |
5,830 | """MPF Hardware Service for VPE.
This is separated from the platform because we need to catch a syntax error in python 3.5 and earlier.
"""
import asyncio
from mpf.platforms.visual_pinball_engine import platform_pb2_grpc
from mpf.platforms.visual_pinball_engine import platform_pb2
class MpfHardwareService(platform_pb2_grpc.MpfHardwareServiceServicer):
"""MPF Service for VPE."""
__slots__ = ["machine", "platform", "switch_queue", "command_queue", "_started"]
def __init__(self, machine, platform):
"""Initialise MPF service for VPE."""
self._connected = asyncio.Future()
self.machine = machine
self.platform = platform
self.switch_queue = asyncio.Queue()
self.command_queue = asyncio.Queue()
self._started = asyncio.Future()
def send_command(self, command):
"""Send command to VPE."""
self.command_queue.put_nowait(command)
def get_switch_queue(self):
"""Return switch queue."""
return self.switch_queue
async def wait_for_vpe_connect(self):
"""Wait until VPE has connected."""
return await self._connected
def METHOD_NAME(self):
"""Mark service as ready."""
self._started.set_result(True)
async def Start(self, request, context): # noqa
"""Start MPF."""
self._connected.set_result(request)
while True:
command = await self.command_queue.get()
# this only works in Python 3.6+
yield command
async def GetMachineDescription(self, request, context): # noqa
"""Get Platform Configuration of VPE platform."""
switches = []
await self._started
for switch in self.platform.get_configured_switches():
switch_description = platform_pb2.SwitchDescription()
switch_description.name = switch.config.name
switch_description.hardware_number = switch.number
switch_description.switch_type = "NC" if switch.config.invert else "NO"
switches.append(switch_description)
coils = []
for coil in self.platform.get_configured_coils():
coil_description = platform_pb2.CoilDescription()
coil_description.name = coil.config.name
coil_description.hardware_number = coil.number
coils.append(coil_description)
lights = []
for light in self.platform.get_configured_lights():
light_description = platform_pb2.LightDescription()
light_description.name = light.config.name
light_description.hardware_channel_number = light.number
light_description.hardware_channel_color = light.config.color.name
lights.append(light_description)
dmds = []
for dmd in self.platform.get_configured_dmds():
dmd_description = platform_pb2.DmdDescription()
dmd_description.name = dmd.name
if dmd.color_mapping == "RGB":
dmd_description.color_mapping = platform_pb2.DmdDescription.ColorMapping.RGB
elif dmd.color_mapping == "BW":
dmd_description.color_mapping = platform_pb2.DmdDescription.ColorMapping.BW
else:
raise AssertionError("Invalid color mapping {}".format(dmd.color_mapping))
dmd_description.height = dmd.height
dmd_description.width = dmd.width
dmds.append(dmd_description)
segment_displays = []
for segment_display in self.platform.get_configured_segment_displays():
segment_display_description = platform_pb2.SegmentDisplayDescription()
segment_display_description.name = segment_display.number
segment_display_description.width = segment_display.length_of_display
segment_displays.append(segment_display_description)
machine_description = platform_pb2.MachineDescription(switches=switches, coils=coils, lights=lights, dmds=dmds,
segment_displays=segment_displays)
return machine_description
async def SendSwitchChanges(self, request_iterator, context): # noqa
"""Process a stream of switches."""
async for element in request_iterator:
self.switch_queue.put_nowait(element)
return platform_pb2.EmptyResponse()
async def Quit(self, request, context): # noqa
"""Stop MPF."""
self.machine.stop(reason="VPE exited.")
return platform_pb2.EmptyResponse() | null |
5,831 | import json
import rospy
import subprocess
import threading
import re
from lg_msg_defs.msg import WindowGeometry
from lg_common.logger import get_logger
logger = get_logger('managed_window')
class ManagedWindow(object):
LAYER_BELOW = 'below'
LAYER_NORMAL = 'normal'
LAYER_ABOVE = 'above'
LAYER_TOUCH = 'touch' # touch things are always on top
def __init__(self, w_name=None, w_class=None, w_instance=None,
geometry=None, visible=True, chrome_kiosk_workaround=False,
layer=LAYER_NORMAL):
self.w_name = w_name
self.w_class = w_class
self.w_instance = w_instance
self.geometry = geometry
self.is_visible = visible
self.layer = layer
self.lock = threading.Lock()
def __str__(self):
return 'name={name}, class={cls}, instance={inst}, {w}x{h} {x},{y}'.format(
name=self.w_name,
cls=self.w_class,
inst=self.w_instance,
w=self.geometry.width if self.geometry is not None else None,
h=self.geometry.height if self.geometry is not None else None,
x=self.geometry.x if self.geometry is not None else None,
y=self.geometry.y if self.geometry is not None else None,
)
@staticmethod
def parse_geometry(geometry):
"""
Parses Xorg window geometry in the form WxH[+-]X[+-]Y
Raises ValueError if the geometry string is invalid.
"""
m = re.match(r'^(\d+)x(\d+)([+-]\d+)([+-]\d+)$', geometry)
if m is None:
raise ValueError(
'Invalid window geometry: {}'.format(geometry))
dims = list(map(int, m.groups()))
return WindowGeometry(width=dims[0], height=dims[1],
x=dims[2], y=dims[3])
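    # Illustrative sketch (the geometry string is an assumed example):
    #   ManagedWindow.parse_geometry('1920x1080+0-200')
    #   -> WindowGeometry(width=1920, height=1080, x=0, y=-200)
    # and format_geometry() below performs the inverse conversion.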
@staticmethod
def format_geometry(geometry):
"""
Formats WindowGeometry as a string.
"""
return "{}x{}{:+}{:+}".format(geometry.width, geometry.height,
geometry.x, geometry.y)
@staticmethod
def lookup_viewport_geometry(viewport_key):
"""
Looks up geometry for the given viewport name.
Raises KeyError if the viewport is not configured.
"""
param_name = '/viewport/{}'.format(viewport_key)
if not rospy.has_param(param_name):
raise KeyError(
'Viewport parameter not set: {}'.format(param_name))
viewport_value = rospy.get_param(param_name)
return ManagedWindow.parse_geometry(viewport_value)
@staticmethod
def get_viewport_geometry():
"""
Returns WindowGeometry if the private '~viewport' param is set.
Returns None if the private '~viewport' param is not set.
"""
if rospy.has_param('~viewport'):
viewport = rospy.get_param('~viewport')
geometry = ManagedWindow.lookup_viewport_geometry(viewport)
else:
geometry = None
return geometry
def _get_command(self):
msg = {
'op': 'converge',
'data': {}
}
if self.w_name:
msg['data']['wm_name'] = self.w_name
if self.w_instance:
msg['data']['wm_instance'] = self.w_instance
if self.w_class:
msg['data']['wm_class'] = self.w_class
if self.geometry:
msg['data']['rectangle'] = ManagedWindow.format_geometry(self.geometry)
if self.layer:
msg['data']['layer'] = self.layer
msg['data']['hidden'] = not self.is_visible
return ['lg_wm_send', json.dumps(msg, ensure_ascii=False)]
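    # Hedged example of the message built above (field values are assumed):
    # for a window with wm_instance "chrome", geometry 1920x1080+0+0, layer
    # "above" and visible=True, the resulting command is roughly
    #   lg_wm_send '{"op": "converge", "data": {"wm_instance": "chrome",
    #                "rectangle": "1920x1080+0+0", "layer": "above",
    #                "hidden": false}}'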
def METHOD_NAME(self, visible):
with self.lock:
self.is_visible = visible
def set_geometry(self, geometry):
with self.lock:
self.geometry = geometry
def converge(self):
with self.lock:
cmd = self._get_command()
logger.warning('running: {}'.format(cmd))
try:
subprocess.check_call(cmd, close_fds=True)
except Exception as e:
logger.error('failed to run {} : {}'.format(cmd, str(e)))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 | null |
5,832 | # SPDX-FileCopyrightText: Kattni Rembor for Adafruit Industries
# SPDX-FileCopyrightText: Limor Fried for Adafruit Industries
# SPDX-FileCopyrightText: 2019 Liz Clark for Adafruit Industries
#
# SPDX-License-Identifier: MIT
"""
Prop-Maker based Darksaber
Adapted from the Prop-Maker based Master Sword code
by Kattni Rembor & Limor Fried
Adafruit invests time and resources providing this open source code.
Please support Adafruit and open source hardware by purchasing
products from Adafruit!
Written by Liz Clark for Adafruit Industries
Copyright (c) 2021 Adafruit Industries
Licensed under the MIT license.
All text above must be included in any redistribution.
"""
import time
import random
import board
from digitalio import DigitalInOut, Direction
import neopixel
import adafruit_lis3dh
from adafruit_led_animation.animation.solid import Solid
from adafruit_led_animation.animation.pulse import Pulse
from adafruit_led_animation.animation.comet import Comet
# CUSTOMISE SENSITIVITY HERE: smaller numbers = more sensitive to motion
HIT_THRESHOLD = 250
SWING_THRESHOLD = 150
# Set to the length in seconds of the "on.wav" file
POWER_ON_SOUND_DURATION = 1.7
# NeoPixel setup
NUM_PIXELS = 34 # Number of pixels used in project
NEOPIXEL_PIN = board.D5
POWER_PIN = board.D10
enable = DigitalInOut(POWER_PIN)
enable.direction = Direction.OUTPUT
enable.value = False
strip = neopixel.NeoPixel(NEOPIXEL_PIN, NUM_PIXELS, brightness=.5, auto_write=False)
strip.fill(0) # NeoPixels off ASAP on startup
strip.show()
# default NeoPixel color is white
COLOR = (255, 255, 255)
# NeoPixel animations
pulse = Pulse(strip, speed=0.05, color=COLOR, period=3)
solid = Solid(strip, color=COLOR)
comet = Comet(strip, speed=0.05, color=COLOR, tail_length=40)
#audio
try:
from audiocore import WaveFile
except ImportError:
from audioio import WaveFile
try:
from audioio import AudioOut
except ImportError:
try:
from audiopwmio import PWMAudioOut as AudioOut
except ImportError:
pass # not always supported by every board!
audio = AudioOut(board.A0) # Speaker
wave_file = None
# Set up accelerometer on I2C bus, 4G range:
i2c = board.I2C() # uses board.SCL and board.SDA
# i2c = board.STEMMA_I2C() # For using the built-in STEMMA QT connector on a microcontroller
accel = adafruit_lis3dh.LIS3DH_I2C(i2c)
accel.range = adafruit_lis3dh.RANGE_4_G
def METHOD_NAME(name, loop=False):
"""
Play a WAV file in the 'sounds' directory.
:param name: partial file name string, complete name will be built around
this, e.g. passing 'foo' will play file 'sounds/foo.wav'.
:param loop: if True, sound will repeat indefinitely (until interrupted
by another sound).
"""
global wave_file # pylint: disable=global-statement
print("playing", name)
if wave_file:
wave_file.close()
try:
wave_file = open('sounds/' + name + '.wav', 'rb')
wave = WaveFile(wave_file)
audio.play(wave, loop=loop)
except OSError:
pass # we'll just skip playing then
def power_on(sound, duration):
"""
Animate NeoPixels with accompanying sound effect for power on.
:param sound: sound name (similar format to play_wav() above)
:param duration: estimated duration of sound, in seconds (>0.0)
"""
start_time = time.monotonic() # Save audio start time
METHOD_NAME(sound)
while True:
elapsed = time.monotonic() - start_time # Time spent playing sound
if elapsed > duration: # Past sound duration?
break # Stop animating
comet.animate()
# List of swing wav files without the .wav in the name for use with play_wav()
swing_sounds = [
'swing1',
'swing2',
'swing3',
'swing4',
]
# List of hit wav files without the .wav in the name for use with play_wav()
hit_sounds = [
'hit1',
'hit2',
'hit3',
'hit4',
]
mode = 0 # Initial mode = OFF
#RGB LED
red_led = DigitalInOut(board.D11)
green_led = DigitalInOut(board.D12)
blue_led = DigitalInOut(board.D13)
red_led.direction = Direction.OUTPUT
green_led.direction = Direction.OUTPUT
blue_led.direction = Direction.OUTPUT
blue_led.value = True
red_led.value = True
green_led.value = True
# Main loop
while True:
if mode == 0: # If currently off...
enable.value = True
power_on('on', POWER_ON_SOUND_DURATION) # Power up!
METHOD_NAME('idle', loop=True) # Play idle sound now
mode = 1 # Idle mode
elif mode >= 1: # If not OFF mode...
x, y, z = accel.acceleration # Read accelerometer
accel_total = x * x + z * z
# (Y axis isn't needed, due to the orientation that the Prop-Maker
# Wing is mounted. Also, square root isn't needed, since we're
# comparing thresholds...use squared values instead.)
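    # Worked numbers for the thresholds above (illustrative only): since the
    # comparison is done on the squared magnitude, HIT_THRESHOLD = 250
    # corresponds to an x/z acceleration magnitude of sqrt(250) ~ 15.8 m/s^2,
    # and SWING_THRESHOLD = 150 to sqrt(150) ~ 12.2 m/s^2.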
if accel_total > HIT_THRESHOLD: # Large acceleration = HIT
TRIGGER_TIME = time.monotonic() # Save initial time of hit
METHOD_NAME(random.choice(hit_sounds)) # Start playing 'hit' sound
solid.animate()
mode = 3 # HIT mode
elif mode == 1 and accel_total > SWING_THRESHOLD: # Mild = SWING
TRIGGER_TIME = time.monotonic() # Save initial time of swing
METHOD_NAME(random.choice(swing_sounds)) # Randomly choose from available swing sounds
while audio.playing:
pass # wait till we're done
mode = 2 # we'll go back to idle mode
elif mode == 1:
pulse.animate()
elif mode > 1: # If in SWING or HIT mode...
if audio.playing: # And sound currently playing...
blend = time.monotonic() - TRIGGER_TIME # Time since triggered
if mode == 2: # If SWING,
blend = abs(0.5 - blend) * 2.0 # ramp up, down
else: # No sound now, but still SWING or HIT modes
METHOD_NAME('idle', loop=True) # Resume idle sound
mode = 1 # Return to idle mode | null |
5,833 | # -*- coding: utf-8 -*-
# @Time : 2019/8/24 12:06
# @Author : zhoujun
import os
import sys
import pathlib
__dir__ = pathlib.Path(os.path.abspath(__file__))
sys.path.append(str(__dir__))
sys.path.append(str(__dir__.parent.parent))
import time
import cv2
import paddle
from data_loader import get_transforms
from models import build_model
from post_processing import get_post_processing
def resize_image(img, short_size):
height, width, _ = img.shape
if height < width:
new_height = short_size
new_width = new_height / height * width
else:
new_width = short_size
new_height = new_width / width * height
new_height = int(round(new_height / 32) * 32)
new_width = int(round(new_width / 32) * 32)
resized_img = cv2.resize(img, (new_width, new_height))
return resized_img
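# Illustrative sketch (the input size is an assumed example): for a 600x800
# (height x width) image and short_size=1024, the short side becomes 1024 and
# the long side scales to 1024/600*800 ~ 1365.3, which is then snapped to the
# nearest multiple of 32, so the image is resized to 1024x1376 (height x width).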
class PaddleModel:
def __init__(self, model_path, post_p_thre=0.7, gpu_id=None):
'''
Initialize the model.
:param model_path: path to the model (either the parameters alone, or the parameters saved together with the computation graph)
:param gpu_id: which GPU to run on
'''
self.gpu_id = gpu_id
if self.gpu_id is not None and isinstance(
self.gpu_id, int) and paddle.device.is_compiled_with_cuda():
paddle.device.set_device("gpu:{}".format(self.gpu_id))
else:
paddle.device.set_device("cpu")
checkpoint = paddle.load(model_path)
config = checkpoint['config']
config['arch']['backbone']['pretrained'] = False
self.model = build_model(config['arch'])
self.post_process = get_post_processing(config['post_processing'])
self.post_process.box_thresh = post_p_thre
self.img_mode = config['dataset']['train']['dataset']['args'][
'img_mode']
self.model.set_state_dict(checkpoint['state_dict'])
self.model.eval()
self.transform = []
for t in config['dataset']['train']['dataset']['args']['transforms']:
if t['type'] in ['ToTensor', 'Normalize']:
self.transform.append(t)
self.transform = get_transforms(self.transform)
def METHOD_NAME(self,
img_path: str,
is_output_polygon=False,
short_size: int=1024):
'''
Run prediction on the given image. Takes an image path and reads it with OpenCV, which is comparatively slow.
:param img_path: path to the image
:param is_numpy:
:return:
'''
assert os.path.exists(img_path), 'file is not exists'
img = cv2.imread(img_path, 1 if self.img_mode != 'GRAY' else 0)
if self.img_mode == 'RGB':
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
h, w = img.shape[:2]
img = resize_image(img, short_size)
# convert the image from (w, h) to (1, img_channel, h, w)
tensor = self.transform(img)
tensor = tensor.unsqueeze_(0)
batch = {'shape': [(h, w)]}
with paddle.no_grad():
start = time.time()
preds = self.model(tensor)
box_list, score_list = self.post_process(
batch, preds, is_output_polygon=is_output_polygon)
box_list, score_list = box_list[0], score_list[0]
if len(box_list) > 0:
if is_output_polygon:
idx = [x.sum() > 0 for x in box_list]
box_list = [box_list[i] for i, v in enumerate(idx) if v]
score_list = [score_list[i] for i, v in enumerate(idx) if v]
else:
idx = box_list.reshape(box_list.shape[0], -1).sum(
axis=1) > 0  # drop boxes whose coordinates are all zeros
box_list, score_list = box_list[idx], score_list[idx]
else:
box_list, score_list = [], []
t = time.time() - start
return preds[0, 0, :, :].detach().cpu().numpy(), box_list, score_list, t
def save_depoly(net, input, save_path):
input_spec = [
paddle.static.InputSpec(
shape=[None, 3, None, None], dtype="float32")
]
net = paddle.jit.to_static(net, input_spec=input_spec)
# save static model for inference directly
paddle.jit.save(net, save_path)
def init_args():
import argparse
parser = argparse.ArgumentParser(description='DBNet.paddle')
parser.add_argument('--model_path', default=r'model_best.pth', type=str)
parser.add_argument(
'--input_folder',
default='./test/input',
type=str,
help='img path for predict')
parser.add_argument(
'--output_folder',
default='./test/output',
type=str,
help='img path for output')
parser.add_argument('--gpu', default=0, type=int, help='gpu for inference')
parser.add_argument(
'--thre', default=0.3, type=float, help='the thresh of post_processing')
parser.add_argument(
'--polygon', action='store_true', help='output polygon or box')
parser.add_argument('--show', action='store_true', help='show result')
parser.add_argument(
'--save_result',
action='store_true',
help='save box and score to txt file')
args = parser.parse_args()
return args
if __name__ == '__main__':
import pathlib
from tqdm import tqdm
import matplotlib.pyplot as plt
from utils.util import show_img, draw_bbox, save_result, get_image_file_list
args = init_args()
print(args)
# initialize the network
model = PaddleModel(args.model_path, post_p_thre=args.thre, gpu_id=args.gpu)
img_folder = pathlib.Path(args.input_folder)
for img_path in tqdm(get_image_file_list(args.input_folder)):
preds, boxes_list, score_list, t = model.METHOD_NAME(
img_path, is_output_polygon=args.polygon)
img = draw_bbox(cv2.imread(img_path)[:, :, ::-1], boxes_list)
if args.show:
show_img(preds)
show_img(img, title=os.path.basename(img_path))
plt.show()
# save the results to the output folder
os.makedirs(args.output_folder, exist_ok=True)
img_path = pathlib.Path(img_path)
output_path = os.path.join(args.output_folder,
img_path.stem + '_result.jpg')
pred_path = os.path.join(args.output_folder,
img_path.stem + '_pred.jpg')
cv2.imwrite(output_path, img[:, :, ::-1])
cv2.imwrite(pred_path, preds * 255)
save_result(
output_path.replace('_result.jpg', '.txt'), boxes_list, score_list,
args.polygon) | null |
5,834 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops.operations.array_ops as P
from mindspore import Tensor
from mindspore.common.api import jit
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
class Net(nn.Cell):
def __init__(self, nptype):
super(Net, self).__init__()
self.unstack = P.Unstack(axis=3)
self.data_np = np.array([[[[[0, 0],
[-2, -1]],
[[0, 0],
[0, 1]]],
[[[0, 0],
[2, 3]],
[[0, 0],
[4, 5]]],
[[[0, 0],
[6, 7]],
[[0, 0],
[8, 9]]]],
[[[[0, 0],
[10, 11]],
[[0, 0],
[12, 13]]],
[[[0, 0],
[14, 15]],
[[0, 0],
[16, 17]]],
[[[0, 0],
[18, 19]],
[[0, 0],
[20, 21]]]],
[[[[0, 0],
[22, 23]],
[[0, 0],
[24, 25]]],
[[[0, 0],
[26, 27]],
[[0, 0],
[28, 29]]],
[[[0, 0],
[30, 31]],
[[0, 0],
[32, 33]]]]]).astype(nptype)
self.x1 = Parameter(initializer(Tensor(self.data_np), [3, 3, 2, 2, 2]), name='x1')
@jit
def construct(self):
return self.unstack(self.x1)
def unpack(nptype):
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
unpack_ = Net(nptype)
output = unpack_()
expect = (np.reshape(np.array([0] * 36).astype(nptype), (3, 3, 2, 2)),
np.arange(-2, 34, 1).reshape(3, 3, 2, 2).astype(nptype))
for i, exp in enumerate(expect):
assert (output[i].asnumpy() == exp).all()
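# Note on the expected values above: Unstack(axis=3) splits the (3, 3, 2, 2, 2) input into
# 2 tensors of shape (3, 3, 2, 2), i.e. output[i] == x[:, :, :, i, :]; slice 0 collects the
# zero padding and slice 1 the running sequence -2..33.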
def unpack_pynative(nptype):
context.set_context(mode=context.PYNATIVE_MODE, device_target='CPU')
x1 = np.array([[[[[0, 0],
[-2, -1]],
[[0, 0],
[0, 1]]],
[[[0, 0],
[2, 3]],
[[0, 0],
[4, 5]]],
[[[0, 0],
[6, 7]],
[[0, 0],
[8, 9]]]],
[[[[0, 0],
[10, 11]],
[[0, 0],
[12, 13]]],
[[[0, 0],
[14, 15]],
[[0, 0],
[16, 17]]],
[[[0, 0],
[18, 19]],
[[0, 0],
[20, 21]]]],
[[[[0, 0],
[22, 23]],
[[0, 0],
[24, 25]]],
[[[0, 0],
[26, 27]],
[[0, 0],
[28, 29]]],
[[[0, 0],
[30, 31]],
[[0, 0],
[32, 33]]]]]).astype(nptype)
x1 = Tensor(x1)
expect = (np.reshape(np.array([0] * 36).astype(nptype), (3, 3, 2, 2)),
np.arange(-2, 34, 1).reshape(3, 3, 2, 2).astype(nptype))
output = P.Unstack(axis=3)(x1)
for i, exp in enumerate(expect):
assert (output[i].asnumpy() == exp).all()
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_float32():
unpack(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_float16():
unpack(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_int32():
unpack(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_int16():
unpack(np.int16)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_uint8():
unpack(np.uint8)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_graph_bool():
    unpack(np.bool_)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_pynative_float32():
unpack_pynative(np.float32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_pynative_float16():
unpack_pynative(np.float16)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_pynative_int32():
unpack_pynative(np.int32)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_pynative_int16():
unpack_pynative(np.int16)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
unpack_pynative(np.uint8)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_unpack_pynative_bool():
    unpack_pynative(np.bool_) | null |
5,835 | """String utilities"""
import math
import re
import shlex
import unicodedata
import uuid
from gettext import gettext as _
from typing import List, Union
from lutris.util.log import logger
NO_PLAYTIME = "Never played"
def get_uuid_from_string(value):
return str(uuid.uuid5(uuid.NAMESPACE_URL, str(value)))
def slugify(value) -> str:
"""Remove special characters from a string and slugify it.
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
"""
_value = str(value)
# This differs from the Lutris website implementation which uses the Django
# version of `slugify` and uses the "NFKD" normalization method instead of
# "NFD". This creates some inconsistencies in titles containing a trademark
# symbols or some other special characters. The website version of slugify
# will likely get updated to use the same normalization method.
_value = unicodedata.normalize("NFD", _value).encode("ascii", "ignore")
_value = _value.decode("utf-8")
_value = str(re.sub(r"[^\w\s-]", "", _value)).strip().lower()
slug = re.sub(r"[-\s]+", "-", _value)
if not slug:
# The slug is empty, likely because the string contains only non-latin
# characters
slug = get_uuid_from_string(value)
return slug
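# Illustrative behaviour of slugify (example values, derived from the rules above):
#   slugify("Baldur's Gate II")  -> "baldurs-gate-ii"
#   slugify("Ммм")               -> a uuid5-based slug, since no ASCII characters survive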
def add_url_tags(text) -> str:
"""Surround URL with <a> tags."""
return re.sub(
r"(http[s]?://("
r"?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)",
r'<a href="\1">\1</a>',
text,
)
def lookup_string_in_text(string, text):
"""Return full line if string found in the multi-line text."""
output_lines = text.split("\n")
for line in output_lines:
if string in line:
return line
def parse_version(version):
"""Parse a version string
    Return a 3 element tuple containing:
    - The version number as a list of integers
    - The suffix (whatever comes after the version number)
    - The prefix (whatever characters come before it)
    Example::
        >>> parse_version("3.6-staging")
        ([3, 6], '-staging', '')
    Returns:
        tuple: (version number as list, suffix, prefix)
"""
version_match = re.search(r"(\d[\d\.]+\d)", version)
if not version_match:
return [], "", ""
version_number = version_match.groups()[0]
prefix = version[0:version_match.span()[0]]
suffix = version[version_match.span()[1]:]
return [int(p) for p in version_number.split(".")], suffix, prefix
def unpack_dependencies(string: str) -> List[Union[str, tuple]]:
"""Parse a string to allow for complex dependencies
Works in a similar fashion as Debian dependencies, separate dependencies
are comma separated and multiple choices for satisfying a dependency are
separated by pipes.
Example: quake-steam | quake-gog, some-quake-mod returns:
[('quake-steam', 'quake-gog'), 'some-quake-mod']
"""
def _expand_dep(dep) -> Union[str, tuple]:
if "|" in dep:
return tuple(option.strip() for option in dep.split("|") if option.strip())
return dep.strip()
if not string:
return []
return [_expand_dep(dep) for dep in string.split(",") if dep.strip()]
def gtk_safe(string: str) -> str:
"""Return a string ready to used in Gtk widgets"""
if not string:
string = ""
string = str(string)
return string.replace("&", "&").replace("<", "<").replace(">", ">")
def get_formatted_playtime(playtime) -> str:
"""Return a human readable value of the play time"""
if not playtime:
return NO_PLAYTIME
try:
playtime = float(playtime)
except ValueError:
logger.warning("Invalid playtime value '%s'", playtime)
return NO_PLAYTIME
hours = math.floor(playtime)
if hours == 1:
hours_text = _("1 hour")
elif hours > 1:
hours_text = _("%d hours") % hours
else:
hours_text = ""
minutes = int((playtime - hours) * 60)
if minutes == 1:
minutes_text = _("1 minute")
elif minutes > 1:
minutes_text = _("%d minutes") % minutes
else:
minutes_text = ""
formatted_time = " ".join([text for text in (hours_text, minutes_text) if text])
if formatted_time:
return formatted_time
if playtime:
return _("Less than a minute")
return NO_PLAYTIME
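# Illustrative examples of the formatting above:
#   get_formatted_playtime(1.5)  -> "1 hour 30 minutes"
#   get_formatted_playtime(0.01) -> "Less than a minute"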
def _split_arguments(args, closing_quot='', quotations=None) -> list:
if quotations is None:
quotations = ["'", '"']
try:
return shlex.split(args + closing_quot)
except ValueError as ex:
message = ex.args[0]
if message == "No closing quotation" and quotations:
return _split_arguments(args, quotations[0], quotations[1:])
logger.error(message)
return []
def split_arguments(args) -> list:
"""Wrapper around shlex.split that is more tolerant of errors"""
if not args:
        # shlex.split seems to hang when passed the None value
return []
return _split_arguments(args)
def METHOD_NAME(size) -> str:
"""Shows a size in bytes in a more readable way"""
units = ("bytes", "kB", "MB", "GB", "TB", "PB", "nuh uh", "no way", "BS")
unit_index = 0
while size > 1024:
size = size / 1024
unit_index += 1
return "%0.1f %s" % (size, units[unit_index]) | null |
5,836 | """Test common.ensure_stdout_handles_unicode"""
from __future__ import print_function
import unittest
import sys
from subprocess import check_call, CalledProcessError
from tempfile import mkstemp
import os
from os.path import isfile
from contextlib import contextmanager
FILE_TEXT = u'The unicode check mark is \u2713.\n'
@contextmanager
def METHOD_NAME(just_name=True):
"""Context manager that creates temp file and deletes it in the end"""
tmp_descriptor = None
tmp_name = None
tmp_handle = None
try:
tmp_descriptor, tmp_name = mkstemp()
# we create our own file handle since we want to be able to close the
# file and open it again for reading.
# We keep the os-level descriptor open so file name is still reserved
# for us
if just_name:
yield tmp_name
else:
tmp_handle = open(tmp_name, 'wb')
yield tmp_handle, tmp_name
except Exception:
raise
finally:
if tmp_descriptor is not None:
os.close(tmp_descriptor)
if tmp_handle is not None:
tmp_handle.close()
if tmp_name is not None and isfile(tmp_name):
os.unlink(tmp_name)
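# Typical use of the helper above (illustrative): the default form yields only a reserved
# temp file name, while METHOD_NAME(False) yields (handle, name) so callers can write bytes
# first and re-open the same file for reading afterwards.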
class TestEncodingHandler(unittest.TestCase):
"""Tests replacing stdout encoding in various scenarios"""
def test_print(self):
"""Test regular unicode output not raise error"""
check_call('{python} {this_file} print'.format(python=sys.executable,
this_file=__file__),
shell=True)
def test_print_redirect(self):
"""
Test redirection of unicode output to files does not raise error
TODO: test this on non-linux OSs
"""
with METHOD_NAME() as tmp_file:
check_call('{python} {this_file} print > {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
@unittest.skipIf(not sys.platform.startswith('linux'),
                     'Only tested on linux so far')
def test_print_no_lang(self):
"""
Test redirection of unicode output to files does not raise error
TODO: Adapt this for other OSs; for win create batch script
"""
check_call('LANG=C {python} {this_file} print'
.format(python=sys.executable, this_file=__file__),
shell=True)
def test_uopen(self):
"""Test that uopen in a nice environment is ok"""
with METHOD_NAME(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
try:
check_call('{python} {this_file} read {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
def test_uopen_redirect(self):
"""
Test redirection of unicode output to files does not raise error
TODO: test this on non-linux OSs
"""
with METHOD_NAME(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
with METHOD_NAME() as redirect_file:
try:
check_call(
'{python} {this_file} read {tmp_file} >{redirect_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file, redirect_file=redirect_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
@unittest.skipIf(not sys.platform.startswith('linux'),
'Only tested on linux sofar')
def test_uopen_no_lang(self):
"""
Test that uopen in a C-LANG environment is ok
TODO: Adapt this for other OSs; for win create batch script
"""
with METHOD_NAME(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
try:
check_call('LANG=C {python} {this_file} read {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
def run_read(filename):
"""This is called from test_uopen* tests as script. Reads text, compares"""
from oletools.common.io_encoding import uopen
# open file
with uopen(filename, 'rt') as reader:
# a few tests
if reader.closed:
raise ValueError('handle is closed!')
if reader.name != filename:
raise ValueError('Wrong filename {}'.format(reader.name))
if reader.isatty():
raise ValueError('Reader is a tty!')
if reader.tell() != 0:
raise ValueError('Reader.tell is not 0 at beginning')
# read text
text = reader.read()
# a few more tests
if not reader.closed:
raise ValueError('Reader is not closed outside context')
if reader.name != filename:
raise ValueError('Wrong filename {} after context'.format(reader.name))
# the following test raises an exception because reader is closed, so isatty cannot be called:
# if reader.isatty():
# raise ValueError('Reader has become a tty!')
# compare text
if sys.version_info.major <= 2: # in python2 get encoded byte string
expect = FILE_TEXT.encode('utf8')
else: # python3: should get real unicode
expect = FILE_TEXT
if text != expect:
raise ValueError('Wrong contents: {!r} != {!r}'
.format(text, expect))
return 0
def run_print():
"""This is called from test_read* tests as script. Prints & logs unicode"""
from oletools.common.io_encoding import ensure_stdout_handles_unicode
from oletools.common.log_helper import log_helper
ensure_stdout_handles_unicode()
print(u'Check: \u2713') # print check mark
# check logging as well
logger = log_helper.get_or_create_silent_logger('test_encoding_handler')
log_helper.enable_logging(False, 'debug', stream=sys.stdout)
logger.info(u'Check: \u2713')
return 0
# tests call this file as script
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit(unittest.main())
# hack required to import common from parent dir, not system-wide one
# (usually unittest seems to do that for us)
from os.path import abspath, dirname, join
ole_base = dirname(dirname(dirname(abspath(__file__))))
sys.path.insert(0, ole_base)
if sys.argv[1] == 'print':
if len(sys.argv) > 2:
print('Expect no arg for "print"', file=sys.stderr)
sys.exit(2)
sys.exit(run_print())
elif sys.argv[1] == 'read':
if len(sys.argv) != 3:
print('Expect single arg for "read"', file=sys.stderr)
sys.exit(2)
sys.exit(run_read(sys.argv[2]))
else:
print('Unexpected argument: {}'.format(sys.argv[1]), file=sys.stderr)
sys.exit(2) | null |
5,837 | import uuid
from unittest import TestCase
import jaseci.tests.jac_test_code as jtc
from jaseci.prim.sentinel import Sentinel
from jaseci.prim.element import Element
from jaseci.prim.graph import Graph
from jaseci.prim.node import Node
from jaseci.jsorc.jsorc import JsOrc
from jaseci.utils.utils import TestCaseHelper, get_all_subclasses
from jaseci.prim.architype import Architype
class ArchitypeTests(TestCaseHelper, TestCase):
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
def test_object_creation_basic_no_side_creation(self):
""" """
mast = JsOrc.master()
num_objs = len(mast._h.mem.keys())
node1 = Node(m_id=mast._m_id, h=mast._h)
node2 = Node(m_id=mast._m_id, h=mast._h, parent=node1)
num_new = len(mast._h.mem.keys())
self.assertEqual(num_new, num_objs + 2)
new_graph = Graph(m_id=mast._m_id, h=mast._h)
mast.graph_ids.add_obj(new_graph)
num_new = len(mast._h.mem.keys())
self.assertEqual(num_new, num_objs + 3)
new_graph.attach_outbound(node1)
new_graph.attach_outbound(node2)
num_new = len(mast._h.mem.keys())
self.assertEqual(num_new, num_objs + 5)
def test_edge_removal_updates_nodes_edgelist(self):
""" """
mast = JsOrc.master()
node1 = Node(m_id=mast._m_id, h=mast._h)
node2 = Node(m_id=mast._m_id, h=mast._h)
edge = node1.attach_outbound(node2)
self.assertEqual(len(node1.smart_edge_list), 1)
self.assertEqual(len(node2.smart_edge_list), 1)
self.assertEqual(len(edge), 1)
edge[0].destroy()
self.assertEqual(len(node1.smart_edge_list), 0)
self.assertEqual(len(node2.smart_edge_list), 0)
def test_object_creation_by_sentinel_no_leaks(self):
"""
Test that the destroy of sentinels clears owned objects
"""
mast = JsOrc.master()
num_objs = len(mast._h.mem.keys())
self.assertEqual(num_objs, 2)
new_graph = Graph(m_id=mast._m_id, h=mast._h)
sent = Sentinel(m_id=mast._m_id, h=mast._h)
code = jtc.prog1
mast.sentinel_ids.add_obj(sent)
mast.graph_ids.add_obj(new_graph)
num_new = len(mast._h.mem.keys())
self.assertEqual(num_new, num_objs + 2)
sent.register_code(code)
num_objs = len(mast._h.mem.keys())
sent.register_code(code)
new_num = len(mast._h.mem.keys())
self.assertEqual(num_objs, new_num)
def test_json_blob_of_objects(self):
"""
Test saving object to json and back to python dict
"""
for i in get_all_subclasses(Element):
kwargs = {"m_id": 0, "h": JsOrc.hook()}
orig = i(**kwargs)
blob1 = orig.json(detailed=True)
new = i(**kwargs)
self.assertNotEqual(orig.id, new.id)
new.json_load(blob1)
self.assertEqual(orig.id, new.id)
self.assertTrue(orig.is_equivalent(new))
def test_supermaster_can_touch_all_data(self):
mh = JsOrc.hook()
mast = JsOrc.master(h=mh)
mast2 = JsOrc.master(h=mh)
node12 = Node(m_id=mast2._m_id, h=mast2._h)
supmast = JsOrc.super_master(h=mh)
bad = mh.get_obj(mast._m_id, node12.jid)
good = mh.get_obj(supmast._m_id, node12.jid)
self.assertEqual(good, node12)
self.assertNotEqual(bad, node12)
self.assertIsNone(bad)
def test_id_list_smart_name_error(self):
mast = JsOrc.master()
sent = Sentinel(m_id=mast._m_id, h=mast._h)
self.assertIn("arch_ids", sent.arch_ids.obj_for_id_not_exist_error(0))
    def test_dont_store_invalid_fields_in_blob(self):
mast = JsOrc.master()
sent = Sentinel(m_id=mast._m_id, h=mast._h)
sent.fake_data = 5
stored = sent.jsci_payload()
sent2 = Sentinel(m_id=mast._m_id, h=mast._h)
sent2.json_load(stored)
self.assertNotIn("fake_data", vars(sent2).keys())
def test_sentinel_default_archs_dont_grow(self):
mast = JsOrc.master()
sent = Sentinel(m_id=mast._m_id, h=mast._h)
sent.register_code(text="node simple; walker init {}")
before = sent._h.get_object_distribution()[Architype]
stored = sent.jsci_payload()
sent2 = Sentinel(m_id=mast._m_id, h=mast._h)
sent2.json_load(stored)
sent2 = Sentinel(m_id=mast._m_id, h=mast._h)
sent2.json_load(stored)
after = sent2._h.get_object_distribution()[Architype]
self.assertEqual(before, after)
def test_sentinel_default_archs_dont_grow_multi_compile(self):
mast = JsOrc.master()
sent = Sentinel(m_id=mast._m_id, h=mast._h)
sent.register_code(text="node simple; walker init {}")
before = sent._h.get_object_distribution()[Architype]
stored = sent.jsci_payload()
sent2 = Sentinel(m_id=mast._m_id, h=mast._h)
sent2.json_load(stored)
sent2.register_code(text="node simple; walker init {}")
before_id = sent2.arch_ids[0]
sent2.register_code(text="node simple; walker init {}")
sent2.register_code(text="node simple; walker init {}")
after_id = sent2.arch_ids[0]
after = sent2._h.get_object_distribution()[Architype]
self.assertEqual(before, after)
self.assertNotEqual(before_id, after_id)
def METHOD_NAME(self):
mast = JsOrc.master()
sent = Sentinel(m_id=mast._m_id, h=mast._h)
sent.register_code(text="node simple; walker init {}")
before = len(sent.arch_ids)
sent._h.get_obj(mast._m_id, sent.arch_ids[1]).destroy()
sent._h.get_obj(mast._m_id, sent.arch_ids[3]).destroy()
sent.arch_ids.obj_list()
after = len(sent.arch_ids)
self.assertEqual(after, before - 2) | null |
5,838 | # Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Stub Tensor implementation."""
import inspect
from functools import reduce
from mindspore.common.tensor import Tensor
from mindspore.common.dtype import type_size_in_bytes
from mindspore._c_expression import TensorNode, SequenceNode, NoneTypeNode, AnyTypeNode
from mindspore._c_expression import Tensor as Tensor_
from mindspore.common.api import _convert_python_data
def _stub_member(var, init):
"""handle stub tensor's member, use a member cache to improve performance"""
def getx(stub):
if stub.tensor is not None:
return getattr(stub.tensor, var)
if hasattr(stub, "member_cache"):
            return stub.member_cache.get(var, init)
return init
def setx(stub, value):
if stub.tensor is not None:
setattr(stub.tensor, var, value)
else:
if not hasattr(stub, "member_cache"):
stub.member_cache = {}
stub.member_cache[var] = value
return property(getx, setx)
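# For example, "init = _stub_member("init", None)" below builds a property that proxies to the
# real Tensor once it exists, and otherwise keeps the value in a per-instance member_cache
# dict so it is not lost before stub_sync() materialises the tensor.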
def _stub_method(method):
def fun(*arg, **kwargs):
stub = arg[0]
arg = (stub.stub_sync(),) + arg[1:]
return method(*arg, **kwargs)
return fun
class StubTensor:
"""stub tensor for async op run."""
const_arg = _stub_member("const_arg", None)
init = _stub_member("init", None)
init_finished = _stub_member("init_finished", False)
virtual_flag = _stub_member("virtual_flag", False)
adapter_flag = _stub_member("adapter_flag", False)
parent_tensor_ = _stub_member("parent_tensor_", None)
index_of_parent_ = _stub_member("index_of_parent_", None)
slice_num_of_persistent_data_ = _stub_member("slice_num_of_persistent_data_", None)
slice_shape_of_persistent_data_ = _stub_member("slice_shape_of_persistent_data_", None)
def __init__(self, stub=None, tensor=None):
self.stub = stub
self.tensor = tensor
__str__ = _stub_method(Tensor.__str__)
__repr__ = _stub_method(Tensor.__repr__)
__setitem__ = _stub_method(Tensor.__setitem__)
__lt__ = Tensor.__lt__
__le__ = Tensor.__le__
__gt__ = Tensor.__gt__
__ge__ = Tensor.__ge__
__eq__ = Tensor.__eq__
__ne__ = Tensor.__ne__
@property
def shape(self):
"""shape stub."""
if self.stub:
if not hasattr(self, "stub_shape"):
self.stub_shape = self.stub.get_shape()
return self.stub_shape
return self.tensor.shape
@property
def dtype(self):
"""dtype stub."""
if self.stub:
if not hasattr(self, "stub_dtype"):
self.stub_dtype = self.stub.get_dtype()
return self.stub_dtype
return self.tensor.dtype
@property
def size(self):
"""size stub."""
shape = self.shape
return reduce((lambda x, y: x * y), shape) if shape else 1
@property
def itemsize(self):
"""itemsize stub."""
return type_size_in_bytes(self.dtype)
@property
def nbytes(self):
"""nbytes stub."""
return self.size * self.itemsize
@property
def ndim(self):
"""ndim stub."""
return len(self.shape)
@property
def strides(self):
"""strides stub."""
return self.stub_sync().strides
@property
def has_init(self):
"""has_init stub."""
return False
def ndimension(self):
r"""
Alias for :func:`mindspore.Tensor.ndim`.
"""
return self.ndim
def METHOD_NAME(self):
r"""
Alias for :func:`mindspore.Tensor.ndim`.
"""
return self.ndim
asnumpy = _stub_method(Tensor.asnumpy)
is_persistent_data = _stub_method(Tensor.is_persistent_data)
asnumpy_of_slice_persistent_data = _stub_method(Tensor.asnumpy_of_slice_persistent_data)
slice_num_of_persistent_data = _stub_method(Tensor.slice_num_of_persistent_data)
slice_shape_of_persistent_data = _stub_method(Tensor.slice_shape_of_persistent_data)
flush_from_cache = _stub_method(Tensor.flush_from_cache)
def stub_sync(self):
"""sync real tensor."""
if self.stub:
val = self.stub.get_value()
self.tensor = Tensor(val, internal=True)
if hasattr(self, "member_cache"):
for k, v in self.member_cache.items():
setattr(self.tensor, k, v)
self.stub = None
return self.tensor
def _init_stub_tensor_api():
"""adapt to python tensor and cpp tensor api"""
need_init_func = set(dir(Tensor)) - set(dir(StubTensor))
cpp_tensor_func = dir(Tensor_)
for attr in need_init_func:
func = inspect.getattr_static(Tensor, attr)
if attr in cpp_tensor_func:
# for cpp tensor api, we always need to sync for real tensor first
setattr(StubTensor, attr, _stub_method(func))
else:
setattr(StubTensor, attr, func)
_init_stub_tensor_api()
def _convert_stub(stub):
"convert stub to StubNode or Value"
if isinstance(stub, TensorNode):
return StubTensor(stub)
if isinstance(stub, tuple):
return tuple(_convert_stub(e) for e in stub)
if isinstance(stub, SequenceNode):
elements = stub.get_elements()
return tuple(_convert_stub(e) for e in elements)
if isinstance(stub, NoneTypeNode):
val = stub.get_real_value()
return _convert_python_data(val)
if isinstance(stub, AnyTypeNode):
val = stub.get_real_node()
return _convert_stub(val)
return _convert_python_data(stub) | null |
5,839 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from unittest.mock import patch
from pgadmin.utils import server_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as compound_triggers_utils
import sys
from config import PG_DEFAULT_DRIVER
class CompoundTriggersUpdateTestCase(BaseTestGenerator):
"""This class will update compound trigger under table node."""
scenarios = utils.generate_scenarios('update_compound_trigger',
compound_triggers_utils.test_cases)
def setUp(self):
super().setUp()
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
server_con = server_utils.connect_server(self, self.server_id)
if server_con:
if "type" in server_con["data"]:
if server_con["data"]["type"] == "pg":
message = "Compound Triggers are not supported by PG."
self.skipTest(message)
elif server_con["data"]["type"] == "ppas" \
and server_con["data"]["version"] < 120000:
message = "Compound Triggers are not supported by " \
"EPAS server less than 12"
self.skipTest(message)
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception(
"Could not connect to database to update a compound trigger.")
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to update a trigger.")
self.table_name = \
"table_compound_trigger_%s" % (str(uuid.uuid4())[1:8])
self.table_id = tables_utils.create_table(self.server, self.db_name,
self.schema_name,
self.table_name)
self.trigger_name = \
"test_compound_trigger_update_%s" % (str(uuid.uuid4())[1:8])
self.trigger_id = \
compound_triggers_utils.create_compound_trigger(self.server,
self.db_name,
self.schema_name,
self.table_name,
self.trigger_name)
def METHOD_NAME(self):
return self.tester.put(
"{0}{1}/{2}/{3}/{4}/{5}/{6}".format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id, self.table_id,
self.trigger_id),
data=json.dumps(self.test_data),
follow_redirects=True
)
def runTest(self):
"""This function will get trigger under table node."""
trigger_response = \
compound_triggers_utils.verify_compound_trigger(self.server,
self.db_name,
self.trigger_name)
if not trigger_response:
raise Exception("Could not find the compound trigger to update.")
if hasattr(self, 'disable_trigger') and self.disable_trigger:
compound_triggers_utils.enable_disable_compound_trigger(
self.server,
self.db_name,
self.schema_name,
self.table_name,
self.trigger_name,
False
)
self.test_data.update({"id": self.trigger_id})
if self.is_positive_test:
if hasattr(self, "wrong_compound_trigger_id"):
self.trigger_id = 9999
response = self.METHOD_NAME()
elif hasattr(self, "new_compound_trigger_id"):
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = self.METHOD_NAME()
elif hasattr(self, "dummy_dict"):
self.mock_data['return_value'] = [(True, self.dummy_dict), (
False, self.dummy_data)]
with patch(self.mock_data["function_name"],
side_effect=self.mock_data["return_value"]), patch(
'pgadmin.utils.driver.{0}.connection.Connection.'
'execute_scalar'.format(PG_DEFAULT_DRIVER),
side_effect=[(True, True),
(True, "Mocked response")]):
response = self.METHOD_NAME()
else:
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
if hasattr(self, "wrong_compound_trigger_id"):
self.trigger_id = 9999
response = self.METHOD_NAME()
self.assertEqual(response.status_code,
self.expected_data["status_code"])
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,840 | # SPDX-License-Identifier: MIT
"""
These are keyword-only APIs that call `attr.s` and `attr.ib` with different
default values.
"""
from functools import partial
from . import setters
from ._funcs import asdict as _asdict
from ._funcs import METHOD_NAME as _astuple
from ._make import (
NOTHING,
_frozen_setattrs,
_ng_default_on_setattr,
attrib,
attrs,
)
from .exceptions import UnannotatedAttributeError
def define(
maybe_cls=None,
*,
these=None,
repr=None,
unsafe_hash=None,
hash=None,
init=None,
slots=True,
frozen=False,
weakref_slot=True,
str=False,
auto_attribs=None,
kw_only=False,
cache_hash=False,
auto_exc=True,
eq=None,
order=False,
auto_detect=True,
getstate_setstate=None,
on_setattr=None,
field_transformer=None,
match_args=True,
):
r"""
Define an *attrs* class.
Differences to the classic `attr.s` that it uses underneath:
- Automatically detect whether or not *auto_attribs* should be `True` (c.f.
*auto_attribs* parameter).
- If *frozen* is `False`, run converters and validators when setting an
attribute by default.
- *slots=True*
.. caution::
Usually this has only upsides and few visible effects in everyday
       programming. But it *can* lead to some surprising behaviors, so please
make sure to read :term:`slotted classes`.
- *auto_exc=True*
- *auto_detect=True*
- *order=False*
- Some options that were only relevant on Python 2 or were kept around for
backwards-compatibility have been removed.
Please note that these are all defaults and you can change them as you
wish.
:param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
1. If any attributes are annotated and no unannotated `attrs.fields`\ s
are found, it assumes *auto_attribs=True*.
2. Otherwise it assumes *auto_attribs=False* and tries to collect
`attrs.fields`\ s.
For now, please refer to `attr.s` for the rest of the parameters.
.. versionadded:: 20.1.0
.. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
.. versionadded:: 22.2.0
*unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
"""
def do_it(cls, auto_attribs):
return attrs(
maybe_cls=cls,
these=these,
repr=repr,
hash=hash,
unsafe_hash=unsafe_hash,
init=init,
slots=slots,
frozen=frozen,
weakref_slot=weakref_slot,
str=str,
auto_attribs=auto_attribs,
kw_only=kw_only,
cache_hash=cache_hash,
auto_exc=auto_exc,
eq=eq,
order=order,
auto_detect=auto_detect,
collect_by_mro=True,
getstate_setstate=getstate_setstate,
on_setattr=on_setattr,
field_transformer=field_transformer,
match_args=match_args,
)
def wrap(cls):
"""
Making this a wrapper ensures this code runs during class creation.
We also ensure that frozen-ness of classes is inherited.
"""
nonlocal frozen, on_setattr
had_on_setattr = on_setattr not in (None, setters.NO_OP)
# By default, mutable classes convert & validate on setattr.
if frozen is False and on_setattr is None:
on_setattr = _ng_default_on_setattr
# However, if we subclass a frozen class, we inherit the immutability
# and disable on_setattr.
for base_cls in cls.__bases__:
if base_cls.__setattr__ is _frozen_setattrs:
if had_on_setattr:
msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)."
raise ValueError(msg)
on_setattr = setters.NO_OP
break
if auto_attribs is not None:
return do_it(cls, auto_attribs)
try:
return do_it(cls, True)
except UnannotatedAttributeError:
return do_it(cls, False)
# maybe_cls's type depends on the usage of the decorator. It's a class
# if it's used as `@attrs` but ``None`` if used as `@attrs()`.
if maybe_cls is None:
return wrap
return wrap(maybe_cls)
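# Minimal usage sketch (illustrative, not part of this module):
#   @define
#   class Point:
#       x: int
#       y: int = 0
# All fields are annotated, so auto_attribs is inferred as True; the resulting class is
# slotted, and converters/validators run on attribute assignment because it is mutable.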
mutable = define
frozen = partial(define, frozen=True, on_setattr=None)
def field(
*,
default=NOTHING,
validator=None,
repr=True,
hash=None,
init=True,
metadata=None,
type=None,
converter=None,
factory=None,
kw_only=False,
eq=None,
order=None,
on_setattr=None,
alias=None,
):
"""
Identical to `attr.ib`, except keyword-only and with some arguments
removed.
.. versionadded:: 23.1.0
The *type* parameter has been re-added; mostly for `attrs.make_class`.
Please note that type checkers ignore this metadata.
.. versionadded:: 20.1.0
"""
return attrib(
default=default,
validator=validator,
repr=repr,
hash=hash,
init=init,
metadata=metadata,
type=type,
converter=converter,
factory=factory,
kw_only=kw_only,
eq=eq,
order=order,
on_setattr=on_setattr,
alias=alias,
)
def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
"""
Same as `attr.asdict`, except that collections types are always retained
and dict is always used as *dict_factory*.
.. versionadded:: 21.3.0
"""
return _asdict(
inst=inst,
recurse=recurse,
filter=filter,
value_serializer=value_serializer,
retain_collection_types=True,
)
def METHOD_NAME(inst, *, recurse=True, filter=None):
"""
Same as `attr.astuple`, except that collections types are always retained
and `tuple` is always used as the *tuple_factory*.
.. versionadded:: 21.3.0
"""
return _astuple(
inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
) | null |
5,841 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2017 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
import unittest
from mantidqt.project.encoderfactory import EncoderFactory
from mantidqt.project.decoderfactory import DecoderFactory
from mantidqt.widgets.instrumentview.io import InstrumentViewEncoder, InstrumentViewDecoder
from mantid.simpleapi import CreateSampleWorkspace
from mantidqt.utils.qt.testing import start_qapplication
INSTRUMENT_VIEW_DICT = {
"workspaceName": "ws",
"tabs": {
"maskTab": {
"activeType": {"roiOn": False, "groupingOn": False, "maskingOn": True},
"activeTools": {
"ellipseButton": False,
"moveButton": True,
"pointerButton": False,
"ringRectangleButton": False,
"freeDrawButton": False,
"ringEllipseButton": False,
"tubeButton": False,
"pixelButton": False,
},
"maskWorkspaceSaved": False,
},
"renderTab": {
"displayWireframe": False,
"displayLighting": False,
"labelPrecision": 1,
"useUCorrection": False,
"maintainAspectRatio": True,
"autoScaling": False,
"colorBar": {"max": "40", "scaleType": 0, "power": "2", "min": "40"},
"showLabels": True,
"flipView": False,
"displayDetectorsOnly": True,
"displayAxes": False,
"axesView": 0,
"showRows": True,
"useOpenGL": True,
"showRelativeIntensity": False,
"freezeRotation": False,
},
"treeTab": {"expandedItems": []},
"pickTab": {
"freeDraw": False,
"ringEllipse": False,
"edit": False,
"tube": False,
"peakErase": False,
"zoom": False,
"one": True,
"ringRectangle": False,
"peakAdd": False,
"ellipse": False,
"rectangle": False,
},
},
"surfaceType": 0,
"actor": {"binMasks": [], "fileName": "viridis", "highlightZeroCounts": False},
"energyTransfer": [0.0, 99.0],
"surface": {
"shapes": [],
"alignmentInfo": [],
"backgroundColor": {"blue": 0, "alpha": 255, "green": 0, "red": 0},
"projection3D": {"viewport": {"rotation": [1.0, 0.0, 0.0, 0.0], "translation": {"xTrans": 0.0, "yTrans": 0.0}, "zoom": 1.0}},
"projection3DSuccess": True,
},
"currentTab": 0,
}
@start_qapplication
class InstrumentViewEncoderTest(unittest.TestCase):
def setUp(self):
self.encoder = InstrumentViewEncoder()
CreateSampleWorkspace(OutputWorkspace="ws")
self.instrumentView = InstrumentViewDecoder().decode(INSTRUMENT_VIEW_DICT)
def test_encoder_is_in_encoder_factory(self):
        # Shows that the encoder has been registered on import of something from mantidqt.widgets.instrumentview
found_encoder = EncoderFactory.find_encoder(self.instrumentView)
self.assertIs(InstrumentViewEncoder, found_encoder.__class__)
def test_encoder_encode_function_returns_none_when_obj_is_none(self):
self.assertIs(None, self.encoder.encode(None))
def test_encoder_encodes_a_dict_similar_to_set_dict(self):
self.maxDiff = None
self.assertDictEqual(self.encoder.encode(self.instrumentView), INSTRUMENT_VIEW_DICT)
@start_qapplication
class InstrumentViewDecoderTest(unittest.TestCase):
def setUp(self):
self.decoder = InstrumentViewDecoder()
def test_decoder_is_in_decoder_factory(self):
# Shows that the decoder has been registered on import of something from mantidqt.widget.instrumentview
found_decoder = DecoderFactory.find_decoder("InstrumentView")
self.assertIs(InstrumentViewDecoder, found_decoder.__class__)
def test_decoder_decode_function_returns_none_when_obj_is_none(self):
self.assertIs(None, self.decoder.decode(None))
def METHOD_NAME(self):
CreateSampleWorkspace(OutputWorkspace="ws")
self.decoder.decode(INSTRUMENT_VIEW_DICT) | null |
5,842 | """
Unittest for time.strftime
"""
import calendar
import sys
import re
from test import support
import time
import unittest
# helper functions
def fixasctime(s):
if s[8] == ' ':
s = s[:8] + '0' + s[9:]
return s
def escapestr(text, ampm):
"""
Escape text to deal with possible locale values that have regex
syntax while allowing regex syntax used for comparison.
"""
new_text = re.escape(text)
new_text = new_text.replace(re.escape(ampm), ampm)
new_text = new_text.replace(r'\%', '%')
new_text = new_text.replace(r'\:', ':')
new_text = new_text.replace(r'\?', '?')
return new_text
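# For example (illustrative), escapestr('%I:%M (AM|am)', '(AM|am)') escapes the literal text
# for use with re.match() while keeping the '(AM|am)' alternation, '%', ':' and '?' intact.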
class StrftimeTest(unittest.TestCase):
def _update_variables(self, now):
# we must update the local variables on every cycle
self.gmt = time.gmtime(now)
now = time.localtime(now)
if now[3] < 12: self.ampm='(AM|am)'
else: self.ampm='(PM|pm)'
self.jan1 = time.localtime(time.mktime((now[0], 1, 1, 0, 0, 0, 0, 1, 0)))
try:
if now[8]: self.tz = time.tzname[1]
else: self.tz = time.tzname[0]
except AttributeError:
self.tz = ''
if now[3] > 12: self.clock12 = now[3] - 12
elif now[3] > 0: self.clock12 = now[3]
else: self.clock12 = 12
self.now = now
def setUp(self):
try:
import java
java.util.Locale.setDefault(java.util.Locale.US)
except ImportError:
from locale import setlocale, LC_TIME
saved_locale = setlocale(LC_TIME)
setlocale(LC_TIME, 'C')
self.addCleanup(setlocale, LC_TIME, saved_locale)
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'a Display implementation returned an error unexpectedly: Error'")
def METHOD_NAME(self):
now = time.time()
self._update_variables(now)
self.strftest1(now)
self.strftest2(now)
if support.verbose:
print("Strftime test, platform: %s, Python version: %s" % \
(sys.platform, sys.version.split()[0]))
for j in range(-5, 5):
for i in range(25):
arg = now + (i+j*100)*23*3603
self._update_variables(arg)
self.strftest1(arg)
self.strftest2(arg)
def strftest1(self, now):
if support.verbose:
print("strftime test for", time.ctime(now))
now = self.now
        # Make sure any characters that could be taken as regex syntax are
        # escaped in escapestr()
expectations = (
('%a', calendar.day_abbr[now[6]], 'abbreviated weekday name'),
('%A', calendar.day_name[now[6]], 'full weekday name'),
('%b', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%B', calendar.month_name[now[1]], 'full month name'),
# %c see below
('%d', '%02d' % now[2], 'day of month as number (00-31)'),
('%H', '%02d' % now[3], 'hour (00-23)'),
('%I', '%02d' % self.clock12, 'hour (01-12)'),
('%j', '%03d' % now[7], 'julian day (001-366)'),
('%m', '%02d' % now[1], 'month as number (01-12)'),
('%M', '%02d' % now[4], 'minute, (00-59)'),
('%p', self.ampm, 'AM or PM as appropriate'),
('%S', '%02d' % now[5], 'seconds of current time (00-60)'),
('%U', '%02d' % ((now[7] + self.jan1[6])//7),
'week number of the year (Sun 1st)'),
('%w', '0?%d' % ((1+now[6]) % 7), 'weekday as a number (Sun 1st)'),
('%W', '%02d' % ((now[7] + (self.jan1[6] - 1)%7)//7),
'week number of the year (Mon 1st)'),
# %x see below
('%X', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%y', '%02d' % (now[0]%100), 'year without century'),
('%Y', '%d' % now[0], 'year with century'),
# %Z see below
('%%', '%', 'single percent sign'),
)
for e in expectations:
# mustn't raise a value error
try:
result = time.strftime(e[0], now)
except ValueError as error:
self.fail("strftime '%s' format gave error: %s" % (e[0], error))
if re.match(escapestr(e[1], self.ampm), result):
continue
if not result or result[0] == '%':
self.fail("strftime does not support standard '%s' format (%s)"
% (e[0], e[2]))
else:
self.fail("Conflict for %s (%s): expected %s, but got %s"
% (e[0], e[2], e[1], result))
def strftest2(self, now):
nowsecs = str(int(now))[:-1]
now = self.now
nonstandard_expectations = (
# These are standard but don't have predictable output
('%c', fixasctime(time.asctime(now)), 'near-asctime() format'),
('%x', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)),
'%m/%d/%y %H:%M:%S'),
('%Z', '%s' % self.tz, 'time zone name'),
# These are some platform specific extensions
('%D', '%02d/%02d/%02d' % (now[1], now[2], (now[0]%100)), 'mm/dd/yy'),
('%e', '%2d' % now[2], 'day of month as number, blank padded ( 0-31)'),
('%h', calendar.month_abbr[now[1]], 'abbreviated month name'),
('%k', '%2d' % now[3], 'hour, blank padded ( 0-23)'),
('%n', '\n', 'newline character'),
('%r', '%02d:%02d:%02d %s' % (self.clock12, now[4], now[5], self.ampm),
'%I:%M:%S %p'),
('%R', '%02d:%02d' % (now[3], now[4]), '%H:%M'),
('%s', nowsecs, 'seconds since the Epoch in UCT'),
('%t', '\t', 'tab character'),
('%T', '%02d:%02d:%02d' % (now[3], now[4], now[5]), '%H:%M:%S'),
('%3y', '%03d' % (now[0]%100),
'year without century rendered using fieldwidth'),
)
for e in nonstandard_expectations:
try:
result = time.strftime(e[0], now)
except ValueError as result:
msg = "Error for nonstandard '%s' format (%s): %s" % \
(e[0], e[2], str(result))
if support.verbose:
print(msg)
continue
if re.match(escapestr(e[1], self.ampm), result):
if support.verbose:
print("Supports nonstandard '%s' format (%s)" % (e[0], e[2]))
elif not result or result[0] == '%':
if support.verbose:
print("Does not appear to support '%s' format (%s)" % \
(e[0], e[2]))
else:
if support.verbose:
print("Conflict for nonstandard '%s' format (%s):" % \
(e[0], e[2]))
print(" Expected %s, but got %s" % (e[1], result))
class Y1900Tests(unittest.TestCase):
"""A limitation of the MS C runtime library is that it crashes if
a date before 1900 is passed with a format string containing "%y"
"""
@unittest.expectedFailureIfWindows("TODO: RUSTPYTHON")
def test_y_before_1900(self):
# Issue #13674, #19634
t = (1899, 1, 1, 0, 0, 0, 0, 0, 0)
if (sys.platform == "win32"
or sys.platform.startswith(("aix", "sunos", "solaris"))):
with self.assertRaises(ValueError):
time.strftime("%y", t)
else:
self.assertEqual(time.strftime("%y", t), "99")
def test_y_1900(self):
self.assertEqual(
time.strftime("%y", (1900, 1, 1, 0, 0, 0, 0, 0, 0)), "00")
def test_y_after_1900(self):
self.assertEqual(
time.strftime("%y", (2013, 1, 1, 0, 0, 0, 0, 0, 0)), "13")
if __name__ == '__main__':
unittest.main() | null |
5,843 | from __future__ import annotations
import pathlib
from typing import List, Any, Optional, TYPE_CHECKING
from ..locale import list_timezones, list_keyboard_languages
from ..menu import MenuSelectionType, Menu, TextInput
from ..models.audio_configuration import Audio, AudioConfiguration
from ..output import warn
from ..packages.packages import validate_package_list
from ..storage import storage
from ..translationhandler import Language
if TYPE_CHECKING:
_: Any
def ask_ntp(preset: bool = True) -> bool:
prompt = str(_('Would you like to use automatic time synchronization (NTP) with the default time servers?\n'))
prompt += str(_('Hardware time and other post-configuration steps might be required in order for NTP to work.\nFor more information, please check the Arch wiki'))
if preset:
preset_val = Menu.yes()
else:
preset_val = Menu.no()
choice = Menu(prompt, Menu.yes_no(), skip=False, preset_values=preset_val, default_option=Menu.yes()).run()
return False if choice.value == Menu.no() else True
def ask_hostname(preset: str = '') -> str:
while True:
hostname = TextInput(
str(_('Desired hostname for the installation: ')),
preset
).run().strip()
if hostname:
return hostname
def ask_for_a_timezone(preset: Optional[str] = None) -> Optional[str]:
timezones = list_timezones()
default = 'UTC'
choice = Menu(
_('Select a timezone'),
list(timezones),
preset_values=preset,
default_option=default
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return choice.single_value
return None
def ask_for_audio_selection(
current: Optional[AudioConfiguration] = None
) -> Optional[AudioConfiguration]:
choices = [
Audio.Pipewire.name,
Audio.Pulseaudio.name,
Audio.no_audio_text()
]
preset = current.audio.name if current else None
choice = Menu(
_('Choose an audio server'),
choices,
preset_values=preset
).run()
match choice.type_:
case MenuSelectionType.Skip: return current
case MenuSelectionType.Selection:
value = choice.single_value
if value == Audio.no_audio_text():
return None
else:
return AudioConfiguration(Audio[value])
return None
def select_language(preset: Optional[str] = None) -> Optional[str]:
"""
Asks the user to select a language
Usually this is combined with :ref:`archinstall.list_keyboard_languages`.
:return: The language/dictionary key of the selected language
:rtype: str
"""
kb_lang = list_keyboard_languages()
# sort alphabetically and then by length
sorted_kb_lang = sorted(sorted(list(kb_lang)), key=len)
choice = Menu(
_('Select keyboard layout'),
sorted_kb_lang,
preset_values=preset,
sort=False
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return choice.single_value
return None
def select_archinstall_language(languages: List[Language], preset: Language) -> Language:
# these are the displayed language names which can either be
# the english name of a language or, if present, the
# name of the language in its own language
options = {lang.display_name: lang for lang in languages}
title = 'NOTE: If a language can not displayed properly, a proper font must be set manually in the console.\n'
title += 'All available fonts can be found in "/usr/share/kbd/consolefonts"\n'
title += 'e.g. setfont LatGrkCyr-8x16 (to display latin/greek/cyrillic characters)\n'
choice = Menu(
title,
list(options.keys()),
default_option=preset.display_name,
preview_size=0.5
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Selection: return options[choice.single_value]
raise ValueError('Language selection not handled')
def ask_additional_packages_to_install(preset: List[str] = []) -> List[str]:
# Additional packages (with some light weight error handling for invalid package names)
print(_('Only packages such as base, base-devel, linux, linux-firmware, efibootmgr and optional profile packages are installed.'))
print(_('If you desire a web browser, such as firefox or chromium, you may specify it in the following prompt.'))
def read_packages(p: List = []) -> list:
display = ' '.join(p)
input_packages = TextInput(_('Write additional packages to install (space separated, leave blank to skip): '), display).run().strip()
return input_packages.split() if input_packages else []
preset = preset if preset else []
packages = read_packages(preset)
if not storage['arguments']['offline'] and not storage['arguments']['no_pkg_lookups']:
while True:
if len(packages):
# Verify packages that were given
print(_("Verifying that additional packages exist (this might take a few seconds)"))
valid, invalid = validate_package_list(packages)
if invalid:
warn(f"Some packages could not be found in the repository: {invalid}")
packages = read_packages(valid)
continue
break
return packages
def add_number_of_parrallel_downloads(input_number :Optional[int] = None) -> Optional[int]:
max_recommended = 5
print(_(f"This option enables the number of parallel downloads that can occur during package downloads"))
print(_("Enter the number of parallel downloads to be enabled.\n\nNote:\n"))
print(str(_(" - Maximum recommended value : {} ( Allows {} parallel downloads at a time )")).format(max_recommended, max_recommended))
print(_(" - Disable/Default : 0 ( Disables parallel downloading, allows only 1 download at a time )\n"))
while True:
try:
input_number = int(TextInput(_("[Default value: 0] > ")).run().strip() or 0)
if input_number <= 0:
input_number = 0
break
except:
print(str(_("Invalid input! Try again with a valid input [or 0 to disable]")).format(max_recommended))
pacman_conf_path = pathlib.Path("/etc/pacman.conf")
with pacman_conf_path.open() as f:
pacman_conf = f.read().split("\n")
with pacman_conf_path.open("w") as fwrite:
for line in pacman_conf:
if "ParallelDownloads" in line:
fwrite.write(f"ParallelDownloads = {input_number}\n") if not input_number == 0 else fwrite.write("#ParallelDownloads = 0\n")
else:
fwrite.write(f"{line}\n")
return input_number
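# Illustrative effect of the function above (assuming the user enters 5): every line of
# /etc/pacman.conf containing "ParallelDownloads" is rewritten to "ParallelDownloads = 5";
# entering 0 writes the commented default "#ParallelDownloads = 0" instead.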
def METHOD_NAME(preset: List[str]) -> List[str]:
"""
Allows the user to select additional repositories (multilib, and testing) if desired.
	:return: The selected repositories
	:rtype: list
"""
repositories = ["multilib", "testing"]
choice = Menu(
_('Choose which optional additional repositories to enable'),
repositories,
sort=False,
multi=True,
preset_values=preset,
allow_reset=True
).run()
match choice.type_:
case MenuSelectionType.Skip: return preset
case MenuSelectionType.Reset: return []
case MenuSelectionType.Selection: return choice.single_value
return [] | null |
5,844 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetUserAssignedIdentityResult',
'AwaitableGetUserAssignedIdentityResult',
'get_user_assigned_identity',
'get_user_assigned_identity_output',
]
@pulumi.output_type
class GetUserAssignedIdentityResult:
"""
A collection of values returned by getUserAssignedIdentity.
"""
def __init__(__self__, client_id=None, id=None, METHOD_NAME=None, name=None, principal_id=None, resource_group_name=None, tags=None, tenant_id=None):
if client_id and not isinstance(client_id, str):
raise TypeError("Expected argument 'client_id' to be a str")
pulumi.set(__self__, "client_id", client_id)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
pulumi.set(__self__, "principal_id", principal_id)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter(name="clientId")
def client_id(self) -> str:
"""
The Client ID of the User Assigned Identity.
"""
return pulumi.get(self, "client_id")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The Azure location where the User Assigned Identity exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The Service Principal ID of the User Assigned Identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the User Assigned Identity.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The Tenant ID of the User Assigned Identity.
"""
return pulumi.get(self, "tenant_id")
class AwaitableGetUserAssignedIdentityResult(GetUserAssignedIdentityResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetUserAssignedIdentityResult(
client_id=self.client_id,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
principal_id=self.principal_id,
resource_group_name=self.resource_group_name,
tags=self.tags,
tenant_id=self.tenant_id)
def get_user_assigned_identity(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetUserAssignedIdentityResult:
"""
Use this data source to access information about an existing User Assigned Identity.
## Example Usage
    ### Reference An Existing
```python
import pulumi
import pulumi_azure as azure
example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
resource_group_name="name_of_resource_group")
pulumi.export("uaiClientId", example.client_id)
pulumi.export("uaiPrincipalId", example.principal_id)
pulumi.export("uaiTenantId", example.tenant_id)
```
:param str name: The name of the User Assigned Identity.
:param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:authorization/getUserAssignedIdentity:getUserAssignedIdentity', __args__, opts=opts, typ=GetUserAssignedIdentityResult).value
return AwaitableGetUserAssignedIdentityResult(
client_id=pulumi.get(__ret__, 'client_id'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
principal_id=pulumi.get(__ret__, 'principal_id'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
tags=pulumi.get(__ret__, 'tags'),
tenant_id=pulumi.get(__ret__, 'tenant_id'))
@_utilities.lift_output_func(get_user_assigned_identity)
def get_user_assigned_identity_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetUserAssignedIdentityResult]:
"""
Use this data source to access information about an existing User Assigned Identity.
## Example Usage
### Reference An Existing)
```python
import pulumi
import pulumi_azure as azure
example = azure.authorization.get_user_assigned_identity(name="name_of_user_assigned_identity",
resource_group_name="name_of_resource_group")
pulumi.export("uaiClientId", example.client_id)
pulumi.export("uaiPrincipalId", example.principal_id)
pulumi.export("uaiTenantId", example.tenant_id)
```
:param str name: The name of the User Assigned Identity.
:param str resource_group_name: The name of the Resource Group in which the User Assigned Identity exists.
"""
... | null |
5,845 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
import os
import shutil
import tempfile
import unittest
from mantid.api import AnalysisDataService as ADS
from unittest import mock
from mantid.simpleapi import CreateSampleWorkspace
from mantidqt.utils.testing.strict_mock import StrictContextManagerMock
from workbench.projectrecovery.projectrecovery import ProjectRecovery
unicode = str
def add_main_window_mock(loader):
loader.main_window = mock.Mock()
loader.main_window.algorithm_selector = mock.Mock()
loader.main_window.algorithm_selector.block_progress_widget_updates = StrictContextManagerMock()
class ProjectRecoveryLoaderTest(unittest.TestCase):
def setUp(self):
self.working_directory = tempfile.mkdtemp()
self.pr = ProjectRecovery(None)
self.pr_loader = self.pr.loader
def tearDown(self):
ADS.clear()
if os.path.exists(self.pr.recovery_directory_hostname):
shutil.rmtree(self.pr.recovery_directory_hostname)
if os.path.exists(self.working_directory):
shutil.rmtree(self.working_directory)
@mock.patch("workbench.projectrecovery.projectrecoveryloader.ProjectRecoveryPresenter")
def test_attempt_recovery_and_recovery_passes(self, presenter):
presenter.return_value.start_recovery_view.return_value = True
presenter.return_value.start_recovery_failure.return_value = True
add_main_window_mock(self.pr.loader)
self.pr.clear_all_unused_checkpoints = mock.MagicMock()
self.pr.start_recovery_thread = mock.MagicMock()
self.pr.attempt_recovery()
self.assertEqual(presenter.return_value.start_recovery_view.call_count, 1)
self.assertEqual(presenter.return_value.start_recovery_failure.call_count, 0)
self.assertEqual(self.pr.clear_all_unused_checkpoints.call_count, 1)
self.assertEqual(self.pr.start_recovery_thread.call_count, 1)
self.pr.loader.main_window.algorithm_selector.block_progress_widget_updates.assert_context_triggered()
@mock.patch("workbench.projectrecovery.projectrecoveryloader.ProjectRecoveryPresenter")
def test_attempt_recovery_and_recovery_fails_first_time_but_is_successful_on_failure_view(self, presenter):
presenter.return_value.start_recovery_view.return_value = False
presenter.return_value.start_recovery_failure.return_value = True
add_main_window_mock(self.pr.loader)
self.pr.clear_all_unused_checkpoints = mock.MagicMock()
self.pr.start_recovery_thread = mock.MagicMock()
self.pr.attempt_recovery()
self.assertEqual(presenter.return_value.start_recovery_view.call_count, 1)
self.assertEqual(presenter.return_value.start_recovery_failure.call_count, 1)
self.assertEqual(self.pr.clear_all_unused_checkpoints.call_count, 1)
self.assertEqual(self.pr.start_recovery_thread.call_count, 1)
self.pr.loader.main_window.algorithm_selector.block_progress_widget_updates.assert_context_triggered()
@mock.patch("workbench.projectrecovery.projectrecoveryloader.ProjectLoader")
def test_load_project_interfaces_call(self, loader):
loader.return_value.load_project.return_value = True
self.pr_loader._load_project_interfaces("")
self.assertEqual(loader.return_value.load_project.call_args, mock.call(file_name=self.pr.recovery_file_ext, load_workspaces=False))
def METHOD_NAME(self):
# make sure to clear out the script if it exists
if os.path.exists(self.pr.recovery_order_workspace_history_file):
os.remove(self.pr.recovery_order_workspace_history_file)
# Create checkpoint
CreateSampleWorkspace(OutputWorkspace="ws")
self.pr.saver._spin_off_another_time_thread = mock.MagicMock()
self.pr.recovery_save()
# Find the checkpoint
checkpoints = os.listdir(self.pr.recovery_directory_pid)
checkpoint = os.path.join(self.pr.recovery_directory_pid, checkpoints[0])
self.pr_loader._compile_recovery_script(checkpoint)
self.assertTrue(os.path.exists(self.pr.recovery_order_workspace_history_file))
# Confirm contents is correct
with open(self.pr.recovery_order_workspace_history_file, "r") as f:
actual_file_contents = f.read()
file_contents = ""
# Strip out the time
for ii in actual_file_contents:
if ii == "#":
break
file_contents += ii
self.assertEqual(file_contents, "from mantid.simpleapi import *\n\nCreateSampleWorkspace(OutputWorkspace='ws') ")
def test_load_checkpoint(self):
# Create the checkpoint
CreateSampleWorkspace(OutputWorkspace="ws")
self.pr.saver._spin_off_another_time_thread = mock.MagicMock()
self.pr.recovery_save()
# Mock out excess function calls
self.pr_loader.recovery_presenter = mock.MagicMock()
self.pr_loader._open_script_in_editor_call = mock.MagicMock()
self.pr_loader._run_script_in_open_editor = mock.MagicMock()
self.pr_loader._load_project_interfaces = mock.MagicMock()
# Find the checkpoint
checkpoints = os.listdir(self.pr.recovery_directory_pid)
checkpoint = os.path.join(self.pr.recovery_directory_pid, checkpoints[0])
self.pr_loader.load_checkpoint(checkpoint)
# Test the calls are made properly
self.assertEqual(self.pr_loader._open_script_in_editor_call.call_count, 1)
self.assertEqual(self.pr_loader._run_script_in_open_editor.call_count, 1)
self.assertEqual(self.pr_loader._load_project_interfaces.call_count, 1)
def test_open_script_in_editor(self):
self.pr_loader.recovery_presenter = mock.MagicMock()
self.pr_loader._open_script_in_editor_call = mock.MagicMock()
# Ensure a script file exists
script = os.path.join(self.working_directory, "script")
open(script, "a").close()
self.pr_loader._open_script_in_editor(script)
self.assertEqual(self.pr_loader._open_script_in_editor_call.call_count, 1)
self.assertEqual(self.pr_loader.recovery_presenter.set_up_progress_bar.call_count, 1)
self.assertEqual(self.pr_loader.recovery_presenter.set_up_progress_bar.call_args, mock.call(0)) | null |
5,846 | import subprocess
import logging
import graphviz
from pathlib import Path
from unittest import mock
from BALSAMIC import __version__ as balsamic_version
def test_init_reference_write_json(
invoke_cli,
tmp_path,
):
# Given test_reference.json
test_genome_version = "hg19"
test_container_version = "develop"
test_new_dir = tmp_path / "test_reference_dir"
test_new_dir.mkdir()
# WHEN creating config.json in reference dir
test_output_reference_config = (
test_new_dir / balsamic_version / test_genome_version / "config.json"
)
test_output_reference_pdf = (
test_new_dir
/ balsamic_version
/ test_genome_version
/ "generate_ref_worflow_graph.pdf"
)
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"-v",
test_container_version,
]
)
    # THEN the output config and PDF files should be generated and the command exit with code 0
assert result.exit_code == 0
assert Path(test_output_reference_pdf).exists()
assert Path(test_output_reference_config).exists()
def test_init_reference_no_write_perm(tmp_path, invoke_cli, no_write_perm_path):
# Given a path with no write permission
test_genome_version = "hg19"
test_container_version = "develop"
test_new_dir = str(no_write_perm_path)
# WHEN invoking config sample
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"-v",
test_container_version,
"-g",
test_genome_version,
]
)
    # THEN the command should fail with exit code 1 (no write permission)
assert result.exit_code == 1
def test_init_reference_no_cosmic_abort(tmp_path, invoke_cli):
    # Given a test reference output directory
test_genome_version = "hg19"
test_container_version = "develop"
test_new_dir = tmp_path / "test_reference_dir"
test_new_dir.mkdir()
# WHEN invoking config sample
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"-v",
test_container_version,
"-g",
test_genome_version,
]
)
    # THEN the command should abort with exit code 1 (no COSMIC key provided)
assert result.exit_code == 1
def test_init_reference_no_cosmic_run(tmp_path, invoke_cli):
    # Given a test reference output directory
test_genome_version = "canfam3"
test_container_version = "develop"
test_new_dir = tmp_path / "test_reference_dir"
test_new_dir.mkdir()
# WHEN invoking config sample
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"-v",
test_container_version,
"-g",
test_genome_version,
]
)
    # THEN the command should run successfully without a COSMIC key
assert result.exit_code == 0
def test_init_reference_click_abort(invoke_cli, tmp_path):
# Given test_reference output directory
test_container_version = "develop"
test_new_dir = tmp_path / "test_reference_dir"
test_new_dir.mkdir()
# WHEN running the command
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"-v",
test_container_version,
"--run-analysis",
]
)
    # THEN it should exit with an error for not providing the run-mode
assert result.exit_code == 1
def test_init_reference_mail_type(invoke_cli, tmp_path):
# Given test_reference output directory
test_container_version = "develop"
test_new_dir = tmp_path / "test_reference_dir"
test_new_dir.mkdir()
dummy_mail_type = "END"
dummy_mail_user = "[email protected]"
# WHEN running the command
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"-v",
test_container_version,
"--run-analysis",
"--run-mode",
"local",
"--mail-type",
dummy_mail_type,
"--mail-user",
dummy_mail_user,
]
)
    # THEN the command should run successfully with the given mail options
assert result.exit_code == 0
def test_init_reference_graph_exception(invoke_cli, tmp_path):
# Given test_reference.json
test_new_dir = tmp_path / "test_reference_nonfunctional_graph"
test_new_dir.mkdir()
with mock.patch.object(graphviz, "Source") as mocked:
mocked.return_value = None
result = invoke_cli(
[
"init",
"-o",
str(test_new_dir),
"--cosmic-key",
"secret_key",
]
)
assert result.exit_code == 1
def test_init_container_force_dry(invoke_cli, tmp_path):
# Given a dummy path
test_new_dir = tmp_path / "test_container_dry_force"
test_new_dir.mkdir()
test_container_version = "develop"
# WHEN force pull dry-run container
result = invoke_cli(
[
"init",
"--outdir",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"--force",
"-v",
test_container_version,
]
)
# THEN command exit code 0
assert result.exit_code == 0
def test_init_container_specific_tag(invoke_cli, tmp_path):
# Given a dummy path
test_new_dir = tmp_path / "test_container_dir"
test_new_dir.mkdir()
dummy_tag = "develop"
# WHEN pulling a specific tag other than standard version
result = invoke_cli(
[
"init",
"--outdir",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"--container-version",
dummy_tag,
]
)
# THEN command exit code 0
assert result.exit_code == 0
def METHOD_NAME(invoke_cli, tmp_path):
# Given a dummy path
test_new_dir = tmp_path / "test_container_dir"
test_new_dir.mkdir()
with mock.patch.object(subprocess, "run") as mocked:
mocked.return_value = 0
# WHEN pulling a container in a non dry-run mode
result = invoke_cli(
[
"init",
"--outdir",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"--run-analysis",
"--account",
"development",
]
)
    # THEN the container pull should succeed and the command exit with code 0
assert result.exit_code == 0
def test_init_container_wrong_tag(invoke_cli, tmp_path):
# Given a dummy path
test_new_dir = tmp_path / "test_container_dir"
test_new_dir.mkdir()
dummy_tag = "some_tag_that_does_not_exist_ngrtf123jsds3wqe2"
# WHEN pulling a wrong container tag
result = invoke_cli(
[
"init",
"--outdir",
str(test_new_dir),
"--cosmic-key",
"secret_key",
"--container-version",
dummy_tag,
]
)
# THEN capture error log and error code
assert result.exit_code > 0 | null |
5,847 | """
Support for logging over comms
====================================================
The comms mechanism can be used to support consolidated logging at the
manager by an appropriate choice of handlers and filters. The default
logging behavior is to install a CommLogHandler at each worker, which is
used to pass messages to be handled at the manager, where they are then
selected and emitted. The WorkerID filter is used to add contextual
information (in the form of a worker field) that identifies the origin of
a given log message (manager or worker ID).
"""
import logging
import sys
from pathlib import Path
from libensemble.utils.timer import Timer
class LogConfig:
"""Class for storing logging configuration info"""
config = None
def __init__(self, name: str) -> None:
"""Instantiate a new LogConfig instance."""
LogConfig.config = self
self.logger_set = False
self.log_level = logging.INFO
self.name = name
self.stats_name = name + ".calc stats"
self.filename = "ensemble.log"
self.stat_filename = "libE_stats.txt"
self.fmt = "[%(worker)s] %(asctime)s %(name)s (%(levelname)s): %(message)s"
self.stderr_level = logging.MANAGER_WARNING
def set_level(self, level: int) -> None:
"""Set logger level either before or after creating loggers"""
numeric_level = getattr(logging, level.upper(), 10)
self.log_level = numeric_level
if self.logger_set:
logger = logging.getLogger(self.name)
logger.setLevel(self.log_level)
def set_stderr_level(self, level: int) -> None:
"""Set logger level for copying messages to stderr"""
numeric_level = getattr(logging, level.upper(), 30)
self.stderr_level = numeric_level
def set_directory(self, dirname: str) -> None:
"""Sets target directory to contain logfiles if loggers not yet created"""
dirname = Path(dirname)
if not dirname.exists():
dirname.mkdir(parents=True)
if self.logger_set:
logger = logging.getLogger(self.name)
logger.warning("Cannot set directory after loggers initialized")
else:
baselog = Path(self.filename).name
basestat = Path(self.stat_filename).name
self.filename = str(dirname / baselog)
self.stat_filename = str(dirname / basestat)
class CommLogHandler(logging.Handler):
"""Logging handler class that forwards LogRecords to a Comm."""
def __init__(self, comm, pack=None, level=logging.NOTSET):
"""Initialize the handler instance, setting the level and the comm."""
super().__init__(level)
self.comm = comm
self.pack = pack
def emit(self, record):
"""Actually log the record."""
if self.pack is not None:
self.comm.send(*self.pack(record))
else:
self.comm.send(record)
class WorkerIDFilter(logging.Filter):
"""Logging filter to add worker ID to records."""
    # Minimum width adjustment (more space is used if needed).
margin_align = 5
def __init__(self, worker_id):
super().__init__()
self.worker_id = worker_id
# Prefix used by stats logger
if worker_id == 0:
self.prefix = "Manager" + " " * (WorkerIDFilter.margin_align)
else:
worker_str = str(self.worker_id).rjust(WorkerIDFilter.margin_align, " ")
self.prefix = f"Worker {worker_str}"
def filter(self, record):
"""Add worker ID to a LogRecord"""
record.worker = getattr(record, "worker", self.worker_id)
record.prefix = getattr(record, "prefix", self.prefix)
return True
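# Illustrative sketch only (not part of the module): once a record forwarded by
# CommLogHandler arrives at the manager, it can simply be re-dispatched through the
# local logging machinery; the worker field added by WorkerIDFilter travels with it.
# The receive loop and message unpacking are assumptions made for illustration.
def _example_handle_forwarded_record(record):
    """Re-emit a LogRecord received from a worker via the manager's handlers."""
    logging.getLogger(record.name).handle(record)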
class ErrorFilter(logging.Filter):
"""Filter to choose messages for stderr of user-defined level"""
def __init__(self, level):
super().__init__()
self.level = level
def filter(self, record):
"""Confirm messages that exceed specified level"""
return record.levelno >= self.level
def remove_handlers(logr):
"""Removes all handlers from a logger
Required, for example, to remove any manager handlers that
get undesirably given to a worker via 'fork' (from a previous
libE function call).
"""
for hdl in logr.handlers[:]:
logr.removeHandler(hdl)
hdl.close()
def init_worker_logger(logr, lev):
"""Initialize a worker logger attributes"""
logr.propagate = False
logr.setLevel(lev)
def METHOD_NAME(comm, worker_id=None):
"""Add a comm handler with worker ID filter to the indicated logger."""
logconfig = LogConfig.config
logger = logging.getLogger(logconfig.name)
slogger = logging.getLogger(logconfig.stats_name)
ch = CommLogHandler(comm, pack=lambda rec: (0, rec))
ch.addFilter(WorkerIDFilter(worker_id or comm.rank))
if logconfig.logger_set:
remove_handlers(logger)
remove_handlers(slogger)
else:
init_worker_logger(logger, logconfig.log_level)
init_worker_logger(slogger, logconfig.log_level)
logconfig.logger_set = True
logger.addHandler(ch)
slogger.addHandler(ch)
def manager_logging_config(specs={}):
"""Add file-based logging at manager."""
stat_timer = Timer()
stat_timer.start()
# Regular logging
logconfig = LogConfig.config
if not logconfig.logger_set:
if specs.get("use_workflow_dir"): # placing logfiles in separate directory
logconfig.set_directory(specs.get("workflow_dir_path"))
formatter = logging.Formatter(logconfig.fmt)
wfilter = WorkerIDFilter(0)
fh = logging.FileHandler(logconfig.filename, mode="w")
fh.addFilter(wfilter)
fh.setFormatter(formatter)
logger = logging.getLogger(logconfig.name)
logger.propagate = False
logger.setLevel(logconfig.log_level) # Formatter filters on top of this
logger.addHandler(fh)
logconfig.logger_set = True
# Stats logging
# NB: Could add a specialized handler for immediate flushing
fhs = logging.FileHandler(logconfig.stat_filename, mode="w")
fhs.addFilter(wfilter)
fhs.setFormatter(logging.Formatter("%(prefix)s: %(message)s"))
stat_logger = logging.getLogger(logconfig.stats_name)
stat_logger.propagate = False
stat_logger.setLevel(logging.DEBUG)
stat_logger.addHandler(fhs)
# Mirror error-logging to stderr of user-specified level
fhe = logging.StreamHandler(stream=sys.stderr)
fhe.addFilter(wfilter)
efilter = ErrorFilter(logconfig.stderr_level)
fhe.addFilter(efilter)
fhe.setFormatter(formatter)
logger.addHandler(fhe)
else:
stat_logger = logging.getLogger(logconfig.stats_name)
stat_logger.info(f"Starting ensemble at: {stat_timer.date_start}")
def exit_logger():
stat_timer.stop()
stat_logger.info(f"Exiting ensemble at: {stat_timer.date_end} Time Taken: {stat_timer.elapsed}")
# If closing logs - each libE() call will log to a new file.
# fh.close()
# fhs.close()
return exit_logger | null |
5,848 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing PitchShift Python API
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.audio as audio
from mindspore import log as logger
from mindspore.dataset.audio.utils import WindowType
def count_unequal_element(data_expected, data_me, rtol, atol):
""" Precision calculation func """
assert data_expected.shape == data_me.shape
total_count = len(data_expected.flatten())
error = np.abs(data_expected - data_me)
greater = np.greater(error, atol + np.abs(data_expected) * rtol)
loss_count = np.count_nonzero(greater)
assert (loss_count / total_count) < rtol, "\ndata_expected_std:{0}\ndata_me_error:{1}\nloss:{2}".format(
data_expected[greater], data_me[greater], error[greater])
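# Illustrative note: with rtol = atol = 1e-4, an element counts as unequal when
# |expected - actual| > 1e-4 + 1e-4 * |expected|, and the assertion in
# count_unequal_element fails only if the fraction of such elements is not below rtol.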
def test_pitchshift_pipeline():
"""
Feature: Test pipeline mode normal testcase: PitchShift op
Description: Input audio signal to test pipeline
Expectation: Generate expected output after cases were executed
"""
logger.info("test_PitchShift_pipeline")
wav = [[[1, 1, 2, 3, 2, 3, 4, 5, 1, 2, 3, 4, 5, 2, 3, 2, 1, 2, 3, 0, 1, 0, 2, 4, 5, 3, 1, 2, 3, 4]]]
dataset = ds.NumpySlicesDataset(wav, column_names=["audio"], shuffle=False)
out = audio.PitchShift(sample_rate=16000, n_steps=4, bins_per_octave=12, n_fft=16, win_length=16,
hop_length=4, window=WindowType.HANN)
dataset = dataset.map(operations=out, input_columns=["audio"], output_columns=["PitchShift"])
result = np.array([[0.8897, 1.0983, 2.4355, 1.8842, 2.2082,
3.6461, 2.4232, 1.7691, 3.2835, 3.3354,
2.1773, 3.3544, 4.0488, 3.1631, 1.9124,
2.2346, 2.2417, 3.6008, 1.9539, 1.3373,
0.4311, 2.0768, 2.6538, 1.5035, 1.5668,
2.3749, 3.9702, 3.5922, 1.7618, 1.2730]])
for data1 in dataset.create_dict_iterator(num_epochs=1, output_numpy=True):
count_unequal_element(data1["PitchShift"], result, 0.0001, 0.0001)
def METHOD_NAME():
"""
Feature: Mindspore eager mode normal testcase: pitchshift op
Description: Input audio signal to test eager
Expectation: Generate expected output after cases were executed
"""
logger.info("test_pitchshift_eager")
wav = np.array([[[1, 1, 2, 3, 2, 3, 4, 5, 1, 2, 3, 4, 5, 2, 3, 2, 1, 2, 3, 0, 1, 0, 2, 4, 5, 3, 1, 2, 3, 4]]])
out = audio.PitchShift(sample_rate=16000, n_steps=4, bins_per_octave=12, n_fft=16, win_length=16, hop_length=4,
window=WindowType.HANN)(wav)
result = np.array([[[0.8897, 1.0983, 2.4355, 1.8842, 2.2082,
3.6461, 2.4232, 1.7691, 3.2835, 3.3354,
2.1773, 3.3544, 4.0488, 3.1631, 1.9124,
2.2346, 2.2417, 3.6008, 1.9539, 1.3373,
0.4311, 2.0768, 2.6538, 1.5035, 1.5668,
2.3749, 3.9702, 3.5922, 1.7618, 1.2730]]])
count_unequal_element(out, result, 0.0001, 0.0001)
def test_pitchshift_param():
"""
Feature: Test pitchshift invalid parameter
Description: Test some invalid parameters
Expectation: throw ValueError, TypeError or RuntimeError exception
"""
try:
_ = audio.PitchShift(sample_rate="s", n_steps=4)
except TypeError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Argument sample_rate with value s is not of type [<class 'int'>], but got <class 'str'>." in str(error)
try:
_ = audio.PitchShift(sample_rate=-1, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input sample_rate is not within the required interval of [0, 2147483647]." in str(error)
try:
_ = audio.PitchShift(n_steps="s", sample_rate=16)
except TypeError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Argument n_steps with value s is not of type [<class 'int'>], but got <class 'str'>." in str(error)
try:
_ = audio.PitchShift(bins_per_octave=0, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input bins_per_octave is not within the required interval of [-2147483648, 0) and (0, 2147483647]." \
in str(error)
try:
_ = audio.PitchShift(bins_per_octave="s", sample_rate=16, n_steps=4)
except TypeError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Argument bins_per_octave with value s is not of type [<class 'int'>], but got <class 'str'>." \
in str(error)
try:
_ = audio.PitchShift(n_fft=-1, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input n_fft is not within the required interval of [1, 2147483647]." in str(error)
try:
_ = audio.PitchShift(n_fft=0, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input n_fft is not within the required interval of [1, 2147483647]." in str(error)
try:
_ = audio.PitchShift(win_length=-1, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input win_length is not within the required interval of [1, 2147483647]." in str(error)
try:
_ = audio.PitchShift(win_length="s", sample_rate=16, n_steps=4)
except TypeError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Argument win_length with value s is not of type [<class 'int'>], but got <class 'str'>." in str(error)
try:
_ = audio.PitchShift(hop_length=-1, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input hop_length is not within the required interval of [1, 2147483647]." in str(error)
try:
_ = audio.PitchShift(hop_length=-100, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Input hop_length is not within the required interval of [1, 2147483647]." in str(error)
try:
_ = audio.PitchShift(win_length=300, n_fft=200, sample_rate=16, n_steps=4)
except ValueError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
try:
_ = audio.PitchShift(window=False, sample_rate=16, n_steps=4)
except TypeError as error:
logger.info("Got an exception in pitchshift: {}".format(str(error)))
assert "Argument window with value False is not of type [<enum 'WindowType'>], but got <class 'bool'>." \
in str(error)
if __name__ == "__main__":
test_pitchshift_pipeline()
METHOD_NAME()
test_pitchshift_param() | null |
5,849 | from backend.layers.business.business_interface import BusinessLogicInterface
from backend.layers.common.entities import (
DatasetArtifactType,
DatasetConversionStatus,
DatasetStatusKey,
DatasetVersionId,
)
from backend.layers.processing.h5ad_data_file import H5ADDataFile
from backend.layers.processing.logger import logit
from backend.layers.processing.process_logic import ProcessingLogic
from backend.layers.thirdparty.s3_provider import S3ProviderInterface
from backend.layers.thirdparty.uri_provider import UriProviderInterface
class ProcessCxg(ProcessingLogic):
"""
Base class for handling the `Process CXG` step of the step function.
This will:
1. Download the labeled h5ad artifact from S3 (uploaded by DownloadAndValidate)
2. Convert to cxg
3. Upload the cxg artifact (a directory) to S3
If this step completes successfully, and ProcessSeurat is completed, the handle_success lambda will be invoked
If this step fails, the handle_failures lambda will be invoked
"""
def __init__(
self,
business_logic: BusinessLogicInterface,
uri_provider: UriProviderInterface,
s3_provider: S3ProviderInterface,
) -> None:
super().__init__()
self.business_logic = business_logic
self.uri_provider = uri_provider
self.s3_provider = s3_provider
def process(self, dataset_id: DatasetVersionId, artifact_bucket: str, cellxgene_bucket: str, is_reprocess=False):
"""
1. Download the labeled dataset from the artifact bucket
2. Convert the labeled dataset to CXG
3. Upload the CXG to the cellxgene bucket
:param dataset_id:
:param artifact_bucket:
:param cellxgene_bucket:
:param is_reprocess: flag indicating whether this job is reprocessing an existing cxg in-place
:return:
"""
labeled_h5ad_filename = "local.h5ad"
# Download the labeled dataset from the artifact bucket
object_key = None
current_artifacts = None
if is_reprocess:
current_artifacts = self.business_logic.get_dataset_artifacts(dataset_id)
existing_h5ad = [artifact for artifact in current_artifacts if artifact.type == DatasetArtifactType.H5AD][0]
if existing_h5ad:
_, object_key = self.s3_provider.parse_s3_uri(existing_h5ad.uri)
if object_key is None:
key_prefix = self.get_key_prefix(dataset_id.id)
object_key = f"{key_prefix}/{labeled_h5ad_filename}"
self.download_from_s3(artifact_bucket, object_key, labeled_h5ad_filename)
# Convert the labeled dataset to CXG and upload it to the cellxgene bucket
self.process_cxg(labeled_h5ad_filename, dataset_id, cellxgene_bucket, current_artifacts)
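    # Illustrative sketch only (not part of the pipeline): how this step might be
    # driven by a handler. The bucket names and the dataset-version id below are
    # made-up placeholders, and the provider objects are assumed to be built elsewhere.
    @staticmethod
    def _example_invocation(business_logic, uri_provider, s3_provider):
        step = ProcessCxg(business_logic, uri_provider, s3_provider)
        step.process(
            DatasetVersionId("example-dataset-version-id"),
            artifact_bucket="example-artifact-bucket",
            cellxgene_bucket="example-cellxgene-bucket",
        )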
@logit
def make_cxg(self, local_filename):
"""
Convert the uploaded H5AD file to the CXG format servicing the cellxgene Explorer.
"""
cxg_output_container = local_filename.replace(".h5ad", ".cxg")
try:
h5ad_data_file = H5ADDataFile(local_filename, var_index_column_name="feature_name")
h5ad_data_file.to_cxg(cxg_output_container, sparse_threshold=25.0)
except Exception as ex:
# TODO use a specialized exception
msg = "CXG conversion failed."
self.logger.exception(msg)
raise RuntimeError(msg) from ex
return cxg_output_container
def METHOD_NAME(self, cxg_dir, s3_uri):
"""
Copy cxg files to the cellxgene bucket (under the given object key) for access by the explorer
"""
self.s3_provider.upload_directory(cxg_dir, s3_uri)
def process_cxg(self, local_filename, dataset_id, cellxgene_bucket, current_artifacts=None):
cxg_dir = self.convert_file(
self.make_cxg, local_filename, "Issue creating cxg.", dataset_id, DatasetStatusKey.CXG
)
s3_uri = None
if current_artifacts:
existing_cxg = [artifact for artifact in current_artifacts if artifact.type == DatasetArtifactType.CXG][0]
if existing_cxg:
s3_uri = existing_cxg.uri
if s3_uri is None:
key_prefix = self.get_key_prefix(dataset_id.id)
s3_uri = f"s3://{cellxgene_bucket}/{key_prefix}.cxg/"
self.update_processing_status(dataset_id, DatasetStatusKey.CXG, DatasetConversionStatus.UPLOADING)
self.METHOD_NAME(cxg_dir, s3_uri)
self.logger.info(f"Updating database with cxg artifact for dataset {dataset_id}. s3_uri is {s3_uri}")
if not current_artifacts:
self.business_logic.add_dataset_artifact(dataset_id, DatasetArtifactType.CXG, s3_uri)
self.update_processing_status(dataset_id, DatasetStatusKey.CXG, DatasetConversionStatus.UPLOADED) | null |
5,850 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
from .rec_postprocess import AttnLabelDecode
class TableLabelDecode(AttnLabelDecode):
    """Decode table structure tokens and cell bounding boxes from model outputs."""
def __init__(self,
character_dict_path,
merge_no_span_structure=False,
**kwargs):
dict_character = []
with open(character_dict_path, "rb") as fin:
lines = fin.readlines()
for line in lines:
line = line.decode('utf-8').strip("\n").strip("\r\n")
dict_character.append(line)
if merge_no_span_structure:
if "<td></td>" not in dict_character:
dict_character.append("<td></td>")
if "<td>" in dict_character:
dict_character.remove("<td>")
dict_character = self.add_special_char(dict_character)
self.dict = {}
for i, char in enumerate(dict_character):
self.dict[char] = i
self.character = dict_character
self.td_token = ['<td>', '<td', '<td></td>']
def __call__(self, preds, batch=None):
structure_probs = preds['structure_probs']
bbox_preds = preds['loc_preds']
if isinstance(structure_probs, paddle.Tensor):
structure_probs = structure_probs.numpy()
if isinstance(bbox_preds, paddle.Tensor):
bbox_preds = bbox_preds.numpy()
shape_list = batch[-1]
result = self.decode(structure_probs, bbox_preds, shape_list)
if len(batch) == 1: # only contains shape
return result
label_decode_result = self.decode_label(batch)
return result, label_decode_result
    def decode(self, structure_probs, bbox_preds, shape_list):
        """Decode structure token predictions and cell bounding boxes.
        """
ignored_tokens = self.METHOD_NAME()
end_idx = self.dict[self.end_str]
structure_idx = structure_probs.argmax(axis=2)
structure_probs = structure_probs.max(axis=2)
structure_batch_list = []
bbox_batch_list = []
batch_size = len(structure_idx)
for batch_idx in range(batch_size):
structure_list = []
bbox_list = []
score_list = []
for idx in range(len(structure_idx[batch_idx])):
char_idx = int(structure_idx[batch_idx][idx])
if idx > 0 and char_idx == end_idx:
break
if char_idx in ignored_tokens:
continue
text = self.character[char_idx]
if text in self.td_token:
bbox = bbox_preds[batch_idx, idx]
bbox = self._bbox_decode(bbox, shape_list[batch_idx])
bbox_list.append(bbox)
structure_list.append(text)
score_list.append(structure_probs[batch_idx, idx])
structure_batch_list.append([structure_list, np.mean(score_list)])
bbox_batch_list.append(np.array(bbox_list))
result = {
'bbox_batch_list': bbox_batch_list,
'structure_batch_list': structure_batch_list,
}
return result
    def decode_label(self, batch):
        """Decode ground-truth structure labels and bounding boxes.
        """
structure_idx = batch[1]
gt_bbox_list = batch[2]
shape_list = batch[-1]
ignored_tokens = self.METHOD_NAME()
end_idx = self.dict[self.end_str]
structure_batch_list = []
bbox_batch_list = []
batch_size = len(structure_idx)
for batch_idx in range(batch_size):
structure_list = []
bbox_list = []
for idx in range(len(structure_idx[batch_idx])):
char_idx = int(structure_idx[batch_idx][idx])
if idx > 0 and char_idx == end_idx:
break
if char_idx in ignored_tokens:
continue
structure_list.append(self.character[char_idx])
bbox = gt_bbox_list[batch_idx][idx]
if bbox.sum() != 0:
bbox = self._bbox_decode(bbox, shape_list[batch_idx])
bbox_list.append(bbox)
structure_batch_list.append(structure_list)
bbox_batch_list.append(bbox_list)
result = {
'bbox_batch_list': bbox_batch_list,
'structure_batch_list': structure_batch_list,
}
return result
def _bbox_decode(self, bbox, shape):
h, w, ratio_h, ratio_w, pad_h, pad_w = shape
bbox[0::2] *= w
bbox[1::2] *= h
return bbox
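    # Worked example (illustrative): with shape giving h=480 and w=640, a normalized
    # box [0.1, 0.2, 0.5, 0.6] becomes [0.1*640, 0.2*480, 0.5*640, 0.6*480]
    # = [64.0, 96.0, 320.0, 288.0] in pixel coordinates.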
class TableMasterLabelDecode(TableLabelDecode):
    """Decode TableMaster outputs, converting predicted (x, y, w, h) boxes to corner format."""
def __init__(self,
character_dict_path,
box_shape='ori',
merge_no_span_structure=True,
**kwargs):
super(TableMasterLabelDecode, self).__init__(character_dict_path,
merge_no_span_structure)
self.box_shape = box_shape
assert box_shape in [
'ori', 'pad'
], 'The shape used for box normalization must be ori or pad'
def add_special_char(self, dict_character):
self.beg_str = '<SOS>'
self.end_str = '<EOS>'
self.unknown_str = '<UKN>'
self.pad_str = '<PAD>'
dict_character = dict_character
dict_character = dict_character + [
self.unknown_str, self.beg_str, self.end_str, self.pad_str
]
return dict_character
def METHOD_NAME(self):
pad_idx = self.dict[self.pad_str]
start_idx = self.dict[self.beg_str]
end_idx = self.dict[self.end_str]
unknown_idx = self.dict[self.unknown_str]
return [start_idx, end_idx, pad_idx, unknown_idx]
def _bbox_decode(self, bbox, shape):
h, w, ratio_h, ratio_w, pad_h, pad_w = shape
if self.box_shape == 'pad':
h, w = pad_h, pad_w
bbox[0::2] *= w
bbox[1::2] *= h
bbox[0::2] /= ratio_w
bbox[1::2] /= ratio_h
x, y, w, h = bbox
x1, y1, x2, y2 = x - w // 2, y - h // 2, x + w // 2, y + h // 2
bbox = np.array([x1, y1, x2, y2])
return bbox | null |
5,851 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import functional as F
from mindspore.common.api import jit
context.set_context(mode=context.PYNATIVE_MODE, device_target='GPU')
class NetP(nn.Cell):
def __init__(self, output_size):
super(NetP, self).__init__()
self.adaptive_max_pool2d = nn.AdaptiveMaxPool2d(output_size)
@jit
def construct(self, x):
return self.adaptive_max_pool2d(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_normal():
"""
Feature: test adaptivemaxpool2d op.
Description: test the ops.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
net = NetP((3, 5))
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 3, 5)
assert output.asnumpy().shape == expect_shape
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_h_none():
"""
Feature: test adaptivemaxpool2d op.
Description: test the none value of output_size attr.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
net = NetP((None, 5))
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 9, 5)
assert output.asnumpy().shape == expect_shape
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_hxh():
"""
Feature: test adaptivemaxpool2d op.
Description: test the int type of output_size attr.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
net = NetP((5))
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 5, 5)
assert output.asnumpy().shape == expect_shape
class NetWithIndices(nn.Cell):
def __init__(self, output_size):
super(NetWithIndices, self).__init__()
self.adaptive_max_pool2d = nn.AdaptiveMaxPool2d(output_size, True)
@jit
def construct(self, x):
return self.adaptive_max_pool2d(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_with_indices():
"""
Feature: test adaptivemaxpool2d op.
Description: test the return_indices attr.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
net = NetWithIndices((3, 5))
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 3, 5)
assert output[1].asnumpy().shape == expect_shape
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_f():
"""
Feature: test adaptivemaxpool2d op.
Description: test the ops in functional.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
output = F.adaptive_max_pool2d(Tensor(x, mindspore.float32), (3, 5), True)
expect_shape = (1, 32, 3, 5)
assert output[1].asnumpy().shape == expect_shape
class Netnn(nn.Cell):
def __init__(self):
super(Netnn, self).__init__()
self.adaptive_max_pool2d = nn.AdaptiveMaxPool2d((3, 5))
@jit
def construct(self, x):
return self.adaptive_max_pool2d(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_net_nn():
"""
Feature: test adaptivemaxpool2d op.
Description: test the ops in nn.
Expectation: expect correct shape result.
"""
x = np.random.randn(1, 32, 9, 9)
net = Netnn()
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 3, 5)
assert output.asnumpy().shape == expect_shape
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: test adaptivemaxpool2d op.
Description: test the ops in dynamic shape.
Expectation: expect correct shape result.
"""
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
net = Netnn()
x_dyn = Tensor(shape=[1, 32, 9, None], dtype=mindspore.float32)
net.set_inputs(x_dyn)
x = np.random.randn(1, 32, 9, 9)
output = net(Tensor(x, mindspore.float32))
expect_shape = (1, 32, 3, 5)
assert output.asnumpy().shape == expect_shape | null |
5,852 | ''' workbench.py - Workflow Benchmark Suite
Examples:
# Generate Chained workflow with 128 tasks
$ ./weaver.py -o workbench examples/workbench.py chained noop 128
# Generate Concurrent workflow with 128 tasks
$ ./weaver.py -o workbench examples/workbench.py concurrent noop 128
# Generate FanOut workflow with 128 tasks and 1K input file
$ ./weaver.py -o workbench examples/workbench.py fanout cat 128 1024
# Generate FanIn workflow with 128 tasks and 1K input file
$ ./weaver.py -o workbench examples/workbench.py fanin cat 128 1024
# Generate Map workflow with 128 tasks and 1K input files
$ ./weaver.py -o workbench examples/workbench.py map cat 128 1024
'''
import itertools
import os
from weaver.logger import debug, fatal, D_USER
# WorkBench Functions
def make_noop_function():
executable = os.path.join(CurrentNest().work_dir, 'noop_script')
function = ShellFunction('exit 0', executable=executable)
return function
def make_cat_function():
executable = os.path.join(CurrentNest().work_dir, 'cat_script')
function = ShellFunction('cat $@', executable=executable, cmd_format='{EXE} {IN} > {OUT}')
return function
WORKBENCH_FUNCTIONS = {
'noop' : make_noop_function,
'cat' : make_cat_function,
}
def make_function(func_name, *func_args):
try:
function = WORKBENCH_FUNCTIONS[func_name](*func_args)
except KeyError:
fatal(D_USER, 'Invalid function {0}'.format(func_name))
return function
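# Illustrative example (file names are placeholders, not part of the suite):
#   cat = make_function('cat')
#   cat('input.txt', 'copy.txt')   # runs: cat_script input.txt > copy.txt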
# WorkBench Utilities
KB = 2**10
MB = 2**20
GB = 2**30
BYTE_FILES = {
1*KB : 'input.1K',
1*MB : 'input.1M',
16*MB : 'input.16M',
64*MB : 'input.64M',
128*MB : 'input.128M',
1*GB : 'input.1G',
2*GB : 'input.2G',
4*GB : 'input.4G',
8*GB : 'input.8G',
}
CACHE_DIR = '/var/tmp/pdonnel3/data/'
CHUNK_SIZE = 1024 * 1024
CurrentNest().Counter = itertools.count()
def METHOD_NAME(bytes, name=None):
nest = CurrentNest()
name = name or '{0:08X}.input'.format(next(nest.Counter))
path = os.path.join(nest.work_dir, name)
try:
cache = os.path.join(CACHE_DIR, BYTE_FILES[bytes])
except (OSError, KeyError):
cache = None
    if cache and os.path.exists(cache):
os.symlink(cache, path)
else:
with open(path, 'w+') as fs:
chunk_size = CHUNK_SIZE if bytes > CHUNK_SIZE else bytes
bytes_written = 0
bytes_data = 'x' * chunk_size
for i in range(0, bytes, chunk_size):
fs.write(bytes_data)
bytes_written += chunk_size
if bytes_written < bytes:
fs.write('x'*(bytes - bytes_written))
return name
# WorkBench Patterns
def run_chained(func_name, tasks, *func_args):
debug(D_USER, 'Generating Chained Pattern with Function {0}'.format(func_name))
tasks = int(tasks)
arguments = map(int, func_args)
function = make_function(func_name, *arguments)
output = None
for task in range(tasks):
output = function(output, '{0:04d}.output'.format(task))
def run_concurrent(func_name, tasks, *func_args):
debug(D_USER, 'Generating Concurrent Pattern with Function {0}'.format(func_name))
tasks = int(tasks)
arguments = map(int, func_args)
function = make_function(func_name, *arguments)
Iterate(function, tasks, '{NUMBER}.output')
def run_fanout(func_name, tasks, bytes, *func_args):
debug(D_USER, 'Generating FanOut Pattern with Function {0}'.format(func_name))
tasks = int(tasks)
bytes = int(bytes)
input = METHOD_NAME(bytes, 'fanout.input')
arguments = map(int, func_args)
function = make_function(func_name, *arguments)
Iterate(function, tasks, '{NUMBER}.output', includes=input)
def run_fanin(func_name, tasks, bytes, *func_args):
debug(D_USER, 'Generating FanIn Pattern with Function {0}'.format(func_name))
tasks = int(tasks)
bytes = int(bytes)
arguments = map(int, func_args)
function = make_function(func_name, *arguments)
inputs = []
for input in range(tasks):
inputs.append(METHOD_NAME(bytes))
function(inputs, 'fanin.output')
def run_map(func_name, tasks, bytes, *func_args):
debug(D_USER, 'Generating Map Pattern with Function {0}'.format(func_name))
tasks = int(tasks)
bytes = int(bytes)
arguments = map(int, func_args)
function = make_function(func_name, *arguments)
inputs = []
for input in range(tasks):
inputs.append(METHOD_NAME(bytes))
Map(function, inputs, '{BASE_WOEXT}.output')
WORKBENCH_PATTERNS = {
'chained' : run_chained,
'concurrent': run_concurrent,
'fanout' : run_fanout,
'fanin' : run_fanin,
'map' : run_map,
}
# WorkBench Main Dispatch
Arguments = CurrentScript().arguments
try:
WORKBENCH_PATTERNS[Arguments[0]](*Arguments[1:])
except KeyError:
fatal(D_USER, 'Invalid pattern: {0}'.format(Arguments[0]), print_traceback=True)
except IndexError:
fatal(D_USER, 'No pattern specified', print_traceback=True) | null |
5,853 | import numpy as np
from rlberry.agents import AgentWithSimplePolicy
from rlberry.agents.dynprog.utils import backward_induction, value_iteration
from gymnasium.spaces import Discrete
import rlberry
logger = rlberry.logger
class MBQVIAgent(AgentWithSimplePolicy):
"""
    Model-Based Q-Value Iteration (MBQVI).
Builds an empirical MDP and runs value iteration on it.
Corresponds to the "indirect" algorithm studied by Kearns and Singh (1999).
Parameters
-----------
env : Model
generative model with finite state-action space
n_samples : int
number of samples *per state-action pair* used to estimate
the empirical MDP.
gamma : double
discount factor in [0, 1]
horizon : int
horizon, if the problem is finite-horizon. if None, the discounted
problem is solved. default = None
epsilon : double
precision of value iteration, only used in discounted problems
(when horizon is None).
References
----------
Kearns, Michael J., and Satinder P. Singh.
"Finite-sample convergence rates for Q-learning and indirect algorithms."
Advances in neural information processing systems. 1999.
"""
name = "MBQVI"
def __init__(
self, env, n_samples=10, gamma=0.99, horizon=None, epsilon=1e-6, **kwargs
):
AgentWithSimplePolicy.__init__(self, env, **kwargs)
# initialize base class
assert self.env.is_generative(), "MBQVI requires a generative model."
assert isinstance(
self.env.observation_space, Discrete
), "MBQVI requires a finite state space."
assert isinstance(
self.env.action_space, Discrete
), "MBQVI requires a finite action space."
#
self.n_samples = n_samples
self.gamma = gamma
self.horizon = horizon
self.epsilon = epsilon
# empirical MDP, created in fit()
self.R_hat = None
self.P_hat = None
# value functions
self.V = None
self.Q = None
def _update(self, state, action, next_state, reward):
"""Update model statistics."""
self.N_sa[state, action] += 1
self.N_sas[state, action, next_state] += 1
self.S_sa[state, action] += reward
def METHOD_NAME(self, budget=None, **kwargs):
"""
Build empirical MDP and run value iteration.
Parameters
----------
budget: None
Not used. Only defined for compatibility purpose with rlberry.
Changing `budget` value has no effect.
"""
del kwargs
S = self.env.observation_space.n
A = self.env.action_space.n
self.N_sa = np.zeros((S, A))
self.N_sas = np.zeros((S, A, S))
self.S_sa = np.zeros((S, A))
# collect data
total_samples = S * A * self.n_samples
count = 0
logger.debug(
f"[{self.name}] collecting {self.n_samples} samples per (s,a)"
f", total = {total_samples} samples."
)
for ss in range(S):
for aa in range(A):
for _ in range(self.n_samples):
next_state, reward, _, _, _ = self.env.sample(ss, aa)
self._update(ss, aa, next_state, reward)
count += 1
if count % 10000 == 0:
completed = 100 * count / total_samples
logger.debug(
"[{}] ... {}/{} ({:0.0f}%)".format(
self.name, count, total_samples, completed
)
)
# build model and run VI
logger.debug(f"{self.name} building model and running backward induction...")
N_sa = np.maximum(self.N_sa, 1)
self.R_hat = self.S_sa / N_sa
self.P_hat = np.zeros((S, A, S))
for ss in range(S):
self.P_hat[:, :, ss] = self.N_sas[:, :, ss] / N_sa
info = {}
info["n_samples"] = self.n_samples
info["total_samples"] = total_samples
if self.horizon is None:
assert self.gamma < 1.0, "The discounted setting requires gamma < 1.0"
self.Q, self.V, n_it = value_iteration(
self.R_hat, self.P_hat, self.gamma, self.epsilon
)
info["n_iterations"] = n_it
info["precision"] = self.epsilon
else:
self.Q, self.V = backward_induction(
self.R_hat, self.P_hat, self.horizon, self.gamma
)
info["n_iterations"] = self.horizon
info["precision"] = 0.0
return info
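    # Illustrative usage sketch (environment construction omitted; any rlberry
    # generative model with finite state and action spaces would do):
    #   agent = MBQVIAgent(env, n_samples=20, gamma=0.95)
    #   ... estimate the empirical MDP and solve it with the method defined above ...
    #   action = agent.policy(observation)   # greedy w.r.t. the estimated Q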
def policy(self, observation):
state = observation
assert self.env.observation_space.contains(state)
if self.horizon is None:
return self.Q[state, :].argmax()
else:
return self.Q[0, state, :].argmax() | null |
5,854 | """Implementation of :class:`QuotientRing` class."""
from sympy.polys.agca.modules import FreeModuleQuotientRing
from sympy.polys.domains.ring import Ring
from sympy.polys.polyerrors import NotReversible, CoercionFailed
from sympy.utilities import public
# TODO
# - successive quotients (when quotient ideals are implemented)
# - poly rings over quotients?
# - division by non-units in integral domains?
@public
class QuotientRingElement:
"""
Class representing elements of (commutative) quotient rings.
Attributes:
- ring - containing ring
- data - element of ring.ring (i.e. base ring) representing self
"""
def __init__(self, ring, data):
self.ring = ring
self.data = data
def __str__(self):
from sympy.printing.str import sstr
return sstr(self.data) + " + " + str(self.ring.base_ideal)
__repr__ = __str__
def __bool__(self):
return not self.ring.is_zero(self)
def __add__(self, om):
if not isinstance(om, self.__class__) or om.ring != self.ring:
try:
om = self.ring.convert(om)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring(self.data + om.data)
__radd__ = __add__
def __neg__(self):
return self.ring(self.data*self.ring.ring.convert(-1))
def __sub__(self, om):
return self.__add__(-om)
def __rsub__(self, om):
return (-self).__add__(om)
def __mul__(self, o):
if not isinstance(o, self.__class__):
try:
o = self.ring.convert(o)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring(self.data*o.data)
__rmul__ = __mul__
def __rtruediv__(self, o):
return self.ring.revert(self)*o
def __truediv__(self, o):
if not isinstance(o, self.__class__):
try:
o = self.ring.convert(o)
except (NotImplementedError, CoercionFailed):
return NotImplemented
return self.ring.revert(o)*self
def __pow__(self, oth):
if oth < 0:
return self.ring.revert(self) ** -oth
return self.ring(self.data ** oth)
def __eq__(self, om):
if not isinstance(om, self.__class__) or om.ring != self.ring:
return False
return self.ring.is_zero(self - om)
def __ne__(self, om):
return not self == om
class QuotientRing(Ring):
"""
Class representing (commutative) quotient rings.
    You should not usually instantiate this by hand; instead use the ``quotient_ring``
    constructor of the base ring, as in the examples below.
>>> from sympy.abc import x
>>> from sympy import QQ
>>> I = QQ.old_poly_ring(x).ideal(x**3 + 1)
>>> QQ.old_poly_ring(x).quotient_ring(I)
QQ[x]/<x**3 + 1>
Shorter versions are possible:
>>> QQ.old_poly_ring(x)/I
QQ[x]/<x**3 + 1>
>>> QQ.old_poly_ring(x)/[x**3 + 1]
QQ[x]/<x**3 + 1>
Attributes:
- ring - the base ring
- base_ideal - the ideal used to form the quotient
"""
has_assoc_Ring = True
has_assoc_Field = False
dtype = QuotientRingElement
def __init__(self, ring, ideal):
if not ideal.ring == ring:
raise ValueError('Ideal must belong to %s, got %s' % (ring, ideal))
self.ring = ring
self.base_ideal = ideal
self.zero = self(self.ring.zero)
self.one = self(self.ring.one)
def __str__(self):
return str(self.ring) + "/" + str(self.base_ideal)
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.ring, self.base_ideal))
def new(self, a):
"""Construct an element of ``self`` domain from ``a``. """
if not isinstance(a, self.ring.dtype):
a = self.ring(a)
# TODO optionally disable reduction?
return self.dtype(self, self.base_ideal.reduce_element(a))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, QuotientRing) and \
self.ring == other.ring and self.base_ideal == other.base_ideal
def from_ZZ(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return K1(K1.ring.convert(a, K0))
from_ZZ_python = from_ZZ
from_QQ_python = from_ZZ_python
from_ZZ_gmpy = from_ZZ_python
from_QQ_gmpy = from_ZZ_python
from_RealField = from_ZZ_python
from_GlobalPolynomialRing = from_ZZ_python
from_FractionField = from_ZZ_python
def from_sympy(self, a):
return self(self.ring.from_sympy(a))
def to_sympy(self, a):
return self.ring.to_sympy(a.data)
def from_QuotientRing(self, a, K0):
if K0 == self:
return a
def poly_ring(self, *gens):
"""Returns a polynomial ring, i.e. ``K[X]``. """
raise NotImplementedError('nested domains not allowed')
def frac_field(self, *gens):
"""Returns a fraction field, i.e. ``K(X)``. """
raise NotImplementedError('nested domains not allowed')
def revert(self, a):
"""
Compute a**(-1), if possible.
"""
I = self.ring.ideal(a.data) + self.base_ideal
try:
return self(I.in_terms_of_generators(1)[0])
except ValueError: # 1 not in I
raise NotReversible('%s not a unit in %r' % (a, self))
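    # Worked example (illustrative): in QQ.old_poly_ring(x)/[x**2 + 1] the residue of x
    # is a unit, since x * (-x) = -x**2 = 1 modulo x**2 + 1, so revert(x) gives -x;
    # an element such as 0, whose ideal together with x**2 + 1 does not contain 1,
    # raises NotReversible instead.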
def is_zero(self, a):
return self.base_ideal.contains(a.data)
def METHOD_NAME(self, rank):
"""
Generate a free module of rank ``rank`` over ``self``.
>>> from sympy.abc import x
>>> from sympy import QQ
>>> (QQ.old_poly_ring(x)/[x**2 + 1]).free_module(2)
(QQ[x]/<x**2 + 1>)**2
"""
return FreeModuleQuotientRing(self, rank) | null |
5,855 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import ops
import mindspore.common.dtype as mstype
import mindspore as ms
class FractionalMaxPool2dNet(nn.Cell):
"""FractionalMaxPool2d ops"""
def construct(self, x, _random_sample):
output1 = ops.fractional_max_pool2d(x, kernel_size=2, output_size=(2, 2), return_indices=True,
_random_samples=_random_sample)
output2 = ops.fractional_max_pool2d(x, kernel_size=2, output_ratio=(0.5, 0.5), return_indices=True,
_random_samples=_random_sample)
return output1, output2
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def test_fractional_maxpool2d_normal(mode):
"""
Feature: FractionalMaxPool2d
Description: Verify the result of FractionalMaxPool2d
Expectation: success
"""
ms.set_context(mode=mode)
net = FractionalMaxPool2dNet()
input_x = Tensor(np.random.rand(25).reshape([1, 5, 5]), mstype.float32)
_random_sample = Tensor(np.zeros((1, 1, 2)), mstype.float32)
output1, output2 = net(input_x, _random_sample)
assert output1[0].shape == output1[1].shape == (1, 2, 2)
assert output2[0].shape == output2[1].shape == (1, 2, 2)
input_x = Tensor([[[[5.58954370e-001, 6.63938331e-001, 6.21228504e-001, 2.42979444e-001, 3.76893662e-001],
[1.81983045e-003, 3.52343421e-001, 4.62048613e-001, 1.10343760e-001, 1.39571702e-001],
[4.99799584e-001, 4.64907907e-001, 6.20357162e-001, 3.59420753e-001, 1.26215309e-001],
[7.71829579e-002, 4.58553624e-001, 3.58015698e-001, 3.53923170e-001, 1.75972716e-001],
[5.65106732e-001, 6.46603699e-001, 6.05013040e-001, 3.82114821e-001, 4.62306777e-003]]]],
mstype.float32)
_random_sample = Tensor(np.zeros((1, 1, 2)), mstype.float32)
output1, output2 = net(input_x, _random_sample)
expect_output_y = np.array([[[[6.63938344e-001, 3.76893669e-001],
[6.46603703e-001, 3.82114828e-001]]]])
expect_output_argmax = np.array([[[[1, 4],
[21, 23]]]])
assert np.allclose(output1[0].asnumpy(), expect_output_y)
assert np.allclose(output1[1].asnumpy(), expect_output_argmax)
assert np.allclose(output2[0].asnumpy(), expect_output_y)
assert np.allclose(output2[1].asnumpy(), expect_output_argmax)
class FractionalMaxPool3dNet(nn.Cell):
"""FractionalMaxPool3d ops"""
def construct(self, x, _random_sample):
output1 = ops.fractional_max_pool3d(x, kernel_size=(1, 1, 1), output_size=(1, 1, 2), return_indices=True,
_random_samples=_random_sample)
output2 = ops.fractional_max_pool3d(x, kernel_size=(1, 1, 1), output_ratio=(0.5, 0.5, 0.5),
return_indices=True, _random_samples=_random_sample)
return output1, output2
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_arm_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [ms.GRAPH_MODE, ms.PYNATIVE_MODE])
def METHOD_NAME(mode):
"""
Feature: Test FractioanlMaxPool3d
Description: Test the functionality of FractionalMaxPool3d
Expectation: Success
"""
ms.set_context(mode=mode)
input_x = Tensor(np.random.rand(16).reshape([1, 2, 2, 4]), mstype.float32)
net = FractionalMaxPool3dNet()
_random_sample = Tensor(np.zeros((1, 1, 3)), mstype.float32)
output1, output2 = net(input_x, _random_sample)
assert output1[0].shape == output1[1].shape == (1, 1, 1, 2)
assert output2[0].shape == output2[1].shape == (1, 1, 1, 2)
input_x = Tensor([[[[[5.76273143e-001, 7.97047436e-001, 5.05385816e-001, 7.98332036e-001],
[5.79880655e-001, 9.75979388e-001, 3.17571498e-002, 8.08261558e-002]],
[[3.82758647e-001, 7.09801614e-001, 4.39641386e-001, 5.71077049e-001],
[9.16305065e-001, 3.71438652e-001, 6.52868748e-001, 6.91260636e-001]]]]], mstype.float32)
_random_sample = Tensor(np.zeros((1, 1, 3)), mstype.float32)
output1, output2 = net(input_x, _random_sample)
expect_output_y = np.array([[[[[9.16305065e-001, 6.91260636e-001]]]]])
expect_output_argmax = np.array([[[[[12, 15]]]]])
assert np.allclose(output1[0].asnumpy(), expect_output_y)
assert np.allclose(output1[1].asnumpy(), expect_output_argmax)
assert np.allclose(output2[0].asnumpy(), expect_output_y)
assert np.allclose(output2[1].asnumpy(), expect_output_argmax) | null |
5,856 | from typing import Any, Dict, Tuple, Union
import torch
from kornia.core import Tensor
from .utils import arange_sequence, batch_2x2_ellipse, batch_2x2_inv, draw_first_k_couples, piecewise_arange
def METHOD_NAME(residuals: Tensor, ransidx: Tensor) -> Tuple[Tensor, Tensor]:
logres = torch.log(residuals + 1e-10)
minlogres = torch.min(logres)
maxlogres = torch.max(logres)
sorting_score = ransidx.unsqueeze(0).float() + 0.99 * (logres - minlogres) / (maxlogres - minlogres)
sorting_idxes = torch.argsort(sorting_score, dim=-1) # (niters, numsamples)
iters_range = torch.arange(residuals.shape[0], device=residuals.device)
return residuals[iters_range.unsqueeze(-1), sorting_idxes], sorting_idxes
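# Illustrative note: adding ransidx to a residual term squashed into [0, 0.99) keeps each
# RANSAC problem's samples contiguous after the argsort while ordering them by residual
# within the problem, e.g. scores 0.1, 0.7 stay in group 0 ahead of 1.2, 1.5 in group 1.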
def group_sum_and_cumsum(
scores_mat: Tensor, end_group_idx: Tensor, group_idx: Union[Tensor, slice, None] = None
) -> Tuple[Tensor, Union[Tensor, None]]:
cumulative_scores = torch.cumsum(scores_mat, dim=1)
ending_cumusums = cumulative_scores[:, end_group_idx]
shifted_ending_cumusums = torch.cat(
[
torch.zeros(size=(ending_cumusums.shape[0], 1), dtype=ending_cumusums.dtype, device=scores_mat.device),
ending_cumusums[:, :-1],
],
dim=1,
)
grouped_sums = ending_cumusums - shifted_ending_cumusums
if group_idx is not None:
grouped_cumsums = cumulative_scores - shifted_ending_cumusums[:, group_idx]
return grouped_sums, grouped_cumsums
return grouped_sums, None
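# Worked example (illustrative): for one row of scores [1, 2, 3, 4, 5] split into groups
# of sizes [2, 3], end_group_idx is [1, 4]; the cumulative sums are [1, 3, 6, 10, 15],
# the ending cumsums [3, 15], and subtracting the shifted values [0, 3] gives the
# per-group sums [3, 12].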
def confidence_based_inlier_selection(
residuals: Tensor, ransidx: Tensor, rdims: Tensor, idxoffsets: Tensor, dv: torch.device, min_confidence: Tensor
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
numransacs = rdims.shape[0]
numiters = residuals.shape[0]
sorted_res, sorting_idxes = METHOD_NAME(residuals, ransidx)
sorted_res_sqr = sorted_res**2
too_perfect_fits = sorted_res_sqr <= 1e-8
end_rans_indexing = torch.cumsum(rdims, dim=0) - 1
_, inv_indices, res_dup_counts = torch.unique_consecutive(
sorted_res_sqr.half().float(), dim=1, return_counts=True, return_inverse=True
)
duplicates_per_sample = res_dup_counts[inv_indices]
inlier_weights = (1.0 / duplicates_per_sample).repeat(numiters, 1)
inlier_weights[too_perfect_fits] = 0.0
balanced_rdims, weights_cumsums = group_sum_and_cumsum(inlier_weights, end_rans_indexing, ransidx)
if not isinstance(weights_cumsums, Tensor):
raise TypeError('Expected the `weights_cumsums` to be a Tensor!')
progressive_inl_rates = weights_cumsums.float() / (balanced_rdims.repeat_interleave(rdims, dim=1)).float()
good_inl_mask = (sorted_res_sqr * min_confidence <= progressive_inl_rates) | too_perfect_fits
inlier_weights[~good_inl_mask] = 0.0
inlier_counts_matrix, _ = group_sum_and_cumsum(inlier_weights, end_rans_indexing)
inl_counts, inl_iters = torch.max(inlier_counts_matrix.long(), dim=0)
relative_inl_idxes = arange_sequence(inl_counts)
inl_ransidx = torch.arange(numransacs, device=dv).repeat_interleave(inl_counts)
inl_sampleidx = sorting_idxes[inl_iters.repeat_interleave(inl_counts), idxoffsets[inl_ransidx] + relative_inl_idxes]
highest_accepted_sqr_residuals = sorted_res_sqr[inl_iters, idxoffsets + inl_counts - 1]
expected_extra_inl = (
balanced_rdims[inl_iters, torch.arange(numransacs, device=dv)].float() * highest_accepted_sqr_residuals
)
return inl_ransidx, inl_sampleidx, inl_counts, inl_iters, inl_counts.float() / expected_extra_inl
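# Packs the variable-length inlier set of each RANSAC problem into zero-padded
# (numransacs, maxinliers, 2) tensors so the least-squares refit can run as one
# batched operation.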
def sample_padded_inliers(
xsamples: Tensor,
ysamples: Tensor,
inlier_counts: Tensor,
inl_ransidx: Tensor,
inl_sampleidx: Tensor,
numransacs: int,
dv: torch.device,
) -> Tuple[Tensor, Tensor]:
maxinliers = int(torch.max(inlier_counts).item())
dtype = xsamples.dtype
padded_inlier_x = torch.zeros(size=(numransacs, maxinliers, 2), device=dv, dtype=dtype)
padded_inlier_y = torch.zeros(size=(numransacs, maxinliers, 2), device=dv, dtype=dtype)
padded_inlier_x[inl_ransidx, piecewise_arange(inl_ransidx)] = xsamples[inl_sampleidx]
padded_inlier_y[inl_ransidx, piecewise_arange(inl_ransidx)] = ysamples[inl_sampleidx]
return padded_inlier_x, padded_inlier_y
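# Batched 2x2-affinity RANSAC: draws `iters` minimal two-correspondence samples per
# problem, fits the affinities in closed form with the batched 2x2 inverse, scores all
# hypotheses with the confidence-based inlier selection above and, unless `refit` is
# disabled (or no inliers were found), re-estimates each affinity by least squares on
# its inliers and re-selects the inliers. Fits whose scale change exceeds
# `detected_scale_rate_threshold` are replaced by the identity.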
def ransac(
xsamples: Tensor, ysamples: Tensor, rdims: Tensor, config: Dict[str, Any], iters: int = 128, refit: bool = True
) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
DET_THR = config['detected_scale_rate_threshold']
MIN_CONFIDENCE = config['min_confidence']
dv: torch.device = config['device']
numransacs = rdims.shape[0]
ransidx = torch.arange(numransacs, device=dv).repeat_interleave(rdims)
idxoffsets = torch.cat([torch.tensor([0], device=dv), torch.cumsum(rdims[:-1], dim=0)], dim=0)
rand_samples_rel = draw_first_k_couples(iters, rdims, dv)
rand_samples_abs = rand_samples_rel + idxoffsets
sampled_x = torch.transpose(
xsamples[rand_samples_abs], dim0=1, dim1=2
) # (niters, 2, numransacs, 2) -> (niters, numransacs, 2, 2)
sampled_y = torch.transpose(ysamples[rand_samples_abs], dim0=1, dim1=2)
# minimal fit for sampled_x @ A^T = sampled_y
affinities_fit = torch.transpose(batch_2x2_inv(sampled_x, check_dets=True) @ sampled_y, -1, -2)
if not refit:
eigenvals, eigenvecs = batch_2x2_ellipse(affinities_fit)
bad_ones = (eigenvals[..., 1] < 1 / DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
affinities_fit[bad_ones] = torch.eye(2, device=dv)
y_pred = (affinities_fit[:, ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1) # (niters, numsamples)
inl_ransidx, inl_sampleidx, inl_counts, inl_iters, inl_confidence = confidence_based_inlier_selection(
residuals, ransidx, rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE
)
if len(inl_sampleidx) == 0:
# If no inliers have been found, there is nothing to re-fit!
refit = False
if not refit:
return (
inl_sampleidx,
affinities_fit[inl_iters, torch.arange(inl_iters.shape[0], device=dv)],
inl_confidence,
inl_counts,
)
# Organize inliers found into a matrix for efficient GPU re-fitting.
# Cope with the irregular number of inliers per sample by padding with zeros
padded_inlier_x, padded_inlier_y = sample_padded_inliers(
xsamples, ysamples, inl_counts, inl_ransidx, inl_sampleidx, numransacs, dv
)
# A @ pad_x.T = pad_y.T
# A = pad_y.T @ pad_x @ (pad_x.T @ pad_x)^-1
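# Normal-equations least squares over the padded inliers: minimising ||A X^T - Y^T||_F^2
# with X = padded_inlier_x and Y = padded_inlier_y gives A = Y^T X (X^T X)^-1; the
# zero-padded rows contribute nothing to either product, so the padding is harmless.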
refit_affinity = (
padded_inlier_y.transpose(-2, -1)
@ padded_inlier_x
@ batch_2x2_inv(padded_inlier_x.transpose(-2, -1) @ padded_inlier_x, check_dets=True)
)
# Filter out degenerate affinities with large scale changes
eigenvals, eigenvecs = batch_2x2_ellipse(refit_affinity)
bad_ones = (eigenvals[..., 1] < 1 / DET_THR**2) | (eigenvals[..., 0] > DET_THR**2)
refit_affinity[bad_ones] = torch.eye(2, device=dv, dtype=refit_affinity.dtype)
y_pred = (refit_affinity[ransidx] @ xsamples.unsqueeze(-1)).squeeze(-1)
residuals = torch.norm(y_pred - ysamples, dim=-1)
inl_ransidx, inl_sampleidx, inl_counts, inl_iters, inl_confidence = confidence_based_inlier_selection(
residuals.unsqueeze(0), ransidx, rdims, idxoffsets, dv=dv, min_confidence=MIN_CONFIDENCE
)
return inl_sampleidx, refit_affinity, inl_confidence, inl_counts | null |
5,857 | from unittest.mock import patch
import pytest
from django.test import override_settings
from jwt import PyJWKClientError
from social_core.exceptions import AuthException # type: ignore
from overwolf_auth.backends import OverwolfOAuth2, UserDetails
from overwolf_auth.cached_jwk_client import CachedJWKClient
ID_TOKEN = {
"sub": "potato",
"nickname": "fry",
"picture": "https://upload.wikimedia.org/wikipedia/commons/0/02/Potato_with_sprouts.jpg",
"preferred_username": "fry",
"email": "[email protected]",
"email_verified": True,
}
@patch.object(OverwolfOAuth2, "_decode_response_jwt", return_value=ID_TOKEN)
def test_extra_data_is_returned(mocked_decode) -> None:
extra_data = OverwolfOAuth2().extra_data(None, None, {})
assert extra_data["email_verified"] == ID_TOKEN["email_verified"]
assert extra_data["nickname"] == ID_TOKEN["nickname"]
assert extra_data["picture"] == ID_TOKEN["picture"]
assert extra_data["preferred_username"] == ID_TOKEN["preferred_username"]
@patch.object(OverwolfOAuth2, "_decode_response_jwt", return_value=ID_TOKEN)
def test_get_user_id_reads_correct_field(mocked_decode) -> None:
details: UserDetails = {
"username": "potato",
"email": "[email protected]",
"fullname": "",
"first_name": "",
"last_name": "",
}
user_id = OverwolfOAuth2().get_user_id(details, {})
assert user_id == details["username"]
@patch.object(OverwolfOAuth2, "_decode_response_jwt", return_value={})
def METHOD_NAME(mocked_decode) -> None:
with pytest.raises(AuthException) as exception_info:
OverwolfOAuth2().get_user_details({})
assert exception_info.value.backend == "JWT contained no username (sub)"
@patch.object(OverwolfOAuth2, "_decode_response_jwt", return_value=ID_TOKEN)
def test_get_user_details_returns_correct_data(mocked_decode) -> None:
data = OverwolfOAuth2().get_user_details({})
assert data["username"] == ID_TOKEN["sub"]
assert data["email"] == ID_TOKEN["email"]
assert data["fullname"] == ""
assert data["first_name"] == ""
assert data["last_name"] == ""
def test__decode_response_jwt_fails_on_missing_id_token() -> None:
with pytest.raises(AuthException) as exception_info:
OverwolfOAuth2()._decode_response_jwt({})
assert exception_info.value.backend == "No id_token in auth response"
@override_settings(SOCIAL_AUTH_OVERWOLF_KEY="test")
@patch.object(CachedJWKClient, "clear_cache")
def test_cached_jwt_client_refetches_jwk_on_error(mocked_clear_cache) -> None:
def args_storage_wrapper(func):
"""
Wrapper that records the arguments of every call made to the wrapped
function. Using patch() with a function that recursively calls itself
seemed to lead to a situation where only the latest call's arguments
were stored.
"""
def wrapper(*args, **kwargs):
wrapper.call_args_list.append(args)
return func(*args, **kwargs)
wrapper.call_args_list = []
return wrapper
from overwolf_auth import cached_jwk_client
wrapped_func = args_storage_wrapper(cached_jwk_client.decode_jwt)
# Rubbish, decoding this will always fail.
broken_jwt = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuYW1lIjoiSm9obiBEb2UifQ.DjwRE2jZhren2Wt37t5hlVru6Myq4AhpGLiiefF69u8"
with patch("overwolf_auth.cached_jwk_client.decode_jwt", new=wrapped_func):
with pytest.raises(PyJWKClientError):
cached_jwk_client.decode_jwt(broken_jwt, False)
# When the client fails the first time, it should clear the
# cache and call itself recursively with the reraise flag set to
# True. Because the JWT is broken, this second attempt also fails.
assert len(wrapped_func.call_args_list) == 2
assert wrapped_func.call_args_list[0] == (broken_jwt, False)
assert wrapped_func.call_args_list[1] == (broken_jwt, True)
# Cached JWK client should clear the cache whenever decoding a JWT fails.
assert mocked_clear_cache.call_count == 2
5,858 | """Tests for the clear cache operation from proselint.command_line."""
import os
import unittest
from proselint import command_line as cl
try:
from unittest import mock
except ImportError:
# Py2.x: fall back to the standalone ``mock`` backport package
import mock
try:
from builtins import PermissionError
except ImportError:
class PermissionError(OSError):
"""Introduced in Py3.3, emulate for earlier versions."""
def __init__(self, *args, **kwargs):
"""Constructor."""
OSError.__init__(self, *args, **kwargs)
try:
from builtins import FileNotFoundError
except ImportError:
class FileNotFoundError(OSError):
"""Introduced in Py3.3, emulate for earlier versions."""
def __init__(self, *args, **kwargs):
"""Constructor."""
OSError.__init__(self, *args, **kwargs)
try:
from builtins import IsADirectoryError
except ImportError:
class IsADirectoryError(OSError):
"""Introduced in Py3.3, emulate for earlier versions."""
def __init__(self, *args, **kwargs):
"""Constructor."""
OSError.__init__(self, *args, **kwargs)
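# The emulated exception classes above only exist so that the mocked os.remove side
# effects used below also work on interpreters that predate the fine-grained OSError
# subclasses introduced in Python 3.3.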
class Test__delete_compiled_python_files(unittest.TestCase):
"""proselint.command_line._delete_compiled_python_files()."""
def setUp(self):
"""init common data."""
self.base_dir = '.'
self.python_file = 'a.py'
self.pyc_file = 'a.pyc'
self.dot_pyc = '.pyc'
self.files = [
(self.base_dir, ('dummy',), (self.pyc_file,
self.python_file,
self.dot_pyc))
]
self.pyc_file_path = os.path.join(self.base_dir, self.pyc_file)
self.python_file_path = os.path.join(self.base_dir, self.python_file)
self.dot_pyc_path = os.path.join(self.base_dir, self.dot_pyc)
@mock.patch('os.walk')
@mock.patch('os.remove')
def test_delete_pyc_file(self, mock_remove, mock_walk):
"""Ensure 'pyc' files are removed."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
mock_remove.assert_called_with(self.pyc_file_path)
@mock.patch('os.walk')
@mock.patch('os.remove')
def test_files_not_deleted(self, mock_remove, mock_walk):
"""Ensure non 'pyc' files are not removed."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
with self.assertRaises(AssertionError):
mock_remove.assert_called_with(self.python_file_path)
with self.assertRaises(AssertionError):
mock_remove.assert_called_with(self.dot_pyc_path)
@mock.patch('os.walk')
@mock.patch('os.remove', side_effect=PermissionError)
def test_no_permission(self, mock_remove, mock_walk):
"""Ignore if unable to delete files."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
@mock.patch('os.walk')
@mock.patch('os.remove', side_effect=OSError)
def test_on_oserror(self, mock_remove, mock_walk):
"""Ignore if OSError."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
@mock.patch('os.walk')
@mock.patch('os.remove', side_effect=FileNotFoundError)
def test_files_not_found(self, mock_remove, mock_walk):
"""Ignore if file not found."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
@mock.patch('os.walk')
@mock.patch('os.remove', side_effect=IsADirectoryError)
def METHOD_NAME(self, mock_remove, mock_walk):
"""Ignore if attempt to delete a directory."""
mock_walk.return_value = self.files
cl._delete_compiled_python_files()
class Test__delete_cache(unittest.TestCase):
"""proselint.command_line.__delete_cache()."""
def setUp(self):
"""Init common data."""
self.cache_path = os.path.join("proselint", "cache")
@mock.patch('shutil.rmtree')
def test_rm_cache(self, mock_rmtree):
"""Correct directory is removed."""
cl._delete_cache()
mock_rmtree.assert_called_with(self.cache_path)
@mock.patch('shutil.rmtree', side_effect=PermissionError)
def test_no_permission(self, mock_rmtree):
"""Ignore if unable to delete."""
cl._delete_cache()
@mock.patch('shutil.rmtree', side_effect=OSError)
def test_on_oserror(self, mock_rmtree):
"""Ignore if general OSError."""
cl._delete_cache() | null |
5,859 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.kernel import *
from mantid.api import *
from mantid.simpleapi import *
import numpy as np
class PoldiMergeTest(unittest.TestCase):
def __init__(self, *args):
unittest.TestCase.__init__(self, *args)
properties = ["TablePositionX", "TablePositionY", "TablePositionZ", "ChopperSpeed"]
leftData = [0.0, 1.0, 2.0, 3.0]
rightDataGood = [0.0, 1.0, 2.0, 3.0]
rightDataBadOffset = [1.0, 2.0, 3.0, 4.0]
rightDataBadDelta = [0.0, 2.0, 4.0, 6.0]
ydata = np.ones(len(leftData))
self.base = CreateWorkspace(leftData, ydata, OutputWorkspace="Base")
self.goodTiming = CreateWorkspace(rightDataGood, ydata, OutputWorkspace="GoodTiming")
self.goodTimingBadProperties = CreateWorkspace(rightDataGood, ydata, OutputWorkspace="GoodTimingBadProperties")
self.badTimingOffset = CreateWorkspace(rightDataBadOffset, ydata, OutputWorkspace="BadTimingOffset")
self.badTimingDelta = CreateWorkspace(rightDataBadDelta, ydata, OutputWorkspace="BadTimingDelta")
self.groupGood = GroupWorkspaces(["Base", "GoodTiming"], OutputWorkspace="GoodGroup")
goodProperty = 10.0
badProperty = 20.0
for p in properties:
self.base.getRun().addProperty(p, goodProperty, True)
self.goodTiming.getRun().addProperty(p, goodProperty, True)
self.badTimingOffset.getRun().addProperty(p, goodProperty, True)
self.badTimingDelta.getRun().addProperty(p, goodProperty, True)
self.goodTimingBadProperties.getRun().addProperty(p, badProperty, True)
def __runMerge__(self, workspaceNames, checkInstruments=False):
return PoldiMerge(WorkspaceNames=workspaceNames, OutputWorkspace="PoldiMergeOutput", CheckInstruments=checkInstruments)
def test_happyCase(self):
output = self.__runMerge__("Base,GoodTiming")
self.assertTrue(isinstance(output, MatrixWorkspace))
dataX = output.dataX(0)
self.assertEqual(dataX[0], 0.0)
self.assertEqual(dataX[-1], 3.0)
self.assertEqual(len(dataX), 4)
dataY = output.dataY(0)
self.assertEqual(dataY[0], 2.0)
self.assertEqual(dataY[1], 2.0)
self.assertEqual(len(dataY), 4)
DeleteWorkspace("PoldiMergeOutput")
def test_workspaceGroup(self):
output = self.__runMerge__("GoodGroup")
self.assertTrue(isinstance(output, MatrixWorkspace))
dataX = output.dataX(0)
self.assertEqual(dataX[0], 0.0)
self.assertEqual(dataX[-1], 3.0)
self.assertEqual(len(dataX), 4)
dataY = output.dataY(0)
self.assertEqual(dataY[0], 2.0)
self.assertEqual(dataY[1], 2.0)
self.assertEqual(len(dataY), 4)
DeleteWorkspace("PoldiMergeOutput")
def METHOD_NAME(self):
self.assertRaisesRegex(
RuntimeError, "Workspaces can not be merged. Timings don't match. Aborting.", lambda: self.__runMerge__("Base,BadTimingDelta")
)
self.assertFalse(AnalysisDataService.doesExist("PoldiMergeOutput"))
def test_timingOffset(self):
self.assertRaisesRegex(
RuntimeError, "Workspaces can not be merged. Timings don't match. Aborting.", lambda: self.__runMerge__("Base,BadTimingOffset")
)
self.assertFalse(AnalysisDataService.doesExist("PoldiMergeOutput"))
def test_badProperties(self):
self.assertRaisesRegex(
RuntimeError,
"Workspaces can not be merged. Property 'TablePositionX' does not match. Aborting.",
lambda: self.__runMerge__("Base,GoodTimingBadProperties", True),
)
self.assertFalse(AnalysisDataService.doesExist("PoldiMergeOutput"))
def test_badName(self):
self.assertRaisesRegex(
RuntimeError, "Not all strings in the input list are valid workspace names.", lambda: self.__runMerge__("Base,NotExisting")
)
self.assertFalse(AnalysisDataService.doesExist("PoldiMergeOutput"))
if __name__ == "__main__":
unittest.main() | null |
5,860 | #!/usr/bin/python
"""
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
scripts/escalate_tickets.py - Easy way to escalate tickets based on their age,
designed to be run from Cron or similar.
"""
from datetime import date, timedelta
from django.core.management.base import BaseCommand, CommandError
from django.db.models import Q
from django.utils import timezone
from django.utils.translation import gettext as _
import getopt
from helpdesk.lib import safe_template_context
from helpdesk.models import EscalationExclusion, FollowUp, Queue, Ticket, TicketChange
from optparse import make_option
import sys
class Command(BaseCommand):
def __init__(self):
BaseCommand.__init__(self)
self.option_list = (
make_option(
'--queues',
help='Queues to include (default: all). Use queue slugs'),
make_option(
'--verboseescalation',
action='store_true',
default=False,
help='Display a list of dates excluded'),
)
def handle(self, *args, **options):
verbose = False
queue_slugs = None
queues = []
if 'verboseescalation' in options:
verbose = True
if 'queues' in options:
queue_slugs = options['queues']
if queue_slugs is not None:
queue_set = queue_slugs.split(',')
for queue in queue_set:
try:
Queue.objects.get(slug__exact=queue)
except Queue.DoesNotExist:
raise CommandError("Queue %s does not exist." % queue)
queues.append(queue)
METHOD_NAME(queues=queues, verbose=verbose)
def METHOD_NAME(queues, verbose):
""" Only include queues with escalation configured """
queryset = Queue.objects.filter(
escalate_days__isnull=False).exclude(escalate_days=0)
if queues:
queryset = queryset.filter(slug__in=queues)
for q in queryset:
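# Count how many of the last `escalate_days` calendar days are not listed as
# EscalationExclusion dates; a ticket qualifies for escalation below if it was created
# or last escalated at least that many days ago.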
last = date.today() - timedelta(days=q.escalate_days)
today = date.today()
workdate = last
days = 0
while workdate < today:
if EscalationExclusion.objects.filter(date=workdate).count() == 0:
days += 1
workdate = workdate + timedelta(days=1)
req_last_escl_date = date.today() - timedelta(days=days)
if verbose:
print("Processing: %s" % q)
for t in q.ticket_set.filter(
Q(status=Ticket.OPEN_STATUS) |
Q(status=Ticket.REOPENED_STATUS)
).exclude(
priority=1
).filter(
Q(on_hold__isnull=True) |
Q(on_hold=False)
).filter(
Q(last_escalation__lte=req_last_escl_date) |
Q(last_escalation__isnull=True, created__lte=req_last_escl_date)
):
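# Escalate: stamp the escalation time and raise the ticket one priority level
# (numerically lower values are higher priority; priority 1 is excluded above).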
t.last_escalation = timezone.now()
t.priority -= 1
t.save()
context = safe_template_context(t)
t.send(
{'submitter': ('escalated_submitter', context),
'ticket_cc': ('escalated_cc', context),
'assigned_to': ('escalated_owner', context)},
fail_silently=True,
)
if verbose:
print(" - Esclating %s from %s>%s" % (
t.ticket,
t.priority + 1,
t.priority
)
)
f = FollowUp(
ticket=t,
title='Ticket Escalated',
date=timezone.now(),
public=True,
comment=_('Ticket escalated after %s days' % q.escalate_days),
)
f.save()
tc = TicketChange(
followup=f,
field=_('Priority'),
old_value=t.priority + 1,
new_value=t.priority,
)
tc.save()
def usage():
print("Options:")
print(" --queues: Queues to include (default: all). Use queue slugs")
print(" --verboseescalation: Display a list of dates excluded")
if __name__ == '__main__':
try:
opts, args = getopt.getopt(
sys.argv[1:], '', ['queues=', 'verboseescalation'])
except getopt.GetoptError:
usage()
sys.exit(2)
verbose = False
queue_slugs = None
queues = []
for o, a in opts:
if o == '--verboseescalation':
verbose = True
if o == '--queues':
queue_slugs = a
if queue_slugs is not None:
queue_set = queue_slugs.split(',')
for queue in queue_set:
try:
q = Queue.objects.get(slug__exact=queue)
except Queue.DoesNotExist:
print("Queue %s does not exist." % queue)
sys.exit(2)
queues.append(queue)
METHOD_NAME(queues=queues, verbose=verbose) | null |
5,861 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetLocalNetworkGatewayResult',
'AwaitableGetLocalNetworkGatewayResult',
'get_local_network_gateway',
'get_local_network_gateway_output',
]
@pulumi.output_type
class GetLocalNetworkGatewayResult:
"""
A collection of values returned by getLocalNetworkGateway.
"""
def __init__(__self__, METHOD_NAME=None, bgp_settings=None, gateway_address=None, gateway_fqdn=None, id=None, location=None, name=None, resource_group_name=None, tags=None):
if METHOD_NAME and not isinstance(METHOD_NAME, list):
raise TypeError("Expected argument 'address_spaces' to be a list")
pulumi.set(__self__, "address_spaces", METHOD_NAME)
if bgp_settings and not isinstance(bgp_settings, list):
raise TypeError("Expected argument 'bgp_settings' to be a list")
pulumi.set(__self__, "bgp_settings", bgp_settings)
if gateway_address and not isinstance(gateway_address, str):
raise TypeError("Expected argument 'gateway_address' to be a str")
pulumi.set(__self__, "gateway_address", gateway_address)
if gateway_fqdn and not isinstance(gateway_fqdn, str):
raise TypeError("Expected argument 'gateway_fqdn' to be a str")
pulumi.set(__self__, "gateway_fqdn", gateway_fqdn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="addressSpaces")
def METHOD_NAME(self) -> Sequence[str]:
"""
The list of string CIDRs representing the address spaces the gateway exposes.
"""
return pulumi.get(self, "address_spaces")
@property
@pulumi.getter(name="bgpSettings")
def bgp_settings(self) -> Sequence['outputs.GetLocalNetworkGatewayBgpSettingResult']:
"""
A `bgp_settings` block as defined below containing the Local Network Gateway's BGP speaker settings.
"""
return pulumi.get(self, "bgp_settings")
@property
@pulumi.getter(name="gatewayAddress")
def gateway_address(self) -> str:
"""
The gateway IP address the Local Network Gateway uses.
"""
return pulumi.get(self, "gateway_address")
@property
@pulumi.getter(name="gatewayFqdn")
def gateway_fqdn(self) -> str:
"""
The gateway FQDN the Local Network Gateway uses.
"""
return pulumi.get(self, "gateway_fqdn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region where the Local Network Gateway exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the Local Network Gateway.
"""
return pulumi.get(self, "tags")
class AwaitableGetLocalNetworkGatewayResult(GetLocalNetworkGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLocalNetworkGatewayResult(
METHOD_NAME=self.METHOD_NAME,
bgp_settings=self.bgp_settings,
gateway_address=self.gateway_address,
gateway_fqdn=self.gateway_fqdn,
id=self.id,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_local_network_gateway(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLocalNetworkGatewayResult:
"""
Use this data source to access information about an existing Local Network Gateway.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.network.get_local_network_gateway(name="existing-local-network-gateway",
resource_group_name="existing-resources")
pulumi.export("id", example.id)
```
:param str name: The name of the Local Network Gateway.
:param str resource_group_name: The name of the Resource Group where the Local Network Gateway exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:network/getLocalNetworkGateway:getLocalNetworkGateway', __args__, opts=opts, typ=GetLocalNetworkGatewayResult).value
return AwaitableGetLocalNetworkGatewayResult(
METHOD_NAME=pulumi.get(__ret__, 'address_spaces'),
bgp_settings=pulumi.get(__ret__, 'bgp_settings'),
gateway_address=pulumi.get(__ret__, 'gateway_address'),
gateway_fqdn=pulumi.get(__ret__, 'gateway_fqdn'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_local_network_gateway)
def get_local_network_gateway_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLocalNetworkGatewayResult]:
"""
Use this data source to access information about an existing Local Network Gateway.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.network.get_local_network_gateway(name="existing-local-network-gateway",
resource_group_name="existing-resources")
pulumi.export("id", example.id)
```
:param str name: The name of the Local Network Gateway.
:param str resource_group_name: The name of the Resource Group where the Local Network Gateway exists.
"""
... | null |
5,862 | from __future__ import annotations
import json
from collections.abc import Iterator
from pathlib import Path
from typing import TYPE_CHECKING, Any
from dcs import Point
from dcs.mapping import Point as DcsPoint
from dcs.terrain import Terrain
from numpy import float64, array
from numpy._typing import NDArray
from shapely import transform, METHOD_NAME
from shapely.geometry.base import BaseGeometry
if TYPE_CHECKING:
from .waypointstrategy import WaypointStrategy
class NoSolutionsError(RuntimeError):
pass
class WaypointSolver:
def __init__(self) -> None:
self.strategies: list[WaypointStrategy] = []
self.debug_output_directory: Path | None = None
self._terrain: Terrain | None = None
def add_strategy(self, strategy: WaypointStrategy) -> None:
self.strategies.append(strategy)
def set_debug_properties(self, path: Path, terrain: Terrain) -> None:
self.debug_output_directory = path
self._terrain = terrain
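# Converts a shapely geometry from DCS map x/y coordinates into a GeoJSON dict,
# rewriting every vertex as [longitude, latitude] (longitude first, as RFC 7946
# requires) via the terrain's coordinate mapping.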
def METHOD_NAME(self, geometry: BaseGeometry) -> dict[str, Any]:
if geometry.is_empty:
return json.loads(METHOD_NAME(geometry))
assert self._terrain is not None
origin = DcsPoint(0, 0, self._terrain)
def xy_to_ll(points: NDArray[float64]) -> NDArray[float64]:
ll_points = []
for point in points:
p = origin.new_in_same_map(point[0], point[1])
latlng = p.latlng()
# Longitude is unintuitively first because it's the "X" coordinate:
# https://datatracker.ietf.org/doc/html/rfc7946#section-3.1.1
ll_points.append([latlng.lng, latlng.lat])
return array(ll_points)
transformed = transform(geometry, xy_to_ll)
return json.loads(METHOD_NAME(transformed))
def describe_metadata(self) -> dict[str, Any]:
return {}
def describe_inputs(self) -> Iterator[tuple[str, BaseGeometry]]:
yield from []
def describe_debug(self) -> dict[str, Any]:
assert self._terrain is not None
metadata = {"name": self.__class__.__name__, "terrain": self._terrain.name}
metadata.update(self.describe_metadata())
return {
"type": "FeatureCollection",
# The GeoJSON spec forbids us from adding a "properties" field to a feature
# collection, but it doesn't restrict us from adding our own custom fields.
# https://gis.stackexchange.com/a/209263
#
# It's possible that some consumers won't work with this, but we don't read
# collections directly with shapely and geojson.io is happy with it, so it
# works where we need it to.
"metadata": metadata,
"features": list(self.describe_features()),
}
def describe_features(self) -> Iterator[dict[str, Any]]:
for description, geometry in self.describe_inputs():
yield {
"type": "Feature",
"properties": {
"description": description,
},
"geometry": self.METHOD_NAME(geometry),
}
def dump_debug_info(self) -> None:
path = self.debug_output_directory
if path is None:
return
path.mkdir(exist_ok=True, parents=True)
inputs_path = path / "solver.json"
with inputs_path.open("w", encoding="utf-8") as inputs_file:
json.dump(self.describe_debug(), inputs_file)
features = list(self.describe_features())
for idx, strategy in enumerate(self.strategies):
strategy_path = path / f"{idx}.json"
with strategy_path.open("w", encoding="utf-8") as strategy_debug_file:
json.dump(
{
"type": "FeatureCollection",
"metadata": {
"name": strategy.__class__.__name__,
"prerequisites": [
p.describe_debug_info(self.METHOD_NAME)
for p in strategy.prerequisites
],
},
# Include the solver's features in the strategy feature
# collection for easy copy/paste into geojson.io.
"features": features
+ [
d.METHOD_NAME(self.METHOD_NAME)
for d in strategy.iter_debug_info()
],
},
strategy_debug_file,
)
def solve(self) -> Point:
if not self.strategies:
raise ValueError(
"WaypointSolver.solve() called before any strategies were added"
)
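# Strategies are tried in the order they were registered; the first one that finds a
# point wins. If none succeeds, the debug info is dumped and NoSolutionsError is raised.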
for strategy in self.strategies:
if (point := strategy.find()) is not None:
return point
self.dump_debug_info()
debug_details = "No debug output directory set"
if (debug_path := self.debug_output_directory) is not None:
debug_details = f"Debug details written to {debug_path}"
raise NoSolutionsError(f"No solutions found for waypoint. {debug_details}") | null |
5,863 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation for internal polymorphism `div` operations."""
from __future__ import division
from mindspore.ops.composite.multitype_ops import _compile_utils as utils
from mindspore.ops.composite.multitype_ops._constexpr_utils import log_warning, check_equal
from mindspore.ops.composite import base
from mindspore.ops import functional as F
from mindspore.common import COOTensor
div = base.MultitypeFuncGraph("div", True, True)
"""
div is a metafuncgraph object which will divide two objects according to input type
using the ".register" decorator
"""
@div.register("CSRTensor", "Tensor")
def _csrtensor_div_tensor(x, y):
"""
Returns x / y where x is CSRTensor and y is Tensor.
Outputs:
CSRTensor, equal to x / y.
"""
log_warning("For CSR divide, zero values in the dense tensor are ignored.")
return F.csr_div(x, y)
@div.register("COOTensor", "Tensor")
def _cootensor_div_tensor(x, y):
"""
Returns x / y where x is COOTensor and y is Tensor.
Outputs:
COOTensor, equal to x / y.
"""
check_equal(x.shape, y.shape, "input1 (shape={}) and input2(shape={}) should be the same shape.")
log_warning("For sparse divide, zero values in the dense tensor are ignored.")
other_values = F.gather_nd(y, x.indices)
return COOTensor(x.indices, x.values / other_values, x.shape)
@div.register("Number", "Number")
def _div_scalar(x, y):
"""
Two numbers divide.
Args:
x (Number): x
y (Number): y
Returns:
Number, equal to x / y, the type is same as x.
"""
return F.scalar_div(x, y)
@div.register("Tensor", "Tensor")
def _div_tensor(x, y):
"""
Two tensors divide by element.
Args:
x (Tensor): The first input tensor.
y (Tensor): The second input tensor.
Returns:
Tensor, has the same dtype as x.
"""
return F.tensor_div(x, y)
@div.register("Number", "Tensor")
def _scalar_div_tensor(x, y):
"""
Number divided by tensor.
Args:
x (Number): x
y (Tensor): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
return F.tensor_div(x, y)
@div.register("Tensor", "Number")
def _tensor_div_scalar(x, y):
"""
Tensor divided by number.
Args:
x (Tensor): x
y (Number): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
return F.tensor_div(x, y)
@div.register("Tuple", "Tensor")
def _tuple_div_tensor(x, y):
"""
Tuple divided by tensor.
Args:
x (Tuple): x
y (Tensor): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
x = utils.sequence_to_tensor(x, y.dtype)
return F.tensor_div(x, y)
@div.register("Tensor", "Tuple")
def METHOD_NAME(x, y):
"""
Tensor divided by tuple.
Args:
x (Tensor): x
y (Tuple): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
y = utils.sequence_to_tensor(y, x.dtype)
return F.tensor_div(x, y)
@div.register("List", "Tensor")
def _list_div_tensor(x, y):
"""
List divided by tensor.
Args:
x (List): x
y (Tensor): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
x = utils.sequence_to_tensor(x, y.dtype)
return F.tensor_div(x, y)
@div.register("Tensor", "List")
def _tensor_div_list(x, y):
"""
Tensor divided by list
Args:
x (Tensor): x
y (List): The dtype is same as x.
Returns:
Tensor, has the same dtype as x.
"""
y = utils.sequence_to_tensor(y, x.dtype)
return F.tensor_div(x, y) | null |
5,864 | from typing import List, Optional, Type
from unittest.mock import Mock, patch
import pytest
import requests
from thunderstore.social.providers import (
BaseOauthHelper,
DiscordOauthHelper,
GitHubOauthHelper,
get_helper,
)
@patch.object(
requests,
"post",
return_value=Mock(
json=lambda: {
"access_token": "mellon",
"scope": "read:user,user:email",
"token_type": "bearer",
}
),
)
def METHOD_NAME(mocked_request_post) -> None:
helper = GitHubOauthHelper("code", "redirect_uri")
helper.complete_login()
mocked_request_post.assert_called_once()
assert helper.token == "mellon"
@pytest.mark.parametrize(
"helper_class, method_name",
(
(DiscordOauthHelper, "get_user_info"),
(GitHubOauthHelper, "get_user_info"),
(GitHubOauthHelper, "get_user_email"),
),
)
def test_api_methods_check_for_token(
helper_class: Type[BaseOauthHelper], method_name: str
) -> None:
helper = helper_class("code", "redirect_uri")
with pytest.raises(
Exception,
match="No token found. Did you call .complete_login()?",
):
api_method = getattr(helper, method_name)
api_method()
@patch.object(
requests,
"get",
return_value=Mock(
json=lambda: {
"email": "[email protected]",
"id": "5678",
"something": "extra",
"username": "Foo",
}
),
)
def test_discord_get_user_info(mocked_request_get) -> None:
helper = DiscordOauthHelper("code", "redirect_uri")
helper.token = "token"
info = helper.get_user_info()
mocked_request_get.assert_called_once()
assert info.email == "[email protected]"
assert info.extra_data["email"] == "[email protected]"
assert info.extra_data["id"] == "5678"
assert info.extra_data["something"] == "extra"
assert info.extra_data["username"] == "Foo"
assert info.name == ""
assert info.uid == "5678"
assert info.username == "Foo"
@patch.object(
requests,
"get",
return_value=Mock(
json=lambda: {
"email": "[email protected]",
"id": "5678",
"login": "Foo",
"name": "Foo Bar",
"something": "extra",
}
),
)
def test_github_get_user_info_with_public_email(mocked_request_get) -> None:
helper = GitHubOauthHelper("code", "redirect_uri")
helper.token = "token"
info = helper.get_user_info()
mocked_request_get.assert_called_once()
assert info.email == "[email protected]"
assert info.extra_data["email"] == "[email protected]"
assert info.extra_data["id"] == "5678"
assert info.extra_data["login"] == "Foo"
assert info.extra_data["name"] == "Foo Bar"
assert info.extra_data["something"] == "extra"
assert info.name == "Foo Bar"
assert info.uid == "5678"
assert info.username == "Foo"
@patch.object(
requests,
"get",
return_value=Mock(
json=lambda: [
{
"email": "[email protected]",
"primary": False,
"verified": True,
},
{
"email": "[email protected]",
"primary": True,
"verified": True,
},
]
),
)
def test_github_get_user_email_with_valid_email(mocked_request_get) -> None:
helper = GitHubOauthHelper("code", "redirect_uri")
helper.token = "token"
email = helper.get_user_email()
mocked_request_get.assert_called_once()
assert email == "[email protected]"
@pytest.mark.parametrize(
"mock_response",
(
[],
[
{
"email": "[email protected]",
"primary": False,
"verified": True,
},
{
"email": "[email protected]",
"primary": False,
"verified": True,
},
],
[
{
"email": "[email protected]",
"primary": True,
"verified": False,
},
{
"email": "[email protected]",
"primary": False,
"verified": True,
},
],
),
)
def test_github_get_user_email_without_valid_email(mock_response: List) -> None:
helper = GitHubOauthHelper("code", "redirect_uri")
helper.token = "token"
with patch.object(requests, "get", return_value=Mock(json=lambda: mock_response)):
with pytest.raises(Exception, match="User has no email available"):
helper.get_user_email()
@pytest.mark.parametrize(
"provider, expected",
(
("discord", DiscordOauthHelper),
("GiThUb", GitHubOauthHelper),
("acme", None),
),
)
def test_get_helper(provider: str, expected: Optional[BaseOauthHelper]) -> None:
actual = get_helper(provider)
assert type(actual) is type(expected) | null |
5,865 | from __future__ import annotations
import pickle
import pytest
from dxtbx.model import ExperimentList
from libtbx import phil
from dials.command_line.dials_import import do_import
from dials.command_line.dials_import import phil_scope as import_phil_scope
from dials.command_line.generate_mask import generate_mask, phil_scope
@pytest.fixture(
params=[
{
"directory": "centroid_test_data",
"filename": "imported_experiments.json",
"masks": ["pixels.mask"],
},
{
"directory": "l_cysteine_dials_output",
"filename": "imported.expt",
"masks": ["pixels_%d.mask" % (i + 1) for i in range(4)],
},
],
ids=["One sequence", "Four sequences"],
)
def experiments_masks(request, dials_data):
filename = (
dials_data(request.param["directory"], pathlib=True) / request.param["filename"]
)
return ExperimentList.from_file(filename), request.param["masks"]
def test_generate_mask(experiments_masks, run_in_tmp_path):
experiments, masks = experiments_masks
params = phil_scope.fetch().extract()
generate_mask(experiments, params)
assert all(run_in_tmp_path.joinpath(mask).is_file() for mask in masks)
def test_generate_mask_with_untrusted_rectangle(
experiments_masks, tmp_path, monkeypatch
):
experiments, masks = experiments_masks
params = phil_scope.fetch(
phil.parse("untrusted.rectangle=100,200,100,200")
).extract()
params.output.experiments = "masked.expt"
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
assert (tmp_path / "masked.expt").is_file()
experiments = ExperimentList.from_file(tmp_path / "masked.expt")
imageset = experiments.imagesets()[0]
assert imageset.external_lookup.mask.filename == str(tmp_path / masks[0])
def test_generate_mask_with_untrusted_circle(experiments_masks, tmp_path, monkeypatch):
experiments, masks = experiments_masks
params = phil_scope.fetch(phil.parse("untrusted.circle=100,100,10")).extract()
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
def test_generate_mask_with_resolution_range(experiments_masks, tmp_path, monkeypatch):
experiments, masks = experiments_masks
params = phil_scope.fetch().extract()
params.resolution_range = [(2, 3)]
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
def test_generate_mask_with_d_min_d_max(experiments_masks, tmp_path, monkeypatch):
experiments, masks = experiments_masks
params = phil_scope.fetch().extract()
params.d_min = 2
params.d_max = 3
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
def test_generate_mask_with_d_max_and_beam_at_pixel_centre(
experiments_masks, tmp_path, monkeypatch
):
# https://github.com/dials/dials/issues/2322
experiments, masks = experiments_masks
params = phil_scope.fetch().extract()
params.d_max = 20
# Modify experiment to put beam in the centre of a pixel
beam = experiments[0].beam
panel = experiments[0].detector[0]
px_size = 0.1
panel.set_pixel_size((px_size, px_size)) # ensure this is exact
beam.set_s0((0, 0, -1))
new_origin = (-1235.5 * px_size, 1279.5 * px_size, -190)
panel.set_frame((1, 0, 0), (0, -1, 0), new_origin)
assert (panel.get_beam_centre_px(beam.get_s0())) == (1235.5, 1279.5)
params = phil_scope.fetch().extract()
params.d_max = 10
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
def METHOD_NAME(experiments_masks, tmp_path, monkeypatch):
experiments, masks = experiments_masks
params = phil_scope.fetch().extract()
params.ice_rings.filter = True
params.ice_rings.d_min = 2
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
def test_generate_mask_with_untrusted_polygon_and_pixels(
experiments_masks, tmp_path, monkeypatch
):
experiments, masks = experiments_masks
params = phil_scope.fetch(
phil.parse(
"""
untrusted {
polygon = 100 100 100 200 200 200 200 100
}
untrusted {
pixel = 0 0
}
untrusted {
pixel = 1 1
}"""
)
).extract()
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
with (tmp_path / masks[0]).open("rb") as fh:
mask = pickle.load(fh)
assert not mask[0][0, 0]
assert not mask[0][1, 1]
assert mask[0][0, 1]
def test_generate_mask_function_with_untrusted_rectangle(experiments_masks, tmp_path):
experiments, masks = experiments_masks
masks = [tmp_path / mask.replace("pixels", "pixels4") for mask in masks]
params = phil_scope.fetch().extract()
params.output.mask = str(tmp_path / "pixels4.mask")
params.output.experiments = str(tmp_path / "masked.expt")
params.untrusted.rectangle = [100, 200, 100, 200]
generate_mask(experiments, params)
assert all(mask.is_file() for mask in masks)
assert (tmp_path / "masked.expt").is_file()
experiments = ExperimentList.from_file(tmp_path / "masked.expt")
associated_masks = [
imageset.external_lookup.mask.filename for imageset in experiments.imagesets()
]
assert all(
assoc_mask == str(mask) for assoc_mask, mask in zip(associated_masks, masks)
)
def test_generate_mask_trusted_range(dials_data, tmp_path, monkeypatch):
# https://github.com/dials/dials/issues/978
image_files = sorted(
str(f) for f in dials_data("x4wide", pathlib=True).glob("*.cbf")
)
monkeypatch.chdir(tmp_path)
# Import as usual
do_import(
["output.experiments=no-overloads.expt"] + image_files,
phil=import_phil_scope,
)
experiments = ExperimentList.from_file(tmp_path / "no-overloads.expt")
params = phil_scope.fetch(
phil.parse("untrusted.rectangle=100,200,100,200")
).extract()
params.output.mask = "pixels1.mask"
generate_mask(experiments, params)
# Import with narrow trusted range to produce overloads
do_import(
["trusted_range=0,100", "output.experiments=overloads.expt"] + image_files,
phil=import_phil_scope,
)
experiments = ExperimentList.from_file(tmp_path / "overloads.expt")
params = phil_scope.fetch(
phil.parse("untrusted.rectangle=100,200,100,200")
).extract()
params.output.mask = "pixels2.mask"
generate_mask(experiments, params)
with (tmp_path / "pixels1.mask").open("rb") as fh:
mask1 = pickle.load(fh)
with (tmp_path / "pixels2.mask").open("rb") as fh:
mask2 = pickle.load(fh)
# Overloads should not be included in the mask
assert (mask1[0] == mask2[0]).all_eq(True)
def test_generate_whole_panel_mask(experiments_masks, tmp_path, monkeypatch):
experiments, masks = experiments_masks
params = phil_scope.fetch(
phil.parse(
"""
untrusted {
panel = 0
}
"""
)
).extract()
monkeypatch.chdir(tmp_path)
generate_mask(experiments, params)
assert all((tmp_path / mask).is_file() for mask in masks)
with (tmp_path / masks[0]).open("rb") as fh:
mask = pickle.load(fh)
assert mask[0].count(False) == len(mask[0]) | null |
5,866 | from sympy.core.expr import unchanged
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.sets.contains import Contains
from sympy.sets.fancysets import Interval
from sympy.sets.powerset import PowerSet
from sympy.sets.sets import FiniteSet
from sympy.testing.pytest import raises, XFAIL
def test_powerset_creation():
assert unchanged(PowerSet, FiniteSet(1, 2))
assert unchanged(PowerSet, S.EmptySet)
raises(ValueError, lambda: PowerSet(123))
assert unchanged(PowerSet, S.Reals)
assert unchanged(PowerSet, S.Integers)
def METHOD_NAME():
assert PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) == \
FiniteSet(S.EmptySet, FiniteSet(1), FiniteSet(2), FiniteSet(1, 2))
assert PowerSet(S.EmptySet).rewrite(FiniteSet) == FiniteSet(S.EmptySet)
assert PowerSet(S.Naturals).rewrite(FiniteSet) == PowerSet(S.Naturals)
def test_finiteset_rewrite_powerset():
assert FiniteSet(S.EmptySet).rewrite(PowerSet) == PowerSet(S.EmptySet)
assert FiniteSet(
S.EmptySet, FiniteSet(1),
FiniteSet(2), FiniteSet(1, 2)).rewrite(PowerSet) == \
PowerSet(FiniteSet(1, 2))
assert FiniteSet(1, 2, 3).rewrite(PowerSet) == FiniteSet(1, 2, 3)
def test_powerset__contains__():
subset_series = [
S.EmptySet,
FiniteSet(1, 2),
S.Naturals,
S.Naturals0,
S.Integers,
S.Rationals,
S.Reals,
S.Complexes]
l = len(subset_series)
for i in range(l):
for j in range(l):
if i <= j:
assert subset_series[i] in \
PowerSet(subset_series[j], evaluate=False)
else:
assert subset_series[i] not in \
PowerSet(subset_series[j], evaluate=False)
@XFAIL
def test_failing_powerset__contains__():
# XXX These are failing when evaluate=True,
# but using unevaluated PowerSet works fine.
assert FiniteSet(1, 2) not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Naturals not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Naturals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
assert S.Naturals0 not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Naturals0 not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
assert S.Integers not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Integers not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
assert S.Rationals not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Rationals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
assert S.Reals not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Reals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
assert S.Complexes not in PowerSet(S.EmptySet).rewrite(FiniteSet)
assert S.Complexes not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet)
def test_powerset__len__():
A = PowerSet(S.EmptySet, evaluate=False)
assert len(A) == 1
A = PowerSet(A, evaluate=False)
assert len(A) == 2
A = PowerSet(A, evaluate=False)
assert len(A) == 4
A = PowerSet(A, evaluate=False)
assert len(A) == 16
def test_powerset__iter__():
a = PowerSet(FiniteSet(1, 2)).__iter__()
assert next(a) == S.EmptySet
assert next(a) == FiniteSet(1)
assert next(a) == FiniteSet(2)
assert next(a) == FiniteSet(1, 2)
a = PowerSet(S.Naturals).__iter__()
assert next(a) == S.EmptySet
assert next(a) == FiniteSet(1)
assert next(a) == FiniteSet(2)
assert next(a) == FiniteSet(1, 2)
assert next(a) == FiniteSet(3)
assert next(a) == FiniteSet(1, 3)
assert next(a) == FiniteSet(2, 3)
assert next(a) == FiniteSet(1, 2, 3)
def test_powerset_contains():
A = PowerSet(FiniteSet(1), evaluate=False)
assert A.contains(2) == Contains(2, A)
x = Symbol('x')
A = PowerSet(FiniteSet(x), evaluate=False)
assert A.contains(FiniteSet(1)) == Contains(FiniteSet(1), A)
def test_powerset_method():
# EmptySet
A = FiniteSet()
pset = A.powerset()
assert len(pset) == 1
assert pset == FiniteSet(S.EmptySet)
# FiniteSets
A = FiniteSet(1, 2)
pset = A.powerset()
assert len(pset) == 2**len(A)
assert pset == FiniteSet(FiniteSet(), FiniteSet(1),
FiniteSet(2), A)
# Not finite sets
A = Interval(0, 1)
assert A.powerset() == PowerSet(A)
def test_is_subset():
# covers line 101-102
# initialize powerset(1), which is a subset of powerset(1,2)
subset = PowerSet(FiniteSet(1))
pset = PowerSet(FiniteSet(1, 2))
bad_set = PowerSet(FiniteSet(2, 3))
# assert "subset" is subset of pset == True
assert subset.is_subset(pset)
# assert "bad_set" is subset of pset == False
assert not pset.is_subset(bad_set) | null |
5,867 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test setitem operation for tuple/list with variable index or dynamic length sequence"""
import pytest
from mindspore.common import mutable
from mindspore.ops import functional as F
from mindspore import jit
from mindspore import context
context.set_context(mode=context.GRAPH_MODE)
def test_setitem_dynamic_length_list_constant_index():
"""
Feature: Setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = mutable([1, 2, 3, 4], True)
a[0] = 20
return isinstance(a, list), F.is_sequence_shape_unknown(a)
ret1, ret2 = foo()
assert ret1
assert ret2
def test_setitem_dynamic_length_list_constant_index_2():
"""
Feature: Setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: Raise TypeError.
"""
@jit
def foo():
a = mutable([1, 2, 3, 4], True)
a[0] = 1.0
return a
with pytest.raises(TypeError) as ex:
foo()
assert "element within dynamic length sequence" in str(ex.value)
def test_setitem_constant_length_list_variable_index():
"""
Feature: Setitem operation including variable.
Description: setitem for constant length list and dynamic index return constant length list.
Expectation: No exception.
"""
@jit
def foo():
a = [1, 2]
index = mutable(0)
a[index] = 10
return isinstance(a, list), F.isconstant(a[0]), F.isconstant(a[1])
ret1, ret2, ret3 = foo()
assert ret1
assert not ret2
assert not ret3
def METHOD_NAME():
"""
Feature: Setitem operation including variable.
Description: setitem for constant length list and dynamic index return constant length list.
Expectation: Raise TypeError.
"""
@jit
def foo():
a = [1, 2.0]
index = mutable(0)
a[index] = 10
return a
with pytest.raises(TypeError) as ex:
foo()
assert "sequence[0] item" in str(ex.value)
def test_setitem_constant_length_list_variable_index_3():
"""
Feature: Setitem operation including variable.
Description: setitem for constant length list and dynamic index return constant length list.
Expectation: Raise TypeError.
"""
@jit
def foo():
a = [1, 2]
index = mutable(0)
a[index] = 1.0
return a
with pytest.raises(TypeError) as ex:
foo()
assert "element within constant length sequence" in str(ex.value)
def test_slice_setitem_dynamic_length_list():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = mutable([1, 2, 3, 4], True)
a[0:2] = [2, 3, 4, 5, 6]
return isinstance(a, list), F.is_sequence_shape_unknown(a)
ret1, ret2 = foo()
assert ret1
assert ret2
def test_slice_setitem_dynamic_length_list_2():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: Raise ValueError.
"""
@jit
def foo():
a = mutable([1, 2, 3, 4], True)
a[0:2] = [2, 3, 4.0, 5]
return a
with pytest.raises(ValueError) as ex:
foo()
assert "The element type do not match, can not convert to dynamic length sequence." in str(ex.value)
def test_slice_setitem_dynamic_length_target():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = [1, 2, 3, 4]
a[0:2] = mutable([1, 2, 3, 4], True)
return isinstance(a, list), F.is_sequence_shape_unknown(a)
ret1, ret2 = foo()
assert ret1
assert ret2
def test_slice_setitem_dynamic_length_target_2():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = [1, 2, 3, 4.0]
a[0:2] = mutable([1, 2, 3, 4], True)
return a
with pytest.raises(ValueError) as ex:
foo()
assert "The element type do not match, can not convert to dynamic length sequence." in str(ex.value)
def test_slice_setitem_dynamic_slice():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = [1, 2, 3, 4]
start = mutable(0)
a[start:2] = [1, 2, 3, 4]
return isinstance(a, list), F.is_sequence_shape_unknown(a)
ret1, ret2 = foo()
assert ret1
assert ret2
def test_slice_setitem_dynamic_slice_2():
"""
Feature: Slice setitem operation including variable.
Description: setitem for dynamic length list and constant index return dynamic length list.
Expectation: No exception.
"""
@jit
def foo():
a = [1.0, 2.0, 3.0, 4.0]
start = mutable(0)
a[start:2] = [1, 2, 3, 4]
return a
with pytest.raises(TypeError) as ex:
foo()
assert "element within origin sequence" in str(ex.value) | null |
5,868 | # -*- coding: utf-8 -*-
# Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import (
AlgorithmFactory,
DataProcessorAlgorithm,
MatrixWorkspaceProperty,
MatrixWorkspace,
PropertyMode,
WorkspaceGroup,
WorkspaceProperty,
)
from mantid.kernel import CompositeValidator, StringArrayLengthValidator, StringArrayMandatoryValidator, StringArrayProperty, Direction
class ReflectometryISISPreprocess(DataProcessorAlgorithm):
_RUNS = "InputRunList"
_GROUP_TOF = "GroupTOFWorkspaces"
_OUTPUT_WS = "OutputWorkspace"
_MONITOR_WS = "MonitorWorkspace"
_EVENT_MODE = "EventMode"
_CALIBRATION_FILE = "CalibrationFile"
def __init__(self):
"""Initialize an instance of the algorithm."""
DataProcessorAlgorithm.__init__(self)
def category(self):
"""Return the categories of the algorithm."""
return "Reflectometry\\ISIS;Workflow\\Reflectometry"
def name(self):
"""Return the name of the algorithm."""
return "ReflectometryISISPreprocess"
def summary(self):
"""Return a summary of the algorithm."""
return "Preprocess ISIS reflectometry data, including optional loading and summing of the input runs."
def seeAlso(self):
"""Return a list of related algorithm names."""
return ["ReflectometryISISLoadAndProcess", "ReflectometryReductionOneAuto"]
def PyInit(self):
self.declareProperty(
StringArrayProperty(self._RUNS, values=[], validator=self.METHOD_NAME()),
doc="A list of run numbers or workspace names to load and preprocess",
)
self.declareProperty(self._EVENT_MODE, False, direction=Direction.Input, doc="If true, load the input workspaces as event data")
self.declareProperty(
WorkspaceProperty(self._OUTPUT_WS, "", direction=Direction.Output),
doc="The preprocessed output workspace. If multiple input runs are specified "
"they will be summed into a single output workspace.",
)
self.declareProperty(
MatrixWorkspaceProperty(self._MONITOR_WS, "", direction=Direction.Output, optional=PropertyMode.Optional),
doc="The loaded monitors workspace. This is only output in event mode.",
)
self.copyProperties("ReflectometryISISCalibration", [self._CALIBRATION_FILE])
def PyExec(self):
workspace, monitor_ws = self._loadRun(self.getPropertyValue(self._RUNS))
calibration_file = self.getPropertyValue(self._CALIBRATION_FILE)
if calibration_file:
workspace = self._applyCalibration(workspace, calibration_file)
self.setProperty(self._OUTPUT_WS, workspace)
if monitor_ws:
self.setProperty(self._MONITOR_WS, monitor_ws)
@staticmethod
def METHOD_NAME():
mandatoryInputRuns = CompositeValidator()
mandatoryInputRuns.add(StringArrayMandatoryValidator())
lenValidator = StringArrayLengthValidator()
lenValidator.setLengthMin(1)
mandatoryInputRuns.add(lenValidator)
return mandatoryInputRuns
def _loadRun(self, run: str) -> MatrixWorkspace:
"""Load a run as an event workspace if slicing is requested, or a histogram
workspace otherwise. Transmission runs are always loaded as histogram workspaces."""
event_mode = self.getProperty(self._EVENT_MODE).value
monitor_ws = None
if event_mode:
alg = self.createChildAlgorithm("LoadEventNexus", Filename=run, LoadMonitors=True)
alg.execute()
ws = alg.getProperty("OutputWorkspace").value
monitor_ws = alg.getProperty("MonitorWorkspace").value
self._validate_event_ws(ws)
self.log().information("Loaded event workspace")
else:
alg = self.createChildAlgorithm("LoadNexus", Filename=run)
alg.execute()
ws = alg.getProperty("OutputWorkspace").value
self.log().information("Loaded workspace ")
return ws, monitor_ws
def _applyCalibration(self, ws: MatrixWorkspace, calibration_filepath: str) -> MatrixWorkspace:
if isinstance(ws, WorkspaceGroup):
raise RuntimeError("Calibrating a Workspace Group as part of pre-processing is not currently supported")
alg = self.createChildAlgorithm("ReflectometryISISCalibration")
alg.setProperty("InputWorkspace", ws)
alg.setProperty("CalibrationFile", calibration_filepath)
alg.execute()
calibrated_ws = alg.getProperty("OutputWorkspace").value
self.log().information("Calibrated workspace")
return calibrated_ws
@staticmethod
def _validate_event_ws(workspace):
if isinstance(workspace, WorkspaceGroup):
# Our reduction algorithm doesn't currently support this due to slicing
# (which would result in a group of groups)
raise RuntimeError("Loading Workspace Groups in event mode is not supported currently.")
if not workspace.run().hasProperty("proton_charge"):
# Reduction algorithm requires proton_charge
raise RuntimeError("Event workspaces must contain proton_charge")
AlgorithmFactory.subscribe(ReflectometryISISPreprocess) | null |
5,869 | import http
from typing import FrozenSet, Optional
from fastapi import FastAPI, Path, Query
app = FastAPI()
@app.api_route("/api_route")
def non_operation():
return {"message": "Hello World"}
def non_decorated_route():
return {"message": "Hello World"}
app.add_api_route("/non_decorated_route", non_decorated_route)
@app.get("/text")
def get_text():
return "Hello World"
@app.get("/path/{item_id}")
def get_id(item_id):
return item_id
@app.get("/path/str/{item_id}")
def get_str_id(item_id: str):
return item_id
@app.get("/path/int/{item_id}")
def get_int_id(item_id: int):
return item_id
@app.get("/path/float/{item_id}")
def get_float_id(item_id: float):
return item_id
@app.get("/path/bool/{item_id}")
def get_bool_id(item_id: bool):
return item_id
@app.get("/path/param/{item_id}")
def get_path_param_id(item_id: Optional[str] = Path()):
return item_id
@app.get("/path/param-minlength/{item_id}")
def get_path_param_min_length(item_id: str = Path(min_length=3)):
return item_id
@app.get("/path/param-maxlength/{item_id}")
def METHOD_NAME(item_id: str = Path(max_length=3)):
return item_id
@app.get("/path/param-min_maxlength/{item_id}")
def get_path_param_min_max_length(item_id: str = Path(max_length=3, min_length=2)):
return item_id
@app.get("/path/param-gt/{item_id}")
def get_path_param_gt(item_id: float = Path(gt=3)):
return item_id
@app.get("/path/param-gt0/{item_id}")
def get_path_param_gt0(item_id: float = Path(gt=0)):
return item_id
@app.get("/path/param-ge/{item_id}")
def get_path_param_ge(item_id: float = Path(ge=3)):
return item_id
@app.get("/path/param-lt/{item_id}")
def get_path_param_lt(item_id: float = Path(lt=3)):
return item_id
@app.get("/path/param-lt0/{item_id}")
def get_path_param_lt0(item_id: float = Path(lt=0)):
return item_id
@app.get("/path/param-le/{item_id}")
def get_path_param_le(item_id: float = Path(le=3)):
return item_id
@app.get("/path/param-lt-gt/{item_id}")
def get_path_param_lt_gt(item_id: float = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge/{item_id}")
def get_path_param_le_ge(item_id: float = Path(le=3, ge=1)):
return item_id
@app.get("/path/param-lt-int/{item_id}")
def get_path_param_lt_int(item_id: int = Path(lt=3)):
return item_id
@app.get("/path/param-gt-int/{item_id}")
def get_path_param_gt_int(item_id: int = Path(gt=3)):
return item_id
@app.get("/path/param-le-int/{item_id}")
def get_path_param_le_int(item_id: int = Path(le=3)):
return item_id
@app.get("/path/param-ge-int/{item_id}")
def get_path_param_ge_int(item_id: int = Path(ge=3)):
return item_id
@app.get("/path/param-lt-gt-int/{item_id}")
def get_path_param_lt_gt_int(item_id: int = Path(lt=3, gt=1)):
return item_id
@app.get("/path/param-le-ge-int/{item_id}")
def get_path_param_le_ge_int(item_id: int = Path(le=3, ge=1)):
return item_id
@app.get("/query")
def get_query(query):
return f"foo bar {query}"
@app.get("/query/optional")
def get_query_optional(query=None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int")
def get_query_type(query: int):
return f"foo bar {query}"
@app.get("/query/int/optional")
def get_query_type_optional(query: Optional[int] = None):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/int/default")
def get_query_type_int_default(query: int = 10):
return f"foo bar {query}"
@app.get("/query/param")
def get_query_param(query=Query(default=None)):
if query is None:
return "foo bar"
return f"foo bar {query}"
@app.get("/query/param-required")
def get_query_param_required(query=Query()):
return f"foo bar {query}"
@app.get("/query/param-required/int")
def get_query_param_required_type(query: int = Query()):
return f"foo bar {query}"
@app.get("/enum-status-code", status_code=http.HTTPStatus.CREATED)
def get_enum_status_code():
return "foo bar"
@app.get("/query/frozenset")
def get_query_type_frozenset(query: FrozenSet[int] = Query(...)):
return ",".join(map(str, sorted(query))) | null |
5,870 | # Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""internal utility functions"""
from __future__ import absolute_import
import types
from mindspore.common import Tensor
from mindspore.ops import functional as F
from mindspore.common import dtype as mstype
from mindspore.numpy.utils_const import _tile_size, _add_unit_axes, _raise_type_error, _type_convert, \
_tuple_setitem, _callable_const, _check_is_float, _get_device
def _deep_list(array_like):
"""convert nested tuple/list mixtures to pure nested list"""
if isinstance(array_like, (list, tuple)):
return list(map(_deep_list, array_like))
return array_like
def _deep_tensor_to_nparray(array_like):
"""
convert a nested list of tensor to nested list of np_array.
Args:
array_like(list(tensor)): In any format of nested lists that may contain
tensors.
Returns:
array_like(list(np_array)): Formatted array that can be directly processed
by numpy.array(), with all tensor elements converted to numpy_array.
"""
# Recursively check whether each element is a tensor or not, if is tensor,
# convert it to a numpy array in place
if isinstance(array_like, Tensor):
return array_like.asnumpy()
if isinstance(array_like, list):
for idx, value in enumerate(array_like):
array_like[idx] = _deep_tensor_to_nparray(value)
return array_like
def _check_input_for_asarray(array_like):
"""check whether array_like argument is a valid type for np.asarray conversion"""
if not isinstance(array_like, (Tensor, list, tuple, int, float, bool)):
_raise_type_error("input data must be `int`, `float`, `bool`, `Tensor`, `list`, `tuple`, but got ", array_like)
def _is_scalar(shape):
"""check whether input shape is a scalar"""
return F.shape_mul(shape) == 1
def _convert_list_tensor_to_tuple_tensor(list_of_tensor):
"""Convert a list of tensor to a tuple of tensor"""
if isinstance(list_of_tensor, list):
tuple_of_tensor = ()
for tensor in list_of_tensor:
tuple_of_tensor += (tensor,)
return tuple_of_tensor
return list_of_tensor
def METHOD_NAME(x, ndim, axis=0):
"""Expand x to ndim from axis, which can be 0 or -1."""
shape = _add_unit_axes(F.shape(x), ndim, axis == -1)
return F.reshape(x, shape)
def _broadcast_to(x, shape_cur, shape_to, ndim_to):
"""Broadcasts x from shape_cur to shape_to."""
size = _tile_size(shape_cur, shape_to, ndim_to)
return F.tile(x, size)
def _broadcast_to_shape(x, shape):
"""Broadcasts x from current shape to shape"""
ndim_to = len(shape)
x = METHOD_NAME(x, ndim_to)
return _broadcast_to(x, F.shape(x), shape, ndim_to)
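# Illustrative note (added for clarity, not part of the original module):
# broadcasting a tensor of shape (3,) to (2, 3) with _broadcast_to_shape first
# pads the shape to (1, 3) via the axis-expansion helper above, then tiles the
# result with multiples (2, 1).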
def _get_size(x, axis=None):
"""Get the number of elements along the given axis of tensor x."""
if axis is None or F.tuple_len(axis) == 0:
axis = F.make_range(x.ndim)
nums = 1
for ax in axis:
nums *= x.shape[ax]
return nums
def _check_input_tensor(*tensors):
for tensor in tensors:
if not isinstance(tensor, Tensor):
_raise_type_error('expect Tensor, but got ', F.typeof(tensor))
return True
def _convert_64_to_32(tensor):
"""Convert tensor with float64/int64 types to float32/int32."""
if tensor.dtype == mstype.float64:
return tensor.astype("float32")
if tensor.dtype == mstype.int64:
return tensor.astype("int32")
return tensor
def _to_tensor(*args):
"""Returns each input as Tensor"""
res = ()
for arg in args:
if isinstance(arg, (int, float, bool, list, tuple)):
arg = _convert_64_to_32(_type_convert(Tensor, arg))
elif not isinstance(arg, Tensor):
_raise_type_error("Expect input to be array like.")
res += (arg,)
if len(res) == 1:
return res[0]
return res
def _get_dtype_from_scalar(*input_numbers):
"""
Get the final dtype from series of input numbers, compared with F.typeof, we
return int32/float32 for python int/float instead.
"""
bool_flag = True
int_flag = True
for number in input_numbers:
if number is not None:
if not isinstance(number, bool):
bool_flag = False
if not isinstance(number, int):
int_flag = False
if bool_flag:
return mstype.bool_
if int_flag:
return mstype.int32
return mstype.float32
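# Minimal illustration (added for clarity, hypothetical inputs): the resolution
# order is bool -> int32 -> float32, e.g.
#   _get_dtype_from_scalar(True, False)  -> mstype.bool_
#   _get_dtype_from_scalar(True, 2)      -> mstype.int32
#   _get_dtype_from_scalar(2, 3.0)       -> mstype.float32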
def _convert_bool_to_int(tensor):
"""Convert tensor with bool type to int32."""
if tensor.dtype == mstype.bool_:
return tensor.astype("int32")
return tensor
def _slice_along_axis(f, axis, slice_start, slice_end):
"""
Slice a tensor along a given axis
Args:
f (Tensor): Input Tensor.
axis (int): Specified axis.
slice_start (int): The start of the slice.
slice_end (int): The end of the slice.
Returns:
Sliced tensor.
"""
index_start = (0,) * f.ndim
index_end = f.shape
slice_size = slice_end - slice_start
index_start = _tuple_setitem(index_start, axis, slice_start)
index_end = _tuple_setitem(index_end, axis, slice_size)
return F.tensor_slice(f, index_start, index_end)
def _to_tensor_origin_dtype(*args):
"""Returns each input as Tensor and remains original dtype."""
res = []
for arg in args:
if isinstance(arg, (int, float, bool, list, tuple)):
arg = _type_convert(Tensor, arg)
elif not isinstance(arg, Tensor):
_raise_type_error("Expect input to be array like.")
res.append(arg)
if len(res) == 1:
return res[0]
return res
def _callable(tensor, obj):
"""Returns True if `obj` is a function."""
if F.isconstant(tensor):
return isinstance(obj, types.FunctionType)
return _callable_const(F.typeof(obj))
def _isnan(x):
if _get_device() == 'Ascend' and not _check_is_float(F.dtype(x)):
return F.fill(mstype.bool_, F.shape(x), False)
return F.isnan(x) | null |
5,871 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Pocketcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the behavior of RPC importprivkey on set and unset labels of
addresses.
It tests different cases in which an address is imported with importaddress
with or without a label and then its private key is imported with importprivkey
with and without a label.
"""
from test_framework.test_framework import PocketcoinTestFramework
from test_framework.wallet_util import test_address
class ImportWithLabel(PocketcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def METHOD_NAME(self):
"""Main test logic"""
self.log.info(
"Test importaddress with label and importprivkey without label."
)
self.log.info("Import a watch-only address with a label.")
address = self.nodes[0].getnewaddress()
label = "Test Label"
self.nodes[1].importaddress(address, label)
test_address(self.nodes[1],
address,
iswatchonly=True,
ismine=False,
labels=[label])
self.log.info(
"Import the watch-only address's private key without a "
"label and the address should keep its label."
)
priv_key = self.nodes[0].dumpprivkey(address)
self.nodes[1].importprivkey(priv_key)
test_address(self.nodes[1], address, labels=[label])
self.log.info(
"Test importaddress without label and importprivkey with label."
)
self.log.info("Import a watch-only address without a label.")
address2 = self.nodes[0].getnewaddress()
self.nodes[1].importaddress(address2)
test_address(self.nodes[1],
address2,
iswatchonly=True,
ismine=False,
labels=[""])
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key2 = self.nodes[0].dumpprivkey(address2)
label2 = "Test Label 2"
self.nodes[1].importprivkey(priv_key2, label2)
test_address(self.nodes[1], address2, labels=[label2])
self.log.info("Test importaddress with label and importprivkey with label.")
self.log.info("Import a watch-only address with a label.")
address3 = self.nodes[0].getnewaddress()
label3_addr = "Test Label 3 for importaddress"
self.nodes[1].importaddress(address3, label3_addr)
test_address(self.nodes[1],
address3,
iswatchonly=True,
ismine=False,
labels=[label3_addr])
self.log.info(
"Import the watch-only address's private key with a "
"label and the address should have its label updated."
)
priv_key3 = self.nodes[0].dumpprivkey(address3)
label3_priv = "Test Label 3 for importprivkey"
self.nodes[1].importprivkey(priv_key3, label3_priv)
test_address(self.nodes[1], address3, labels=[label3_priv])
self.log.info(
"Test importprivkey won't label new dests with the same "
"label as others labeled dests for the same key."
)
self.log.info("Import a watch-only p2sh-segwit address with a label.")
address4 = self.nodes[0].getnewaddress("", "p2sh-segwit")
label4_addr = "Test Label 4 for importaddress"
self.nodes[1].importaddress(address4, label4_addr)
test_address(self.nodes[1],
address4,
iswatchonly=True,
ismine=False,
labels=[label4_addr],
embedded=None)
self.log.info(
"Import the watch-only address's private key without a "
"label and new destinations for the key should have an "
"empty label while the 'old' destination should keep "
"its label."
)
priv_key4 = self.nodes[0].dumpprivkey(address4)
self.nodes[1].importprivkey(priv_key4)
embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address']
test_address(self.nodes[1], embedded_addr, labels=[""])
test_address(self.nodes[1], address4, labels=[label4_addr])
self.stop_nodes()
if __name__ == "__main__":
ImportWithLabel().main() | null |
5,872 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright (c) 2018 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from sys import exit
from os.path import join, dirname, basename, realpath
from csv import DictReader as csv_dict_reader
from subprocess import Popen
from argparse import ArgumentParser as argument_parser
###############################################################################
def printable_cmd(c):
"""Converts a `list` of `str`s representing a shell command to a printable
`str`."""
return " ".join(map(lambda e: '"' + str(e) + '"', c))
###############################################################################
def METHOD_NAME(p):
"""Open the path `p` and print its contents to `stdout`."""
print "********************************************************************************"
with open(p) as f:
for line in f:
print line,
print "********************************************************************************"
###############################################################################
ap = argument_parser(
description = (
"CUDA Eris driver script: runs a benchmark suite multiple times, combines "
"the results, and outputs them in the CUDA Eris performance result format."
)
)
ap.add_argument(
"-b", "--benchmark",
help = ("The location of the benchmark suite executable to run."),
type = str,
default = join(dirname(realpath(__file__)), "bench"),
metavar = "R"
)
ap.add_argument(
"-p", "--postprocess",
help = ("The location of the postprocessing script to run to combine the "
"results."),
type = str,
default = join(dirname(realpath(__file__)), "combine_benchmark_results.py"),
metavar = "R"
)
ap.add_argument(
"-r", "--runs",
  help = ("Run the benchmark suite `R` times."),
type = int, default = 5,
metavar = "R"
)
args = ap.parse_args()
if args.runs <= 0:
print "ERROR: `--runs` must be greater than `0`."
ap.print_help()
exit(1)
BENCHMARK_EXE = args.benchmark
BENCHMARK_NAME = basename(BENCHMARK_EXE)
POSTPROCESS_EXE = args.postprocess
OUTPUT_FILE_NAME = lambda i: BENCHMARK_NAME + "_" + str(i) + ".csv"
COMBINED_OUTPUT_FILE_NAME = BENCHMARK_NAME + "_combined.csv"
###############################################################################
print '&&&& RUNNING {0}'.format(BENCHMARK_NAME)
print '#### RUNS {0}'.format(args.runs)
###############################################################################
print '#### CMD {0}'.format(BENCHMARK_EXE)
for i in xrange(args.runs):
with open(OUTPUT_FILE_NAME(i), "w") as output_file:
print '#### RUN {0} OUTPUT -> {1}'.format(i, OUTPUT_FILE_NAME(i))
p = None
try:
p = Popen(BENCHMARK_EXE, stdout = output_file, stderr = output_file)
p.communicate()
except OSError as ex:
METHOD_NAME(OUTPUT_FILE_NAME(i))
print '#### ERROR Caught OSError `{0}`.'.format(ex)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(-1)
METHOD_NAME(OUTPUT_FILE_NAME(i))
if p.returncode != 0:
print '#### ERROR Process exited with code {0}.'.format(p.returncode)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(p.returncode)
###############################################################################
post_cmd = [POSTPROCESS_EXE]
# Add dependent variable options.
post_cmd += ["-dSTL Average Walltime,STL Walltime Uncertainty,STL Trials"]
post_cmd += ["-dSTL Average Throughput,STL Throughput Uncertainty,STL Trials"]
post_cmd += ["-dThrust Average Walltime,Thrust Walltime Uncertainty,Thrust Trials"]
post_cmd += ["-dThrust Average Throughput,Thrust Throughput Uncertainty,Thrust Trials"]
post_cmd += [OUTPUT_FILE_NAME(i) for i in range(args.runs)]
print '#### CMD {0}'.format(printable_cmd(post_cmd))
with open(COMBINED_OUTPUT_FILE_NAME, "w") as output_file:
p = None
try:
p = Popen(post_cmd, stdout = output_file, stderr = output_file)
p.communicate()
except OSError as ex:
METHOD_NAME(COMBINED_OUTPUT_FILE_NAME)
print '#### ERROR Caught OSError `{0}`.'.format(ex)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(-1)
METHOD_NAME(COMBINED_OUTPUT_FILE_NAME)
if p.returncode != 0:
print '#### ERROR Process exited with code {0}.'.format(p.returncode)
print '&&&& FAILED {0}'.format(BENCHMARK_NAME)
exit(p.returncode)
with open(COMBINED_OUTPUT_FILE_NAME) as input_file:
reader = csv_dict_reader(input_file)
variable_units = reader.next() # Get units header row.
distinguishing_variables = reader.fieldnames
measured_variables = [
("STL Average Throughput", "+"),
("Thrust Average Throughput", "+")
]
for record in reader:
for variable, directionality in measured_variables:
# Don't monitor regressions for STL implementations, nvbug 28980890:
if "STL" in variable:
continue
print "&&&& PERF {0}_{1}_{2}bit_{3}mib_{4} {5} {6}{7}".format(
record["Algorithm"],
record["Element Type"],
record["Element Size"],
record["Total Input Size"],
variable.replace(" ", "_").lower(),
record[variable],
directionality,
variable_units[variable]
)
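            # Example of an emitted line (hypothetical values, for illustration
            # only): "&&&& PERF reduce_i32_32bit_256mib_thrust_average_throughput
            # 12.5 +GB/s"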
###############################################################################
print '&&&& PASSED {0}'.format(BENCHMARK_NAME)
| null |
5,873 | """Implementation of :class:`FiniteField` class. """
from sympy.polys.domains.field import Field
from sympy.polys.domains.modularinteger import ModularIntegerFactory
from sympy.polys.domains.simpledomain import SimpleDomain
from sympy.polys.galoistools import gf_zassenhaus, gf_irred_p_rabin
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
from sympy.polys.domains.groundtypes import SymPyInteger
@public
class FiniteField(Field, SimpleDomain):
r"""Finite field of prime order :ref:`GF(p)`
A :ref:`GF(p)` domain represents a `finite field`_ `\mathbb{F}_p` of prime
order as :py:class:`~.Domain` in the domain system (see
:ref:`polys-domainsintro`).
A :py:class:`~.Poly` created from an expression with integer
coefficients will have the domain :ref:`ZZ`. However, if the ``modulus=p``
option is given then the domain will be a finite field instead.
>>> from sympy import Poly, Symbol
>>> x = Symbol('x')
>>> p = Poly(x**2 + 1)
>>> p
Poly(x**2 + 1, x, domain='ZZ')
>>> p.domain
ZZ
>>> p2 = Poly(x**2 + 1, modulus=2)
>>> p2
Poly(x**2 + 1, x, modulus=2)
>>> p2.domain
GF(2)
It is possible to factorise a polynomial over :ref:`GF(p)` using the
modulus argument to :py:func:`~.factor` or by specifying the domain
explicitly. The domain can also be given as a string.
>>> from sympy import factor, GF
>>> factor(x**2 + 1)
x**2 + 1
>>> factor(x**2 + 1, modulus=2)
(x + 1)**2
>>> factor(x**2 + 1, domain=GF(2))
(x + 1)**2
>>> factor(x**2 + 1, domain='GF(2)')
(x + 1)**2
It is also possible to use :ref:`GF(p)` with the :py:func:`~.cancel`
and :py:func:`~.gcd` functions.
>>> from sympy import cancel, gcd
>>> cancel((x**2 + 1)/(x + 1))
(x**2 + 1)/(x + 1)
>>> cancel((x**2 + 1)/(x + 1), domain=GF(2))
x + 1
>>> gcd(x**2 + 1, x + 1)
1
>>> gcd(x**2 + 1, x + 1, domain=GF(2))
x + 1
When using the domain directly :ref:`GF(p)` can be used as a constructor
to create instances which then support the operations ``+,-,*,**,/``
>>> from sympy import GF
>>> K = GF(5)
>>> K
GF(5)
>>> x = K(3)
>>> y = K(2)
>>> x
3 mod 5
>>> y
2 mod 5
>>> x * y
1 mod 5
>>> x / y
4 mod 5
Notes
=====
It is also possible to create a :ref:`GF(p)` domain of **non-prime**
order but the resulting ring is **not** a field: it is just the ring of
the integers modulo ``n``.
>>> K = GF(9)
>>> z = K(3)
>>> z
3 mod 9
>>> z**2
0 mod 9
It would be good to have a proper implementation of prime power fields
    (``GF(p**n)``) but these are not yet implemented in SymPy.
.. _finite field: https://en.wikipedia.org/wiki/Finite_field
"""
rep = 'FF'
alias = 'FF'
is_FiniteField = is_FF = True
is_Numerical = True
has_assoc_Ring = False
has_assoc_Field = True
dom = None
mod = None
def __init__(self, mod, symmetric=True):
from sympy.polys.domains import ZZ
dom = ZZ
if mod <= 0:
raise ValueError('modulus must be a positive integer, got %s' % mod)
self.dtype = ModularIntegerFactory(mod, dom, symmetric, self)
self.zero = self.dtype(0)
self.one = self.dtype(1)
self.dom = dom
self.mod = mod
def __str__(self):
return 'GF(%s)' % self.mod
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.mod, self.dom))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, FiniteField) and \
self.mod == other.mod and self.dom == other.dom
def characteristic(self):
"""Return the characteristic of this domain. """
return self.mod
def get_field(self):
"""Returns a field associated with ``self``. """
return self
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(int(a))
    def from_sympy(self, a):
        """Convert SymPy's Integer to ``dtype``. """
if a.is_Integer:
return self.dtype(self.dom.dtype(int(a)))
elif a.is_Float and int(a) == a:
return self.dtype(self.dom.dtype(int(a)))
else:
raise CoercionFailed("expected an integer, got %s" % a)
def from_FF(K1, a, K0=None):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ(a.val, K0.dom))
def from_FF_python(K1, a, K0=None):
"""Convert ``ModularInteger(int)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a.val, K0.dom))
def from_ZZ(K1, a, K0=None):
"""Convert Python's ``int`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a, K0))
def from_ZZ_python(K1, a, K0=None):
"""Convert Python's ``int`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_python(a, K0))
def from_QQ(K1, a, K0=None):
"""Convert Python's ``Fraction`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_python(a.numerator)
def from_QQ_python(K1, a, K0=None):
"""Convert Python's ``Fraction`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_python(a.numerator)
def from_FF_gmpy(K1, a, K0=None):
"""Convert ``ModularInteger(mpz)`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a.val, K0.dom))
def from_ZZ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpz`` to ``dtype``. """
return K1.dtype(K1.dom.from_ZZ_gmpy(a, K0))
def from_QQ_gmpy(K1, a, K0=None):
"""Convert GMPY's ``mpq`` to ``dtype``. """
if a.denominator == 1:
return K1.from_ZZ_gmpy(a.numerator)
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to ``dtype``. """
p, q = K0.to_rational(a)
if q == 1:
return K1.dtype(K1.dom.dtype(p))
def is_square(self, a):
"""Returns True if ``a`` is a quadratic residue modulo p. """
# a is not a square <=> x**2-a is irreducible
poly = [x.val for x in [self.one, self.zero, -a]]
return not gf_irred_p_rabin(poly, self.mod, self.dom)
def METHOD_NAME(self, a):
"""Square root modulo p of ``a`` if it is a quadratic residue.
Explanation
===========
Always returns the square root that is no larger than ``p // 2``.
"""
# x**2-a is not square-free if a=0 or the field is characteristic 2
if self.mod == 2 or a == 0:
return a
# Otherwise, use square-free factorization routine to factorize x**2-a
poly = [x.val for x in [self.one, self.zero, -a]]
for factor in gf_zassenhaus(poly, self.mod, self.dom):
if len(factor) == 2 and factor[1] <= self.mod // 2:
return self.dtype(factor[1])
return None
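    # Illustrative usage of the two quadratic-residue helpers above (added for
    # clarity, not in the original source): in GF(7) the squares are {1, 2, 4},
    # so with K = FiniteField(7), K.is_square(K(2)) is True (3**2 == 9 == 2 mod 7)
    # and the square-root helper returns 3, the root that is <= 7 // 2.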
FF = GF = FiniteField | null |
5,874 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.utils import server_utils
from regression import trigger_funcs_utils as fts_dict_funcs_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.schemas \
.fts_dictionaries.tests import utils as fts_dict_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fts_dictionaries_utils
class FTSDictionariesDependencyDependentTestCase(BaseTestGenerator):
""" This class will get the dependency and dependents FTS dictionaries
under test schema. """
scenarios = utils.generate_scenarios(
'dependency_dependent_fts_dictionaries',
fts_dictionaries_utils.test_cases
)
def METHOD_NAME(self):
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.extension_name = "postgres_fdw"
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.db_user = self.server["username"]
self.func_name = "fts_dictionaries_func_%s" % str(uuid.uuid4())[1:8]
self.fts_dictionaries_name = "fts_dictionaries_delete_%s" % (
str(uuid.uuid4())[1:8])
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add resource "
"groups.")
server_version = 0
if "type" in server_con["data"]:
if server_con["data"]["version"] < 90500:
message = "FTS Dictionaries are not supported by PG9.4 " \
"and PPAS9.4 and below."
self.skipTest(message)
self.function_info = fts_dict_funcs_utils.create_trigger_function(
self.server, self.db_name, self.schema_name, self.func_name,
server_version)
self.fts_dictionaries = fts_dictionaries_utils. \
create_fts_dictionary(
self.server, self.db_name, self.schema_name,
self.fts_dictionaries_name)
    def runTest(self):
        """ This function will get the dependency and dependents of the FTS dictionary under test schema. """
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
fts_dict_response = fts_dictionaries_utils.verify_fts_dict(
self.server, self.db_name, self.fts_dictionaries_name
)
if not fts_dict_response:
raise Exception("Could not find the FTS Dictionaries.")
if self.is_positive_test:
response = self.get_dependency_dependent()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def get_dependency_dependent(self):
"""
This function returns the fts dictionaries dependency and dependent
:return: fts dictionaries dependency and dependent
"""
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.schema_id) + '/' +
str(self.fts_dictionaries),
follow_redirects=True)
def tearDown(self):
"""This function delete the fts_dict and disconnect the test
database."""
fts_dict_utils.delete_fts_dictionaries(self.server, self.db_name,
self.schema_name,
self.fts_dictionaries_name)
database_utils.disconnect_database(self, self.server_id,
self.db_id) | null |
5,875 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, ops
from mindspore.ops import operations as P
class OpNetWrapper(nn.Cell):
def __init__(self, op):
super(OpNetWrapper, self).__init__()
self.op = op
def construct(self, *inputs):
return self.op(*inputs)
class GreaterFunc(nn.Cell):
def construct(self, *inputs):
return ops.gt(*inputs)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('dtype', [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64])
def test_greater_op_dtype_1(mode, dtype):
"""
Feature: Test Greater op.
Description: Test Greater with dtype input.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="GPU")
op = P.Greater()
op_wrapper = OpNetWrapper(op)
input_x = Tensor(np.array([1, -2, 3]).astype(dtype))
input_y = Tensor(np.array([3, 2, 1]).astype(dtype))
outputs = op_wrapper(input_x, input_y)
assert outputs.shape == (3,)
assert np.allclose(outputs.asnumpy(), [False, False, True])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('dtype', [np.uint8, np.uint16, np.uint32, np.uint64])
def test_greater_op_dtype_2(mode, dtype):
"""
Feature: Test Greater op.
Description: Test Greater with dtype input.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="GPU")
op = P.Greater()
op_wrapper = OpNetWrapper(op)
input_x = Tensor(np.array([1, 0, 3]).astype(dtype))
input_y = Tensor(np.array([3, 2, 1]).astype(dtype))
outputs = op_wrapper(input_x, input_y)
assert outputs.shape == (3,)
assert np.allclose(outputs.asnumpy(), [False, False, True])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
@pytest.mark.parametrize('dtype', [np.bool_])
def test_greater_op_dtype_3(mode, dtype):
"""
Feature: Test Greater op.
Description: Test Greater with dtype input.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="GPU")
op = P.Greater()
op_wrapper = OpNetWrapper(op)
input_x = Tensor(np.array([False, False, True]).astype(dtype))
input_y = Tensor(np.array([True, True, False]).astype(dtype))
outputs = op_wrapper(input_x, input_y)
assert outputs.shape == (3,)
assert np.allclose(outputs.asnumpy(), [False, False, True])
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def test_greater_op_functional(mode):
"""
Feature: Test Greater op.
Description: Test Greater with with functional.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="GPU")
op_wrapper = GreaterFunc()
input_x = Tensor(np.array([1, -2, 3]).astype(np.float32))
input_y = Tensor(np.array([3, 2, 1]).astype(np.float32))
outputs = op_wrapper(input_x, input_y)
assert outputs.shape == (3,)
assert np.allclose(outputs.asnumpy(), [False, False, True])
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize('mode', [context.GRAPH_MODE, context.PYNATIVE_MODE])
def METHOD_NAME(mode):
"""
Feature: Test Greater op.
Description: Test Greater with Tensor.
Expectation: The result match to the expect value.
"""
context.set_context(mode=mode, device_target="GPU")
input_x = Tensor(np.array([1, -2, 3]).astype(np.float32))
input_y = Tensor(np.array([3, 2, 1]).astype(np.float32))
outputs = input_x.gt(input_y)
assert outputs.shape == (3,)
assert np.allclose(outputs.asnumpy(), [False, False, True]) | null |
5,876 | import warnings
from typing import List, Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import VIDEO_MIMETYPE
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return VIDEO_MIMETYPE
@classmethod
def extra_extensions(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return []
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.METHOD_NAME(**kwargs)
return buffer.load()
def METHOD_NAME(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
the file and save it into an [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().METHOD_NAME(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
remote_url = True if self.startswith('http') else False
if remote_url:
from IPython.display import Video
b = self.METHOD_NAME()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.') | null |
5,877 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config management."""
import os
import posixpath
from common import benchmark_utils
from common import environment
from common import experiment_path as exp_path
DEFAULT_SNAPSHOT_SECONDS = 15 * 60 # Seconds.
CONFIG_DIR = 'config'
def get_internal_experiment_config_relative_path():
"""Returns the path of the internal config file relative to the data
directory of an experiment."""
return os.path.join(CONFIG_DIR, 'experiment.yaml')
def METHOD_NAME():
"""Returns the amount of time in seconds between snapshots of a
fuzzer's corpus during an experiment."""
return environment.get('SNAPSHOT_PERIOD', DEFAULT_SNAPSHOT_SECONDS)
def get_cycle_time(cycle):
"""Return time elapsed for a cycle."""
return cycle * METHOD_NAME()
def get_work_dir():
"""Returns work directory."""
return os.environ['WORK']
def get_experiment_name():
"""Returns experiment name."""
return os.environ['EXPERIMENT']
def get_experiment_folders_dir():
"""Returns experiment folders directory."""
return exp_path.path('experiment-folders')
def get_experiment_type(benchmarks):
"""Returns the experiment type based on the type of |benchmarks|, i.e.,
'code' or 'bug'.
Raises ValueError if the benchmark types are mixed.
"""
for benchmark_type in benchmark_utils.BenchmarkType:
type_value = benchmark_type.value
if all(
benchmark_utils.get_type(benchmark) == type_value
for benchmark in benchmarks):
return type_value
benchmark_types = ';'.join(
[f'{b}: {benchmark_utils.get_type(b)}' for b in benchmarks])
raise ValueError('Cannot mix bug benchmarks with code coverage benchmarks: '
f'{benchmark_types}.')
def get_cloud_project():
"""Returns the cloud project."""
return os.environ['CLOUD_PROJECT']
def get_experiment_filestore_path():
"""Returns experiment filestore path."""
experiment_filestore = os.environ['EXPERIMENT_FILESTORE']
experiment_name = get_experiment_name()
return posixpath.join(experiment_filestore, experiment_name)
def get_oss_fuzz_corpora_filestore_path():
"""Returns path containing OSS-Fuzz corpora for various fuzz targets."""
return posixpath.join(get_experiment_filestore_path(), 'oss_fuzz_corpora')
def get_custom_seed_corpora_filestore_path():
"""Returns path containing the user-provided seed corpora."""
return posixpath.join(get_experiment_filestore_path(),
'custom_seed_corpora')
def get_dispatcher_instance_name(experiment: str) -> str:
"""Returns a dispatcher instance name for an experiment."""
return f'd-{experiment}'
def get_trial_instance_name(experiment: str, trial_id: int) -> str:
"""Returns a unique instance name for each trial of an experiment."""
return f'r-{experiment}-{trial_id}'
def get_cycle_filename(basename: str, cycle: int) -> str:
"""Returns a filename for a file that is relevant to a particular snapshot
cycle."""
return f'{basename}-{cycle:04d}'
def get_corpus_archive_name(cycle: int) -> str:
"""Returns a corpus archive name given a cycle."""
return get_cycle_filename('corpus-archive', cycle) + '.tar.gz'
def get_stats_filename(cycle: int) -> str:
    """Returns a stats file name given a cycle."""
return get_cycle_filename('stats', cycle) + '.json'
def get_crash_metadata_filename(cycle: int) -> str:
"""Returns a crash metadata name given a cycle."""
return get_cycle_filename('crashes', cycle) + '.json'
def get_crashes_archive_name(cycle: int) -> str:
"""Returns a crashes archive name given a cycle."""
return get_cycle_filename('crashes', cycle) + '.tar.gz'
def is_local_experiment():
"""Returns True if running a local experiment."""
return bool(environment.get('LOCAL_EXPERIMENT'))
def get_trial_dir(fuzzer, benchmark, trial_id):
"""Returns the unique directory for |fuzzer|, |benchmark|, and
|trial_id|."""
benchmark_fuzzer_directory = get_benchmark_fuzzer_dir(benchmark, fuzzer)
trial_subdir = f'trial-{trial_id}'
return posixpath.join(benchmark_fuzzer_directory, trial_subdir)
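# Illustrative example (hypothetical values, added for clarity):
# get_trial_dir('afl', 'libpng', 7) returns 'libpng-afl/trial-7', and
# get_trial_bucket_dir joins the experiment filestore bucket, the experiment
# name and 'experiment-folders' in front of it.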
def get_benchmark_fuzzer_dir(benchmark, fuzzer):
"""Returns the directory for |benchmark| and |fuzzer|."""
return f'{benchmark}-{fuzzer}'
def get_trial_bucket_dir(fuzzer, benchmark, trial_id):
"""Returns the unique directory in experiment-folders int the bucket for
|fuzzer|, |benchmark|, and |trial_id|."""
bucket = os.environ['EXPERIMENT_FILESTORE']
return posixpath.join(bucket, get_experiment_name(), 'experiment-folders',
get_trial_dir(fuzzer, benchmark, trial_id)) | null |
5,878 | #!/usr/bin/env python
data = {
"default_prefix": "OSVC_COMP_GROUP_",
"example_value": """
{
"tibco": {
"gid": 1000,
},
"tibco1": {
"gid": 1001,
}
}
""",
"description": """* Verify a local system group configuration
* A minus (-) prefix to the group name indicates the group should not exist
""",
"form_definition": """
Desc: |
A rule defining a list of Unix groups and their properties. Used by the groups compliance objects.
Css: comp48
Outputs:
-
Dest: compliance variable
Type: json
Format: dict of dict
Key: group
EmbedKey: No
Class: group
Inputs:
-
Id: group
Label: Group name
DisplayModeLabel: group
LabelCss: guys16
Mandatory: Yes
Type: string
Help: The Unix group name.
-
Id: gid
Label: Group id
DisplayModeLabel: gid
LabelCss: guys16
Type: string or integer
Help: The Unix gid of this group.
""",
}
import os
import sys
import json
import grp
import re
from subprocess import Popen
sys.path.append(os.path.dirname(__file__))
from comp import *
blacklist = [
"root",
"bin",
"daemon",
"sys",
"adm",
"tty",
"disk",
"lp",
"mem",
"kmem",
"wheel",
"mail",
"uucp",
"man",
"games",
"gopher",
"video",
"dip",
"ftp",
"lock",
"audio",
"nobody",
"users",
"utmp",
"utempter",
"floppy",
"vcsa",
"cdrom",
"tape",
"dialout",
"saslauth",
"postdrop",
"postfix",
"sshd",
"opensvc",
"mailnull",
"smmsp",
"slocate",
"rpc",
"rpcuser",
"nfsnobody",
"tcpdump",
"ntp"
]
class CompGroup(CompObject):
def __init__(self, prefix=None):
CompObject.__init__(self, prefix=prefix, data=data)
def init(self):
self.grt = {
'gid': 'gr_gid',
}
self.groupmod_p = {
'gid': '-g',
}
self.sysname, self.nodename, x, x, self.machine = os.uname()
if self.sysname == "FreeBSD":
self.groupadd = ["pw", "groupadd"]
self.groupmod = ["pw", "groupmod"]
self.groupdel = ["pw", "groupdel"]
elif self.sysname == 'AIX':
self.groupmod = ['chgroup']
self.groupadd = ['mkgroup']
self.groupdel = ['rmgroup']
self.groupmod_p = {
'gid': 'id',
}
else:
self.groupadd = ["groupadd"]
self.groupmod = ["groupmod"]
self.groupdel = ["groupdel"]
if self.sysname not in ['SunOS', 'Linux', 'HP-UX', 'AIX', 'OSF1', 'FreeBSD']:
perror('group: module not supported on', self.sysname)
raise NotApplicable
self.groups = {}
for d in self.get_rules():
self.groups.update(d)
for group, d in self.groups.items():
for k in ('uid', 'gid'):
if k in d:
self.groups[group][k] = int(d[k])
def fixable(self):
return RET_NA
def fmt_opt_gen(self, item, target):
return [item, target]
def fmt_opt_aix(self, item, target):
return ['='.join((item, target))]
def fmt_opt(self, item, target):
if self.sysname == 'AIX':
return self.fmt_opt_aix(item, target)
else:
return self.fmt_opt_gen(item, target)
def fix_item(self, group, item, target):
if item in self.groupmod_p:
cmd = [] + self.groupmod
if self.sysname == "FreeBSD":
cmd += [group]
cmd += self.fmt_opt(self.groupmod_p[item], str(target))
if self.sysname != "FreeBSD":
cmd += [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
else:
perror('group: no fix implemented for', item)
return RET_ERR
def check_item(self, group, item, target, current, verbose=False):
if type(current) == int and current < 0:
current += 4294967296
if target == current:
if verbose:
pinfo('group', group, item+':', current)
return RET_OK
else:
if verbose:
perror('group', group, item+':', current, 'target:', target)
return RET_ERR
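    # Note (added for clarity): grp can report a gid as a negative number on
    # platforms where the C gid_t value wrapped around; adding 2**32
    # (4294967296) in check_item maps e.g. -2 back to 4294967294 before the
    # comparison with the target.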
def try_create_group(self, props):
#
# don't try to create group if passwd db is not 'files'
# beware: 'files' db is the implicit default
#
if 'db' in props and props['db'] != 'files':
return False
if set(self.grt.keys()) <= set(props.keys()):
return True
return False
def check_group_del(self, group):
try:
groupinfo = grp.getgrnam(group)
except KeyError:
pinfo('group', group, 'does not exist, on target')
return RET_OK
perror('group', group, "exists, shouldn't")
return RET_ERR
def check_group(self, group, props):
if group.startswith('-'):
return self.check_group_del(group.lstrip('-'))
r = 0
try:
groupinfo = grp.getgrnam(group)
except KeyError:
if self.try_create_group(props):
perror('group', group, 'does not exist')
return RET_ERR
else:
pinfo('group', group, 'does not exist and not enough info to create it')
return RET_OK
for prop in self.grt:
if prop in props:
r |= self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop]), verbose=True)
return r
def create_group(self, group, props):
cmd = [] + self.groupadd
if self.sysname == "FreeBSD":
cmd += [group]
for item in self.grt:
cmd += self.fmt_opt(self.groupmod_p[item], str(props[item]))
if self.sysname != "FreeBSD":
cmd += [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
def fix_group_del(self, group):
if group in blacklist:
perror("group", group, "... cowardly refusing to delete")
return RET_ERR
try:
groupinfo = grp.getgrnam(group)
except KeyError:
return RET_OK
cmd = self.groupdel + [group]
pinfo("group:", ' '.join(cmd))
p = Popen(cmd)
out, err = p.communicate()
r = p.returncode
if r == 0:
return RET_OK
else:
return RET_ERR
def fix_group(self, group, props):
if group.startswith('-'):
return self.fix_group_del(group.lstrip('-'))
r = 0
try:
groupinfo = grp.getgrnam(group)
except KeyError:
if self.try_create_group(props):
return self.create_group(group, props)
else:
perror('group', group, 'does not exist')
return RET_OK
for prop in self.grt:
if prop in props and \
self.check_item(group, prop, props[prop], getattr(groupinfo, self.grt[prop])) != RET_OK:
r |= self.fix_item(group, prop, props[prop])
return r
def METHOD_NAME(self):
r = 0
for group, props in self.groups.items():
r |= self.check_group(group, props)
return r
def fix(self):
r = 0
for group, props in self.groups.items():
r |= self.fix_group(group, props)
return r
if __name__ == "__main__":
main(CompGroup) | null |
5,879 | # Tests of the full ZIP64 functionality of zipfile
# The support.requires call is the only reason for keeping this separate
# from test_zipfile
from test import support
# XXX(nnorwitz): disable this test by looking for extralargefile resource,
# which doesn't exist. This test takes over 30 minutes to run in general
# and requires more disk space than most of the buildbots.
support.requires(
'extralargefile',
'test requires loads of disk-space bytes and a long time to run'
)
import zipfile, os, unittest
import time
import sys
from tempfile import TemporaryFile
from test.support import os_helper
from test.support import requires_zlib
TESTFN = os_helper.TESTFN
TESTFN2 = TESTFN + "2"
# How much time in seconds can pass before we print a 'Still working' message.
_PRINT_WORKING_MSG_INTERVAL = 60
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
# Create test data.
line_gen = ("Test of zipfile line %d." % i for i in range(1000000))
self.data = '\n'.join(line_gen).encode('ascii')
# And write it to a file.
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def zipTest(self, f, compression):
# Create the ZIP archive.
with zipfile.ZipFile(f, "w", compression) as zipfp:
# It will contain enough copies of self.data to reach about 6 GiB of
# raw data to store.
filecount = 6*1024**3 // len(self.data)
next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
for num in range(filecount):
zipfp.writestr("testfn%d" % num, self.data)
# Print still working message since this test can be really slow
if next_time <= time.monotonic():
next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
print((
' zipTest still writing %d of %d, be patient...' %
(num, filecount)), file=sys.__stdout__)
sys.__stdout__.flush()
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
for num in range(filecount):
self.assertEqual(zipfp.read("testfn%d" % num), self.data)
# Print still working message since this test can be really slow
if next_time <= time.monotonic():
next_time = time.monotonic() + _PRINT_WORKING_MSG_INTERVAL
print((
' zipTest still reading %d of %d, be patient...' %
(num, filecount)), file=sys.__stdout__)
sys.__stdout__.flush()
def METHOD_NAME(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
with TemporaryFile() as f:
self.zipTest(f, zipfile.ZIP_STORED)
self.assertFalse(f.closed)
self.zipTest(TESTFN2, zipfile.ZIP_STORED)
@requires_zlib()
def testDeflated(self):
# Try the temp file first. If we do TESTFN2 first, then it hogs
# gigabytes of disk space for the duration of the test.
with TemporaryFile() as f:
self.zipTest(f, zipfile.ZIP_DEFLATED)
self.assertFalse(f.closed)
self.zipTest(TESTFN2, zipfile.ZIP_DEFLATED)
def tearDown(self):
for fname in TESTFN, TESTFN2:
if os.path.exists(fname):
os.remove(fname)
class OtherTests(unittest.TestCase):
def testMoreThan64kFiles(self):
# This test checks that more than 64k files can be added to an archive,
# and that the resulting archive can be read properly by ZipFile
with zipfile.ZipFile(TESTFN, mode="w", allowZip64=True) as zipf:
zipf.debug = 100
numfiles = (1 << 16) * 3//2
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
with zipfile.ZipFile(TESTFN, mode="r") as zipf2:
self.assertEqual(len(zipf2.namelist()), numfiles)
for i in range(numfiles):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
def testMoreThan64kFilesAppend(self):
with zipfile.ZipFile(TESTFN, mode="w", allowZip64=False) as zipf:
zipf.debug = 100
numfiles = (1 << 16) - 1
for i in range(numfiles):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
with zipfile.ZipFile(TESTFN, mode="a", allowZip64=False) as zipf:
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
with self.assertRaises(zipfile.LargeZipFile):
zipf.writestr("foo%08d" % numfiles, b'')
self.assertEqual(len(zipf.namelist()), numfiles)
with zipfile.ZipFile(TESTFN, mode="a", allowZip64=True) as zipf:
zipf.debug = 100
self.assertEqual(len(zipf.namelist()), numfiles)
numfiles2 = (1 << 16) * 3//2
for i in range(numfiles, numfiles2):
zipf.writestr("foo%08d" % i, "%d" % (i**3 % 57))
self.assertEqual(len(zipf.namelist()), numfiles2)
with zipfile.ZipFile(TESTFN, mode="r") as zipf2:
self.assertEqual(len(zipf2.namelist()), numfiles2)
for i in range(numfiles2):
content = zipf2.read("foo%08d" % i).decode('ascii')
self.assertEqual(content, "%d" % (i**3 % 57))
def tearDown(self):
os_helper.unlink(TESTFN)
os_helper.unlink(TESTFN2)
if __name__ == "__main__":
unittest.main() | null |
5,880 | from pathlib import Path
from BALSAMIC.assets.scripts.generate_cnv_report import (
get_pdf_instance,
add_data_to_pdf,
add_plots_to_pdf,
generate_cnv_report,
PDF,
get_pdf_data,
)
def test_get_pdf_instance():
"""Test FPDF instance generation."""
# WHEN creating a dummy FPDF file
pdf: PDF = get_pdf_instance()
# THEN check if the PDF has been correctly created
assert isinstance(pdf, PDF)
def test_get_pdf_data():
"""Test pdf data extraction from a list of files."""
# GIVEN a list of input files
data_paths = ["statistics.txt", "plot_0.png", "plot_1.png"]
# WHEN retrieving the statistics and plots tuple
statistics, plots = get_pdf_data(data_paths)
# THEN the expected files should be returned
assert data_paths[0] in statistics
assert data_paths[1] in plots
assert data_paths[2] in plots
def test_add_data_to_pdf():
"""Test add statistics to a PDF instance."""
# GIVEN a PDF instance and an output sample statistics .txt file
pdf: PDF = get_pdf_instance()
statistics_paths = [
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.samplestatistics.txt"
]
# WHEN generating the PDF with the statistics
pdf: PDF = add_data_to_pdf(pdf=pdf, data_paths=statistics_paths)
# THEN check if the statistics are appended to the created PDF
assert isinstance(pdf, PDF)
assert pdf.page_no() == 1
def METHOD_NAME():
"""Test plots appending to a PDF file."""
# GIVEN a PDF instance and some dummy PNG plots
pdf: PDF = get_pdf_instance()
plot_paths = [
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.sunrise.png",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.germline.png",
]
# WHEN adding the plots to a PDF instance
pdf: PDF = add_plots_to_pdf(pdf, plot_paths)
# THEN check if the images are correctly appended to the PDF
assert isinstance(pdf, PDF)
assert pdf.page_no() == len(plot_paths)
def test_generate_cnv_report_tumor_normal(tmp_path, cli_runner):
"""Test generation of a PDF report for a WGS TN case."""
# GIVEN dummy input data and plots
data_paths = [
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.samplestatistics.txt",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.germline.png",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_normal_wgs.ascat.sunrise.png",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_only_wgs.cnvpytor.circular.png",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_only_wgs.cnvpytor.scatter.png",
]
# GIVEN the output path
output_path: Path = Path(tmp_path, "report.pdf")
# WHEN invoking the python script
result = cli_runner.invoke(
generate_cnv_report,
[
data_paths[0],
data_paths[1],
data_paths[2],
data_paths[3],
data_paths[4],
"--output",
output_path,
],
)
# THEN check if the PDF is correctly created and there are no errors
assert result.exit_code == 0
assert Path(output_path).exists()
def test_generate_cnv_report_tumor_only(tmp_path, cli_runner):
"""Test generation of a PDF report for a WGS TO case."""
# GIVEN dummy input data and plots
plot_paths = [
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_only_wgs.cnvpytor.circular.png",
"tests/test_data/cnv_report/CNV.somatic.sample_tumor_only_wgs.cnvpytor.scatter.png",
]
# GIVEN the output path
output_path: Path = Path(tmp_path, "report.pdf")
# WHEN invoking the python script
result = cli_runner.invoke(
generate_cnv_report,
[
plot_paths[0],
plot_paths[1],
"--output",
output_path,
],
)
# THEN check if the PDF is correctly created and there are no errors
assert result.exit_code == 0
assert Path(output_path).exists() | null |
5,881 | # Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import threading
import time
from azurelinuxagent.common import logger
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.exception import ServiceStoppedError
from azurelinuxagent.common.future import ustr, Queue, Empty
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.utils import textutil
def METHOD_NAME(protocol_util):
return SendTelemetryEventsHandler(protocol_util)
class SendTelemetryEventsHandler(ThreadHandlerInterface):
"""
This Handler takes care of sending all telemetry out of the agent to Wireserver. It sends out data as soon as
there's any data available in the queue to send.
"""
_THREAD_NAME = "SendTelemetryHandler"
_MAX_TIMEOUT = datetime.timedelta(seconds=5).seconds
_MIN_EVENTS_TO_BATCH = 30
_MIN_BATCH_WAIT_TIME = datetime.timedelta(seconds=5)
def __init__(self, protocol_util):
self._protocol = protocol_util.get_protocol()
self.should_run = True
self._thread = None
# We're using a Queue for handling the communication between threads. We plan to remove any dependency on the
# filesystem in the future and use add_event to directly queue events into the queue rather than writing to
# a file and then parsing it later.
# Once we move add_event to directly queue events, we need to add a maxsize here to ensure some limitations are
# being set (currently our limits are enforced by collector_threads but that would become obsolete once we
# start enqueuing events directly).
self._queue = Queue()
@staticmethod
def get_thread_name():
return SendTelemetryEventsHandler._THREAD_NAME
def run(self):
logger.info("Start SendTelemetryHandler service.")
self.start()
def is_alive(self):
return self._thread is not None and self._thread.is_alive()
def start(self):
self._thread = threading.Thread(target=self._process_telemetry_thread)
self._thread.setDaemon(True)
self._thread.setName(self.get_thread_name())
self._thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.join()
def join(self):
self._queue.join()
self._thread.join()
def stopped(self):
return not self.should_run
def enqueue_event(self, event):
# Add event to queue and set event
if self.stopped():
raise ServiceStoppedError("{0} is stopped, not accepting anymore events".format(self.get_thread_name()))
# Queue.put() can block if the queue is full which can be an uninterruptible wait. Blocking for a max of
# SendTelemetryEventsHandler._MAX_TIMEOUT seconds and raising a ServiceStoppedError to retry later.
# Todo: Queue.put() will only raise a Full exception if a maxsize is set for the Queue. Once some size
# limitations are set for the Queue, ensure to handle that correctly here.
try:
self._queue.put(event, timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)
except Exception as error:
raise ServiceStoppedError(
"Unable to enqueue due to: {0}, stopping any more enqueuing until the next run".format(ustr(error)))
def _wait_for_event_in_queue(self):
"""
Wait for at least one event in Queue or timeout after SendTelemetryEventsHandler._MAX_TIMEOUT seconds.
In case of a timeout, set the event to None.
:return: event if an event is added to the Queue or None to signify no events were added in queue.
This would raise in case of an error.
"""
try:
event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)
self._queue.task_done()
except Empty:
# No elements in Queue, return None
event = None
return event
def _process_telemetry_thread(self):
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
try:
# On demand wait, start processing as soon as there is any data available in the queue. In worst case,
# also keep checking every SendTelemetryEventsHandler._MAX_TIMEOUT secs to avoid uninterruptible waits.
# In case the service is stopped but we have events in queue, ensure we send them out before killing the thread.
while not self.stopped() or not self._queue.empty():
first_event = self._wait_for_event_in_queue()
if first_event:
# Start processing queue only if first event is not None (i.e. Queue has at least 1 event),
# else do nothing
self._send_events_in_queue(first_event)
except Exception as error:
err_msg = "An unknown error occurred in the {0} thread main loop, stopping thread.{1}".format(
self.get_thread_name(), textutil.format_exception(error))
add_event(op=WALAEventOperation.UnhandledError, message=err_msg, is_success=False)
def _send_events_in_queue(self, first_event):
# Process everything in Queue
start_time = datetime.datetime.utcnow()
while not self.stopped() and (self._queue.qsize() + 1) < self._MIN_EVENTS_TO_BATCH and (
start_time + self._MIN_BATCH_WAIT_TIME) > datetime.datetime.utcnow():
# To promote batching, we either wait for at least _MIN_EVENTS_TO_BATCH events or _MIN_BATCH_WAIT_TIME secs
# before sending out the first request to wireserver.
# If the thread is requested to stop midway, we skip batching and send whatever we have in the queue.
logger.verbose("Waiting for events to batch. Total events so far: {0}, Time elapsed: {1} secs",
self._queue.qsize()+1, (datetime.datetime.utcnow() - start_time).seconds)
time.sleep(1)
# Delete files after sending the data rather than deleting and sending
self._protocol.report_event(self._get_events_in_queue(first_event))
def _get_events_in_queue(self, first_event):
yield first_event
while not self._queue.empty():
try:
event = self._queue.get_nowait()
self._queue.task_done()
yield event
except Exception as error:
logger.error("Some exception when fetching event from queue: {0}".format(textutil.format_exception(error)) | null |
5,882 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import SimpleShapeDiscusInelastic, Load, ConvertUnits, DeleteWorkspace
import unittest
class SimpleShapeDiscusInelasticTest(unittest.TestCase):
@classmethod
def setUpClass(self):
red_ws = Load("irs26176_graphite002_red.nxs")
red_ws.run().addProperty("deltaE-mode", "Indirect", True)
red_ws.run().addProperty("Ei", 1.845, True)
self._red_ws = red_ws
sqw_ws = Load("iris26176_graphite002_sqw.nxs")
self._sqw_ws = sqw_ws
self._arguments = {
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"NumberScatterings": 2,
}
self._annulus_arguments = self._arguments.copy()
self._annulus_arguments.update({"Shape": "Annulus", "SampleOuterRadius": 2.0})
@classmethod
def METHOD_NAME(self):
DeleteWorkspace(self._red_ws)
DeleteWorkspace(self._sqw_ws)
def _test_corrections_workspace(self, corr_ws_grp):
number_ws = corr_ws_grp.getNumberOfEntries()
# Scatter_1, Scatter_1_NoAbs, Scatter_2, Scatter_1_2_Summed, Scatter_2_2_Summed
# Scatter_1_Integrated, Scatter_2_Integrated, Ratio x 2
self.assertEqual(number_ws, 9)
for i in range(number_ws):
x_unit = corr_ws_grp[i].getAxis(0).getUnit().unitID()
y_unit = corr_ws_grp[i].YUnitLabel()
blocksize = corr_ws_grp[i].blocksize()
if corr_ws_grp[i].name().endswith("Integrated"):
self.assertEqual(blocksize, 1)
else:
self.assertEqual(blocksize, 1905)
self.assertEqual(x_unit, "DeltaE")
self.assertEqual(y_unit, "Scattered Weight")
num_hists = corr_ws_grp[i].getNumberHistograms()
self.assertEqual(num_hists, 10)
def test_flat_plate(self):
# Test flat plate shape
kwargs = self._arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, Shape="FlatPlate", Width=2.0, Thickness=2.0, **kwargs
)
self._test_corrections_workspace(results)
def test_cylinder(self):
# Test cylinder shape
kwargs = self._arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, Shape="Cylinder", SampleRadius=2.0, **kwargs
)
self._test_corrections_workspace(results)
def test_annulus(self):
# Test annulus shape
kwargs = self._annulus_arguments
results = SimpleShapeDiscusInelastic(ReducedWorkspace=self._red_ws, SqwWorkspace=self._sqw_ws, SampleInnerRadius=1.0, **kwargs)
self._test_corrections_workspace(results)
def test_annulus_with_container(self):
kwargs = self._annulus_arguments
results = SimpleShapeDiscusInelastic(
ReducedWorkspace=self._red_ws,
SqwWorkspace=self._sqw_ws,
SampleInnerRadius=1.0,
CanInnerRadius=0.9,
CanOuterRadius=2.1,
Container=True,
**kwargs,
)
self._test_corrections_workspace(results)
# ------------------------------------- Failure Cases --------------------
def test_no_chemical_formula_or_cross_sections_causes_an_error(self):
kwargs = {
"ReducedWorkspace": self._red_ws,
"SqwWorkspace": self._sqw_ws,
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"Shape": "FlatPlate",
"Width": 1.4,
"Thickness": 2.1,
}
with self.assertRaisesRegex(RuntimeError, "Please enter a chemical formula."):
SimpleShapeDiscusInelastic(**kwargs)
def test_flat_plate_no_params(self):
# If the shape is flat plate but the relevant parameters haven't been entered this should throw
# relevant params are Height, Width, Thickness
params = ["Height", "Width", "Thickness"]
for param in params:
kwargs = {
"ReducedWorkspace": self._red_ws,
"SqwWorkspace": self._sqw_ws,
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
param: 0,
"Shape": "FlatPlate",
}
with self.assertRaisesRegex(RuntimeError, f"Please enter a non-zero number for {param.lower()}"):
SimpleShapeDiscusInelastic(**kwargs)
def test_not_in_deltaE(self):
red_ws_not_deltaE = Load("irs26176_graphite002_red.nxs")
red_ws_not_deltaE = ConvertUnits(InputWorkspace=self._red_ws, Target="Wavelength", EMode="Indirect", EFixed=1.845)
kwargs = {
"ReducedWorkspace": red_ws_not_deltaE,
"SqwWorkspace": self._sqw_ws,
"SampleChemicalFormula": "H2-O",
"SampleMassDensity": 1.0,
"NeutronPathsSingle": 50,
"NeutronPathsMultiple": 50,
"Height": 2.0,
"Shape": "FlatPlate",
"Width": 1.4,
"Thickness": 2.1,
}
with self.assertRaisesRegex(RuntimeError, "Input workspace must have units of DeltaE for inelastic instrument"):
SimpleShapeDiscusInelastic(**kwargs)
DeleteWorkspace(red_ws_not_deltaE)
if __name__ == "__main__":
unittest.main() | null |
5,883 | # -*- coding: utf-8 -*-
"""Public functions from sirepo
Use this to call sirepo from other packages or Python notebooks.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
from sirepo.template import lattice
from sirepo.template.lattice import LatticeUtil
import copy
import inspect
import py.error
import pykern.pkio
import sirepo.sim_data
import sirepo.util
class LibAdapterBase:
"""Common functionality between code specific LibAdapter implementations."""
def __init__(self, ignore_files=None):
m = inspect.getmodule(self)
self._sim_data, _, self._schema = sirepo.sim_data.template_globals(m.SIM_TYPE)
self._code_var = m.code_var
self._ignore_files = ignore_files if ignore_files else []
def _convert(self, data):
def _model(model, name):
s = self._schema.model[name]
k = x = v = None
try:
for k, x in s.items():
t = x[1]
v = model[k] if k in model else x[2]
if t == "RPNValue":
t = "Float"
if cv.is_var_value(v):
model[k] = cv.eval_var_with_assert(v)
continue
if t == "Float":
model[k] = float(v) if v else 0.0
elif t == "Integer":
model[k] = int(v) if v else 0
except Exception as e:
pkdlog(
"model={} field={} decl={} value={} exception={}", name, k, x, v, e
)
raise
cv = self._code_var(data.models.rpnVariables)
for x in data.models.rpnVariables:
x.value = cv.eval_var_with_assert(x.value)
for k, v in data.models.items():
if k in self._schema.model:
_model(v, k)
for x in ("elements", "commands"):
for m in data.models[x]:
_model(m, LatticeUtil.model_name_for_data(m))
for bl in data.models.beamlines:
if "positions" in bl:
for p in bl.positions:
p.elemedge = cv.eval_var_with_assert(p.elemedge)
return data
def METHOD_NAME(self, path, filenames):
for f in filenames:
if f in self._ignore_files:
continue
p = path.dirpath().join(f)
assert p.check(file=True), f"file={f} missing"
def _write_input_files(self, data, source_path, dest_dir):
for f in set(
LatticeUtil(data, self._schema)
.iterate_models(
lattice.InputFileIterator(self._sim_data, update_filenames=False),
)
.result,
):
f = self._sim_data.lib_file_name_without_type(f)
try:
d = dest_dir.join(f)
pykern.pkio.mkdir_parent_only(d)
if f not in self._ignore_files:
d.mksymlinkto(source_path.dirpath().join(f), absolute=False)
except py.error.EEXIST:
pass
class GenerateBase:
"""Common functionality between code specific Generate implementations."""
@property
def util(self):
from sirepo.template.lattice import LatticeUtil
if not hasattr(self, "_util"):
self._util = LatticeUtil(self.data, self._schema)
return self._util
class Importer:
"""
Imports a code's native files into Sirepo representation
Args:
sim_type (str): type of simulation (eg. 'elegant' or 'madx')
ignore_files (list): files ignored during verification and symlink routines [None]
"""
def __init__(self, sim_type, ignore_files=None):
import sirepo.template
self.__adapter = sirepo.template.import_module(sim_type).LibAdapter(
ignore_files or []
)
def parse_file(self, path):
p = pykern.pkio.py_path(path)
with pykern.pkio.save_chdir(p.dirpath()):
return SimData(
self.__adapter.parse_file(p),
p,
self.__adapter,
)
class SimData(PKDict):
"""Represents data of simulation"""
def __init__(self, data, source, adapter):
super().__init__(data)
self.pkdel("report")
self.__source = source
self.__adapter = adapter
def copy(self):
"""Allows copy.deepcopy"""
return self.__class__(self, self.__source, self.__adapter)
def write_files(self, dest_dir):
"""Writes files for simulation state
Args:
dest_dir (str or py.path): where to write files
Returns:
PKDict: files written (debugging only)
"""
return self.__adapter.write_files(
# need to make a copy, b/c generate_parameters_file modifies
copy.deepcopy(self),
self.__source,
pykern.pkio.py_path(dest_dir),
)
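# Hedged usage sketch (not part of sirepo): the sim type and file names below
# are hypothetical; a real run needs the corresponding lattice file (and any
# input files it references) on disk.
if __name__ == "__main__":
    importer = Importer("madx")
    sim = importer.parse_file("example.madx")  # hypothetical input file
    sim.write_files("generated")               # hypothetical output directory | null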
5,884 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tasks spawn a sequence of POINTS (P) separated by INTERVALS (I).
Each task may have multiple sequences, e.g. 12-hourly and 6-hourly.
"""
from typing import Optional, Type, overload
from cylc.flow.cycling import PointBase, integer, iso8601
from metomi.isodatetime.data import Calendar
ISO8601_CYCLING_TYPE = iso8601.CYCLER_TYPE_ISO8601
INTEGER_CYCLING_TYPE = integer.CYCLER_TYPE_INTEGER
IS_OFFSET_ABSOLUTE_IMPLS = {
INTEGER_CYCLING_TYPE: integer.is_offset_absolute,
ISO8601_CYCLING_TYPE: iso8601.is_offset_absolute,
}
POINTS = {INTEGER_CYCLING_TYPE: integer.IntegerPoint,
ISO8601_CYCLING_TYPE: iso8601.ISO8601Point}
DUMP_FORMAT_GETTERS = {INTEGER_CYCLING_TYPE: integer.get_dump_format,
ISO8601_CYCLING_TYPE: iso8601.get_dump_format}
POINT_RELATIVE_GETTERS = {
INTEGER_CYCLING_TYPE: integer.get_point_relative,
ISO8601_CYCLING_TYPE: iso8601.get_point_relative
}
INTERVALS = {INTEGER_CYCLING_TYPE: integer.IntegerInterval,
ISO8601_CYCLING_TYPE: iso8601.ISO8601Interval}
SEQUENCES = {INTEGER_CYCLING_TYPE: integer.IntegerSequence,
ISO8601_CYCLING_TYPE: iso8601.ISO8601Sequence}
INIT_FUNCTIONS = {INTEGER_CYCLING_TYPE: integer.init_from_cfg,
ISO8601_CYCLING_TYPE: iso8601.init_from_cfg}
class DefaultCycler:
"""Store the default TYPE for Cyclers."""
TYPE: str
@overload
def get_point(value: str, cycling_type: Optional[str] = None) -> PointBase:
...
@overload
def get_point(value: None, cycling_type: Optional[str] = None) -> None:
...
def get_point(
value: Optional[str], cycling_type: Optional[str] = None
) -> Optional[PointBase]:
"""Return a cylc.flow.cycling.PointBase-derived object from a string."""
if value is None:
return None
return get_point_cls(cycling_type=cycling_type)(value)
def get_point_cls(cycling_type: Optional[str] = None) -> Type[PointBase]:
"""Return the cylc.flow.cycling.PointBase-derived class we're using."""
if cycling_type is None:
cycling_type = DefaultCycler.TYPE
return POINTS[cycling_type]
def get_dump_format(cycling_type=None):
"""Return cycle point dump format, or None."""
return DUMP_FORMAT_GETTERS[cycling_type]()
def get_point_relative(*args, **kwargs):
"""Return a point from an offset expression and a base point."""
cycling_type = kwargs.pop("cycling_type", DefaultCycler.TYPE)
return POINT_RELATIVE_GETTERS[cycling_type](*args, **kwargs)
def get_interval(*args, **kwargs):
"""Return a cylc.flow.cycling.IntervalBase-derived object from a string."""
if args[0] is None:
return None
cycling_type = kwargs.pop("cycling_type", DefaultCycler.TYPE)
return get_interval_cls(cycling_type=cycling_type)(*args, **kwargs)
def get_interval_cls(cycling_type=None):
"""Return the cylc.flow.cycling.IntervalBase-derived class we're using."""
if cycling_type is None:
cycling_type = DefaultCycler.TYPE
return INTERVALS[cycling_type]
def METHOD_NAME(*args, **kwargs):
"""Return a cylc.flow.cycling.SequenceBase-derived object from a string."""
if args[0] is None:
return None
cycling_type = kwargs.pop("cycling_type", DefaultCycler.TYPE)
return get_sequence_cls(cycling_type=cycling_type)(*args, **kwargs)
def get_sequence_cls(cycling_type=None):
"""Return the cylc.flow.cycling.SequenceBase-derived class we're using."""
if cycling_type is None:
cycling_type = DefaultCycler.TYPE
return SEQUENCES[cycling_type]
def init_cyclers(cfg):
"""Initialise cycling specifics using the workflow configuration (cfg)."""
DefaultCycler.TYPE = cfg['scheduling']['cycling mode']
if DefaultCycler.TYPE in Calendar.MODES:
DefaultCycler.TYPE = ISO8601_CYCLING_TYPE
INIT_FUNCTIONS[DefaultCycler.TYPE](cfg)
def is_offset_absolute(offset_string, **kwargs):
"""Return True if offset_string is a point rather than an interval."""
cycling_type = kwargs.pop("cycling_type", DefaultCycler.TYPE)
return IS_OFFSET_ABSOLUTE_IMPLS[cycling_type](offset_string)
@overload
def standardise_point_string(
point_string: str, cycling_type: Optional[str] = None
) -> str:
...
@overload
def standardise_point_string(
point_string: None, cycling_type: Optional[str] = None
) -> None:
...
def standardise_point_string(
point_string: Optional[str], cycling_type: Optional[str] = None
) -> Optional[str]:
"""Return a standardised version of point_string."""
if point_string is None:
return None
point = get_point(point_string, cycling_type=cycling_type)
if point is not None:
point.standardise()
point_string = str(point)
return point_string
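# Hedged usage sketch (not part of cylc): passing an explicit ``cycling_type``
# lets these helpers be used without calling ``init_cyclers`` first; the point
# strings below are illustrative.
if __name__ == "__main__":
    point = get_point("5", cycling_type=INTEGER_CYCLING_TYPE)
    print(type(point).__name__, point)  # IntegerPoint 5
    print(standardise_point_string("5", cycling_type=INTEGER_CYCLING_TYPE))  # -> "5" | null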
5,885 | import abc
import uuid
from typing import Any, Dict, Optional
from django.conf import settings
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Manager
from polymorphic.models import PolymorphicModel
from roles.models import Role
class ProjectType(models.TextChoices):
DOCUMENT_CLASSIFICATION = "DocumentClassification"
SEQUENCE_LABELING = "SequenceLabeling"
SEQ2SEQ = "Seq2seq"
INTENT_DETECTION_AND_SLOT_FILLING = "IntentDetectionAndSlotFilling"
SPEECH2TEXT = "Speech2text"
IMAGE_CLASSIFICATION = "ImageClassification"
BOUNDING_BOX = "BoundingBox"
SEGMENTATION = "Segmentation"
IMAGE_CAPTIONING = "ImageCaptioning"
class Project(PolymorphicModel):
name = models.CharField(max_length=100)
description = models.TextField(default="")
guideline = models.TextField(default="", blank=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
created_by = models.ForeignKey(
User,
on_delete=models.SET_NULL,
null=True,
)
project_type = models.CharField(max_length=30, choices=ProjectType.choices)
random_order = models.BooleanField(default=False)
collaborative_annotation = models.BooleanField(default=False)
single_class_classification = models.BooleanField(default=False)
allow_member_to_create_label_type = models.BooleanField(default=False)
def add_admin(self):
admin_role = Role.objects.get(name=settings.ROLE_PROJECT_ADMIN)
Member.objects.create(
project=self,
user=self.created_by,
role=admin_role,
)
@property
@abc.abstractmethod
def is_text_project(self) -> bool:
return False
def clone(self) -> "Project":
"""Clone the project.
See https://docs.djangoproject.com/en/4.2/topics/db/queries/#copying-model-instances
Returns:
The cloned project.
"""
project = Project.objects.get(pk=self.pk)
project.pk = None
project.id = None
project._state.adding = True
project.save()
def bulk_clone(queryset: models.QuerySet, field_initializers: Optional[Dict[Any, Any]] = None):
"""Clone the queryset.
Args:
queryset: The queryset to clone.
field_initializers: The field initializers.
"""
if field_initializers is None:
field_initializers = {}
items = []
for item in queryset:
item.id = None
item.pk = None
for field, value_or_callable in field_initializers.items():
if callable(value_or_callable):
value_or_callable = value_or_callable()
setattr(item, field, value_or_callable)
item.project = project
item._state.adding = True
items.append(item)
queryset.model.objects.bulk_create(items)
bulk_clone(self.role_mappings.all())
bulk_clone(self.tags.all())
# clone examples
bulk_clone(self.examples.all(), field_initializers={"uuid": uuid.uuid4})
# clone label types
bulk_clone(self.categorytype_set.all())
bulk_clone(self.spantype_set.all())
bulk_clone(self.relationtype_set.all())
return project
def __str__(self):
return self.name
class TextClassificationProject(Project):
@property
def is_text_project(self) -> bool:
return True
class SequenceLabelingProject(Project):
allow_overlapping = models.BooleanField(default=False)
grapheme_mode = models.BooleanField(default=False)
use_relation = models.BooleanField(default=False)
@property
def is_text_project(self) -> bool:
return True
class Seq2seqProject(Project):
@property
def is_text_project(self) -> bool:
return True
class IntentDetectionAndSlotFillingProject(Project):
@property
def is_text_project(self) -> bool:
return True
class Speech2textProject(Project):
@property
def is_text_project(self) -> bool:
return False
class ImageClassificationProject(Project):
@property
def is_text_project(self) -> bool:
return False
class BoundingBoxProject(Project):
@property
def is_text_project(self) -> bool:
return False
class SegmentationProject(Project):
@property
def is_text_project(self) -> bool:
return False
class ImageCaptioningProject(Project):
@property
def is_text_project(self) -> bool:
return False
class Tag(models.Model):
text = models.TextField()
project = models.ForeignKey(to=Project, on_delete=models.CASCADE, related_name="tags")
def __str__(self):
return self.text
class MemberManager(Manager):
def can_update(self, project: int, member_id: int, new_role: str) -> bool:
"""The project needs at least 1 admin.
Args:
project: The project id.
member_id: The member id.
new_role: The new role name.
Returns:
Whether the mapping can be updated or not.
"""
queryset = self.filter(project=project, role__name=settings.ROLE_PROJECT_ADMIN)
if queryset.count() > 1:
return True
else:
admin = queryset.first()
# we can change the role except for the only admin.
return admin.id != member_id or new_role == settings.ROLE_PROJECT_ADMIN
def has_role(self, project_id: int, user: User, role_name: str):
return self.filter(project=project_id, user=user, role__name=role_name).exists()
class Member(models.Model):
user = models.ForeignKey(to=User, on_delete=models.CASCADE, related_name="role_mappings")
project = models.ForeignKey(to=Project, on_delete=models.CASCADE, related_name="role_mappings")
role = models.ForeignKey(to=Role, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = MemberManager()
def clean(self):
members = self.__class__.objects.exclude(id=self.id)
if members.filter(user=self.user, project=self.project).exists():
message = "This user is already assigned to a role in this project."
raise ValidationError(message)
def is_admin(self):
return self.role.name == settings.ROLE_PROJECT_ADMIN
@property
def METHOD_NAME(self):
return self.user.METHOD_NAME
class Meta:
unique_together = ("user", "project") | null |
5,886 | from unittest import mock
import pytest
from zigpy.config import CONF_OTA_THIRDREALITY
import zigpy.ota
import zigpy.ota.image
from zigpy.ota.provider import LOCK_REFRESH, ThirdReality, ThirdRealityImage
from tests.async_mock import AsyncMock, patch
MANUFACTURER_ID = 4659
@pytest.fixture
def thirdreality_prov():
p = ThirdReality()
p.enable()
return p
@pytest.fixture
def thirdreality_image():
return ThirdRealityImage.from_json(
{
"modelId": "3RSB22BZ",
"url": "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota",
"version": "1.00.21",
"imageType": 54184,
"manufacturerId": 4659,
"fileVersion": 33,
}
)
async def METHOD_NAME(thirdreality_prov):
thirdreality_prov.enable = mock.MagicMock()
thirdreality_prov.refresh_firmware_list = AsyncMock()
r = await thirdreality_prov.initialize_provider({CONF_OTA_THIRDREALITY: True})
assert r is None
assert thirdreality_prov.enable.call_count == 1
assert thirdreality_prov.refresh_firmware_list.call_count == 1
async def test_thirdreality_get_image_no_cache(thirdreality_prov, thirdreality_image):
thirdreality_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)
thirdreality_prov._cache = mock.MagicMock()
thirdreality_prov._cache.__getitem__.side_effect = KeyError()
thirdreality_prov.refresh_firmware_list = AsyncMock()
# ThirdReality manufacturer_id, but not in cache
assert thirdreality_image.key not in thirdreality_prov._cache
r = await thirdreality_prov.get_image(thirdreality_image.key)
assert r is None
assert thirdreality_prov.refresh_firmware_list.call_count == 1
assert thirdreality_prov._cache.__getitem__.call_count == 1
assert thirdreality_image.fetch_image.call_count == 0
async def test_thirdreality_get_image(thirdreality_prov, thirdreality_image):
thirdreality_image.fetch_image = AsyncMock(return_value=mock.sentinel.image)
thirdreality_prov._cache = mock.MagicMock()
thirdreality_prov._cache.__getitem__.return_value = thirdreality_image
thirdreality_prov.refresh_firmware_list = AsyncMock()
r = await thirdreality_prov.get_image(thirdreality_image.key)
assert r is mock.sentinel.image
assert thirdreality_prov._cache.__getitem__.call_count == 1
assert (
thirdreality_prov._cache.__getitem__.mock_calls[0].args[0]
== thirdreality_image.key
)
assert thirdreality_image.fetch_image.call_count == 1
@patch("aiohttp.ClientSession.get")
async def test_thirdreality_refresh_list(
mock_get, thirdreality_prov, thirdreality_image
):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(
return_value={
"versions": [
{
"modelId": "3RSB22BZ",
"url": "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota",
"version": "1.00.21",
"imageType": 54184,
"manufacturerId": 4659,
"fileVersion": 33,
}
]
}
)
mock_get.return_value.__aenter__.return_value.status = 200
mock_get.return_value.__aenter__.return_value.reason = "OK"
await thirdreality_prov.refresh_firmware_list()
assert mock_get.call_count == 1
assert len(thirdreality_prov._cache) == 1
assert thirdreality_image.key in thirdreality_prov._cache
cached = thirdreality_prov._cache[thirdreality_image.key]
assert cached.image_type == thirdreality_image.image_type
assert (
cached.url
== "https://tr-zha.s3.amazonaws.com/firmwares/SmartButton_Zigbee_PROD_OTA_V21_1.00.21.ota"
)
assert not thirdreality_prov.expired
@patch("aiohttp.ClientSession.get")
async def test_thirdreality_refresh_list_locked(mock_get, thirdreality_prov):
await thirdreality_prov._locks[LOCK_REFRESH].acquire()
mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])
await thirdreality_prov.refresh_firmware_list()
assert mock_get.call_count == 0
@patch("aiohttp.ClientSession.get")
async def test_thirdreality_refresh_list_failed(mock_get, thirdreality_prov):
mock_get.return_value.__aenter__.return_value.json = AsyncMock(side_effect=[[]])
mock_get.return_value.__aenter__.return_value.status = 434
mock_get.return_value.__aenter__.return_value.reason = "UNK"
with patch.object(thirdreality_prov, "update_expiration") as update_exp:
await thirdreality_prov.refresh_firmware_list()
assert mock_get.call_count == 1
assert update_exp.call_count == 0
@patch("aiohttp.ClientSession.get")
async def test_thirdreality_fetch_image(mock_get, thirdreality_image):
image = zigpy.ota.image.OTAImage(
header=zigpy.ota.image.OTAImageHeader(
upgrade_file_id=200208670,
header_version=256,
header_length=56,
field_control=zigpy.ota.image.FieldControl(0),
manufacturer_id=MANUFACTURER_ID,
image_type=54184,
file_version=33,
stack_version=2,
header_string="Telink OTA Sample Usage",
image_size=66,
),
subelements=[
zigpy.ota.image.SubElement(
tag_id=zigpy.ota.image.ElementTagId.UPGRADE_IMAGE, data=b"abcd"
)
],
)
thirdreality_image.url = mock.sentinel.url
mock_get.return_value.__aenter__.return_value.read = AsyncMock(
return_value=image.serialize()
)
r = await thirdreality_image.fetch_image()
assert isinstance(r, zigpy.ota.image.OTAImage)
assert mock_get.call_count == 1
assert mock_get.mock_calls[0].args[0] == mock.sentinel.url
assert r == image | null |
5,887 | # SPDX-License-Identifier: MIT
import functools
import types
from ._make import _make_ne
_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
def cmp_using(
eq=None,
lt=None,
le=None,
gt=None,
ge=None,
require_same_type=True,
class_name="Comparable",
):
"""
Create a class that can be passed into `attrs.field`'s ``eq``, ``order``,
and ``cmp`` arguments to customize field comparison.
The resulting class will have a full set of ordering methods if at least
one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
:param Optional[callable] eq: `callable` used to evaluate equality of two
objects.
:param Optional[callable] lt: `callable` used to evaluate whether one
object is less than another object.
:param Optional[callable] le: `callable` used to evaluate whether one
object is less than or equal to another object.
:param Optional[callable] gt: `callable` used to evaluate whether one
object is greater than another object.
:param Optional[callable] ge: `callable` used to evaluate whether one
object is greater than or equal to another object.
:param bool require_same_type: When `True`, equality and ordering methods
will return `NotImplemented` if objects are not of the same type.
:param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
See `comparison` for more details.
.. versionadded:: 21.1.0
"""
body = {
"__slots__": ["value"],
"__init__": METHOD_NAME(),
"_requirements": [],
"_is_comparable_to": _is_comparable_to,
}
# Add operations.
num_order_functions = 0
has_eq_function = False
if eq is not None:
has_eq_function = True
body["__eq__"] = _make_operator("eq", eq)
body["__ne__"] = _make_ne()
if lt is not None:
num_order_functions += 1
body["__lt__"] = _make_operator("lt", lt)
if le is not None:
num_order_functions += 1
body["__le__"] = _make_operator("le", le)
if gt is not None:
num_order_functions += 1
body["__gt__"] = _make_operator("gt", gt)
if ge is not None:
num_order_functions += 1
body["__ge__"] = _make_operator("ge", ge)
type_ = types.new_class(
class_name, (object,), {}, lambda ns: ns.update(body)
)
# Add same type requirement.
if require_same_type:
type_._requirements.append(_check_same_type)
# Add total ordering if at least one operation was defined.
if 0 < num_order_functions < 4:
if not has_eq_function:
# functools.total_ordering requires __eq__ to be defined,
# so raise early error here to keep a nice stack.
msg = "eq must be define is order to complete ordering from lt, le, gt, ge."
raise ValueError(msg)
type_ = functools.total_ordering(type_)
return type_
def METHOD_NAME():
"""
Create __init__ method.
"""
def __init__(self, value):
"""
Initialize object with *value*.
"""
self.value = value
return __init__
def _make_operator(name, func):
"""
Create operator method.
"""
def method(self, other):
if not self._is_comparable_to(other):
return NotImplemented
result = func(self.value, other.value)
if result is NotImplemented:
return NotImplemented
return result
method.__name__ = f"__{name}__"
method.__doc__ = (
f"Return a {_operation_names[name]} b. Computed by attrs."
)
return method
def _is_comparable_to(self, other):
"""
Check whether `other` is comparable to `self`.
"""
return all(func(self, other) for func in self._requirements)
def _check_same_type(self, other):
"""
Return True if *self* and *other* are of the same type, False otherwise.
"""
return other.value.__class__ is self.value.__class__
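# Hedged usage sketch (not part of attrs): ``cmp_using`` builds a wrapper class
# whose instances hold a single ``value`` and compare through the supplied
# callables; the inverted ordering below is purely illustrative.
if __name__ == "__main__":
    ReversedOrder = cmp_using(
        eq=lambda a, b: a == b,
        lt=lambda a, b: a > b,  # deliberately inverted
        class_name="ReversedOrder",
    )
    assert ReversedOrder(1) == ReversedOrder(1)
    assert ReversedOrder(2) < ReversedOrder(1)  # inverted ordering
    assert ReversedOrder(1) > ReversedOrder(2)  # filled in by functools.total_ordering | null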
5,888 | from contextlib import redirect_stderr, redirect_stdout
from io import StringIO
from pybind11_tests import iostream as m
def test_captured(capsys):
msg = "I've been redirected to Python, I hope!"
m.captured_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
m.captured_err(msg)
stdout, stderr = capsys.readouterr()
assert stdout == ""
assert stderr == msg
def test_captured_large_string(capsys):
# Make this bigger than the buffer used on the C++ side: 1024 chars
msg = "I've been redirected to Python, I hope!"
msg = msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset0(capsys):
msg = "\u07FF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_2byte_offset1(capsys):
msg = "\u07FF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset0(capsys):
msg = "\uFFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset1(capsys):
msg = "\uFFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_3byte_offset2(capsys):
msg = "\uFFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset0(capsys):
msg = "\U0010FFFF"
msg = "" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset1(capsys):
msg = "\U0010FFFF"
msg = "1" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset2(capsys):
msg = "\U0010FFFF"
msg = "12" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_captured_utf8_4byte_offset3(capsys):
msg = "\U0010FFFF"
msg = "123" + msg * (1024 // len(msg) + 1)
m.captured_output_default(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_guard_capture(capsys):
msg = "I've been redirected to Python, I hope!"
m.guard_output(msg)
stdout, stderr = capsys.readouterr()
assert stdout == msg
assert stderr == ""
def test_series_captured(capture):
with capture:
m.captured_output("a")
m.captured_output("b")
assert capture == "ab"
def test_flush(capfd):
msg = "(not flushed)"
msg2 = "(flushed)"
with m.ostream_redirect():
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == ""
m.noisy_function(msg2, flush=True)
stdout, stderr = capfd.readouterr()
assert stdout == msg + msg2
m.noisy_function(msg, flush=False)
stdout, stderr = capfd.readouterr()
assert stdout == msg
def test_not_captured(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
m.captured_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def test_err(capfd):
msg = "Something that should not show up in log"
stream = StringIO()
with redirect_stderr(stream):
m.raw_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stderr(stream):
m.captured_err(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
def test_multi_captured(capfd):
stream = StringIO()
with redirect_stdout(stream):
m.captured_output("a")
m.raw_output("b")
m.captured_output("c")
m.raw_output("d")
stdout, stderr = capfd.readouterr()
assert stdout == "bd"
assert stream.getvalue() == "ac"
def METHOD_NAME(capsys):
m.captured_dual("a", "b")
stdout, stderr = capsys.readouterr()
assert stdout == "a"
assert stderr == "b"
def test_redirect(capfd):
msg = "Should not be in log!"
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
stream = StringIO()
with redirect_stdout(stream):
with m.ostream_redirect():
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stream.getvalue() == msg
stream = StringIO()
with redirect_stdout(stream):
m.raw_output(msg)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stream.getvalue() == ""
def test_redirect_err(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
with redirect_stderr(stream):
with m.ostream_redirect(stdout=False):
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == msg
assert stderr == ""
assert stream.getvalue() == msg2
def test_redirect_both(capfd):
msg = "StdOut"
msg2 = "StdErr"
stream = StringIO()
stream2 = StringIO()
with redirect_stdout(stream):
with redirect_stderr(stream2):
with m.ostream_redirect():
m.raw_output(msg)
m.raw_err(msg2)
stdout, stderr = capfd.readouterr()
assert stdout == ""
assert stderr == ""
assert stream.getvalue() == msg
assert stream2.getvalue() == msg2
def test_threading():
with m.ostream_redirect(stdout=True, stderr=False):
# start some threads
threads = []
# start some threads
for _j in range(20):
threads.append(m.TestThread())
# give the threads some time to fail
threads[0].sleep()
# stop all the threads
for t in threads:
t.stop()
for t in threads:
t.join()
# if a thread segfaults, we don't get here
assert True | null |
5,889 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pandas as pd
from ....backtest.profit_attribution import get_stock_weight_df
def parse_position(position: dict = None) -> pd.DataFrame:
"""Parse position dict to position DataFrame
:param position: position data
:return: position DataFrame;
.. code-block:: python
position_df = parse_position(positions)
print(position_df.head())
# status: 0-hold, -1-sell, 1-buy
amount cash count price status weight
instrument datetime
SZ000547 2017-01-04 44.154290 211405.285654 1 205.189575 1 0.031255
SZ300202 2017-01-04 60.638845 211405.285654 1 154.356506 1 0.032290
SH600158 2017-01-04 46.531681 211405.285654 1 153.895142 1 0.024704
SH600545 2017-01-04 197.173093 211405.285654 1 48.607037 1 0.033063
SZ000930 2017-01-04 103.938300 211405.285654 1 80.759453 1 0.028958
"""
position_weight_df = get_stock_weight_df(position)
# If the day does not exist, use the last weight
position_weight_df.fillna(method="ffill", inplace=True)
previous_data = {"date": None, "code_list": []}
result_df = pd.DataFrame()
for _trading_date, _value in position.items():
_value = _value.position
# pd_date type: pd.Timestamp
_cash = _value.pop("cash")
for _item in ["now_account_value"]:
if _item in _value:
_value.pop(_item)
_trading_day_df = pd.DataFrame.from_dict(_value, orient="index")
_trading_day_df["weight"] = position_weight_df.loc[_trading_date]
_trading_day_df["cash"] = _cash
_trading_day_df["date"] = _trading_date
# status: 0-hold, -1-sell, 1-buy
_trading_day_df["status"] = 0
# T not exist, T-1 exist, T sell
_cur_day_sell = set(previous_data["code_list"]) - set(_trading_day_df.index)
# T exist, T-1 not exist, T buy
_cur_day_buy = set(_trading_day_df.index) - set(previous_data["code_list"])
# Trading day buy
_trading_day_df.loc[_trading_day_df.index.isin(_cur_day_buy), "status"] = 1
# Trading day sell
if not result_df.empty:
_trading_day_sell_df = result_df.loc[
(result_df["date"] == previous_data["date"]) & (result_df.index.isin(_cur_day_sell))
].copy()
if not _trading_day_sell_df.empty:
_trading_day_sell_df["status"] = -1
_trading_day_sell_df["date"] = _trading_date
_trading_day_df = pd.concat([_trading_day_df, _trading_day_sell_df], sort=False)
result_df = pd.concat([result_df, _trading_day_df], sort=True)
previous_data = dict(
date=_trading_date,
code_list=_trading_day_df[_trading_day_df["status"] != -1].index,
)
result_df.reset_index(inplace=True)
result_df.rename(columns={"date": "datetime", "index": "instrument"}, inplace=True)
return result_df.set_index(["instrument", "datetime"])
def _add_label_to_position(position_df: pd.DataFrame, label_data: pd.DataFrame) -> pd.DataFrame:
"""Concat position with custom label
:param position_df: position DataFrame
:param label_data:
:return: concat result
"""
_start_time = position_df.index.get_level_values(level="datetime").min()
_end_time = position_df.index.get_level_values(level="datetime").max()
label_data = label_data.loc(axis=0)[:, pd.to_datetime(_start_time) :]
_result_df = pd.concat([position_df, label_data], axis=1, sort=True).reindex(label_data.index)
_result_df = _result_df.loc[_result_df.index.get_level_values(1) <= _end_time]
return _result_df
def _add_bench_to_position(position_df: pd.DataFrame = None, bench: pd.Series = None) -> pd.DataFrame:
"""Concat position with bench
:param position_df: position DataFrame
:param bench: report normal data
:return: concat result
"""
_temp_df = position_df.reset_index(level="instrument")
# FIXME: After the stock is bought and sold, the rise and fall of the next trading day are calculated.
_temp_df["bench"] = bench.shift(-1)
res_df = _temp_df.set_index(["instrument", _temp_df.index])
return res_df
def METHOD_NAME(df: pd.DataFrame) -> pd.DataFrame:
"""calculate label rank
:param df:
:return:
"""
_label_name = "label"
def _calculate_day_value(g_df: pd.DataFrame):
g_df = g_df.copy()
g_df["rank_ratio"] = g_df[_label_name].rank(ascending=False) / len(g_df) * 100
# Sell: -1, Hold: 0, Buy: 1
for i in [-1, 0, 1]:
g_df.loc[g_df["status"] == i, "rank_label_mean"] = g_df[g_df["status"] == i]["rank_ratio"].mean()
g_df["excess_return"] = g_df[_label_name] - g_df[_label_name].mean()
return g_df
return df.groupby(level="datetime").apply(_calculate_day_value)
def get_position_data(
position: dict,
label_data: pd.DataFrame,
report_normal: pd.DataFrame = None,
calculate_label_rank=False,
start_date=None,
end_date=None,
) -> pd.DataFrame:
"""Concat position data with pred/report_normal
:param position: position data
:param report_normal: report normal data, must contain a 'bench' column
:param label_data: label DataFrame indexed by (instrument, datetime), aligned with the position data
:param calculate_label_rank: whether to add the rank_ratio, rank_label_mean and excess_return fields
:param start_date: start date
:param end_date: end date
:return: concat result,
columns: ['amount', 'cash', 'count', 'price', 'status', 'weight', 'label',
'rank_ratio', 'rank_label_mean', 'excess_return', 'score', 'bench']
index: ['instrument', 'date']
"""
_position_df = parse_position(position)
# Add custom_label, rank_ratio, rank_mean, and excess_return field
_position_df = _add_label_to_position(_position_df, label_data)
if calculate_label_rank:
_position_df = METHOD_NAME(_position_df)
if report_normal is not None:
# Add bench field
_position_df = _add_bench_to_position(_position_df, report_normal["bench"])
_date_list = _position_df.index.get_level_values(level="datetime")
start_date = _date_list.min() if start_date is None else start_date
end_date = _date_list.max() if end_date is None else end_date
_position_df = _position_df.loc[(start_date <= _date_list) & (_date_list <= end_date)]
return _position_df | null |
5,890 | from collections import defaultdict
from typing import ClassVar, Self
from discord import Message, Thread
from discord.errors import HTTPException
from pydis_core.utils import scheduling
from pydis_core.utils.logging import get_logger
import bot
from bot.constants import Channels
from bot.exts.filtering._filter_context import Event, FilterContext
from bot.exts.filtering._settings_types.settings_entry import ActionEntry
from bot.exts.filtering._utils import FakeContext
from bot.utils.messages import send_attachments
log = get_logger(__name__)
SUPERSTAR_REASON = (
"Your nickname was found to be in violation of our code of conduct. "
"If you believe this is a mistake, please let us know."
)
async def upload_messages_attachments(ctx: FilterContext, messages: list[Message]) -> None:
"""Re-upload the messages' attachments for future logging."""
if not messages:
return
destination = messages[0].guild.get_channel(Channels.attachment_log)
for message in messages:
if message.attachments and message.id not in ctx.uploaded_attachments:
ctx.uploaded_attachments[message.id] = await send_attachments(message, destination, link_large=False)
class RemoveContext(ActionEntry):
"""A setting entry which tells whether to delete the offending message(s)."""
name: ClassVar[str] = "remove_context"
description: ClassVar[str] = (
"A boolean field. If True, the filter being triggered will cause the offending context to be removed. "
"An offending message will be deleted, while an offending nickname will be superstarified."
)
remove_context: bool
async def action(self, ctx: FilterContext) -> None:
"""Remove the offending context."""
if not self.remove_context:
return
if ctx.event in (Event.MESSAGE, Event.MESSAGE_EDIT):
await self._handle_messages(ctx)
elif ctx.event == Event.NICKNAME:
await self._handle_nickname(ctx)
elif ctx.event == Event.THREAD_NAME:
await self._handle_thread(ctx)
@staticmethod
async def _handle_messages(ctx: FilterContext) -> None:
"""Delete any messages involved in this context."""
if not ctx.message or not ctx.message.guild:
return
# If deletion somehow fails at least this will allow scheduling for deletion.
ctx.messages_deletion = True
channel_messages = defaultdict(set) # Duplicates will cause batch deletion to fail.
for message in {ctx.message} | ctx.related_messages:
channel_messages[message.channel].add(message)
success = fail = 0
deleted = list()
for channel, messages in channel_messages.items():
try:
await channel.delete_messages(messages)
except HTTPException:
fail += len(messages)
else:
success += len(messages)
deleted.extend(messages)
scheduling.create_task(upload_messages_attachments(ctx, deleted))
if not fail:
if success == 1:
ctx.action_descriptions.append("deleted")
else:
ctx.action_descriptions.append("deleted all")
elif not success:
if fail == 1:
ctx.action_descriptions.append("failed to delete")
else:
ctx.action_descriptions.append("all failed to delete")
else:
ctx.action_descriptions.append(f"{success} deleted, {fail} failed to delete")
@staticmethod
async def _handle_nickname(ctx: FilterContext) -> None:
"""Apply a superstar infraction to remove the user's nickname."""
alerts_channel = bot.instance.get_channel(Channels.mod_alerts)
if not alerts_channel:
log.error(f"Unable to apply superstar as the context channel {alerts_channel} can't be found.")
return
command = bot.instance.get_command("superstar")
if not command:
user = ctx.author
await alerts_channel.send(f":warning: Could not apply superstar to {user.mention}: command not found.")
log.warning(f":warning: Could not apply superstar to {user.mention}: command not found.")
ctx.action_descriptions.append("failed to superstar")
return
await command(FakeContext(ctx.message, alerts_channel, command), ctx.author, None, reason=SUPERSTAR_REASON)
ctx.action_descriptions.append("superstarred")
@staticmethod
async def _handle_thread(ctx: FilterContext) -> None:
"""Delete the context thread."""
if isinstance(ctx.channel, Thread):
try:
await ctx.channel.delete()
except HTTPException:
ctx.action_descriptions.append("failed to delete thread")
else:
ctx.action_descriptions.append("deleted thread")
def METHOD_NAME(self, other: Self) -> Self:
"""Combines two actions of the same type. Each type of action is executed once per filter."""
return RemoveContext(remove_context=self.remove_context or other.remove_context) | null |
5,891 | """
app.py
Mainloop of the validphys application. Here we define tailored extensions to
the reportengine application (such as extra command line flags). Additionally
the *provider modules* that serve as source to the validphys actions are
declared here.
The entry point of the validphys application is the ``main`` function of this
module.
"""
import contextlib
import logging
import os
import sys
import lhapdf
from reportengine import app
from validphys import mplstyles, uploadutils
from validphys.config import Config, Environment
providers = [
"validphys.results",
"validphys.commondata",
"validphys.pdfgrids",
"validphys.pdfplots",
"validphys.dataplots",
"validphys.fitdata",
"validphys.arclength",
"validphys.sumrules",
"validphys.reweighting",
"validphys.kinematics",
"validphys.correlations",
"validphys.chi2grids",
"validphys.eff_exponents",
"validphys.asy_exponents",
"validphys.paramfits.dataops",
"validphys.paramfits.plots",
"validphys.theorycovariance.construction",
"validphys.theorycovariance.output",
"validphys.theorycovariance.tests",
"validphys.replica_selector",
"validphys.closuretest",
"validphys.mc_gen",
"validphys.theoryinfo",
"validphys.pseudodata",
"validphys.renametools",
"validphys.covmats",
"validphys.hyperoptplot",
"validphys.deltachi2",
"validphys.n3fit_data",
"validphys.mc2hessian",
"reportengine.report",
"validphys.overfit_metric",
]
log = logging.getLogger(__name__)
class App(app.App):
environment_class = Environment
config_class = Config
critical_message = """A critical error ocurred. This is likely due to one of the following reasons:
- A bug in validphys.
- Corruption of the provided resources (e.g. incorrect plotting files).
- Cosmic rays hitting your CPU and altering the registers.
The traceback above should help determine the cause of the problem. If you
believe this is a bug in validphys (please discard the cosmic rays first),
please open an issue on GitHub<https://github.com/NNPDF/nnpdf/issues>,
including the contents of the following file:
%s
"""
@property
def METHOD_NAME(self):
return os.fspath(mplstyles.smallstyle)
def __init__(self, name="validphys", providers=providers):
super().__init__(name, providers)
@property
def argparser(self):
parser = super().argparser
cout = parser.add_mutually_exclusive_group()
# We want True, False or None, so that None defaults to debug or quiet.
# That's why we use store_const
cout.add_argument(
"--cout",
action="store_const",
const=True,
help="display C output. Default depends on log level",
)
cout.add_argument("--no-cout", dest="cout", action="store_const", const=False)
net = parser.add_mutually_exclusive_group()
net.add_argument(
"--net",
action="store_true",
default=True,
help="Enable remote loader. " "Try to download missing resources. This is the default",
)
net.add_argument(
"--no-net",
dest="net",
action="store_false",
help="Disable remote loader. Use only local resources.",
)
parser.add_argument(
"--upload",
action="store_true",
help="Upload the resulting output folder to the Milan server.",
)
return parser
def init(self):
super().init()
cout = self.args["cout"]
if cout is None:
if self.args["loglevel"] <= logging.DEBUG:
cout = True
if not cout:
lhapdf.setVerbosity(0)
@staticmethod
def upload_context(do_upload, output):
"""If do_upload is False, do notihing. Otherwise, on enter, check the
requiements for uploading and on exit,
upload the output path if do_upload is True. Otherwise do nothing.
Raise SystemExit on error."""
if do_upload:
return uploadutils.ReportUploader().upload_or_exit_context(output)
return contextlib.ExitStack()
def run(self):
if sys.version_info < (3, 9):
log.warning(
"validphys 2 is discontinued on Python<3.9 and will "
"not be longer updated. Please run\n"
"conda install python=3.9\n\n"
"If you have any problems, please open an issue "
"on https://github.com/NNPDF/nnpdf/issues."
)
with self.upload_context(self.args["upload"], self.args["output"]):
super().run()
def main():
a = App()
a.main()
if __name__ == "__main__":
main() | null |
5,892 | import datetime
import sys
import time
from typing import Any, List, Tuple
from IPython.display import display
from ipywidgets import HTML, FloatProgress, HBox, VBox # NOQA
from pytorch_pfn_extras.training import extension, trigger # NOQA
from pytorch_pfn_extras.training._manager_protocol import (
ExtensionsManagerProtocol,
)
class ProgressBarNotebook(extension.Extension):
"""An extension to print a progress bar and recent training status.
It is aimed to work on jupyter notebook as replacement of `ProgressBar`.
This extension prints a progress bar at every call. It watches the current
iteration and epoch to print the bar.
Args:
training_length (tuple or None): Length of whole training. It consists
of an integer and either ``'epoch'`` or ``'iteration'``. If this
value is omitted and the stop trigger of the manager is
:class:`IntervalTrigger`, this extension uses its attributes to
determine the length of the training.
update_interval (int): Number of iterations to skip printing the
progress bar.
        bar_length (int): This is not used; the argument is kept for
            consistency with `ProgressBar`.
        out: This is not used; the argument is kept for consistency with
            `ProgressBar`.
"""
def __init__(
self,
training_length: Any = None,
update_interval: int = 100,
bar_length: int = 50,
out: Any = sys.stdout,
):
self._training_length = training_length
if training_length is not None:
self.METHOD_NAME()
self._update_interval = update_interval
self._recent_timing: List[Tuple[float, float, float]] = []
self._total_bar = FloatProgress(
description="total", min=0, max=1, value=0, bar_style="info"
)
self._total_html = HTML()
self._epoch_bar = FloatProgress(
description="this epoch", min=0, max=1, value=0, bar_style="info"
)
self._epoch_html = HTML()
self._status_html = HTML()
self._widget = VBox(
[
HBox([self._total_bar, self._total_html]),
HBox([self._epoch_bar, self._epoch_html]),
self._status_html,
]
)
def initialize(self, manager: ExtensionsManagerProtocol) -> None:
if self._training_length is None:
t = manager._stop_trigger
if not isinstance(t, trigger.IntervalTrigger):
raise TypeError(
"cannot retrieve the training length from %s" % type(t)
)
self._training_length = t.period, t.unit
self.METHOD_NAME()
self.update(manager.iteration, manager.epoch_detail)
display(self._widget)
def __call__(self, manager: ExtensionsManagerProtocol) -> None:
length, unit = self._training_length
iteration, epoch_detail = manager.iteration, manager.epoch_detail
if unit == "iteration":
is_finished = iteration == length
else:
is_finished = epoch_detail == length
if iteration % self._update_interval == 0 or is_finished:
self.update(iteration, epoch_detail)
def finalize(self, manager: ExtensionsManagerProtocol) -> None:
if self._total_bar.value != 1:
self._total_bar.bar_style = "warning"
self._epoch_bar.bar_style = "warning"
@property
def widget(self) -> VBox:
return self._widget
def update(self, iteration: int, epoch_detail: float) -> None:
length, unit = self._training_length
recent_timing = self._recent_timing
now = time.time()
recent_timing.append((iteration, epoch_detail, now))
if unit == "iteration":
rate = iteration / length
else:
rate = epoch_detail / length
self._total_bar.value = rate
self._total_html.value = "{:6.2%}".format(rate)
epoch_rate = epoch_detail - int(epoch_detail)
self._epoch_bar.value = epoch_rate
self._epoch_html.value = "{:6.2%}".format(epoch_rate)
status = self._status_template.format(
iteration=iteration, epoch=int(epoch_detail)
)
if rate == 1:
self._total_bar.bar_style = "success"
self._epoch_bar.bar_style = "success"
old_t, old_e, old_sec = recent_timing[0]
span = now - old_sec
if span != 0:
speed_t = (iteration - old_t) / span
speed_e = (epoch_detail - old_e) / span
else:
speed_t = float("inf")
speed_e = float("inf")
if unit == "iteration":
estimated_time = (length - iteration) / speed_t
else:
estimated_time = (length - epoch_detail) / speed_e
estimate = "{:10.5g} iters/sec. Estimated time to finish: {}.".format(
speed_t, datetime.timedelta(seconds=estimated_time)
)
self._status_html.value = status + estimate
if len(recent_timing) > 100:
del recent_timing[0]
def METHOD_NAME(self) -> None:
self._status_template = (
"{iteration:10} iter, {epoch} epoch / %s %ss<br />"
% self._training_length
) | null |
5,893 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AccountIdentity',
'AccountManagedResource',
]
@pulumi.output_type
class AccountIdentity(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "identityIds":
suggest = "identity_ids"
elif key == "principalId":
suggest = "principal_id"
elif key == "tenantId":
suggest = "tenant_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountIdentity. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountIdentity.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountIdentity.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
identity_ids: Optional[Sequence[str]] = None,
METHOD_NAME: Optional[str] = None,
tenant_id: Optional[str] = None):
"""
:param str type: Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are `UserAssigned` and `SystemAssigned`.
:param Sequence[str] identity_ids: Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account.
> **NOTE:** This is required when `type` is set to `UserAssigned`.
:param str principal_id: The Principal ID associated with this Managed Service Identity.
:param str tenant_id: The Tenant ID associated with this Managed Service Identity.
"""
pulumi.set(__self__, "type", type)
if identity_ids is not None:
pulumi.set(__self__, "identity_ids", identity_ids)
if METHOD_NAME is not None:
pulumi.set(__self__, "principal_id", METHOD_NAME)
if tenant_id is not None:
pulumi.set(__self__, "tenant_id", tenant_id)
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of Managed Service Identity that should be configured on this Purview Account. Possible values are `UserAssigned` and `SystemAssigned`.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="identityIds")
def identity_ids(self) -> Optional[Sequence[str]]:
"""
Specifies a list of User Assigned Managed Identity IDs to be assigned to this Purview Account.
> **NOTE:** This is required when `type` is set to `UserAssigned`.
"""
return pulumi.get(self, "identity_ids")
@property
@pulumi.getter(name="principalId")
def METHOD_NAME(self) -> Optional[str]:
"""
The Principal ID associated with this Managed Service Identity.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The Tenant ID associated with this Managed Service Identity.
"""
return pulumi.get(self, "tenant_id")
@pulumi.output_type
class AccountManagedResource(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "eventHubNamespaceId":
suggest = "event_hub_namespace_id"
elif key == "resourceGroupId":
suggest = "resource_group_id"
elif key == "storageAccountId":
suggest = "storage_account_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AccountManagedResource. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AccountManagedResource.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AccountManagedResource.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
event_hub_namespace_id: Optional[str] = None,
resource_group_id: Optional[str] = None,
storage_account_id: Optional[str] = None):
"""
:param str event_hub_namespace_id: The ID of the managed event hub namespace.
:param str resource_group_id: The ID of the managed resource group.
:param str storage_account_id: The ID of the managed storage account.
"""
if event_hub_namespace_id is not None:
pulumi.set(__self__, "event_hub_namespace_id", event_hub_namespace_id)
if resource_group_id is not None:
pulumi.set(__self__, "resource_group_id", resource_group_id)
if storage_account_id is not None:
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter(name="eventHubNamespaceId")
def event_hub_namespace_id(self) -> Optional[str]:
"""
The ID of the managed event hub namespace.
"""
return pulumi.get(self, "event_hub_namespace_id")
@property
@pulumi.getter(name="resourceGroupId")
def resource_group_id(self) -> Optional[str]:
"""
The ID of the managed resource group.
"""
return pulumi.get(self, "resource_group_id")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> Optional[str]:
"""
The ID of the managed storage account.
"""
return pulumi.get(self, "storage_account_id")
| null |
5,894 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetSshPublicKeyResult',
'AwaitableGetSshPublicKeyResult',
'get_ssh_public_key',
'get_ssh_public_key_output',
]
@pulumi.output_type
class GetSshPublicKeyResult:
"""
A collection of values returned by getSshPublicKey.
"""
def __init__(__self__, id=None, name=None, public_key=None, resource_group_name=None, tags=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if public_key and not isinstance(public_key, str):
raise TypeError("Expected argument 'public_key' to be a str")
pulumi.set(__self__, "public_key", public_key)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicKey")
def public_key(self) -> str:
"""
The SSH public key used to authenticate to a virtual machine through ssh.
"""
return pulumi.get(self, "public_key")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
return pulumi.get(self, "tags")
class AwaitableGetSshPublicKeyResult(GetSshPublicKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSshPublicKeyResult(
id=self.id,
name=self.name,
public_key=self.public_key,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_ssh_public_key(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSshPublicKeyResult:
"""
Use this data source to access information about an existing SSH Public Key.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.compute.get_ssh_public_key(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this SSH Public Key.
:param str resource_group_name: The name of the Resource Group where the SSH Public Key exists.
:param Mapping[str, str] tags: A mapping of tags which should be assigned to the SSH Public Key.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['tags'] = tags
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:compute/getSshPublicKey:getSshPublicKey', __args__, opts=opts, typ=GetSshPublicKeyResult).value
return AwaitableGetSshPublicKeyResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
public_key=pulumi.get(__ret__, 'public_key'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_ssh_public_key)
def METHOD_NAME(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSshPublicKeyResult]:
"""
Use this data source to access information about an existing SSH Public Key.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.compute.get_ssh_public_key(name="existing",
resource_group_name="existing")
pulumi.export("id", example.id)
```
:param str name: The name of this SSH Public Key.
:param str resource_group_name: The name of the Resource Group where the SSH Public Key exists.
:param Mapping[str, str] tags: A mapping of tags which should be assigned to the SSH Public Key.
"""
... | null |
5,895 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration code for AFL fuzzer."""
import json
import os
import shutil
import subprocess
from fuzzers import utils
def prepare_build_environment():
"""Set environment variables used to build targets for AFL-based
fuzzers."""
cflags = ['-fsanitize-coverage=trace-pc-guard']
utils.append_flags('CFLAGS', cflags)
utils.append_flags('CXXFLAGS', cflags)
os.environ['CC'] = 'clang'
os.environ['CXX'] = 'clang++'
os.environ['FUZZER_LIB'] = '/libAFL.a'
def METHOD_NAME():
"""Build benchmark."""
prepare_build_environment()
utils.build_benchmark()
print('[post_build] Copying afl-fuzz to $OUT directory')
# Copy out the afl-fuzz binary as a build artifact.
shutil.copy('/afl/afl-fuzz', os.environ['OUT'])
def get_stats(output_corpus, fuzzer_log): # pylint: disable=unused-argument
"""Gets fuzzer stats for AFL."""
# Get a dictionary containing the stats AFL reports.
stats_file = os.path.join(output_corpus, 'fuzzer_stats')
with open(stats_file, encoding='utf-8') as file_handle:
stats_file_lines = file_handle.read().splitlines()
stats_file_dict = {}
for stats_line in stats_file_lines:
key, value = stats_line.split(': ')
stats_file_dict[key.strip()] = value.strip()
# Report to FuzzBench the stats it accepts.
stats = {'execs_per_sec': float(stats_file_dict['execs_per_sec'])}
return json.dumps(stats)
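# Illustration with hypothetical stats contents (not taken from a real run):
# a fuzzer_stats file containing the line "execs_per_sec     : 1432.86" makes
# get_stats() return the JSON string '{"execs_per_sec": 1432.86}'.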
def prepare_fuzz_environment(input_corpus):
"""Prepare to fuzz with AFL or another AFL-based fuzzer."""
# Tell AFL to not use its terminal UI so we get usable logs.
os.environ['AFL_NO_UI'] = '1'
# Skip AFL's CPU frequency check (fails on Docker).
os.environ['AFL_SKIP_CPUFREQ'] = '1'
# No need to bind affinity to one core, Docker enforces 1 core usage.
os.environ['AFL_NO_AFFINITY'] = '1'
# AFL will abort on startup if the core pattern sends notifications to
# external programs. We don't care about this.
os.environ['AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES'] = '1'
# Don't exit when crashes are found. This can happen when corpus from
# OSS-Fuzz is used.
os.environ['AFL_SKIP_CRASHES'] = '1'
# Shuffle the queue
os.environ['AFL_SHUFFLE_QUEUE'] = '1'
# AFL needs at least one non-empty seed to start.
utils.create_seed_file_for_empty_corpus(input_corpus)
def check_skip_det_compatible(additional_flags):
""" Checks if additional flags are compatible with '-d' option"""
# AFL refuses to take in '-d' with '-M' or '-S' options for parallel mode.
# (cf. https://github.com/google/AFL/blob/8da80951/afl-fuzz.c#L7477)
if '-M' in additional_flags or '-S' in additional_flags:
return False
return True
def run_afl_fuzz(input_corpus,
output_corpus,
target_binary,
additional_flags=None,
hide_output=False):
"""Run afl-fuzz."""
# Spawn the afl fuzzing process.
print('[run_afl_fuzz] Running target with afl-fuzz')
command = [
'./afl-fuzz',
'-i',
input_corpus,
'-o',
output_corpus,
# Use no memory limit as ASAN doesn't play nicely with one.
'-m',
'none',
'-t',
'1000+', # Use same default 1 sec timeout, but add '+' to skip hangs.
]
    # Use '-d' to skip deterministic mode, as long as it is compatible with
# additional flags.
if not additional_flags or check_skip_det_compatible(additional_flags):
command.append('-d')
if additional_flags:
command.extend(additional_flags)
dictionary_path = utils.get_dictionary_path(target_binary)
if dictionary_path:
command.extend(['-x', dictionary_path])
command += [
'--',
target_binary,
        # Pass INT_MAX to afl to maximize the number of persistent loops it
# performs.
'2147483647'
]
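    # For illustration only (paths are assumed): the final invocation looks
    # like
    #   ./afl-fuzz -i corpus_in -o corpus_out -m none -t 1000+ -d -- ./target 2147483647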
print('[run_afl_fuzz] Running command: ' + ' '.join(command))
output_stream = subprocess.DEVNULL if hide_output else None
subprocess.check_call(command, stdout=output_stream, stderr=output_stream)
def fuzz(input_corpus, output_corpus, target_binary):
"""Run afl-fuzz on target."""
prepare_fuzz_environment(input_corpus)
run_afl_fuzz(input_corpus, output_corpus, target_binary) | null |
5,896 | import struct
from io import BytesIO
# for the record:
# - int32 = signed varint
# - int64 = signed varint
# - enum = signed varint
# - uint32 = unsigned varint
# - uint64 = unsigned varint
# - sint32 = zigzag signed varint
# - sint64 = zigzag signed varint
# https://developers.google.com/protocol-buffers/docs/encoding
# 0 Varint int32, int64, uint32, uint64, sint32, sint64, bool, enum
# 1 64-bit fixed64, sfixed64, double
# 2 Length-delimited string, bytes, embedded messages, packed repeated fields
# 3 Start group groups (deprecated)
# 4 End group groups (deprecated)
# 5 32-bit fixed32, sfixed32, float
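# Worked example (added for clarity; values follow the standard protobuf
# docs example): the unsigned value 300 (0b1_0010_1100) is serialized as the
# two varint bytes 0xAC 0x02 -- each byte carries 7 payload bits, lowest
# group first, and the high bit marks that another byte follows. Zigzag
# encoding maps signed values onto unsigned ones as 0->0, -1->1, 1->2,
# -2->3, ... so small negative numbers stay small on the wire, whereas plain
# negative int32/int64 values are always serialized as a full 10-byte varint.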
class PrimativeType:
@staticmethod
def decode(data):
raise NotImplementedError()
class type_double(PrimativeType):
wire_type = 1
@staticmethod
def decode(data):
# data = 64-bit
val, = struct.unpack("<d", data)
return val
class type_float(PrimativeType):
wire_type = 5
@staticmethod
def decode(data):
# data = 32-bit
val, = struct.unpack("<f", data)
return val
class type_int32(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = signed varint
val = Message.signed_to_long(data, 32)
return int(val)
class type_int64(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = signed varint
val = Message.signed_to_long(data, 64)
return int(val)
class type_uint32(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = unsigned varint
return int(data)
class type_uint64(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = unsigned varint
return int(data)
class type_sint32(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = zigzag signed varint
val = Message.signed_to_long(data, 32)
val = Message.zigzag_to_long(val)
return int(val)
class type_sint64(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = zigzag signed varint
val = Message.signed_to_long(data, 64)
val = Message.zigzag_to_long(val)
return int(val)
class type_fixed32(PrimativeType):
wire_type = 5
@staticmethod
def decode(data):
# data = 32-bit
val, = struct.unpack("<I", data)
return int(val)
class type_fixed64(PrimativeType):
wire_type = 1
@staticmethod
def decode(data):
# data = 64-bit
val, = struct.unpack("<Q", data)
return int(val)
class type_sfixed32(PrimativeType):
wire_type = 5
@staticmethod
def decode(data):
# data = 32-bit
val, = struct.unpack("<i", data)
return int(val)
class type_sfixed64(PrimativeType):
wire_type = 1
@staticmethod
def decode(data):
# data = 64-bit
val, = struct.unpack("<q", data)
return int(val)
class type_bool(PrimativeType):
wire_type = 0
@staticmethod
def decode(data):
# data = signed varint
return data != 0
class type_string(PrimativeType):
wire_type = 2
@staticmethod
def decode(data):
# data = binary string
return data
class type_bytes(PrimativeType):
wire_type = 2
@staticmethod
def decode(data):
# data = binary string
return data
# probs best to go with int64
type_enum = type_int64
class Message:
@staticmethod
def read_varint(stream):
res = 0
i = 0
while 1:
c = ord(stream.read(1))
res |= ((c & 127) << (i * 7))
if c & 128 == 0:
break
i += 1
return res
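    # Illustrative check: read_varint(BytesIO(b"\xac\x02")) == 300 (the
    # continuation bit of 0xAC is set, so a second byte is consumed).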
@staticmethod
def signed_to_long(x, bits):
" converts a previously read signed varint into a long "
if x > 0x7fffffffffffffff:
x -= (1 << 64)
x |= ~((1 << bits) - 1)
else:
x &= (1 << bits) - 1
return x
# zigzag conversion from google
# https://github.com/google/protobuf/blob/master/python/google/protobuf/internal/wire_format.py
@staticmethod
def zigzag_to_long(x):
if not x & 0x1:
return x >> 1
return (x >> 1) ^ (~0)
@staticmethod
def read_tag(stream):
var = Message.read_varint(stream)
field_number = var >> 3
wire_type = var & 7
if wire_type == 0:
data = Message.read_varint(stream)
elif wire_type == 1:
data = stream.read(8)
elif wire_type == 2:
length = Message.read_varint(stream)
data = stream.read(length)
elif wire_type in (3, 4):
raise NotImplementedError("groups are deprecated")
elif wire_type == 5:
data = stream.read(4)
else:
raise TypeError("unknown wire type (%d)" % wire_type)
return (field_number, wire_type, data)
def METHOD_NAME(self, _id):
for _, i in enumerate(self.__lookup__):
if i[3] == _id:
return i
def decode(self, s: bytes):
f = BytesIO(s)
while f.tell() < len(s):
field_number, _, data = self.read_tag(f)
field = self.METHOD_NAME(field_number)
if not field:
continue
field_multiplicity, field_type, field_name, _ = field
if issubclass(field_type, PrimativeType):
value = field_type.decode(data)
elif issubclass(field_type, Message):
value = field_type()
value.decode(data)
else:
raise TypeError("field type must be a subclass of PrimativeType or Message")
if field_multiplicity == "repeated":
if getattr(self, field_name) is None:
# if not isinstance(getattr(self, field_name), list):
# ? what if the attribute was already filled with data ?
setattr(self, field_name, [])
getattr(self, field_name).append(value)
else:
setattr(self, field_name, value)
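    # A hypothetical subclass (illustration only, not part of the original
    # file) describes its wire format via __lookup__ entries of the form
    # (multiplicity, type, attribute name, field number):
    #
    #     class Person(Message):
    #         __lookup__ = [
    #             ("optional", type_string, "name", 1),
    #             ("repeated", type_int32, "ids", 2),
    #         ]
    #         def __init__(self):
    #             self.name = None
    #             self.ids = None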
def __lookup__(self):
return | null |
5,897 | # SPDX-FileCopyrightText: Copyright DB Netz AG and the capellambse contributors
# SPDX-License-Identifier: Apache-2.0
# pylint: disable=abstract-method, useless-suppression
# For some reason, pylint in Github CI didn't get the memo that these aren't
# actually abstract methods. Other pylint installations seem to agree that
# implementing these methods isn't necessary. So we just ignore the warning
# about that here.
# TODO Revisit this decision some time in the future
from __future__ import annotations
import collections.abc as cabc
import itertools
import os
import pathlib
import re
import typing as t
import urllib.parse
import requests
from capellambse import helpers, loader
from . import abc
class DownloadStream(t.BinaryIO):
__stream: cabc.Iterator[bytes]
__buffer: memoryview
def __init__(
self, session: requests.Session, url: str, chunk_size: int = 1024**2
) -> None:
self.url = url
self.chunk_size = chunk_size
response = session.get(self.url, stream=True)
if response.status_code == 404:
raise FileNotFoundError(url)
response.raise_for_status()
self.__stream = response.iter_content(
self.chunk_size, decode_unicode=False
)
self.__buffer = memoryview(b"")
def __enter__(self) -> DownloadStream:
return self
def __exit__(self, *args: t.Any) -> None:
self.close()
def read(self, n: int = -1) -> bytes:
if n == -1:
return b"".join(itertools.chain((self.__buffer,), self.__stream))
if not self.__buffer:
try:
self.__buffer = memoryview(next(self.__stream))
except StopIteration:
return b""
chunk = bytes(self.__buffer[:n])
self.__buffer = self.__buffer[n:]
return chunk
def readable(self) -> bool:
return True
def METHOD_NAME(self, s: bytes | bytearray) -> int: # type: ignore[override]
raise TypeError("Cannot write to a read-only stream")
def writable(self) -> bool:
return False
def close(self) -> None:
del self.__stream
del self.__buffer
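# Usage sketch for DownloadStream (the URL is illustrative): stream a remote
# file in chunks instead of loading it fully into memory:
#
#     with DownloadStream(requests.Session(), "https://example.com/model.aird") as f:
#         first_kb = f.read(1024)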
class HTTPFileHandler(abc.FileHandler):
"""A remote file handler that fetches files using HTTP GET."""
def __init__(
self,
path: str | os.PathLike,
username: str | None = None,
password: str | None = None,
*,
headers: (
dict[str, str]
| requests.structures.CaseInsensitiveDict[str]
| None
) = None,
subdir: str | pathlib.PurePosixPath = "/",
) -> None:
"""Connect to a remote server through HTTP or HTTPS.
This file handler supports three ways of specifying a URL:
1. If a plain URL is passed, the requested file name is appended
after a forward slash ``/``.
2. If the URL contains ``%s``, it will be replaced by the
requested file name, instead of appending it at the end. This
allows for example to pass query parameters after the file
name. File names are percent-escaped as implemented by
``urllib.parse.quote``.
3. The sequence ``%q`` is replaced similar to ``%s``, but the
forward slash ``/`` is not considered a safe character and is
percent-escaped as well.
Examples: When requesting the file name ``demo/my model.aird``,
...
- ``https://example.com/~user`` as ``path`` results in the URL
``https://example.com/~user/demo/my%20model.aird``
- ``https://example.com/~user/%s`` results in
``https://example.com/~user/demo/my%20model.aird``
- ``https://example.com/?file=%q`` results in
``https://example.com/?file=demo%2Fmy%20model.aird``
Note that the file name that is inserted into the URL will never
start with a forward slash. This means that a URL like
``https://example.com%s`` will not work; you need to hard-code
the slash at the appropriate place.
This also applies to the ``%q`` escape. If the server expects
the file name argument to start with a slash, hard-code a
percent-escaped slash in the URL. For example, instead of
``...?file=%q`` use ``...?file=%2F%q``.
Parameters
----------
path
The base URL to fetch files from. Must start with
``http://`` or ``https://``. See above for how to specify
complex URLs.
username
The username for HTTP Basic Auth.
password
The password for HTTP Basic Auth.
headers
Additional HTTP headers to send to the server.
subdir
Prepend this path to all requested files. It is subject to
the same file name escaping rules explained above.
"""
if not isinstance(path, str):
raise TypeError(
"HTTPFileHandler requires a str path, not"
f" {type(path).__name__}"
)
if bool(username) != bool(password):
raise ValueError(
"Either both username and password must be given, or neither"
)
if "%s" not in path and "%q" not in path:
path = path.rstrip("/") + "/%s"
super().__init__(path, subdir=subdir)
self.session = requests.Session()
self.session.headers.update(headers or {})
if username and password:
self.session.auth = (username, password)
def get_model_info(self) -> loader.ModelInfo:
assert isinstance(self.path, str)
parts = urllib.parse.urlparse(self.path)
return loader.ModelInfo(
title=parts.path.rsplit("/", maxsplit=1)[-1],
url=self.path,
)
def open(
self,
filename: str | pathlib.PurePosixPath,
mode: t.Literal["r", "rb", "w", "wb"] = "rb",
) -> t.BinaryIO:
if "w" in mode:
raise NotImplementedError("Cannot upload to HTTP(S) locations")
assert isinstance(self.path, str)
fname = self.subdir / helpers.normalize_pure_path(filename)
fname_str = str(fname).lstrip("/")
replace = {
"%s": urllib.parse.quote(fname_str, safe="/"),
"%q": urllib.parse.quote(fname_str, safe=""),
}
url = re.sub("%[sq]", lambda m: replace[m.group(0)], self.path)
assert url != self.path
return DownloadStream( # type: ignore[abstract] # false-positive
self.session, url
)
def write_transaction(self, **kw: t.Any) -> t.NoReturn:
raise NotImplementedError(
"Write transactions for HTTP(S) are not implemented"
) | null |
5,898 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
import unittest
from mantid import AnalysisDataServiceImpl, config, simpleapi
class DakotaChiSquaredTest(unittest.TestCase):
def makeFiles(self):
simpleapi.CreateWorkspace(OutputWorkspace="data", DataX="1,2,3,4,5", DataY="1,0,1,4,4", DataE="1,0,1,2,2")
simpleapi.CreateWorkspace(OutputWorkspace="sim", DataX="1,2,3,4,5", DataY="1,1,1,1,1", DataE="0,0,0,0,0")
simpleapi.CreateWorkspace(OutputWorkspace="simwrong", DataX="1,2,3,4", DataY="1,1,1,1", DataE="0,0,0,0")
self.datafile = os.path.join(config.getString("defaultsave.directory"), "DakotaChiSquared_data.nxs")
self.simfile = os.path.join(config.getString("defaultsave.directory"), "DakotaChiSquared_sim.nxs")
self.simwrongfile = os.path.join(config.getString("defaultsave.directory"), "DakotaChiSquared_simwrong.nxs")
self.chifile = os.path.join(config.getString("defaultsave.directory"), "DakotaChiSquared_chi.txt")
simpleapi.SaveNexus("data", self.datafile)
simpleapi.SaveNexus("sim", self.simfile)
simpleapi.SaveNexus("simwrong", self.simwrongfile)
ads = AnalysisDataServiceImpl.Instance()
ads.remove("data")
ads.remove("sim")
ads.remove("simwrong")
def cleanup(self):
if os.path.exists(self.datafile):
os.remove(self.datafile)
if os.path.exists(self.simfile):
os.remove(self.simfile)
if os.path.exists(self.simwrongfile):
os.remove(self.simwrongfile)
if os.path.exists(self.chifile):
os.remove(self.chifile)
def test_wrongType(self):
self.makeFiles()
try:
simpleapi.DakotaChiSquared(self.datafile, "CNCS_7860_event.nxs", self.chifile)
except RuntimeError as e:
self.assertNotEqual(str(e).find("Wrong workspace type for calculated file"), -1)
except:
assert False, "Raised the wrong exception type"
else:
assert False, "Didn't raise any exception"
try:
simpleapi.DakotaChiSquared("CNCS_7860_event.nxs", self.simfile, self.chifile)
except RuntimeError as e:
self.assertNotEqual(str(e).find("Wrong workspace type for data file"), -1)
except:
assert False, "Raised the wrong exception type"
else:
assert False, "Didn't raise any exception"
self.cleanup()
def test_wrongSize(self):
self.makeFiles()
try:
simpleapi.DakotaChiSquared(self.datafile, self.simwrongfile, self.chifile)
except RuntimeError as e:
self.assertNotEqual(str(e).find("The file sizes are different"), -1)
except:
assert False, "Raised the wrong exception type"
else:
assert False, "Didn't raise any exception"
self.cleanup()
def METHOD_NAME(self):
self.makeFiles()
try:
simpleapi.DakotaChiSquared(self.datafile, self.simfile, self.chifile)
f = open(self.chifile, "r")
chistr = f.read()
self.assertEqual(chistr, "4.5 obj_fn\n")
f.close()
except:
assert False, "Raised an exception"
self.cleanup()
def test_output(self):
self.makeFiles()
try:
alg = simpleapi.DakotaChiSquared(self.datafile, self.simfile, self.chifile)
self.assertEqual(len(alg), 2)
self.assertEqual(alg[0], 4.5)
self.assertEqual(alg[1].name(), "alg")
self.assertEqual(alg[1].blocksize(), 5)
self.assertEqual(alg[1].getNumberHistograms(), 1)
self.assertEqual(alg[1].dataY(0)[3], 1.5)
ads = AnalysisDataServiceImpl.Instance()
ads.remove("alg")
alg1 = simpleapi.DakotaChiSquared(self.datafile, self.simfile, self.chifile, ResidualsWorkspace="res")
self.assertEqual(alg1[0], 4.5)
self.assertEqual(alg1[1].name(), "res")
ads.remove("res")
except:
assert False, "Raised an exception"
self.cleanup()
if __name__ == "__main__":
unittest.main() | null |
5,899 | """
test_pythonmakereplica.py
Module for testing the python implementation of make replica
"""
from copy import deepcopy
import numpy as np
from pandas.testing import assert_frame_equal, assert_series_equal
import pytest
from validphys.api import API
from validphys.tests.conftest import DATA
from validphys.tests.test_covmats import CORR_DATA
SEED = 123456
# Datasets to be tested
SINGLE_SYS_DATASETS = [
{"dataset": "DYE886R"},
{"dataset": "D0ZRAP", "cfac": ["QCD"]},
{"dataset": "NMC"},
{"dataset": "NMCPD"},
{"dataset": "ATLASZPT8TEVMDIST", "cfac": ["QCD"]},
{"dataset": "ATLASWZRAP36PB"},
{"dataset": "ATLASZHIGHMASS49FB"},
{"dataset": "CMSWEASY840PB"},
{"dataset": "CMSWMASY47FB"}
]
@pytest.mark.parametrize("use_cuts", ["nocuts", "internal"])
@pytest.mark.parametrize("dataset_inputs", [DATA, CORR_DATA, SINGLE_SYS_DATASETS])
def test_commondata_unchanged(data_config, dataset_inputs, use_cuts):
"""Load the commondata, then generate some pseudodata using make replica
Check that the following attributes of the commondata have not been
modified: central_values, commondata_table, systematics_table.
"""
config = dict(data_config)
config["dataset_inputs"] = dataset_inputs
config["use_cuts"] = use_cuts
config["replica_mcseed"] = SEED
ld_cds = API.dataset_inputs_loaded_cd_with_cuts(**config)
# keep a copy of all dataframes/series pre make replica
pre_mkrep_cvs = [deepcopy(cd.central_values) for cd in ld_cds]
pre_mkrep_sys_tabs = [deepcopy(cd.systematics_table) for cd in ld_cds]
pre_mkrep_cd_tabs = [deepcopy(cd.commondata_table) for cd in ld_cds]
API.make_replica(**config)
for post_mkrep_cd, pre_mkrep_cv in zip(ld_cds, pre_mkrep_cvs):
assert_series_equal(post_mkrep_cd.central_values, pre_mkrep_cv)
for post_mkrep_cd, pre_mkrep_sys_tab in zip(ld_cds, pre_mkrep_sys_tabs):
assert_frame_equal(post_mkrep_cd.systematics_table, pre_mkrep_sys_tab)
for post_mkrep_cd, pre_mkrep_cd_tab in zip(ld_cds, pre_mkrep_cd_tabs):
assert_frame_equal(post_mkrep_cd.commondata_table, pre_mkrep_cd_tab)
@pytest.mark.parametrize("use_cuts", ["nocuts", "internal"])
@pytest.mark.parametrize("dataset_inputs", [DATA, CORR_DATA, SINGLE_SYS_DATASETS])
def test_pseudodata_seeding(data_config, dataset_inputs, use_cuts):
"""Check that using a seed reproduces the pseudodata. Note that this also
will check that the commondata hasn't been modified since reproducing
the same commondata requires that the commondata is unchanged and that
the random numbers are generated and used identically.
"""
config = dict(data_config)
config["dataset_inputs"] = dataset_inputs
config["use_cuts"] = use_cuts
config["replica_mcseed"] = SEED
rep_1 = API.make_replica(**config)
rep_2 = API.make_replica(**config)
np.testing.assert_allclose(rep_1, rep_2)
@pytest.mark.parametrize("use_cuts", ["nocuts", "internal"])
@pytest.mark.parametrize("dataset_inputs", [DATA, CORR_DATA, SINGLE_SYS_DATASETS])
def test_pseudodata_has_correct_ndata(data_config, dataset_inputs, use_cuts):
"""Check that we get the correct ndata when generating pseudodata"""
config = dict(data_config)
config["dataset_inputs"] = dataset_inputs
config["use_cuts"] = use_cuts
config["replica_mcseed"] = SEED
ld_cds = API.dataset_inputs_loaded_cd_with_cuts(**config)
rep = API.make_replica(**config)
ndata = np.sum([cd.ndata for cd in ld_cds])
assert len(rep) == ndata
@pytest.mark.parametrize("use_cuts", ["nocuts", "internal"])
@pytest.mark.parametrize("dataset_inputs", [DATA, CORR_DATA, SINGLE_SYS_DATASETS])
def METHOD_NAME(data_config, dataset_inputs, use_cuts):
"""Check that when genrep is set to False replicas are not generated."""
config = dict(data_config)
config["dataset_inputs"] = dataset_inputs
config["use_cuts"] = use_cuts
config["replica_mcseed"] = SEED
config["genrep"] = False
ld_cds = API.dataset_inputs_loaded_cd_with_cuts(**config)
not_replica = API.make_replica(**config)
central_data = np.concatenate([d.central_values for d in ld_cds])
np.testing.assert_allclose(not_replica, central_data) | null |