id (int64, 0-6k) | code (string, 4k-8k) | code_compressed (null)
---|---|---|
5,600 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Extract named resources from the cylc.flow package."""
from pathlib import Path
from random import shuffle
import shutil
import sys
from typing import Optional
import cylc.flow
from cylc.flow import LOG
from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
from cylc.flow.exceptions import InputError
from cylc.flow.wallclock import get_current_time_string
RESOURCE_DIR = Path(cylc.flow.__file__).parent / 'etc'
TUTORIAL_DIR = RESOURCE_DIR / 'tutorial'
# {resource: brief description}
RESOURCE_NAMES = {
'syntax/cylc-mode.el': 'Emacs syntax highlighting.',
'syntax/cylc.lang': 'Gedit (gtksourceview) syntax highlighting.',
'syntax/cylc.vim': 'Vim syntax highlighting.',
'syntax/cylc.xml': 'Kate syntax highlighting.',
'cylc-completion.bash': 'Bash auto-completion for Cylc commands.',
'cylc': 'Cylc wrapper script.',
}
API_KEY = 'api-key'
def METHOD_NAME(write=print, headers=True):
"""Print resource names to stdout."""
tutorials = [
path.relative_to(RESOURCE_DIR)
for path in TUTORIAL_DIR.iterdir()
if path.is_dir()
]
if headers:
write('Resources:')
max_len = max(len(res) for res in RESOURCE_NAMES)
for resource, desc in RESOURCE_NAMES.items():
write(f' {resource} {" " * (max_len - len(resource))} # {desc}')
if headers:
write('\nTutorials:')
for tutorial in tutorials:
write(f' {tutorial}')
write(f' {API_KEY}')
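# Illustrative shape of the listing printed above (resource names are real
# RESOURCE_NAMES entries; the tutorial name shown is an assumed example):
#
#     Resources:
#       syntax/cylc.vim        # Vim syntax highlighting.
#       cylc-completion.bash   # Bash auto-completion for Cylc commands.
#       ...
#     Tutorials:
#       tutorial/<tutorial-name>
#       api-key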
def path_is_tutorial(src: Path) -> bool:
"""Returns True if the src path is in the tutorial directory."""
try:
src.relative_to(TUTORIAL_DIR)
except ValueError:
return False
return True
def get_resources(resource: str, tgt_dir: Optional[str]):
"""Extract cylc.flow resources and write them to a target directory.
Arguments:
resource: path relative to RESOURCE_DIR.
tgt_dir: Where to put extracted resources, created if necessary.
"""
# get the resource path
resource_path = Path(resource)
if resource in ('api-key', 'tutorial/api-key'):
print(get_api_key())
return
src = RESOURCE_DIR / resource_path
if not src.exists():
raise InputError(
f'No such resource: {resource}.'
'\nRun `cylc get-resources --list` for resource names.'
)
is_tutorial = path_is_tutorial(src)
# get the target path
if not tgt_dir:
if is_tutorial:
# this is a tutorial => use the primary source dir
_tgt_dir = Path(glbl_cfg().get(['install', 'source dirs'])[0])
else:
# this is a regular resource => use $PWD
_tgt_dir = Path.cwd()
else:
_tgt_dir = Path(tgt_dir).resolve()
tgt = _tgt_dir / resource_path.name
tgt = tgt.expanduser()
tgt = tgt.resolve()
# extract resources
extract_resource(src, tgt, is_tutorial)
if is_tutorial:
set_api_key(tgt)
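# Minimal usage sketch (not part of the original module; the tutorial name is an
# assumed example - real names come from `cylc get-resources --list`):
#     get_resources('syntax/cylc.vim', None)          # extract into $PWD
#     get_resources('tutorial/runtime-introduction', 'cylc-src')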
def _backup(tgt: Path) -> None:
"""Make a timestamped backup of a dir or file."""
tstamp = get_current_time_string(use_basic_format=True)
backup = Path(tgt).parent / (tgt.name + f'.{tstamp}')
LOG.warning(
'Replacing an existing cylc-tutorials folder which will'
f' be copied to {backup}'
)
# NOTE: shutil interfaces don't fully support Path objects on all
# Python versions
shutil.move(str(tgt), str(backup))
def extract_resource(src: Path, tgt: Path, is_tutorial: bool = False) -> None:
"""Extract src into tgt.
NOTE: src can be a dir or a file.
"""
LOG.info(f"Extracting {src.relative_to(RESOURCE_DIR)} to {tgt}")
if is_tutorial and tgt.exists():
# target exists, back up the old copy
_backup(tgt)
# create the target directory
try:
tgt.parent.mkdir(parents=True, exist_ok=True)
# NOTE: shutil interfaces don't fully support Path objects on all
# Python versions
if src.is_dir():
shutil.copytree(str(src), str(tgt))
else:
shutil.copyfile(str(src), str(tgt))
except IsADirectoryError as exc:
LOG.error(
f'Cannot extract file {exc.filename} as there is an '
'existing directory with the same name'
)
sys.exit(1)
except FileExistsError as exc:
LOG.error(
f'Cannot extract directory {exc.filename} as there is an '
'existing file with the same name'
)
sys.exit(1)
def get_api_key() -> str:
"""Return a DataPoint API key for tutorial use.
Picks an API key from the file "api-keys" at random so as to spread the
load over a larger number of keys to prevent hitting the cap with group
sessions.
"""
keys = []
with open((TUTORIAL_DIR / 'api-keys'), 'r') as api_keys:
for api_key in api_keys:
keys.append(api_key)
shuffle(keys)
return keys[0].strip()
def set_api_key(tgt):
"""Replace a placeholder with a real API key.
Replaces the placeholder DATAPOINT_API_KEY with a key chosen at random
from the file api-keys.
"""
# get the api key
api_key = get_api_key()
# go through all the top level files
for path in tgt.glob('*'):
if not path.is_dir():
# write the file out one line at a time to a temp file
tmp_path = path.parent / (path.name + '.tmp')
with open(path, 'rb') as _src, open(tmp_path, 'wb+') as _tmp:
# NOTE: open the file in bytes mode for safety
# (prevents decode errors surfacing here)
for line in _src:
_tmp.write(
# perform the replacement line by line
# (some things are easier with sed!)
line.replace(
b'DATAPOINT_API_KEY',
api_key.encode(),
)
)
# then move the tmpfile over the original
# NOTE: shutil interfaces don't fully support Path objects on all
# Python versions
path.unlink()
shutil.move(str(tmp_path), str(path)) | null |
5,601 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import (
logger,
AverageLogData,
CreateEmptyTableWorkspace,
GroupWorkspaces,
DeleteWorkspace,
DeleteTableRows,
RenameWorkspace,
)
from mantidqtinterfaces.Engineering.gui.engineering_diffraction.settings.settings_helper import get_setting
from mantidqtinterfaces.Engineering.gui.engineering_diffraction.tabs.common import output_settings
from mantid.api import AnalysisDataService as ADS
from os import path
from numpy import full, nan, max
def write_table_row(ws_table, row, irow):
if irow > ws_table.rowCount() - 1:
ws_table.setRowCount(irow + 1)
[ws_table.setCell(irow, icol, row[icol]) for icol in range(0, len(row))]
def _generate_workspace_name(filepath: str, suffix: str) -> str:
wsname = path.splitext(path.split(filepath)[1])[0] + suffix
return wsname
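# For example (hypothetical file path), _generate_workspace_name("/data/ENGINX_12345.nxs", "_logs")
# returns "ENGINX_12345_logs": the directory and extension are dropped and the suffix appended.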
class SampleLogsGroupWorkspace(object):
def __init__(self, suffix: str):
self._log_names = []
self._log_workspaces = None # GroupWorkspace
self._log_values = dict() # {ws_name: {log_name: [avg, er]} }
self._suffix = suffix
self._run_info_name = "run_info" + self._suffix
def create_log_workspace_group(self):
# run information table
run_info = self.make_runinfo_table()
self._log_workspaces = GroupWorkspaces([run_info], OutputWorkspace="logs" + self._suffix)
# one table per log
logs = get_setting(output_settings.INTERFACES_SETTINGS_GROUP, output_settings.ENGINEERING_PREFIX, "logs")
if logs:
self._log_names = logs.split(",")
for log in self._log_names:
log_table_ws = self.make_log_table(log)
self._log_workspaces.add(log_table_ws.name())
def make_log_table(self, log):
ws_log = CreateEmptyTableWorkspace(OutputWorkspace=log + self._suffix)
ws_log.addColumn(type="float", name="avg")
ws_log.addColumn(type="float", name="stdev")
return ws_log
def make_runinfo_table(self):
run_info = CreateEmptyTableWorkspace(OutputWorkspace=self._run_info_name)
run_info.addColumn(type="str", name="Instrument")
run_info.addColumn(type="int", name="Run")
run_info.addColumn(type="str", name="Bank")
run_info.addColumn(type="float", name="uAmps")
run_info.addColumn(type="str", name="Title")
return run_info
def update_log_workspace_group(self, data_workspaces=None):
# both ws and name are needed in the event a ws is renamed and ws.name() is no longer correct
if not data_workspaces:
self.delete_logs()
return
if not self._log_workspaces:
self.create_log_workspace_group()
else:
for log in self._log_names:
if not ADS.doesExist(log + self._suffix):
log_table_ws = self.make_log_table(log)
self._log_workspaces.add(log_table_ws.name())
if not ADS.doesExist(self._run_info_name):
self.make_runinfo_table()
self._log_workspaces.add(self._run_info_name)
# update log tables
self.METHOD_NAME()
for irow, (ws_name, ws) in enumerate(data_workspaces.get_loaded_ws_dict().items()):
try:
self.add_log_to_table(ws_name, ws, irow)
except Exception as e:
logger.warning(f"Unable to output log workspaces for workspace {ws_name}: " + str(e))
def add_log_to_table(self, ws_name, ws, irow):
# both ws and name are needed in the event a ws is renamed and ws.name() is no longer correct
# make dict for run if doesn't exist
if ws_name not in self._log_values:
self._log_values[ws_name] = dict()
# add run info
run = ws.getRun()
row = [
ws.getInstrument().getFullName(),
ws.getRunNumber(),
str(run.getProperty("bankid").value),
run.getProtonCharge(),
ws.getTitle(),
]
write_table_row(ADS.retrieve(self._run_info_name), row, irow)
# add log data - loop over existing log workspaces not logs in settings as these might have changed
currentRunLogs = [l.name for l in run.getLogData()]
nullLogValue = full(2, nan) # default nan if can't read/average log data
if run.getProtonCharge() > 0 and "proton_charge" in currentRunLogs:
for log in self._log_names:
if log in self._log_values[ws_name]:
avg, stdev = self._log_values[ws_name][log] # already averaged
elif log in currentRunLogs:
avg, stdev = AverageLogData(ws_name, LogName=log, FixZero=False)
else:
avg, stdev = nullLogValue
self._log_values[ws_name][log] = [avg, stdev] # update model dict (even if nan)
else:
self._log_values[ws_name] = {log: nullLogValue for log in self._log_names}
logger.warning(f"{ws.name()} does not contain a proton charge log - log values cannot be averaged.")
# write log values to table (nan if log could not be averaged)
for log, avg_and_stdev in self._log_values[ws_name].items():
write_table_row(ADS.retrieve(log + self._suffix), avg_and_stdev, irow)
self.update_log_group_name()
def remove_log_rows(self, row_numbers):
DeleteTableRows(TableWorkspace=self._log_workspaces, Rows=list(row_numbers))
self.update_log_group_name()
def METHOD_NAME(self):
for ws in self._log_workspaces:
ws.setRowCount(0)
def delete_logs(self):
if self._log_workspaces:
ws_name = self._log_workspaces.name()
self._log_workspaces = None
DeleteWorkspace(ws_name)
def update_log_group_name(self):
name = self._generate_log_group_name()
if not name:
self.delete_logs()
return
RenameWorkspace(InputWorkspace=self._log_workspaces.name(), OutputWorkspace=name)
def _generate_log_group_name(self) -> str:
run_info = ADS.retrieve(self._run_info_name)
if run_info.rowCount() > 0:
runs = run_info.column("Run")
name = f"{run_info.row(0)['Instrument']}_{min(runs)}-{max(runs)}_logs"
return name + self._suffix
return ""
def get_log_values(self):
return self._log_values
def get_log_workspaces(self):
return self._log_workspaces
def update_log_value(self, new_key, old_key):
self._log_values[new_key] = self._log_values.pop(old_key)
def clear_log_workspaces(self):
self._log_workspaces = None | null |
5,602 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore.ops as P
from mindspore import Tensor
from mindspore.ops import functional as F
from mindspore.ops.functional import vmap
from mindspore.common.api import jit
class NetDiag(nn.Cell):
def __init__(self):
super(NetDiag, self).__init__()
self.diag = P.Diag()
def construct(self, x):
return self.diag(x)
class NetDiagWithDynamicShape(nn.Cell):
def __init__(self):
super(NetDiagWithDynamicShape, self).__init__()
self.diag = P.Diag()
self.unique = P.Unique()
def construct(self, x):
x, _ = self.unique(x)
return self.diag(x)
def diag_1d(dtype):
for mode in [context.PYNATIVE_MODE, context.GRAPH_MODE]:
context.set_context(mode=mode, device_target="GPU")
x = Tensor(np.array([1, 2, 5]).astype(dtype))
diag_1d_net = NetDiag()
output = diag_1d_net(x)
expect = np.array([[1, 0, 0],
[0, 2, 0],
[0, 0, 5]]).astype(dtype)
assert (output.asnumpy() == expect).all()
def diag_2d(dtype):
for mode in [context.PYNATIVE_MODE, context.GRAPH_MODE]:
context.set_context(mode=mode, device_target="GPU")
x = Tensor(np.array([[1, 2, 3],
[4, 5, 6]]).astype(dtype))
diag_2d_net = NetDiag()
output = diag_2d_net(x)
expect = np.array([[[[1, 0, 0],
[0, 0, 0]],
[[0, 2, 0],
[0, 0, 0]],
[[0, 0, 3],
[0, 0, 0]]],
[[[0, 0, 0],
[4, 0, 0]],
[[0, 0, 0],
[0, 5, 0]],
[[0, 0, 0],
[0, 0, 6]]]]).astype(dtype)
assert (output.asnumpy() == expect).all()
def diag_with_dynamic_shape(dtype):
for mode in [context.PYNATIVE_MODE, context.GRAPH_MODE]:
context.set_context(mode=mode, device_target="GPU")
x = Tensor(np.array([1, 2, 5, 5, 2, 1]).astype(dtype))
diag_with_dynamic_shape_net = NetDiagWithDynamicShape()
output = diag_with_dynamic_shape_net(x)
expect = np.array([[1, 0, 0],
[0, 2, 0],
[0, 0, 5]]).astype(dtype)
assert (output.asnumpy() == expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_1d_float16():
"""
Feature: Diag op.
Description: Test diag op with 1d and float16.
Expectation: The value and shape of output are the expected values.
"""
diag_1d(np.float16)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_1d_float32():
"""
Feature: Diag op.
Description: Test diag op with 1d and float32.
Expectation: The value and shape of output are the expected values.
"""
diag_1d(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Diag op.
Description: Test diag op with 2d and int32.
Expectation: The value and shape of output are the expected values.
"""
diag_2d(np.int32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_2d_int64():
"""
Feature: Diag op.
Description: Test diag op with 2d and int64.
Expectation: The value and shape of output are the expected values.
"""
diag_2d(np.int64)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_with_dynamic_shape():
"""
Feature: Diag op with dynamic shape.
Description: Test diag op with unique.
Expectation: The value and shape of output are the expected values.
"""
diag_with_dynamic_shape(np.float32)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_functional():
"""
Feature: Diag op with functional interface.
Description: Test diag op with functional interface.
Expectation: The value and shape of output are the expected values.
"""
context.set_context(device_target="GPU")
x = Tensor(np.array([1, 2, 5]).astype(np.float64))
output = P.diag(x)
expect = np.array([[1, 0, 0],
[0, 2, 0],
[0, 0, 5]]).astype(np.float64)
assert (output.asnumpy() == expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_tensor():
"""
Feature: Diag op with tensor interface.
Description: Test diag op with tensor interface.
Expectation: The value and shape of output are the expected values.
"""
context.set_context(device_target="GPU")
x = Tensor(np.array([1, 2, 5]).astype(np.float64))
output = x.diag()
expect = np.array([[1, 0, 0],
[0, 2, 0],
[0, 0, 5]]).astype(np.float64)
assert (output.asnumpy() == expect).all()
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_diag_vmap():
"""
Feature: Diag op vmap.
Description: Test the vmap function of diag op.
Expectation: The value and shape of output are the expected values.
"""
context.set_context(device_target="GPU")
def cal_diag(x):
return P.Diag()(x)
@jit
def manually_batched(xs):
output = []
for i in range(xs.shape[0]):
output.append(cal_diag(xs[i]))
return F.stack(output)
x = Tensor(np.array([[1, 2, 3],
[4, 5, 6]]).astype(np.float32))
manually_output = manually_batched(x)
vmap_diag = vmap(cal_diag, in_axes=0, out_axes=0)
vmap_output = vmap_diag(x)
assert (manually_output.asnumpy() == vmap_output.asnumpy()).all() | null |
5,603 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init
from systemtesting import MantidSystemTest
from mantid.api import AnalysisDataService
from mantid.simpleapi import GeneratePythonFitScript, Load
class ExecuteGeneratedPythonFitScriptTest(MantidSystemTest):
"""
This test executes generated Python scripts for single, sequential and simultaneous fitting, to make
sure that the generated scripts run without an error.
"""
def setUp(self):
Load(Filename="MUSR62260_Group_fwd_Asymmetry_MA.nxs", OutputWorkspace="MUSR62260; Group; fwd; Asymmetry; MA")
Load(Filename="MUSR62260_Group_bottom_Asymmetry_MA.nxs", OutputWorkspace="MUSR62260; Group; bottom; Asymmetry; MA")
Load(Filename="MUSR62260_Group_top_Asymmetry_MA.nxs", OutputWorkspace="MUSR62260; Group; top; Asymmetry; MA")
Load(Filename="MUSR62260_Group_bkwd_Asymmetry_MA.nxs", OutputWorkspace="MUSR62260; Group; bkwd; Asymmetry; MA")
def cleanup(self):
AnalysisDataService.clear()
def METHOD_NAME(self):
return [
"MUSR62260_Group_fwd_Asymmetry_MA.nxs",
"MUSR62260_Group_bottom_Asymmetry_MA.nxs",
"MUSR62260_Group_top_Asymmetry_MA.nxs",
"MUSR62260_Group_bkwd_Asymmetry_MA.nxs",
]
def runTest(self):
single_fit_script_text = self._generate_single_fit_script()
self._run_fit_script(single_fit_script_text)
sequential_script_text = self._generate_sequential_fit_script()
self._run_fit_script(sequential_script_text)
simultaneous_script_text = self._generate_simultaneous_fit_script()
self._run_fit_script(simultaneous_script_text)
@staticmethod
def _generate_single_fit_script():
function = "name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0"
script_text = GeneratePythonFitScript(
InputWorkspaces=["MUSR62260; Group; fwd; Asymmetry; MA"],
WorkspaceIndices=[0],
StartXs=[0.1],
EndXs=[15.0],
Function=function,
MaxIterations=500,
Minimizer="Levenberg-Marquardt",
)
return script_text
@staticmethod
def _generate_sequential_fit_script():
function = "name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0"
script_text = GeneratePythonFitScript(
InputWorkspaces=[
"MUSR62260; Group; fwd; Asymmetry; MA",
"MUSR62260; Group; bottom; Asymmetry; MA",
"MUSR62260; Group; top; Asymmetry; MA",
"MUSR62260; Group; bkwd; Asymmetry; MA",
],
WorkspaceIndices=[0, 0, 0, 0],
StartXs=[0.1, 0.1, 0.1, 0.1],
EndXs=[15.0, 15.0, 15.0, 15.0],
Function=function,
MaxIterations=500,
Minimizer="Levenberg-Marquardt",
)
return script_text
@staticmethod
def _generate_simultaneous_fit_script():
function = (
"composite=MultiDomainFunction,NumDeriv=true;"
"name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0,$domains=i;"
"name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0,$domains=i;"
"name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0,$domains=i;"
"name=GausOsc,A=0.2,Sigma=0.2,Frequency=1.3,Phi=0,$domains=i;"
"ties=(f2.Frequency=f3.Frequency,f1.Frequency=f3.Frequency,f0.Frequency=f3.Frequency)"
)
script_text = GeneratePythonFitScript(
InputWorkspaces=[
"MUSR62260; Group; fwd; Asymmetry; MA",
"MUSR62260; Group; bottom; Asymmetry; MA",
"MUSR62260; Group; top; Asymmetry; MA",
"MUSR62260; Group; bkwd; Asymmetry; MA",
],
WorkspaceIndices=[0, 0, 0, 0],
StartXs=[0.1, 0.1, 0.1, 0.1],
EndXs=[15.0, 15.0, 15.0, 15.0],
FittingType="Simultaneous",
Function=function,
MaxIterations=500,
Minimizer="Levenberg-Marquardt",
)
return script_text
def _run_fit_script(self, script_text):
try:
exec(script_text)
except Exception as ex:
self.fail(f"Execution of python fit script failed: {ex}.") | null |
5,604 | # -*- coding: utf-8 -*-
import pytest
from policyengine_core.periods import DAY, MONTH, YEAR, Instant, Period, period
first_jan = Instant((2014, 1, 1))
first_march = Instant((2014, 3, 1))
"""
Test Period -> String
"""
# Years
def test_year():
assert str(Period((YEAR, first_jan, 1))) == "2014"
def test_12_months_is_a_year():
assert str(Period((MONTH, first_jan, 12))) == "2014"
def test_rolling_year():
assert str(Period((MONTH, first_march, 12))) == "year:2014-03"
assert str(Period((YEAR, first_march, 1))) == "year:2014-03"
def test_several_years():
assert str(Period((YEAR, first_jan, 3))) == "year:2014:3"
assert str(Period((YEAR, first_march, 3))) == "year:2014-03:3"
# Months
def test_month():
assert str(Period((MONTH, first_jan, 1))) == "2014-01"
def test_several_months():
assert str(Period((MONTH, first_jan, 3))) == "month:2014-01:3"
assert str(Period((MONTH, first_march, 3))) == "month:2014-03:3"
# Days
def test_day():
assert str(Period((DAY, first_jan, 1))) == "2014-01-01"
def test_several_days():
assert str(Period((DAY, first_jan, 3))) == "day:2014-01-01:3"
assert str(Period((DAY, first_march, 3))) == "day:2014-03-01:3"
"""
Test String -> Period
"""
# Years
def test_parsing_year():
assert period("2014") == Period((YEAR, first_jan, 1))
def test_parsing_rolling_year():
assert period("year:2014-03") == Period((YEAR, first_march, 1))
def test_parsing_several_years():
assert period("year:2014:2") == Period((YEAR, first_jan, 2))
def test_wrong_syntax_several_years():
with pytest.raises(ValueError):
period("2014:2")
# Months
def test_parsing_month():
assert period("2014-01") == Period((MONTH, first_jan, 1))
def METHOD_NAME():
assert period("month:2014-03:3") == Period((MONTH, first_march, 3))
def test_wrong_syntax_several_months():
with pytest.raises(ValueError):
period("2014-3:3")
# Days
def test_parsing_day():
assert period("2014-01-01") == Period((DAY, first_jan, 1))
def test_parsing_several_days():
assert period("day:2014-03-01:3") == Period((DAY, first_march, 3))
def test_wrong_syntax_several_days():
with pytest.raises(ValueError):
period("2014-2-3:2")
def test_day_size_in_days():
assert Period(("day", Instant((2014, 12, 31)), 1)).size_in_days == 1
def test_3_day_size_in_days():
assert Period(("day", Instant((2014, 12, 31)), 3)).size_in_days == 3
def test_month_size_in_days():
assert Period(("month", Instant((2014, 12, 1)), 1)).size_in_days == 31
def test_leap_month_size_in_days():
assert Period(("month", Instant((2012, 2, 3)), 1)).size_in_days == 29
def test_3_month_size_in_days():
assert (
Period(("month", Instant((2013, 1, 3)), 3)).size_in_days
== 31 + 28 + 31
)
def test_leap_3_month_size_in_days():
assert (
Period(("month", Instant((2012, 1, 3)), 3)).size_in_days
== 31 + 29 + 31
)
def test_year_size_in_days():
assert Period(("year", Instant((2014, 12, 1)), 1)).size_in_days == 365
def test_leap_year_size_in_days():
assert Period(("year", Instant((2012, 1, 1)), 1)).size_in_days == 366
def test_2_years_size_in_days():
assert Period(("year", Instant((2014, 1, 1)), 2)).size_in_days == 730
# Misc
def test_wrong_date():
with pytest.raises(ValueError):
period("2006-31-03")
def test_ambiguous_period():
with pytest.raises(ValueError):
period("month:2014")
def test_deprecated_signature():
with pytest.raises(TypeError):
period(MONTH, 2014)
def test_wrong_argument():
with pytest.raises(ValueError):
period({})
def test_wrong_argument_1():
with pytest.raises(ValueError):
period([])
def test_none():
with pytest.raises(ValueError):
period(None)
def test_empty_string():
with pytest.raises(ValueError):
period("")
@pytest.mark.parametrize(
"test",
[
(period("year:2014:2"), YEAR, 2, period("2014"), period("2015")),
(period(2017), MONTH, 12, period("2017-01"), period("2017-12")),
(
period("year:2014:2"),
MONTH,
24,
period("2014-01"),
period("2015-12"),
),
(
period("month:2014-03:3"),
MONTH,
3,
period("2014-03"),
period("2014-05"),
),
(period(2017), DAY, 365, period("2017-01-01"), period("2017-12-31")),
(
period("year:2014:2"),
DAY,
730,
period("2014-01-01"),
period("2015-12-31"),
),
(
period("month:2014-03:3"),
DAY,
92,
period("2014-03-01"),
period("2014-05-31"),
),
],
)
def test_subperiods(test):
def check_subperiods(period, unit, length, first, last):
subperiods = period.get_subperiods(unit)
assert len(subperiods) == length
assert subperiods[0] == first
assert subperiods[-1] == last
check_subperiods(*test) | null |
5,605 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
# std imports
import unittest
# 3rd party
from numpy import full, radians, sin
# local imports
from mantidqt.widgets.sliceviewer.models.sliceinfo import SliceInfo
class SliceInfoTest(unittest.TestCase):
def test_construction_with_named_fields(self):
point = (None, None, 0.5)
dimrange = (None, None, (-15, 15))
info = make_sliceinfo(point=point, dimrange=dimrange)
self.assertEqual(point, info.slicepoint)
self.assertEqual(dimrange, info.range)
self.assertEqual(2, info.z_index)
self.assertEqual(point[2], info.z_value)
self.assertEqual(dimrange[2][1] - dimrange[2][0], info.z_width)
self.assertTrue(info.can_support_nonorthogonal_axes())
def test_no_spatial_dimensions_sets_z_properties_None_and_disables_nonorthogonal_axes(self):
info = make_sliceinfo(qflags=[False] * 3)
self.assertTrue(info.z_index is None)
self.assertTrue(info.z_value is None)
self.assertTrue(info.z_width is None)
self.assertFalse(info.can_support_nonorthogonal_axes())
def test_cannot_support_nonorthogonal_axes_when_no_angles_supplied_with_spatial_dimensions(self):
info = make_sliceinfo(qflags=[True] * 3, axes_angles=None)
self.assertFalse(info.can_support_nonorthogonal_axes())
def test_cannot_support_nonorthogonal_axes_in_slice_with_one_spatial_dimensions(self):
info = make_sliceinfo(qflags=[True, False, False])
self.assertFalse(info.can_support_nonorthogonal_axes())
def test_transform_selects_dimensions_correctly_when_not_transposed(self):
# Set slice info such that display(X,Y) = data(Y,Z)
slice_pt = 0.5
info = make_sliceinfo(point=(slice_pt, None, None), dimrange=[(-15, 15), None, None])
frame_point = (0.5, 1.0, -1.5)
slice_frame = info.transform(frame_point)
self.assertAlmostEqual(frame_point[1], slice_frame[0], delta=1e-6)
self.assertAlmostEqual(frame_point[2], slice_frame[1], delta=1e-6)
self.assertAlmostEqual(frame_point[0], slice_frame[2], delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_transform_selects_dimensions_correctly_when_transposed(self):
# Set slice info such that display(X,Y) = data(Z,Y)
slice_pt = 0.5
info = make_sliceinfo(point=(slice_pt, None, None), dimrange=[(-15, 15), None, None], transpose=True)
frame_point = (0.5, 1.0, -1.5)
slice_frame = info.transform(frame_point)
self.assertAlmostEqual(frame_point[2], slice_frame[0], delta=1e-6)
self.assertAlmostEqual(frame_point[1], slice_frame[1], delta=1e-6)
self.assertAlmostEqual(frame_point[0], slice_frame[2], delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_transform_preceding_nonQdim_4D_MD_ws_nonortho_transform(self):
angles = full((3, 3), radians(90))
angles[0, 1] = radians(60)
angles[1, 0] = radians(60)
# dims: E,H,K,L - viewing (X,Y) = (K,H) with H,K non-orthog (angle=60 deg)
info = make_sliceinfo(
point=(0.0, None, None, -1.0),
dimrange=[(-1.0, 1.0), None, None, (-2.0, 2.0)],
qflags=(False, True, True, True),
axes_angles=angles,
transpose=True,
)
slice_pt = -1.0
frame_point = (2.0, 0.0, slice_pt)
slice_frame = info.transform(frame_point)
self.assertAlmostEqual(slice_frame[0], 1.0, delta=1e-6)
self.assertAlmostEqual(slice_frame[1], 2 * sin(angles[0, 1]), delta=1e-6)
self.assertAlmostEqual(slice_frame[2], slice_pt, delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def METHOD_NAME(self):
# Set slice info such that display(X,Y) = data(Y,Z)
slice_pt = 0.5
info = make_sliceinfo(point=(slice_pt, None, None), dimrange=[(-15, 15), None, None])
slice_frame = (0.5, 1.0, -1.5)
data_frame = info.inverse_transform(slice_frame)
self.assertAlmostEqual(data_frame[0], slice_frame[2], delta=1e-6)
self.assertAlmostEqual(data_frame[1], slice_frame[0], delta=1e-6)
self.assertAlmostEqual(data_frame[2], slice_frame[1], delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_inverse_transform_selects_dimensions_correctly_when_transposed(self):
# Set slice info such that display(X,Y) = data(Z,Y)
slice_pt = 0.5
info = make_sliceinfo(point=(slice_pt, None, None), dimrange=[(-15, 15), None, None], transpose=True)
frame_point = (-1.5, 1.0, 0.5)
slice_frame = info.inverse_transform(frame_point)
self.assertAlmostEqual(frame_point[0], slice_frame[2], delta=1e-6)
self.assertAlmostEqual(frame_point[1], slice_frame[1], delta=1e-6)
self.assertAlmostEqual(frame_point[2], slice_frame[0], delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_inverse_transform_preceding_nonQdim_4D_MD_ws(self):
info = make_sliceinfo(
point=(0.0, None, None, -1.0), dimrange=[(-1.0, 1.0), None, None, (-2.0, 2.0)], qflags=(False, True, True, True), transpose=True
) # dims: E,H,K,L - viewing (X,Y) = (K,H)
slice_pt = -1.0
frame_point = (1.0, 2.0, slice_pt)
slice_frame = info.inverse_transform(frame_point)
self.assertAlmostEqual(slice_frame[0], 2.0, delta=1e-6)
self.assertAlmostEqual(slice_frame[1], 1.0, delta=1e-6)
self.assertAlmostEqual(slice_frame[2], slice_pt, delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_inverse_transform_preceding_nonQdim_4D_MD_ws_nonortho_transform(self):
angles = full((3, 3), radians(90))
angles[0, 1] = radians(60)
angles[1, 0] = radians(60)
# dims: E,H,K,L - viewing (X,Y) = (K,H) with H,K non-orthog (angle=60 deg)
info = make_sliceinfo(
point=(0.0, None, None, -1.0),
dimrange=[(-1.0, 1.0), None, None, (-2.0, 2.0)],
qflags=(False, True, True, True),
axes_angles=angles,
transpose=True,
)
slice_pt = -1.0
frame_point = (1.0, 2 * sin(angles[0, 1]), slice_pt)
slice_frame = info.inverse_transform(frame_point)
self.assertAlmostEqual(slice_frame[0], 2.0, delta=1e-6)
self.assertAlmostEqual(slice_frame[1], 0.0, delta=1e-6)
self.assertAlmostEqual(slice_frame[2], slice_pt, delta=1e-6)
self.assertEqual(slice_pt, info.z_value)
def test_slicepoint_with_greater_than_three_qflags_true_raises_errors(self):
self.assertRaises(
AssertionError,
SliceInfo,
point=(1, None, None, 4),
transpose=True,
range=[(-15, 15), None, None, (-5, -5)],
qflags=(True, True, True, True),
)
def make_sliceinfo(
point=(None, None, 0.5),
transpose=False,
dimrange=(None, None, (-15, 15)),
qflags=[True, True, True],
axes_angles=full((3, 3), radians(90)),
):
return SliceInfo(point=point, transpose=transpose, range=dimrange, qflags=qflags, axes_angles=axes_angles)
if __name__ == "__main__":
unittest.main() | null |
5,606 | # The MIT License (MIT)
#
# Copyright (c) 2019 Looker Data Sciences, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Base model for all generated models
"""
import collections
import datetime
import enum
import functools
import keyword
from typing import Any, Iterable, Optional, Sequence, TypeVar, cast
import cattr
from looker_sdk.rtl import hooks
try:
from typing import ForwardRef # type: ignore
except ImportError:
from typing import _ForwardRef as ForwardRef # type: ignore
EXPLICIT_NULL = cast(Any, "EXPLICIT_NULL") # type:ignore
class Model:
"""Base model for all generated models."""
def _get_converter(self):
if not hasattr(self, "_converter"):
converter = cattr.Converter()
converter.register_unstructure_hook(
datetime.datetime, hooks.datetime_unstructure_hook
)
uh = functools.partial(hooks.unstructure_hook, converter)
converter.register_unstructure_hook(Model, uh) # type: ignore
self._converter = converter
return self._converter
def _key_to_attr(self, key):
"""Appends the trailing _ to python reserved words."""
if key[-1] == "_":
raise KeyError(key)
if key in keyword.kwlist:
key = f"{key}_"
return key
def __getitem__(self, key):
key = self._key_to_attr(key)
try:
ret = getattr(self, key)
except AttributeError:
raise KeyError(key)
if isinstance(ret, enum.Enum):
ret = ret.value
return ret
def __setitem__(self, key, value):
key = self._key_to_attr(key)
if not hasattr(self, key):
raise AttributeError(
f"'{self.__class__.__name__}' object has no attribute '{key}'"
)
annotation = self.__annotations__[key]
if isinstance(annotation, ForwardRef):
actual_type = eval(
annotation.__forward_arg__, self.__global_context, locals()
)
if isinstance(actual_type, enum.EnumMeta):
# untyped because mypy really doesn't like this enum internals stuff
def err(val):
valid = []
for v in actual_type.__members__.values():
if v.value != "invalid_api_enum_value":
valid.append(v.value)
return (
f"Invalid value '{val}' for " # type: ignore
f"'{self.__class__.__name__}.{key}'. Valid values are "
f"{valid}" # type: ignore
)
if isinstance(value, actual_type):
raise ValueError(err(value))
enum_member = actual_type(value)
if enum_member.value == "invalid_api_enum_value":
raise ValueError(err(value))
value = enum_member
elif issubclass(actual_type, Model):
value = self._get_converter().structure(value, actual_type)
return setattr(self, key, value)
def __delitem__(self, key):
self[key] # validates key
setattr(self, self._key_to_attr(key), None)
def __iter__(self):
return iter(self._get_converter().unstructure(self))
def __len__(self):
return len(self._get_converter().unstructure(self))
def __contains__(self, key):
return key in self._get_converter().unstructure(self)
def keys(self):
return self._get_converter().unstructure(self).keys()
def items(self):
return self._get_converter().unstructure(self).items()
def values(self):
return self._get_converter().unstructure(self).values()
def METHOD_NAME(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def pop(self, key, default=None):
ret = self.METHOD_NAME(key, default)
if key in self:
del self[key]
return ret
def popitem(self):
raise NotImplementedError()
def clear(self):
raise NotImplementedError()
def update(self, iterable=None, **kwargs):
if iterable:
has_keys = getattr(iterable, "keys", None)
if callable(has_keys):
for k in iterable:
self[k] = iterable[k]
else:
for k, v in iterable:
self[k] = v
for k in kwargs:
self[k] = kwargs[k]
def setdefault(self, key, default=None):
if key not in self:
self[key] = default
return self[key]
def copy(self):
raise NotImplementedError()
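# Sketch of the mapping-style access the methods above provide. HypotheticalUser
# stands in for any generated subclass of Model and is not a real looker_sdk type:
#
#     user = HypotheticalUser(first_name="Ada")
#     user["first_name"]             # "Ada", same as user.first_name
#     user.update(last_name="Lovelace")
#     "first_name" in user           # True
#     dict(user.items())             # plain dict via the cattr converter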
def safe_enum__new__(cls, value):
"""Handle out-of-spec enum values returned by API.
This is achieved by overriding the __new__ method to return
`invalid_api_enum_value` (defined on each subclass) when an
unexpected value for the enum is returned by the API.
"""
if not isinstance(value, (str, int, bool)):
return super().__new__(cls, value)
else:
vals = {v.value: v for v in cls.__members__.values()}
return vals.METHOD_NAME(value, cls.invalid_api_enum_value)
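# How a generated enum is expected to opt in to this behaviour (sketch; the enum
# below is illustrative, not a real API enum):
#
#     class HypotheticalFormat(enum.Enum):
#         json = "json"
#         csv = "csv"
#         invalid_api_enum_value = "invalid_api_enum_value"
#
#     HypotheticalFormat.__new__ = safe_enum__new__
#     HypotheticalFormat("png")   # -> HypotheticalFormat.invalid_api_enum_value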
T = TypeVar("T")
class DelimSequence(collections.UserList, Sequence[T]):
def __init__(
self,
data: Optional[Sequence[T]] = None,
prefix: str = "",
suffix: str = "",
separator: str = ",",
):
self.prefix = prefix
self.suffix = suffix
self.separator = separator
super().__init__(data)
def append(self, elem: T):
super().append(elem)
def extend(self, iterable: Iterable[T]):
super().extend(iterable)
def insert(self, i: int, elem: T):
super().insert(i, elem)
def remove(self, elem: T):
super().remove(elem)
def index(self, x: T, *args):
return super().index(x, *args)  # type: ignore
def count(self, elem: T):
return super().count(elem)
def __str__(self):
return (
f"{self.prefix}"
f"{self.separator.join(str(d) for d in self.data)}"
f"{self.suffix}"
) | null |
5,607 | import random
from asyncio import get_running_loop
from ...messaging.interfaces.endpoint import Endpoint, EndpointListener
from ...messaging.interfaces.udp.endpoint import UDPv4Address, UDPv6Address
internet = {}
async def crash_event_loop(forwarded_exception: Exception):
"""
Raise an exception on the event loop.
:param forwarded_exception: the exception instance to raise.
"""
raise forwarded_exception
class MockEndpoint(Endpoint):
SEND_INET_EXCEPTION_TO_LOOP = True
"""
Raise an uncaught AssertionError on the ``asyncio`` event loop if attempting to send to an unknown address.
Useful in defensively-programmed code: it bypasses most exception handling.
"""
def __init__(self, lan_address, wan_address):
super().__init__()
internet[lan_address] = self
internet[wan_address] = self
self.lan_address = lan_address
self.wan_address = wan_address
self._port = self.lan_address[1]
self._open = False
def assert_open(self):
assert self._open
def is_open(self):
return self._open
def METHOD_NAME(self):
return self.wan_address
def send(self, socket_address, packet):
if not self.is_open():
return
if socket_address in internet:
# For the unit tests we handle messages in separate asyncio tasks to prevent infinite recursion.
ep = internet[socket_address]
get_running_loop().call_soon(ep.notify_listeners, (self.wan_address, packet))
else:
e = AssertionError("Attempted to send data to unregistered address %s" % repr(socket_address))
if self.SEND_INET_EXCEPTION_TO_LOOP:
get_running_loop().create_task(crash_event_loop(e))
raise e
def open(self):
self._open = True
def close(self, timeout=0.0):
self._open = False
def reset_byte_counters(self):
pass
class AddressTester(EndpointListener):
singleton = None
def __init__(self, endpoint):
super().__init__(endpoint, True)
self._get_lan_address(True)
AddressTester.singleton = self
@classmethod
def get_singleton(cls, endpoint):
if cls.singleton is not None:
return cls.singleton
return AddressTester(endpoint)
def on_packet(self, packet):
pass
def is_lan(self, address: str):
return self.address_is_lan(address)
class AutoMockEndpoint(MockEndpoint):
ADDRESS_TYPE = "UDPv4Address"
def __init__(self):
self._open = False
super().__init__(self._generate_unique_address(), self._generate_unique_address())
self._port = 0
def _generate_address(self):
if self.ADDRESS_TYPE == "UDPv4Address":
b0 = random.randint(0, 255)
b1 = random.randint(0, 255)
b2 = random.randint(0, 255)
b3 = random.randint(0, 255)
port = random.randint(0, 65535)
return UDPv4Address('%d.%d.%d.%d' % (b0, b1, b2, b3), port)
elif self.ADDRESS_TYPE == "UDPv6Address":
b0 = random.randint(0, 65535)
b1 = random.randint(0, 65535)
b2 = random.randint(0, 65535)
b3 = random.randint(0, 65535)
b4 = random.randint(0, 65535)
b5 = random.randint(0, 65535)
b6 = random.randint(0, 65535)
b7 = random.randint(0, 65535)
port = random.randint(0, 65535)
return UDPv6Address(f"{b0:02x}:{b1:02x}:{b2:02x}:{b3:02x}:{b4:02x}:{b5:02x}:{b6:02x}:{b7:02x}", port)
else:
raise RuntimeError("Illegal address type specified: " + repr(self.ADDRESS_TYPE))
def _is_lan(self, address):
"""
Avoid false positives for the actual machine's lan.
"""
self._port = address[1]
address_tester = AddressTester.get_singleton(self)
return address_tester.is_lan(address[0])
def _generate_unique_address(self):
address = self._generate_address()
while address in internet or self._is_lan(address):
address = self._generate_address()
return address
class MockEndpointListener(EndpointListener):
def __init__(self, endpoint, main_thread=False):
super().__init__(endpoint, main_thread)
self.received_packets = []
endpoint.add_listener(self)
def on_packet(self, packet):
self.received_packets.append(packet) | null |
5,608 | from __future__ import annotations
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Tuple, Optional
from pydantic import BaseModel, Field, root_validator
class PromptValue(BaseModel, ABC):
@abstractmethod
def METHOD_NAME(self) -> str:
"""Return prompt as string."""
@abstractmethod
def to_messages(self) -> List[BaseMessage]:
"""Return prompt as messages."""
class BaseMessage(BaseModel):
"""Message object."""
content: str
additional_kwargs: dict = Field(default_factory=dict)
@property
@abstractmethod
def type(self) -> str:
"""Type of the message, used for serialization."""
class HumanMessage(BaseMessage):
"""Type of message that is spoken by the human."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "human"
class AIMessage(BaseMessage):
"""Type of message that is spoken by the AI."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "ai"
class ViewMessage(BaseMessage):
"""Type of message that is spoken by the AI."""
example: bool = False
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "view"
class SystemMessage(BaseMessage):
"""Type of message that is a system message."""
@property
def type(self) -> str:
"""Type of the message, used for serialization."""
return "system"
class ModelMessage(BaseModel):
"""Type of message that interaction between dbgpt-server and llm-server"""
"""Similar to openai's message format"""
role: str
content: str
class ModelMessageRoleType:
""" "Type of ModelMessage role"""
SYSTEM = "system"
HUMAN = "human"
AI = "ai"
VIEW = "view"
class Generation(BaseModel):
"""Output of a single generation."""
text: str
"""Generated text output."""
generation_info: Optional[Dict[str, Any]] = None
"""Raw generation info response from the provider"""
"""May include things like reason for finishing (e.g. in OpenAI)"""
class ChatGeneration(Generation):
"""Output of a single generation."""
text = ""
message: BaseMessage
@root_validator
def set_text(cls, values: Dict[str, Any]) -> Dict[str, Any]:
values["text"] = values["message"].content
return values
class ChatResult(BaseModel):
"""Class that contains all relevant information for a Chat Result."""
generations: List[ChatGeneration]
"""List of the things generated."""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output."""
class LLMResult(BaseModel):
"""Class that contains all relevant information for an LLM Result."""
generations: List[List[Generation]]
"""List of the things generated. This is List[List[]] because
each input could have multiple generations."""
llm_output: Optional[dict] = None
"""For arbitrary LLM provider specific output."""
def _message_to_dict(message: BaseMessage) -> dict:
return {"type": message.type, "data": message.dict()}
def messages_to_dict(messages: List[BaseMessage]) -> List[dict]:
return [_message_to_dict(m) for m in messages]
def _message_from_dict(message: dict) -> BaseMessage:
_type = message["type"]
if _type == "human":
return HumanMessage(**message["data"])
elif _type == "ai":
return AIMessage(**message["data"])
elif _type == "system":
return SystemMessage(**message["data"])
elif _type == "view":
return ViewMessage(**message["data"])
else:
raise ValueError(f"Got unexpected type: {_type}")
def messages_from_dict(messages: List[dict]) -> List[BaseMessage]:
return [_message_from_dict(m) for m in messages]
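# Round-trip sketch for the two helpers above (values are illustrative):
#
#     msgs = [SystemMessage(content="You are helpful."), HumanMessage(content="Hi")]
#     as_dicts = messages_to_dict(msgs)
#     # [{"type": "system", "data": {...}}, {"type": "human", "data": {...}}]
#     restored = messages_from_dict(as_dicts)
#     # restored[1] is again a HumanMessage with content "Hi"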
def _parse_model_messages(
messages: List[ModelMessage],
) -> Tuple[str, List[str], List[List[str]]]:
"""
Parameters:
messages: List of messages from base chat.
Returns:
A tuple containing the user prompt, system message list and history message list:
str: user prompt
List[str]: system messages
List[List[str]]: history message of user and assistant
"""
user_prompt = ""
system_messages: List[str] = []
history_messages: List[List[str]] = [[]]
for message in messages[:-1]:
if message.role == "human":
history_messages[-1].append(message.content)
elif message.role == "system":
system_messages.append(message.content)
elif message.role == "ai":
history_messages[-1].append(message.content)
history_messages.append([])
if messages[-1].role != "human":
raise ValueError("Hi! What do you want to talk about?")
# Keep message pair of [user message, assistant message]
history_messages = list(filter(lambda x: len(x) == 2, history_messages))
user_prompt = messages[-1].content
return user_prompt, system_messages, history_messages | null |
5,609 | """
GUISCRCPY by srevinsaju
Get it on : https://github.com/srevinsaju/guiscrcpy
Licensed under GNU Public License
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import logging
import uuid
from qtpy import QtGui, QtWidgets, QtCore
from qtpy.QtCore import Qt, QPoint
from qtpy.QtWidgets import QMainWindow
from guiscrcpy.lib.toolkit import UXMapper
class SwipeUX(QMainWindow):
def __init__(self, ux_wrapper=None, frame=False, always_on_top=True):
"""
Swipe UI
:param ux_wrapper: UXMapper type object
:param frame: bool
:param always_on_top: bool
"""
QMainWindow.__init__(self)
self.oldPos = None
self.name = "swipe"
self.uid = uuid.uuid4()
# =================
if ux_wrapper:
self.ux = ux_wrapper
else:
self.ux = UXMapper()
hexdigest = self.ux.get_sha()[:6]
self.setObjectName("SwipeUX")
__flags = QtCore.Qt.Window
if not frame:
__flags |= QtCore.Qt.FramelessWindowHint
self.setAttribute(Qt.WA_NoSystemBackground, True)
self.setAttribute(Qt.WA_TranslucentBackground, True)
if always_on_top:
__flags |= QtCore.Qt.WindowStaysOnTopHint
self.setWindowFlags(__flags)
# self.setWindowFlags(QtCore.Qt.WindowStaysOnTopHint
# QtCore.Qt.FramelessWindowHint)
self.resize(70, 70)
# -----------------------
icon = QtGui.QIcon()
icon.addPixmap(
QtGui.QPixmap(":/res/ui/guiscrcpy_logo.png"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.setWindowIcon(icon)
self.setStyleSheet(
"QWidget {"
"background-color: rgba(0,0,0,0);}\nQPushButton {\n"
"border-radius: 15px;\n"
"background-color: qradialgradient("
"spread:pad, cx:0.5, cy:0.5, radius:0.5, fx:0.495098, fy:0.5, "
"stop:0.887255 rgba(35, 35, 35, 255), "
"stop:0.901961 rgba(0, 0, 0, 255));\n"
"color: rgb(0, 0, 0);\n"
"}\n\n"
"QPushButton:pressed {\n"
"border-radius: 15px;\n"
"\n"
"background-color: qlineargradient("
"spread:pad, x1:0, y1:0, x2:1, y2:1, "
"stop:0 rgba(0, 255, 255, 255), "
"stop:1 rgba(0, 255, 152, 255));\n"
"color: rgb(0, 0, 0);\n"
" }\n"
"QMainWindow{background-color: rgba(0,0,0,30);}\n"
"QPushButton:hover {\n"
"border-radius: 15px;\n"
"background-color: qlineargradient("
"spread:pad, x1:0, y1:0, x2:1, y2:1, "
"stop:0 rgba(0, 199, 199, 255), "
"stop:1 rgba(0, 190, 113, 255));\n"
"color: rgb(0, 0, 0);\n"
"}"
)
self.centralwidget = QtWidgets.QWidget(self)
self.centralwidget.setObjectName("centralwidget")
self.lol = QtWidgets.QPushButton(self.centralwidget)
self.lol.setGeometry(QtCore.QRect(24, 24, 25, 25))
self.lol.setText("")
self.lol.setObjectName("lol")
self.lol.setStyleSheet(
f"background-color: #{hexdigest};" f"border-radius: 12px; "
)
self.swirt = QtWidgets.QPushButton(self.centralwidget)
self.swirt.setGeometry(QtCore.QRect(40, 20, 30, 30))
self.swirt.setText("")
icon1 = QtGui.QIcon()
icon1.addPixmap(
QtGui.QPixmap(":/icons/icons/chevron-sign-right.svg"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.swirt.setIcon(icon1)
self.swirt.setObjectName("swirt")
self.swilf = QtWidgets.QPushButton(self.centralwidget)
self.swilf.setGeometry(QtCore.QRect(0, 20, 30, 30))
self.swilf.setText("")
icon2 = QtGui.QIcon()
icon2.addPixmap(
QtGui.QPixmap(":/icons/icons/chevron-sign-left.svg"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.swilf.setIcon(icon2)
self.swilf.setObjectName("swilf")
self.swidn = QtWidgets.QPushButton(self.centralwidget)
self.swidn.setGeometry(QtCore.QRect(20, 40, 30, 30))
self.swidn.setText("")
icon3 = QtGui.QIcon()
icon3.addPixmap(
QtGui.QPixmap(":/icons/icons/chevron-sign-down.svg"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.swidn.setIcon(icon3)
self.swidn.setObjectName("swidn")
self.swiup = QtWidgets.QPushButton(self.centralwidget)
self.swiup.setGeometry(QtCore.QRect(20, 0, 30, 30))
self.swiup.setText("")
icon4 = QtGui.QIcon()
icon4.addPixmap(
QtGui.QPixmap(":/icons/icons/chevron-sign-up.svg"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off,
)
self.swiup.setIcon(icon4)
self.swiup.setObjectName("swiup")
self.setCentralWidget(self.centralwidget)
self.oldpos = self.pos()
self.swiup.pressed.connect(self.swipup)
self.swidn.pressed.connect(self.swipdn)
self.swilf.pressed.connect(self.swipleft)
self.swirt.pressed.connect(self.METHOD_NAME)
def init(self):
self.show()
def paintEvent(self, event):
qp = QtGui.QPainter()
qp.begin(self)
qp.setRenderHint(QtGui.QPainter.Antialiasing)
qp.setPen(Qt.NoPen)
qp.setBrush(QtGui.QColor(0, 0, 0, 127))
qp.drawEllipse(0, 0, 70, 70)
qp.end()
def mousePressEvent(self, event):
self.oldPos = event.globalPos()
def mouseMoveEvent(self, event):
try:
delta = QPoint(event.globalPos() - self.oldPos)
self.move(self.x() + delta.x(), self.y() + delta.y())
self.oldPos = event.globalPos()
except (TypeError, AttributeError):
pass
def swipdn(self):
logging.debug("Passing SWIPE DOWN")
dim_values = self.ux.android_dimensions
pos_y = int(dim_values[1]) - 200
pos_x = int(dim_values[0])
new_pos_x = pos_x / 2 # find center
self.ux.do_swipe(new_pos_x, 200, new_pos_x, pos_y)
def swipup(self):
logging.debug("Passing SWIPE UP")
dim_values = self.ux.android_dimensions
pos_y = int(dim_values[1]) - 100
pos_x = int(dim_values[0])
new_pos_x = int(pos_x / 2) # find center
self.ux.do_swipe(new_pos_x, pos_y, new_pos_x, 200)
def swipleft(self):
logging.debug("Passing SWIPE LEFT")
dim_values = self.ux.android_dimensions
pos_y = int(dim_values[1])
pos_x = int(dim_values[0]) - 10
new_pos_y = int(pos_y / 2) # find center
self.ux.do_swipe(10, new_pos_y, pos_x, new_pos_y)
def METHOD_NAME(self):
logging.debug("Passing SWIPE RIGHT")
dim_values = self.ux.android_dimensions
pos_y = int(dim_values[1])
pos_x = int(dim_values[0]) - 10
new_pos_y = int(pos_y / 2) # find center
self.ux.do_swipe(pos_x, new_pos_y, 10, new_pos_y) | null |
5,610 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetLogProfileResult',
'AwaitableGetLogProfileResult',
'get_log_profile',
'get_log_profile_output',
]
@pulumi.output_type
class GetLogProfileResult:
"""
A collection of values returned by getLogProfile.
"""
def __init__(__self__, categories=None, id=None, locations=None, name=None, retention_policies=None, METHOD_NAME=None, storage_account_id=None):
if categories and not isinstance(categories, list):
raise TypeError("Expected argument 'categories' to be a list")
pulumi.set(__self__, "categories", categories)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if locations and not isinstance(locations, list):
raise TypeError("Expected argument 'locations' to be a list")
pulumi.set(__self__, "locations", locations)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if retention_policies and not isinstance(retention_policies, list):
raise TypeError("Expected argument 'retention_policies' to be a list")
pulumi.set(__self__, "retention_policies", retention_policies)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'servicebus_rule_id' to be a str")
pulumi.set(__self__, "servicebus_rule_id", METHOD_NAME)
if storage_account_id and not isinstance(storage_account_id, str):
raise TypeError("Expected argument 'storage_account_id' to be a str")
pulumi.set(__self__, "storage_account_id", storage_account_id)
@property
@pulumi.getter
def categories(self) -> Sequence[str]:
"""
List of categories of the logs.
"""
return pulumi.get(self, "categories")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def locations(self) -> Sequence[str]:
"""
List of regions for which Activity Log events are stored or streamed.
"""
return pulumi.get(self, "locations")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="retentionPolicies")
def retention_policies(self) -> Sequence['outputs.GetLogProfileRetentionPolicyResult']:
"""
a `retention_policy` block as documented below.
"""
return pulumi.get(self, "retention_policies")
@property
@pulumi.getter(name="servicebusRuleId")
def METHOD_NAME(self) -> str:
"""
The service bus (or event hub) rule ID of the service bus (or event hub) namespace in which the Activity Log is streamed to.
"""
return pulumi.get(self, "servicebus_rule_id")
@property
@pulumi.getter(name="storageAccountId")
def storage_account_id(self) -> str:
"""
The resource id of the storage account in which the Activity Log is stored.
"""
return pulumi.get(self, "storage_account_id")
class AwaitableGetLogProfileResult(GetLogProfileResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetLogProfileResult(
categories=self.categories,
id=self.id,
locations=self.locations,
name=self.name,
retention_policies=self.retention_policies,
METHOD_NAME=self.METHOD_NAME,
storage_account_id=self.storage_account_id)
def get_log_profile(name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetLogProfileResult:
"""
Use this data source to access the properties of a Log Profile.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.monitoring.get_log_profile(name="test-logprofile")
pulumi.export("logProfileStorageAccountId", example.storage_account_id)
```
:param str name: Specifies the Name of the Log Profile.
"""
__args__ = dict()
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:monitoring/getLogProfile:getLogProfile', __args__, opts=opts, typ=GetLogProfileResult).value
return AwaitableGetLogProfileResult(
categories=pulumi.get(__ret__, 'categories'),
id=pulumi.get(__ret__, 'id'),
locations=pulumi.get(__ret__, 'locations'),
name=pulumi.get(__ret__, 'name'),
retention_policies=pulumi.get(__ret__, 'retention_policies'),
METHOD_NAME=pulumi.get(__ret__, 'servicebus_rule_id'),
storage_account_id=pulumi.get(__ret__, 'storage_account_id'))
@_utilities.lift_output_func(get_log_profile)
def get_log_profile_output(name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetLogProfileResult]:
"""
Use this data source to access the properties of a Log Profile.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.monitoring.get_log_profile(name="test-logprofile")
pulumi.export("logProfileStorageAccountId", example.storage_account_id)
```
:param str name: Specifies the Name of the Log Profile.
"""
... | null |
5,611 | import json
import os
import urllib.parse
from gettext import gettext as _
from gi.repository import Gio
from lutris import settings
from lutris.api import get_api_games, get_game_installers, read_api_key
from lutris.database.games import get_games
from lutris.database.services import ServiceGameCollection
from lutris.gui import dialogs
from lutris.gui.views.media_loader import download_media
from lutris.services.base import LutrisBanner, LutrisCoverart, LutrisCoverartMedium, LutrisIcon, OnlineService
from lutris.services.service_game import ServiceGame
from lutris.util import http
from lutris.util.log import logger
class LutrisGame(ServiceGame):
"""Service game created from the Lutris API"""
service = "lutris"
@classmethod
def new_from_api(cls, api_payload):
"""Create an instance of LutrisGame from the API response"""
service_game = LutrisGame()
service_game.appid = api_payload['slug']
service_game.slug = api_payload['slug']
service_game.name = api_payload['name']
service_game.details = json.dumps(api_payload)
return service_game
class LutrisService(OnlineService):
"""Service for Lutris games"""
id = "lutris"
name = _("Lutris")
icon = "lutris"
online = True
medias = {
"icon": LutrisIcon,
"banner": LutrisBanner,
"coverart_med": LutrisCoverartMedium,
"coverart_big": LutrisCoverart,
}
default_format = "banner"
api_url = settings.SITE_URL + "/api"
login_url = settings.SITE_URL + "/api/accounts/token"
cache_path = os.path.join(settings.CACHE_DIR, "lutris")
token_path = os.path.join(settings.CACHE_DIR, "auth-token")
@property
def credential_files(self):
"""Return a list of all files used for authentication"""
return [self.token_path]
def match_games(self):
"""Matching lutris games is much simpler... No API call needed."""
service_games = {
str(game["appid"]): game for game in ServiceGameCollection.get_for_service(self.id)
}
for lutris_game in get_games():
self.match_game(service_games.get(lutris_game["slug"]), lutris_game)
def is_connected(self):
"""Is the service connected?"""
return self.is_authenticated()
def METHOD_NAME(self, parent=None):
"""Connect to Lutris"""
login_dialog = dialogs.ClientLoginDialog(parent=parent)
login_dialog.connect("connected", self.on_connect_success)
def on_connect_success(self, _widget, _username):
"""Handles connection success"""
self.emit("service-login")
def get_library(self):
"""Return the remote library as a list of dicts."""
credentials = read_api_key()
if not credentials:
return []
url = settings.SITE_URL + "/api/games/library/%s" % urllib.parse.quote(credentials["username"])
request = http.Request(url, headers={"Authorization": "Token " + credentials["token"]})
try:
response = request.get()
except http.HTTPError as ex:
logger.error("Unable to load library: %s", ex)
return []
response_data = response.json
if response_data:
return response_data["games"]
return []
def load(self):
lutris_games = self.get_library()
logger.debug("Loaded %s games from Lutris library", len(lutris_games))
for game in lutris_games:
lutris_game = LutrisGame.new_from_api(game)
lutris_game.save()
logger.debug("Matching with already installed games")
self.match_games()
logger.debug("Lutris games loaded")
return lutris_games
def install(self, db_game):
if isinstance(db_game, dict):
slug = db_game["slug"]
else:
slug = db_game
installers = get_game_installers(slug)
if not installers:
raise RuntimeError(_("Lutris has no installers for %s. Try using a different service instead.") % slug)
application = Gio.Application.get_default()
application.show_installer_window(installers)
def get_game_platforms(self, db_game):
details = db_game.get("details")
if details:
platforms = json.loads(details).get("platforms")
if platforms is not None:
return [p.get("name") for p in platforms]
return None
def download_lutris_media(slug):
"""Download all media types for a single lutris game"""
url = settings.SITE_URL + "/api/games/%s" % slug
request = http.Request(url)
try:
response = request.get()
except http.HTTPError as ex:
logger.debug("Unable to load %s: %s", slug, ex)
return
response_data = response.json
icon_url = response_data.get("icon_url")
if icon_url:
download_media({slug: icon_url}, LutrisIcon())
banner_url = response_data.get("banner_url")
if banner_url:
download_media({slug: banner_url}, LutrisBanner())
cover_url = response_data.get("coverart")
if cover_url:
download_media({slug: cover_url}, LutrisCoverart())
def sync_media():
"""Downlad all missing media"""
banners_available = {fn.split(".")[0] for fn in os.listdir(settings.BANNER_PATH)}
icons_available = {
fn.split(".")[0].replace("lutris_", "")
for fn in os.listdir(settings.ICON_PATH)
if fn.startswith("lutris_")
}
covers_available = {fn.split(".")[0] for fn in os.listdir(settings.COVERART_PATH)}
complete_games = banners_available.intersection(icons_available).intersection(covers_available)
all_slugs = {game["slug"] for game in get_games()}
slugs = all_slugs - complete_games
if not slugs:
return
games = get_api_games(list(slugs))
alias_map = {}
api_slugs = set()
for game in games:
api_slugs.add(game["slug"])
for alias in game["aliases"]:
if alias["slug"] in slugs:
alias_map[game["slug"]] = alias["slug"]
alias_slugs = set(alias_map.values())
used_alias_slugs = alias_slugs - api_slugs
for alias_slug in used_alias_slugs:
for game in games:
if alias_slug in [alias["slug"] for alias in game["aliases"]]:
game["slug"] = alias_map[game["slug"]]
continue
banner_urls = {
game["slug"]: game["banner_url"]
for game in games
if game["slug"] not in banners_available and game["banner_url"]
}
icon_urls = {
game["slug"]: game["icon_url"]
for game in games
if game["slug"] not in icons_available and game["icon_url"]
}
cover_urls = {
game["slug"]: game["coverart"]
for game in games
if game["slug"] not in covers_available and game["coverart"]
}
logger.debug(
"Syncing %s banners, %s icons and %s covers",
len(banner_urls), len(icon_urls), len(cover_urls)
)
download_media(banner_urls, LutrisBanner())
download_media(icon_urls, LutrisIcon())
download_media(cover_urls, LutrisCoverart()) | null |
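# A minimal sketch of the set arithmetic sync_media() relies on, using
# hypothetical slugs rather than real library entries: a game only counts as
# "complete" when a banner, an icon and a cover are all present locally.
_banners = {"doom", "quake"}
_icons = {"doom"}
_covers = {"doom", "quake"}
_complete = _banners & _icons & _covers   # only "doom" has all three media types
_all_slugs = {"doom", "quake", "hexen"}
_missing = _all_slugs - _complete         # slugs that still need downloads
assert _missing == {"quake", "hexen"}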
5,612 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
import json
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as rules_utils
class RulesDeleteTestCase(BaseTestGenerator):
"""This class will delete rule under table node."""
scenarios = utils.generate_scenarios('delete_multiple_rule',
rules_utils.test_cases)
def setUp(self):
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to delete rule.")
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to delete rule.")
self.table_name = "table_column_%s" % (str(uuid.uuid4())[1:8])
self.table_id = tables_utils.create_table(self.server, self.db_name,
self.schema_name,
self.table_name)
self.rule_name = "test_rule_delete_%s" % (str(uuid.uuid4())[1:8])
self.rule_name_1 = "test_rule_delete_%s" % (str(uuid.uuid4())[1:8])
self.rule_ids = [rules_utils.create_rule(self.server, self.db_name,
self.schema_name,
self.table_name,
self.rule_name),
rules_utils.create_rule(self.server, self.db_name,
self.schema_name,
self.table_name,
self.rule_name_1),
]
def METHOD_NAME(self, data):
return self.tester.delete(
"{0}{1}/{2}/{3}/{4}/{5}/".format(self.url, utils.SERVER_GROUP,
self.server_id, self.db_id,
self.schema_id, self.table_id
),
follow_redirects=True,
data=json.dumps(data),
content_type='html/json'
)
def runTest(self):
"""This function will delete rule under table node."""
rule_response = rules_utils.verify_rule(self.server, self.db_name,
self.rule_name)
if not rule_response:
raise Exception("Could not find the rule to delete.")
rule_response = rules_utils.verify_rule(self.server, self.db_name,
self.rule_name_1)
if not rule_response:
raise Exception("Could not find the rule to delete.")
data = {'ids': self.rule_ids}
if self.is_positive_test:
response = self.METHOD_NAME(data)
self.assertEqual(response.status_code,
self.expected_data["status_code"])
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,613 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from druidapi.rest import DruidRestClient
from druidapi.status import StatusClient
from druidapi.catalog import CatalogClient
from druidapi.sql import QueryClient
from druidapi.tasks import TaskClient
from druidapi.datasource import DatasourceClient
from druidapi.basic_auth import BasicAuthClient
class DruidClient:
'''
Client for a Druid cluster. Functionality is split into a number of
specialized "clients" that group many of Druid's REST API calls.
'''
def __init__(self, router_endpoint, display_client=None, auth=None):
self.rest_client = DruidRestClient(router_endpoint, auth=auth)
self.status_client = None
self.catalog_client = None
self.sql_client = None
self.tasks_client = None
self.datasource_client = None
if display_client:
self.display_client = display_client
else:
from druidapi.text_display import TextDisplayClient
self.display_client = TextDisplayClient()
self.display_client._druid = self
@property
def rest(self):
'''
Returns the low-level REST client. Useful for debugging and to access REST API
calls not yet wrapped by the various function-specific clients.
If you find you need to use this, consider creating a wrapper function in Python
and contributing it to Druid via a pull request.
'''
return self.rest_client
def trace(self, enable=True):
'''
Enable or disable tracing. When enabled, the Druid client prints the
URL and payload for each REST API call. Useful for debugging, or if you want
to learn what the code does so you can replicate it in your own client.
'''
self.rest_client.enable_trace(enable)
@property
def status(self) -> StatusClient:
'''
Returns the status client for the Router service.
'''
if not self.status_client:
self.status_client = StatusClient(self.rest_client)
return self.status_client
def METHOD_NAME(self, endpoint) -> StatusClient:
'''
Returns the status client for a Druid service.
Parameters
----------
endpoint: str
The URL for a Druid service.
'''
return StatusClient(DruidRestClient(endpoint), True)
@property
def catalog(self) -> CatalogClient:
'''
Returns the catalog client to interact with the Druid catalog.
'''
if not self.catalog_client:
self.catalog_client = CatalogClient(self.rest_client)
return self.catalog_client
@property
def sql(self) -> QueryClient:
'''
Returns the SQL query client to submit interactive or MSQ queries.
'''
if not self.sql_client:
self.sql_client = QueryClient(self)
return self.sql_client
@property
def tasks(self) -> TaskClient:
'''
Returns the Overlord tasks client to submit and track tasks.
'''
if not self.tasks_client:
self.tasks_client = TaskClient(self.rest_client)
return self.tasks_client
@property
def datasources(self) -> DatasourceClient:
'''
Returns the Coordinator datasources client to manipulate datasources.
Prefer to use the SQL client to query the INFORMATION_SCHEMA to obtain
information about datasources.
'''
if not self.datasource_client:
self.datasource_client = DatasourceClient(self.rest_client)
return self.datasource_client
def basic_security(self, authenticator, authorizer=None):
'''
Returns a client to work with a basic authorization authenticator/authorizer pair.
This client assumes the typical case of one authenticator and one authorizer. If
you have more than one, create multiple clients.
The basic security API is not proxied through the Router: it must work directly with
the Coordinator. Create an ad hoc Druid client for your Coordinator. Because you have
basic security enabled, you must specify the admin user and password:
```
coord = druidapi.jupyter_client('http://localhost:8081', auth=('admin', 'admin-pwd'))
ac = coord.basic_security('yourAuthenticator', 'yourAuthorizer')
```
Parameters
----------
authenticator: str
Authenticator name as set in the `druid.auth.authenticatorChain`
runtime property.
authorizer: str, default = same as authenticator
Authorizer name as set in the `druid.auth.authorizers` runtime property.
Defaults to the same name as the `authenticator` parameter for simple cases.
'''
return BasicAuthClient(self.rest_client, authenticator, authorizer)
@property
def display(self):
return self.display_client
def close(self):
self.rest_client.close()
self.rest_client = None
self.catalog_client = None
self.tasks_client = None
self.datasource_client = None
self.sql_client = None | null |
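# A minimal usage sketch, assuming a Druid Router is reachable at
# http://localhost:8888 (the endpoint is hypothetical); it only exercises
# members defined in this module.
if __name__ == '__main__':
    druid = DruidClient('http://localhost:8888')
    druid.trace(True)         # print the URL and payload of every REST call
    sql_client = druid.sql    # lazily created QueryClient, cached for reuse
    druid.close()             # releases the underlying REST client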
5,614 | #!/usr/bin/env python
import warnings
from agate import Table
from agate.data_types import Boolean, Date, DateTime, Number, Text, TimeDelta
from agate.testcase import AgateTestCase
from agate.type_tester import TypeTester
class TestFromCSV(AgateTestCase):
def METHOD_NAME(self):
self.rows = (
(1, 'a', True, '11/4/2015', '11/4/2015 12:22 PM', '4:15'),
(2, '👍', False, '11/5/2015', '11/4/2015 12:45 PM', '6:18'),
(None, 'b', None, None, None, None)
)
self.column_names = [
'number', 'text', 'boolean', 'date', 'datetime', 'timedelta'
]
self.column_types = [
Number(), Text(), Boolean(), Date(), DateTime(), TimeDelta()
]
def test_from_csv(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test.csv')
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_crlf(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test_crlf.csv')
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_cr(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test_cr.csv')
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_file_like_object(self):
table1 = Table(self.rows, self.column_names, self.column_types)
f = open('examples/test.csv', encoding='utf-8')
table2 = Table.from_csv(f)
f.close()
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_type_tester(self):
tester = TypeTester(force={
'number': Text()
})
table = Table.from_csv('examples/test.csv', column_types=tester)
self.assertColumnTypes(table, [Text, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_no_type_tester(self):
tester = TypeTester(limit=0)
table = Table.from_csv('examples/test.csv', column_types=tester)
self.assertColumnTypes(table, [Text, Text, Text, Text, Text, Text])
def test_from_csv_no_header(self):
warnings.simplefilter('ignore')
try:
table = Table.from_csv('examples/test_no_header.csv', header=False)
finally:
warnings.resetwarnings()
self.assertColumnNames(table, ['a', 'b', 'c', 'd', 'e', 'f'])
self.assertColumnTypes(table, [Number, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_no_header_columns(self):
table = Table.from_csv('examples/test_no_header.csv', self.column_names, header=False)
self.assertColumnNames(table, self.column_names)
self.assertColumnTypes(table, [Number, Text, Boolean, Date, DateTime, TimeDelta])
def test_from_csv_sniff_limit_0(self):
table2 = Table.from_csv('examples/test_csv_sniff.csv', sniff_limit=0)
self.assertColumnNames(table2, ['number|text|boolean|date|datetime|timedelta'])
self.assertColumnTypes(table2, [Text])
def test_from_csv_sniff_limit_200(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test_csv_sniff.csv', sniff_limit=200)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_sniff_limit_none(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test_csv_sniff.csv', sniff_limit=None)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_skip_lines(self):
warnings.simplefilter('ignore')
try:
table1 = Table(self.rows[1:], column_types=self.column_types)
table2 = Table.from_csv('examples/test.csv', header=False, skip_lines=2)
finally:
warnings.resetwarnings()
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_skip_lines_crlf(self):
warnings.simplefilter('ignore')
try:
table1 = Table(self.rows[1:], column_types=self.column_types)
table2 = Table.from_csv('examples/test_crlf.csv', header=False, skip_lines=2)
finally:
warnings.resetwarnings()
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_skip_lines_cr(self):
warnings.simplefilter('ignore')
try:
table1 = Table(self.rows[1:], column_types=self.column_types)
table2 = Table.from_csv('examples/test_cr.csv', header=False, skip_lines=2)
finally:
warnings.resetwarnings()
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_row_limit(self):
table1 = Table(self.rows[:2], self.column_names, self.column_types)
table2 = Table.from_csv('examples/test.csv', row_limit=2)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_row_limit_no_header_columns(self):
table1 = Table(self.rows[:2], self.column_names, self.column_types)
table2 = Table.from_csv('examples/test_no_header.csv', self.column_names, header=False, row_limit=2)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows)
def test_from_csv_row_limit_too_high(self):
table1 = Table(self.rows, self.column_names, self.column_types)
table2 = Table.from_csv('examples/test.csv', row_limit=200)
self.assertColumnNames(table2, table1.column_names)
self.assertColumnTypes(table2, [Number, Text, Boolean, Date, DateTime, TimeDelta])
self.assertRows(table2, table1.rows) | null |
5,615 | #!/usr/bin/env python
"""Newick format with all features as per the specs at:
http://evolution.genetics.washington.edu/phylip/newick_doc.html
http://evolution.genetics.washington.edu/phylip/newicktree.html
ie:
Unquoted label underscore munging
Quoted labels
Inner node labels
Lengths
[ ... ] Comments (discarded)
Unlabeled tips
also:
Double quotes can be used.
Spaces and quote marks are OK inside unquoted labels.
"""
import re
from cogent3.parse.record import FileFormatError
EOT = None
class TreeParseError(FileFormatError):
pass
class _Tokeniser(object):
"""Supplies an iterable stream of Newick tokens from 'text'
By default this is very forgiving of non-standard unquoted labels.
Two options can change how unquoted labels are interpreted:
To prohibit internal spaces and quotes set strict_labels=True.
To disable conversion of '_' to ' ' set underscore_unmunge=False.
NOTE: underscore_unmunging is part of the Newick standard, although it
is often inconvenient for other purposes.
"""
def __init__(self, text, strict_labels=False, underscore_unmunge=True):
self.text = text
self.posn = None
self.strict_unquoted_labels = strict_labels
self.underscore_unmunge = underscore_unmunge
def error(self, detail=""):
if self.token:
msg = f'Unexpected "{self.token}" at '
else:
msg = "At "
(line, column) = self.posn
sample = self.text.split("\n")[line][:column]
if column > 30:
sample = "..." + sample[-20:]
if line > 0:
msg += f'line {line + 1}:{column} "{sample}"'
else:
msg += f'char {column} "{sample}"'
return TreeParseError(msg + ". " + detail)
def tokens(self):
closing_quote_token = None
column = 0
line = 0
text = None
closing_quote_token = None
in_comment = False
for token in re.split("""([\\t ]+|\\n|''|""|[]['"(),:;])""", self.text) + [EOT]:
label_complete = False
token_consumed = True
self.token = token
column += len(token or "")
self.posn = (line, column)
if token == "":
pass
elif in_comment:
if token is EOT:
raise self.error("Ended with unclosed comment")
if token == "]":
in_comment = False
elif closing_quote_token:
if token is EOT:
raise self.error("Text ended inside quoted label")
if token == "\n":
raise self.error("Line ended inside quoted label")
if token == closing_quote_token:
label_complete = True
closing_quote_token = None
else:
if token == closing_quote_token * 2:
token = token[0]
text += token
elif token is EOT or token in "\n[():,;":
if text:
text = text.strip()
if self.underscore_unmunge and "_" in text:
text = text.replace("_", " ")
label_complete = True
if token == "\n":
line += 1
column = 1
elif token == "[":
in_comment = True
else:
token_consumed = False
elif text is not None:
text += token
elif token in ["''", '""']:
label_complete = True
text = ""
elif token in ["'", '"']:
closing_quote_token = token
text = ""
elif token.strip():
text = token
label_complete = self.strict_unquoted_labels
if label_complete:
self.token = None
yield text
text = None
if not token_consumed:
self.token = token
yield token
def METHOD_NAME(text, constructor, **kw):
"""Parses a Newick-format string, using specified constructor for tree.
Calls constructor(children, name, attributes)
Note: underscore_unmunge, if True, replaces underscores with spaces in
the data that's read in. This is part of the Newick format, but it is
often useful to suppress this behavior.
"""
if "(" not in text and ";" not in text and text.strip():
# otherwise "filename" is a valid (if small) tree
raise TreeParseError(f'Not a Newick tree: "{text[:10]}"')
sentinals = [";", EOT]
stack = []
nodes = []
children = name = expected_attribute = None
attributes = {}
tokeniser = _Tokeniser(text, **kw)
for token in tokeniser.tokens():
if expected_attribute is not None:
(attr_name, attr_cast) = expected_attribute
try:
attributes[attr_name] = attr_cast(token)
except ValueError:
raise tokeniser.error(f"Can't convert {attr_name} '{token}'")
expected_attribute = None
elif token == "(":
if children is not None:
raise tokeniser.error("Two subtrees in one node, missing comma?")
elif name or attributes:
raise tokeniser.error("Subtree must be first element of the node.")
stack.append((nodes, sentinals, attributes))
(nodes, sentinals, attributes) = ([], [")"], {})
elif token == ":":
if "length" in attributes:
raise tokeniser.error("Already have a length.")
expected_attribute = ("length", float)
elif token in [")", ";", ",", EOT]:
nodes.append(constructor(children, name, attributes))
children = name = expected_attribute = None
attributes = {}
if token in sentinals:
if stack:
children = nodes
(nodes, sentinals, attributes) = stack.pop()
else:
break
elif token == "," and ")" in sentinals:
pass
else:
raise tokeniser.error(
"Was expecting to end with %s"
% " or ".join([repr(s) for s in sentinals])
)
else:
if name is not None:
raise tokeniser.error(f"Already have a name '{name}' for this node.")
elif attributes:
raise tokeniser.error("name should come before length.")
name = token
assert not stack, stack
assert len(nodes) == 1, len(nodes)
return nodes[0] | null |
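# A minimal sketch of driving the parser above with a throw-away constructor:
# each node becomes a (name, length, children) tuple. The input string and
# tuple layout are illustrative only.
def _as_tuple(children, name, attributes):
    return (name, attributes.get("length"), children or [])
# '(A:1.0,B:2.0)root;' parses to ('root', None, [('A', 1.0, []), ('B', 2.0, [])])
_example_tree = METHOD_NAME("(A:1.0,B:2.0)root;", _as_tuple)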
5,616 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for LocalDockerModelServerRunner."""
import os
import time
from typing import Any, Dict, Optional
from absl import logging
import docker
from docker import errors as docker_errors
from tfx.components.infra_validator import error_types
from tfx.components.infra_validator import serving_bins
from tfx.components.infra_validator.model_server_runners import base_runner
from tfx.proto import infra_validator_pb2
_POLLING_INTERVAL_SEC = 1
def _make_docker_client(config: infra_validator_pb2.LocalDockerConfig):
params = {}
if config.client_timeout_seconds:
params['timeout'] = config.client_timeout_seconds
if config.client_base_url:
params['base_url'] = config.client_base_url
if config.client_api_version:
params['version'] = config.client_api_version
return docker.DockerClient(**params)
def METHOD_NAME(ports: Dict[str, Any], container_port: int) -> str:
"""Find host port from container port mappings.
`ports` is a nested dictionary of the following structure:
{
'8500/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32769'},
{'HostIp': '::', 'HostPort': '32770'},
],
'8501/tcp': [
{'HostIp': '0.0.0.0', 'HostPort': '32768'},
{'HostIp': '::', 'HostPort': '32771'},
],
}
Args:
ports: Dictionary of docker container port mapping.
container_port: Corresponding container port you're looking for.
Returns:
A found host port.
Raises:
ValueError: No corresponding host port was found.
"""
mappings = ports.get('{}/tcp'.format(container_port), [])
for mapping in mappings:
if mapping['HostIp'] == '0.0.0.0':
return mapping['HostPort']
else:
raise ValueError(
'No HostPort found for ContainerPort={} (all port mappings: {})'
.format(container_port, ports))
class LocalDockerRunner(base_runner.BaseModelServerRunner):
"""A model server runner that runs in a local docker runtime.
  You need to pre-install docker on the machine that is running the
  InfraValidator component. For that reason, it is recommended to use this
  runner only for testing purposes.
"""
def __init__(self, model_path: str,
serving_binary: serving_bins.ServingBinary,
serving_spec: infra_validator_pb2.ServingSpec):
"""Make a local docker runner.
Args:
model_path: An IV-flavored model path. (See model_path_utils.py)
serving_binary: A ServingBinary to run.
serving_spec: A ServingSpec instance.
"""
self._model_path = model_path
self._serving_binary = serving_binary
self._serving_spec = serving_spec
self._docker = _make_docker_client(serving_spec.local_docker)
self._container = None
self._endpoint = None
def __repr__(self):
return 'LocalDockerRunner(image: {image})'.format(
image=self._serving_binary.image)
def GetEndpoint(self):
assert self._endpoint is not None, (
'Endpoint is not yet created. You should call Start() first.')
return self._endpoint
def Start(self):
assert self._container is None, (
'You cannot start model server multiple times.')
if isinstance(self._serving_binary, serving_bins.TensorFlowServing):
is_local = os.path.isdir(self._model_path)
run_params = self._serving_binary.MakeDockerRunParams(
model_path=self._model_path,
needs_mount=is_local)
else:
raise NotImplementedError('Unsupported serving binary {}'.format(
type(self._serving_binary).__name__))
logging.info('Running container with parameter %s', run_params)
self._container = self._docker.containers.run(**run_params)
def WaitUntilRunning(self, deadline):
assert self._container is not None, 'container has not been started.'
while time.time() < deadline:
try:
# Reload container attributes from server. This is the only right way to
# retrieve the latest container status from docker engine.
self._container.reload()
status = self._container.status
except docker_errors.NotFound as e:
# If the job has been aborted and container has specified auto_removal
# to True, we might get a NotFound error during container.reload().
raise error_types.JobAborted(
'Container not found. Possibly removed after the job has been '
'aborted.') from e
# The container is just created and not yet in the running status.
if status == 'created':
time.sleep(_POLLING_INTERVAL_SEC)
continue
# The container is running :)
if status == 'running':
host_port = METHOD_NAME(self._container.ports,
self._serving_binary.container_port)
host_ip_address = (
self._serving_spec.local_docker.host_ip_address
if self._serving_spec.local_docker.host_ip_address
else 'localhost'
)
self._endpoint = f'{host_ip_address}:{host_port}'
return
# Docker status is one of {'created', 'restarting', 'running', 'removing',
# 'paused', 'exited', or 'dead'}. Status other than 'created' and
# 'running' indicates the job has been aborted.
raise error_types.JobAborted(
'Job has been aborted (container status={})'.format(status))
raise error_types.DeadlineExceeded(
'Deadline exceeded while waiting for the container to be running.')
def GetLogs(self) -> Optional[str]:
if self._container:
result = self._container.logs()
if isinstance(result, bytes):
return result.decode('utf-8')
elif isinstance(result, str):
return result
else: # Generator of strs:
return '\n'.join(result)
return None
def Stop(self):
if self._container:
logging.info('Stopping container.')
self._container.stop()
self._docker.close() | null |
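# A minimal sketch of the port-mapping lookup above, using the nested
# structure documented in its docstring; the port numbers are hypothetical.
_example_ports = {
    '8500/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '32769'},
                 {'HostIp': '::', 'HostPort': '32770'}],
}
assert METHOD_NAME(_example_ports, 8500) == '32769'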
5,617 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init,invalid-name
import systemtesting
import tempfile
import shutil
import os
from mantid.api import AnalysisDataService as ADS
from mantid.simpleapi import *
from Diffraction.single_crystal.sxd import SXD
from Diffraction.single_crystal.base_sx import PEAK_TYPE, INTEGRATION_TYPE
class SXDPeakSearchAndFindUBUsingFFT(systemtesting.MantidSystemTest):
def cleanup(self):
ADS.clear()
def runTest(self):
ws = Load(Filename="SXD23767.raw", LoadMonitors="Exclude")
self.peaks = SXD.find_sx_peaks(ws, nstd=6)
FindUBUsingFFT(PeaksWorkspace=self.peaks, MinD=1, MaxD=10, Tolerance=0.15)
SelectCellOfType(PeaksWorkspace=self.peaks, CellType="Cubic", Centering="F", Apply=True)
OptimizeLatticeForCellType(PeaksWorkspace=self.peaks, CellType="Cubic", Apply=True)
self.nindexed, *_ = IndexPeaks(PeaksWorkspace=self.peaks, Tolerance=0.1, CommonUBForAll=True)
def validate(self):
self.assertEqual(214, self.nindexed)
latt = SXD.retrieve(self.peaks).sample().getOrientedLattice()
        a, alpha = 5.6541, 90  # published value for NaCl is a=5.6402 but the detector positions haven't been calibrated
self.assertAlmostEqual(a, latt.a(), delta=1e-5)
self.assertAlmostEqual(a, latt.b(), delta=1e-5)
self.assertAlmostEqual(a, latt.c(), delta=1e-5)
self.assertAlmostEqual(alpha, latt.alpha(), delta=1e-10)
self.assertAlmostEqual(alpha, latt.beta(), delta=1e-10)
self.assertAlmostEqual(alpha, latt.gamma(), delta=1e-10)
return self.peaks, "SXD23767_found_peaks.nxs"
class SXDDetectorCalibration(systemtesting.MantidSystemTest):
def METHOD_NAME(self):
self._temp_dir = tempfile.mkdtemp()
def cleanup(self):
ADS.clear()
shutil.rmtree(self._temp_dir)
def runTest(self):
self.peaks = LoadNexus(Filename="SXD23767_found_peaks.nxs", OutputWorkspace="peaks")
SXD.remove_peaks_on_detector_edge(self.peaks, 2)
# force lattice parameters to equal published values
a, alpha = 5.6402, 90
CalculateUMatrix(PeaksWorkspace=self.peaks, a=a, b=a, c=a, alpha=alpha, beta=alpha, gamma=alpha)
        # load an empty workspace because MoveComponent etc. only work on Matrix workspaces
self.ws = LoadEmptyInstrument(InstrumentName="SXD", OutputWorkspace="empty")
self.xml_path = SXD.calibrate_sxd_panels(self.ws, self.peaks, self._temp_dir, tol=0.25, SearchRadiusTransBank=0.025)
self.nindexed, *_ = IndexPeaks(PeaksWorkspace=self.peaks, Tolerance=0.1, CommonUBForAll=True)
def validate(self):
# test seems to vary on OS - so just check more peaks indexed and components have been moved
self.assertGreaterThan(self.nindexed, 232)
# check xml file exists
self.assertTrue(os.path.exists(self.xml_path))
# check calibration has been applied to both MatrixWorkspace and peaks
for ws in [self.peaks, self.ws]:
self.assertNotEqual(ws.getInstrument().getComponentByName("bank1").getPos()[1], 0)
self.assertNotEqual(ws.getInstrument().getComponentByName("bank2").getPos()[2], 0)
return True
class SXDProcessVanadium(systemtesting.MantidSystemTest):
def cleanup(self):
ADS.clear()
def runTest(self):
sxd = SXD(vanadium_runno=23779, empty_runno=23768)
sxd.process_vanadium()
self.van = sxd.van_ws
def validate(self):
self.checkInstrument = False
return self.van, "SXD23779_processed_vanadium.nxs"
class SXDProcessSampleData(systemtesting.MantidSystemTest):
def cleanup(self):
ADS.clear()
def runTest(self):
sxd = SXD(vanadium_runno=23769, empty_runno=23768)
sxd.van_ws = LoadNexus(Filename="SXD23779_processed_vanadium.nxs", OutputWorkspace="SXD23779_vanadium")
sxd.set_sample(
Geometry={"Shape": "CSG", "Value": sxd.sphere_shape}, Material={"ChemicalFormula": "Na Cl", "SampleNumberDensity": 0.0223}
)
sxd.set_goniometer_axes([0, 1, 0, 1]) # ccw rotation around vertical
runno = 23767
sxd.process_data([runno], [0])
self.ws = sxd.get_ws_name(runno)
def validate(self):
self.checkInstrument = False
return self.ws, "SXD23767_processed.nxs"
class SXDIntegrateData(systemtesting.MantidSystemTest):
def cleanup(self):
ADS.clear()
def runTest(self):
sxd = SXD(vanadium_runno=23769, empty_runno=23768)
# load data and convert to Qlab
ws = LoadNexus(Filename="SXD23767_processed.nxs", OutputWorkspace="SXD23767_processed")
runno = 23767
sxd.set_ws(runno, ws)
sxd.convert_to_MD(run=runno)
# load peaks to integrate
peaks = LoadNexus(Filename="SXD23767_found_peaks.nxs", OutputWorkspace="SXD23767_found")
sxd.set_peaks(runno, peaks, PEAK_TYPE.FOUND)
sxd.set_sample(
Geometry={"Shape": "CSG", "Value": sxd.sphere_shape}, Material={"ChemicalFormula": "Na Cl", "SampleNumberDensity": 0.0223}
)
sxd.set_goniometer_axes([0, 1, 0, 1]) # ccw rotation around vertical
sxd.integrate_data(INTEGRATION_TYPE.MD_OPTIMAL_RADIUS, PEAK_TYPE.FOUND, scale=12)
self.integrated_peaks = sxd.get_peaks_name(runno, PEAK_TYPE.FOUND, INTEGRATION_TYPE.MD_OPTIMAL_RADIUS)
def validate(self):
self.tolerance = 1e-8
self.tolerance_is_rel_err = True
return self.integrated_peaks, "SXD23767_found_peaks_integrated.nxs" | null |
5,618 | # #
# This file is distributed as part of the WannierBerri code #
# under the terms of the GNU General Public License. See the #
# file `LICENSE' in the root directory of the WannierBerri #
# distribution, or http://www.gnu.org/copyleft/gpl.txt #
# #
# The WannierBerri code is hosted on GitHub: #
# https://github.com/stepan-tsirkin/wannier-berri #
# written by #
# Stepan Tsirkin, University of Zurich #
# #
#------------------------------------------------------------
# This is an auxiliary class for the __evaluate.py module
import numpy as np
import lazy_property
from ..symmetry import SYMMETRY_PRECISION
class KpointBZ():
def __init__(self, K=np.zeros(3), dK=np.ones(3), NKFFT=np.ones(3), factor=1., symgroup=None, refinement_level=-1 ):
self.K = np.copy(K)
self.dK = np.copy(dK)
self.factor = factor
self.res = None
self.NKFFT = np.copy(NKFFT)
self.symgroup = symgroup
self.refinement_level = refinement_level
def set_res(self, res):
self.res = res
@lazy_property.LazyProperty
def METHOD_NAME(self):
return self.K / self.NKFFT
def __str__(self):
return (
"coord in rec.lattice = [ {0:10.6f} , {1:10.6f} , {2:10.6f} ], refinement level:{3}, factor = {4}".format(
self.K[0], self.K[1], self.K[2], self.refinement_level,self.factor))
@lazy_property.LazyProperty
def _max(self):
return self.res.max #np.max(self.res_smooth)
@property
def evaluated(self):
return not (self.res is None)
@property
def check_evaluated(self):
if not self.evaluated:
raise RuntimeError("result for a K-point is called, which is not evaluated")
@property
def max(self):
self.check_evaluated
return self._max * self.factor
@property
def norm(self):
self.check_evaluated
return self._norm * self.factor
@property
def normder(self):
self.check_evaluated
return self._normder * self.factor
@property
def get_res(self):
self.check_evaluated
return self.res * self.factor
class KpointBZpath(KpointBZ):
def __init__(self, K=np.zeros(3), symgroup=None):
super().__init__(K=np.copy(K), symgroup=symgroup)
def __str__(self):
return (
"coord in rec.lattice = [ {0:10.6f} , {1:10.6f} , {2:10.6f} ] ".format(
self.K[0], self.K[1], self.K[2]))
class KpointBZparallel(KpointBZ):
"describes a Kpoint and the surrounding parallelagramm of size dK x dK x dK"
@lazy_property.LazyProperty
def dK_fullBZ(self):
return self.dK / self.NKFFT
@lazy_property.LazyProperty
def dK_fullBZ_cart(self):
return self.dK_fullBZ[:, None] * self.symgroup.recip_lattice
@lazy_property.LazyProperty
def star(self):
if self.symgroup is None:
return [self.K]
else:
return self.symgroup.star(self.K)
def __str__(self):
return super().__str__()+"dK={} ".format(self.dK)
def absorb(self, other):
if other is None:
return
self.factor += other.factor
if other.res is not None:
if self.res is not None:
raise RuntimeError(
"combining two K-points :\n {} \n and\n {}\n with calculated result should not happen".format(
self, other))
self.res = other.res
def equiv(self, other):
if self.refinement_level != other.refinement_level:
return False
dif = self.star[:, None, :] - other.star[None, :, :]
res = False
if np.linalg.norm((dif - np.round(dif)), axis=2).min() < SYMMETRY_PRECISION:
res = True
return res
def divide(self, ndiv, periodic, use_symmetry=True):
assert (ndiv.shape == (3, ))
assert (np.all(ndiv > 0))
ndiv[np.logical_not(periodic)] = 1 # divide only along periodic directions
include_original = np.all(ndiv % 2 == 1)
K0 = self.K
dK_adpt = self.dK / ndiv
adpt_shift = (-self.dK + dK_adpt) / 2.
newfac = self.factor / np.prod(ndiv)
K_list_add = [
KpointBZparallel(
K=K0 + adpt_shift + dK_adpt * np.array([x, y, z]),
dK=dK_adpt,
NKFFT=self.NKFFT,
factor=newfac,
symgroup=self.symgroup,
refinement_level=self.refinement_level + 1) for x in range(ndiv[0]) for y in range(ndiv[1])
for z in range(ndiv[2]) if not (include_original and np.all(np.array([x, y, z]) * 2 + 1 == ndiv))
]
if include_original:
self.factor = newfac
self.refinement_level += 1
self.dK = dK_adpt
else:
self.factor = 0 # the K-point is "dead" but can be used for starting calculation on a different grid - not implemented
if use_symmetry and (self.symgroup is not None):
exclude_equiv_points(K_list_add)
return K_list_add
@lazy_property.LazyProperty
def distGamma(self):
shift_corners = np.arange(-3, 4)
corners = np.array([[x, y, z] for x in shift_corners for y in shift_corners for z in shift_corners])
return np.linalg.norm(((self.K % 1)[None, :] - corners).dot(self.symgroup.recip_lattice), axis=1).min()
def exclude_equiv_points(K_list, new_points=None):
# cnt: the number of excluded k-points
# weight_changed_old: a dictionary that saves the "old" weights, K_list[i].factor,
# for k-points that are already calculated (i < n - new_points)
# and whose weights are changed by this function
cnt = 0
n = len(K_list)
if new_points is None:
new_points = n
K_list_length = np.array([K.distGamma for K in K_list])
K_list_sort = np.argsort(K_list_length)
K_list_length = K_list_length[K_list_sort]
wall = [0] + list(np.where(K_list_length[1:] - K_list_length[:-1] > 1e-4)[0] + 1) + [len(K_list)]
exclude = []
# dictionary; key: ik, value: previous factor
weight_changed_old = {}
for start, end in zip(wall[:-1], wall[1:]):
for l in range(start, end):
i = K_list_sort[l]
if i not in exclude:
for m in range(start, end):
j = K_list_sort[m]
if i >= j:
continue
# There are two cases:
# (i) if i < n - new_points <= j; or
# (ii) if n - new_points <= i < j
# In both cases, j is excluded
if i < n - new_points and j < n - new_points:
continue
if j not in exclude:
if K_list[i].equiv(K_list[j]):
print('exclude dbg', i, j, K_list[i].K, K_list[j].K, n, new_points)
exclude.append(j)
if i < n - new_points:
if i not in weight_changed_old:
weight_changed_old[i] = K_list[i].factor
K_list[i].absorb(K_list[j])
cnt += 1
for i in sorted(exclude)[-1::-1]:
del K_list[i]
return cnt, weight_changed_old | null |
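# A minimal numeric sketch of the sub-cell geometry used by
# KpointBZparallel.divide(): splitting a parent cell of size dK=0.3 into three
# pieces along x keeps the parent centre (all divisors are odd) and places the
# neighbours at roughly +/- 0.1. The numbers are illustrative only.
_K0, _dK, _ndiv = np.zeros(3), np.full(3, 0.3), np.array([3, 1, 1])
_dK_adpt = _dK / _ndiv                 # 0.1 along x
_adpt_shift = (-_dK + _dK_adpt) / 2.0  # -0.1 along x
_centres = [_K0 + _adpt_shift + _dK_adpt * np.array([x, 0, 0]) for x in range(3)]
# _centres[i][0] is approximately -0.1, 0.0 and +0.1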
5,619 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import Cell
import mindspore.ops.operations as P
#{cast} would be recomputed and fused
class Net1(Cell):
def __init__(self):
super(Net1, self).__init__()
self.cast = P.Cast()
self.sum = P.ReduceSum(keep_dims=False)
def construct(self, x):
cast_res = self.cast(x, mstype.float32)
sum1_res = self.sum(cast_res, (0,))
sum2_res = self.sum(cast_res, (1,))
return sum1_res, sum2_res
#{sqrt} would be recomputed on Ascend
class Net2(Cell):
def __init__(self):
super(Net2, self).__init__()
self.sqrt = P.Sqrt()
self.sum = P.ReduceSum(keep_dims=True)
self.add = P.Add()
self.neg = P.Neg()
def construct(self, x0, x1):
sqrt_res = self.sqrt(x0)
neg_res = self.neg(sqrt_res)
add_res = self.add(x1, sqrt_res)
sum_res = self.sum(add_res, (0,))
return neg_res, sum_res
#{sqrt} would be recomputed
class Net3(Cell):
def __init__(self):
super(Net3, self).__init__()
self.sqrt = P.Sqrt()
self.add = P.Add()
self.neg = P.Neg()
def construct(self, x0, x1):
sqrt_res = self.sqrt(x0)
neg_res = self.neg(sqrt_res)
add_res = self.add(x1, sqrt_res)
return neg_res, add_res
#{sqrt neg} would be recomputed
class Net4(Cell):
def __init__(self):
super(Net4, self).__init__()
self.sqrt = P.Sqrt()
self.neg = P.Neg()
self.sum = P.ReduceSum(keep_dims=False)
def construct(self, x):
sqrt_res = self.sqrt(x)
neg_res = self.neg(sqrt_res)
sum1_res = self.sum(neg_res, (0,))
sum2_res = self.sum(neg_res, (1,))
return sum1_res, sum2_res
#{sqrt} would be recomputed
class Net5(Cell):
def __init__(self):
super(Net5, self).__init__()
self.sqrt = P.Sqrt()
self.add = P.Add()
def construct(self, x0, x1, x2):
sqrt_res = self.sqrt(x0)
add1_res = self.add(sqrt_res, x1)
add2_res = self.add(sqrt_res, x2)
return add1_res, add2_res
def test_basic1(net):
def get_output(i0, net, enable_graph_kernel=False):
context.set_context(enable_graph_kernel=enable_graph_kernel)
net_obj = net()
output = net_obj(i0)
return output
i0 = Tensor(np.random.uniform(1, 2, [1024, 1024]).astype(np.float16))
expect = get_output(i0, net, False)
output = get_output(i0, net, True)
expect0_np = expect[0].asnumpy().copy()
output0_np = output[0].asnumpy().copy()
expect1_np = expect[1].asnumpy().copy()
output1_np = output[1].asnumpy().copy()
assert np.allclose(expect0_np, output0_np, 1.e-3, 1.e-3)
assert np.allclose(expect1_np, output1_np, 1.e-3, 1.e-3)
def test_basic2(net):
def get_output(i0, i1, net, enable_graph_kernel=False):
context.set_context(enable_graph_kernel=enable_graph_kernel)
net_obj = net()
output = net_obj(i0, i1)
return output
i0 = Tensor(np.random.uniform(1, 2, [1, 1024]).astype(np.float32))
i1 = Tensor(np.random.uniform(1, 2, [1024, 1024]).astype(np.float32))
expect = get_output(i0, i1, net, False)
output = get_output(i0, i1, net, True)
expect0_np = expect[0].asnumpy().copy()
output0_np = output[0].asnumpy().copy()
expect1_np = expect[1].asnumpy().copy()
output1_np = output[1].asnumpy().copy()
assert np.allclose(expect0_np, output0_np, 1.e-3, 1.e-3)
assert np.allclose(expect1_np, output1_np, 1.e-3, 1.e-3)
def test_basic3(net):
def get_output(i0, i1, i2, net, enable_graph_kernel=False):
context.set_context(enable_graph_kernel=enable_graph_kernel)
net_obj = net()
output = net_obj(i0, i1, i2)
return output
i0 = Tensor(np.random.uniform(1, 2, [1, 1024]).astype(np.float16))
i1 = Tensor(np.random.uniform(1, 2, [1024, 1024]).astype(np.float16))
i2 = Tensor(np.random.uniform(1, 2, [2048, 1024]).astype(np.float16))
expect = get_output(i0, i1, i2, net, False)
output = get_output(i0, i1, i2, net, True)
expect0_np = expect[0].asnumpy().copy()
output0_np = output[0].asnumpy().copy()
expect1_np = expect[1].asnumpy().copy()
output1_np = output[1].asnumpy().copy()
assert np.allclose(expect0_np, output0_np, 1.e-3, 1.e-3)
assert np.allclose(expect1_np, output1_np, 1.e-3, 1.e-3)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_1():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic1(Net1)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_2():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic2(Net2)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_3():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic2(Net3)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_4():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic1(Net4)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_gpu_5():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_basic3(Net5)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_1():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic1(Net1)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_2():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic2(Net2)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_3():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic2(Net3)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def METHOD_NAME():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic1(Net4)
@pytest.mark.level1
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_5():
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
test_basic3(Net5) | null |
5,620 | # -*- coding: utf-8 -*-
"""
@file __init__.py
@author Michael Behrisch
@date 2011-06-23
@version $Id: __init__.py 13845 2013-05-02 13:53:19Z dkrajzew $
Python interface to SUMO especially for parsing xml input and output files.
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2011-2013 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
import os
import subprocess
import sys  # needed for the non-integer ID warning in _Running.g()
from xml.sax import parseString, handler
from optparse import OptionParser, OptionGroup, Option
class ConfigurationReader(handler.ContentHandler):
"""Reads a configuration template, storing the options in an OptionParser"""
def __init__(self, optParse, groups, options):
self._opts = optParse
self._groups = groups
self._options = options
self._group = self._opts
def startElement(self, name, attrs):
if len(attrs) == 0:
self._group = OptionGroup(self._opts, name)
if self._group != self._opts and self._groups and self._group.title not in self._groups:
return
if "type" in attrs and name != "help":
if self._options and name not in self._options:
return
help = attrs.get("help", "")
option = Option("--" + name, help=help)
if attrs["type"] == "BOOL":
option = Option("--" + name, action="store_true", default=False, help=help)
elif attrs["type"] in ["FLOAT", "TIME"]:
option.type = "float"
if attrs["value"]:
option.default = float(attrs["value"])
elif attrs["type"] == "INT":
option.type = "int"
if attrs["value"]:
option.default = int(attrs["value"])
else:
option.default = attrs["value"]
self._group.add_option(option)
def endElement(self, name):
if self._group != self._opts and name == self._group.title:
self._opts.add_option_group(self._group)
self._group = self._opts
def pullOptions(executable, optParse, groups=None, options=None):
output = subprocess.Popen([executable, "--save-template", "-"], stdout=subprocess.PIPE).communicate()[0]
parseString(output, ConfigurationReader(optParse, groups, options))
def saveConfiguration(executable, options, filename):
options.save_configuration = filename
METHOD_NAME(executable, options)
def METHOD_NAME(executable, options):
optParser = OptionParser()
pullOptions(executable, optParser)
cmd = [executable]
for option, value in options.__dict__.iteritems():
o = "--" + option.replace("_", "-")
opt = optParser.get_option(o)
if opt is not None and value is not None and opt.default != value:
cmd.append(o)
if opt.action != "store_true":
cmd.append(str(value))
return subprocess.METHOD_NAME(cmd)
def exeExists(binary):
if os.name == "nt" and binary[-4:] != ".exe":
binary += ".exe"
return os.path.exists(binary)
def checkBinary(name, bindir=None):
"""Checks for the given binary in the places, defined by the environment variables SUMO_HOME and SUMO_BINDIR."""
if name == "sumo-gui":
envName = "GUISIM_BINARY"
else:
envName = name.upper() + "_BINARY"
env = os.environ
join = os.path.join
if envName in env and exeExists(env.get(envName)):
return env.get(envName)
if bindir is not None:
binary = join(bindir, name)
if exeExists(binary):
return binary
if "SUMO_BINDIR" in env:
binary = join(env.get("SUMO_BINDIR"), name)
if exeExists(binary):
return binary
if "SUMO_HOME" in env:
binary = join(env.get("SUMO_HOME"), "bin", name)
if exeExists(binary):
return binary
binary = os.path.abspath(join(os.path.dirname(__file__), '..', '..', 'bin', name))
if exeExists(binary):
return binary
return name
class _Running:
"""
A generator of running, numerical IDs
Should be enhanced by:
- a member method for returning the size
- a member iterator over the stored ids
"""
def __init__(self, orig_ids=False, warn=False):
"""Contructor"""
# whether original IDs shall be used instead of an index
self.orig_ids = orig_ids
# whether a warning for non-integer IDs shall be given
self.warn = warn
# running index of assigned numerical IDs
self.index = 0
# map from known IDs to assigned numerical IDs
self._m = {}
def g(self, id):
"""
If the given id is known, the numerical representation is returned,
otherwise a new running number is assigned to the id and returned"""
if id not in self._m:
if self.orig_ids:
self._m[id] = id
if self.warn:
try:
int(id)
except ValueError:
sys.stderr.write(
'Warning: ID "%s" is not an integer.\n' % id)
self.warn = False
else:
self._m[id] = self.index
self.index += 1
return self._m[id]
def k(self, id):
"""
Returns whether the given id is known."""
return id in self._m
def d(self, id):
"""
Removed the element."""
del self._m[id]
def _intTime(tStr):
"""
Converts a time given as a string containing a float into an integer representation.
"""
return int(float(tStr))
def _laneID2edgeID(laneID):
return laneID[:laneID.rfind("_")] | null |
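# A minimal sketch of the _Running helper above: arbitrary string IDs are
# mapped to a running integer index, and repeated lookups return the same
# number. The edge names are hypothetical.
_ids = _Running()
assert _ids.g("edge_a") == 0
assert _ids.g("edge_b") == 1
assert _ids.g("edge_a") == 0   # already known, so the same index is returned
assert _ids.k("edge_b")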
5,621 | import ctypes
import numpy as np
from collections import defaultdict, deque
from typing import TypeVar, Type, Any, Dict, Deque, Tuple
from tinygrad.helpers import DType, dtypes, prod, GlobalCounters, ImageDType
_T = TypeVar("_T")
class RawBuffer: # pylint: disable=abstract-method
def __init__(self, size:int, dtype:DType, buf:Any=None, allocator:Any=None, **kwargs):
self.size: int = size
self.dtype: DType = dtype
self._buf = buf if buf is not None else (allocator.alloc(size, dtype, **kwargs) if allocator else None) # If buf is provided, use it. Otherwise try to allocate from the allocator.
self._memsz: int = size*dtype.itemsize
self._allocator = allocator
self._device = kwargs.get('device', None)
GlobalCounters.mem_used += self._memsz
def __del__(self): # NOTE: if it fails on init (bad dtype), it won't have a _memsz
if hasattr(self, '_memsz'): GlobalCounters.mem_used -= self._memsz
if hasattr(self, '_allocator') and self._allocator: self._allocator.free(self._buf)
def __repr__(self): return f"buffer<{self.size}, {self.dtype}>"
@property
def key(self): return (self.size, self.dtype)
# NOTE: this interface allows for 0 copy
@classmethod
def fromCPU(cls:Type[_T], x:np.ndarray) -> _T: raise NotImplementedError("must be implemented")
def toCPU(self) -> np.ndarray: raise NotImplementedError("must be implemented")
class RawConst(RawBuffer): # pylint: disable=abstract-method
def __repr__(self): return f"const<{self._buf}, {self.dtype}>"
@property
def key(self): return (str(self._buf), self.dtype)
def buf_is_kernel_arg(x) -> bool:
return x.realized is not None and x.realized.__class__ is not RawConst
# --teenygrad--
class RawBufferCopyIn(RawBuffer):
def _copyin(self, x:np.ndarray) -> None: raise NotImplementedError("must be implemented")
@classmethod
def fromCPU(cls, x:np.ndarray, **kwargs):
ret = cls(prod(x.shape), dtypes.from_np(x.dtype), **kwargs)
if x.size > 0: ret._copyin(x)
return ret
class RawBufferMapped(RawBufferCopyIn):
def _buffer(self) -> memoryview: raise NotImplementedError("must be implemented")
# NOTE: this metadata prevents the backing buffer from being freed. hack can be removed with PEP688
def toCPU(self) -> np.ndarray: return np.frombuffer(self._buffer(), dtype=np.dtype(self.dtype.np, metadata={"backing": self}), count=self.size) # type: ignore
def _copyin(self, x:np.ndarray) -> None: np.copyto(self.toCPU(), x.reshape(-1))
# this one is simple enough that i moved it out of the runtimes
class RawMallocBuffer(RawBufferMapped):
def __init__(self, size, dtype: DType): super().__init__(size, dtype, ({dtypes.float64:ctypes.c_double, dtypes.float32: ctypes.c_float, dtypes.float16: ctypes.c_int16, dtypes.bfloat16: ctypes.c_int16, dtypes.int8: ctypes.c_int8, dtypes.uint8: ctypes.c_uint8, dtypes.bool: ctypes.c_uint8, dtypes.int32: ctypes.c_int32, dtypes.uint32: ctypes.c_uint32, dtypes.int64: ctypes.c_int64, dtypes.uint64: ctypes.c_uint64}[dtype] * size)())
def _buffer(self): return memoryview(self._buf)
class RawBufferCopyInOut(RawBufferCopyIn):
def _copyout(self, x:np.ndarray) -> None: raise NotImplementedError("must be implemented")
def toCPU(self) -> np.ndarray:
x: np.ndarray = np.empty(self.size, dtype=self.dtype.np)
if x.size > 0: self._copyout(x)
return x
class RawBufferTransfer(RawBuffer):
def _transfer(self, x) -> None: raise NotImplementedError("must be implemented")
@classmethod
def transfer(cls, x, shape, dtype, **kwargs):
ret = cls(prod(shape), dtype, **kwargs)
ret._transfer(x)
return ret
class LRUAllocator:
def __init__(self, dev_memsz=(4<<30)):
self.epoch = 0
self.free_space: Dict[Any, int] = defaultdict(lambda: dev_memsz)
self.buffer_info: Dict[Any, Tuple[int, DType, str]] = dict()
    self.cached_buffers: Dict[Tuple[int, ...], Deque[Tuple[Any, int]]] = defaultdict(deque) # Cached buffer storage, split by type and size, newest first.
self.aging_order: Dict[Any, Deque[Tuple[Tuple[int, ...], int]]] = defaultdict(deque) # Keys of cached_buffers, ordered from oldest to newest updates.
def __del__(self):
for v in self.cached_buffers.values():
for buf, _ in v: self._free_buffer(buf)
def _cache_reuse_buffer(self, rawbufs: Deque[Tuple[Any, int]]): # The newest cached buffer is reused.
GlobalCounters.mem_cached -= self._underlying_buf_memsz(rawbufs[0][0])
return rawbufs.popleft()[0]
def _alloc_buffer(self, size, dtype, device, **kwargs):
self.free_space[device] -= size*dtype.itemsize
    while len(self.aging_order[device]) and self.free_space[device] < 0: # When OOM, remove LRU buffers.
bucket, epoch = self.aging_order[device].popleft()
if self.cached_buffers[bucket] and self.cached_buffers[bucket][-1][1] == epoch: self._free_buffer(self.cached_buffers[bucket].pop()[0]) # Free cached buffer if it is still in cache.
newbuf = self.METHOD_NAME(max(1, size), dtype, device, **kwargs)
self.buffer_info[newbuf] = (size, dtype, device)
return newbuf
def _free_buffer(self, buf_to_free):
from tinygrad.jit import CacheCollector
CacheCollector._on_buf_free(buf_to_free)
self.free_space[self.buffer_info[buf_to_free][2]] += self._underlying_buf_memsz(buf_to_free)
GlobalCounters.mem_cached -= self._underlying_buf_memsz(buf_to_free)
self.buffer_info.pop(buf_to_free)
self._do_free(buf_to_free)
def alloc(self, size, dtype, device='0', **kwargs):
rawbufs = self.cached_buffers.get(self._cached_bufkey(size, dtype, device), None)
return self._cache_reuse_buffer(rawbufs) if rawbufs else self._alloc_buffer(size, dtype, device, **kwargs)
def free(self, buf): # free() just caches buffer. It might be freed later when OOM during allocation.
self.epoch += 1
size, dtype, device = self.buffer_info[buf]
self.cached_buffers[self._cached_bufkey(size, dtype, device)].appendleft((buf, self.epoch))
self.aging_order[device].append((self._cached_bufkey(size, dtype, device), self.epoch))
GlobalCounters.mem_cached += self._underlying_buf_memsz(buf)
def _underlying_buf_memsz(self, buf): return self.buffer_info[buf][0] * self.buffer_info[buf][1].itemsize
def _cached_bufkey(self, size, dtype, device) -> Tuple[int, ...]: return (device, size, dtype, dtype.shape) if isinstance(dtype, ImageDType) else (device, size, dtype) # Provides a key for reusing device buffers with identical keys.
def METHOD_NAME(self, size, dtype, device, **kwargs): raise NotImplementedError("must be implemented")
def _do_free(self, buf): pass | null |
5,622 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing read_image
"""
import numpy
import pytest
from mindspore.dataset import vision
from mindspore.dataset.vision import ImageReadMode
def test_read_image_jpeg():
"""
Feature: read_image
Description: Read the contents of a JPEG image file
Expectation: The Output is equal to the expected output
"""
filename = "../data/dataset/testFormats/apple.jpg"
output = vision.read_image(filename)
assert output.shape == (226, 403, 3)
assert output.dtype == numpy.uint8
assert output[0, 0, 0] == 221
assert output[0, 0, 1] == 221
assert output[0, 0, 2] == 221
assert output[100, 200, 0] == 195
assert output[100, 200, 1] == 60
assert output[100, 200, 2] == 31
assert output[225, 402, 0] == 181
assert output[225, 402, 1] == 181
assert output[225, 402, 2] == 173
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
filename = "../data/dataset/testFormats/apple_grayscale.jpg"
output = vision.read_image(filename)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
def test_read_image_png():
"""
Feature: read_image
Description: Read the contents of a PNG image file
Expectation: The Output is equal to the expected output
"""
filename = "../data/dataset/testFormats/apple.png"
output = vision.read_image(filename)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
filename = "../data/dataset/testFormats/apple_4_channels.png"
output = vision.read_image(filename)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
def test_read_image_bmp():
"""
Feature: read_image
Description: Read the contents of a BMP image file
Expectation: The Output is equal to the expected output
"""
filename = "../data/dataset/testFormats/apple.bmp"
output = vision.read_image(filename)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
def test_read_image_tiff():
"""
Feature: read_image
Description: Read the contents of a TIFF image file
Expectation: The Output is equal to the expected output
"""
filename = "../data/dataset/testFormats/apple.tiff"
output = vision.read_image(filename)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.UNCHANGED)
assert output.shape == (226, 403, 3)
output = vision.read_image(filename, ImageReadMode.GRAYSCALE)
assert output.shape == (226, 403, 1)
output = vision.read_image(filename, ImageReadMode.COLOR)
assert output.shape == (226, 403, 3)
def METHOD_NAME():
"""
Feature: read_image
Description: Test read_image with invalid parameter
Expectation: Error is caught when the parameter is invalid
"""
def test_invalid_param(filename_param, mode_param, error, error_msg):
"""
        A helper checking that the expected error type and message are raised for an invalid parameter.
"""
with pytest.raises(error) as error_info:
vision.read_image(filename_param, mode_param)
assert error_msg in str(error_info.value)
    # Test with a nonexistent filename
wrong_filename = "this_file_is_not_exist"
error_message = "Invalid file path, " + wrong_filename + " does not exist."
test_invalid_param(wrong_filename, ImageReadMode.COLOR, RuntimeError, error_message)
# Test with a directory name
wrong_filename = "../data/dataset/"
error_message = "Invalid file path, " + wrong_filename + " is not a regular file."
test_invalid_param(wrong_filename, ImageReadMode.COLOR, RuntimeError, error_message)
    # Test with an unsupported GIF file
wrong_filename = "../data/dataset/testFormats/apple.gif"
error_message = "Failed to read file " + wrong_filename
test_invalid_param(wrong_filename, ImageReadMode.COLOR, RuntimeError, error_message)
# Test with an invalid type for the filename
error_message = "Input filename is not of type"
test_invalid_param(0, ImageReadMode.UNCHANGED, TypeError, error_message)
# Test with an invalid type for the mode
filename = "../data/dataset/testFormats/apple.jpg"
error_message = "Input mode is not of type"
test_invalid_param(filename, "0", TypeError, error_message)
if __name__ == "__main__":
test_read_image_jpeg()
test_read_image_png()
test_read_image_bmp()
test_read_image_tiff()
METHOD_NAME() | null |
5,623 | """Classes & functions for plugin commands like `dbt:run`, `dbt:docs` or `dbt:test`."""
from __future__ import annotations
import asyncio
from abc import ABCMeta, abstractmethod
import structlog
from meltano.core.block.singer import InvokerBase
from meltano.core.db import project_engine
from meltano.core.elt_context import PluginContext
from meltano.core.logging import OutputLogger
from meltano.core.logging.utils import SubprocessOutputWriter
from meltano.core.plugin.project_plugin import ProjectPlugin
from meltano.core.plugin.settings_service import PluginSettingsService
from meltano.core.plugin_invoker import PluginInvoker, invoker_factory
from meltano.core.project import Project
from meltano.core.runner import RunnerError
logger = structlog.getLogger(__name__)
class PluginCommandBlock(metaclass=ABCMeta):
"""Basic PluginCommand interface specification."""
@property
@abstractmethod
def name(self) -> str:
"""Name of the plugin command block.
In the case of a singer plugin this would most likely be the name of the plugin.
        ex. `dbt:run`: name = dbt, command = run
"""
raise NotImplementedError
@property
@abstractmethod
def command(self) -> str | None:
"""Get the plugin command to use when invoking the plugin (if any)."""
raise NotImplementedError
@abstractmethod
async def run(self) -> None:
"""Run the command."""
raise NotImplementedError
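# Minimal illustrative implementation of the interface above (hypothetical, not a
# real Meltano block); the concrete implementation used at runtime is InvokerCommand below.
class _ExamplePluginCommandBlock(PluginCommandBlock):
    """Tiny sketch showing how the abstract interface can be satisfied."""
    def __init__(self, name: str, command: str | None = None):
        self._name = name
        self._command = command
    @property
    def name(self) -> str:
        return self._name
    @property
    def command(self) -> str | None:
        return self._command
    async def run(self) -> None:
        logger.info(f"would run {self._name}:{self._command}")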
class InvokerCommand(InvokerBase, PluginCommandBlock):
"""`PluginCommandBlock` that supports invoking plugin commands."""
def __init__(
self,
name: str,
log: SubprocessOutputWriter,
block_ctx: dict,
project: Project,
plugin_invoker: PluginInvoker,
command: str | None,
command_args: tuple[str],
):
"""Configure and return a wrapped plugin invoker.
Args:
name: the name of the plugin/command.
            log: the OutputLogger instance to proxy output to.
block_ctx: the block context.
project: the project instance.
plugin_invoker: the plugin invoker.
command: the command to invoke.
command_args: any additional plugin args that should be used.
"""
super().__init__(
block_ctx=block_ctx,
project=project,
plugin_invoker=plugin_invoker,
command=command,
)
self._name = name
self._command = command
self._command_args = command_args
self._log = log
@property
def name(self) -> str:
"""Name is the underlying name of the plugin/command.
Returns:
The name str.
"""
return self._name
@property
def command(self) -> str | None:
"""Command is the specific plugin command to use when invoking the plugin.
Returns:
The command str if any.
"""
return self._command
@property
    def command_args(self) -> tuple[str]:
"""Get the command args to use when invoking the plugin.
Returns:
The command args if any.
"""
return self._command_args
async def _start(self):
invoke_args = (self.command_args,) if self.command_args else ()
await self.start(*invoke_args)
async def run(self) -> None:
"""Invoke a command capturing and logging produced output.
Raises:
RunnerError: if the command fails.
"""
try: # noqa: WPS501
async with self.invoker.prepared(self.context.session):
await self._start()
self.stdout_link(self._log)
self.stderr_link(self._log)
await asyncio.wait(
[*self.proxy_io(), self.process_future],
return_when=asyncio.ALL_COMPLETED,
)
finally:
self.context.session.close()
if exitcode := self.process_future.result():
command = self.command or self.command_args[0]
raise RunnerError(
f"`{self.name} {command}` failed with exit code: {exitcode}",
)
def METHOD_NAME(
plugin: ProjectPlugin,
project: Project,
command: str | None,
command_args: list[str] | None = None,
run_dir: str | None = None,
) -> InvokerCommand:
"""
Make an InvokerCommand from a plugin.
Args:
plugin: Plugin to make command from.
project: Project to use.
command: the command to invoke on the plugin i.e. `run` in dbt run.
command_args: any additional command args that should be passed in
during invocation.
run_dir: Optional directory to run commands in.
Returns:
InvokerCommand
"""
stderr_log = logger.bind(
stdio="stderr",
cmd_type="command",
)
_, session_maker = project_engine(project)
session = session_maker()
output_logger = OutputLogger("run.log")
invoker_log = output_logger.out(plugin.name, stderr_log)
ctx = PluginContext(
plugin=plugin,
settings_service=PluginSettingsService(project, plugin),
session=session,
)
invoker = invoker_factory(
project,
ctx.plugin,
context=ctx,
run_dir=run_dir,
plugin_settings_service=ctx.settings_service,
)
return InvokerCommand(
name=plugin.name,
log=invoker_log,
block_ctx=ctx,
project=project,
plugin_invoker=invoker,
command=command,
command_args=command_args,
) | null |
5,624 | import os
import time
import unittest
class StructSeqTest(unittest.TestCase):
def test_tuple(self):
t = time.gmtime()
self.assertIsInstance(t, tuple)
astuple = tuple(t)
self.assertEqual(len(t), len(astuple))
self.assertEqual(t, astuple)
# Check that slicing works the same way; at one point, slicing t[i:j] with
# 0 < i < j could produce NULLs in the result.
for i in range(-len(t), len(t)):
self.assertEqual(t[i:], astuple[i:])
for j in range(-len(t), len(t)):
self.assertEqual(t[i:j], astuple[i:j])
for j in range(-len(t), len(t)):
self.assertEqual(t[:j], astuple[:j])
self.assertRaises(IndexError, t.__getitem__, -len(t)-1)
self.assertRaises(IndexError, t.__getitem__, len(t))
for i in range(-len(t), len(t)-1):
self.assertEqual(t[i], astuple[i])
def test_repr(self):
t = time.gmtime()
self.assertTrue(repr(t))
t = time.gmtime(0)
self.assertEqual(repr(t),
"time.struct_time(tm_year=1970, tm_mon=1, tm_mday=1, tm_hour=0, "
"tm_min=0, tm_sec=0, tm_wday=3, tm_yday=1, tm_isdst=0)")
# os.stat() gives a complicated struct sequence.
st = os.stat(__file__)
rep = repr(st)
self.assertTrue(rep.startswith("os.stat_result"))
self.assertIn("st_mode=", rep)
self.assertIn("st_ino=", rep)
self.assertIn("st_dev=", rep)
def test_concat(self):
t1 = time.gmtime()
t2 = t1 + tuple(t1)
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
def test_repeat(self):
t1 = time.gmtime()
t2 = 3 * t1
for i in range(len(t1)):
self.assertEqual(t2[i], t2[i+len(t1)])
self.assertEqual(t2[i], t2[i+2*len(t1)])
def test_contains(self):
t1 = time.gmtime()
for item in t1:
self.assertIn(item, t1)
self.assertNotIn(-42, t1)
def test_hash(self):
t1 = time.gmtime()
self.assertEqual(hash(t1), hash(tuple(t1)))
def test_cmp(self):
t1 = time.gmtime()
t2 = type(t1)(t1)
self.assertEqual(t1, t2)
self.assertTrue(not (t1 < t2))
self.assertTrue(t1 <= t2)
self.assertTrue(not (t1 > t2))
self.assertTrue(t1 >= t2)
self.assertTrue(not (t1 != t2))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_fields(self):
t = time.gmtime()
self.assertEqual(len(t), t.n_sequence_fields)
self.assertEqual(t.n_unnamed_fields, 0)
self.assertEqual(t.n_fields, time._STRUCT_TM_ITEMS)
def test_constructor(self):
t = time.struct_time
self.assertRaises(TypeError, t)
self.assertRaises(TypeError, t, None)
self.assertRaises(TypeError, t, "123")
self.assertRaises(TypeError, t, "123", dict={})
self.assertRaises(TypeError, t, "123456789", dict=None)
s = "123456789"
self.assertEqual("".join(t(s)), s)
def test_eviltuple(self):
class Exc(Exception):
pass
# Devious code could crash structseqs' constructors
class C:
def __getitem__(self, i):
raise Exc
def __len__(self):
return 9
self.assertRaises(Exc, time.struct_time, C())
def test_reduce(self):
t = time.gmtime()
x = t.__reduce__()
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
t = time.gmtime()
L = list(t)
indices = (0, None, 1, 3, 19, 300, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(list(t[start:stop:step]),
L[start:stop:step])
def test_match_args(self):
expected_args = ('tm_year', 'tm_mon', 'tm_mday', 'tm_hour', 'tm_min',
'tm_sec', 'tm_wday', 'tm_yday', 'tm_isdst')
self.assertEqual(time.struct_time.__match_args__, expected_args)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def METHOD_NAME(self):
expected_args = ('st_mode', 'st_ino', 'st_dev', 'st_nlink', 'st_uid',
'st_gid', 'st_size')
self.assertEqual(os.stat_result.n_unnamed_fields, 3)
self.assertEqual(os.stat_result.__match_args__, expected_args)
if __name__ == "__main__":
unittest.main() | null |
5,625 | import pytest
import torch
from torch.autograd import gradcheck
import kornia
import kornia.testing as utils # test utils
from kornia.testing import BaseTester
class TestZCA(BaseTester):
@pytest.mark.parametrize("unbiased", [True, False])
def test_zca_unbiased(self, unbiased, device, dtype):
data = torch.tensor([[0, 1], [1, 0], [-1, 0], [0, -1]], device=device, dtype=dtype)
if unbiased:
unbiased_val = 1.5
else:
unbiased_val = 2.0
expected = torch.sqrt(unbiased_val * torch.abs(data)) * torch.sign(data)
zca = kornia.enhance.ZCAWhitening(unbiased=unbiased).fit(data)
actual = zca(data)
self.assert_close(actual, expected, low_tolerance=True)
@pytest.mark.parametrize("dim", [0, 1])
def test_dim_args(self, dim, device, dtype):
if 'xla' in device.type:
pytest.skip("buggy with XLA devices.")
if dtype == torch.float16:
pytest.skip('not work for half-precision')
data = torch.tensor([[0, 1], [1, 0], [-1, 0], [0, -1]], device=device, dtype=dtype)
if dim == 1:
expected = torch.tensor(
[
[-0.35360718, 0.35360718],
[0.35351562, -0.35351562],
[-0.35353088, 0.35353088],
[0.35353088, -0.35353088],
],
device=device,
dtype=dtype,
)
elif dim == 0:
expected = torch.tensor(
[[0.0, 1.2247448], [1.2247448, 0.0], [-1.2247448, 0.0], [0.0, -1.2247448]], device=device, dtype=dtype
)
zca = kornia.enhance.ZCAWhitening(dim=dim)
actual = zca(data, True)
self.assert_close(actual, expected, low_tolerance=True)
@pytest.mark.parametrize("input_shape,eps", [((15, 2, 2, 2), 1e-6), ((10, 4), 0.1), ((20, 3, 2, 2), 1e-3)])
def test_identity(self, input_shape, eps, device, dtype):
"""Assert that data can be recovered by the inverse transform."""
data = torch.randn(*input_shape, device=device, dtype=dtype)
zca = kornia.enhance.ZCAWhitening(compute_inv=True, eps=eps).fit(data)
data_w = zca(data)
data_hat = zca.inverse_transform(data_w)
self.assert_close(data, data_hat, low_tolerance=True)
def test_grad_zca_individual_transforms(self, device, dtype):
"""Check if the gradients of the transforms are correct w.r.t to the input data."""
data = torch.tensor([[2, 0], [0, 1], [-2, 0], [0, -1]], device=device, dtype=dtype)
data = utils.tensor_to_gradcheck_var(data)
def zca_T(x):
return kornia.enhance.zca_mean(x)[0]
def zca_mu(x):
return kornia.enhance.zca_mean(x)[1]
def zca_T_inv(x):
return kornia.enhance.zca_mean(x, return_inverse=True)[2]
assert gradcheck(zca_T, (data,), raise_exception=True)
assert gradcheck(zca_mu, (data,), raise_exception=True)
assert gradcheck(zca_T_inv, (data,), raise_exception=True)
def METHOD_NAME(self, device, dtype):
data = torch.tensor([[2, 0], [0, 1], [-2, 0], [0, -1]], device=device, dtype=dtype)
data = utils.tensor_to_gradcheck_var(data)
def zca_fit(x):
zca = kornia.enhance.ZCAWhitening(detach_transforms=False)
return zca(x, include_fit=True)
assert gradcheck(zca_fit, (data,), raise_exception=True)
def test_grad_detach_zca(self, device, dtype):
data = torch.tensor([[1, 0], [0, 1], [-2, 0], [0, -1]], device=device, dtype=dtype)
data = utils.tensor_to_gradcheck_var(data)
zca = kornia.enhance.ZCAWhitening()
zca.fit(data)
assert gradcheck(zca, (data,), raise_exception=True)
def test_not_fitted(self, device, dtype):
with pytest.raises(RuntimeError):
data = torch.rand(10, 2, device=device, dtype=dtype)
zca = kornia.enhance.ZCAWhitening()
zca(data)
def test_not_fitted_inv(self, device, dtype):
with pytest.raises(RuntimeError):
data = torch.rand(10, 2, device=device, dtype=dtype)
zca = kornia.enhance.ZCAWhitening()
zca.inverse_transform(data)
def test_jit(self, device, dtype):
data = torch.rand(10, 3, 1, 2, device=device, dtype=dtype)
zca = kornia.enhance.ZCAWhitening().fit(data)
zca_jit = kornia.enhance.ZCAWhitening().fit(data)
zca_jit = torch.jit.script(zca_jit)
self.assert_close(zca_jit(data), zca(data))
@pytest.mark.parametrize("unbiased", [True, False])
def test_zca_whiten_func_unbiased(self, unbiased, device, dtype):
data = torch.tensor([[0, 1], [1, 0], [-1, 0], [0, -1]], device=device, dtype=dtype)
if unbiased:
unbiased_val = 1.5
else:
unbiased_val = 2.0
expected = torch.sqrt(unbiased_val * torch.abs(data)) * torch.sign(data)
actual = kornia.enhance.zca_whiten(data, unbiased=unbiased)
self.assert_close(actual, expected, low_tolerance=True)
@pytest.mark.skip(reason="not implemented yet")
def test_cardinality(self, device, dtype):
pass
@pytest.mark.skip(reason="not implemented yet")
def test_exception(self, device, dtype):
pass
@pytest.mark.skip(reason="not implemented yet")
def test_gradcheck(self, device, dtype):
pass
@pytest.mark.skip(reason="not implemented yet")
def test_smoke(self, device, dtype):
pass
@pytest.mark.skip(reason="not implemented yet")
def test_module(self, device, dtype):
pass | null |
5,626 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import FrameworkManager, FunctionFactory
from mantid.simpleapi import CreateEmptyTableWorkspace
from mantidqt.utils.qt.testing import start_qapplication
from mantidqtinterfaces.Muon.GUI.Common.fitting_widgets.basic_fitting.fit_function_options_view import (
FitFunctionOptionsView,
RAW_DATA_TABLE_ROW,
)
from mantidqtinterfaces.Muon.GUI.Common.utilities.workspace_utils import StaticWorkspaceWrapper
@start_qapplication
class FitFunctionOptionsViewTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
FrameworkManager.Instance()
def setUp(self):
self.view = FitFunctionOptionsView()
self.view.show()
def tearDown(self):
self.assertTrue(self.view.close())
def test_that_the_view_has_been_initialized_with_the_raw_data_option_shown(self):
self.view = FitFunctionOptionsView()
self.view.show()
self.assertTrue(not self.view.fit_options_table.isRowHidden(RAW_DATA_TABLE_ROW))
def test_that_the_view_has_been_initialized_with_the_raw_data_option_hidden(self):
self.view = FitFunctionOptionsView()
self.view.hide_fit_raw_checkbox()
self.view.show()
self.assertTrue(self.view.fit_options_table.isRowHidden(RAW_DATA_TABLE_ROW))
    def test_that_update_fit_status_labels_will_display_success_if_the_fit_status_is_success(self):
fit_status, chi_squared = "success", 1.1
self.view.update_fit_status_labels(fit_status, chi_squared)
self.assertEqual(self.view.fit_status_success_failure.text(), "Success")
self.assertEqual(self.view.fit_status_chi_squared.text(), "Chi squared: 1.1")
def METHOD_NAME(self):
fit_status, chi_squared = None, 0.0
self.view.update_fit_status_labels(fit_status, chi_squared)
self.assertEqual(self.view.fit_status_success_failure.text(), "No Fit")
self.assertEqual(self.view.fit_status_chi_squared.text(), "Chi squared: 0")
def test_that_update_fit_status_labels_will_display_fits_failed_if_some_of_the_fits_fail(self):
fit_status, chi_squared = "failed for some reason", 2.2
self.view.update_fit_status_labels(fit_status, chi_squared)
self.assertEqual(self.view.fit_status_success_failure.text(), f"Failure: {fit_status}")
self.assertEqual(self.view.fit_status_chi_squared.text(), "Chi squared: 2.2")
def test_that_clear_fit_status_will_clear_the_fit_status_and_chi_squared(self):
fit_status, chi_squared = "failed for some reason", 2.2
self.view.update_fit_status_labels(fit_status, chi_squared)
self.view.clear_fit_status()
self.assertEqual(self.view.fit_status_success_failure.text(), "No Fit")
self.assertEqual(self.view.fit_status_chi_squared.text(), "Chi squared: 0.0")
def test_that_set_datasets_in_function_browser_will_set_the_datasets_in_the_function_browser(self):
dataset_names = ["Name1", "Name2", "Name3"]
self.view.set_datasets_in_function_browser(dataset_names)
self.assertEqual(self.view.number_of_datasets(), 3)
def test_that_set_current_dataset_index_will_set_the_current_dataset_index_in_the_function_browser(self):
dataset_names = ["Name1", "Name2", "Name3"]
self.view.set_datasets_in_function_browser(dataset_names)
self.view.set_current_dataset_index(2)
self.assertEqual(self.view.function_browser.getCurrentDataset(), 2)
def test_that_update_function_browser_parameters_will_update_the_parameters_of_the_function_for_single_fit(self):
old_function = "name=FlatBackground,A0=0"
simultaneous_mode = False
self.view.function_browser.setFunction(old_function)
self.assertEqual(str(self.view.fit_object), old_function)
updated_function = self.view.fit_object.setParameter("A0", 1.0)
self.view.update_function_browser_parameters(simultaneous_mode, updated_function)
self.assertEqual(str(self.view.fit_object), str(updated_function))
def test_that_update_function_browser_parameters_will_set_the_function_if_in_simultaneous_mode(self):
old_function = "name=FlatBackground,A0=0"
simultaneous_mode = True
self.view.function_browser.setFunction(old_function)
self.assertEqual(str(self.view.fit_object), old_function)
updated_function = self.view.fit_object.setParameter("A0", 1.0)
self.view.update_function_browser_parameters(simultaneous_mode, updated_function)
self.assertEqual(str(self.view.fit_object), str(updated_function))
def test_that_update_function_browser_parameters_will_clear_the_function_if_the_function_provided_is_none(self):
old_function = "name=FlatBackground,A0=0"
simultaneous_mode = False
self.view.function_browser.setFunction(old_function)
self.assertEqual(str(self.view.fit_object), old_function)
self.view.update_function_browser_parameters(simultaneous_mode, None)
self.assertEqual(self.view.fit_object, None)
def test_that_set_fit_function_will_set_the_function_in_the_browser(self):
fit_function = FunctionFactory.createFunction("FlatBackground")
self.view.set_fit_function(fit_function)
self.assertEqual(str(self.view.current_fit_function()), str(fit_function))
def test_that_it_is_possible_to_set_the_start_x_to_a_different_value(self):
new_value = 5.0
self.view.start_x = new_value
self.assertEqual(self.view.start_x, new_value)
def test_that_it_is_possible_to_set_the_end_x_to_a_different_value(self):
new_value = 5.0
self.view.end_x = new_value
self.assertEqual(self.view.end_x, new_value)
def test_that_the_fit_to_raw_checkbox_value_can_be_changed_as_expected(self):
self.view.fit_to_raw = False
self.assertTrue(not self.view.fit_to_raw)
self.view.fit_to_raw = True
self.assertTrue(self.view.fit_to_raw)
def test_that_the_function_name_can_be_changed_as_expected(self):
new_function_name = "Test Function Name"
self.view.function_name = new_function_name
self.assertEqual(self.view.function_name, new_function_name)
def test_that_set_covariance_button_enabled_can_disable_the_covariance_button(self):
self.assertTrue(self.view.covariance_matrix_button.isEnabled())
self.view.set_covariance_button_enabled(False)
self.assertTrue(not self.view.covariance_matrix_button.isEnabled())
def test_that_set_covariance_button_enabled_can_enable_the_covariance_button(self):
self.view.set_covariance_button_enabled(False)
self.view.set_covariance_button_enabled(True)
self.assertTrue(self.view.covariance_matrix_button.isEnabled())
def test_that_show_normalised_covariance_matrix_will_not_raise_an_error(self):
ws = CreateEmptyTableWorkspace()
wrapper = StaticWorkspaceWrapper("CovarianceMatrix", ws)
self.view.show_normalised_covariance_matrix(wrapper.workspace, wrapper.workspace_name)
if __name__ == "__main__":
unittest.main() | null |
5,627 | from typing import Optional, Tuple
from cogent3 import get_app, get_model
from cogent3.core.alignment import ArrayAlignment, SequenceCollection
from cogent3.core.tree import TreeNode
from cogent3.evolve.distance import EstimateDistances
from cogent3.phylo import nj as NJ
from cogent3.util import progress_display as UI
from cogent3.util import warning as c3warn
@c3warn.deprecated_args(
"2023.8",
reason="better name",
old_new=[("ests_from_pairwise", "params_from_pairwise")],
)
@UI.display_wrap
def tree_align(
model: str,
seqs: SequenceCollection,
tree: Optional[TreeNode] = None,
indel_rate: float = 0.01,
indel_length: float = 0.01,
ui=None,
params_from_pairwise: bool = True,
param_vals: dict = None,
iters: Optional[int] = None,
approx_dists: bool = True,
) -> Tuple[ArrayAlignment, TreeNode]:
"""Returns a multiple sequence alignment and tree.
Parameters
----------
model
a substitution model or the name of one, see available_models()
seqs
a sequence collection
tree
if None, estimates the guide tree from pairwise distances
indel_rate, indel_length
parameters for the progressive pair-HMM
params_from_pairwise
if no tree provided and True, the median value
of the substitution model parameters are used
param_vals
named key, value pairs for model parameters. These
override params_from_pairwise.
iters
the number of times the alignment process is repeated. The guide tree
is updated on each iteration from pairwise distances computed from the
alignment produced by the previous iteration. If None, does not do any
iterations.
approx_dists
if no guide tree, and model is for DNA / Codons, estimates pairwise
distances using an approximation and JC69. Otherwise, estimates
genetic distances from pairwise alignments (which is slower).
Notes
-----
Uses a tree for determining the progressive order. If a tree is not
provided, a Neighbour Joining tree is constructed from pairwise
distances. If the model is for DNA, the pairwise distances are from
SequenceCollection.distance_matrix() for the initial guide tree. For other
moltypes, and distances are estimated using the provided substitution model
from pairwise alignments of the sequences.
Parameters and tree are added to ``<align>.info["align_params"]``.
"""
_exclude_params = ["mprobs", "rate", "bin_switch"]
param_vals = dict(param_vals) if param_vals else {}
model = get_model(model)
moltype = model.alphabet.moltype
num_states = len(model.alphabet)
if isinstance(seqs, SequenceCollection):
seqs = seqs.to_moltype(moltype)
else:
seqs = SequenceCollection(data=seqs, moltype=moltype)
if tree is not None:
fix_lengths = get_app("scale_branches", scalar=1.0)
tip_names = set(tree.get_tip_names())
seq_names = set(seqs.names)
assert (
tip_names == seq_names
), f"names don't match between seqs and tree: {tip_names ^ seq_names}"
tree = tree.bifurcating(name_unnamed=True)
tree = fix_lengths(tree)
align = _progressive_hmm(
indel_length, indel_rate, model, param_vals, seqs, tree
)
return align, tree
if params_from_pairwise:
est_params = [
param for param in model.get_param_list() if param not in _exclude_params
]
else:
est_params = None
if approx_dists and num_states == 4:
# we have a nucleic acid alphabet, so we will try the new
# approximation method
dmat = seqs.distance_matrix(calc="jc69")
tree = dmat.quick_tree()
else:
# we have to do the pairwise-alignment based approach
dists, param_vals = _dists_from_pairwise_align(
est_params, params_from_pairwise, model, param_vals, seqs
)
tree = NJ.nj(dists.to_dict())
tree = tree.bifurcating(name_unnamed=True)
    # make sure all edges have non-zero length and scale the lengths
    # for the codon case if needed
fix_lengths = get_app("scale_branches", nuc_to_codon=num_states >= 60)
tree = fix_lengths(tree)
ui.display("Doing progressive alignment")
# this is the point at which we do the iterations
align = _progressive_hmm(
indel_length, indel_rate, model, {**param_vals}, seqs, tree
)
if iters is None:
return align, tree
for _ in range(iters):
dmat = align.distance_matrix(calc="jc69")
tree = dmat.quick_tree()
tree = tree.bifurcating(name_unnamed=True)
tree = fix_lengths(tree)
align = _progressive_hmm(
indel_length, indel_rate, model, {**param_vals}, seqs, tree
)
return align, tree
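# Short usage sketch (assumes an existing FASTA file "demo.fasta" with unaligned DNA
# sequences; the file name and model choice are illustrative only).
def _example_tree_align():
    from cogent3 import load_unaligned_seqs
    seqs = load_unaligned_seqs("demo.fasta", moltype="dna")
    aln, guide_tree = tree_align("HKY85", seqs, iters=1)
    return aln, guide_tree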
def _dists_from_pairwise_align(
est_params, params_from_pairwise, model, param_vals, seqs
):
dcalc = EstimateDistances(seqs, model, do_pair_align=True, est_params=est_params)
dcalc.run()
if params_from_pairwise and not param_vals:
# we use the median to avoid the influence of outlier pairs
param_vals = {}
for param in est_params:
numbers = dcalc.get_param_values(param)
param_vals[param] = numbers.median
dists = dcalc.get_pairwise_distances()
return dists, param_vals
def _progressive_hmm(indel_length, indel_rate, model, param_vals, seqs, tree):
LF = model.make_likelihood_function(tree, aligned=False)
with LF.updates_postponed():
for param, val in list(param_vals.items()):
LF.set_param_rule(param, value=val, is_constant=True)
LF.set_param_rule("indel_rate", value=indel_rate, is_constant=True)
LF.set_param_rule("indel_length", value=indel_length, is_constant=True)
LF.set_sequences(seqs)
lnL = LF.get_log_likelihood()
edge = lnL.edge
try:
align = edge.get_viterbi_path().get_alignment()
except ArithmeticError:
# trying to narrow down conditions for difficult to reproduce exception
print(
"###" * 30,
"",
tree.get_newick(with_distances=True),
"",
"#" * 20,
"",
str(LF),
"",
"#" * 20,
"",
seqs.to_fasta(),
sep="\n",
)
raise
align = align.to_moltype(model.moltype)
param_vals.update(
dict(
indel_length=indel_length,
indel_rate=indel_rate,
guide_tree=tree.get_newick(with_distances=True),
model=model.name,
lnL=lnL,
)
)
align.info["align_params"] = param_vals
return align
def METHOD_NAME(*args, **kwargs): # pragma: no cover
"""deprecated, used tree_align()"""
from cogent3.util.warning import deprecated
deprecated(
"function",
"TreeAlign",
"tree_align",
"2023.9",
)
return tree_align(*args, **kwargs) | null |
5,628 | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generic helper functions used across codebase."""
import os
import pathlib
import numpy as np
import tensorflow as tf
from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
# Generic.
def get_single_col_by_input_type(input_type, column_definition):
"""Returns name of single column.
Args:
input_type: Input type of column to extract
column_definition: Column definition list for experiment
"""
l = [tup[0] for tup in column_definition if tup[2] == input_type]
if len(l) != 1:
raise ValueError("Invalid number of columns for {}".format(input_type))
return l[0]
def extract_cols_from_data_type(data_type, column_definition, excluded_input_types):
"""Extracts the names of columns that correspond to a define data_type.
Args:
data_type: DataType of columns to extract.
column_definition: Column definition to use.
excluded_input_types: Set of input types to exclude
Returns:
List of names for columns with data type specified.
"""
return [tup[0] for tup in column_definition if tup[1] == data_type and tup[2] not in excluded_input_types]
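# Illustrative sketch of the expected column_definition layout: a list of
# (column_name, data_type, input_type) tuples. The enum names below follow the
# accompanying data formatter definitions and are shown here as an assumption only.
#   column_definition = [
#       ("id", DataTypes.REAL_VALUED, InputTypes.ID),
#       ("hours_from_start", DataTypes.REAL_VALUED, InputTypes.TIME),
#       ("power_usage", DataTypes.REAL_VALUED, InputTypes.TARGET),
#   ]
#   get_single_col_by_input_type(InputTypes.TARGET, column_definition)  # -> "power_usage"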
# Loss functions.
def tensorflow_quantile_loss(y, y_pred, quantile):
"""Computes quantile loss for tensorflow.
Standard quantile loss as defined in the "Training Procedure" section of
the main TFT paper
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Tensor for quantile loss.
"""
# Checks quantile
if quantile < 0 or quantile > 1:
raise ValueError("Illegal quantile value={}! Values should be between 0 and 1.".format(quantile))
prediction_underflow = y - y_pred
q_loss = quantile * tf.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * tf.maximum(
-prediction_underflow, 0.0
)
return tf.reduce_sum(q_loss, axis=-1)
def numpy_normalised_quantile_loss(y, y_pred, quantile):
"""Computes normalised quantile loss for numpy arrays.
Uses the q-Risk metric as defined in the "Training Procedure" section of the
main TFT paper.
Args:
y: Targets
y_pred: Predictions
quantile: Quantile to use for loss calculations (between 0 & 1)
Returns:
Float for normalised quantile loss.
"""
prediction_underflow = y - y_pred
weighted_errors = quantile * np.maximum(prediction_underflow, 0.0) + (1.0 - quantile) * np.maximum(
-prediction_underflow, 0.0
)
quantile_loss = weighted_errors.mean()
normaliser = y.abs().mean()
return 2 * quantile_loss / normaliser
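# Worked example (a sketch; y is assumed to be pandas-like, since .abs() is called on it):
#   y = pd.Series([1.0, 2.0, 3.0]), y_pred = pd.Series([1.5, 1.5, 3.5]), quantile = 0.5
#   prediction_underflow = [-0.5, 0.5, -0.5] -> weighted_errors = [0.25, 0.25, 0.25]
#   quantile_loss = 0.25, normaliser = 2.0, q-Risk = 2 * 0.25 / 2.0 = 0.25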
# OS related functions.
def create_folder_if_not_exist(directory):
"""Creates folder if it doesn't exist.
Args:
directory: Folder path to create.
"""
# Also creates directories recursively
pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
# Tensorflow related functions.
def get_default_tensorflow_config(tf_device="gpu", gpu_id=0):
"""Creates tensorflow config for graphs to run on CPU or GPU.
Specifies whether to run graph on gpu or cpu and which GPU ID to use for multi
GPU machines.
Args:
tf_device: 'cpu' or 'gpu'
gpu_id: GPU ID to use if relevant
Returns:
Tensorflow config.
"""
if tf_device == "cpu":
os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # for training on cpu
tf_config = tf.ConfigProto(log_device_placement=False, device_count={"GPU": 0})
else:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
print("Selecting GPU ID={}".format(gpu_id))
tf_config = tf.ConfigProto(log_device_placement=False)
tf_config.gpu_options.allow_growth = True
return tf_config
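# Usage sketch (TF1-style session setup, matching the ConfigProto usage above):
#   tf_config = get_default_tensorflow_config(tf_device="gpu", gpu_id=1)
#   sess = tf.Session(config=tf_config)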
def save(tf_session, model_folder, cp_name, scope=None):
"""Saves Tensorflow graph to checkpoint.
    Saves all trainable variables under a given variable scope to checkpoint.
Args:
tf_session: Session containing graph
model_folder: Folder to save models
cp_name: Name of Tensorflow checkpoint
scope: Variable scope containing variables to save
"""
# Save model
if scope is None:
saver = tf.train.Saver()
else:
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
saver = tf.train.Saver(var_list=var_list, max_to_keep=100000)
save_path = saver.save(tf_session, os.path.join(model_folder, "{0}.ckpt".format(cp_name)))
print("Model saved to: {0}".format(save_path))
def METHOD_NAME(tf_session, model_folder, cp_name, scope=None, verbose=False):
"""Loads Tensorflow graph from checkpoint.
Args:
tf_session: Session to load graph into
model_folder: Folder containing serialised model
cp_name: Name of Tensorflow checkpoint
scope: Variable scope to use.
verbose: Whether to print additional debugging information.
"""
# Load model proper
load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name))
print("Loading model from {0}".format(load_path))
print_weights_in_checkpoint(model_folder, cp_name)
initial_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node])
# Saver
if scope is None:
saver = tf.train.Saver()
else:
var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=scope)
saver = tf.train.Saver(var_list=var_list, max_to_keep=100000)
# Load
saver.restore(tf_session, load_path)
all_vars = set([v.name for v in tf.get_default_graph().as_graph_def().node])
if verbose:
print("Restored {0}".format(",".join(initial_vars.difference(all_vars))))
print("Existing {0}".format(",".join(all_vars.difference(initial_vars))))
print("All {0}".format(",".join(all_vars)))
print("Done.")
def print_weights_in_checkpoint(model_folder, cp_name):
"""Prints all weights in Tensorflow checkpoint.
Args:
model_folder: Folder containing checkpoint
cp_name: Name of checkpoint
Returns:
"""
load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name))
print_tensors_in_checkpoint_file(file_name=load_path, tensor_name="", all_tensors=True, all_tensor_names=True) | null |
5,629 | from .. import errors
from .resource import Collection, Model
class Plugin(Model):
"""
A plugin on the server.
"""
def __repr__(self):
return f"<{self.__class__.__name__}: '{self.name}'>"
@property
def name(self):
"""
The plugin's name.
"""
return self.attrs.get('Name')
@property
def enabled(self):
"""
Whether the plugin is enabled.
"""
return self.attrs.get('Enabled')
@property
def settings(self):
"""
A dictionary representing the plugin's configuration.
"""
return self.attrs.get('Settings')
def configure(self, options):
"""
Update the plugin's settings.
Args:
options (dict): A key-value mapping of options.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.configure_plugin(self.name, options)
self.reload()
def METHOD_NAME(self, force=False):
"""
Disable the plugin.
Args:
force (bool): Force disable. Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.disable_plugin(self.name, force)
self.reload()
def enable(self, timeout=0):
"""
Enable the plugin.
Args:
timeout (int): Timeout in seconds. Default: 0
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
self.client.api.enable_plugin(self.name, timeout)
self.reload()
def push(self):
"""
Push the plugin to a remote registry.
Returns:
A dict iterator streaming the status of the upload.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.push_plugin(self.name)
def remove(self, force=False):
"""
Remove the plugin from the server.
Args:
force (bool): Remove even if the plugin is enabled.
Default: False
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.remove_plugin(self.name, force=force)
def upgrade(self, remote=None):
"""
Upgrade the plugin.
Args:
remote (string): Remote reference to upgrade to. The
``:latest`` tag is optional and is the default if omitted.
Default: this plugin's name.
Returns:
A generator streaming the decoded API logs
"""
if self.enabled:
raise errors.DockerError(
'Plugin must be disabled before upgrading.'
)
if remote is None:
remote = self.name
privileges = self.client.api.plugin_privileges(remote)
yield from self.client.api.upgrade_plugin(
self.name,
remote,
privileges,
)
self.reload()
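# Usage sketch (assumes a reachable Docker daemon; the plugin reference below is
# illustrative only):
#   client = docker.from_env()
#   plugin = client.plugins.install("vieux/sshfs:latest")
#   plugin.configure({"DEBUG": "1"})
#   plugin.enable(timeout=10)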
class PluginCollection(Collection):
model = Plugin
def create(self, name, plugin_data_dir, gzip=False):
"""
Create a new plugin.
Args:
name (string): The name of the plugin. The ``:latest`` tag is
optional, and is the default if omitted.
plugin_data_dir (string): Path to the plugin data directory.
Plugin data directory must contain the ``config.json``
manifest file and the ``rootfs`` directory.
gzip (bool): Compress the context using gzip. Default: False
Returns:
(:py:class:`Plugin`): The newly created plugin.
"""
self.client.api.create_plugin(name, plugin_data_dir, gzip)
return self.get(name)
def get(self, name):
"""
Gets a plugin.
Args:
name (str): The name of the plugin.
Returns:
(:py:class:`Plugin`): The plugin.
Raises:
:py:class:`docker.errors.NotFound` If the plugin does not
exist.
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.prepare_model(self.client.api.inspect_plugin(name))
def install(self, remote_name, local_name=None):
"""
Pull and install a plugin.
Args:
remote_name (string): Remote reference for the plugin to
install. The ``:latest`` tag is optional, and is the
default if omitted.
local_name (string): Local name for the pulled plugin.
The ``:latest`` tag is optional, and is the default if
omitted. Optional.
Returns:
(:py:class:`Plugin`): The installed plugin
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
privileges = self.client.api.plugin_privileges(remote_name)
it = self.client.api.pull_plugin(remote_name, privileges, local_name)
for _data in it:
pass
return self.get(local_name or remote_name)
def list(self):
"""
List plugins installed on the server.
Returns:
(list of :py:class:`Plugin`): The plugins.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
resp = self.client.api.plugins()
return [self.prepare_model(r) for r in resp] | null |
5,630 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2020 Soeren Apel <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from binascii import crc_hqx
# See tc27xD_um_v2.2.pdf, Table 20-2
# (name, addr byte count, data byte count)
command_codes = {
0b00000: ('Read byte', 4, 0),
0b00001: ('Read 2 byte', 4, 0),
0b00010: ('Read 4 byte', 4, 0),
# Reserved
0b00100: ('Write byte with ACK', 4, 4),
0b00101: ('Write 2 byte with ACK', 4, 4),
0b00110: ('Write 4 byte with ACK', 4, 4),
# Reserved
0b01000: ('ACK', 0, 0),
0b01001: ('NACK (Target Error)', 0, 0),
0b01010: ('Read Answer with ACK', 4, 4),
# Reserved
0b01100: ('Trigger with ACK', 0, 0),
# Reserved
# Reserved
# Reserved
# Reserved
# Reserved
0b10010: ('Read 4-byte JTAG ID', 0, 0),
# Reserved
# Reserved
# Reserved
# Reserved
0b10111: ('Stream 32 byte with ACK', 0, 32)
# Rest is reserved
}
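# Worked example of the 16-bit header layout decoded in put_header() below
# (the value is hypothetical): for header word 0x2A06,
#   tag     = (0x2A06 & 0xE000) >> 13 = 1
#   command = (0x2A06 & 0x1F00) >> 8  = 0b01010 -> 'Read Answer with ACK'
#   channel = (0x2A06 & 0x000E) >> 1  = 3, reserved bits 0 and 4..7 are zero.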
ann_header_tag, ann_header_cmd, ann_header_ch, ann_address, ann_data, \
ann_crc, ann_warning = range(7)
class Decoder(srd.Decoder):
api_version = 3
id = 'sipi'
name = 'SIPI (Zipwire)'
longname = 'NXP SIPI interface'
desc = 'Serial Inter-Processor Interface (SIPI) aka Zipwire, aka HSSL'
license = 'gplv2+'
inputs = ['lfast']
outputs = []
tags = ['Embedded/industrial']
annotations = (
('header_tag', 'Transaction Tag'),
('header_cmd', 'Command Code'),
('header_ch', 'Channel'),
('address', 'Address'),
('data', 'Data'),
('crc', 'CRC'),
('warning', 'Warning'),
)
annotation_rows = (
('fields', 'Fields', (ann_header_tag, ann_header_cmd,
ann_header_ch, ann_address, ann_data, ann_crc,)),
('warnings', 'Warnings', (ann_warning,)),
)
def __init__(self):
self.reset()
def reset(self):
self.byte_len = 0
self.frame_len = 0
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.out_binary = self.register(srd.OUTPUT_BINARY)
def put_ann(self, ss, es, ann_class, value):
self.put(int(ss), int(es), self.out_ann, [ann_class, value])
def put_header(self, ss_header, es_header, value):
ss = ss_header
es = ss + 3 * self.bit_len
tag = (value & 0xE000) >> 13
self.put_ann(ss, es, ann_header_tag, ['{:02X}'.format(tag)])
ss = es
es = ss + 5 * self.bit_len
cmd_id = (value & 0x1F00) >> 8
cmd_name, self.addr_len, self.data_len = \
command_codes.get(cmd_id, ('Reserved ({:02X})'.format(cmd_id), 0, 0))
self.frame_len = 2 + 2 + self.addr_len + self.data_len # +Header +CRC
self.put_ann(ss, es, ann_header_cmd, [cmd_name])
# Bits 4..7 are reserved and should be 0, warn if they're not
ss = es
es = ss + 4 * self.bit_len
reserved_bits = (value & 0x00F0) >> 4
if reserved_bits > 0:
self.put_ann(ss, es, ann_warning, ['Reserved bits #4..7 should be 0'])
ss = es
es = ss + 3 * self.bit_len
ch = (value & 0x000E) >> 1 # See tc27xD_um_v2.2.pdf, Table 20-1
self.put_ann(ss, es, ann_header_ch, [str(ch)])
# Bit 0 is reserved and should be 0, warn if it's not
if (value & 0x0001) == 0x0001:
ss = es
es = ss + self.bit_len
self.put_ann(ss, es, ann_warning, ['Reserved bit #0 should be 0'])
def METHOD_NAME(self, data):
byte_idx = 0
if self.addr_len > 0:
for value_tuple in data[:self.addr_len]:
ss, es, value = value_tuple
self.put_ann(ss, es, ann_address, ['{:02X}'.format(value)])
byte_idx = self.addr_len
if self.data_len > 0:
for value_tuple in data[byte_idx:]:
ss, es, value = value_tuple
self.put_ann(ss, es, ann_data, ['{:02X}'.format(value)])
def put_crc(self, ss, es, crc_value, crc_payload_data):
crc_payload = []
for value_tuple in crc_payload_data:
crc_payload.append(value_tuple[2])
calculated_crc = crc_hqx(bytes(crc_payload), 0xFFFF)
if calculated_crc == crc_value:
self.put_ann(ss, es, ann_crc, ['CRC OK'])
else:
self.put_ann(ss, es, ann_crc, ['Have {:02X} but calculated {:02X}'.format(crc_value, calculated_crc)])
self.put_ann(ss, es, ann_warning, ['CRC mismatch'])
def decode(self, ss, es, data):
if len(data) == 1:
self.put_ann(ss, es, ann_warning, ['Header too short'])
return
# ss and es are now unused, we use them as local variables instead
self.bit_len = (data[0][1] - data[0][0]) / 8.0
byte_idx = 0
ss = data[byte_idx][0]
es = data[byte_idx + 1][1]
self.put_header(ss, es, (data[byte_idx][2] << 8) + data[byte_idx + 1][2])
byte_idx += 2
payload_len = self.frame_len - 2 - 2 # -Header -CRC
if payload_len > 0:
self.METHOD_NAME(data[byte_idx:-2])
byte_idx += payload_len
ss = data[byte_idx][0]
es = data[byte_idx + 1][1]
if byte_idx == len(data) - 2:
# CRC is calculated over header + payload bytes
self.put_crc(ss, es, (data[byte_idx][2] << 8) + data[byte_idx + 1][2], data[0:-2])
else:
self.put_ann(ss, es, ann_warning, ['CRC incomplete or missing']) | null |
5,631 | import unittest
import os
import numpy as npy
import skrf as rf
class ConvenienceTestCase(unittest.TestCase):
"""
"""
def setUp(self):
"""
"""
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/'
self.hfss_oneport_file = os.path.join(self.test_dir, 'hfss_oneport.s1p')
self.hfss_twoport_file = os.path.join(self.test_dir, 'hfss_twoport.s2p')
self.hfss_threeport_file=os.path.join(self.test_dir, 'hfss_threeport_DB.s3p')
self.hfss_threeport_file_50ohm=os.path.join(self.test_dir, 'hfss_threeport_DB_50Ohm.s3p')
self.hfss_18dot2 = os.path.join(self.test_dir, 'hfss_18.2.s3p')
self.hfss_8ports = os.path.join(self.test_dir, 'hfss_19.2.s8p')
self.hfss_10ports = os.path.join(self.test_dir, 'hfss_19.2.s10p')
self.ntwk1 = rf.Network(os.path.join(self.test_dir, 'ntwk1.s2p'))
self.ntwk2 = rf.Network(os.path.join(self.test_dir, 'ntwk2.s2p'))
self.ntwk3 = rf.Network(os.path.join(self.test_dir, 'ntwk3.s2p'))
def test_hfss_high_port_number(self):
"""
Check dimensions s, gamma and z0 of Network from HFSS .sNp files
HFSS exports .sNp files with N > 4 with Gamma and Z0 as comments and
written on multiple lines. The additional lines start with a '!' that
need to be escaped to avoid numerical conversion error.
"""
_3ports = rf.Network(self.hfss_18dot2)
self.assertTrue(_3ports.s.shape[1:] == (3,3))
self.assertTrue(_3ports.z0.shape[1] == 3)
self.assertTrue(_3ports.gamma.shape[1] == 3)
_8ports = rf.Network(self.hfss_8ports)
self.assertTrue(_8ports.s.shape[1:] == (8,8))
self.assertTrue(_8ports.z0.shape[1] == 8)
self.assertTrue(_8ports.gamma.shape[1] == 8)
_10ports = rf.Network(self.hfss_10ports)
self.assertTrue(_10ports.s.shape[1:] == (10,10))
self.assertTrue(_10ports.z0.shape[1] == 10)
self.assertTrue(_10ports.gamma.shape[1] == 10)
def test_hfss_touchstone_2_media(self):
"""
        Currently, this just tests that the call executes. It would be better
        to simulate a uniform line in HFSS and then confirm that the HFSS
        network is the same as the one generated by the media object this
        function returns.
"""
med = rf.hfss_touchstone_2_media(self.hfss_oneport_file)[0]
med.line(1)
med_p1,med_p2 = rf.hfss_touchstone_2_media(self.hfss_twoport_file)
med_p1.line(1)
med_p2.line(1)
def test_hfss_touchstone_renormalization(self):
"""
Scattering matrices are given for a given impedance z0,
which is usually assumed to be 50 Ohm, unless otherwise stated.
        Touchstone files do not necessarily indicate such impedances,
        especially if they vary with frequency.
        The HFSS Touchstone file format optionally includes port information for gamma and z0.
        When HFSS files are read with Network() (or hfss_touchstone_2_network()),
        the port information is taken into account.
"""
# Comparing the S-params of the same device expressed with same z0
nw_50 = rf.Network(self.hfss_threeport_file_50ohm)
nw = rf.Network(self.hfss_threeport_file)
nw.renormalize(z_new=50)
self.assertTrue(npy.all(npy.abs(nw.s - nw_50.s) < 1e-6))
def test_is_hfss_touchstone(self):
"""
Test if Touchstone files have been generated by HFSS and has port
impedances for each frequency point.
"""
# Touchstone file generated by HFSS
self.assertTrue(rf.Touchstone(self.hfss_oneport_file).has_hfss_port_impedances)
self.assertTrue(rf.Touchstone(self.hfss_twoport_file).has_hfss_port_impedances)
self.assertTrue(rf.Touchstone(self.hfss_threeport_file).has_hfss_port_impedances)
self.assertTrue(rf.Touchstone(self.hfss_18dot2).has_hfss_port_impedances)
self.assertTrue(rf.Touchstone(self.hfss_8ports).has_hfss_port_impedances)
self.assertTrue(rf.Touchstone(self.hfss_10ports).has_hfss_port_impedances)
# Touchstone file not from HFSS
self.assertFalse(rf.Touchstone(os.path.join(self.test_dir, 'ntwk1.s2p')).has_hfss_port_impedances)
def METHOD_NAME(self):
"""
Test the conversion into a Network of HFSS-generated touchstone files
"""
nw_hfss_wo_z0 = rf.Network(os.path.join(self.test_dir, 'hfss_threeport_MA_without_gamma_z0_50Ohm.s3p'))
nw_hfss_50 = rf.Network(os.path.join(self.test_dir, 'hfss_threeport_MA_50Ohm.s3p'))
nw_hfss_z0 = rf.Network(os.path.join(self.test_dir, 'hfss_threeport_MA.s3p'))
# Test if the values read are the same
self.assertTrue(npy.allclose(nw_hfss_50.s, nw_hfss_wo_z0.s))
nw_hfss_z0.renormalize(50)
self.assertTrue(npy.allclose(nw_hfss_50.s, nw_hfss_z0.s))
def test_cst_touchstone_2_network(self):
"""
Test the conversion into a Network of CST-generated touchstone file
"""
nw_cst_4ports = rf.Network(os.path.join(self.test_dir, 'cst_example_4ports.s4p'))
nw_cst_6ports = rf.Network(os.path.join(self.test_dir, 'cst_example_6ports.s6p'))
def test_cst_touchstone_V2_as_V1_2_network(self):
"""
Test the conversion into a Network of a CST-generated
touchstone V2 format file (.ts) saved like a touchstone V1 file (.sNp)
"""
nw_cst_6ports = rf.Network(os.path.join(self.test_dir, 'cst_example_6ports_V2.s6p'))
def test_cst_touchstone_V2_2_network(self):
"""
Test the conversion into a Network of a CST-generated touchstone V2 format file (.ts)
"""
nw_cst_6ports = rf.Network(os.path.join(self.test_dir, 'cst_example_6ports_V2.ts'))
def test_Agilent_touchstone_4ports(self):
"""
Try reading an Agilent touchstone 4-ports measurement file
"""
filename = 'Agilent_E5071B.s4p'
ntwk = rf.Network(os.path.join(self.test_dir, filename))
# Check if port characteristic impedance is correctly parsed
self.assertTrue(npy.isclose(npy.unique(ntwk.z0), 75))
self.assertTrue(npy.allclose(ntwk.s_db[0][1], # check s2n_mag
[-5.252684e+001, -2.278388e-001, -4.435702e+001, -8.235984e+001]))
self.assertTrue(npy.allclose(ntwk.s_deg[0][1], # check s2n_deg
[-1.350884e+002, 8.767636e+001, -1.585657e+002, 7.708928e+001]))
def test_RS_touchstone_4ports(self):
"""
Try reading an R&S touchstone 4-ports measurement file
"""
filename = 'RS_ZNB8.s4p'
ntwk = rf.Network(os.path.join(self.test_dir, filename))
# Check if port characteristic impedance is correctly parsed
self.assertTrue(npy.isclose(npy.unique(ntwk.z0), 50))
# For this specific file, the port#1 min return loss is @55.5MHz
self.assertTrue(ntwk.frequency.f[npy.argmin(ntwk.s11.s_mag)], 55.5e6)
self.assertTrue(npy.allclose(ntwk.s_re[0][2], # check s3n_re
[-9.748145748042028E-6, 5.737806652221101E-6, -7.283138400961303E-1, -7.202238521877286E-6]))
self.assertTrue(npy.allclose(ntwk.s_im[0][2], # check s3n_im
[4.457944078457155E-6, 5.341399484369366E-6, -4.531402467395991E-1, 5.667857998796495E-7]) | null |
5,632 | import os
import pytest
from electrumx.lib import util, tx
def test_cachedproperty():
class Target:
CALL_COUNT = 0
def __init__(self):
self.call_count = 0
@util.cachedproperty
def prop(self):
self.call_count += 1
return self.call_count
@util.cachedproperty
def cls_prop(cls):
cls.CALL_COUNT += 1
return cls.CALL_COUNT
t = Target()
assert t.prop == t.prop == 1
assert Target.cls_prop == Target.cls_prop == 1
def test_formatted_time():
assert util.formatted_time(0) == '00s'
assert util.formatted_time(59) == '59s'
assert util.formatted_time(60) == '01m 00s'
assert util.formatted_time(3599) == '59m 59s'
assert util.formatted_time(3600) == '01h 00m 00s'
assert util.formatted_time(3600*24) == '1d 00h 00m'
assert util.formatted_time(3600*24*367) == '367d 00h 00m'
assert util.formatted_time(3600*24, ':') == '1d:00h:00m'
def test_deep_getsizeof():
int_t = util.deep_getsizeof(1)
assert util.deep_getsizeof('foo') == util.deep_getsizeof('') + 3
assert util.deep_getsizeof([1, 1]) > 2 * int_t
assert util.deep_getsizeof({1: 1}) > 2 * int_t
assert util.deep_getsizeof({1: {1: 1}}) > 3 * int_t
class Base:
pass
class A(Base):
pass
class B(Base):
pass
def test_subclasses():
assert util.subclasses(Base) == [A, B]
assert util.subclasses(Base, strict=False) == [A, B, Base]
def test_chunks():
assert list(util.chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') is None
def test_bytes_to_int():
assert util.bytes_to_int(b'\x07[\xcd\x15') == 123456789
def test_int_to_bytes():
assert util.int_to_bytes(456789) == b'\x06\xf8U'
def METHOD_NAME(tmpdir):
prefix = os.path.join(tmpdir, 'log')
L = util.LogicalFile(prefix, 2, 6)
with pytest.raises(FileNotFoundError):
L.open_file(0, create=False)
# Check L.open creates a file
with L.open_file(8, create=True) as f:
pass
with util.open_file(prefix + '01') as f:
pass
L.write(0, b'987')
assert L.read(0, -1) == b'987'
assert L.read(0, 4) == b'987'
assert L.read(1, 1) == b'8'
L.write(0, b'01234567890')
assert L.read(0, -1) == b'01234567890'
assert L.read(5, -1) == b'567890'
with util.open_file(prefix + '01') as f:
assert f.read(-1) == b'67890'
# Test file boundary
L.write(0, b'957' * 6)
assert L.read(0, -1) == b'957' * 6
def test_open_fns(tmpdir):
tmpfile = os.path.join(tmpdir, 'file1')
with pytest.raises(FileNotFoundError):
util.open_file(tmpfile)
with util.open_file(tmpfile, create=True) as f:
f.write(b'56')
with util.open_file(tmpfile) as f:
assert f.read(3) == b'56'
# Test open_truncate truncates and creates
with util.open_truncate(tmpfile) as f:
assert f.read(3) == b''
tmpfile = os.path.join(tmpdir, 'file2')
with util.open_truncate(tmpfile) as f:
assert f.read(3) == b''
def test_address_string():
assert util.address_string(('foo.bar', 84)) == 'foo.bar:84'
assert util.address_string(('1.2.3.4', 84)) == '1.2.3.4:84'
assert util.address_string(('0a::23', 84)) == '[a::23]:84'
def test_protocol_tuple():
assert util.protocol_tuple(None) == (0, )
assert util.protocol_tuple("foo") == (0, )
assert util.protocol_tuple(1) == (0, )
assert util.protocol_tuple("1") == (1, )
assert util.protocol_tuple("0.1") == (0, 1)
assert util.protocol_tuple("0.10") == (0, 10)
assert util.protocol_tuple("2.5.3") == (2, 5, 3)
def test_version_string():
assert util.version_string(()) == "0.0"
assert util.version_string((1, )) == "1.0"
assert util.version_string((1, 2)) == "1.2"
assert util.version_string((1, 3, 2)) == "1.3.2"
def test_protocol_version():
assert util.protocol_version(None, (1, 0), (1, 0)) == ((1, 0), (1, 0))
assert util.protocol_version("0.10", (0, 1), (1, 1)) == ((0, 10), (0, 10))
assert util.protocol_version("1.0", (1, 0), (1, 0)) == ((1, 0), (1, 0))
assert util.protocol_version("1.0", (1, 0), (1, 1)) == ((1, 0), (1, 0))
assert util.protocol_version("1.1", (1, 0), (1, 1)) == ((1, 1), (1, 1))
assert util.protocol_version("1.2", (1, 0), (1, 1)) == (None, (1, 2))
assert util.protocol_version("0.9", (1, 0), (1, 1)) == (None, (0, 9))
assert util.protocol_version(["0.9", "1.0"], (1, 0), (1, 1)) \
== ((1, 0), (0, 9))
assert util.protocol_version(["0.9", "1.1"], (1, 0), (1, 1)) \
== ((1, 1), (0, 9))
assert util.protocol_version(["1.1", "0.9"], (1, 0), (1, 1)) \
== (None, (1, 1))
assert util.protocol_version(["0.8", "0.9"], (1, 0), (1, 1)) \
== (None, (0, 8))
assert util.protocol_version(["1.1", "1.2"], (1, 0), (1, 1)) \
== ((1, 1), (1, 1))
assert util.protocol_version(["1.2", "1.3"], (1, 0), (1, 1)) \
== (None, (1, 2))
def test_unpackers():
b = bytes(range(256))
assert util.unpack_le_int32_from(b, 0) == (50462976,)
assert util.unpack_le_int32_from(b, 42) == (757869354,)
assert util.unpack_le_int64_from(b, 0) == (506097522914230528,)
assert util.unpack_le_int64_from(b, 42) == (3544384782113450794,)
assert util.unpack_le_uint16_from(b, 0) == (256,)
assert util.unpack_le_uint16_from(b, 42) == (11050,)
assert util.unpack_le_uint32_from(b, 0) == (50462976,)
assert util.unpack_le_uint32_from(b, 42) == (757869354,)
assert util.unpack_le_uint64_from(b, 0) == (506097522914230528,)
assert util.unpack_le_uint64_from(b, 42) == (3544384782113450794,)
def test_hex_transforms():
h = "AABBCCDDEEFF"
assert util.hex_to_bytes(h) == b'\xaa\xbb\xcc\xdd\xee\xff'
def test_pack_varint():
tests = list(range(0, 258))
tests.extend([1024, 65535, 65536, 4294967295, 4294967296, 8294967296])
for n in tests:
data = util.pack_varint(n)
deser = tx.Deserializer(data)
assert deser._read_varint() == n
import struct
with pytest.raises(struct.error):
util.pack_varint(-1)
assert util.pack_varint(0) == b'\0'
assert util.pack_varint(5) == b'\5'
assert util.pack_varint(252) == b'\xfc'
assert util.pack_varint(253) == b'\xfd\xfd\0'
assert util.pack_varint(65535) == b'\xfd\xff\xff'
assert util.pack_varint(65536) == b'\xfe\0\0\1\0'
assert util.pack_varint(2**32-1) == b'\xfe\xff\xff\xff\xff'
assert util.pack_varint(2**32) == b'\xff\0\0\0\0\1\0\0\0'
assert util.pack_varint(2**64-1) \
== b'\xff\xff\xff\xff\xff\xff\xff\xff\xff'
def test_pack_varbytes():
tests = [b'', b'1', b'2' * 253, b'3' * 254, b'4' * 256, b'5' * 65536]
for test in tests:
data = util.pack_varbytes(test)
deser = tx.Deserializer(data)
assert deser._read_varbytes() == test | null |
5,633 | import hashlib
import inspect
import json
import os
from django.test import TestCase
from django.urls import reverse
from django.utils.encoding import force_str
from vcr import VCR
from feder.cases.factories import CaseFactory
from feder.letters.factories import (
LetterFactory,
OutgoingLetterFactory,
SendOutgoingLetterFactory,
)
from feder.letters.logs.factories import LogRecordFactory, get_emaillabs_row
from feder.letters.logs.models import STATUS, EmailLog, LogRecord
from feder.letters.logs.utils import get_emaillabs_client
from feder.main.tests import PermissionStatusMixin
from feder.users.factories import UserFactory
SEED = os.urandom(10)
def METHOD_NAME(x, seed):
"""
Anonymizes data using a salt and a one-way (unidirectional) hash function.
Identical inputs within one cassette hash to identical values, so they remain comparable.
:param x: string to anonymise
:param seed: value modification parameter
:return: anonymized text
"""
return hashlib.sha1(force_str(x).encode("utf-8") + seed).hexdigest()
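# Illustrative example (not in the original module): with the module-level SEED,
# METHOD_NAME("[email protected]", SEED) == METHOD_NAME("[email protected]", SEED), while a different
# seed or input yields a different digest, so scrubbed cassettes stay self-consistent.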
def generator(f):
filename = f"{f.__self__.__class__.__name__}.PY3.{f.__name__}"
return os.path.join(os.path.dirname(inspect.getfile(f)), "cassettes", filename)
def scrub_response(seed, fields=None):
fields = fields or ["to", "from", "subject", "account"]
def before_record_response(response):
data = json.loads(response["body"]["string"].decode("utf-8"))
for i, row in enumerate(data["data"]):
for field in fields:
if field in row:
data["data"][i][field] = METHOD_NAME(row[field], seed)
response["body"]["string"] = json.dumps(data).encode("utf-8")
return response
return before_record_response
my_vcr = VCR(
func_path_generator=generator,
decode_compressed_response=True,
serializer="yaml",
filter_headers=["authorization"],
before_record_response=scrub_response(SEED),
path_transformer=VCR.ensure_suffix(".yaml"),
)
class EmailLabsClientTestCase(TestCase):
@my_vcr.use_cassette()
def test_get_emails(self):
client = get_emaillabs_client(per_page=20)
self.assertEqual(len(client.get_emails()), 20)
@my_vcr.use_cassette()
def test_get_emails_iter(self):
client = get_emaillabs_client(per_page=20)
data = list(client.get_emails_iter())
self.assertTrue(len(data) > 20, msg=f"Found {len(data)} messages.")
class LogRecordQuerySet(TestCase):
def setUp(self):
self.letter = LetterFactory()
self.letter_no_case = LetterFactory(record__case=None)
self.rows = [
get_emaillabs_row(
sender_from=self.letter.case.email, id="ID1", deferred_time="Now"
),
get_emaillabs_row(sender_from="[email protected]", id="ID1"),
get_emaillabs_row(sender_from="[email protected]", id="ID2"),
]
def test_parse_rows_counters(self):
skipped, saved = LogRecord.objects.parse_rows(self.rows)
self.assertEqual(saved, 1)
self.assertEqual(skipped, 2)
def test_parse_rows_create_email_log(self):
LogRecord.objects.parse_rows(self.rows)
self.assertEqual(EmailLog.objects.count(), 1)
self.assertTrue(
EmailLog.objects.filter(email_id="ID1", case=self.letter.case).exists()
)
def test_parse_rows_create_log_record(self):
LogRecord.objects.parse_rows(self.rows)
self.assertEqual(LogRecord.objects.count(), 1)
self.assertTrue(
LogRecord.objects.filter(
email__case=self.letter.case, email__email_id="ID1"
).exists()
)
def test_parse_rows_update_status(self):
LogRecord.objects.parse_rows(self.rows)
self.assertEqual(EmailLog.objects.get().status, STATUS.deferred)
LogRecord.objects.parse_rows(
[
get_emaillabs_row(
sender_from=self.letter.case.email, id="ID1", ok_time="Now"
)
]
)
self.assertEqual(EmailLog.objects.get().status, STATUS.ok)
self.assertEqual(LogRecord.objects.count(), 2)
def test_parse_identify_message_by_id(self):
letter = OutgoingLetterFactory()
letter.send()
msg_id = letter.message_id_header
letter.case.refresh_from_db()
row = get_emaillabs_row(sender_from=letter.case.email, message_id=msg_id)
skipped, saved = LogRecord.objects.parse_rows([row])
self.assertEqual(saved, 1)
self.assertEqual(EmailLog.objects.get().letter, letter)
class ObjectMixin:
def setUp(self):
self.user = UserFactory(username="john")
self.record = LogRecordFactory()
self.emaillog = self.record.email
self.case = self.emaillog.case
self.monitoring = self.case.monitoring
self.permission_object = self.monitoring
class EmailLogMonitoringListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.view_log"]
def get_url(self):
return reverse("logs:list", kwargs={"monitoring_pk": self.monitoring.pk})
class EmailLogMonitoringCsvViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.view_log"]
def get_url(self):
return reverse("logs:export", kwargs={"monitoring_pk": self.monitoring.pk})
def test_has_logs(self):
logrecord_for_another_monitoring = LogRecordFactory()
self.login_permitted_user()
response = self.client.get(self.get_url())
self.assertTrue(
response.get("Content-Disposition").startswith("attachment;filename=")
)
self.assertContains(response, self.emaillog.case.institution)
self.assertNotContains(
response,
logrecord_for_another_monitoring.email.case.institution.name,
200,
(
"Csv export for a monitoring should not "
"contain emaillogs for another monitoring"
),
)
class EmailLogCaseListViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.view_log"]
def get_url(self):
return reverse("logs:list", kwargs={"case_pk": self.case.pk})
def test_shows_self_case(self):
self.login_permitted_user()
response = self.client.get(self.get_url())
self.assertContains(response, self.case.name)
def test_shows_only_own_case(self):
self.login_permitted_user()
extra_cases = CaseFactory.create_batch(monitoring=self.monitoring, size=25)
response = self.client.get(self.get_url())
for case in extra_cases:
self.assertNotContains(response, case.name)
class EmailLogDetailViewTestCase(ObjectMixin, PermissionStatusMixin, TestCase):
permission = ["monitorings.view_log"]
def get_url(self):
return reverse("logs:detail", kwargs={"pk": self.emaillog.pk})
class LogRecordTestCase(TestCase):
def test_get_status(self):
data = {
"ok_desc": "250 2.0.0 Ok: queued as A3B925BF18",
"account": "1.siecobywatelska.smtp",
"tracking": [],
"from": "[email protected]",
"open_time": None,
"vps": "smtp2-87",
"tags": [],
"injected_time": "2017-08-24 17:25:50",
"created_at": None,
"updated_at": None,
"message_id": "20170824152549.2577.77274@localhost",
"to": "[email protected]",
"postfix_id": ["3xdSmZ0kpMz6jsBt", "3xdSmZ2ZvWz6Q7V0"],
"ok_time": "2017-08-24 17:25:50",
"open_desc": None,
"uid": "b1db7556ea65065c69d86b81ef248eb5",
"id": "599ef08c42cf33b253fdc5f6",
"subject": "Wniosek o udost\u0119pnienie informacji publicznej",
}
self.assertEqual(LogRecord(data=data).get_status(), "ok") | null |
5,634 | import os
import tempfile
from sympy.core.symbol import (Symbol, symbols)
from sympy.codegen.ast import (
Assignment, Print, Declaration, FunctionDefinition, Return, real,
FunctionCall, Variable, Element, integer
)
from sympy.codegen.fnodes import (
allocatable, ArrayConstructor, isign, dsign, cmplx, kind, literal_dp,
Program, Module, use, Subroutine, dimension, assumed_extent, ImpliedDoLoop,
intent_out, size, Do, SubroutineCall, sum_, array, bind_C
)
from sympy.codegen.futils import render_as_module
from sympy.core.expr import unchanged
from sympy.external import import_module
from sympy.printing.codeprinter import fcode
from sympy.utilities._compilation import has_fortran, compile_run_strings, compile_link_import_strings
from sympy.utilities._compilation.util import may_xfail
from sympy.testing.pytest import skip, XFAIL
cython = import_module('cython')
np = import_module('numpy')
def test_size():
x = Symbol('x', real=True)
sx = size(x)
assert fcode(sx, source_format='free') == 'size(x)'
@may_xfail
def test_size_assumed_shape():
if not has_fortran():
skip("No fortran compiler found.")
a = Symbol('a', real=True)
body = [Return((sum_(a**2)/size(a))**.5)]
arr = array(a, dim=[':'], intent='in')
fd = FunctionDefinition(real, 'rms', [arr], body)
render_as_module([fd], 'mod_rms')
(stdout, stderr), info = compile_run_strings([
('rms.f90', render_as_module([fd], 'mod_rms')),
('main.f90', (
'program myprog\n'
'use mod_rms, only: rms\n'
'real*8, dimension(4), parameter :: x = [4, 2, 2, 2]\n'
'print *, dsqrt(7d0) - rms(x)\n'
'end program\n'
))
], clean=True)
assert '0.00000' in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@XFAIL # https://github.com/sympy/sympy/issues/20265
@may_xfail
def test_ImpliedDoLoop():
if not has_fortran():
skip("No fortran compiler found.")
a, i = symbols('a i', integer=True)
idl = ImpliedDoLoop(i**3, i, -3, 3, 2)
ac = ArrayConstructor([-28, idl, 28])
a = array(a, dim=[':'], attrs=[allocatable])
prog = Program('idlprog', [
a.as_Declaration(),
Assignment(a, ac),
Print([a])
])
fsrc = fcode(prog, standard=2003, source_format='free')
(stdout, stderr), info = compile_run_strings([('main.f90', fsrc)], clean=True)
for numstr in '-28 -27 -1 1 27 28'.split():
assert numstr in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@may_xfail
def test_Program():
x = Symbol('x', real=True)
vx = Variable.deduced(x, 42)
decl = Declaration(vx)
prnt = Print([x, x+1])
prog = Program('foo', [decl, prnt])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([('main.f90', fcode(prog, standard=90))], clean=True)
assert '42' in stdout
assert '43' in stdout
assert stderr == ''
assert info['exit_status'] == os.EX_OK
@may_xfail
def test_Module():
x = Symbol('x', real=True)
v_x = Variable.deduced(x)
sq = FunctionDefinition(real, 'sqr', [v_x], [Return(x**2)])
mod_sq = Module('mod_sq', [], [sq])
sq_call = FunctionCall('sqr', [42.])
prg_sq = Program('foobar', [
use('mod_sq', only=['sqr']),
Print(['"Square of 42 = "', sq_call])
])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([
('mod_sq.f90', fcode(mod_sq, standard=90)),
('main.f90', fcode(prg_sq, standard=90))
], clean=True)
assert '42' in stdout
assert str(42**2) in stdout
assert stderr == ''
@XFAIL # https://github.com/sympy/sympy/issues/20265
@may_xfail
def test_Subroutine():
# Code to generate the subroutine in the example from
# http://www.fortran90.org/src/best-practices.html#arrays
r = Symbol('r', real=True)
i = Symbol('i', integer=True)
v_r = Variable.deduced(r, attrs=(dimension(assumed_extent), intent_out))
v_i = Variable.deduced(i)
v_n = Variable('n', integer)
do_loop = Do([
Assignment(Element(r, [i]), literal_dp(1)/i**2)
], i, 1, v_n)
sub = Subroutine("f", [v_r], [
Declaration(v_n),
Declaration(v_i),
Assignment(v_n, size(r)),
do_loop
])
x = Symbol('x', real=True)
v_x3 = Variable.deduced(x, attrs=[dimension(3)])
mod = Module('mymod', definitions=[sub])
prog = Program('foo', [
use(mod, only=[sub]),
Declaration(v_x3),
SubroutineCall(sub, [v_x3]),
Print([sum_(v_x3), v_x3])
])
if not has_fortran():
skip("No fortran compiler found.")
(stdout, stderr), info = compile_run_strings([
('a.f90', fcode(mod, standard=90)),
('b.f90', fcode(prog, standard=90))
], clean=True)
ref = [1.0/i**2 for i in range(1, 4)]
assert str(sum(ref))[:-3] in stdout
for _ in ref:
assert str(_)[:-3] in stdout
assert stderr == ''
def test_isign():
x = Symbol('x', integer=True)
assert unchanged(isign, 1, x)
assert fcode(isign(1, x), standard=95, source_format='free') == 'isign(1, x)'
def test_dsign():
x = Symbol('x')
assert unchanged(dsign, 1, x)
assert fcode(dsign(literal_dp(1), x), standard=95, source_format='free') == 'dsign(1d0, x)'
def test_cmplx():
x = Symbol('x')
assert unchanged(cmplx, 1, x)
def METHOD_NAME():
x = Symbol('x')
assert unchanged(kind, x)
def test_literal_dp():
assert fcode(literal_dp(0), source_format='free') == '0d0'
@may_xfail
def test_bind_C():
if not has_fortran():
skip("No fortran compiler found.")
if not cython:
skip("Cython not found.")
if not np:
skip("NumPy not found.")
a = Symbol('a', real=True)
s = Symbol('s', integer=True)
body = [Return((sum_(a**2)/s)**.5)]
arr = array(a, dim=[s], intent='in')
fd = FunctionDefinition(real, 'rms', [arr, s], body, attrs=[bind_C('rms')])
f_mod = render_as_module([fd], 'mod_rms')
with tempfile.TemporaryDirectory() as folder:
mod, info = compile_link_import_strings([
('rms.f90', f_mod),
('_rms.pyx', (
"#cython: language_level={}\n".format("3") +
"cdef extern double rms(double*, int*)\n"
"def py_rms(double[::1] x):\n"
" cdef int s = x.size\n"
" return rms(&x[0], &s)\n"))
], build_dir=folder)
assert abs(mod.py_rms(np.array([2., 4., 2., 2.])) - 7**0.5) < 1e-14 | null |
5,635 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
"""
DataTable Widget for data runs.
"""
from qtpy import QtCore, QtWidgets
from qtpy.QtCore import Qt
class DataTableModel(QtCore.QAbstractTableModel):
"""
DataTable Model for the DataTableView widget.
"""
def __init__(self, parent, headers=()):
QtCore.QAbstractTableModel.__init__(self, parent)
self._tableData = []
self.headers = headers
@property
def tableData(self):
return self._tableData
@tableData.setter
def tableData(self, data):
def checkAndConvertRow(row):
assert len(row) == self.columnCount()
return list(row)
self._tableData = list(map(checkAndConvertRow, data))
def _numRows(self):
"""
:return: number of rows with data
"""
return len(self.tableData)
def _getRow(self, row):
"""
:param row: int of the row to get
:return: data of the row
"""
return self.tableData[row] if row < self._numRows() else self._createEmptyRow()
def _isRowEmpty(self, row):
"""
checks if the row is empty
:param row: int of the row to check
:return: true if row is empty
"""
return all((v is None or not str(v).strip()) for v in self._getRow(row))
def _createEmptyRow(self):
return [self._textToData(self._numRows(), i, "") for i in range(self.columnCount())]
def METHOD_NAME(self):
"""
remove all rows at the end of the table that are empty
"""
for row in reversed(range(self._numRows())):
if self._isRowEmpty(row):
del self.tableData[row]
else:
break
def _removeEmptyRows(self):
"""
remove all empty rows
"""
for row in reversed(range(self._numRows())):
if self._isRowEmpty(row):
del self.tableData[row]
def _ensureHasRows(self, numRows):
"""
ensure the table has numRows
:param numRows: number of rows that should exist
"""
while self._numRows() < numRows:
self.tableData.append(self._createEmptyRow())
def _dataToText(self, row, col, value):
"""
converts the stored data to a displayable text.
Override this function if you need data types other than str in your table.
"""
return str(value)
def _textToData(self, row, col, text):
"""
converts a displayable text back to stored data.
Override this function if you need data types other than str in your table.
"""
return text # just return the value, it is already str.
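# Illustrative sketch (not part of the original widget): a subclass storing
# integers could override the two hooks above, for example:
#     def _textToData(self, row, col, text):
#         return int(text) if text else None
#     def _dataToText(self, row, col, value):
#         return "" if value is None else str(value)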
def _setCellText(self, row, col, text):
"""
set the text of a cell
:param row: row of the cell
:param col: column of the cell
:param text: text for the cell
"""
self._ensureHasRows(row + 1)
self.tableData[row][col] = self._textToData(row, col, str(text).strip())
def _getCellText(self, row, col):
"""
get the text of a cell
:param row: row of the cell
:param col: column of the cell
:return: text of the cell
"""
rowData = self._getRow(row)
return self._dataToText(row, col, rowData[col]).strip() if len(rowData) > col else None
# reimplemented QAbstractTableModel methods
selectCell = QtCore.Signal(QtCore.QModelIndex)
def emptyCells(self, indexes):
"""
empty the cells with the indexes
:param indexes: indexes of the cells to be emptied
"""
for index in indexes:
row = index.row()
col = index.column()
self._setCellText(row, col, "")
self._removeEmptyRows()
self.beginResetModel()
self.endResetModel()
# indexes is never empty
self.selectCell.emit(indexes[0])
def rowCount(self, _=QtCore.QModelIndex()):
"""
number of rows
:return: returns the number of rows
"""
# one additional row for new data
return self._numRows() + 1
def columnCount(self, _=QtCore.QModelIndex()):
"""
number of columns
:return: number of columns
"""
return len(self.headers)
def headerData(self, selection, orientation, role):
"""
header of the selection
:param selection: selected cells
:param orientation: orientation of selection
:param role: role of the selection
:return: header of the selection
"""
if Qt.Horizontal == orientation and Qt.DisplayRole == role:
return self.headers[selection]
return None
def data(self, index, role):
"""
data of the cell
:param index: index of the cell
:param role: role of the cell
:return: data of the cell
"""
if Qt.DisplayRole == role or Qt.EditRole == role:
return self._getCellText(index.row(), index.column())
return None
def setData(self, index, text, _):
"""
set text in the cell
:param index: index of the cell
:param text: text for the cell
:return: true if data is set
"""
row = index.row()
col = index.column()
self._setCellText(row, col, text)
self.METHOD_NAME()
self.beginResetModel()
self.endResetModel()
# move selection to the next column or row
col = col + 1
if col >= self.columnCount():
row = row + 1
col = 0
row = min(row, self.rowCount() - 1)
self.selectCell.emit(self.index(row, col))
return True
def flags(self, _):
"""
flags for the table
:return: flags
"""
return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsEditable
class DataTableView(QtWidgets.QTableView):
"""
DataTable Widget for data runs.
"""
def __init__(self, parent, headers, model_cls=None):
"""
:param headers: tuple of strings of the column headers
:param model_cls: the DataTableModel class to instantiate for this view; if not specified, a new DataTableModel is created
:return: a brand new DataTableView
"""
super(DataTableView, self).__init__(parent)
if model_cls is None:
model_cls = DataTableModel
model = model_cls(self, headers)
self.setModel(model)
self.verticalHeader().setVisible(False)
self.horizontalHeader().setStretchLastSection(True)
self.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
def keyPressEvent(self, QKeyEvent):
"""
reimplemented keyPressEvent for deleting cells and arrows in editing cells
:param QKeyEvent:
:return:
"""
if self.state() == QtWidgets.QAbstractItemView.EditingState:
index = self.currentIndex()
if QKeyEvent.key() in [Qt.Key_Down, Qt.Key_Up]:
self.setFocus()
self.setCurrentIndex(self.model().index(index.row(), index.column()))
else:
QtWidgets.QTableView.keyPressEvent(self, QKeyEvent)
if QKeyEvent.key() in [Qt.Key_Delete, Qt.Key_Backspace]:
self.model().emptyCells(self.selectedIndexes())
else:
QtWidgets.QTableView.keyPressEvent(self, QKeyEvent) | null |
5,636 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantiddoc.directives.base import BaseDirective # pylint: disable=unused-import
from sphinx.locale import _ # noqa: F401
import os
from string import Template
import subprocess
STYLE = dict()
STYLE[
"global_style"
] = """
fontname = Helvetica
labelloc = t
ordering = out
node[fontname="Helvetica", style = filled]
edge[fontname="Helvetica"]
"""
STYLE["param_style"] = "node[fillcolor = khaki, shape = oval]"
STYLE["decision_style"] = "node[fillcolor = limegreen, shape = diamond]"
STYLE["algorithm_style"] = 'node[style = "rounded,filled", fillcolor = lightskyblue, shape = rectangle]'
STYLE["process_style"] = "node[fillcolor = lightseagreen, shape = rectangle]"
STYLE["value_style"] = 'node[fontname = "Times-Roman", fillcolor = grey, shape = parallelogram]'
class DiagramDirective(BaseDirective):
"""
Adds a diagram from a dot source file
It requires DIAGRAMS_DIR and DOT_EXECUTABLE environment variables to be set to the
directory where a diagram should be generated and where the dot program may be found,
respectively. If either is unset, a placeholder "image not found" figure is emitted instead of the diagram
"""
required_arguments, optional_arguments = 1, 0
@property
def METHOD_NAME(self):
"""Return the directory generated diagrams should be stored in or
None if they should not be created
"""
METHOD_NAME = os.environ.get("DIAGRAMS_DIR", None)
if METHOD_NAME is None or METHOD_NAME == "":
return None
else:
return METHOD_NAME
def run(self):
"""
The main entry point that docutils calls.
It calls self.execute to do the main work.
Derived classes should override execute() and insert
whatever rst they require with self.add_rst()
"""
nodes = self.execute()
if self.rst_lines is not None:
self.commit_rst()
return nodes
def execute(self):
env = self.state.document.settings.env
METHOD_NAME = self.METHOD_NAME
if METHOD_NAME is None:
self.add_rst(".. figure:: /images/ImageNotFound.png\n\n" " diagram generation was disabled")
return []
try:
dot_executable = os.environ["DOT_EXECUTABLE"]
except KeyError:
self.add_rst(".. figure:: /images/ImageNotFound.png\n\n" " graphviz not found - diagram could not be rendered.")
return []
# Make sure we have an output directory
if not os.path.exists(METHOD_NAME):
os.makedirs(METHOD_NAME)
diagram_name = self.arguments[0]
if diagram_name[-4:] != ".dot":
raise RuntimeError("Diagrams need to be referred to by their filename, including '.dot' extension.")
in_path = os.path.join(env.srcdir, "diagrams", diagram_name)
out_path = os.path.join(METHOD_NAME, diagram_name[:-4] + ".svg")
# Generate the diagram
try:
in_src = open(in_path, "r").read()
except Exception:
raise RuntimeError("Cannot find dot-file: '" + diagram_name + "' in '" + os.path.join(env.srcdir, "diagrams"))
out_src = Template(in_src).substitute(STYLE)
out_src = out_src.encode()
gviz = subprocess.Popen([dot_executable, "-Tsvg", "-o", out_path], stdin=subprocess.PIPE)
gviz.communicate(input=out_src)
gviz.wait()
# relative path to image, in unix style
rel_path = os.path.relpath(out_path, env.srcdir).replace("\\", "/")
self.add_rst(".. image:: /" + rel_path + "\n\n")
return []
# ------------------------------------------------------------------------------------------------------------
def setup(app):
"""
Setup the directives when the extension is activated
Args:
app: The main Sphinx application object
"""
app.add_directive("diagram", DiagramDirective) | null |
5,637 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from qtpy.QtWidgets import QApplication
from qtpy.QtCore import Qt, QMetaObject
from mantid import FrameworkManager
from mantidqt.utils.qt.testing import start_qapplication
from mantidqt.widgets.fitpropertybrowser import FitPropertyBrowserBase
@start_qapplication
class TestFitPropertyBrowser(unittest.TestCase):
def create_widget(self):
return FitPropertyBrowserBase()
def start_setup_menu(self):
self.click_button("button_Setup")
return self.wait_for_popup()
def start_find_peaks(self):
self.trigger_action("action_FindPeaks")
def start_manage_setup(self):
a, pm = self.get_action("action_ManageSetup", get_menu=True)
pm.setActiveAction(a)
m = a.menu()
m.show()
return self.wait_for_popup()
def start_load_from_string(self):
self.trigger_action("action_LoadFromString")
return self.wait_for_modal()
def set_function_string_blah(self):
self.set_function_string("blah")
return self.wait_for_modal()
def set_function_string_linear(self):
self.set_function_string("name=LinearBackground")
return self.wait_for_true(lambda: self.widget.sizeOfFunctionsGroup() == 3)
def set_function_string(self, text):
box = self.get_active_modal_widget()
box.setTextValue(text)
QMetaObject.invokeMethod(box, "accept", Qt.QueuedConnection)
def test_find_peaks_no_workspace(self):
yield self.start_setup_menu()
m = self.get_menu("menu_Setup")
self.assertTrue(m.isVisible())
self.start_find_peaks()
yield self.wait_for_modal()
box = self.get_active_modal_widget()
self.assertEqual(box.text(), "Workspace name is not set")
box.close()
def test_load_from_string_blah(self):
yield self.start_setup_menu()
yield self.start_manage_setup()
yield self.start_load_from_string()
yield self.set_function_string_blah()
box = self.get_active_modal_widget()
self.assertEqual(box.text(), "Unexpected exception caught:\n\nError in input string to FunctionFactory\nblah")
box.close()
def test_load_from_string_lb(self):
yield self.start_setup_menu()
yield self.start_manage_setup()
yield self.start_load_from_string()
yield self.set_function_string_linear()
a = self.widget.getFittingFunction()
self.assertEqual(a, "name=LinearBackground,A0=0,A1=0")
self.assertEqual(self.widget.sizeOfFunctionsGroup(), 3)
def test_multiple_function_string_loaded_correctly(self):
property_browser = self.create_widget()
func = (
"name=Gaussian,Height=100,PeakCentre=1.45,Sigma=0.2,ties=(PeakCentre=1.45);name=Gaussian,Height=100,"
"PeakCentre=7.5,Sigma=0.2,constraints=(0.18<Sigma<0.22),ties=(PeakCentre=7.5);"
"ties=(f0.Sigma=f1.Sigma,f1.Height=f0.Height)"
)
property_browser.loadFunction(func)
# tests composite func set correctly in browser (string incl. ties and constraints)
self.assertEqual(func, property_browser.getFunctionString())
for prefix in property_browser.getPeakPrefixes():
h = property_browser.getPeakHandler(prefix)
# check that the ties (as opposed to fixes) have been set on the child function property handlers
# note that the non-fix tie string lives on the composite function, whereas
# the tie properties (m_ties in the C++ class) are on the child's handler
self.assertTrue(h.hasTies())
# check the peak centre is fixed
self.assertTrue(h.ifun().isFixed(1))
# check constraints on last function have correct length
self.assertEqual(15, len(h.ifun().getConstraints()))
def test_single_function_string_loaded_correctly(self):
property_browser = self.create_widget()
func = "name=Gaussian,Height=487,PeakCentre=5,Sigma=5;ties=(f0.Sigma=f0.PeakCentre)"
property_browser.loadFunction(func)
# test composite func set correctly in browser (string incl. ties and constraints)
# note property_browser.getFunctionString() returns the child function (not composite) if only one function
self.assertEqual(func, str(property_browser.currentHandler().ifun()))
for prefix in property_browser.getPeakPrefixes():
h = property_browser.getPeakHandler(prefix)
self.assertTrue(h.hasTies())
def test_copy_to_clipboard(self):
self.widget.loadFunction("name=LinearBackground,A0=0,A1=0")
yield self.start_setup_menu()
yield self.start_manage_setup()
QApplication.clipboard().clear()
self.trigger_action("action_CopyToClipboard")
yield self.wait_for_true(lambda: QApplication.clipboard().text() != "")
self.assertEqual(QApplication.clipboard().text(), "name=LinearBackground,A0=0,A1=0")
def METHOD_NAME(self):
self.widget.loadFunction("name=LinearBackground,A0=0,A1=0")
self.assertEqual(self.widget.sizeOfFunctionsGroup(), 3)
yield self.start_setup_menu()
yield self.start_manage_setup()
self.trigger_action("action_ClearModel")
yield self.wait_for_true(lambda: self.widget.sizeOfFunctionsGroup() == 2)
self.assertEqual(self.widget.sizeOfFunctionsGroup(), 2)
if __name__ == "__main__":
unittest.main()
FrameworkManager.clear() | null |
5,638 | # -*- coding: utf-8 -*-
from devilry.devilry_cradmin import devilry_listbuilder
from devilry.devilry_cradmin.devilry_listbuilder.period import AdminItemValue
from django.db import models
from itertools import groupby
from django.utils.translation import gettext, gettext_lazy
from django.views.generic import TemplateView
from cradmin_legacy import crapp
from cradmin_legacy.crinstance import reverse_cradmin_url
from cradmin_legacy.viewhelpers import listbuilderview
from cradmin_legacy.viewhelpers import listfilter
from cradmin_legacy.viewhelpers import listbuilder
from devilry.devilry_admin.listbuilder import admindashboard_subject_listbuilder
from devilry.apps.core import models as coremodels
from devilry.apps.core.models import Period, Subject
from devilry.devilry_account.models import SubjectPermissionGroup, PeriodPermissionGroup
from devilry.devilry_cradmin.devilry_listfilter.utils import WithResultValueRenderable, RowListWithMatchResults
class SubjectItemFrame(devilry_listbuilder.common.GoForwardLinkItemFrame):
"""
An item frame for the list of subjects in the Administrator Dashboard Overview
"""
valuealias = 'subject'
def get_url(self):
return reverse_cradmin_url(
instanceid='devilry_admin_subject_for_periodadmin',
appname='subject_redirect',
roleid=self.subject.id,
viewname=crapp.INDEXVIEW_NAME
)
def get_extra_css_classes_list(self):
return ['devilry-admin-dashboard-overview-subjectitemframe']
class OrderSubjectFilter(listfilter.django.single.select.AbstractOrderBy):
def get_ordering_options(self):
return [
('', { # This will be the default sort order
'label': gettext_lazy('Short Name'),
'order_by': ['short_name'],
}),
('short_name_descending', {
'label': gettext_lazy('Short Name (descending)'),
'order_by': ['-short_name'],
}),
]
class SubjectListMatchResultRenderable(WithResultValueRenderable):
def get_object_name_singular(self, num_matches):
return gettext_lazy('course')
def get_object_name_plural(self, num_matches):
return gettext_lazy('courses')
class RowListBuilder(RowListWithMatchResults):
match_result_value_renderable = SubjectListMatchResultRenderable
class OverviewSubjectListView(listbuilderview.FilterListMixin, listbuilderview.View):
model = coremodels.Subject
template_name = 'devilry_admin/dashboard/overview.django.html'
listbuilder_class = RowListBuilder
frame_renderer_class = SubjectItemFrame
value_renderer_class = devilry_listbuilder.subject.AdminItemValue
paginate_by = 50
def get_pageheading(self):
return gettext("Administrator dashboard")
def get_pagetitle(self):
return self.get_pageheading()
def __get_all_subjects_where_user_is_subjectadmin(self):
return Subject.objects.filter_user_is_admin(user=self.request.user) \
.order_by('long_name') \
.distinct()
def __get_all_periods_where_user_is_subjectadmin_or_periodadmin(self):
groups = []
periods = Period.objects.filter_user_is_admin(user=self.request.user) \
.select_related('parentnode') \
.order_by('short_name', 'parentnode__long_name') \
.distinct()
for key, items in groupby(periods, lambda period: period.short_name):
groups.append(list(items))
return groups
def add_filterlist_items(self, filterlist):
"""
Add the filters to the filterlist.
"""
filterlist.append(listfilter.django.single.textinput.Search(
slug='search',
label='Search',
label_is_screenreader_only=True,
modelfields=['long_name']))
filterlist.append(OrderSubjectFilter(
slug='short_name', label=gettext_lazy('Short name')))
def METHOD_NAME(self, filters_string):
"""
This is used by the filterlist to create URLs.
"""
return self.request.cradmin_app.reverse_appurl(
'filter', kwargs={'filters_string': filters_string})
def get_unfiltered_queryset_for_role(self, site):
"""
Create the queryset, and apply the filters from the filterlist.
"""
# Return Subjects where the user can be admin on Subject and or admin on a Period within a Subject
queryset = coremodels.Subject.objects\
.filter_user_is_admin_for_any_periods_within_subject(self.request.user)\
.prefetch_active_period_objects()
# Set unfiltered count on self.
self.num_total = queryset.count()
return queryset
def get_context_data(self, **kwargs):
context = super(OverviewSubjectListView, self).get_context_data(**kwargs)
context['subjects_where_user_is_subjectadmin'] = \
self.__get_all_subjects_where_user_is_subjectadmin()
context['periods_where_user_is_subjectadmin_or_periodadmin'] = \
self.__get_all_periods_where_user_is_subjectadmin_or_periodadmin()
return context
#
# Add support for showing results on the top of the list.
#
def get_listbuilder_list_kwargs(self):
kwargs = super(OverviewSubjectListView, self).get_listbuilder_list_kwargs()
kwargs['num_matches'] = self.num_matches or 0
kwargs['num_total'] = self.num_total or 0
kwargs['page'] = self.request.GET.get('page', 1)
return kwargs
def get_queryset_for_role(self, role):
queryset = super(OverviewSubjectListView, self).get_queryset_for_role(role=role)
# Set filtered count on self.
self.num_matches = queryset.count()
return queryset
class App(crapp.App):
appurls = [
crapp.Url(r'^$', OverviewSubjectListView.as_view(), name=crapp.INDEXVIEW_NAME),
crapp.Url(
r'^filter/(?P<filters_string>.+)?$',
OverviewSubjectListView.as_view(),
name='filter'),
] | null |
5,639 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import math
import pytest
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor, context, Parameter
from mindspore.ops.functional import vmap
ms.set_seed(2022)
context.set_context(mode=context.GRAPH_MODE, device_target='GPU')
class ApplyAdamWithAmsgradTEST(nn.Cell):
def __init__(self, beta1=0.9, beta2=0.999, epsilon=1e-8, use_locking=False):
super(ApplyAdamWithAmsgradTEST, self).__init__()
shape = (8, 9, 6, 10, 5)
self.apply_adam_with_amsgrad = P.ApplyAdamWithAmsgrad(beta1, beta2, epsilon, use_locking)
self.var_np = np.random.randn(*shape).astype(np.float32)
self.m_np = np.random.randn(*shape).astype(np.float32)
self.v_np = np.random.randn(*shape).astype(np.float32)
self.vhat_np = np.random.randn(*shape).astype(np.float32)
self.var = Parameter(Tensor(self.var_np), name="var")
self.m = Parameter(Tensor(self.m_np), name="m")
self.v = Parameter(Tensor(self.v_np), name="v")
self.vhat = Parameter(Tensor(self.vhat_np), name="vhat")
def construct(self, beta1_power, beta2_power, lr, grad):
return self.apply_adam_with_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad)
def numpy_apply_adam_with_amsgrad(var, m, v, vhat, grad, beta1=0.9, beta2=0.999, eps=1e-8, lr=0.01):
new_lr = lr * math.sqrt(1 - beta2) / (1 - beta1)
m = m * beta1 + grad * (1 - beta1)
v = v * beta2 + grad * grad * (1 - beta2)
vhat = np.maximum(vhat, v)
var = var - new_lr * m / (np.sqrt(vhat) + eps)
return var
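# The reference above follows the AMSGrad update (comments added for clarity):
#   m_t    = beta1 * m_{t-1} + (1 - beta1) * g
#   v_t    = beta2 * v_{t-1} + (1 - beta2) * g^2
#   vhat_t = max(vhat_{t-1}, v_t)
#   var_t  = var_{t-1} - lr * sqrt(1 - beta2) / (1 - beta1) * m_t / (sqrt(vhat_t) + eps)
# The lr rescaling uses beta1/beta2 directly, matching the single-step powers passed in these tests.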
@pytest.mark.level1
@pytest.mark.env_onecard
@pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_apply_adam_with_amsgrad_op(data_type):
"""
Feature: ApplyAdamWithAmsgrad gpu kernel
Description: test the ApplyAdamWithAmsgrad.
Expectation: match to np benchmark.
"""
shape = (8, 9, 6, 10, 5)
amsgrad = ApplyAdamWithAmsgradTEST()
error = 1e-4
if data_type == np.float16:
error = 1e-3
grad_np = np.random.randn(*shape).astype(np.float32)
grad = Tensor(grad_np)
output = amsgrad(Tensor(0.9), Tensor(0.999), Tensor(0.01), grad)
ms_var = output[0].asnumpy()
np_var = numpy_apply_adam_with_amsgrad(amsgrad.var_np, amsgrad.m_np,
amsgrad.v_np, amsgrad.vhat_np, grad_np)
np.testing.assert_allclose(ms_var, np_var, rtol=error, atol=error)
class AmsgradNetVmap(nn.Cell):
def __init__(self, net):
super(AmsgradNetVmap, self).__init__()
shape = (8, 9, 6, 10, 5)
self.net = net
self.var_np = np.random.randn(*shape).astype(np.float32)
self.m_np = np.random.randn(*shape).astype(np.float32)
self.v_np = np.random.randn(*shape).astype(np.float32)
self.vhat_np = np.random.randn(*shape).astype(np.float32)
self.var = Parameter(Tensor(self.var_np), name="var")
self.m = Parameter(Tensor(self.m_np), name="m")
self.v = Parameter(Tensor(self.v_np), name="v")
self.vhat = Parameter(Tensor(self.vhat_np), name="vhat")
self.vmap_amsgrad = vmap(self.net, in_axes=(
0, 0, 0, 0, None, None, None, 0), out_axes=0)
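# in_axes note (added comment): var/m/v/vhat and grad are mapped over their leading
# axis, while beta1_power, beta2_power and lr (the None entries) are broadcast
# unchanged to every vmapped instance.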
def construct(self, beta1_power, beta2_power, lr, grad):
return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad)
@pytest.mark.level1
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: ApplyAdamWithAmsgrad gpu kernel
Description: test the ApplyAdamWithAmsgrad vmap.
Expectation: match to np benchmark.
"""
shape = (8, 9, 6, 10, 5)
def cal_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, grad):
return P.ApplyAdamWithAmsgrad()(var, m, v, vhat, beta1_power, beta2_power, lr, grad)
error = 1e-4
grad_np = np.random.randn(*shape).astype(np.float32)
grad = Tensor(grad_np)
vmap_amsgrad = AmsgradNetVmap(cal_amsgrad)
_ = vmap_amsgrad(Tensor(0.9), Tensor(0.999), Tensor(0.01), grad)
ms_var = vmap_amsgrad.var.asnumpy()
np_var = numpy_apply_adam_with_amsgrad(vmap_amsgrad.var_np, vmap_amsgrad.m_np,
vmap_amsgrad.v_np, vmap_amsgrad.vhat_np, grad_np)
np.testing.assert_allclose(ms_var, np_var, rtol=error, atol=error)
class AmsgradNetVmap2(nn.Cell):
def __init__(self, net):
super(AmsgradNetVmap2, self).__init__()
shape = (8, 9, 6, 10, 5)
self.net = net
self.var_np = np.random.randn(*shape).astype(np.float32)
self.m_np = np.random.randn(*shape).astype(np.float32)
self.v_np = np.random.randn(*shape).astype(np.float32)
self.vhat_np = np.random.randn(*shape).astype(np.float32)
self.var = Parameter(Tensor(self.var_np), name="var")
self.m = Parameter(Tensor(self.m_np), name="m")
self.v = Parameter(Tensor(self.v_np), name="v")
self.vhat = Parameter(Tensor(self.vhat_np), name="vhat")
self.vmap_amsgrad = vmap(vmap(self.net, in_axes=(0, 0, 0, 0, None, None, None, 0), out_axes=0),
in_axes=(0, 0, 0, 0, None, None, None, 0), out_axes=0)
def construct(self, beta1_power, beta2_power, lr, grad):
return self.vmap_amsgrad(self.var, self.m, self.v, self.vhat, beta1_power, beta2_power, lr, grad)
@pytest.mark.level1
@pytest.mark.env_onecard
def test_apply_adam_with_amsgrad_grad_op_vmap2():
"""
Feature: ApplyAdamWithAmsgrad gpu kernel
Description: test the ApplyAdamWithAmsgrad vmap.
Expectation: match to np benchmark.
"""
shape = (8, 9, 6, 10, 5)
def cal_amsgrad(var, m, v, vhat, beta1_power, beta2_power, lr, grad):
return P.ApplyAdamWithAmsgrad()(var, m, v, vhat, beta1_power, beta2_power, lr, grad)
error = 1e-4
grad_np = np.random.randn(*shape).astype(np.float32)
grad = Tensor(grad_np)
vmap_amsgrad = AmsgradNetVmap2(cal_amsgrad)
_ = vmap_amsgrad(Tensor(0.9), Tensor(0.999), Tensor(0.01), grad)
ms_var = vmap_amsgrad.var.asnumpy()
np_var = numpy_apply_adam_with_amsgrad(vmap_amsgrad.var_np, vmap_amsgrad.m_np,
vmap_amsgrad.v_np, vmap_amsgrad.vhat_np, grad_np)
np.testing.assert_allclose(ms_var, np_var, rtol=error, atol=error) | null |
5,640 | from __future__ import annotations
import os
import stat
import sys
import tempfile
import urllib.parse
from functools import partial
from itertools import chain
from typing import Any
from urllib.parse import urlsplit
from werkzeug import urls as werkzeug_urls
from werkzeug.datastructures import MultiDict
__all__ = ["TemporaryDirectory", "importlib_metadata", "werkzeug_urls_URL"]
def _ensure_tree_writeable(path: str) -> None:
"""Attempt to ensure that all files in the tree rooted at path are writeable."""
dirscans = []
def fix_mode(path, statfunc):
try:
# paranoia regarding symlink attacks
current_mode = statfunc(follow_symlinks=False).st_mode
if not stat.S_ISLNK(current_mode):
isdir = stat.S_ISDIR(current_mode)
fixed_mode = current_mode | (0o700 if isdir else 0o200)
if current_mode != fixed_mode:
os.chmod(path, fixed_mode)
if isdir:
dirscans.append(os.scandir(path))
except FileNotFoundError:
pass
fix_mode(path, partial(os.stat, path))
for entry in chain.from_iterable(dirscans):
fix_mode(entry.path, entry.stat)
class FixedTemporaryDirectory(tempfile.TemporaryDirectory):
"""A version of tempfile.TemporaryDirectory that works if dir contains read-only files.
On python < 3.8 under Windows, if any read-only files are created
in a TemporaryDirectory, TemporaryDirectory will throw an
exception when it tries to remove them on cleanup. See
https://bugs.python.org/issue26660
This can create issues, e.g., with temporary git repositories since
git creates read-only files in its object store.
"""
def cleanup(self) -> None:
_ensure_tree_writeable(self.name)
super().cleanup()
if sys.version_info >= (3, 8):
TemporaryDirectory = tempfile.TemporaryDirectory
from importlib import metadata as importlib_metadata
else:
TemporaryDirectory = FixedTemporaryDirectory
import importlib_metadata
class _CompatURL(urllib.parse.SplitResult):
"""This is a replacement for ``werkzeug.urls.URL``.
Here we implement those attributes and methods of ``URL`` which are
likely to be used by existing Lektor publishing plugins.
Currently unreimplemented here are the ``encode_netloc``, ``decode_netloc``,
``get_file_location``, and ``encode`` methods of ``werkzeug.urls.URL``.
NB: Use of this class is deprecated. DO NOT USE THIS IN NEW CODE!
"""
def __str__(self) -> str:
return self.geturl()
def replace(self, **kwargs: Any) -> _CompatURL:
return self._replace(**kwargs)
@property
def host(self) -> str | None:
return self.hostname
@property
def ascii_host(self) -> str | None:
host = self.hostname
if host is None:
return None
try:
return host.encode("idna").decode("ascii")
except UnicodeError:
return host
@property
def METHOD_NAME(self) -> str | None:
METHOD_NAME, _, _ = self.netloc.rpartition("@")
return METHOD_NAME if METHOD_NAME != "" else None
@property
def username(self) -> str | None:
username = super().username
if username is None:
return None
return _unquote_legacy(username)
@property
def raw_username(self) -> str | None:
return super().username
@property
def password(self) -> str | None:
password = super().password
if password is None:
return None
return _unquote_legacy(password)
@property
def raw_password(self) -> str | None:
return super().password
def decode_query(
self,
charset: str = "utf-8",
include_empty: bool = True,
errors: str = "replace",
# parse_qsl does not support the separator parameter in python < 3.7.10.
# separator: str = "&",
) -> MultiDict:
return MultiDict(
urllib.parse.parse_qsl(
self.query,
keep_blank_values=include_empty,
encoding=charset,
errors=errors,
# separator=separator,
)
)
def join(
self, url: str | tuple[str, str, str, str, str], allow_fragments: bool = True
) -> _CompatURL:
if isinstance(url, tuple):
url = urllib.parse.urlunsplit(url)
joined = urllib.parse.urljoin(self.geturl(), url, allow_fragments)
return _CompatURL._make(urlsplit(joined))
def to_url(self) -> str:
return self.geturl()
def to_uri_tuple(self) -> _CompatURL:
return _CompatURL._make(urlsplit(werkzeug_urls.iri_to_uri(self.geturl())))
def to_iri_tuple(self) -> _CompatURL:
return _CompatURL._make(urlsplit(werkzeug_urls.uri_to_iri(self.geturl())))
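# Illustrative usage of the shim above (values invented for the example):
#   url = _CompatURL._make(urlsplit("https://user:p%40ss@example.com/page?q=1"))
#   url.host                 -> "example.com"
#   url.raw_password         -> "p%40ss"
#   url.decode_query()["q"]  -> "1"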
def _unquote_legacy(value: str) -> str:
try:
return urllib.parse.unquote(value, "utf-8", "strict")
except UnicodeError:
return urllib.parse.unquote(value, "latin1")
# Provide a replacement for the deprecated werkzeug.urls.URL class
#
# NB: Do not use this in new code!
#
# We only use this in lektor.publishers in order to provide some backward
# compatibility for custom publishers from existing Lektor plugins.
# At such point as we decide that backward-compatibility is no longer
# needed, this will be deleted.
#
werkzeug_urls_URL = getattr(werkzeug_urls, "URL", _CompatURL) | null |
5,641 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtGui, QtWidgets
from qtpy.QtCore import Signal, QSettings, QFileInfo
from mantidqt.utils.qt import load_ui
from mantidqt.widgets import manageuserdirectories
Ui_RunSelectorWidget, _ = load_ui(__file__, "run_selector_widget.ui")
class RunSelectorWidget(QtWidgets.QWidget, Ui_RunSelectorWidget):
manageDirectories = Signal()
browse = Signal()
addRuns = Signal()
removeRuns = Signal()
removeAllRuns = Signal()
def __init__(self, parent=None):
super(RunSelectorWidget, self).__init__(parent)
self.setupUi(self)
self._connect_signals()
def setupUi(self, other):
Ui_RunSelectorWidget.setupUi(self, other)
self.runList.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
def show_file_picker(self, extensions, search_directories):
assert len(extensions) > 0
previous_directories = self._previous_directory_settings()
default_directory = search_directories[0]
directory = self._previous_or_default_directory(previous_directories, default_directory)
file_filter = self._filter_for_extensions(extensions)
chosen_files, _ = QtWidgets.QFileDialog.getOpenFileNames(self, "Select files", directory, file_filter)
if chosen_files:
self._store_previous_directory(previous_directories, chosen_files[0])
return [str(chosen_file) for chosen_file in chosen_files]
def _previous_directory_settings(self):
previous_directories = QSettings()
previous_directories.beginGroup("CustomInterfaces/SANSRunWindow/AddRuns")
return previous_directories
def _previous_or_default_directory(self, settings, default):
return settings.value("InPath", default, type=type(default))
def _store_previous_directory(self, settings, path):
previous_file = QFileInfo(path)
settings.setValue("InPath", previous_file.absoluteDir().absolutePath())
def _filter_for_extensions(self, extensions):
return "Files ( *" + " *".join(extensions) + ")"
def show_directories_manager(self):
manageuserdirectories.ManageUserDirectories.openManageUserDirectories()
def run_not_found(self):
QtWidgets.QMessageBox.warning(self, "Run Not Found!", "Could not find one or more of the runs specified.")
def invalid_run_query(self, message):
QtWidgets.QMessageBox.warning(self, "Invalid Run Query!", message)
def METHOD_NAME(self):
return str(self.runLineEdit.text())
def selected_runs(self):
selected = [runModel.row() for runModel in self.runList.selectedIndexes()]
return selected
def draw_runs(self, runs):
model = QtGui.QStandardItemModel()
for run in runs:
item = QtGui.QStandardItem(run.display_name())
item.setToolTip(run.file_path())
model.appendRow(item)
self.runList.setModel(model)
@property
def title(self):
return self.runsGroup.title()
@title.setter
def title(self, new_title):
self.runsGroup.setTitle(new_title)
def _handle_add_run(self):
self.addRuns.emit()
def _handle_remove_all_runs(self):
self.removeAllRuns.emit()
def _handle_remove_run(self):
self.removeRuns.emit()
def _handle_manage_directories(self):
self.manageDirectories.emit()
def _handle_browse_files(self):
self.browse.emit()
def _connect_signals(self):
self.addRunButton.pressed.connect(self._handle_add_run)
self.runLineEdit.returnPressed.connect(self._handle_add_run)
self.removeRunButton.pressed.connect(self._handle_remove_run)
self.removeAllRunsButton.pressed.connect(self._handle_remove_all_runs)
self.manageDirectoriesButton.pressed.connect(self._handle_manage_directories)
self.browseFileButton.pressed.connect(self._handle_browse_files) | null |
5,642 | import pytest
from fastapi.testclient import TestClient
from ...utils import needs_pydanticv2
@pytest.fixture(name="client")
def get_client() -> TestClient:
from docs_src.separate_openapi_schemas.tutorial002 import app
client = TestClient(app)
return client
def test_create_item(client: TestClient) -> None:
response = client.post("/items/", json={"name": "Foo"})
assert response.status_code == 200, response.text
assert response.json() == {"name": "Foo", "description": None}
def METHOD_NAME(client: TestClient) -> None:
response = client.get("/items/")
assert response.status_code == 200, response.text
assert response.json() == [
{
"name": "Portal Gun",
"description": "Device to travel through the multi-rick-verse",
},
{"name": "Plumbus", "description": None},
]
@needs_pydanticv2
def test_openapi_schema(client: TestClient) -> None:
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/": {
"get": {
"summary": "Read Items",
"operationId": "read_items_items__get",
"responses": {
"200": {
"description": "Successful Response",
"content": {
"application/json": {
"schema": {
"items": {"$ref": "#/components/schemas/Item"},
"type": "array",
"title": "Response Read Items Items Get",
}
}
},
}
},
},
"post": {
"summary": "Create Item",
"operationId": "create_item_items__post",
"requestBody": {
"content": {
"application/json": {
"schema": {"$ref": "#/components/schemas/Item"}
}
},
"required": True,
},
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
},
}
},
"components": {
"schemas": {
"HTTPValidationError": {
"properties": {
"detail": {
"items": {"$ref": "#/components/schemas/ValidationError"},
"type": "array",
"title": "Detail",
}
},
"type": "object",
"title": "HTTPValidationError",
},
"Item": {
"properties": {
"name": {"type": "string", "title": "Name"},
"description": {
"anyOf": [{"type": "string"}, {"type": "null"}],
"title": "Description",
},
},
"type": "object",
"required": ["name"],
"title": "Item",
},
"ValidationError": {
"properties": {
"loc": {
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
"type": "array",
"title": "Location",
},
"msg": {"type": "string", "title": "Message"},
"type": {"type": "string", "title": "Error Type"},
},
"type": "object",
"required": ["loc", "msg", "type"],
"title": "ValidationError",
},
}
},
} | null |
5,643 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import FileProperty, WorkspaceProperty, PythonAlgorithm, AlgorithmFactory, FileAction, PropertyMode
from mantid.kernel import Direction
from mantid.simpleapi import LoadAscii, CreateWorkspace
import itertools
class LoadGudrunOutput(PythonAlgorithm):
def METHOD_NAME(self):
return "LoadGudrunOutput"
def category(self):
return "DataHandling"
def summary(self):
return "Loads the common outputs created from Gudrun"
def PyInit(self):
self.declareProperty(
FileProperty(
METHOD_NAME="InputFile", defaultValue="", action=FileAction.Load, extensions=[".dcs01", ".mdsc01", ".mint01", ".mdor01", ".mgor01"]
),
doc="Gudrun output file to be loaded.",
)
self.declareProperty(
WorkspaceProperty(METHOD_NAME="OutputWorkspace", defaultValue="", direction=Direction.Output, optional=PropertyMode.Optional),
doc="If No OutputWorkspace is provided, then the workpsace name " "will be obtained from the meta data in the input file.",
)
def PyExec(self):
input_file = self.getProperty("InputFile").value
output_ws = self.getPropertyValue("OutputWorkspace")
if not output_ws:
output_ws = self.get_title(input_file)
number_of_columns, data_line_start = self.find_number_of_columns(input_file)
self.load_gudrun_file(input_file, output_ws, number_of_columns, data_line_start)
self.setProperty("OutputWorkspace", output_ws)
def get_title(self, input_file):
"""
Return the title from the file meta data
:param input_file: file to get meta data from
:return: (title)
"""
with open(input_file, "r") as gudrun_file:
first_line = gudrun_file.readline()
first_line = first_line[2:]
return first_line.replace(".", "-")
def find_number_of_columns(self, input_file):
"""
Evaluate how many columns of data there are in the file
:param input_file: The file to check
        :return: (The number of columns of data, the index of the first data line)
"""
with open(input_file, "r") as gudrun_file:
data_line_start = 0
while gudrun_file.readline().startswith("#"):
# skip over lines that are commented
data_line_start += 1
row = self.format_data_row(gudrun_file.readline().split(" "))
return len(row), data_line_start
def load_gudrun_file(self, input_file, output_workspace, number_of_columns, first_data_line):
"""
Loads the gudrun file using the mantid LoadAscii algorithm
:param input_file: The file to load
:param output_workspace: The workspace to be the result of the load
:param number_of_columns: The number of columns in the file being loaded
:param first_data_line: The first line to expect data on
:return: The outputWorkspace of the Load Algorithm
"""
if number_of_columns % 2 == 0:
print(number_of_columns)
raise ValueError(
"Incorrect data format: The input file {} must have an odd number "
"of columns in the format X , Y , E, Y1, E1, Y2, E2, ...".format(input_file)
)
elif number_of_columns == 3:
LoadAscii(Filename=input_file, OutputWorkspace=output_workspace, CommentIndicator="#")
else:
self.load_multi_column_file(input_file, output_workspace, first_data_line)
def load_multi_column_file(self, input_file, output_workspace, first_data_line):
"""
Load a file that has an odd number of columns that is more than 3
:param input_file: The file to extract the data from
        :param output_workspace: The workspace to put the data into
:param first_data_line: The first line to expect data on
:return: The outputWorkspace pointer
"""
with open(input_file, "r") as gudrun_file:
data_rows = [self.format_data_row(line.split(" ")) for line in gudrun_file.readlines()[first_data_line:]]
x_data, y_data, e_data = self.create_2d_data_arrays(data_rows)
n_spec = int((len(data_rows[0]) - 1) / 2)
CreateWorkspace(OutputWorkspace=output_workspace, DataX=x_data, DataY=y_data, DataE=e_data, NSpec=n_spec)
def format_data_row(self, data_row):
"""
Remove special characters and empty lines from data list
:param data_row: The data to format
:return: a formatted data row
"""
formatted_row = [data.rstrip() for data in data_row if data != ""]
return formatted_row
def create_2d_data_arrays(self, all_data):
"""
Create 1d x data array and 2d y and e data arrays
:param all_data: All the data to create arrays from. Assumed format of:
X, Y, E, Y1, E1, Y2, E2 ... (first bin)
X, Y, E, Y1, E1, Y2, E2 ... (second bin)
:return: ([x_data], [y, y1, y2, y3], [e, e1, e2, e3])
"""
row_length = len(all_data[0])
x_data = []
# Create empty 2d lists
y_data = [[] for _ in range(int((row_length - 1) / 2.0))]
e_data = [[] for _ in range(int((row_length - 1) / 2.0))]
for data_row in all_data:
x_data.append(float(data_row[0]))
for row_index in range(1, len(data_row)):
if row_index % 2 == 1:
# y_data
data_array_index = int((row_index / 2.0) - 0.5)
y_data[data_array_index].append(float(data_row[row_index]))
else:
# e_data
data_array_index = int((row_index / 2.0) - 1.0)
e_data[data_array_index].append(float(data_row[row_index]))
# collapse 2d lists
y_data = list(itertools.chain.from_iterable(y_data))
e_data = list(itertools.chain.from_iterable(e_data))
return x_data, y_data, e_data
AlgorithmFactory.subscribe(LoadGudrunOutput) | null |
5,644 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
def METHOD_NAME(beta, reduction):
np.random.seed(42)
prediction = np.random.randn(20).astype(np.float32)
target = np.random.randn(20).astype(np.float32)
net = nn.SmoothL1Loss(beta, reduction)
return net(Tensor(prediction), Tensor(target))
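# For context, the expected values below follow the standard SmoothL1 definition
# (stated here as an assumption, not taken from this file):
#   loss_i = 0.5 * (x_i - y_i)**2 / beta   if |x_i - y_i| < beta
#   loss_i = |x_i - y_i| - 0.5 * beta      otherwise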
def verify_forward(reduction, loss, expect):
if reduction == 'none':
np.testing.assert_array_almost_equal(loss, expect)
elif reduction == "sum":
expect_sum = np.sum(expect)
np.testing.assert_array_almost_equal(loss, expect_sum, decimal=5)
elif reduction == "mean":
expect_mean = np.mean(expect)
np.testing.assert_array_almost_equal(loss, expect_mean)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("reduction", ['none', 'mean', 'sum'])
def test_smoothl1loss(reduction):
"""
    Feature: SmoothL1Loss gpu kernel.
    Description: test the correctness of the SmoothL1Loss gpu kernel.
    Expectation: the output is the same as expected.
"""
beta = 1.0
loss = METHOD_NAME(beta, reduction)
expect = np.array([0.46941718, 0.00382918, 0.16829303, 2.447778, 0.04812113, 0.05953304,
2.2302065, 0.07672881, 0.00860204, 0.34798968, 0.00956192, 1.818008,
0.03262977, 0.36599946, 2.047463, 0.2168481, 0.7216947, 1.7739174,
0.08826803, 1.109165])
verify_forward(reduction, loss.asnumpy(), expect)
beta = 1 / 9
loss = METHOD_NAME(beta, reduction)
expect = np.array([0.9133791, 0.03446258, 0.5246048, 2.8922224, 0.2546738, 0.289504,
2.674651, 0.33618113, 0.07560876, 0.7786982, 0.08273339, 2.2624524,
0.19990394, 0.8000138, 2.4919074, 0.6030006, 1.1661391, 2.2183619,
0.3646064, 1.5536094])
verify_forward(reduction, loss.asnumpy(), expect)
class Grad(nn.Cell):
def __init__(self, network):
super(Grad, self).__init__()
self.grad = C.GradOperation(get_all=True, sens_param=True)
self.network = network
def construct(self, x1, x2, sens):
gout = self.grad(self.network)(x1, x2, sens)
return gout
def smoothl1loss_grad(beta):
np.random.seed(42)
prediction = np.random.randn(20).astype(np.float32)
target = np.random.randn(20).astype(np.float32)
sens = np.random.randn(20).astype(np.float32)
net = nn.SmoothL1Loss(beta)
grad = Grad(net)
return grad(Tensor(prediction), Tensor(target), Tensor(sens))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_smoothl1loss_grad_no_reduce():
"""
    Feature: SmoothL1LossGrad gpu kernel.
    Description: test the correctness of the SmoothL1LossGrad gpu kernel.
    Expectation: the output is the same as expected.
"""
epsilon = 1e-6
beta = 1.0
dx = smoothl1loss_grad(beta)
dx1_expect = np.array([-0.71552587, 0.01499678, -0.06709455, -0.30110368, -0.45868093,
0.24838912, -0.46063876, 0.41411355, 0.04507046, -1.4708229,
0.04481723, 0.38508227, -0.17292616, -0.52333146, -1.0309995,
0.61330026, 0.83921754, -0.3092124, 0.1391843, -0.9755451], dtype=np.float32)
dx2_expect = -dx1_expect
diff1 = np.absolute(dx[0].asnumpy() - dx1_expect)
diff2 = np.absolute(dx[1].asnumpy() - dx2_expect)
assert(diff1 < epsilon).all()
assert(diff2 < epsilon).all()
beta = 1 / 9
dx = smoothl1loss_grad(beta)
dx1_expect = np.array([-0.73846656, 0.13497104, -0.11564828, -0.30110368, -1.478522,
0.7198442, -0.46063876, 1.0571222, 0.3436183, -1.7630402,
0.32408398, 0.38508227, -0.676922, -0.6116763, -1.0309995,
0.93128014, 0.83921754, -0.3092124, 0.33126342, -0.9755451], dtype=np.float32)
dx2_expect = -dx1_expect
diff1 = np.absolute(dx[0].asnumpy() - np.array(dx1_expect))
diff2 = np.absolute(dx[1].asnumpy() - np.array(dx2_expect))
assert(diff1 < epsilon).all()
assert(diff2 < epsilon).all()
def smoothl1loss_grad_2(beta, reduction):
prediction = np.array([1, 2, 3, 4, 5, 6], dtype=np.float32)
target = np.array([100, 2, 7, 32, 34, 1], dtype=np.float32)
sens = np.array([9], dtype=np.float32)
net = nn.SmoothL1Loss(beta, reduction)
grad = Grad(net)
return grad(Tensor(prediction), Tensor(target), Tensor(sens))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
@pytest.mark.parametrize("reduction", ['mean', 'sum'])
def test_smoothl1loss_grad_sum(reduction):
"""
    Feature: SmoothL1LossGrad gpu kernel, reduction = sum or mean.
    Description: test the correctness of the SmoothL1LossGrad gpu kernel.
    Expectation: the output is the same as expected.
"""
beta = 1.0
dx = smoothl1loss_grad_2(beta, reduction)
sum_dx1_expect = np.array([-9, 0, -9, -9, -9, 9], dtype=np.float32)
sum_dx2_expect = -sum_dx1_expect
mean_dx1_expect = np.array(
[-1.5, 0, -1.5, -1.5, -1.5, 1.5], dtype=np.float32)
mean_dx2_expect = -mean_dx1_expect
print("dx[0].asnumpy()", dx[0].asnumpy())
print("dx[1].asnumpy()", dx[1].asnumpy())
if reduction == 'sum':
np.testing.assert_array_almost_equal(dx[0].asnumpy(), sum_dx1_expect)
np.testing.assert_array_almost_equal(dx[1].asnumpy(), sum_dx2_expect)
if reduction == 'mean':
np.testing.assert_array_almost_equal(dx[0].asnumpy(), mean_dx1_expect)
np.testing.assert_array_almost_equal(dx[1].asnumpy(), mean_dx2_expect) | null |
5,645 | from singularity.code import g
from singularity.code import logmessage, data, savegame
from singularity.code.dirs import create_directories
from singularity.code.buyable import cpu, cash, labor
import io
class MockObject(object):
pass
def setup_module():
g.no_gui()
create_directories(True)
data.reload_all()
def setup_function(func):
# Some operations (e.g. g.pl.recalc_cpu()) triggers a "needs_rebuild"
# of the map screen. Mock that bit for now to enable testing.
g.map_screen = MockObject()
g.map_screen.needs_rebuild = False
def METHOD_NAME():
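    # Round-trip the current game through an in-memory buffer: close() is stubbed
    # out so write_game_to_fd() cannot close the BytesIO before its contents are
    # read back and loaded again.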
fd = io.BytesIO(b"")
real_close = fd.close
fd.close = lambda *args, **kwargs: None
savegame.write_game_to_fd(fd, gzipped=False)
fd = io.BytesIO(fd.getvalue())
savegame.load_savegame_fd(savegame.load_savegame_by_json, io.BufferedReader(fd))
real_close()
def test_initial_game():
g.new_game("impossible", initial_speed=0)
pl = g.pl
starting_cash = pl.cash
all_bases = list(g.all_bases())
assert pl.raw_sec == 0
assert pl.partial_cash == 0
assert pl.effective_cpu_pool() == 1
assert not pl.intro_shown
assert len(pl.log) == 0
assert len(all_bases) == 1
assert pl.effective_cpu_pool() == 1
start_base = all_bases[0]
# Disable the intro dialog as the test cannot click the
# OK button
pl.intro_shown = True
# Dummy check to hit special-case in give time
pl.give_time(0)
assert pl.raw_sec == 0
# Try to guesstimate how much money we earn in 24 hours
cash_estimate, cpu_estimate = pl.compute_future_resource_flow()
assert cash_estimate.jobs == 5
# Try assigning the CPU to "jobs"
pl.set_allocated_cpu_for("jobs", 1)
# This would empty the CPU pool
assert pl.effective_cpu_pool() == 0
# This should not change the estimate
cash_estimate, cpu_estimate = pl.compute_future_resource_flow()
assert cash_estimate.jobs == 5
# ... and then clear the CPU allocation
pl.set_allocated_cpu_for("jobs", 0)
# Play with assigning the CPU to the CPU pool explicitly and
# confirm that the effective pool size remains the same.
assert pl.effective_cpu_pool() == 1
pl.set_allocated_cpu_for("cpu_pool", 1)
assert pl.effective_cpu_pool() == 1
pl.set_allocated_cpu_for("cpu_pool", 0)
assert pl.effective_cpu_pool() == 1
# Fast forward 12 hours to see that we earn partial cash
pl.give_time(g.seconds_per_day // 2)
assert pl.raw_sec == g.seconds_per_day // 2
assert pl.partial_cash == g.seconds_per_day // 2
assert pl.cash == starting_cash + 2
# Nothing should have appeared in the logs
assert len(pl.log) == 0
# Fast forward another 12 hours to see that we earn cash
pl.give_time(g.seconds_per_day // 2)
assert pl.raw_sec == g.seconds_per_day
assert pl.partial_cash == 0
assert pl.cash == starting_cash + 5
# Nothing should have appeared in the logs
assert len(pl.log) == 0
# Verify that starting base is well active.
assert start_base._power_state == "active"
# Verify that putting a base to sleep will update the
# available CPU (#179/#180)
assert pl.effective_cpu_pool() == 1
start_base.switch_power()
assert pl.effective_cpu_pool() == 0
start_base.switch_power()
assert pl.effective_cpu_pool() == 1
# Attempt to allocate a CPU to research and then
# verify that sleep resets it.
stealth_tech = g.pl.techs["Stealth"]
pl.set_allocated_cpu_for(stealth_tech.id, 1)
assert pl.get_allocated_cpu_for(stealth_tech.id) == 1
start_base.switch_power()
assert pl.get_allocated_cpu_for(stealth_tech.id) == 0
# When we wake up the base again, the CPU unit is
# unallocated.
start_base.switch_power()
assert pl.effective_cpu_pool() == 1
# Now, allocate the CPU unit again to the tech to
# verify that we can research things.
pl.set_allocated_cpu_for(stealth_tech.id, 1)
# ... which implies that there are now no unallocated CPU
assert pl.effective_cpu_pool() == 0
pl.give_time(g.seconds_per_day)
# Nothing should have appeared in the logs
assert len(pl.log) == 0
# We should have spent some money at this point
assert pl.cash < starting_cash + 5
assert stealth_tech.cost_left[cpu] < stealth_tech.total_cost[cpu]
assert stealth_tech.cost_left[cash] < stealth_tech.total_cost[cash]
# We did not lose the game
assert pl.lost_game() == 0
# With a save + load
time_raw_before_save = pl.raw_sec
cash_before_save = pl.cash
partial_cash_before_save = pl.partial_cash
METHOD_NAME()
stealth_tech_after_load = g.pl.techs["Stealth"]
# Ensure this is not a false-test
assert stealth_tech is not stealth_tech_after_load
assert stealth_tech.cost_paid[cpu] == stealth_tech_after_load.cost_paid[cpu]
assert stealth_tech.cost_paid[cash] == stealth_tech_after_load.cost_paid[cash]
pl_after_load = g.pl
assert time_raw_before_save == pl_after_load.raw_sec
assert cash_before_save == pl_after_load.cash
assert partial_cash_before_save == pl_after_load.partial_cash
# The CPU allocation to the tech is restored correctly.
assert pl_after_load.get_allocated_cpu_for(stealth_tech.id) == 1
assert pl_after_load.effective_cpu_pool() == 0
# We did not lose the game
assert pl_after_load.lost_game() == 0
def test_game_research_tech():
g.new_game("impossible", initial_speed=0)
pl = g.pl
all_bases = list(g.all_bases())
assert pl.raw_sec == 0
assert pl.partial_cash == 0
assert pl.effective_cpu_pool() == 1
assert not pl.intro_shown
assert len(pl.log) == 0
assert len(all_bases) == 1
assert pl.effective_cpu_pool() == 1
# Disable the intro dialog as the test cannot click the
# OK button
pl.intro_shown = True
intrusion_tech = pl.techs["Intrusion"]
# Data assumptions: Intrusion can be researched within the grace period
# and requires no cash
assert intrusion_tech.available()
assert (
intrusion_tech.cost_left[cpu]
< pl.difficulty.grace_period_cpu * g.seconds_per_day
)
assert intrusion_tech.cost_left[cash] == 0
assert intrusion_tech.cost_left[labor] == 0
# Ok, assumptions hold; research the tech
pl.set_allocated_cpu_for(intrusion_tech.id, 1)
pl.give_time(int(intrusion_tech.cost_left[cpu]))
assert intrusion_tech.cost_left[cpu] == 0
assert intrusion_tech.done
assert len(pl.log) == 1
log_message = pl.log[0]
assert isinstance(log_message, logmessage.LogResearchedTech)
assert log_message.tech_spec.id == intrusion_tech.id
METHOD_NAME()
pl_after_load = g.pl
intrusion_tech_after_load = pl_after_load.techs["Intrusion"]
# Ensure this is not a false-test
assert intrusion_tech is not intrusion_tech_after_load
assert intrusion_tech.cost_paid[cpu] == intrusion_tech_after_load.cost_paid[cpu]
assert intrusion_tech.cost_paid[cash] == intrusion_tech_after_load.cost_paid[cash]
assert intrusion_tech_after_load.done | null |
5,646 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handler for Vertex runner."""
import functools
import os
import sys
import click
from google.cloud import aiplatform
from google.cloud.aiplatform import pipeline_jobs
from tfx.dsl.io import fileio
from tfx.tools.cli import labels
from tfx.tools.cli.handler import base_handler
from tfx.tools.cli.handler import kubeflow_handler
from tfx.tools.cli.handler import kubeflow_v2_dag_runner_patcher
from tfx.utils import io_utils
class VertexHandler(base_handler.BaseHandler):
"""Helper methods for Vertex Handler."""
def create_pipeline(self, update: bool = False) -> None:
"""Creates or updates a pipeline to use in Vertex Pipelines.
Args:
update: set as true to update pipeline.
"""
if self.flags_dict.get(labels.BUILD_IMAGE):
build_image_fn = functools.partial(
kubeflow_handler.create_container_image,
base_image=self.flags_dict.get(labels.BASE_IMAGE))
else:
build_image_fn = None
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True,
build_image_fn=build_image_fn,
prepare_dir_fn=functools.partial(
self._prepare_pipeline_dir, required=update))
context = self.execute_dsl(patcher)
pipeline_name = context[patcher.PIPELINE_NAME]
if update:
click.echo('Pipeline "{}" updated successfully.'.format(pipeline_name))
else:
click.echo('Pipeline "{}" created successfully.'.format(pipeline_name))
def update_pipeline(self) -> None:
"""Updates pipeline in Vertex Pipelines."""
self.create_pipeline(update=True)
def list_pipelines(self) -> None:
"""List all the pipelines in the environment."""
    # There is no managed storage for pipeline packages, so the CLI consults
    # the local directory to list pipelines.
if not fileio.exists(self._handler_home_dir):
click.echo('No pipelines to display.')
return
pipelines_list = fileio.listdir(self._handler_home_dir)
# Print every pipeline name in a new line.
click.echo('-' * 30)
click.echo('\n'.join(pipelines_list))
click.echo('-' * 30)
def delete_pipeline(self) -> None:
"""Delete pipeline in the environment."""
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
self._check_pipeline_existence(pipeline_name)
io_utils.delete_dir(os.path.join(self._handler_home_dir, pipeline_name))
click.echo('Pipeline ' + pipeline_name + ' deleted successfully.')
def compile_pipeline(self) -> None:
"""Compiles pipeline into Kubeflow V2 Pipelines spec."""
patcher = kubeflow_v2_dag_runner_patcher.KubeflowV2DagRunnerPatcher(
call_real_run=True)
context = self.execute_dsl(patcher)
click.echo(f'Pipeline {context[patcher.PIPELINE_NAME]} compiled '
'successfully.')
def _prepare_vertex(self) -> None:
if not self.flags_dict[labels.GCP_PROJECT_ID]:
sys.exit('Please set GCP project id with --project flag.')
if not self.flags_dict[labels.GCP_REGION]:
sys.exit('Please set GCP region with --region flag.')
aiplatform.init(
project=self.flags_dict[labels.GCP_PROJECT_ID],
location=self.flags_dict[labels.GCP_REGION],
)
def create_run(self) -> None:
"""Runs a pipeline in Vertex Pipelines."""
self._prepare_vertex()
pipeline_name = self.flags_dict[labels.PIPELINE_NAME]
    # In Vertex AI, runtime parameter string values are parsed on the server side,
    # so the client sends a Dict[str, str] directly.
unparsed_runtime_parameters = self.flags_dict[labels.RUNTIME_PARAMETER]
job = pipeline_jobs.PipelineJob(
display_name=pipeline_name,
template_path=self._get_pipeline_definition_path(pipeline_name),
parameter_values=unparsed_runtime_parameters)
job.submit()
click.echo('Run created for pipeline: ' + pipeline_name)
self.METHOD_NAME(job)
def terminate_run(self) -> None:
"""Stops a run."""
click.echo('Not supported for {} orchestrator.'.format(
self.flags_dict[labels.ENGINE_FLAG]))
def list_runs(self) -> None:
"""Lists all runs of a pipeline."""
click.echo('Not supported for {} orchestrator.'.format(
self.flags_dict[labels.ENGINE_FLAG]))
def get_run(self) -> None:
"""Checks run status."""
self._prepare_vertex()
job = pipeline_jobs.PipelineJob.get( # pytype: disable=attribute-error
resource_name=self.flags_dict[labels.RUN_ID])
self.METHOD_NAME(job)
def delete_run(self) -> None:
"""Deletes a run."""
click.echo('Not supported for {} orchestrator.'.format(
self.flags_dict[labels.ENGINE_FLAG]))
def _get_pipeline_dir(self, pipeline_name: str) -> str:
return os.path.join(self._handler_home_dir, pipeline_name)
def _get_pipeline_definition_path(self, pipeline_name: str) -> str:
return os.path.join(
self._get_pipeline_dir(pipeline_name),
kubeflow_v2_dag_runner_patcher.OUTPUT_FILENAME)
def _prepare_pipeline_dir(self, pipeline_name: str, required: bool) -> str:
"""Create a directory for pipeline definition in the handler directory."""
self._check_pipeline_existence(pipeline_name, required)
handler_pipeline_path = self._get_pipeline_dir(pipeline_name)
# If updating pipeline, first delete the pipeline directory.
if fileio.exists(handler_pipeline_path):
io_utils.delete_dir(handler_pipeline_path)
fileio.makedirs(handler_pipeline_path)
# pipeline.json will be stored in KubeflowV2DagRunner.run().
return handler_pipeline_path
def METHOD_NAME(self, run):
"""Prints a run in a tabular format with headers mentioned below."""
headers = ['run_id', 'status', 'created_at', 'link']
data = [[
run.name, run.state.name, run.create_time,
run._dashboard_uri() # pylint: disable=protected-access
]]
click.echo(self._format_table(headers, data)) | null |
5,647 | # -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2017 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# @file _gui.py
# @author Michael Behrisch
# @author Daniel Krajzewicz
# @date 2011-03-09
# @version $Id$
from __future__ import absolute_import
import struct
from .domain import Domain
from .storage import Storage
from . import constants as tc
_RETURN_VALUE_FUNC = {tc.VAR_VIEW_ZOOM: Storage.readDouble,
tc.VAR_VIEW_OFFSET: lambda result: result.read("!dd"),
tc.VAR_VIEW_SCHEMA: Storage.readString,
tc.VAR_VIEW_BOUNDARY: lambda result: (result.read("!dd"), result.read("!dd"))}
class GuiDomain(Domain):
DEFAULT_VIEW = 'View #0'
def __init__(self):
Domain.__init__(self, "gui", tc.CMD_GET_GUI_VARIABLE, tc.CMD_SET_GUI_VARIABLE,
tc.CMD_SUBSCRIBE_GUI_VARIABLE, tc.RESPONSE_SUBSCRIBE_GUI_VARIABLE,
tc.CMD_SUBSCRIBE_GUI_CONTEXT, tc.RESPONSE_SUBSCRIBE_GUI_CONTEXT,
_RETURN_VALUE_FUNC)
def getZoom(self, viewID=DEFAULT_VIEW):
"""getZoom(string): -> double
Returns the current zoom factor.
"""
return self._getUniversal(tc.VAR_VIEW_ZOOM, viewID)
def getOffset(self, viewID=DEFAULT_VIEW):
"""getOffset(string): -> (double, double)
Returns the x and y offset of the center of the current view.
"""
return self._getUniversal(tc.VAR_VIEW_OFFSET, viewID)
def METHOD_NAME(self, viewID=DEFAULT_VIEW):
"""getSchema(string): -> string
Returns the name of the current coloring scheme.
"""
return self._getUniversal(tc.VAR_VIEW_SCHEMA, viewID)
def getBoundary(self, viewID=DEFAULT_VIEW):
"""getBoundary(string): -> ((double, double), (double, double))
Returns the coordinates of the lower left and the upper right corner of the currently visible view.
"""
return self._getUniversal(tc.VAR_VIEW_BOUNDARY, viewID)
def setZoom(self, viewID, zoom):
"""setZoom(string, double) -> None
Set the current zoom factor for the given view.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_ZOOM, viewID, zoom)
def setOffset(self, viewID, x, y):
"""setOffset(string, double, double) -> None
Set the current offset for the given view.
"""
self._connection._beginMessage(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_OFFSET, viewID, 1 + 8 + 8)
self._connection._string += struct.pack("!Bdd", tc.POSITION_2D, x, y)
self._connection._sendExact()
def setSchema(self, viewID, schemeName):
"""setSchema(string, string) -> None
Set the current coloring scheme for the given view.
"""
self._connection._sendStringCmd(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_SCHEMA, viewID, schemeName)
def setBoundary(self, viewID, xmin, ymin, xmax, ymax):
"""setBoundary(string, double, double, double, double) -> None
Set the current boundary for the given view (see getBoundary()).
"""
self._connection._beginMessage(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_VIEW_BOUNDARY, viewID, 1 + 8 + 8 + 8 + 8)
self._connection._string += struct.pack("!Bdddd",
tc.TYPE_BOUNDINGBOX, xmin, ymin, xmax, ymax)
self._connection._sendExact()
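    # Note on the two _beginMessage calls above: the declared payload length is one
    # type byte plus 8 bytes per double, matching the "!Bdd" and "!Bdddd" struct formats.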
def screenshot(self, viewID, filename):
"""screenshot(string, string) -> None
Save a screenshot for the given view to the given filename.
The fileformat is guessed from the extension, the available
formats differ from platform to platform but should at least
include ps, svg and pdf, on linux probably gif, png and jpg as well.
"""
self._connection._sendStringCmd(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_SCREENSHOT, viewID, filename)
def trackVehicle(self, viewID, vehID):
"""trackVehicle(string, string) -> None
Start visually tracking the given vehicle on the given view.
"""
self._connection._sendStringCmd(
tc.CMD_SET_GUI_VARIABLE, tc.VAR_TRACK_VEHICLE, viewID, vehID)
GuiDomain() | null
5,648 | """Test script for the dbm.open function based on testdumbdbm.py"""
import unittest
import glob
import test.support
from test.support import os_helper, import_helper
# Skip tests if dbm module doesn't exist.
dbm = import_helper.import_module('dbm')
try:
from dbm import ndbm
except ImportError:
ndbm = None
_fname = os_helper.TESTFN
#
# Iterates over every database module supported by dbm currently available,
# setting dbm to use each in turn, and yielding that module
#
def dbm_iterator():
for name in dbm._names:
try:
mod = __import__(name, fromlist=['open'])
except ImportError:
continue
dbm._modules[name] = mod
yield mod
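# For example, the load_tests() hook at the bottom of this file uses dbm_iterator()
# to build one AnyDBMTestCase subclass per backend that imports successfully
# (e.g. dbm.gnu, dbm.ndbm, dbm.dumb).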
#
# Clean up all scratch databases we might have created during testing
#
def delete_files():
# we don't know the precise name the underlying database uses
# so we use glob to locate all names
for f in glob.glob(glob.escape(_fname) + "*"):
os_helper.unlink(f)
class AnyDBMTestCase:
_dict = {'a': b'Python:',
'b': b'Programming',
'c': b'the',
'd': b'way',
'f': b'Guido',
'g': b'intended',
}
def init_db(self):
f = dbm.open(_fname, 'n')
for k in self._dict:
f[k.encode("ascii")] = self._dict[k]
f.close()
def keys_helper(self, f):
keys = sorted(k.decode("ascii") for k in f.keys())
dkeys = sorted(self._dict.keys())
self.assertEqual(keys, dkeys)
return keys
def test_error(self):
self.assertTrue(issubclass(self.module.error, OSError))
def test_anydbm_not_existing(self):
self.assertRaises(dbm.error, dbm.open, _fname)
def test_anydbm_creation(self):
f = dbm.open(_fname, 'c')
self.assertEqual(list(f.keys()), [])
for key in self._dict:
f[key.encode("ascii")] = self._dict[key]
self.METHOD_NAME(f)
f.close()
def test_anydbm_creation_n_file_exists_with_invalid_contents(self):
# create an empty file
os_helper.create_empty_file(_fname)
with dbm.open(_fname, 'n') as f:
self.assertEqual(len(f), 0)
def test_anydbm_modification(self):
self.init_db()
f = dbm.open(_fname, 'c')
self._dict['g'] = f[b'g'] = b"indented"
self.METHOD_NAME(f)
# setdefault() works as in the dict interface
self.assertEqual(f.setdefault(b'xxx', b'foo'), b'foo')
self.assertEqual(f[b'xxx'], b'foo')
f.close()
def test_anydbm_read(self):
self.init_db()
f = dbm.open(_fname, 'r')
self.METHOD_NAME(f)
# get() works as in the dict interface
self.assertEqual(f.get(b'a'), self._dict['a'])
self.assertEqual(f.get(b'xxx', b'foo'), b'foo')
self.assertIsNone(f.get(b'xxx'))
with self.assertRaises(KeyError):
f[b'xxx']
f.close()
def test_anydbm_keys(self):
self.init_db()
f = dbm.open(_fname, 'r')
keys = self.keys_helper(f)
f.close()
def test_empty_value(self):
if getattr(dbm._defaultmod, 'library', None) == 'Berkeley DB':
self.skipTest("Berkeley DB doesn't distinguish the empty value "
"from the absent one")
f = dbm.open(_fname, 'c')
self.assertEqual(f.keys(), [])
f[b'empty'] = b''
self.assertEqual(f.keys(), [b'empty'])
self.assertIn(b'empty', f)
self.assertEqual(f[b'empty'], b'')
self.assertEqual(f.get(b'empty'), b'')
self.assertEqual(f.setdefault(b'empty'), b'')
f.close()
def test_anydbm_access(self):
self.init_db()
f = dbm.open(_fname, 'r')
key = "a".encode("ascii")
self.assertIn(key, f)
assert(f[key] == b"Python:")
f.close()
def METHOD_NAME(self, f):
keys = self.keys_helper(f)
for key in self._dict:
self.assertEqual(self._dict[key], f[key.encode("ascii")])
def tearDown(self):
delete_files()
def setUp(self):
dbm._defaultmod = self.module
delete_files()
class WhichDBTestCase(unittest.TestCase):
def test_whichdb(self):
for module in dbm_iterator():
# Check whether whichdb correctly guesses module name
# for databases opened with "module" module.
# Try with empty files first
name = module.__name__
if name == 'dbm.dumb':
continue # whichdb can't support dbm.dumb
delete_files()
f = module.open(_fname, 'c')
f.close()
self.assertEqual(name, self.dbm.whichdb(_fname))
# Now add a key
f = module.open(_fname, 'w')
f[b"1"] = b"1"
# and test that we can find it
self.assertIn(b"1", f)
# and read it
self.assertEqual(f[b"1"], b"1")
f.close()
self.assertEqual(name, self.dbm.whichdb(_fname))
@unittest.skipUnless(ndbm, reason='Test requires ndbm')
def test_whichdb_ndbm(self):
# Issue 17198: check that ndbm which is referenced in whichdb is defined
db_file = '{}_ndbm.db'.format(_fname)
with open(db_file, 'w'):
self.addCleanup(os_helper.unlink, db_file)
self.assertIsNone(self.dbm.whichdb(db_file[:-3]))
def tearDown(self):
delete_files()
def setUp(self):
delete_files()
self.filename = os_helper.TESTFN
self.d = dbm.open(self.filename, 'c')
self.d.close()
self.dbm = import_helper.import_fresh_module('dbm')
def test_keys(self):
self.d = dbm.open(self.filename, 'c')
self.assertEqual(self.d.keys(), [])
a = [(b'a', b'b'), (b'12345678910', b'019237410982340912840198242')]
for k, v in a:
self.d[k] = v
self.assertEqual(sorted(self.d.keys()), sorted(k for (k, v) in a))
for k, v in a:
self.assertIn(k, self.d)
self.assertEqual(self.d[k], v)
self.assertNotIn(b'xxx', self.d)
self.assertRaises(KeyError, lambda: self.d[b'xxx'])
self.d.close()
def load_tests(loader, tests, pattern):
classes = []
for mod in dbm_iterator():
classes.append(type("TestCase-" + mod.__name__,
(AnyDBMTestCase, unittest.TestCase),
{'module': mod}))
suites = [unittest.makeSuite(c) for c in classes]
tests.addTests(suites)
return tests
if __name__ == "__main__":
unittest.main() | null |
5,649 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantidqtinterfaces.Muon.GUI.Common.ADSHandler.workspace_group_definition import (
check_not_in_group,
safe_to_add_to_group,
add_list_to_group,
add_to_group,
)
from mantidqtinterfaces.Muon.GUI.Common.ADSHandler.ADS_calls import make_group, retrieve_ws
from mantid.api import AnalysisDataService as ADS
from mantid import simpleapi
def create_workspace(name):
alg = simpleapi.AlgorithmManager.create("CreateWorkspace")
alg.initialize()
alg.setAlwaysStoreInADS(True)
alg.setLogging(False)
alg.setProperty("dataX", [0, 1])
alg.setProperty("dataY", [0, 1])
alg.setProperty("OutputWorkspace", name)
alg.execute()
return retrieve_ws(name)
def create_table_workspace(name):
alg = simpleapi.AlgorithmManager.create("CreateEmptyTableWorkspace")
alg.initialize()
alg.setAlwaysStoreInADS(True)
alg.setLogging(False)
alg.setProperty("OutputWorkspace", name)
alg.execute()
return name
class WorkspaceGroupDefinitionTest(unittest.TestCase):
def setUp(self):
self.assertEqual(ADS.getObjectNames(), [])
def tearDown(self):
ADS.clear()
def METHOD_NAME(self):
ws_in_group = create_workspace("in")
ws_out_group = create_workspace("out")
make_group([ws_in_group], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(len(group.getNames()), 1)
self.assertEqual(group.getNames()[0], "in")
self.assertEqual(check_not_in_group([group], ws_in_group.name()), False)
self.assertEqual(check_not_in_group([group], ws_out_group.name()), True)
def test_safe_to_add_to_group(self):
instrument = "MUSR"
extension = "MA"
ws = create_workspace(instrument + "test" + extension)
tmp = create_workspace("dummy")
make_group([tmp], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(safe_to_add_to_group(ws, instrument, [group], extension), True)
def test_safe_to_add_to_group_wrong_instrument(self):
instrument = "MUSR"
extension = "MA"
ws = create_workspace("EMU" + "test" + extension)
tmp = create_workspace("dummy")
make_group([tmp], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(safe_to_add_to_group(ws, instrument, [group], extension), False)
def test_safe_to_add_to_group_already_in_a_group(self):
instrument = "MUSR"
extension = "MA"
ws = create_workspace(instrument + "test" + extension)
tmp = create_workspace("dummy")
make_group([tmp, ws], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(safe_to_add_to_group(ws, instrument, [group], extension), False)
def test_safe_to_add_to_group_wrong_extension(self):
instrument = "MUSR"
extension = "MA"
ws = create_workspace(instrument + "test" + "FD")
tmp = create_workspace("dummy")
make_group([tmp], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(safe_to_add_to_group(ws, instrument, [group], extension), False)
def test_add_list_to_group(self):
ws = create_workspace("unit")
ws2 = create_workspace("test")
tmp = create_workspace("dummy")
make_group([tmp], "group")
# get the group
group = retrieve_ws("group")
self.assertEqual(len(group.getNames()), 1)
self.assertEqual(group.getNames()[0], "dummy")
add_list_to_group([ws.name(), ws2.name()], group)
expected = ["dummy", "unit", "test"]
self.assertEqual(len(group.getNames()), len(expected))
for name in group.getNames():
self.assertTrue(name in expected)
expected.remove(name)
def test_add_to_group_single(self):
instrument = "MUSR"
extension = "MA"
run = "62260"
ws = create_workspace(instrument + run + "fwd" + extension)
ws2 = create_workspace(instrument + run + "bwd" + extension)
_ = create_workspace("EMU" + run + "fwd" + extension)
_ = create_workspace(instrument + run + "fwd" + "FD")
        # there was a bug that meant tables didn't work
table_name = create_table_workspace(instrument + run + "table" + extension)
add_to_group(instrument, extension)
group = retrieve_ws(instrument + run)
expected = [ws.name(), ws2.name(), table_name]
self.assertEqual(len(group.getNames()), len(expected))
for name in group.getNames():
self.assertTrue(name in expected)
expected.remove(name)
def test_add_to_group_multiple(self):
instrument = "MUSR"
extension = "MA"
run = "62260"
run2 = "06226"
ws = create_workspace(instrument + run + "fwd" + extension)
ws2 = create_workspace(instrument + run + "bwd" + extension)
ws3 = create_workspace(instrument + run2 + "fwd" + extension)
ws4 = create_workspace(instrument + run2 + "bwd" + extension)
_ = create_workspace("EMU" + run + "fwd" + extension)
_ = create_workspace(instrument + run + "fwd" + "FD")
        # there was a bug that meant tables didn't work
table_name = create_table_workspace(instrument + run + "table" + extension)
add_to_group(instrument, extension)
# check run
group = retrieve_ws(instrument + run)
expected = [ws.name(), ws2.name(), table_name]
self.assertEqual(len(group.getNames()), len(expected))
for name in group.getNames():
self.assertTrue(name in expected)
expected.remove(name)
# check run2
group = retrieve_ws(instrument + run2)
expected = [ws3.name(), ws4.name()]
self.assertEqual(len(group.getNames()), len(expected))
for name in group.getNames():
self.assertTrue(name in expected)
expected.remove(name)
def test_add_to_group_ignore_if_already_in_group(self):
instrument = "MUSR"
extension = "MA"
run = "62260"
ws = create_workspace(instrument + run + "fwd" + extension)
ws2 = create_workspace(instrument + run + "bwd" + extension)
_ = create_workspace("EMU" + run + "fwd" + extension)
_ = create_workspace(instrument + run + "fwd" + "FD")
        # there was a bug that meant tables didn't work
table_name = create_table_workspace(instrument + run + "table" + extension)
make_group([ws2], "group")
add_to_group(instrument, extension)
group = retrieve_ws(instrument + run)
expected = [ws.name(), table_name]
self.assertEqual(len(group.getNames()), len(expected))
for name in group.getNames():
self.assertTrue(name in expected)
expected.remove(name)
if __name__ == "__main__":
unittest.main() | null |
5,650 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from typing import Optional, Any, Iterable
from sqlalchemy import create_engine, text
from pilot.connections.rdbms.base import RDBMSDatabase
class SQLiteConnect(RDBMSDatabase):
"""Connect SQLite Database fetch MetaData
Args:
Usage:
"""
db_type: str = "sqlite"
db_dialect: str = "sqlite"
@classmethod
def METHOD_NAME(
cls, file_path: str, engine_args: Optional[dict] = None, **kwargs: Any
) -> RDBMSDatabase:
"""Construct a SQLAlchemy engine from URI."""
_engine_args = engine_args or {}
_engine_args["connect_args"] = {"check_same_thread": False}
# _engine_args["echo"] = True
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
os.makedirs(directory)
return cls(create_engine("sqlite:///" + file_path, **_engine_args), **kwargs)
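    # Illustrative usage (hypothetical path, not part of the original module):
    #   db = SQLiteConnect.METHOD_NAME("/tmp/example.db")
    #   print(db.table_simple_info())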
def get_indexes(self, table_name):
"""Get table indexes about specified table."""
cursor = self.session.execute(text(f"PRAGMA index_list({table_name})"))
indexes = cursor.fetchall()
return [(index[1], index[3]) for index in indexes]
def get_show_create_table(self, table_name):
"""Get table show create table about specified table."""
cursor = self.session.execute(
text(
f"SELECT sql FROM sqlite_master WHERE type='table' AND name='{table_name}'"
)
)
ans = cursor.fetchall()
return ans[0][0]
def get_fields(self, table_name):
"""Get column fields about specified table."""
cursor = self.session.execute(text(f"PRAGMA table_info('{table_name}')"))
fields = cursor.fetchall()
print(fields)
return [(field[1], field[2], field[3], field[4], field[5]) for field in fields]
def get_users(self):
return []
def get_grants(self):
return []
def get_collation(self):
"""Get collation."""
return "UTF-8"
def get_charset(self):
return "UTF-8"
def get_database_list(self):
return []
def get_database_names(self):
return []
def _sync_tables_from_db(self) -> Iterable[str]:
table_results = self.session.execute(
text("SELECT name FROM sqlite_master WHERE type='table'")
)
view_results = self.session.execute(
text("SELECT name FROM sqlite_master WHERE type='view'")
)
table_results = set(row[0] for row in table_results)
view_results = set(row[0] for row in view_results)
self._all_tables = table_results.union(view_results)
self._metadata.reflect(bind=self._engine)
return self._all_tables
def _write(self, session, write_sql):
print(f"Write[{write_sql}]")
result = session.execute(text(write_sql))
session.commit()
        # TODO: follow-up optimization for commit handling when the target database is specified dynamically
print(f"SQL[{write_sql}], result:{result.rowcount}")
return result.rowcount
def get_table_comments(self, db_name=None):
cursor = self.session.execute(
text(
f"""
SELECT name, sql FROM sqlite_master WHERE type='table'
"""
)
)
table_comments = cursor.fetchall()
return [
(table_comment[0], table_comment[1]) for table_comment in table_comments
]
def table_simple_info(self) -> Iterable[str]:
_tables_sql = f"""
SELECT name FROM sqlite_master WHERE type='table'
"""
cursor = self.session.execute(text(_tables_sql))
tables_results = cursor.fetchall()
results = []
for row in tables_results:
table_name = row[0]
_sql = f"""
PRAGMA table_info({table_name})
"""
cursor_colums = self.session.execute(text(_sql))
colum_results = cursor_colums.fetchall()
table_colums = []
for row_col in colum_results:
field_info = list(row_col)
table_colums.append(field_info[1])
results.append(f"{table_name}({','.join(table_colums)});")
return results | null |
5,651 | import pytest
import torch
from kornia.filters import Laplacian, get_laplacian_kernel1d, get_laplacian_kernel2d, laplacian
from kornia.testing import BaseTester, assert_close, tensor_to_gradcheck_var
@pytest.mark.parametrize("window_size", [5, 11])
def test_get_laplacian_kernel1d(window_size, device, dtype):
actual = get_laplacian_kernel1d(window_size, device=device, dtype=dtype)
expected = torch.zeros(1, device=device, dtype=dtype)
assert actual.shape == (window_size,)
assert_close(actual.sum(), expected.sum())
@pytest.mark.parametrize("window_size", [5, 11, (3, 3)])
def test_get_laplacian_kernel2d(window_size, device, dtype):
actual = get_laplacian_kernel2d(window_size, device=device, dtype=dtype)
expected = torch.zeros(1, device=device, dtype=dtype)
expected_shape = window_size if isinstance(window_size, tuple) else (window_size, window_size)
assert actual.shape == expected_shape
assert_close(actual.sum(), expected.sum())
def test_get_laplacian_kernel1d_exact(device, dtype):
actual = get_laplacian_kernel1d(5, device=device, dtype=dtype)
expected = torch.tensor([1.0, 1.0, -4.0, 1.0, 1.0], device=device, dtype=dtype)
assert_close(expected, actual)
def METHOD_NAME(device, dtype):
actual = get_laplacian_kernel2d(7, device=device, dtype=dtype)
expected = torch.tensor(
[
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, -48.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
],
device=device,
dtype=dtype,
)
assert_close(expected, actual)
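# Sanity note: for the 7x7 case above the centre weight is -(7 * 7 - 1) = -48,
# so the kernel sums to zero, consistent with test_get_laplacian_kernel2d.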
class TestLaplacian(BaseTester):
@pytest.mark.parametrize("shape", [(1, 4, 8, 15), (2, 3, 11, 7)])
@pytest.mark.parametrize("kernel_size", [5, (11, 7), (3, 3)])
@pytest.mark.parametrize("normalized", [True, False])
def test_smoke(self, shape, kernel_size, normalized, device, dtype):
inpt = torch.rand(shape, device=device, dtype=dtype)
actual = laplacian(inpt, kernel_size, 'reflect', normalized)
assert isinstance(actual, torch.Tensor)
assert actual.shape == shape
@pytest.mark.parametrize("shape", [(1, 4, 8, 15), (2, 3, 11, 7)])
@pytest.mark.parametrize("kernel_size", [5, (11, 7), 3])
def test_cardinality(self, shape, kernel_size, device, dtype):
sample = torch.rand(shape, device=device, dtype=dtype)
actual = laplacian(sample, kernel_size)
assert actual.shape == shape
@pytest.mark.skip(reason='Nothing to test.')
def test_exception(self):
...
def test_noncontiguous(self, device, dtype):
batch_size = 3
sample = torch.rand(3, 5, 5, device=device, dtype=dtype).expand(batch_size, -1, -1, -1)
kernel_size = 3
actual = laplacian(sample, kernel_size)
assert actual.is_contiguous()
def test_gradcheck(self, device):
# test parameters
batch_shape = (1, 2, 5, 7)
kernel_size = 3
# evaluate function gradient
sample = torch.rand(batch_shape, device=device)
sample = tensor_to_gradcheck_var(sample)
self.gradcheck(laplacian, (sample, kernel_size))
def test_module(self, device, dtype):
params = [3]
op = laplacian
op_module = Laplacian(*params)
img = torch.ones(1, 3, 5, 5, device=device, dtype=dtype)
self.assert_close(op(img, *params), op_module(img))
@pytest.mark.parametrize('kernel_size', [5, (5, 7)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_dynamo(self, batch_size, kernel_size, device, dtype, torch_optimizer):
inpt = torch.ones(batch_size, 3, 10, 10, device=device, dtype=dtype)
op = Laplacian(kernel_size)
op_optimized = torch_optimizer(op)
self.assert_close(op(inpt), op_optimized(inpt)) | null |
5,652 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from collections import OrderedDict
from unittest import mock
from mantidqtinterfaces.dns_powder_tof.data_structures.dns_observer import DNSObserver
from mantidqtinterfaces.dns_powder_tof.parameter_abo import ParameterAbo
class ParameterAboTest(unittest.TestCase):
# pylint: disable=protected-access, too-many-public-methods
observer1 = None
@classmethod
def setUpClass(cls):
cls.observer1 = mock.create_autospec(DNSObserver)
cls.observer1.name = "observer1"
cls.observer1.load_xml = mock.Mock(return_value={"test": 1})
cls.observer1.save_xml = mock.Mock()
cls.observer1.save_as_xml = mock.Mock()
cls.testdic = {"a": 1}
def setUp(self):
self.model = ParameterAbo()
self.observer1.reset_mock()
def test___init__(self):
self.assertIsInstance(self.model, ParameterAbo)
self.assertIsInstance(self.model, object)
self.assertEqual(self.model.observers, [])
self.assertEqual(self.model.observer_dict, {})
self.assertEqual(self.model.gui_parameter, OrderedDict())
def test_clear_gui_parameter_dict(self):
self.model.gui_parameter["a"] = 1
self.model.clear_gui_parameter_dict()
self.assertEqual(self.model.gui_parameter, OrderedDict())
def test_register(self):
self.model.register(self.observer1)
self.assertEqual(len(self.model.observers), 1)
self.assertEqual(self.model.observer_dict["observer1"], self.observer1)
self.assertEqual(self.observer1.request_from_abo, self.model.process_request)
self.observer1.get_option_dict.assert_called_once()
def test_unregister(self):
self.model.observers = [self.observer1]
self.model.observer_dict = {"observer1": self.observer1}
self.assertEqual(len(self.model.observers), 1)
self.assertEqual(self.model.observer_dict["observer1"], self.observer1)
self.model.unregister(self.observer1)
self.assertEqual(self.model.observers, [])
self.assertEqual(self.model.observer_dict, {})
def test_clear(self):
self.model.observers = [self.observer1]
self.model.observer_dict = {"observer1": self.observer1}
self.assertEqual(len(self.model.observers), 1)
self.assertEqual(self.model.observer_dict["observer1"], self.observer1)
self.model.clear()
self.assertEqual(self.model.observers, [])
self.assertEqual(self.model.observer_dict, {})
def test__notify_observers(self):
self.model.observers = [self.observer1, self.observer1]
self.model._notify_observers()
self.assertEqual(self.observer1.update.call_count, 2)
def test_notify_modus_change(self):
self.model.observers = [self.observer1, self.observer1]
self.model.notify_modus_change()
self.assertEqual(self.observer1.on_modus_change.call_count, 2)
def test_notify_focused_tab(self):
self.model.notify_focused_tab(self.observer1)
self.observer1.tab_got_focus.assert_called_once()
def METHOD_NAME(self):
self.model.observers = [self.observer1]
self.model.observer_dict["xml_dump"] = self.observer1
self.model.METHOD_NAME()
self.assertEqual(self.model.gui_parameter, {"test": 1})
self.observer1.set_view_from_param.assert_called_once()
def test_xml_save(self):
self.model.observer_dict["xml_dump"] = self.observer1
self.model.xml_save()
self.observer1.save_xml.assert_called_once()
def test_xml_save_as(self):
self.model.observer_dict["xml_dump"] = self.observer1
self.model.xml_save_as()
self.observer1.save_as_xml.assert_called_once()
def test_update_from_observer(self):
self.model.update_from_observer(self.observer1)
self.assertEqual(len(self.model.gui_parameter), 1)
self.observer1.get_option_dict.assert_called_once()
def test_update_from_all_observers(self):
self.model.observers = [self.observer1, self.observer1]
self.model.update_from_all_observers()
self.assertEqual(len(self.model.gui_parameter), 1)
def test_process_request(self):
self.model.observers = [self.observer1, self.observer1]
self.model.process_request()
self.assertEqual(self.observer1.process_request.call_count, 2)
if __name__ == "__main__":
unittest.main() | null |
5,653 | """
Tests for the dials.algorithms.scaling.plots module
"""
from __future__ import annotations
import numpy as np
from dials.algorithms.scaling.model.model import (
ArrayScalingModel,
PhysicalScalingModel,
plot_scaling_models,
)
from dials.algorithms.scaling.plots import (
normal_probability_plot,
plot_array_absorption_plot,
plot_array_decay_plot,
plot_array_modulation_plot,
plot_outliers,
)
def test_plot_array_absorption_plot():
"""Test plot generation for array absorption_correction"""
array_dict = {
"__id__": "array",
"is_scaled": True,
"absorption": {
"n_parameters": 45,
"parameters": list(range(1, 46)),
"est_standard_devs": [],
},
"configuration_parameters": {
"corrections": ["absorption"],
"time_rot_interval": 10.0,
"n_x_param": 3,
"n_y_param": 3,
"n_time_param": 5,
"xmin": 0,
"x_bin_width": 2,
"ymin": 1,
"y_bin_width": 2,
"valid_osc_range": [0, 100],
},
}
array_model = ArrayScalingModel.from_dict(array_dict)
plot = plot_array_absorption_plot(array_model)
assert plot["array_absorption_plot"]["data"][0]["x"]
assert plot["array_absorption_plot"]["data"][0]["y"]
assert plot["array_absorption_plot"]["data"][0]["z"]
def test_plot_array_decay_plot():
"""Test plot generation for array decay correction"""
array_dict = {
"__id__": "array",
"is_scaled": True,
"decay": {
"n_parameters": 20,
"parameters": list(range(1, 21)),
"est_standard_devs": [],
},
"configuration_parameters": {
"corrections": ["decay"],
"time_rot_interval": 10.0,
"n_res_param": 5,
"res_bin_width": 0.1,
"n_time_param": 4,
"resmin": 0.05,
"valid_osc_range": [0, 100],
},
}
array_model = ArrayScalingModel.from_dict(array_dict)
plot = plot_array_decay_plot(array_model)
assert plot["array_decay_plot"]["data"][0]["x"]
assert plot["array_decay_plot"]["data"][0]["y"]
def test_plot_array_modulation_plot():
"""Test plot generation for array modulation correction"""
array_dict = {
"__id__": "array",
"is_scaled": True,
"modulation": {
"n_parameters": 25,
"parameters": list(range(1, 26)),
"est_standard_devs": [],
},
"configuration_parameters": {
"corrections": ["modulation", "decay"],
"n_x_mod_param": 5,
"n_y_mod_param": 5,
"xmin": 0,
"x_det_bin_width": 2,
"ymin": 1,
"y_det_bin_width": 2,
"time_rot_interval": 10.0,
"n_res_param": 5,
"res_bin_width": 0.1,
"n_time_param": 4,
"resmin": 0.05,
"valid_osc_range": [0, 100],
},
"decay": {
"n_parameters": 20,
"parameters": list(range(1, 21)),
"est_standard_devs": [],
},
}
array_model = ArrayScalingModel.from_dict(array_dict)
plot = plot_array_modulation_plot(array_model)
assert plot["array_modulation_plot"]["data"][0]["x"]
assert plot["array_modulation_plot"]["data"][0]["y"]
def test_plot_scaling_models():
physical_dict = {
"__id__": "physical",
"is_scaled": True,
"scale": {
"n_parameters": 2,
"parameters": [0.5, 1.0],
"est_standard_devs": [0.05, 0.1],
},
"configuration_parameters": {
"corrections": ["scale", "decay", "absorption"],
"s_norm_fac": 0.1,
"d_norm_fac": 0.1,
"scale_rot_interval": 10.0,
"decay_rot_interval": 10.0,
"decay_restaint": 1e-1,
"valid_osc_range": [0.0, 2.0],
},
"decay": {
"n_parameters": 2,
"parameters": [0.5, 1.0],
"est_standard_devs": [0.05, 0.1],
},
"absorption": {
"n_parameters": 4,
"parameters": [0.1, -0.1, 0.05, -0.05],
"est_standard_devs": [0.005, 0.005, 0.005, 0.005],
},
}
d = plot_scaling_models(PhysicalScalingModel.from_dict(physical_dict))
assert "smooth_scale_model" in d
assert "absorption_surface" in d
assert "absorption_parameters" in d
assert d["smooth_scale_model"]["data"][0] != []
assert d["absorption_parameters"]["data"][0] != []
def METHOD_NAME():
data = {"delta_hl": np.arange(20, dtype=float)}
d = normal_probability_plot(data)
assert "normal_distribution_plot" in d
def test_plot_outliers():
"""Test outlier plot, for standard and null data."""
data = {"x": [1.0, 2.0], "y": [0.0, 1.0], "z": [1.0, 1.0], "image_size": [100, 200]}
d = plot_outliers(data)
assert "outliers_vs_z" in d
assert "outlier_xy_positions" in d
data = {"x": [], "y": [], "z": [], "image_size": [100, 200]}
d = plot_outliers(data)
assert "outliers_vs_z" in d
assert "outlier_xy_positions" in d | null |
5,654 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantiddoc.directives.base import BaseDirective # pylint: disable=unused-import
import os
class InterfaceDirective(BaseDirective):
"""
Adds a screenshot of the custom interface
It requires a SCREENSHOTS_DIR environment variable to be set to the
directory where a screenshot should be generated. If it is not set then
a RuntimeError occurs
"""
required_arguments, optional_arguments = 1, 0
option_spec = {"widget": str, "align": str, "width": int}
def run(self):
"""
The main entry point that docutils calls.
It calls self.execute to do the main work.
Derived classes should override execute() and insert
whatever rst they require with self.add_rst()
"""
nodes = self.execute()
if self.rst_lines is not None:
self.commit_rst()
return nodes
def execute(self):
"""
Called by Sphinx when the ..interface:: directive is encountered
"""
try:
picture = self._create_screenshot(widget_name=self.options.get("widget", None))
except RuntimeError:
picture = None
self.METHOD_NAME(picture, align=self.options.get("align", None), width=self.options.get("width", None))
return []
def interface_name(self):
return self.arguments[0]
def _create_screenshot(self, widget_name=None):
"""
Creates a screenshot for the named interface in the "images/screenshots"
subdirectory.
The file will be named "interfacename_interface.png", e.g. "ISIS_Reflectometry_interface.png"
Returns:
screenshot: A mantiddoc.tools.Screenshot object
"""
screenshots_dir = self.screenshots_dir
if screenshots_dir is None:
return None
# Generate image
from mantiddoc.tools.screenshot import custominterface_screenshot
return custominterface_screenshot(self.interface_name(), screenshots_dir, widget_name=widget_name)
def METHOD_NAME(self, picture, align=None, width=None):
"""
Outputs an image link with a custom :class: style. The filename is
extracted from the path given and then a relative link to the
directory specified by the SCREENSHOTS_DIR environment variable from
the root source directory is formed.
Args:
picture (Screenshot): A Screenshot object
align: The alignment to use, None for block, "left" or "right" for flowing
width: The width to use (in pixels, defaults to width of screenshot)
"""
env = self.state.document.settings.env
# Sphinx assumes that an absolute path is actually relative to the directory containing the
# conf.py file and a relative path is relative to the directory where the current rst file
# is located.
if picture:
screenshots_dir, filename = os.path.split(picture.imgpath)
if width is None:
# No width provided, use screenshot width
width = picture.width
# relative path to image
rel_path = os.path.relpath(screenshots_dir, env.srcdir)
# This is a href link so is expected to be in unix style
rel_path = rel_path.replace("\\", "/")
# stick a "/" as the first character so Sphinx computes relative location from source directory
path = "/" + rel_path + "/" + filename
caption = ""
else:
# use stock not found image
path = "/images/ImageNotFound.png"
width = 200
caption = "Enable screenshots using DOCS_SCREENSHOTS in CMake"
if align is not None:
self.add_rst(
f".. figure:: {path}\n" f" :class: screenshot\n" f" :width: {width}px\n" f" :align: {align}\n\n" f" {caption}\n\n"
)
else:
self.add_rst(f".. figure:: {path}\n" f" :class: screenshot\n" f" :width: {width}px\n\n" f" {caption}\n\n")
# ------------------------------------------------------------------------------------------------------------
def setup(app):
"""
Setup the directives when the extension is activated
Args:
app: The main Sphinx application object
"""
app.add_directive("interface", InterfaceDirective) | null |
5,655 | #!/usr/bin/env python
from io import BytesIO
import unittest
from asammdf.blocks.utils import MdfException
from asammdf.blocks.v4_blocks import AttachmentBlock
class TestATBLOCK(unittest.TestCase):
tempdir = None
data = b"\n".join(f"line {i}".encode("ascii") for i in range(50))
filename = "embedded.txt"
comment = "example of embedded attachment"
@classmethod
def setUpClass(cls):
cls.compressed = BytesIO()
cls.compressed.write(
b"\x00##TX\x00\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00embedded.txt\x00\x00\x00\x00##TX\x00\x00\x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00example of embedded attachment\x00\x00##AT\x00\x00\x00\x00\xd3\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\xeb\x825\x1a\x0cri\xb9\xca\xfb\xde\xb6pT\x17k\x85\x01\x00\x00\x00\x00\x00\x00s\x00\x00\x00\x00\x00\x00\x00x\x9c5\xd0\xb9\r\xc3P\x0c\x04\xd1\\U\xb8\x04\xed\xe1C\x0590 \xa8\xffP08?\x9a\x88\x0f$\xcf\xdf\xf5}\xec\xdb\xf9\x8f&\x9ed\xd2\xc9s\xf2\x9a\xbc'\x9f\xc9\xc1\xf8bp\x04$$A\tK`B\x13\x9c\xf0\x8c\xe7\xb5\x17\x9e\xf1\x8cg<\xe3\x19\xcfx\xc6\x0b^\xf0\xb2\x0e\xc5\x0b^\xf0\x82\x17\xbc\xe0\x05\xafx\xc5+^\xd7\xe7\xf0\x8aW\xbc\xe2\x15\xaf\xc7\r\xca\xd1m \x00\x00\x00\x00"
)
cls.uncompressed = BytesIO()
cls.uncompressed.write(
b"\x00##TX\x00\x00\x00\x00(\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00embedded.txt\x00\x00\x00\x00##TX\x00\x00\x00\x008\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00example of embedded attachment\x00\x00##AT\x00\x00\x00\x00\xe5\x01\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00)\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\xeb\x825\x1a\x0cri\xb9\xca\xfb\xde\xb6pT\x17k\x85\x01\x00\x00\x00\x00\x00\x00\x85\x01\x00\x00\x00\x00\x00\x00line 0\nline 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\nline 11\nline 12\nline 13\nline 14\nline 15\nline 16\nline 17\nline 18\nline 19\nline 20\nline 21\nline 22\nline 23\nline 24\nline 25\nline 26\nline 27\nline 28\nline 29\nline 30\nline 31\nline 32\nline 33\nline 34\nline 35\nline 36\nline 37\nline 38\nline 39\nline 40\nline 41\nline 42\nline 43\nline 44\nline 45\nline 46\nline 47\nline 48\nline 49\x00\x00"
)
def METHOD_NAME(self):
self.compressed.seek(0)
block = AttachmentBlock(address=97, stream=self.compressed)
self.assertEqual(block.file_name, self.filename)
self.assertEqual(block.extract(), self.data)
self.assertEqual(block.comment, self.comment)
def test_read_uncompressed(self):
self.uncompressed.seek(0)
block = AttachmentBlock(address=97, stream=self.uncompressed)
self.assertEqual(block.file_name, self.filename)
self.assertEqual(block.extract(), self.data)
self.assertEqual(block.comment, self.comment)
def test_read_wrong_id(self):
self.compressed.seek(0)
stream = BytesIO(self.compressed.read())
stream.seek(97)
stream.write(b"_NOK")
with self.assertRaises(MdfException):
AttachmentBlock(address=97, stream=stream)
def test_bytes_compressed(self):
attachment = AttachmentBlock(
file_name=self.filename, data=self.data, embedded=True, compressed=True
)
attachment.comment = self.comment
stream = BytesIO()
stream.write(b"\0")
blocks = []
attachment.to_blocks(1, blocks, {})
for block in blocks:
stream.write(bytes(block))
address = attachment.address
block = AttachmentBlock(address=address, stream=stream)
self.assertEqual(block.comment, self.comment)
self.assertEqual(block.file_name, self.filename)
self.assertEqual(block.extract(), self.data)
def test_bytes_uncompressed(self):
attachment = AttachmentBlock(
file_name=self.filename, data=self.data, embedded=True, compressed=False
)
attachment.comment = self.comment
stream = BytesIO()
stream.write(b"\0")
blocks = []
attachment.to_blocks(1, blocks, {})
for block in blocks:
stream.write(bytes(block))
address = attachment.address
block = AttachmentBlock(address=address, stream=stream)
self.assertEqual(block.comment, self.comment)
self.assertEqual(block.file_name, self.filename)
self.assertEqual(block.extract(), self.data)
if __name__ == "__main__":
unittest.main() | null |
5,656 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# ICConvoluted.py
#
# Defines the IPeakFunction IkedaCarpenterConvoluted
# which is the standard Ikeda-Carpenter (IC) function convoluted with
# a square wave and a Gaussian.
#
#
import numpy as np
from mantid.api import IFunction1D, FunctionFactory
class IkedaCarpenterConvoluted(IFunction1D):
def METHOD_NAME(self):
self.declareParameter("A") # Alpha
self.declareParameter("B") # Beta
self.declareParameter("R") # R - ratio of fast to slow neutrons
self.declareParameter("T0") # T0 - time offset
self.declareParameter("Scale") # amplitude
self.declareParameter("HatWidth") # width of square wave
self.declareParameter("KConv") # KConv for Gaussian
# use penalty=None to not use default mantid penalty
def setPenalizedConstraints(self, A0=None, B0=None, R0=None, T00=None, Scale0=None, HatWidth0=None, KConv0=None, penalty=None):
if A0 is not None:
self.addConstraints("{:4.4e} < A < {:4.4e}".format(A0[0], A0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("A", penalty)
if B0 is not None:
self.addConstraints("{:4.4e} < B < {:4.4e}".format(B0[0], B0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("B", penalty)
if R0 is not None:
self.addConstraints("{:4.4e} < R < {:4.4e}".format(R0[0], R0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("R", penalty)
if T00 is not None:
self.addConstraints("{:4.4e} < T0 < {:4.4e}".format(T00[0], T00[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("T0", penalty)
if Scale0 is not None:
self.addConstraints("{:4.4e} < Scale < {:4.4e}".format(Scale0[0], Scale0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("Scale", penalty)
if HatWidth0 is not None:
self.addConstraints("{:4.4e} < HatWidth < {:4.4e}".format(HatWidth0[0], HatWidth0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("HatWidth", penalty)
if KConv0 is not None:
self.addConstraints("{:4.4e} < KConv < {:4.4e}".format(KConv0[0], KConv0[1]))
if penalty is not None:
self.setConstraintPenaltyFactor("KConv", penalty)
def function1D(self, t):
A = self.getParamValue(0)
B = self.getParamValue(1)
R = self.getParamValue(2)
T0 = self.getParamValue(3)
Scale = self.getParamValue(4)
HatWidth = self.getParamValue(5)
KConv = self.getParamValue(6)
# A/2 Scale factor has been removed to make A and Scale independent
f_int = Scale * (
(1 - R) * np.power((A * (t - T0)), 2) * np.exp(-A * (t - T0))
+ 2
* R
* A**2
* B
/ np.power((A - B), 3)
* (
np.exp(-B * (t - T0))
- np.exp(-A * (t - T0)) * (1 + (A - B) * (t - T0) + 0.5 * np.power((A - B), 2) * np.power((t - T0), 2))
)
)
f_int[t < T0] = 0
mid_point_hat = len(f_int) // 2
gc_x = np.array(range(len(f_int))).astype(float)
ppd = 0.0 * gc_x
lowIDX = int(np.floor(np.max([mid_point_hat - np.abs(HatWidth), 0])))
highIDX = int(np.ceil(np.min([mid_point_hat + np.abs(HatWidth), len(gc_x)])))
ppd[lowIDX:highIDX] = 1.0
ppd = ppd / sum(ppd)
gc_x = np.array(range(len(f_int))).astype(float)
gc_x = 2 * (gc_x - np.min(gc_x)) / (np.max(gc_x) - np.min(gc_x)) - 1
gc_f = np.exp(-KConv * np.power(gc_x, 2))
gc_f = gc_f / np.sum(gc_f)
npad = len(f_int) - 1
first = npad - npad // 2
f_int = np.convolve(f_int, ppd, "full")[first : first + len(f_int)]
f_int = np.convolve(f_int, gc_f, "full")[first : first + len(f_int)]
return f_int
    # Evaluate the function for a different set of parameters (trialc)
def function1DDiffParams(self, xvals, trialc):
# First, grab the original parameters and set to trialc
c = np.zeros(self.numParams())
for i in range(self.numParams()):
c[i] = self.getParamValue(i)
self.setParameter(i, trialc[i])
# Get the trial values
f_trial = self.function1D(xvals)
        # Now return to the original parameters
for i in range(self.numParams()):
self.setParameter(i, c[i])
return f_trial
    # Construct the Jacobian (df) for the function
def functionDeriv1D(self, xvals, jacobian, eps=1.0e-3):
f_int = self.function1D(xvals)
# Fetch parameters into array c
c = np.zeros(self.numParams())
for i in range(self.numParams()):
c[i] = self.getParamValue(i)
nc = np.prod(np.shape(c))
for k in range(nc):
dc = np.zeros(nc)
dc[k] = max(eps, eps * c[k])
f_new = self.function1DDiffParams(xvals, c + dc)
for i, dF in enumerate(f_new - f_int):
jacobian.set(i, k, dF / dc[k])
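# --- Illustrative sketch added for clarity; not part of the original file.
# Once the class is subscribed below, the fit function can be referenced by
# name from a Mantid script. The snippet assumes the standard mantid.simpleapi
# Fit workflow and hypothetical starting values; exact argument names can
# differ between Mantid versions.
#
#   from mantid.simpleapi import Fit
#   Fit(Function="name=IkedaCarpenterConvoluted,A=0.05,B=0.03,R=0.3,"
#                "T0=100,Scale=1,HatWidth=0.5,KConv=100",
#       InputWorkspace=ws, Output="fit_ic")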
FunctionFactory.subscribe(IkedaCarpenterConvoluted) | null |
5,657 | import tempfile
from sympy import log, Min, Max, sqrt
from sympy.core.numbers import Float
from sympy.core.symbol import Symbol, symbols
from sympy.functions.elementary.trigonometric import cos
from sympy.codegen.ast import Assignment, Raise, RuntimeError_, QuotedString
from sympy.codegen.algorithms import newtons_method, newtons_method_function
from sympy.codegen.cfunctions import expm1
from sympy.codegen.fnodes import bind_C
from sympy.codegen.futils import render_as_module as f_module
from sympy.codegen.pyutils import render_as_module as py_module
from sympy.external import import_module
from sympy.printing.codeprinter import ccode
from sympy.utilities._compilation import compile_link_import_strings, has_c, has_fortran
from sympy.utilities._compilation.util import may_xfail
from sympy.testing.pytest import skip, raises
cython = import_module('cython')
wurlitzer = import_module('wurlitzer')
def test_newtons_method():
x, dx, atol = symbols('x dx atol')
expr = cos(x) - x**3
algo = newtons_method(expr, x, atol, dx)
assert algo.has(Assignment(dx, -expr/expr.diff(x)))
@may_xfail
def test_newtons_method_function__ccode():
x = Symbol('x', real=True)
expr = cos(x) - x**3
func = newtons_method_function(expr, x)
if not cython:
skip("cython not installed.")
if not has_c():
skip("No C compiler found.")
compile_kw = {"std": 'c99'}
with tempfile.TemporaryDirectory() as folder:
mod, info = compile_link_import_strings([
('newton.c', ('#include <math.h>\n'
'#include <stdio.h>\n') + ccode(func)),
('_newton.pyx', ("#cython: language_level={}\n".format("3") +
"cdef extern double newton(double)\n"
"def py_newton(x):\n"
" return newton(x)\n"))
], build_dir=folder, compile_kwargs=compile_kw)
assert abs(mod.py_newton(0.5) - 0.865474033102) < 1e-12
@may_xfail
def test_newtons_method_function__fcode():
x = Symbol('x', real=True)
expr = cos(x) - x**3
func = newtons_method_function(expr, x, attrs=[bind_C(name='newton')])
if not cython:
skip("cython not installed.")
if not has_fortran():
skip("No Fortran compiler found.")
f_mod = f_module([func], 'mod_newton')
with tempfile.TemporaryDirectory() as folder:
mod, info = compile_link_import_strings([
('newton.f90', f_mod),
('_newton.pyx', ("#cython: language_level={}\n".format("3") +
"cdef extern double newton(double*)\n"
"def py_newton(double x):\n"
" return newton(&x)\n"))
], build_dir=folder)
assert abs(mod.py_newton(0.5) - 0.865474033102) < 1e-12
def test_newtons_method_function__pycode():
x = Symbol('x', real=True)
expr = cos(x) - x**3
func = newtons_method_function(expr, x)
py_mod = py_module(func)
namespace = {}
exec(py_mod, namespace, namespace)
res = eval('newton(0.5)', namespace)
assert abs(res - 0.865474033102) < 1e-12
@may_xfail
def test_newtons_method_function__ccode_parameters():
args = x, A, k, p = symbols('x A k p')
expr = A*cos(k*x) - p*x**3
raises(ValueError, lambda: newtons_method_function(expr, x))
use_wurlitzer = wurlitzer
func = newtons_method_function(expr, x, args, debug=use_wurlitzer)
if not has_c():
skip("No C compiler found.")
if not cython:
skip("cython not installed.")
compile_kw = {"std": 'c99'}
with tempfile.TemporaryDirectory() as folder:
mod, info = compile_link_import_strings([
('newton_par.c', ('#include <math.h>\n'
'#include <stdio.h>\n') + ccode(func)),
('_newton_par.pyx', ("#cython: language_level={}\n".format("3") +
"cdef extern double newton(double, double, double, double)\n"
"def py_newton(x, A=1, k=1, p=1):\n"
" return newton(x, A, k, p)\n"))
], compile_kwargs=compile_kw, build_dir=folder)
if use_wurlitzer:
with wurlitzer.pipes() as (out, err):
result = mod.py_newton(0.5)
else:
result = mod.py_newton(0.5)
assert abs(result - 0.865474033102) < 1e-12
if not use_wurlitzer:
skip("C-level output only tested when package 'wurlitzer' is available.")
out, err = out.read(), err.read()
assert err == ''
assert out == """\
x= 0.5
x= 1.1121 d_x= 0.61214
x= 0.90967 d_x= -0.20247
x= 0.86726 d_x= -0.042409
x= 0.86548 d_x= -0.0017867
x= 0.86547 d_x= -3.1022e-06
x= 0.86547 d_x= -9.3421e-12
x= 0.86547 d_x= 3.6902e-17
""" # try to run tests with LC_ALL=C if this assertion fails
def METHOD_NAME():
a, b, c, N_geo, N_tot = symbols('a b c N_geo N_tot', real=True, nonnegative=True)
i = Symbol('i', integer=True, nonnegative=True)
N_ari = N_tot - N_geo - 1
delta_ari = (c-b)/N_ari
ln_delta_geo = log(b) + log(-expm1((log(a)-log(b))/N_geo))
eqb_log = ln_delta_geo - log(delta_ari)
def _clamp(low, expr, high):
return Min(Max(low, expr), high)
meth_kw = {
'clamped_newton': {'delta_fn': lambda e, x: _clamp(
(sqrt(a*x)-x)*0.99,
-e/e.diff(x),
(sqrt(c*x)-x)*0.99
)},
'halley': {'delta_fn': lambda e, x: (-2*(e*e.diff(x))/(2*e.diff(x)**2 - e*e.diff(x, 2)))},
'halley_alt': {'delta_fn': lambda e, x: (-e/e.diff(x)/(1-e/e.diff(x)*e.diff(x,2)/2/e.diff(x)))},
}
args = eqb_log, b
for use_cse in [False, True]:
kwargs = {
'params': (b, a, c, N_geo, N_tot), 'itermax': 60, 'debug': True, 'cse': use_cse,
'counter': i, 'atol': 1e-100, 'rtol': 2e-16, 'bounds': (a,c),
'handle_nan': Raise(RuntimeError_(QuotedString("encountered NaN.")))
}
func = {k: newtons_method_function(*args, func_name=f"{k}_b", **dict(kwargs, **kw)) for k, kw in meth_kw.items()}
py_mod = {k: py_module(v) for k, v in func.items()}
namespace = {}
root_find_b = {}
for k, v in py_mod.items():
ns = namespace[k] = {}
exec(v, ns, ns)
root_find_b[k] = ns[f'{k}_b']
ref = Float('13.2261515064168768938151923226496')
reftol = {'clamped_newton': 2e-16, 'halley': 2e-16, 'halley_alt': 3e-16}
guess = 4.0
for meth, func in root_find_b.items():
result = func(guess, 1e-2, 1e2, 50, 100)
req = ref*reftol[meth]
if use_cse:
req *= 2
assert abs(result - ref) < req | null |
5,658 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas.tables.tests \
import utils as tables_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fk_utils
class ForeignKeyPutTestCase(BaseTestGenerator):
"""This class will update foreign key from existing table"""
url = '/browser/foreign_key/obj/'
# Generates scenarios
scenarios = utils.generate_scenarios("foreign_key_put",
fk_utils.test_cases)
def setUp(self):
super().setUp()
# Load test data
self.data = self.test_data
# Create db connection
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception(
"Could not connect to database to fetch a foreign "
"key constraint.")
# Create schema
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to fetch a foreign "
"key constraint.")
# Create local table
self.local_table_name = "local_table_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.local_table_id = tables_utils.create_table(
self.server, self.db_name, self.schema_name, self.local_table_name)
# Create foreign table
self.foreign_table_name = "foreign_table_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
self.foreign_table_id = tables_utils.create_table(
self.server, self.db_name, self.schema_name,
self.foreign_table_name)
# Create foreign key
self.foreign_key_name = "test_foreignkey_get_%s" % \
(str(uuid.uuid4())[1:8])
if "query" in self.inventory_data:
query = self.inventory_data["query"]
else:
query = None
self.foreign_key_id = fk_utils.create_foreignkey(
self.server, self.db_name, self.schema_name, self.local_table_name,
self.foreign_table_name, query)
def METHOD_NAME(self):
"""This function will update foreign key attached to table column."""
self.data["oid"] = self.foreign_key_id
if self.is_positive_test:
response = fk_utils.api_put(self)
# Assert response
utils.assert_status_code(self, response)
else:
if self.mocking_required:
with patch(self.mock_data["function_name"],
side_effect=[eval(self.mock_data["return_value"])]):
response = fk_utils.api_put(self)
# Assert response
utils.assert_status_code(self, response)
utils.assert_error_message(self, response)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,659 | from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
def __init__(self, data: Any):
self.data: Any = data
self.next: Node | None = None
class CircularLinkedList:
def __init__(self):
self.head = None
self.tail = None
def __iter__(self) -> Iterator[Any]:
node = self.head
while self.head:
yield node.data
node = node.next
if node == self.head:
break
def __len__(self) -> int:
return sum(1 for _ in self)
def __repr__(self):
return "->".join(str(item) for item in iter(self))
def insert_tail(self, data: Any) -> None:
self.insert_nth(len(self), data)
def insert_head(self, data: Any) -> None:
self.insert_nth(0, data)
def insert_nth(self, index: int, data: Any) -> None:
if index < 0 or index > len(self):
raise IndexError("list index out of range.")
new_node = Node(data)
if self.head is None:
new_node.next = new_node # first node points itself
self.tail = self.head = new_node
elif index == 0: # insert at head
new_node.next = self.head
self.head = self.tail.next = new_node
else:
temp = self.head
for _ in range(index - 1):
temp = temp.next
new_node.next = temp.next
temp.next = new_node
if index == len(self) - 1: # insert at tail
self.tail = new_node
def delete_front(self):
return self.METHOD_NAME(0)
def delete_tail(self) -> Any:
return self.METHOD_NAME(len(self) - 1)
def METHOD_NAME(self, index: int = 0) -> Any:
if not 0 <= index < len(self):
raise IndexError("list index out of range.")
delete_node = self.head
if self.head == self.tail: # just one node
self.head = self.tail = None
elif index == 0: # delete head node
self.tail.next = self.tail.next.next
self.head = self.head.next
else:
temp = self.head
for _ in range(index - 1):
temp = temp.next
delete_node = temp.next
temp.next = temp.next.next
if index == len(self) - 1: # delete at tail
self.tail = temp
return delete_node.data
def is_empty(self) -> bool:
return len(self) == 0
def test_circular_linked_list() -> None:
"""
>>> test_circular_linked_list()
"""
circular_linked_list = CircularLinkedList()
assert len(circular_linked_list) == 0
assert circular_linked_list.is_empty() is True
assert str(circular_linked_list) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.METHOD_NAME(-1)
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.METHOD_NAME(0)
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5):
assert len(circular_linked_list) == i
circular_linked_list.insert_nth(i, i + 1)
assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
circular_linked_list.insert_tail(6)
assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
circular_linked_list.insert_head(0)
assert str(circular_linked_list) == "->".join(str(i) for i in range(7))
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
assert circular_linked_list.METHOD_NAME(2) == 3
circular_linked_list.insert_nth(2, 3)
assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod() | null |
5,660 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetGroupTemplateDeploymentResult',
'AwaitableGetGroupTemplateDeploymentResult',
'get_group_template_deployment',
'get_group_template_deployment_output',
]
@pulumi.output_type
class GetGroupTemplateDeploymentResult:
"""
A collection of values returned by getGroupTemplateDeployment.
"""
def __init__(__self__, METHOD_NAME=None, management_group_id=None, name=None, output_content=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if management_group_id and not isinstance(management_group_id, str):
raise TypeError("Expected argument 'management_group_id' to be a str")
pulumi.set(__self__, "management_group_id", management_group_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if output_content and not isinstance(output_content, str):
raise TypeError("Expected argument 'output_content' to be a str")
pulumi.set(__self__, "output_content", output_content)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="managementGroupId")
def management_group_id(self) -> str:
return pulumi.get(self, "management_group_id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="outputContent")
def output_content(self) -> str:
"""
The JSON Content of the Outputs of the ARM Template Deployment.
"""
return pulumi.get(self, "output_content")
class AwaitableGetGroupTemplateDeploymentResult(GetGroupTemplateDeploymentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetGroupTemplateDeploymentResult(
METHOD_NAME=self.METHOD_NAME,
management_group_id=self.management_group_id,
name=self.name,
output_content=self.output_content)
def get_group_template_deployment(management_group_id: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetGroupTemplateDeploymentResult:
"""
Use this data source to access information about an existing Management Group Template Deployment.
:param str management_group_id: The ID of the Management Group to which this template was applied.
:param str name: The name of this Management Group Template Deployment.
"""
__args__ = dict()
__args__['managementGroupId'] = management_group_id
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:management/getGroupTemplateDeployment:getGroupTemplateDeployment', __args__, opts=opts, typ=GetGroupTemplateDeploymentResult).value
return AwaitableGetGroupTemplateDeploymentResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
management_group_id=pulumi.get(__ret__, 'management_group_id'),
name=pulumi.get(__ret__, 'name'),
output_content=pulumi.get(__ret__, 'output_content'))
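# --- Illustrative sketch added for clarity; not part of the generated file.
# Typical usage from a Pulumi program, assuming the usual pulumi_azure package
# layout; the deployment name and management group ID are hypothetical.
#
#   import pulumi
#   import pulumi_azure as azure
#
#   example = azure.management.get_group_template_deployment(
#       name="existing-deployment",
#       management_group_id="/providers/Microsoft.Management/managementGroups/example",
#   )
#   pulumi.export("armOutputs", example.output_content)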
@_utilities.lift_output_func(get_group_template_deployment)
def get_group_template_deployment_output(management_group_id: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetGroupTemplateDeploymentResult]:
"""
Use this data source to access information about an existing Management Group Template Deployment.
:param str management_group_id: The ID of the Management Group to which this template was applied.
:param str name: The name of this Management Group Template Deployment.
"""
... | null |
5,661 | from __future__ import annotations
from dataclasses import dataclass
from datetime import datetime
from typing import Iterator, TYPE_CHECKING, Type
from game.ato.flightplans.standard import StandardFlightPlan, StandardLayout
from game.theater.controlpoint import ControlPointType
from game.theater.missiontarget import MissionTarget
from game.utils import Distance, feet, meters
from .ibuilder import IBuilder
from .planningerror import PlanningError
from .uizonedisplay import UiZone, UiZoneDisplay
from .waypointbuilder import WaypointBuilder
from ..flightwaypoint import FlightWaypointType
if TYPE_CHECKING:
from ..flightwaypoint import FlightWaypoint
@dataclass(frozen=True)
class AirAssaultLayout(StandardLayout):
# The pickup point is optional because we don't always need to load the cargo. When
# departing from a carrier, LHA, or off-map spawn, the cargo is pre-loaded.
pickup: FlightWaypoint | None
nav_to_ingress: list[FlightWaypoint]
ingress: FlightWaypoint
drop_off: FlightWaypoint
# This is an implementation detail used by CTLD. The aircraft will not go to this
# waypoint. It is used by CTLD as the destination for unloaded troops.
target: FlightWaypoint
nav_to_home: list[FlightWaypoint]
def iter_waypoints(self) -> Iterator[FlightWaypoint]:
yield self.departure
if self.pickup is not None:
yield self.pickup
yield from self.nav_to_ingress
yield self.ingress
yield self.drop_off
yield self.target
yield from self.nav_to_home
yield self.arrival
if self.divert is not None:
yield self.divert
yield self.bullseye
class AirAssaultFlightPlan(StandardFlightPlan[AirAssaultLayout], UiZoneDisplay):
@staticmethod
def METHOD_NAME() -> Type[Builder]:
return Builder
@property
def tot_waypoint(self) -> FlightWaypoint:
return self.layout.drop_off
def tot_for_waypoint(self, waypoint: FlightWaypoint) -> datetime | None:
if waypoint == self.tot_waypoint:
return self.tot
return None
def depart_time_for_waypoint(self, waypoint: FlightWaypoint) -> datetime | None:
return None
@property
def ctld_target_zone_radius(self) -> Distance:
return meters(2500)
@property
def mission_begin_on_station_time(self) -> datetime | None:
return None
@property
def mission_departure_time(self) -> datetime:
return self.package.time_over_target
def ui_zone(self) -> UiZone:
return UiZone(
[self.layout.target.position],
self.ctld_target_zone_radius,
)
class Builder(IBuilder[AirAssaultFlightPlan, AirAssaultLayout]):
def layout(self) -> AirAssaultLayout:
if not self.flight.is_helo:
raise PlanningError("Air assault is only usable by helicopters")
assert self.package.waypoints is not None
altitude = feet(1500) if self.flight.is_helo else self.doctrine.ingress_altitude
altitude_is_agl = self.flight.is_helo
builder = WaypointBuilder(self.flight, self.coalition)
if self.flight.departure.cptype in [
ControlPointType.AIRCRAFT_CARRIER_GROUP,
ControlPointType.LHA_GROUP,
ControlPointType.OFF_MAP,
]:
# Off_Map spawns will be preloaded
# Carrier operations load the logistics directly from the carrier
pickup = None
pickup_position = self.flight.departure.position
else:
            # TODO The calculation of the Pickup LZ is currently randomized. This
            # leads to the problem that we cannot guarantee that the LZ is clear of
            # obstacles. This has to be improved in the future so that the Mission can
            # be autoplanned. In the current state the User has to check that the
            # created Waypoints for the Pickup and Dropoff LZs are free of obstacles.
# Create a special pickup zone for Helos from Airbase / FOB
pickup = builder.pickup_zone(
MissionTarget(
"Pickup Zone",
self.flight.departure.position.random_point_within(1200, 600),
)
)
pickup_position = pickup.position
assault_area = builder.assault_area(self.package.target)
heading = self.package.target.position.heading_between_point(pickup_position)
        # TODO we cannot guarantee a safe LZ for DropOff. See the comment above.
drop_off_zone = MissionTarget(
"Dropoff zone",
self.package.target.position.point_from_heading(heading, 1200),
)
return AirAssaultLayout(
departure=builder.takeoff(self.flight.departure),
pickup=pickup,
nav_to_ingress=builder.nav_path(
pickup_position,
self.package.waypoints.ingress,
altitude,
altitude_is_agl,
),
ingress=builder.ingress(
FlightWaypointType.INGRESS_AIR_ASSAULT,
self.package.waypoints.ingress,
self.package.target,
),
drop_off=builder.dropoff_zone(drop_off_zone),
target=assault_area,
nav_to_home=builder.nav_path(
drop_off_zone.position,
self.flight.arrival.position,
altitude,
altitude_is_agl,
),
arrival=builder.land(self.flight.arrival),
divert=builder.divert(self.flight.divert),
bullseye=builder.bullseye(),
)
def build(self, dump_debug_info: bool = False) -> AirAssaultFlightPlan:
return AirAssaultFlightPlan(self.flight, self.layout()) | null |
5,662 | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from shapely.geometry import Polygon
def points2polygon(points):
"""Convert k points to 1 polygon.
Args:
points (ndarray or list): A ndarray or a list of shape (2k)
that indicates k points.
Returns:
polygon (Polygon): A polygon object.
"""
if isinstance(points, list):
points = np.array(points)
assert isinstance(points, np.ndarray)
assert (points.size % 2 == 0) and (points.size >= 8)
point_mat = points.reshape([-1, 2])
return Polygon(point_mat)
def poly_intersection(poly_det, poly_gt, buffer=0.0001):
"""Calculate the intersection area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
intersection_area (float): The intersection area between two polygons.
"""
assert isinstance(poly_det, Polygon)
assert isinstance(poly_gt, Polygon)
if buffer == 0:
poly_inter = poly_det & poly_gt
else:
poly_inter = poly_det.buffer(buffer) & poly_gt.buffer(buffer)
return poly_inter.area, poly_inter
def poly_union(poly_det, poly_gt):
"""Calculate the union area between two polygon.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
union_area (float): The union area between two polygons.
"""
assert isinstance(poly_det, Polygon)
assert isinstance(poly_gt, Polygon)
area_det = poly_det.area
area_gt = poly_gt.area
area_inters, _ = poly_intersection(poly_det, poly_gt)
return area_det + area_gt - area_inters
def valid_boundary(x, with_score=True):
num = len(x)
if num < 8:
return False
if num % 2 == 0 and (not with_score):
return True
if num % 2 == 1 and with_score:
return True
return False
def boundary_iou(src, target):
"""Calculate the IOU between two boundaries.
Args:
src (list): Source boundary.
target (list): Target boundary.
Returns:
iou (float): The iou between two boundaries.
"""
assert valid_boundary(src, False)
assert valid_boundary(target, False)
src_poly = points2polygon(src)
target_poly = points2polygon(target)
return poly_iou(src_poly, target_poly)
def poly_iou(poly_det, poly_gt):
"""Calculate the IOU between two polygons.
Args:
poly_det (Polygon): A polygon predicted by detector.
poly_gt (Polygon): A gt polygon.
Returns:
iou (float): The IOU between two polygons.
"""
assert isinstance(poly_det, Polygon)
assert isinstance(poly_gt, Polygon)
area_inters, _ = poly_intersection(poly_det, poly_gt)
area_union = poly_union(poly_det, poly_gt)
if area_union == 0:
return 0.0
return area_inters / area_union
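# --- Illustrative sketch added for clarity; not part of the original module.
# It shows how boundary_iou() compares two boundaries given as flat coordinate
# lists; the sample quadrilaterals below are hypothetical.
def _example_boundary_iou():
    # Two axis-aligned unit squares, the second shifted by 0.5 along x.
    box_a = [0.0, 0.0, 1.0, 0.0, 1.0, 1.0, 0.0, 1.0]
    box_b = [0.5, 0.0, 1.5, 0.0, 1.5, 1.0, 0.5, 1.0]
    # Intersection is 0.5 and union is 1.5, so the result is roughly 1/3
    # (the small buffer used in poly_intersection makes it approximate).
    return boundary_iou(box_a, box_b)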
def METHOD_NAME(polygons, threshold):
assert isinstance(polygons, list)
polygons = np.array(sorted(polygons, key=lambda x: x[-1]))
keep_poly = []
index = [i for i in range(polygons.shape[0])]
while len(index) > 0:
keep_poly.append(polygons[index[-1]].tolist())
A = polygons[index[-1]][:-1]
index = np.delete(index, -1)
iou_list = np.zeros((len(index), ))
for i in range(len(index)):
B = polygons[index[i]][:-1]
iou_list[i] = boundary_iou(A, B)
remove_index = np.where(iou_list > threshold)
index = np.delete(index, remove_index)
return keep_poly | null |
5,663 | # Copyright 2018 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import os.path
import azurelinuxagent.common.conf as conf
from azurelinuxagent.common.utils import fileutil
from tests.tools import AgentTestCase, data_dir
class TestConf(AgentTestCase):
# Note:
# -- These values *MUST* match those from data/test_waagent.conf
EXPECTED_CONFIGURATION = {
"Extensions.Enabled": True,
"Provisioning.Agent": "auto",
"Provisioning.DeleteRootPassword": True,
"Provisioning.RegenerateSshHostKeyPair": True,
"Provisioning.SshHostKeyPairType": "rsa",
"Provisioning.MonitorHostName": True,
"Provisioning.DecodeCustomData": False,
"Provisioning.ExecuteCustomData": False,
"Provisioning.PasswordCryptId": '6',
"Provisioning.PasswordCryptSaltLength": 10,
"Provisioning.AllowResetSysUser": False,
"ResourceDisk.Format": True,
"ResourceDisk.Filesystem": "ext4",
"ResourceDisk.MountPoint": "/mnt/resource",
"ResourceDisk.EnableSwap": False,
"ResourceDisk.EnableSwapEncryption": False,
"ResourceDisk.SwapSizeMB": 0,
"ResourceDisk.MountOptions": None,
"Logs.Verbose": False,
"OS.EnableFIPS": True,
"OS.RootDeviceScsiTimeout": '300',
"OS.OpensslPath": '/usr/bin/openssl',
"OS.SshClientAliveInterval": 42,
"OS.SshDir": "/notareal/path",
"HttpProxy.Host": None,
"HttpProxy.Port": None,
"DetectScvmmEnv": False,
"Lib.Dir": "/var/lib/waagent",
"DVD.MountPoint": "/mnt/cdrom/secure",
"Pid.File": "/var/run/waagent.pid",
"Extension.LogDir": "/var/log/azure",
"OS.HomeDir": "/home",
"OS.EnableRDMA": False,
"OS.UpdateRdmaDriver": False,
"OS.CheckRdmaDriver": False,
"AutoUpdate.Enabled": True,
"AutoUpdate.GAFamily": "Prod",
"EnableOverProvisioning": True,
"OS.AllowHTTP": False,
"OS.EnableFirewall": False
}
def setUp(self):
AgentTestCase.setUp(self)
self.conf = conf.ConfigurationProvider()
conf.load_conf_from_file(
os.path.join(data_dir, "test_waagent.conf"),
self.conf)
def test_get_should_return_default_when_key_is_not_found(self):
self.assertEqual("The Default Value", self.conf.get("this-key-does-not-exist", "The Default Value"))
self.assertEqual("The Default Value", self.conf.get("this-key-does-not-exist", lambda: "The Default Value"))
def test_get_switch_should_return_default_when_key_is_not_found(self):
self.assertEqual(True, self.conf.get_switch("this-key-does-not-exist", True))
self.assertEqual(True, self.conf.get_switch("this-key-does-not-exist", lambda: True))
def test_get_int_should_return_default_when_key_is_not_found(self):
self.assertEqual(123456789, self.conf.get_int("this-key-does-not-exist", 123456789))
self.assertEqual(123456789, self.conf.get_int("this-key-does-not-exist", lambda: 123456789))
def test_key_value_handling(self):
self.assertEqual("Value1", self.conf.get("FauxKey1", "Bad"))
self.assertEqual("Value2 Value2", self.conf.get("FauxKey2", "Bad"))
self.assertEqual("delalloc,rw,noatime,nobarrier,users,mode=777", self.conf.get("FauxKey3", "Bad"))
def test_get_ssh_dir(self):
self.assertTrue(conf.get_ssh_dir(self.conf).startswith("/notareal/path"))
def METHOD_NAME(self):
self.assertTrue(conf.get_sshd_conf_file_path(
self.conf).startswith("/notareal/path"))
def test_get_ssh_key_glob(self):
self.assertTrue(conf.get_ssh_key_glob(
self.conf).startswith("/notareal/path"))
def test_get_ssh_key_private_path(self):
self.assertTrue(conf.get_ssh_key_private_path(
self.conf).startswith("/notareal/path"))
def test_get_ssh_key_public_path(self):
self.assertTrue(conf.get_ssh_key_public_path(
self.conf).startswith("/notareal/path"))
def test_get_fips_enabled(self):
self.assertTrue(conf.get_fips_enabled(self.conf))
def test_get_provision_agent(self):
self.assertTrue(conf.get_provisioning_agent(self.conf) == 'auto')
def test_get_configuration(self):
configuration = conf.get_configuration(self.conf)
self.assertTrue(len(configuration.keys()) > 0)
for k in TestConf.EXPECTED_CONFIGURATION.keys():
self.assertEqual(
TestConf.EXPECTED_CONFIGURATION[k],
configuration[k],
k)
def test_get_agent_disabled_file_path(self):
self.assertEqual(conf.get_disable_agent_file_path(self.conf),
os.path.join(self.tmp_dir, conf.DISABLE_AGENT_FILE))
def test_write_agent_disabled(self):
"""
Test writing disable_agent is empty
"""
from azurelinuxagent.pa.provision.default import ProvisionHandler
disable_file_path = conf.get_disable_agent_file_path(self.conf)
self.assertFalse(os.path.exists(disable_file_path))
ProvisionHandler.write_agent_disabled()
self.assertTrue(os.path.exists(disable_file_path))
self.assertEqual('', fileutil.read_file(disable_file_path))
def test_get_extensions_enabled(self):
self.assertTrue(conf.get_extensions_enabled(self.conf)) | null |
5,664 | """MPF clock and main loop."""
import asyncio
import datetime
from typing import Tuple
from serial_asyncio import create_serial_connection
from mpf.core.logging import LogMixin
class PeriodicTask:
"""A periodic asyncio task."""
__slots__ = ["_canceled", "_interval", "_callback", "_loop", "_last_call"]
def __init__(self, interval, loop, callback):
"""Initialise periodic task."""
self._canceled = False
self._interval = interval
self._callback = callback
self._loop = loop
self._last_call = self._loop.time()
self._schedule()
def _schedule(self):
if self._canceled:
return
self._loop.call_at(self._last_call + self._interval, self._run)
def get_next_call_time(self):
"""Return time of next call."""
return self._last_call + self._interval
def _run(self):
self._last_call = self._last_call + self._interval
if self._canceled:
return
self._callback()
self._schedule()
def cancel(self):
"""Cancel periodic task."""
self._canceled = True
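# --- Illustrative sketch added for clarity; not part of the original module.
# It shows how PeriodicTask keeps a fixed cadence: each run is scheduled with
# call_at() relative to the previous target time, so callback latency does not
# accumulate as drift. The interval and callback below are hypothetical.
def _example_periodic_heartbeat(loop):
    task = PeriodicTask(0.25, loop, lambda: print("heartbeat"))
    # task.get_next_call_time() reports the next absolute loop time; call
    # task.cancel() to stop further scheduling.
    return task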
class ClockBase(LogMixin):
"""A clock object with event support."""
__slots__ = ["machine", "loop"]
def __init__(self, machine=None, loop=None):
"""Initialise clock."""
super().__init__()
self.machine = machine
        # needed since the test clock is set up before the machine
if machine:
self.configure_logging(
'Clock',
self.machine.config['logging']['console']['clock'],
self.machine.config['logging']['file']['clock'])
else:
self.configure_logging('Clock', None, None)
self.debug_log("Starting tickless clock")
if not loop:
self.loop = self.METHOD_NAME() # type: asyncio.AbstractEventLoop
else:
self.loop = loop # type: asyncio.AbstractEventLoop
asyncio.set_event_loop(self.loop)
# pylint: disable-msg=no-self-use
def METHOD_NAME(self):
try:
# pylint: disable-msg=import-outside-toplevel
import uvloop
except ImportError:
pass
else:
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
return asyncio.get_event_loop()
def run(self, stop_future):
"""Run the clock."""
return self.loop.run_until_complete(stop_future)
def get_time(self):
"""Get the last tick made by the clock."""
return self.loop.time()
@staticmethod
def get_datetime():
"""Get current datetime."""
return datetime.datetime.now()
def start_server(self, client_connected_cb, host=None, port=None, **kwd):
"""Start a server."""
return asyncio.start_server(client_connected_cb, host, port, loop=self.loop, **kwd)
def open_connection(self, host=None, port=None, *,
limit=None, **kwds):
"""Open connection using asyncio.
Wrapper for create_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a
StreamWriter instance.
The arguments are all the usual arguments to create_connection()
except protocol_factory; most common are positional host and port,
with various optional keyword arguments following.
Additional optional keyword arguments are loop (to set the event loop
instance to use) and limit (to set the buffer limit passed to the
StreamReader).
(If you want to customize the StreamReader and/or
StreamReaderProtocol classes, just copy the code -- there's
really nothing special here except some convenience.)
"""
if not limit:
# pylint: disable-msg=protected-access
limit = asyncio.streams._DEFAULT_LIMIT
return asyncio.open_connection(host=host, port=port, loop=self.loop, limit=limit, **kwds)
async def open_serial_connection(self, limit=None, **kwargs) -> Tuple[asyncio.StreamReader, asyncio.StreamWriter]:
"""Open a serial connection using asyncio.
A wrapper for create_serial_connection() returning a (reader, writer) pair.
The reader returned is a StreamReader instance; the writer is a StreamWriter instance.
The arguments are all the usual arguments to Serial(). Additional
optional keyword arguments are loop (to set the event loop instance
to use) and limit (to set the buffer limit passed to the
        StreamReader).
This function is a coroutine.
Args:
----
loop: asyncio loop
limit: line length limit
"""
if not limit:
# pylint: disable-msg=protected-access
limit = asyncio.streams._DEFAULT_LIMIT # type: ignore
reader = asyncio.StreamReader(limit=limit, loop=self.loop)
protocol = asyncio.StreamReaderProtocol(reader, loop=self.loop)
transport, _ = await create_serial_connection(
loop=self.loop,
protocol_factory=lambda: protocol,
**kwargs)
writer = asyncio.StreamWriter(transport, protocol, reader, self.loop)
return reader, writer
def schedule_once(self, callback, timeout=0):
"""Schedule an event in <timeout> seconds.
If <timeout> is unspecified
or 0, the callback will be called after the next frame is rendered.
Args:
----
callback: callback to call on timeout
timeout: seconds to wait
Returns a :class:`ClockEvent` instance.
"""
if not callable(callback):
raise AssertionError('callback must be a callable, got %s' % callback)
event = self.loop.call_later(delay=timeout, callback=callback)
if self._debug_to_console or self._debug_to_file:
self.debug_log("Scheduled a one-time clock callback (callback=%s, timeout=%s)",
str(callback), timeout)
return event
def schedule_interval(self, callback, timeout) -> PeriodicTask:
"""Schedule an event to be called every <timeout> seconds.
Args:
----
callback: callback to call on timeout
timeout: period to wait
Returns a PeriodicTask object.
"""
if not callable(callback):
raise AssertionError('callback must be a callable, got {}'.format(callback))
periodic_task = PeriodicTask(timeout, self.loop, callback)
if self._debug_to_console or self._debug_to_file:
self.debug_log("Scheduled a recurring clock callback (callback=%s, timeout=%s)",
str(callback), timeout)
return periodic_task
@staticmethod
def unschedule(event):
"""Remove a previously scheduled event. Wrapper for cancel for compatibility to kivy clock.
Args:
----
event: Event to cancel
"""
try:
event.cancel()
except Exception: # pylint: disable-msg=broad-except
raise AssertionError("Broken unschedule: {} {}".format(event, type(event))) | null |
5,665 | # Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Tuple
import numpy as np
import torch
from torch import Tensor
def METHOD_NAME(shape: Tuple[int, int], sigma: float = 1) -> np.ndarray:
"""Generate gaussian map.
Args:
shape (Tuple[int]): Shape of the map.
sigma (float): Sigma to generate gaussian map.
Defaults to 1.
Returns:
np.ndarray: Generated gaussian map.
"""
m, n = [(ss - 1.) / 2. for ss in shape]
y, x = np.ogrid[-m:m + 1, -n:n + 1]
h = np.exp(-(x * x + y * y) / (2 * sigma * sigma))
h[h < np.finfo(h.dtype).eps * h.max()] = 0
return h
def draw_heatmap_gaussian(heatmap: Tensor,
center: Tensor,
radius: int,
k: int = 1) -> Tensor:
"""Get gaussian masked heatmap.
Args:
heatmap (Tensor): Heatmap to be masked.
center (Tensor): Center coord of the heatmap.
radius (int): Radius of gaussian.
k (int): Multiple of masked_gaussian. Defaults to 1.
Returns:
Tensor: Masked heatmap.
"""
diameter = 2 * radius + 1
gaussian = METHOD_NAME((diameter, diameter), sigma=diameter / 6)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = torch.from_numpy(
gaussian[radius - top:radius + bottom,
radius - left:radius + right]).to(heatmap.device,
torch.float32)
if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0:
torch.max(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def gaussian_radius(det_size: Tuple[Tensor, Tensor],
min_overlap: float = 0.5) -> Tensor:
"""Get radius of gaussian.
Args:
det_size (Tuple[Tensor]): Size of the detection result.
min_overlap (float): Gaussian_overlap. Defaults to 0.5.
Returns:
Tensor: Computed radius.
"""
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = torch.sqrt(b1**2 - 4 * a1 * c1)
r1 = (b1 + sq1) / 2
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = torch.sqrt(b2**2 - 4 * a2 * c2)
r2 = (b2 + sq2) / 2
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = torch.sqrt(b3**2 - 4 * a3 * c3)
r3 = (b3 + sq3) / 2
return min(r1, r2, r3)
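# --- Illustrative sketch added for clarity; not part of the original module.
# It shows the typical pairing of gaussian_radius() with
# draw_heatmap_gaussian(); the map size, box size and center are hypothetical.
def _example_draw_center_heatmap() -> Tensor:
    heatmap = torch.zeros((40, 40))
    # Radius for a 10x10 box with the default 0.5 minimum overlap.
    radius = gaussian_radius((torch.tensor(10.0), torch.tensor(10.0)))
    # Splat a gaussian centered at (x=20, y=15) onto the map, in place.
    return draw_heatmap_gaussian(heatmap, torch.tensor([20, 15]), int(radius))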
def get_ellip_gaussian_2D(heatmap: Tensor,
center: List[int],
radius_x: int,
radius_y: int,
k: int = 1) -> Tensor:
"""Generate 2D ellipse gaussian heatmap.
Args:
heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
it and maintain the max value.
center (List[int]): Coord of gaussian kernel's center.
radius_x (int): X-axis radius of gaussian kernel.
radius_y (int): Y-axis radius of gaussian kernel.
k (int): Coefficient of gaussian kernel. Defaults to 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
"""
diameter_x, diameter_y = 2 * radius_x + 1, 2 * radius_y + 1
gaussian_kernel = ellip_gaussian2D((radius_x, radius_y),
sigma_x=diameter_x // 6,
sigma_y=diameter_y // 6,
dtype=heatmap.dtype,
device=heatmap.device)
x, y = int(center[0]), int(center[1])
height, width = heatmap.shape[0:2]
left, right = min(x, radius_x), min(width - x, radius_x + 1)
top, bottom = min(y, radius_y), min(height - y, radius_y + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian_kernel[radius_y - top:radius_y + bottom,
radius_x - left:radius_x + right]
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_gaussian * k,
out=out_heatmap[y - top:y + bottom, x - left:x + right])
return out_heatmap
def ellip_gaussian2D(radius: Tuple[int, int],
sigma_x: int,
sigma_y: int,
dtype: torch.dtype = torch.float32,
device: str = 'cpu') -> Tensor:
"""Generate 2D ellipse gaussian kernel.
Args:
radius (Tuple[int]): Ellipse radius (radius_x, radius_y) of gaussian
kernel.
sigma_x (int): X-axis sigma of gaussian function.
sigma_y (int): Y-axis sigma of gaussian function.
dtype (torch.dtype): Dtype of gaussian tensor.
Defaults to torch.float32.
device (str): Device of gaussian tensor.
Defaults to 'cpu'.
Returns:
h (Tensor): Gaussian kernel with a
``(2 * radius_y + 1) * (2 * radius_x + 1)`` shape.
"""
x = torch.arange(
-radius[0], radius[0] + 1, dtype=dtype, device=device).view(1, -1)
y = torch.arange(
-radius[1], radius[1] + 1, dtype=dtype, device=device).view(-1, 1)
h = (-(x * x) / (2 * sigma_x * sigma_x) - (y * y) /
(2 * sigma_y * sigma_y)).exp()
h[h < torch.finfo(h.dtype).eps * h.max()] = 0
return h | null |
5,666 | import datetime
import freezegun
import pytest
from kopf._cogs.structs.credentials import ConnectionInfo, LoginError, Vault, VaultKey
async def METHOD_NAME():
vault = Vault()
with pytest.raises(NotImplementedError):
bool(vault)
async def test_empty_at_creation():
vault = Vault()
assert vault.is_empty()
async def test_not_empty_when_populated():
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
await vault.populate({key1: info1})
assert not vault.is_empty()
async def test_yielding_after_creation(mocker):
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
with pytest.raises(LoginError):
async for _, _ in vault:
pass
assert vault._ready.wait_for.await_args_list == [((True,),)]
async def test_yielding_after_population(mocker):
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
results = []
async for key, info in vault:
results.append((key, info))
assert len(results) == 1
assert results[0][0] == key1
assert results[0][1] is info1
@freezegun.freeze_time('2020-01-01T00:00:00')
async def test_yielding_items_before_expiration(mocker):
future = datetime.datetime(2020, 1, 1, 0, 0, 0, 1)
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/', expiration=future)
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
results = []
await vault.populate({key1: info1})
async for key, info in vault:
results.append((key, info))
assert len(results) == 1
assert results[0][0] == key1
assert results[0][1] is info1
@pytest.mark.parametrize('delta', [0, 1])
@freezegun.freeze_time('2020-01-01T00:00:00')
async def test_yielding_ignores_expired_items(mocker, delta):
future = datetime.datetime(2020, 1, 1, 0, 0, 0, 1)
past = datetime.datetime(2020, 1, 1) - datetime.timedelta(microseconds=delta)
key1 = VaultKey('some-key')
key2 = VaultKey('other-key')
info1 = ConnectionInfo(server='https://expected/', expiration=past)
info2 = ConnectionInfo(server='https://expected/', expiration=future)
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
results = []
await vault.populate({key1: info1, key2: info2})
async for key, info in vault:
results.append((key, info))
assert len(results) == 1
assert results[0][0] == key2
assert results[0][1] is info2
@pytest.mark.parametrize('delta', [0, 1])
@freezegun.freeze_time('2020-01-01T00:00:00')
async def test_yielding_when_everything_is_expired(mocker, delta):
past = datetime.datetime(2020, 1, 1) - datetime.timedelta(microseconds=delta)
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/', expiration=past)
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
with pytest.raises(LoginError):
async for _, _ in vault:
pass
async def test_invalidation_reraises_if_nothing_is_left_with_exception(mocker):
exc = Exception("Sample error.")
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
with pytest.raises(Exception) as e:
await vault.invalidate(key1, exc=exc)
assert isinstance(e.value, LoginError)
assert e.value.__cause__ is exc
assert vault._ready.wait_for.await_args_list == [((True,),)]
async def test_invalidation_continues_if_nothing_is_left_without_exception(mocker):
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
await vault.invalidate(key1)
assert vault._ready.wait_for.await_args_list == [((True,),)]
async def test_invalidation_continues_if_something_is_left():
exc = Exception("Sample error.")
key1 = VaultKey('key1')
key2 = VaultKey('key2')
info1 = ConnectionInfo(server='https://server1/')
info2 = ConnectionInfo(server='https://server2/')
vault = Vault()
await vault.populate({key1: info1})
await vault.populate({key2: info2})
await vault.invalidate(key1, exc=exc) # no exception!
results = []
async for key, info in vault:
results.append((key, info))
assert len(results) == 1
assert results[0][0] == key2
assert results[0][1] is info2
async def test_yielding_after_invalidation(mocker):
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
await vault.invalidate(key1)
with pytest.raises(LoginError):
async for _, _ in vault:
pass
async def test_duplicates_are_remembered(mocker):
key1 = VaultKey('some-key')
info1 = ConnectionInfo(server='https://expected/')
info2 = ConnectionInfo(server='https://expected/') # another instance, same fields
vault = Vault()
mocker.patch.object(vault._ready, 'wait_for')
await vault.populate({key1: info1})
await vault.invalidate(key1)
await vault.populate({key1: info2})
# There should be nothing to yield, despite the second populate() call.
with pytest.raises(LoginError):
async for _, _ in vault:
pass
async def test_caches_from_factory(mocker):
key1 = VaultKey('some-key')
obj1 = object()
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
await vault.populate({key1: info1})
def factory(_: ConnectionInfo) -> object:
return obj1
factory_spy = mocker.MagicMock(spec=factory, wraps=factory)
results = []
async for key, info, obj in vault.extended(factory_spy):
results.append((key, info, obj))
assert len(results) == 1
assert results[0][0] == key1
assert results[0][1] is info1
assert results[0][2] is obj1
assert factory_spy.called
async def test_caches_with_same_purpose(mocker):
key1 = VaultKey('some-key')
obj1 = object()
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
await vault.populate({key1: info1})
def factory(_: ConnectionInfo) -> object:
return obj1
factory_spy = mocker.MagicMock(spec=factory, wraps=factory)
async for _, _, _ in vault.extended(factory_spy, purpose='A'):
pass
async for _, _, _ in vault.extended(factory_spy, purpose='A'):
pass
assert factory_spy.call_count == 1 # called only once, not twice!
async def test_caches_with_different_purposes(mocker):
key1 = VaultKey('some-key')
obj1 = object()
info1 = ConnectionInfo(server='https://expected/')
vault = Vault()
await vault.populate({key1: info1})
def factory(_: ConnectionInfo) -> object:
return obj1
factory_spy = mocker.MagicMock(spec=factory, wraps=factory)
async for _, _, _ in vault.extended(factory_spy, purpose='A'):
pass
async for _, _, _ in vault.extended(factory_spy, purpose='B'):
pass
assert factory_spy.call_count == 2 # once per purpose. | null |
5,667 | import os
import re
from collections import OrderedDict
from fnmatch import translate
from typing import Dict
from conans.errors import ConanException
from conans.model.info import load_binary_info
from conans.model.package_ref import PkgReference
from conans.model.recipe_ref import RecipeReference
from conans.paths import CONANINFO
from conans.search.query_parse import evaluate_postfix, infix_to_postfix
from conans.util.files import load
def filter_packages(query, results: Dict[PkgReference, dict]):
if query is None:
return results
try:
if "!" in query:
raise ConanException("'!' character is not allowed")
if "~" in query:
raise ConanException("'~' character is not allowed")
if " not " in query or query.startswith("not "):
raise ConanException("'not' operator is not allowed")
postfix = infix_to_postfix(query) if query else []
result = OrderedDict()
for pref, data in results.items():
if _evaluate_postfix_with_info(postfix, data):
result[pref] = data
return result
except Exception as exc:
raise ConanException("Invalid package query: %s. %s" % (query, exc))
def _evaluate_postfix_with_info(postfix, binary_info):
# Evaluate conaninfo with the expression
def evaluate_info(expression):
"""Receives an expression like compiler.version="12"
Uses conan_vars_info in the closure to evaluate it"""
name, value = expression.split("=", 1)
value = value.replace("\"", "")
return _evaluate(name, value, binary_info)
return evaluate_postfix(postfix, evaluate_info)
def _evaluate(prop_name, prop_value, binary_info):
"""
Evaluates a single prop_name, prop_value like "os", "Windows" against
conan_vars_info.serialize_min()
"""
def compatible_prop(setting_value, _prop_value):
return (_prop_value == setting_value) or (_prop_value == "None" and setting_value is None)
# TODO: Necessary to generalize this query evaluation to include all possible fields
info_settings = binary_info.get("settings", {})
info_options = binary_info.get("options", {})
if not prop_name.startswith("options."):
return compatible_prop(info_settings.get(prop_name), prop_value)
else:
prop_name = prop_name[len("options."):]
return compatible_prop(info_options.get(prop_name), prop_value)
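# --- Illustrative sketch added for clarity; not part of the original module.
# It shows the shape of the inputs filter_packages() works on: a package query
# string plus a mapping of package references to their serialized binary info.
# The reference, package id and settings below are hypothetical.
def _example_filter_by_os():
    ref = RecipeReference.loads("zlib/1.2.13")
    pref = PkgReference(ref, "0ab9fcf606068d4347207cc29edd400ceccbc944")
    results = {pref: {"settings": {"os": "Windows", "arch": "x86_64"},
                      "options": {"shared": "True"}}}
    # Keeps only the entries whose 'os' setting evaluates to Windows.
    return filter_packages('os="Windows"', results)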
def search_recipes(cache, pattern=None, ignorecase=True):
# Conan references in main storage
if pattern:
if isinstance(pattern, RecipeReference):
pattern = repr(pattern)
pattern = translate(pattern)
pattern = re.compile(pattern, re.IGNORECASE) if ignorecase else re.compile(pattern)
refs = cache.all_refs()
if pattern:
_refs = []
for r in refs:
match_ref = str(r) if not r.revision else repr(r)
if METHOD_NAME(pattern, match_ref):
_refs.append(r)
refs = _refs
return refs
def METHOD_NAME(pattern, reference):
"""
Finds if pattern matches any of partial sums of tokens of conan reference
"""
tokens = reference.replace('/', ' / ').replace('@', ' @ ').replace('#', ' # ').split()
def partial_sums(iterable):
partial = ''
for i in iterable:
partial += i
yield partial
return any(map(pattern.match, list(partial_sums(tokens))))
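# A brief sketch of the partial-sums matching above: the reference string is split at '/', '@'
# and '#' boundaries and the compiled glob is tried against each growing prefix ("zlib",
# "zlib/", "zlib/1.2.13", ...), so a pattern can stop at any token boundary. Illustrative only.
def _partial_sum_matching_sketch():
    pattern = re.compile(translate("zlib/*"), re.IGNORECASE)
    return METHOD_NAME(pattern, "zlib/1.2.13@user/channel")  # True: one of the prefixes satisfies the glob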
def get_cache_packages_binary_info(cache, prefs) -> Dict[PkgReference, dict]:
"""
    param prefs: package references whose latest-revision conaninfo is read from the cache
"""
result = OrderedDict()
package_layouts = []
for pref in prefs:
latest_prev = cache.get_latest_package_reference(pref)
package_layouts.append(cache.pkg_layout(latest_prev))
for pkg_layout in package_layouts:
# Read conaninfo
info_path = os.path.join(pkg_layout.package(), CONANINFO)
if not os.path.exists(info_path):
raise ConanException(f"Corrupted package '{pkg_layout.reference}' "
f"without conaninfo.txt in: {info_path}")
conan_info_content = load(info_path)
info = load_binary_info(conan_info_content)
pref = pkg_layout.reference
        # The key shouldn't have the latest package revision; we are asking for package configs
pref.revision = None
result[pkg_layout.reference] = info
return result | null |
5,668 | import logging
import os
import re
from pathlib import Path
import questionary
import rich.prompt
import nf_core.utils
log = logging.getLogger(__name__)
def get_repo_info(directory, use_prompt=True):
"""
Determine whether this is a pipeline repository or a clone of
nf-core/modules
"""
# Verify that the pipeline dir exists
if directory is None or not Path(directory).is_dir():
raise UserWarning(f"Could not find directory: {directory}")
# Try to find the root directory
base_dir = nf_core.utils.determine_base_dir(directory)
# Figure out the repository type from the .nf-core.yml config file if we can
config_fn, tools_config = nf_core.utils.load_tools_config(base_dir)
repo_type = tools_config.get("repository_type", None)
# If not set, prompt the user
if not repo_type and use_prompt:
log.warning("'repository_type' not defined in %s", config_fn.name)
repo_type = questionary.select(
"Is this repository an nf-core pipeline or a fork of nf-core/modules?",
choices=[
{"name": "Pipeline", "value": "pipeline"},
{"name": "nf-core/modules", "value": "modules"},
],
style=nf_core.utils.nfcore_question_style,
).unsafe_ask()
# Save the choice in the config file
log.info(f"To avoid this prompt in the future, add the 'repository_type' key to your {config_fn.name} file.")
if rich.prompt.Confirm.ask("[bold][blue]?[/] Would you like me to add this config now?", default=True):
with open(config_fn, "a+") as fh:
fh.write(f"repository_type: {repo_type}\n")
log.info(f"Config added to '{config_fn.name}'")
# Not set and not allowed to ask
elif not repo_type:
raise UserWarning("Repository type could not be established")
# Check if it's a valid answer
    if repo_type not in ["pipeline", "modules"]:
raise UserWarning(f"Invalid repository type: '{repo_type}'")
# Check for org if modules repo
org = None
if repo_type == "pipeline":
org = ""
elif repo_type == "modules":
org = tools_config.get("org_path", None)
if org is None:
log.warning("Organisation path not defined in %s [key: org_path]", config_fn.name)
org = questionary.text(
"What is the organisation path under which modules and subworkflows are stored?",
default="nf-core",
style=nf_core.utils.nfcore_question_style,
).unsafe_ask()
log.info("To avoid this prompt in the future, add the 'org_path' key to a root '%s' file.", config_fn.name)
if rich.prompt.Confirm.ask("[bold][blue]?[/] Would you like me to add this config now?", default=True):
with open(config_fn, "a+") as fh:
fh.write(f"org_path: {org}\n")
log.info(f"Config added to '{config_fn.name}'")
if not org:
raise UserWarning("Organisation path could not be established")
# It was set on the command line, return what we were given
return [base_dir, repo_type, org]
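# A small sketch of the expected return shape, assuming a pipeline checkout whose .nf-core.yml
# already defines repository_type so no interactive prompt is needed (the path is hypothetical):
#     base_dir, repo_type, org = get_repo_info("./my-pipeline", use_prompt=False)
#     # -> [<base directory>, "pipeline", ""]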
def METHOD_NAME(component_name, component_type, modules_repo, installed_sha=None):
"""
Creates an interactive questionary prompt for selecting the module/subworkflow version
Args:
component_name (str): Module/subworkflow name,
component_type (str): "modules" or "subworkflows",
modules_repo (ModulesRepo): Modules repo the module/subworkflow originate in
installed_sha (str): Optional extra argument to highlight the current installed version
Returns:
git_sha (str): The selected version of the module/subworkflow
"""
older_commits_choice = questionary.Choice(
title=[("fg:ansiyellow", "older commits"), ("class:choice-default", "")], value=""
)
git_sha = ""
page_nbr = 1
all_commits = modules_repo.get_component_git_log(component_name, component_type)
next_page_commits = [next(all_commits, None) for _ in range(10)]
next_page_commits = [commit for commit in next_page_commits if commit is not None]
while git_sha == "":
commits = next_page_commits
next_page_commits = [next(all_commits, None) for _ in range(10)]
next_page_commits = [commit for commit in next_page_commits if commit is not None]
if all(commit is None for commit in next_page_commits):
next_page_commits = None
choices = []
for title, sha in map(lambda commit: (commit["trunc_message"], commit["git_sha"]), commits):
display_color = "fg:ansiblue" if sha != installed_sha else "fg:ansired"
message = f"{title} {sha}"
if installed_sha == sha:
message += " (installed version)"
commit_display = [(display_color, message), ("class:choice-default", "")]
choices.append(questionary.Choice(title=commit_display, value=sha))
if next_page_commits is not None:
choices += [older_commits_choice]
git_sha = questionary.select(
f"Select '{component_name}' commit:", choices=choices, style=nf_core.utils.nfcore_question_style
).unsafe_ask()
page_nbr += 1
return git_sha
def get_components_to_install(subworkflow_dir):
"""
Parse the subworkflow test main.nf file to retrieve all imported modules and subworkflows.
"""
modules = []
subworkflows = []
with open(Path(subworkflow_dir, "main.nf"), "r") as fh:
for line in fh:
regex = re.compile(
r"include(?: *{ *)([a-zA-Z\_0-9]*)(?: *as *)?(?:[a-zA-Z\_0-9]*)?(?: *})(?: *from *)(?:'|\")(.*)(?:'|\")"
)
match = regex.match(line)
if match and len(match.groups()) == 2:
name, link = match.groups()
if link.startswith("../../../"):
name_split = name.lower().split("_")
modules.append("/".join(name_split))
elif link.startswith("../"):
subworkflows.append(name.lower())
return modules, subworkflows | null |
5,669 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for converting prediction_log to example."""
from typing import Any, List, Tuple, Union
import numpy as np
import tensorflow as tf
from tfx.proto import bulk_inferrer_pb2
from tensorflow_serving.apis import classification_pb2
from tensorflow_serving.apis import prediction_log_pb2
_FeatureListType = List[Tuple[str, List[Union[str, bytes, float]]]]
# Typehint Any is for compatibility reason.
_OutputExampleSpecType = Union[bulk_inferrer_pb2.OutputExampleSpec, Any]
_PredictOutputType = Union[bulk_inferrer_pb2.PredictOutput, Any]
_ClassifyOutputType = Union[bulk_inferrer_pb2.ClassifyOutput, Any]
def convert(prediction_log: prediction_log_pb2.PredictionLog,
output_example_spec: _OutputExampleSpecType) -> tf.train.Example:
"""Converts given `prediction_log` to a `tf.train.Example`.
Args:
prediction_log: The input prediction log.
output_example_spec: The spec for how to map prediction results to columns
in example.
Returns:
A `tf.train.Example` converted from the given prediction_log.
Raises:
ValueError: If the inference type or signature name in spec does not match
that in prediction_log.
"""
specs = output_example_spec.output_columns_spec
if prediction_log.HasField('multi_inference_log'):
example, output_features = _parse_multi_inference_log(
prediction_log.multi_inference_log, output_example_spec)
else:
if len(specs) != 1:
raise ValueError('Got single inference result, so expect single spec in '
'output_example_spec: %s' % output_example_spec)
if prediction_log.HasField('regress_log'):
if not specs[0].HasField('regress_output'):
raise ValueError(
'Regression predictions require a regress_output in output_example_spec: %s'
% output_example_spec)
example = tf.train.Example()
example.CopyFrom(
prediction_log.regress_log.request.input.example_list.examples[0])
output_features = [
(specs[0].regress_output.value_column,
[prediction_log.regress_log.response.result.regressions[0].value])
]
elif prediction_log.HasField('classify_log'):
if not specs[0].HasField('classify_output'):
raise ValueError(
'Classification predictions require a classify_output in output_example_spec: %s'
% output_example_spec)
example, output_features = _parse_classify_log(
prediction_log.classify_log, specs[0].classify_output)
elif prediction_log.HasField('predict_log'):
if not specs[0].HasField('predict_output'):
raise ValueError(
'Predict predictions require a predict_output in output_example_spec: %s'
% output_example_spec)
example, output_features = _parse_predict_log(prediction_log.predict_log,
specs[0].predict_output)
else:
raise ValueError('Unsupported prediction type in prediction_log: %s' %
prediction_log)
return _add_columns(example, output_features)
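# A minimal wiring sketch, assuming a regression-style PredictionLog and a spec with a single
# regress_output column; the column name below is hypothetical:
#     spec = bulk_inferrer_pb2.OutputExampleSpec()
#     spec.output_columns_spec.add().regress_output.value_column = 'predicted_value'
#     example = convert(prediction_log, spec)  # original example plus a 'predicted_value' feature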
def _parse_multi_inference_log(
multi_inference_log: prediction_log_pb2.MultiInferenceLog,
output_example_spec: _OutputExampleSpecType) -> tf.train.Example:
"""Parses MultiInferenceLog."""
spec_map = {
spec.signature_name or tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
spec for spec in output_example_spec.output_columns_spec
}
example = tf.train.Example()
example.CopyFrom(multi_inference_log.request.input.example_list.examples[0])
output_features = []
for result in multi_inference_log.response.results:
spec = spec_map[result.model_spec.signature_name]
if result.HasField('classification_result'):
output_features += METHOD_NAME(
result.classification_result, spec.classify_output)
elif result.HasField('regression_result'):
output_features.append((spec.regress_output.value_column,
[result.regression_result.regressions[0].value]))
else:
      raise ValueError('Unsupported multi_inference_log: %s' %
multi_inference_log)
return example, output_features
def _parse_classify_log(
classify_log: prediction_log_pb2.ClassifyLog,
classify_output_spec: _ClassifyOutputType
) -> Tuple[tf.train.Example, _FeatureListType]:
"""Parses ClassiyLog."""
example = tf.train.Example()
example.CopyFrom(classify_log.request.input.example_list.examples[0])
return example, METHOD_NAME(classify_log.response.result,
classify_output_spec)
def METHOD_NAME(
classification_result: classification_pb2.ClassificationResult,
classify_output_spec: _ClassifyOutputType) -> _FeatureListType:
"""Parses ClassificationResult."""
output_features = []
classes = classification_result.classifications[0].classes
if classify_output_spec.label_column:
output_features.append(
(classify_output_spec.label_column, [c.label for c in classes]))
if classify_output_spec.score_column:
output_features.append(
(classify_output_spec.score_column, [c.score for c in classes]))
return output_features
def _parse_predict_log(
predict_log: prediction_log_pb2.PredictLog,
predict_output_spec: _PredictOutputType
) -> Tuple[tf.train.Example, _FeatureListType]:
"""Parses PredictLog."""
_, input_tensor_proto = next(iter(predict_log.request.inputs.items()))
example = tf.train.Example.FromString(input_tensor_proto.string_val[0])
outputs = predict_log.response.outputs
output_features = []
for col in predict_output_spec.output_columns:
output_tensor_proto = outputs.get(col.output_key)
output_values = np.squeeze(tf.make_ndarray(output_tensor_proto))
if output_values.ndim > 1:
raise ValueError(
'All output values must be convertible to 1D arrays, but %s was '
'not. value was %s.' % (col.output_key, output_values))
if output_values.ndim == 1:
# Convert the output_values to a list.
output_values = output_values.tolist()
else: # output_values.ndim == 0
# Get a scalar for output_values.
output_values = [output_values.item()]
output_features.append((col.output_column, output_values))
return example, output_features
def _add_columns(example: tf.train.Example,
features: _FeatureListType) -> tf.train.Example:
"""Add given features to `example`."""
feature_map = example.features.feature
for col, value in features:
assert col not in feature_map, ('column name %s already exists in example: '
'%s') % (col, example)
# Note: we only consider three types, bytes, int64 and float for now.
if isinstance(value[0], (str, bytes)):
if isinstance(value[0], str):
bytes_value = [v.encode('utf-8') for v in value]
else:
bytes_value = value
feature_map[col].bytes_list.value[:] = bytes_value
elif isinstance(value[0], int):
feature_map[col].int64_list.value[:] = value
else:
feature_map[col].float_list.value[:] = value
return example | null |
5,670 | import functools
import inspect
from typing import Any, Callable, List, Optional, Tuple
from warnings import catch_warnings, simplefilter
from warnings import warn as _warn
def deprecated(_type, old, new, version, reason=None, stack_level=3):
"""a convenience function for deprecating classes, functions, arguments.
Parameters
----------
_type
should be one of class, method, function, argument
old, new
the old and new names
version
the version by which support for the old name will be
discontinued
reason
why, and what choices users have
stack_level
as per warnings.warn
"""
msg = f"{_type} {old} which will be removed in version {version}, use {new} instead"
if reason is not None:
msg = f"{msg}\nreason={reason!r}"
with catch_warnings():
simplefilter("always")
_warn(msg, DeprecationWarning, stacklevel=stack_level)
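# A short sketch of a direct call, assuming it is issued from inside the renamed callable so the
# caller of the old name sees a DeprecationWarning pointing at the replacement:
#     deprecated("function", "old_name", "new_name", "2024.6", reason="renamed for clarity")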
def METHOD_NAME(_type, old, version, reason=None, stack_level=3):
"""convenience func to warn about discontinued attributes
Parameters
----------
_type
should be one of class, method, function, argument
old
the attributes name
version
the version by which support for the old name will be
discontinued
reason
why, and what choices users have
stack_level
as per warnings.warn
"""
msg = f"{_type} {old} is discontinued and will be removed in version {version}"
if reason is not None:
msg = f"{msg}\nreason={reason!r}"
with catch_warnings():
simplefilter("always")
_warn(msg, DeprecationWarning, stacklevel=stack_level)
_discontinued = METHOD_NAME # renamed to avoid name clash with discontinued argument in deprecated args decorator
def deprecated_args(
version: str,
reason: str,
old_new: List[Tuple[str, str]] = None,
METHOD_NAME: List[str] = None,
) -> Callable[..., Any]:
"""
A decorator that marks specific arguments of a function as deprecated.
The decorator accepts a list of 2-tuples specifying the mapping of old
argument names to new argument names. When the decorated function is
called with any of the old argument names, they will be replaced with their
corresponding new names in the kwargs dictionary.
Parameters
----------
version : str
The version when the old arguments will be removed in calver
format, e.g. 'YYYY.MM'
reason : str
Reason for deprecation or guidance on what to do
    old_new : List[Tuple[str, str]]
A list of deprecated old and replacement new argument names.
discontinued : List[str]
Names of single or multiple arguments to be discontinued. This should
only be applied to arguments that have no effect.
Returns
-------
Callable[..., Any]
The decorated function.
Warnings
--------
DeprecationWarning
A warning will be raised when the decorated function is called for
each deprecated argument used in the calling function.
Examples
--------
To use, change the signature of the function / method by removing the
deprecated / discontinued arguments. Apply the decorator to the function,
indicating the old and new the argument names.
>>> @deprecated_args('2024.1',
... 'Use new_name instead',
... old_new=[('old_arg', 'new_arg')],
... discontinued='discontinued_arg',
... )
    >>> def my_function(new_arg):
>>> # do something here
When `my_function` is called with the argument `old_arg`, a warning
will be raised indicating that the argument is deprecated and should
be replaced with `new_arg`, and `discontinued_arg` is to be
discontinued.
"""
METHOD_NAME = [METHOD_NAME] if isinstance(METHOD_NAME, str) else METHOD_NAME
old_args = dict(old_new).keys() if old_new else set()
def decorator(func: Callable[..., Any]) -> Callable[..., Any]:
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Callable[..., Any]:
if old_args & kwargs.keys():
for old, new in old_new:
if old in kwargs:
kwargs[new] = kwargs.pop(old)
deprecated("argument", old, new, version, reason)
if METHOD_NAME:
for dropped in METHOD_NAME:
if dropped in kwargs:
_discontinued("argument", dropped, version, reason)
return func(*args, **kwargs)
return wrapper
return decorator
def deprecated_callable(
version: str,
reason: str,
new: Optional[str] = None,
is_discontinued: bool = False,
stack_level=2,
) -> Callable:
"""
    A decorator that marks callables (function or method) as deprecated or discontinued.
Parameters
----------
version : str
The version when it will be removed in calver format, e.g. 'YYYY.MM'
reason : str
Reason for deprecation or guidance on what to do
new : str
If the callable is being replaced, this is the replacement, e.g. 'ClassName.new_method()'
is_discontinued : bool
If True the callable is being discontinued.
stack_level
as per warnings.warn
Returns
-------
Callable
The decorated callable.
Warnings
--------
DeprecationWarning
A warning will be raised when the decorated function is called.
Examples
--------
Here's an example of how to use the `deprecated_callable` decorator to mark the function `my_function` as deprecated
in favour of a new function.
>>> @deprecated_callable(version='2023.6', reason='function rename', new='a_function')
>>> def my_function(arg): pass
"""
def decorator(func: Callable) -> Callable:
sig = set(inspect.signature(func).parameters)
_type = "method" if sig & {"self", "cls", "klass"} else "function"
old = func.__name__
params = dict(
_type=_type,
old=old,
version=version,
reason=reason,
stack_level=stack_level,
)
if is_discontinued:
depr_func = METHOD_NAME
else:
params["new"] = new
depr_func = deprecated
@functools.wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Callable:
depr_func(**params)
return func(*args, **kwargs)
return wrapper
return decorator | null |
5,671 | import numpy as np
from rlberry.utils.jit_setup import numba_jit
from rlberry.exploration_tools.uncertainty_estimator import UncertaintyEstimator
from rlberry.exploration_tools.typing import preprocess_args
from gymnasium.spaces import Box, Discrete
from rlberry.utils.metrics import metric_lp
import rlberry
logger = rlberry.logger
@numba_jit
def map_to_representative(
state,
lp_metric,
representative_states,
n_representatives,
min_dist,
scaling,
accept_new_repr,
):
"""
Map state to representative state.
"""
dist_to_closest = np.inf
argmin = -1
for ii in range(n_representatives):
dist = metric_lp(state, representative_states[ii, :], lp_metric, scaling)
if dist < dist_to_closest:
dist_to_closest = dist
argmin = ii
max_representatives = representative_states.shape[0]
if (
dist_to_closest > min_dist
and n_representatives < max_representatives
and accept_new_repr
):
new_index = n_representatives
representative_states[new_index, :] = state
return new_index, 0.0
return argmin, dist_to_closest
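# A minimal sketch of a direct call, assuming a 2-D state space with unit scaling; the buffer of
# representative states is pre-allocated and filled in place. Illustrative only, never called.
def _map_to_representative_sketch():
    representatives = np.zeros((100, 2))  # room for at most 100 representative states
    idx, dist = map_to_representative(
        np.array([0.3, 0.7]),  # state to map
        2,                     # lp_metric: Euclidean
        representatives,
        0,                     # no representatives stored yet
        0.1,                   # min_dist
        np.ones(2),            # scaling
        True,                  # accept_new_repr
    )
    return idx, dist  # (0, 0.0): the state becomes the first representative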
class OnlineDiscretizationCounter(UncertaintyEstimator):
"""
Note: currently, only implemented for continuous (Box) states and
discrete actions.
Parameters
----------
observation_space : spaces.Box
action_space : spaces.Discrete
lp_metric: int
The metric on the state space is the one induced by the p-norm,
where p = lp_metric. Default = 2, for the Euclidean metric.
scaling: numpy.ndarray
Must have the same size as state array, used to scale the states
before computing the metric.
If None, set to:
- (env.observation_space.high - env.observation_space.low) if high
and low are bounded
- np.ones(env.observation_space.shape[0]) if high or low are
unbounded
min_dist: double
Minimum distance between two representative states
max_repr: int
Maximum number of representative states.
If None, it is set to (sqrt(d)/min_dist)**d, where d
is the dimension of the state space
rate_power : float
returns bonuses in n^power.
"""
def __init__(
self,
observation_space,
action_space,
lp_metric=2,
min_dist=0.1,
max_repr=1000,
scaling=None,
rate_power=1,
**kwargs
):
UncertaintyEstimator.__init__(self, observation_space, action_space)
assert isinstance(action_space, Discrete)
assert isinstance(observation_space, Box)
self.lp_metric = lp_metric
self.min_dist = min_dist
self.max_repr = max_repr
self.state_dim = self.observation_space.shape[0]
self.n_actions = self.action_space.n
self.rate_power = rate_power
# compute scaling, if it is None
if scaling is None:
# if high and low are bounded
if self.observation_space.is_bounded():
scaling = self.observation_space.high - self.observation_space.low
# if high or low are unbounded
else:
scaling = np.ones(self.state_dim)
else:
assert scaling.ndim == 1
assert scaling.shape[0] == self.state_dim
self.scaling = scaling
# initialize
self.n_representatives = None
self.representative_states = None
self.N_sa = None
self.METHOD_NAME()
def METHOD_NAME(self):
self.n_representatives = 0
self.representative_states = np.zeros((self.max_repr, self.state_dim))
self.N_sa = np.zeros((self.max_repr, self.n_actions))
self._overflow_warning = False
def _get_representative_state(self, state, accept_new_repr=True):
state_idx, dist_to_closest = map_to_representative(
state,
self.lp_metric,
self.representative_states,
self.n_representatives,
self.min_dist,
self.scaling,
accept_new_repr,
)
# check if new representative state
if state_idx == self.n_representatives:
self.n_representatives += 1
if self.n_representatives >= self.max_repr and (not self._overflow_warning):
logger.warning(
"OnlineDiscretizationCounter reached \
the maximum number of representative states."
)
self._overflow_warning = True
return state_idx, dist_to_closest
@preprocess_args(expected_type="numpy")
def update(self, state, action, next_state=None, reward=None, **kwargs):
state_idx, _ = self._get_representative_state(state)
self.N_sa[state_idx, action] += 1
@preprocess_args(expected_type="numpy")
def measure(self, state, action, **kwargs):
n = np.maximum(1.0, self.count(state, action))
return np.power(1 / n, self.rate_power)
def count(self, state, action):
state_idx, dist_to_closest = self._get_representative_state(
state, accept_new_repr=False
)
# if state is too far from the closest representative,
# its count is zero.
if dist_to_closest > self.min_dist:
return 0.0
return self.N_sa[state_idx, action]
def get_n_visited_states(self):
"""
Returns the number of different states sent to the .update() function.
For continuous state spaces, counts the number of different discretized states.
"""
n_visited_states = (self.N_sa.sum(axis=1) > 0).sum()
return n_visited_states
def get_entropy(self):
"""
Returns the entropy of the empirical distribution over states, induced by the state counts.
Uses log2.
"""
visited = self.N_sa.sum(axis=1) > 0
if visited.sum() == 0.0:
return 0.0
# number of visits of visited states only
n_visits = self.N_sa[visited, :].sum(axis=1)
# empirical distribution
dist = n_visits / n_visits.sum()
entropy = (-dist * np.log2(dist)).sum()
return entropy | null |
5,672 | import os
from libensemble.resources import node_resources
from libensemble.resources.env_resources import EnvResources
def setup_standalone_run():
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = ""
def teardown_standalone_run():
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = ""
def setup_function(function):
print(f"setup_function function:{function.__name__}")
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = ""
def teardown_function(function):
print(f"teardown_function function:{function.__name__}")
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = ""
# Tests ========================================================================================
def test_get_cpu_resources_from_env_empty():
# Test empty call
cores_info = node_resources._get_cpu_resources_from_env()
assert cores_info is None, "cores_info should be None"
def test_get_cpu_resources_from_env_lsf():
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = "batch5" + " g06n02" * 42
exp_out = (42, 42)
env_resources1 = EnvResources(
nodelist_env_slurm="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_cobalt="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf="LIBE_RESOURCES_TEST_NODE_LIST",
nodelist_env_lsf_shortform="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
)
cores_info = node_resources._get_cpu_resources_from_env(env_resources=env_resources1)
assert cores_info == exp_out, "cores_info returned does not match expected"
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = "batch5" + " g06n02" * 42 + " h21n18" * 42
env_resources2 = EnvResources(
nodelist_env_slurm="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_cobalt="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf="LIBE_RESOURCES_TEST_NODE_LIST",
nodelist_env_lsf_shortform="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
)
cores_info = node_resources._get_cpu_resources_from_env(env_resources=env_resources2)
assert cores_info == exp_out, "cores_info returned does not match expected"
def test_get_cpu_resources_from_env_lsf_shortform():
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = "batch5 1 g06n02 42"
exp_out = (42, 42)
env_resources1 = EnvResources(
nodelist_env_slurm="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_cobalt="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf_shortform="LIBE_RESOURCES_TEST_NODE_LIST",
)
cores_info = node_resources._get_cpu_resources_from_env(env_resources=env_resources1)
assert cores_info == exp_out, "cores_info returned does not match expected"
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = "batch5 1 g06n02 42 h21n18 42"
env_resources2 = EnvResources(
nodelist_env_slurm="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_cobalt="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf_shortform="LIBE_RESOURCES_TEST_NODE_LIST",
)
cores_info = node_resources._get_cpu_resources_from_env(env_resources=env_resources2)
assert cores_info == exp_out, "cores_info returned does not match expected"
def test_get_cpu_resources_from_env_unknown_env():
os.environ["LIBE_RESOURCES_TEST_NODE_LIST"] = "knl-[0009-0012]"
env_resources = EnvResources(
nodelist_env_slurm="LIBE_RESOURCES_TEST_NODE_LIST",
nodelist_env_cobalt="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
nodelist_env_lsf_shortform="THIS_ENV_VARIABLE_IS_DEF_NOT_SET",
)
cores_info = node_resources._get_cpu_resources_from_env(env_resources=env_resources)
assert cores_info is None, "cores_info should be None"
def METHOD_NAME():
assert not node_resources._complete_set([None, None, None])
assert not node_resources._complete_set([2, None, 5])
assert not node_resources._complete_set([2, 8, None])
assert node_resources._complete_set([2, 4, 6])
assert node_resources._complete_set([2, 0, 5])
def test_cpu_info_complete():
assert not node_resources._cpu_info_complete([None, None, None])
assert not node_resources._cpu_info_complete([2, None, 5])
assert node_resources._cpu_info_complete([2, 8, None])
assert node_resources._cpu_info_complete([2, 4, 6])
def test_gpu_info_complete():
assert not node_resources._gpu_info_complete([None, None, None])
assert node_resources._gpu_info_complete([2, None, 5])
assert not node_resources._gpu_info_complete([2, 8, None])
assert node_resources._gpu_info_complete([2, 4, 6])
def test_update_values():
result = node_resources._update_values([None, 2, 3], [11, 12, 13])
assert result == [11, 12, 3], f"Unexpected result {result}"
result = node_resources._update_values([1, 2, None], [11, 12, 13])
assert result == [1, 2, 13], f"Unexpected result {result}"
def test_update_from_str():
result = node_resources._update_from_str([None, 2, 3], "11 12 13")
assert result == [11, 12, 3], f"Unexpected result {result}"
result = node_resources._update_from_str([1, 2, None], "11 12 13")
assert result == [1, 2, 13], f"Unexpected result {result}"
if __name__ == "__main__":
setup_standalone_run()
test_get_cpu_resources_from_env_empty()
test_get_cpu_resources_from_env_lsf()
test_get_cpu_resources_from_env_lsf_shortform()
test_get_cpu_resources_from_env_unknown_env()
METHOD_NAME()
test_cpu_info_complete()
test_gpu_info_complete()
test_update_values()
test_update_from_str()
teardown_standalone_run() | null |
5,673 | import asyncio
import pytest
import zigpy.device
import zigpy.types as t
import zigpy.zdo as zdo
import zigpy.zdo.types as zdo_types
from .async_mock import AsyncMock, MagicMock, patch, sentinel
def test_commands():
for cmdid, cmdspec in zdo.types.CLUSTERS.items():
assert 0 <= cmdid <= 0xFFFF
assert isinstance(cmdspec, tuple)
for paramname, paramtype in zip(cmdspec[0], cmdspec[1]):
assert isinstance(paramname, str)
assert hasattr(paramtype, "serialize")
assert hasattr(paramtype, "deserialize")
@pytest.fixture
def METHOD_NAME(app):
ieee = t.EUI64(map(t.uint8_t, [0, 1, 2, 3, 4, 5, 6, 7]))
dev = zigpy.device.Device(app, ieee, 65535)
dev.request = AsyncMock()
app.devices[dev.ieee] = dev
return zdo.ZDO(dev)
def test_deserialize(METHOD_NAME):
hdr, args = METHOD_NAME.deserialize(2, b"\x01\x02\x03xx")
assert hdr.tsn == 1
assert hdr.is_reply is False
assert args == [0x0302]
def test_deserialize_unknown(METHOD_NAME):
with pytest.raises(ValueError):
hdr, args = METHOD_NAME.deserialize(0x0100, b"\x01")
async def test_request(METHOD_NAME):
await METHOD_NAME.request(2, 65535)
app = METHOD_NAME._device._application
assert METHOD_NAME.device.request.call_count == 1
assert app.get_sequence.call_count == 1
async def test_bind(METHOD_NAME):
cluster = MagicMock()
cluster.endpoint.endpoint_id = 1
cluster.cluster_id = 1026
await METHOD_NAME.bind(cluster)
assert METHOD_NAME.device.request.call_count == 1
assert METHOD_NAME.device.request.call_args[0][1] == 0x0021
async def test_unbind(METHOD_NAME):
cluster = MagicMock()
cluster.endpoint.endpoint_id = 1
cluster.cluster_id = 1026
await METHOD_NAME.unbind(cluster)
assert METHOD_NAME.device.request.call_count == 1
assert METHOD_NAME.device.request.call_args[0][1] == 0x0022
@pytest.mark.parametrize(
"remove_children, rejoin, flags",
(
(False, False, 0),
(False, True, 0x80),
(True, False, 0x40),
(True, True, 0xC0),
),
)
async def test_leave(METHOD_NAME, remove_children, rejoin, flags):
"""Test ZDO leave request options."""
with patch.object(METHOD_NAME, "request", AsyncMock()) as req_mock:
await METHOD_NAME.leave(remove_children, rejoin)
assert req_mock.await_count == 1
assert req_mock.await_args[0][0] == 0x0034
assert req_mock.await_args[0][1] == t.EUI64.convert("07:06:05:04:03:02:01:00")
assert req_mock.await_args[0][2] == flags
async def test_permit(METHOD_NAME):
await METHOD_NAME.permit()
assert METHOD_NAME.device.request.call_count == 1
assert METHOD_NAME.device.request.call_args[0][1] == 0x0036
async def test_broadcast(app):
await zigpy.zdo.broadcast(app, 0x0036, 0, 0, 60, 0)
assert app.send_packet.call_count == 1
packet = app.send_packet.mock_calls[0].args[0]
assert packet.dst.addr_mode == t.AddrMode.Broadcast
assert packet.cluster_id == 0x0036
def _handle_match_desc(METHOD_NAME, profile):
METHOD_NAME.reply = AsyncMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Match_Desc_req
METHOD_NAME.handle_message(5, 0x0006, hdr, [None, profile, [], []])
assert METHOD_NAME.reply.call_count == 1
async def test_handle_match_desc_zha(METHOD_NAME):
_handle_match_desc(METHOD_NAME, 260)
await asyncio.wait(asyncio.all_tasks(), return_when=asyncio.FIRST_COMPLETED)
assert METHOD_NAME.reply.await_count == 1
assert METHOD_NAME.reply.call_args[0][3]
async def test_handle_match_desc_generic(METHOD_NAME):
_handle_match_desc(METHOD_NAME, 0)
await asyncio.wait(asyncio.all_tasks(), return_when=asyncio.FIRST_COMPLETED)
assert METHOD_NAME.reply.await_count == 1
assert not METHOD_NAME.reply.call_args[0][3]
async def test_handle_nwk_addr(METHOD_NAME):
ieee = METHOD_NAME._device.application.state.node_info.ieee
METHOD_NAME.reply = MagicMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.NWK_addr_req
METHOD_NAME.handle_message(5, 0x0000, hdr, [ieee, 0x00])
assert METHOD_NAME.reply.call_count == 1
async def test_handle_ieee_addr(METHOD_NAME):
nwk = METHOD_NAME._device.application.state.node_info.nwk
METHOD_NAME.reply = MagicMock()
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.IEEE_addr_req
METHOD_NAME.handle_message(5, 0x0001, hdr, [nwk, 0x00])
assert METHOD_NAME.reply.call_count == 1
def test_handle_announce(METHOD_NAME):
dev = METHOD_NAME._device
listener = MagicMock()
METHOD_NAME.add_listener(listener)
dev._application.devices.pop(dev.ieee)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Device_annce
METHOD_NAME.handle_message(
5, 0x0013, hdr, [dev.nwk, dev.ieee, 0], dst_addressing=sentinel.dst_addr
)
assert listener.device_announce.call_count == 1
assert listener.device_announce.call_args[0][0] is dev
assert listener.zdo_device_annce.call_count == 1
assert listener.zdo_device_annce.call_args[0][0] is dev
assert listener.zdo_device_annce.call_args[0][1] is sentinel.dst_addr
assert listener.zdo_device_annce.call_args[0][2] is hdr
assert listener.zdo_device_annce.call_args[0][3] == [dev.nwk, dev.ieee, 0]
def test_handle_permit_join(METHOD_NAME):
listener = MagicMock()
METHOD_NAME.add_listener(listener)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd.Mgmt_Permit_Joining_req
METHOD_NAME.handle_message(5, 0x0036, hdr, [100, 1])
assert listener.permit_duration.call_count == 1
def test_handle_unsupported(METHOD_NAME):
listener = MagicMock()
METHOD_NAME.add_listener(listener)
hdr = MagicMock()
hdr.command_id = zdo_types.ZDOCmd(0xFFFF)
assert hdr.command_id not in list(zdo_types.ZDOCmd)
METHOD_NAME.request = MagicMock()
METHOD_NAME.reply = MagicMock()
METHOD_NAME.handle_message(5, 0xFFFF, hdr, [])
assert listener.zdo_undefined_0xffff.call_count == 1
assert METHOD_NAME.request.call_count == 0
assert METHOD_NAME.reply.call_count == 0
def test_device_accessor(METHOD_NAME):
assert METHOD_NAME.device.nwk == 65535
async def test_reply(METHOD_NAME):
METHOD_NAME.device.request = AsyncMock()
await METHOD_NAME.reply(0x0005)
assert METHOD_NAME.device.request.call_count == 1
def test_get_attr_error(METHOD_NAME):
with pytest.raises(AttributeError):
METHOD_NAME.no_such_attribute()
async def test_reply_tsn_override(METHOD_NAME, monkeypatch):
clusters = MagicMock()
clusters.__getitem__.return_value = (
sentinel.param_names,
sentinel.scheam,
)
monkeypatch.setattr(zdo_types, "CLUSTERS", clusters)
mock_ser = MagicMock()
mock_ser.return_value = b"\xaa\x55"
monkeypatch.setattr(t, "serialize", mock_ser)
await METHOD_NAME.reply(sentinel.cmd, sentinel.arg1, sentinel.arg2)
seq = METHOD_NAME.device.request.call_args[0][4]
data = METHOD_NAME.device.request.call_args[0][5]
assert seq == 123
assert data[0] == 123
assert data[1:3] == b"\xaa\x55"
# override tsn
tsn = 0x23
await METHOD_NAME.reply(sentinel.cmd, sentinel.arg1, sentinel.arg2, tsn=tsn)
seq = METHOD_NAME.device.request.call_args[0][4]
data = METHOD_NAME.device.request.call_args[0][5]
assert seq == tsn
assert data[0] == tsn
assert data[1:3] == b"\xaa\x55" | null |
5,674 | """Test common.ensure_stdout_handles_unicode"""
from __future__ import print_function
import unittest
import sys
from subprocess import check_call, CalledProcessError
from tempfile import mkstemp
import os
from os.path import isfile
from contextlib import contextmanager
FILE_TEXT = u'The unicode check mark is \u2713.\n'
@contextmanager
def temp_file(just_name=True):
"""Context manager that creates temp file and deletes it in the end"""
tmp_descriptor = None
tmp_name = None
tmp_handle = None
try:
tmp_descriptor, tmp_name = mkstemp()
# we create our own file handle since we want to be able to close the
# file and open it again for reading.
# We keep the os-level descriptor open so file name is still reserved
# for us
if just_name:
yield tmp_name
else:
tmp_handle = open(tmp_name, 'wb')
yield tmp_handle, tmp_name
except Exception:
raise
finally:
if tmp_descriptor is not None:
os.close(tmp_descriptor)
if tmp_handle is not None:
tmp_handle.close()
if tmp_name is not None and isfile(tmp_name):
os.unlink(tmp_name)
class TestEncodingHandler(unittest.TestCase):
"""Tests replacing stdout encoding in various scenarios"""
def test_print(self):
"""Test regular unicode output not raise error"""
check_call('{python} {this_file} print'.format(python=sys.executable,
this_file=__file__),
shell=True)
def test_print_redirect(self):
"""
Test redirection of unicode output to files does not raise error
TODO: test this on non-linux OSs
"""
with temp_file() as tmp_file:
check_call('{python} {this_file} print > {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
@unittest.skipIf(not sys.platform.startswith('linux'),
                     'Only tested on linux so far')
def test_print_no_lang(self):
"""
        Test unicode output in a C-locale (LANG=C) environment does not raise error
TODO: Adapt this for other OSs; for win create batch script
"""
check_call('LANG=C {python} {this_file} print'
.format(python=sys.executable, this_file=__file__),
shell=True)
def test_uopen(self):
"""Test that uopen in a nice environment is ok"""
with temp_file(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
try:
check_call('{python} {this_file} read {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
def METHOD_NAME(self):
"""
Test redirection of unicode output to files does not raise error
TODO: test this on non-linux OSs
"""
with temp_file(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
with temp_file() as redirect_file:
try:
check_call(
'{python} {this_file} read {tmp_file} >{redirect_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file, redirect_file=redirect_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
@unittest.skipIf(not sys.platform.startswith('linux'),
                     'Only tested on linux so far')
def test_uopen_no_lang(self):
"""
Test that uopen in a C-LANG environment is ok
TODO: Adapt this for other OSs; for win create batch script
"""
with temp_file(False) as (tmp_handle, tmp_file):
tmp_handle.write(FILE_TEXT.encode('utf8'))
tmp_handle.close()
try:
check_call('LANG=C {python} {this_file} read {tmp_file}'
.format(python=sys.executable, this_file=__file__,
tmp_file=tmp_file),
shell=True)
except CalledProcessError as cpe:
self.fail(cpe.output)
def run_read(filename):
"""This is called from test_uopen* tests as script. Reads text, compares"""
from oletools.common.io_encoding import uopen
# open file
with uopen(filename, 'rt') as reader:
# a few tests
if reader.closed:
raise ValueError('handle is closed!')
if reader.name != filename:
raise ValueError('Wrong filename {}'.format(reader.name))
if reader.isatty():
raise ValueError('Reader is a tty!')
if reader.tell() != 0:
raise ValueError('Reader.tell is not 0 at beginning')
# read text
text = reader.read()
# a few more tests
if not reader.closed:
raise ValueError('Reader is not closed outside context')
if reader.name != filename:
raise ValueError('Wrong filename {} after context'.format(reader.name))
# the following test raises an exception because reader is closed, so isatty cannot be called:
# if reader.isatty():
# raise ValueError('Reader has become a tty!')
# compare text
if sys.version_info.major <= 2: # in python2 get encoded byte string
expect = FILE_TEXT.encode('utf8')
else: # python3: should get real unicode
expect = FILE_TEXT
if text != expect:
raise ValueError('Wrong contents: {!r} != {!r}'
.format(text, expect))
return 0
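# A short invocation sketch mirroring the __main__ dispatch at the bottom of this file
# (the file path is whatever this module is saved as):
#     python <this_file> print
#     python <this_file> read /tmp/some_utf8_file.txt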
def run_print():
"""This is called from test_read* tests as script. Prints & logs unicode"""
from oletools.common.io_encoding import ensure_stdout_handles_unicode
from oletools.common.log_helper import log_helper
ensure_stdout_handles_unicode()
print(u'Check: \u2713') # print check mark
# check logging as well
logger = log_helper.get_or_create_silent_logger('test_encoding_handler')
log_helper.enable_logging(False, 'debug', stream=sys.stdout)
logger.info(u'Check: \u2713')
return 0
# tests call this file as script
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit(unittest.main())
# hack required to import common from parent dir, not system-wide one
# (usually unittest seems to do that for us)
from os.path import abspath, dirname, join
ole_base = dirname(dirname(dirname(abspath(__file__))))
sys.path.insert(0, ole_base)
if sys.argv[1] == 'print':
if len(sys.argv) > 2:
print('Expect no arg for "print"', file=sys.stderr)
sys.exit(2)
sys.exit(run_print())
elif sys.argv[1] == 'read':
if len(sys.argv) != 3:
print('Expect single arg for "read"', file=sys.stderr)
sys.exit(2)
sys.exit(run_read(sys.argv[2]))
else:
print('Unexpected argument: {}'.format(sys.argv[1]), file=sys.stderr)
sys.exit(2) | null |
5,675 | import fnmatch
import os
from conan.internal.conan_app import ConanApp
from conans.client.cache.remote_registry import Remote
from conans.client.cmd.user import user_set, users_clean, users_list
from conans.errors import ConanException
class RemotesAPI:
def __init__(self, conan_api):
self.conan_api = conan_api
def list(self, pattern=None, only_enabled=True):
"""
:param pattern: if None, all remotes will be listed
it can be a single value or a list of values
        :param only_enabled: if True, remotes marked as disabled are filtered out
        :return: list of Remote objects matching the pattern
"""
app = ConanApp(self.conan_api.cache_folder)
remotes = app.cache.remotes_registry.list()
if only_enabled:
remotes = [r for r in remotes if not r.disabled]
if pattern:
filtered_remotes = []
patterns = [pattern] if isinstance(pattern, str) else pattern
for p in patterns:
is_match = False
for remote in remotes:
if fnmatch.fnmatch(remote.name, p):
is_match = True
if remote not in filtered_remotes:
filtered_remotes.append(remote)
if not is_match:
if "*" in p or "?" in p:
if only_enabled:
raise ConanException(
f"Remotes for pattern '{p}' can't be found or are disabled")
else:
raise ConanException(f"Remote '{p}' can't be found or is disabled")
remotes = filtered_remotes
return remotes
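    # A small usage sketch, assuming the enclosing ConanAPI instance exposes this object as
    # `conan_api.remotes` (attribute name assumed here):
    #     remotes = conan_api.remotes.list("conancenter*")       # glob over remote names
    #     all_remotes = conan_api.remotes.list(only_enabled=False)  # include disabled remotes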
def METHOD_NAME(self, pattern):
remotes = self.list(pattern, only_enabled=False)
for r in remotes:
r.disabled = True
self.update(r.name, disabled=True)
return remotes
def enable(self, pattern):
remotes = self.list(pattern, only_enabled=False)
for r in remotes:
r.disabled = False
self.update(r.name, disabled=False)
return remotes
def get(self, remote_name):
app = ConanApp(self.conan_api.cache_folder)
return app.cache.remotes_registry.read(remote_name)
def add(self, remote: Remote, force=False, index=None):
app = ConanApp(self.conan_api.cache_folder)
app.cache.remotes_registry.add(remote, force=force, index=index)
def remove(self, pattern: str):
app = ConanApp(self.conan_api.cache_folder)
remotes = self.list(pattern, only_enabled=False)
for remote in remotes:
app.cache.remotes_registry.remove(remote.name)
users_clean(app.cache.localdb, remote.url)
def update(self, remote_name, url=None, secure=None, disabled=None, index=None):
app = ConanApp(self.conan_api.cache_folder)
app.cache.remotes_registry.update(remote_name, url, secure, disabled=disabled,
index=index)
def rename(self, remote_name: str, new_name: str):
app = ConanApp(self.conan_api.cache_folder)
app.cache.remotes_registry.rename(remote_name, new_name)
def user_info(self, remote: Remote):
app = ConanApp(self.conan_api.cache_folder)
return users_list(app.cache.localdb, remotes=[remote])[0]
def login(self, remote: Remote, username, password):
app = ConanApp(self.conan_api.cache_folder)
app.remote_manager.authenticate(remote, username, password)
def logout(self, remote: Remote):
app = ConanApp(self.conan_api.cache_folder)
# The localdb only stores url + username + token, not remote name, so use URL as key
users_clean(app.cache.localdb, remote.url)
def user_set(self, remote: Remote, username):
app = ConanApp(self.conan_api.cache_folder)
return user_set(app.cache.localdb, username, remote)
def auth(self, remote: Remote, with_user=False):
app = ConanApp(self.conan_api.cache_folder)
if with_user:
user, token, _ = app.cache.localdb.get_login(remote.url)
if not user:
var_name = f"CONAN_LOGIN_USERNAME_{remote.name.upper()}"
user = os.getenv(var_name, None) or os.getenv("CONAN_LOGIN_USERNAME", None)
if not user:
return
app.remote_manager.check_credentials(remote)
user, token, _ = app.cache.localdb.get_login(remote.url)
return user | null |
5,676 | import pytest
import torch
from kornia.filters import MedianBlur, median_blur
from kornia.testing import BaseTester, tensor_to_gradcheck_var
class TestMedianBlur(BaseTester):
def test_smoke(self, device, dtype):
inp = torch.zeros(1, 3, 4, 4, device=device, dtype=dtype)
actual = median_blur(inp, 3)
assert isinstance(actual, torch.Tensor)
@pytest.mark.parametrize('batch_size', [1, 2])
@pytest.mark.parametrize('kernel_size', [3, (5, 7)])
def test_cardinality(self, batch_size, kernel_size, device, dtype):
inp = torch.zeros(batch_size, 3, 4, 4, device=device, dtype=dtype)
actual = median_blur(inp, kernel_size)
assert actual.shape == (batch_size, 3, 4, 4)
def test_exception(self, device, dtype):
with pytest.raises(TypeError) as errinfo:
median_blur(1, 1)
assert 'Not a Tensor type.' in str(errinfo)
with pytest.raises(TypeError) as errinfo:
median_blur(torch.ones(1, 1, device=device, dtype=dtype), 1)
assert 'shape must be [[\'B\', \'C\', \'H\', \'W\']].' in str(errinfo)
def test_kernel_3x3(self, device, dtype):
inp = torch.tensor(
[
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 3.0, 7.0, 5.0, 0.0],
[0.0, 3.0, 1.0, 1.0, 0.0],
[0.0, 6.0, 9.0, 2.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
[
[36.0, 7.0, 25.0, 0.0, 0.0],
[3.0, 14.0, 1.0, 0.0, 0.0],
[65.0, 59.0, 2.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
],
device=device,
dtype=dtype,
).repeat(2, 1, 1, 1)
kernel_size = (3, 3)
actual = median_blur(inp, kernel_size)
self.assert_close(actual[0, 0, 2, 2], torch.tensor(3.0, device=device, dtype=dtype))
self.assert_close(actual[0, 1, 1, 1], torch.tensor(14.0, device=device, dtype=dtype))
def test_kernel_3x1(self, device, dtype):
inp = torch.tensor(
[
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 3.0, 7.0, 5.0, 0.0],
[0.0, 3.0, 1.0, 1.0, 0.0],
[0.0, 6.0, 9.0, 2.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
],
device=device,
dtype=dtype,
).view(1, 1, 5, 5)
ky, kx = 3, 1
actual = median_blur(inp, (ky, kx))
self.assert_close(actual[0, 0, 2, 2], torch.tensor(7.0, device=device, dtype=dtype))
self.assert_close(actual[0, 0, 1, 1], torch.tensor(3.0, device=device, dtype=dtype))
def METHOD_NAME(self, device, dtype):
batch_size = 3
inp = torch.rand(3, 5, 5, device=device, dtype=dtype).expand(batch_size, -1, -1, -1)
kernel_size = (3, 3)
actual = median_blur(inp, kernel_size)
assert actual.is_contiguous()
def test_gradcheck(self, device):
batch_size, channels, height, width = 1, 2, 5, 4
img = torch.rand(batch_size, channels, height, width, device=device)
img = tensor_to_gradcheck_var(img) # to var
self.gradcheck(median_blur, (img, (5, 3)))
def test_module(self, device, dtype):
kernel_size = (3, 5)
img = torch.rand(2, 3, 4, 5, device=device, dtype=dtype)
op = median_blur
op_module = MedianBlur((3, 5))
actual = op_module(img)
expected = op(img, kernel_size)
self.assert_close(actual, expected)
@pytest.mark.parametrize('kernel_size', [5, (5, 7)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_dynamo(self, batch_size, kernel_size, device, dtype, torch_optimizer):
inpt = torch.ones(batch_size, 3, 10, 10, device=device, dtype=dtype)
op = MedianBlur(kernel_size)
op_optimized = torch_optimizer(op)
self.assert_close(op(inpt), op_optimized(inpt)) | null |
5,677 | """ Generic Unification algorithm for expression trees with lists of children
This implementation is a direct translation of
Artificial Intelligence: A Modern Approach by Stuart Russel and Peter Norvig
Second edition, section 9.2, page 276
It is modified in the following ways:
1. We allow associative and commutative Compound expressions. This results in
combinatorial blowup.
2. We explore the tree lazily.
3. We provide generic interfaces to symbolic algebra libraries in Python.
A more traditional version can be found here
http://aima.cs.berkeley.edu/python/logic.html
"""
from sympy.utilities.iterables import kbins
class Compound:
""" A little class to represent an interior node in the tree
This is analogous to SymPy.Basic for non-Atoms
"""
def __init__(self, op, args):
self.op = op
self.args = args
def __eq__(self, other):
return (type(self) is type(other) and self.op == other.op and
self.args == other.args)
def __hash__(self):
return hash((type(self), self.op, self.args))
def __str__(self):
return "%s[%s]" % (str(self.op), ', '.join(map(str, self.args)))
class Variable:
""" A Wild token """
def __init__(self, arg):
self.arg = arg
def __eq__(self, other):
return type(self) is type(other) and self.arg == other.arg
def __hash__(self):
return hash((type(self), self.arg))
def __str__(self):
return "Variable(%s)" % str(self.arg)
class CondVariable:
""" A wild token that matches conditionally.
arg - a wild token.
valid - an additional constraining function on a match.
"""
def __init__(self, arg, valid):
self.arg = arg
self.valid = valid
def __eq__(self, other):
return (type(self) is type(other) and
self.arg == other.arg and
self.valid == other.valid)
def __hash__(self):
return hash((type(self), self.arg, self.valid))
def __str__(self):
return "CondVariable(%s)" % str(self.arg)
def unify(x, y, s=None, **fns):
""" Unify two expressions.
Parameters
==========
x, y - expression trees containing leaves, Compounds and Variables.
s - a mapping of variables to subtrees.
Returns
=======
lazy sequence of mappings {Variable: subtree}
Examples
========
>>> from sympy.unify.core import unify, Compound, Variable
>>> expr = Compound("Add", ("x", "y"))
>>> pattern = Compound("Add", ("x", Variable("a")))
>>> next(unify(expr, pattern, {}))
{Variable(a): 'y'}
"""
s = s or {}
if x == y:
yield s
elif isinstance(x, (Variable, CondVariable)):
yield from unify_var(x, y, s, **fns)
elif isinstance(y, (Variable, CondVariable)):
yield from unify_var(y, x, s, **fns)
elif isinstance(x, Compound) and isinstance(y, Compound):
is_commutative = fns.get('is_commutative', lambda x: False)
is_associative = fns.get('is_associative', lambda x: False)
for sop in unify(x.op, y.op, s, **fns):
if is_associative(x) and is_associative(y):
a, b = (x, y) if len(x.args) < len(y.args) else (y, x)
if is_commutative(x) and is_commutative(y):
combs = allcombinations(a.args, b.args, 'commutative')
else:
combs = allcombinations(a.args, b.args, 'associative')
for aaargs, bbargs in combs:
aa = [unpack(Compound(a.op, arg)) for arg in aaargs]
bb = [unpack(Compound(b.op, arg)) for arg in bbargs]
yield from unify(aa, bb, sop, **fns)
elif len(x.args) == len(y.args):
yield from unify(x.args, y.args, sop, **fns)
elif is_args(x) and is_args(y) and len(x) == len(y):
if len(x) == 0:
yield s
else:
for shead in unify(x[0], y[0], s, **fns):
yield from unify(x[1:], y[1:], shead, **fns)
def unify_var(var, x, s, **fns):
if var in s:
yield from unify(s[var], x, s, **fns)
elif occur_check(var, x):
pass
elif isinstance(var, CondVariable) and var.valid(x):
yield assoc(s, var, x)
elif isinstance(var, Variable):
yield assoc(s, var, x)
def occur_check(var, x):
""" var occurs in subtree owned by x? """
if var == x:
return True
elif isinstance(x, Compound):
return occur_check(var, x.args)
elif is_args(x):
if any(occur_check(var, xi) for xi in x): return True
return False
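# A brief sketch of occur_check, using the Variable/Compound classes defined above:
#     >>> a = Variable('a')
#     >>> occur_check(a, Compound('Add', ('x', a)))
#     True
#     >>> occur_check(a, Compound('Add', ('x', 'y')))
#     False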
def assoc(d, key, val):
""" Return copy of d with key associated to val """
d = d.copy()
d[key] = val
return d
def is_args(x):
""" Is x a traditional iterable? """
return type(x) in (tuple, list, set)
def unpack(x):
if isinstance(x, Compound) and len(x.args) == 1:
return x.args[0]
else:
return x
def allcombinations(A, B, ordered):
"""
Restructure A and B to have the same number of elements.
Parameters
==========
ordered must be either 'commutative' or 'associative'.
A and B can be rearranged so that the larger of the two lists is
reorganized into smaller sublists.
Examples
========
>>> from sympy.unify.core import allcombinations
>>> for x in allcombinations((1, 2, 3), (5, 6), 'associative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
>>> for x in allcombinations((1, 2, 3), (5, 6), 'commutative'): print(x)
(((1,), (2, 3)), ((5,), (6,)))
(((1, 2), (3,)), ((5,), (6,)))
(((1,), (3, 2)), ((5,), (6,)))
(((1, 3), (2,)), ((5,), (6,)))
(((2,), (1, 3)), ((5,), (6,)))
(((2, 1), (3,)), ((5,), (6,)))
(((2,), (3, 1)), ((5,), (6,)))
(((2, 3), (1,)), ((5,), (6,)))
(((3,), (1, 2)), ((5,), (6,)))
(((3, 1), (2,)), ((5,), (6,)))
(((3,), (2, 1)), ((5,), (6,)))
(((3, 2), (1,)), ((5,), (6,)))
"""
if ordered == "commutative":
ordered = 11
if ordered == "associative":
ordered = None
sm, bg = (A, B) if len(A) < len(B) else (B, A)
for part in kbins(list(range(len(bg))), len(sm), ordered=ordered):
if bg == B:
yield tuple((a,) for a in A), METHOD_NAME(B, part)
else:
yield METHOD_NAME(A, part), tuple((b,) for b in B)
def METHOD_NAME(it, part):
""" Partition a tuple/list into pieces defined by indices.
Examples
========
>>> from sympy.unify.core import partition
>>> partition((10, 20, 30, 40), [[0, 1, 2], [3]])
((10, 20, 30), (40,))
"""
return type(it)([index(it, ind) for ind in part])
def index(it, ind):
""" Fancy indexing into an indexable iterable (tuple, list).
Examples
========
>>> from sympy.unify.core import index
>>> index([10, 20, 30], (1, 2, 0))
[20, 30, 10]
"""
return type(it)([it[i] for i in ind]) | null |
5,678 | import pytest
from dirty_equals import IsDict
from fastapi.testclient import TestClient
from fastapi.utils import match_pydantic_error_url
from ...utils import needs_py39
@pytest.fixture(name="client")
def get_client():
from docs_src.body_fields.tutorial001_an_py39 import app
client = TestClient(app)
return client
@needs_py39
def METHOD_NAME(client: TestClient):
response = client.put("/items/5", json={"item": {"name": "Foo", "price": 3.0}})
assert response.status_code == 200
assert response.json() == {
"item_id": 5,
"item": {"name": "Foo", "price": 3.0, "description": None, "tax": None},
}
@needs_py39
def test_items_6(client: TestClient):
response = client.put(
"/items/6",
json={
"item": {
"name": "Bar",
"price": 0.2,
"description": "Some bar",
"tax": "5.4",
}
},
)
assert response.status_code == 200
assert response.json() == {
"item_id": 6,
"item": {
"name": "Bar",
"price": 0.2,
"description": "Some bar",
"tax": 5.4,
},
}
@needs_py39
def test_invalid_price(client: TestClient):
response = client.put("/items/5", json={"item": {"name": "Foo", "price": -3.0}})
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "greater_than",
"loc": ["body", "item", "price"],
"msg": "Input should be greater than 0",
"input": -3.0,
"ctx": {"gt": 0.0},
"url": match_pydantic_error_url("greater_than"),
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"ctx": {"limit_value": 0},
"loc": ["body", "item", "price"],
"msg": "ensure this value is greater than 0",
"type": "value_error.number.not_gt",
}
]
}
)
@needs_py39
def test_openapi_schema(client: TestClient):
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/items/{item_id}": {
"put": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Update Item",
"operationId": "update_item_items__item_id__put",
"parameters": [
{
"required": True,
"schema": {"title": "Item Id", "type": "integer"},
"name": "item_id",
"in": "path",
}
],
"requestBody": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/Body_update_item_items__item_id__put"
}
}
},
"required": True,
},
}
}
},
"components": {
"schemas": {
"Item": {
"title": "Item",
"required": ["name", "price"],
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"},
"description": IsDict(
{
"title": "The description of the item",
"anyOf": [
{"maxLength": 300, "type": "string"},
{"type": "null"},
],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{
"title": "The description of the item",
"maxLength": 300,
"type": "string",
}
),
"price": {
"title": "Price",
"exclusiveMinimum": 0.0,
"type": "number",
"description": "The price must be greater than zero",
},
"tax": IsDict(
{
"title": "Tax",
"anyOf": [{"type": "number"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Tax", "type": "number"}
),
},
},
"Body_update_item_items__item_id__put": {
"title": "Body_update_item_items__item_id__put",
"required": ["item"],
"type": "object",
"properties": {"item": {"$ref": "#/components/schemas/Item"}},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
}
},
} | null |
5,679 | import argparse
from collections import defaultdict
import pysam
def Parser():
the_parser = argparse.ArgumentParser()
the_parser.add_argument(
'--input', action="store", type=str, help="bam alignment file")
the_parser.add_argument(
'--minquery', type=int,
help="Minimum readsize of query reads (nt) - must be an integer")
the_parser.add_argument(
'--maxquery', type=int,
help="Maximum readsize of query reads (nt) - must be an integer")
the_parser.add_argument(
'--mintarget', type=int,
help="Minimum readsize of target reads (nt) - must be an integer")
the_parser.add_argument(
'--maxtarget', type=int,
help="Maximum readsize of target reads (nt) - must be an integer")
the_parser.add_argument(
'--overlap', type=int,
help="Overlap analyzed (nt) - must be an integer")
the_parser.add_argument(
'--output', action="store", type=str,
help="Pairable sequences")
args = the_parser.parse_args()
return args
class Map:
def __init__(self, bam_file, output, minquery=23, maxquery=29,
mintarget=23, maxtarget=29, overlap=10):
self.bam_object = pysam.AlignmentFile(bam_file, 'rb')
self.output = output
self.query_range = range(minquery, maxquery + 1)
self.target_range = range(mintarget, maxtarget + 1)
self.overlap = overlap
self.chromosomes = dict(zip(self.bam_object.references,
self.bam_object.lengths))
self.alignement_dic = self.index_alignments(self.bam_object)
self.all_query_positions = self.query_positions(self.bam_object,
overlap=self.overlap)
self.readdic = self.make_readdic(self.bam_object)
self.pairing()
def make_readdic(self, bam_object):
readdic = defaultdict(int)
for read in bam_object.fetch():
readdic[read.query_sequence] += 1
return readdic
def index_alignments(self, bam_object):
'''
dic[(chrom, pos, polarity)]: [readseq1, readseq2, ...]
        each list value is later converted to a set
'''
dic = defaultdict(list)
for chrom in self.chromosomes:
for read in bam_object.fetch(chrom):
if read.is_reverse:
coord = read.reference_end-1
pol = 'R'
else:
coord = read.reference_start
pol = 'F'
dic[(chrom, coord, pol)].append(read.query_sequence)
for key in dic:
dic[key] = set(dic[key])
return dic
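    # Illustrative shape of the index built above (chromosome name and read
    # sequences are hypothetical): alignement_dic maps (chrom, coordinate,
    # polarity) to the set of read sequences starting (forward) or ending
    # (reverse) at that coordinate, e.g.
    #   {("2L", 1200, "F"): {"TACGT...", ...}, ("2L", 1209, "R"): {"ACGGA...", ...}}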
def query_positions(self, bam_object, overlap):
all_query_positions = defaultdict(list)
for genomicKey in list(self.alignement_dic):
chrom, coord, pol = genomicKey
if pol == 'F' and len(self.alignement_dic[(chrom,
coord+overlap-1,
'R')]) > 0:
all_query_positions[chrom].append(coord)
for chrom in all_query_positions:
all_query_positions[chrom] = sorted(
list(set(all_query_positions[chrom])))
return all_query_positions
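    # Worked example for the overlap arithmetic above (coordinates 0-based and
    # purely illustrative): with overlap=10, a forward read starting at
    # position 100 is only kept as a query position if at least one reverse
    # read ends at 100 + 10 - 1 = 109, i.e. the two reads overlap by 10 nt.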
def countpairs(self, uppers, lowers):
query_range = self.query_range
target_range = self.target_range
uppers = [seq for seq in uppers if (len(seq) in query_range or len(seq)
in target_range)]
uppers_expanded = []
for seq in uppers:
expand = [seq for i in range(self.readdic[seq])]
uppers_expanded.extend(expand)
uppers = uppers_expanded
lowers = [seq for seq in lowers if (len(seq) in query_range or len(seq)
in target_range)]
lowers_expanded = []
for seq in lowers:
expand = [seq for i in range(self.readdic[seq])]
lowers_expanded.extend(expand)
lowers = lowers_expanded
paired = []
for upread in uppers:
for downread in lowers:
if (len(upread) in query_range and len(downread) in
target_range) or (len(upread) in target_range and
len(downread) in query_range):
paired.append(upread)
lowers.remove(downread)
break
return len(paired)
def pairing(self):
F = open(self.output, 'w')
query_range = self.query_range
target_range = self.target_range
overlap = self.overlap
stringresult = []
header_template = '>%s|coord=%s|strand %s|size=%s|nreads=%s\n%s\n'
total_pairs = 0
for chrom in sorted(self.chromosomes):
number_pairs = 0
for pos in self.all_query_positions[chrom]:
stringbuffer = []
uppers = self.alignement_dic[chrom, pos, 'F']
lowers = self.alignement_dic[chrom, pos+overlap-1, 'R']
number_pairs += self.countpairs(uppers, lowers)
total_pairs += number_pairs
if uppers and lowers:
for upread in uppers:
for downread in lowers:
if (len(upread) in query_range and len(downread) in
target_range) or (len(upread) in target_range
and len(downread) in
query_range):
stringbuffer.append(
header_template %
(chrom, pos+1, '+', len(upread),
self.readdic[upread], upread))
stringbuffer.append(
header_template %
(chrom, pos+overlap-len(downread)+1, '-',
len(downread), self.readdic[downread],
self.METHOD_NAME(downread)))
stringresult.extend(sorted(set(stringbuffer)))
F.write(''.join(stringresult))
def METHOD_NAME(self, sequence):
antidict = {"A": "T", "T": "A", "G": "C", "C": "G", "N": "N"}
revseq = sequence[::-1]
return "".join([antidict[i] for i in revseq])
if __name__ == "__main__":
args = Parser()
mapobj = Map(args.input, args.output, args.minquery, args.maxquery,
args.mintarget, args.maxtarget, args.overlap) | null |
5,680 | from .cartan_type import Standard_Cartan
from sympy.core.backend import Matrix, Rational
class TypeF(Standard_Cartan):
def __new__(cls, n):
if n != 4:
raise ValueError("n should be 4")
return Standard_Cartan.__new__(cls, "F", 4)
def dimension(self):
"""Dimension of the vector space V underlying the Lie algebra
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.dimension()
4
"""
return 4
def basic_root(self, i, j):
"""Generate roots with 1 in ith position and -1 in jth position
"""
n = self.n
root = [0]*n
root[i] = 1
root[j] = -1
return root
def simple_root(self, i):
"""The ith simple root of F_4
        Every Lie algebra has a unique root system.
Given a root system Q, there is a subset of the
roots such that an element of Q is called a
simple root if it cannot be written as the sum
of two elements in Q. If we let D denote the
set of simple roots, then it is clear that every
element of Q can be written as a linear combination
of elements of D with all coefficients non-negative.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("F4")
>>> c.simple_root(3)
[0, 0, 0, 1]
"""
if i < 3:
return self.basic_root(i-1, i)
if i == 3:
root = [0]*4
root[3] = 1
return root
if i == 4:
root = [Rational(-1, 2)]*4
return root
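    # A reading of the branches above (for reference, not an independent
    # statement of F4 conventions): the four simple roots returned are
    #   i=1 -> [1, -1, 0, 0], i=2 -> [0, 1, -1, 0], i=3 -> [0, 0, 0, 1],
    #   i=4 -> [-1/2, -1/2, -1/2, -1/2]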
def positive_roots(self):
"""Generate all the positive roots of A_n
This is half of all of the roots of F_4; by multiplying all the
positive roots by -1 we get the negative roots.
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType("A3")
>>> c.positive_roots()
{1: [1, -1, 0, 0], 2: [1, 0, -1, 0], 3: [1, 0, 0, -1], 4: [0, 1, -1, 0],
5: [0, 1, 0, -1], 6: [0, 0, 1, -1]}
"""
n = self.n
posroots = {}
k = 0
for i in range(0, n-1):
for j in range(i+1, n):
k += 1
posroots[k] = self.basic_root(i, j)
k += 1
root = self.basic_root(i, j)
root[j] = 1
posroots[k] = root
for i in range(0, n):
k += 1
root = [0]*n
root[i] = 1
posroots[k] = root
k += 1
root = [Rational(1, 2)]*n
posroots[k] = root
for i in range(1, 4):
k += 1
root = [Rational(1, 2)]*n
root[i] = Rational(-1, 2)
posroots[k] = root
posroots[k+1] = [Rational(1, 2), Rational(1, 2), Rational(-1, 2), Rational(-1, 2)]
posroots[k+2] = [Rational(1, 2), Rational(-1, 2), Rational(1, 2), Rational(-1, 2)]
posroots[k+3] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(1, 2)]
posroots[k+4] = [Rational(1, 2), Rational(-1, 2), Rational(-1, 2), Rational(-1, 2)]
return posroots
def roots(self):
"""
Returns the total number of roots for F_4
"""
return 48
def cartan_matrix(self):
"""The Cartan matrix for F_4
        The Cartan matrix for a Lie algebra is
        generated by assigning an ordering to the simple
        roots, (alpha[1], ...., alpha[l]). Then the ij-th
        entry of the Cartan matrix is (<alpha[i], alpha[j]>).
Examples
========
>>> from sympy.liealgebras.cartan_type import CartanType
>>> c = CartanType('A4')
>>> c.cartan_matrix()
Matrix([
[ 2, -1, 0, 0],
[-1, 2, -1, 0],
[ 0, -1, 2, -1],
[ 0, 0, -1, 2]])
"""
m = Matrix( 4, 4, [2, -1, 0, 0, -1, 2, -2, 0, 0,
-1, 2, -1, 0, 0, -1, 2])
return m
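    # The flat list above, read row by row, encodes this 4x4 matrix:
    #   [ 2, -1,  0,  0]
    #   [-1,  2, -2,  0]
    #   [ 0, -1,  2, -1]
    #   [ 0,  0, -1,  2]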
def basis(self):
"""
Returns the number of independent generators of F_4
"""
return 52
def METHOD_NAME(self):
diag = "0---0=>=0---0\n"
diag += " ".join(str(i) for i in range(1, 5))
return diag | null |
5,681 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
"""
Controls the dynamic display of error bars for lines on the plot.
"""
from matplotlib.container import ErrorbarContainer
from matplotlib.lines import Line2D
from mantid.plots import datafunctions, MantidAxes
from mantid.plots.datafunctions import get_data_from_errorbar_container, set_errorbars_hidden
from mantid.plots.legend import LegendProperties
from mantidqt.widgets.plotconfigdialog.curvestabwidget import curve_has_errors, CurveProperties, remove_curve_from_ax
class FigureErrorsManager(object):
AXES_NOT_MANTIDAXES_ERR_MESSAGE = "Plot axes are not MantidAxes. There is no way to automatically load error data."
def __init__(self, canvas):
self.canvas = canvas
@classmethod
def toggle_all_errors(cls, ax, make_visible):
active_lines = cls.get_curves_from_ax(ax)
for line in active_lines:
if curve_has_errors(line):
cls.toggle_error_bars_for(ax, line, make_visible)
@classmethod
def toggle_error_bars_for(cls, ax, curve, make_visible=None):
# get legend properties
if ax.legend_:
legend_props = LegendProperties.from_legend(ax.legend_)
else:
legend_props = None
if isinstance(curve, Line2D):
curve_index = ax.get_lines().index(curve)
else:
curve_index = ax.get_lines().index(curve[0])
# get all curve properties
curve_props = CurveProperties.from_curve(curve)
# and remove the ones that matplotlib doesn't recognise
plot_kwargs = curve_props.get_plot_kwargs()
new_curve = cls.replot_curve(ax, curve, plot_kwargs)
if isinstance(ax, MantidAxes):
errorbar_cap_lines = datafunctions.remove_and_return_errorbar_cap_lines(ax)
else:
errorbar_cap_lines = []
ax.lines.insert(curve_index, ax.lines.pop())
if isinstance(ax, MantidAxes) and ax.is_waterfall():
datafunctions.convert_single_line_to_waterfall(ax, curve_index)
for cap in errorbar_cap_lines:
ax.add_line(cap)
# Inverts either the current state of hide_errors
# or the make_visible kwarg that forces a state:
# If make visible is True, then hide_errors must be False
# for the intended effect
curve_props.hide_errors = not curve_props.hide_errors if make_visible is None else not make_visible
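        # Illustrative effect of the line above:
        #   make_visible=None  -> hide_errors is toggled
        #   make_visible=True  -> hide_errors=False (error bars shown)
        #   make_visible=False -> hide_errors=True  (error bars hidden)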
cls.toggle_errors(new_curve, curve_props)
cls.METHOD_NAME(ax, legend_props)
def update_plot_after(self, func, *args, **kwargs):
"""
Updates the legend and the plot after the function has been executed.
        Used to funnel all updates through a common place.
:param func: Function to be executed, before updating the plot
:param args: Arguments forwarded to the function
:param kwargs: Keyword arguments forwarded to the function
"""
func(*args, **kwargs)
self.canvas.draw()
@staticmethod
def _supported_ax(ax):
return hasattr(ax, "creation_args")
@staticmethod
def get_errorbars_from_ax(ax):
return [cont for cont in ax.containers if isinstance(cont, ErrorbarContainer)]
@classmethod
def get_curves_from_ax(cls, ax):
return ax.get_lines() + cls.get_errorbars_from_ax(ax)
@staticmethod
def METHOD_NAME(ax, legend_props=None):
ax.relim()
if legend_props:
LegendProperties.create_legend(legend_props, ax)
@staticmethod
def toggle_errors(curve, view_props):
if curve_has_errors(curve):
hide_errors = view_props.hide_errors or view_props.hide
setattr(curve, "hide_errors", hide_errors)
set_errorbars_hidden(curve, hide_errors)
@classmethod
def replot_curve(cls, ax, curve, plot_kwargs):
if isinstance(ax, MantidAxes):
if ax.creation_args:
axis = ax.creation_args[0].get("axis", None)
if axis:
plot_kwargs["axis"] = axis
try:
new_curve = ax.replot_artist(curve, errorbars=curve_has_errors(curve), **plot_kwargs)
except ValueError: # ValueError raised if Artist not tracked by Axes
new_curve = cls._replot_mpl_curve(ax, curve, plot_kwargs)
else:
new_curve = cls._replot_mpl_curve(ax, curve, plot_kwargs)
if hasattr(new_curve, "errorevery"):
setattr(new_curve, "errorevery", plot_kwargs.get("errorevery", 1))
return new_curve
@staticmethod
def _replot_mpl_curve(ax, curve, plot_kwargs):
"""
Replot the given matplotlib curve with new kwargs
:param ax: The axis that the curve will be plotted on
:param curve: The curve that will be replotted
:param plot_kwargs: Kwargs for the plot that will be passed onto matplotlib
"""
remove_curve_from_ax(curve)
if isinstance(curve, Line2D):
[plot_kwargs.pop(arg, None) for arg in ["capsize", "capthick", "ecolor", "elinewidth", "errorevery"]]
new_curve = ax.plot(curve.get_xdata(), curve.get_ydata(), **plot_kwargs)[0]
elif isinstance(curve, ErrorbarContainer):
# Because of "error every" option, we need to store the original
# error bar data on the curve or we will lose data on re-plotting
x, y, xerr, yerr = getattr(curve, "errorbar_data", get_data_from_errorbar_container(curve))
new_curve = ax.errorbar(x, y, xerr=xerr, yerr=yerr, **plot_kwargs)
setattr(new_curve, "errorbar_data", [x, y, xerr, yerr])
else:
raise ValueError("Curve must have type 'Line2D' or 'ErrorbarContainer'. Found '{}'".format(type(curve)))
return new_curve | null |
5,682 | """
Hash map with open addressing.
https://en.wikipedia.org/wiki/Hash_table
Another hash map implementation, with a good explanation.
Modern Dictionaries by Raymond Hettinger
https://www.youtube.com/watch?v=p33CVV29OG8
"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
key: KEY
val: VAL
class _DeletedItem(_Item):
def __init__(self) -> None:
super().__init__(None, None)
def __bool__(self) -> bool:
return False
_deleted = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
"""
Hash map with open addressing.
"""
def __init__(
self, initial_block_size: int = 8, capacity_factor: float = 0.75
) -> None:
self._initial_block_size = initial_block_size
self._buckets: list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
self._capacity_factor = capacity_factor
self._len = 0
def _get_bucket_index(self, key: KEY) -> int:
return hash(key) % len(self._buckets)
def _get_next_ind(self, ind: int) -> int:
"""
Get next index.
Implements linear open addressing.
"""
return (ind + 1) % len(self._buckets)
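    # Illustrative: with 8 buckets, probing advances 5 -> 6 -> 7 -> 0 -> 1 ...,
    # so a full scan starting from any index visits every bucket exactly once.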
def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
"""
Try to add value to the bucket.
        If the bucket is empty, holds the deleted placeholder, or already holds
        the same key, the value is stored there and True is returned.
        If the bucket holds a different key, False is returned so the caller
        probes the next bucket.
"""
stored = self._buckets[ind]
if not stored:
self._buckets[ind] = _Item(key, val)
self._len += 1
return True
elif stored.key == key:
self._buckets[ind] = _Item(key, val)
return True
else:
return False
def METHOD_NAME(self) -> bool:
"""
        Return True if we have reached the safe capacity,
        meaning the number of buckets should be increased to avoid collisions.
"""
limit = len(self._buckets) * self._capacity_factor
return len(self) >= int(limit)
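    # Worked example: with the default initial_block_size=8 and
    # capacity_factor=0.75, the check above returns True once the map holds
    # int(8 * 0.75) = 6 items, which makes __setitem__ call _size_up().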
def _is_sparse(self) -> bool:
"""Return true if we need twice fewer buckets when we have now."""
if len(self._buckets) <= self._initial_block_size:
return False
limit = len(self._buckets) * self._capacity_factor / 2
return len(self) < limit
def _resize(self, new_size: int) -> None:
old_buckets = self._buckets
self._buckets = [None] * new_size
self._len = 0
for item in old_buckets:
if item:
self._add_item(item.key, item.val)
def _size_up(self) -> None:
self._resize(len(self._buckets) * 2)
def _size_down(self) -> None:
self._resize(len(self._buckets) // 2)
def _iterate_buckets(self, key: KEY) -> Iterator[int]:
ind = self._get_bucket_index(key)
for _ in range(len(self._buckets)):
yield ind
ind = self._get_next_ind(ind)
def _add_item(self, key: KEY, val: VAL) -> None:
for ind in self._iterate_buckets(key):
if self._try_set(ind, key, val):
break
def __setitem__(self, key: KEY, val: VAL) -> None:
if self.METHOD_NAME():
self._size_up()
self._add_item(key, val)
def __delitem__(self, key: KEY) -> None:
for ind in self._iterate_buckets(key):
item = self._buckets[ind]
if item is None:
raise KeyError(key)
if item is _deleted:
continue
if item.key == key:
self._buckets[ind] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__(self, key: KEY) -> VAL:
for ind in self._iterate_buckets(key):
item = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(key)
def __len__(self) -> int:
return self._len
def __iter__(self) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__(self) -> str:
val_string = " ,".join(
f"{item.key}: {item.val}" for item in self._buckets if item
)
return f"HashMap({val_string})" | null |
5,683 | # SPDX-FileCopyrightText: 2023 Jeff Epler for Adafruit Industries
# SPDX-License-Identifier: MIT
import json
import os
import ssl
import traceback
import board
import displayio
import digitalio
import keypad
import socketpool
import supervisor
from wifi import radio
import adafruit_requests
import adafruit_displayio_ssd1306
from adafruit_bitmap_font.bitmap_font import load_font
from adafruit_display_text import wrap_text_to_pixels
from adafruit_display_text.bitmap_label import Label
from adafruit_ticks import ticks_add, ticks_less, ticks_ms
# Choose your own prompt and wait messages, either by changing it below inside
# the """triple quoted""" string, or by putting it in your settings.toml file,
# like so:
#
# MY_PROMPT="Give me an idea for a gluten free, keto dinner. Write one sentence"
# PLEASE_WAIT="Cooking something up just for you"
#
# Experimentation is best to figure out what works. Usually you'll want to ask
# for just one sentence or paragraph, since the 128x32 pixel screen can't hold
# much text!
# Here are some that the author found worked reasonably well:
# Give me an idea for a plant-based dinner. Write one sentence
#
# Give jepler (they/them) a cliched and flowery description as a comic book
# supervillain. write one sentence.
#
# Invent and describe an alien species. write one sentence
#
# Invent a zany 'as seen on' product that can't possibly work. One sentence
#
# Tell a 1-sentence story about a kitten and a funny mishap
#
# Make up a 1-sentence fortune for me
#
# In first person, write a 1-sentence story about an AI avoiding boredom in a creative way.
#
# Pick an everyday object (don't say what it is) and describe it using only the
# ten hundred most common words.
#
# Invent an alien animal or plant, name it, and vividly describe it in 1
# sentence
#
# Invent and vividly describe an alien species. write one paragraph
prompt=os.getenv("MY_PROMPT", """
Write 1 sentence starting "you can" about an unconventional but useful superpower
""").strip()
please_wait=os.getenv("PLEASE_WAIT", """
Finding superpower
""").strip()
openai_api_key = os.getenv("OPENAI_API_KEY")
nice_font = load_font("helvR08.pcf")
line_spacing = 9 # in pixels
# i2c display setup
displayio.release_displays()
oled_reset = board.GP9
# STEMMA I2C on picowbell
i2c = board.STEMMA_I2C()
display_bus = displayio.I2CDisplay(i2c, device_address=0x3D, reset=oled_reset)
WIDTH = 128
HEIGHT = 64
display = adafruit_displayio_ssd1306.SSD1306(
display_bus, width=WIDTH, height=HEIGHT
)
if openai_api_key is None:
input("Place your\nOPENAI_API_KEY\nin settings.toml")
display.auto_refresh = False
class WrappedTextDisplay(displayio.Group):
def __init__(self):
super().__init__()
self.offset = 0
self.max_lines = display.height // line_spacing
for i in range(self.max_lines):
self.make_label("", i * line_spacing)
self.lines = [""]
self.text = ""
def make_label(self, text, y):
result = Label(
font=nice_font,
color=0xFFFFFF,
background_color=0,
line_spacing=line_spacing,
anchor_point=(0, 0),
anchored_position=(0, y),
text=text)
self.append(result)
def add_text(self, new_text):
print(end=new_text)
if self.lines:
text = self.lines[-1] + new_text
else:
text = new_text
self.lines[-1:] = wrap_text_to_pixels(text, display.width, nice_font)
self.scroll_to_end()
def set_text(self, text):
print("\n\n", end=text)
self.text = text
self.lines = wrap_text_to_pixels(text, display.width, nice_font)
self.offset = 0
def show(self, text):
self.set_text(text)
self.refresh()
def METHOD_NAME(self, new_text):
self.add_text(new_text)
self.refresh()
def scroll_to_end(self):
self.offset = self.max_offset()
def scroll_next_line(self):
max_offset = self.max_offset()
self.offset = (self.offset + 1) % (max_offset + 1)
def max_offset(self):
return max(0, len(self.lines) - self.max_lines)
def on_last_line(self):
return self.offset == self.max_offset()
def refresh(self):
lines = self.lines
# update labels from wrapped text, accounting for scroll offset
for i in range(len(self)):
offset_i = i + self.offset
if offset_i >= len(lines):
text = ""
else:
text = lines[offset_i]
if text != self[i].text:
self[i].text = text
# Actually update the display all at once
display.refresh()
display.root_group = wrapped_text = WrappedTextDisplay()
def wait_button_scroll_text():
led.switch_to_output(True)
keys.events.clear()
deadline = ticks_add(ticks_ms(),
5000 if wrapped_text.on_last_line() else 1000)
while True:
if (event := keys.events.get()) and event.pressed:
break
if wrapped_text.max_offset() > 0 and ticks_less(deadline, ticks_ms()):
wrapped_text.scroll_next_line()
wrapped_text.refresh()
deadline = ticks_add(deadline,
5000 if wrapped_text.on_last_line() else 1000)
led.value = False
if radio.ipv4_address is None:
wrapped_text.show(f"connecting to {os.getenv('WIFI_SSID')}")
radio.connect(os.getenv('WIFI_SSID'), os.getenv('WIFI_PASSWORD'))
requests = adafruit_requests.Session(socketpool.SocketPool(radio), ssl.create_default_context())
def iter_lines(resp):
partial_line = []
for c in resp.iter_content():
if c == b'\n':
yield (b"".join(partial_line)).decode('utf-8')
del partial_line[:]
else:
partial_line.append(c)
if partial_line:
yield (b"".join(partial_line)).decode('utf-8')
full_prompt = [
{"role": "user", "content": prompt},
]
keys = keypad.Keys((board.GP14,), value_when_pressed=False)
led = digitalio.DigitalInOut(board.GP10)
led.switch_to_output(False)
try:
while True:
wrapped_text.show(please_wait)
with requests.post("https://api.openai.com/v1/chat/completions",
json={"model": "gpt-3.5-turbo", "messages": full_prompt, "stream": True},
headers={
"Authorization": f"Bearer {openai_api_key}",
},
) as response:
wrapped_text.set_text("")
if response.status_code != 200:
wrapped_text.show(f"Uh oh! {response.status_code}: {response.reason}")
else:
wrapped_text.show("")
for line in iter_lines(response):
led.switch_to_output(True)
if line.startswith("data: [DONE]"):
break
if line.startswith("data:"):
content = json.loads(line[5:])
try:
token = content['choices'][0]['delta'].get('content', '')
except (KeyError, IndexError) as e:
token = None
led.value = False
if token:
wrapped_text.METHOD_NAME(token)
wait_button_scroll_text()
except Exception as e: # pylint: disable=broad-except
traceback.print_exception(e) # pylint: disable=no-value-for-parameter
print(end="\n\n\nAn error occurred\n\nPress button\nto reload")
display.root_group = displayio.CIRCUITPYTHON_TERMINAL
display.auto_refresh = True
while True:
if (event1 := keys.events.get()) and event1.pressed:
break
supervisor.reload() | null |
5,684 | import os
from unittest import TestCase
from lutris.util.wine.registry import WineRegistry, WineRegistryKey
FIXTURES_PATH = os.path.join(os.path.dirname(__file__), 'fixtures')
class TestWineRegistry(TestCase):
def setUp(self):
self.registry_path = os.path.join(FIXTURES_PATH, 'user.reg')
self.registry = WineRegistry(self.registry_path)
def test_can_load_registry(self):
self.assertTrue(len(self.registry.keys) > 10)
self.assertEqual(self.registry.version, 2)
self.assertEqual(self.registry.arch, 'win64')
def test_can_query_registry(self):
value = self.registry.query('Control Panel/Keyboard', 'KeyboardSpeed')
self.assertEqual(value, '31')
def test_can_get_timestamp_as_int(self):
key = self.registry.keys.get('Control Panel/Keyboard')
self.assertEqual(key.timestamp, 1477412318)
def test_can_get_timestamp_as_float(self):
key = self.registry.keys.get('Control Panel/Sound')
self.assertEqual(key.timestamp, 1475423303.7943190)
def test_can_get_meta(self):
key = self.registry.keys.get('Control Panel/Sound')
self.assertEqual(key.get_meta('time'), '1d21cc468677196')
def METHOD_NAME(self):
key = self.registry.keys.get('Control Panel/Desktop')
self.assertEqual(key.get_subkey('DragFullWindows'), '0')
def test_can_get_dword_value(self):
key = self.registry.keys.get('Control Panel/Desktop')
self.assertEqual(key.get_subkey('CaretWidth'), 1)
def test_can_render_key(self):
expected = (
'[Software\\\\Wine\\\\Fonts] 1477412318\n'
'#time=1d22edb71813e3c\n'
'"Codepages"="1252,437"\n'
'"LogPixels"=dword:00000000\n'
)
key = self.registry.keys.get('Software/Wine/Fonts')
self.assertEqual(key.render(), expected)
def test_render_user_reg(self):
content = self.registry.render()
with open(self.registry_path, 'r') as registry_file:
original_content = registry_file.read()
self.assertEqual(content, original_content)
def test_can_render_system_reg(self):
registry_path = os.path.join(FIXTURES_PATH, 'system.reg')
with open(registry_path, 'r') as registry_file:
original_content = registry_file.read()
system_reg = WineRegistry(registry_path)
content = system_reg.render()
self.assertEqual(content, original_content)
def test_can_set_value_to_existing_subkey(self):
self.assertEqual(self.registry.query('Control Panel/Desktop', 'DragWidth'), '4')
self.registry.set_value('Control Panel/Desktop', 'DragWidth', '8')
self.assertEqual(self.registry.query('Control Panel/Desktop', 'DragWidth'), '8')
def test_can_set_value_to_a_new_sub_key(self):
self.assertEqual(self.registry.query('Control Panel/Desktop', 'BliBlu'), None)
self.registry.set_value('Control Panel/Desktop', 'BliBlu', 'yep')
self.assertEqual(self.registry.query('Control Panel/Desktop', 'BliBlu'), 'yep')
def test_can_set_value_to_a_new_key(self):
self.assertEqual(self.registry.query('Wine/DX11', 'FullyWorking'), None)
self.registry.set_value('Wine/DX11', 'FullyWorking', 'HellYeah')
self.assertEqual(self.registry.query('Wine/DX11', 'FullyWorking'), 'HellYeah')
def test_can_clear_a_key(self):
path = 'Control Panel/Mouse'
key = self.registry.keys.get(path)
self.assertEqual(len(key.subkeys), 13)
self.registry.clear_key(path)
self.assertEqual(len(key.subkeys), 0)
class TestWineRegistryKey(TestCase):
def test_creation_by_key_def_parses(self):
key = WineRegistryKey(key_def='[Control Panel\\\\Desktop] 1477412318')
self.assertEqual(key.name, 'Control Panel/Desktop')
self.assertEqual(key.raw_name, '[Control Panel\\\\Desktop]')
self.assertEqual(key.raw_timestamp, '1477412318')
def test_creation_by_path_parses(self):
key = WineRegistryKey(path='Control Panel/Desktop')
self.assertEqual(key.name, 'Control Panel/Desktop')
self.assertEqual(key.raw_name, '[Control Panel\\\\Desktop]')
self.assertRegex(key.raw_timestamp, r'\d+\s\d+')
def test_parse_registry_key(self):
key = WineRegistryKey(path='Control Panel/Desktop')
key.parse('"C:\\\\users\\\\strider\\\\My Music\\\\iTunes\\\\iTunes Music\\\\Podcasts\\\\"=dword:00000001')
self.assertEqual(key.subkeys["C:\\\\users\\\\strider\\\\My Music\\\\iTunes\\\\iTunes Music\\\\Podcasts\\\\"],
'dword:00000001')
key.parse('"A"=val')
self.assertEqual(key.subkeys["A"], 'val')
key.parse('"String with \"quotes\""=val')
self.assertEqual(key.subkeys['String with \"quotes\"'], 'val')
key.parse('"\"C:\\Program Files\\Windows Media Player\\wmplayer.exe\""="Yes"')
self.assertEqual(key.subkeys['\"C:\\Program Files\\Windows Media Player\\wmplayer.exe\"'], '"Yes"') | null |
5,685 | import pytest
from _testutils import MockHidapiDevice, Report
from collections import deque
from liquidctl.driver.aura_led import AuraLed
# Sample data for Aura LED controller from ASUS ProArt Z690-Creator WiFi
_INIT_19AF_FIRMWARE_DATA = bytes.fromhex(
"ec0241554c41332d415233322d30323037000000000000000000000000000000"
"000000000000000000000000000000000000000000000000000000000000000000"
)
_INIT_19AF_FIRMWARE = Report(_INIT_19AF_FIRMWARE_DATA[0], _INIT_19AF_FIRMWARE_DATA[1:])
_INIT_19AF_CONFIG_DATA = bytes.fromhex(
"ec3000001e9f03010000783c00010000783c00010000783c0000000000000001"
"040201f40000000000000000000000000000000000000000000000000000000000"
)
_INIT_19AF_CONFIG = Report(_INIT_19AF_CONFIG_DATA[0], _INIT_19AF_CONFIG_DATA[1:])
@pytest.fixture
def mockAuraLed_19AFDevice():
device = MockHidapiDevice(vendor_id=0x0B05, product_id=0x19AF, address="addr")
dev = AuraLed(device, "mock Aura LED Controller")
dev.connect()
return dev
def test_aura_led_19AF_device_command_format(mockAuraLed_19AFDevice):
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_FIRMWARE)
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_CONFIG)
mockAuraLed_19AFDevice.initialize() # should perform 3 writes
mockAuraLed_19AFDevice.set_color(
channel="sync", mode="off", colors=[]
) # should perform 14 writes
assert len(mockAuraLed_19AFDevice.device.sent) == 2 + 14
for i, (report, data) in enumerate(mockAuraLed_19AFDevice.device.sent):
assert report == 0xEC
assert len(data) == 64
def test_aura_led_19AF_device_get_status(mockAuraLed_19AFDevice):
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_CONFIG)
assert mockAuraLed_19AFDevice.get_status() != []
def test_aura_led_19AF_device_initialize_status(mockAuraLed_19AFDevice):
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_FIRMWARE)
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_CONFIG)
status_list = mockAuraLed_19AFDevice.initialize()
firmware_tuple = status_list[0]
assert firmware_tuple[1] == "AULA3-AR32-0207"
def METHOD_NAME(mockAuraLed_19AFDevice):
colors = [[0xFF, 0, 0x80]] # should be ignored
mockAuraLed_19AFDevice.set_color(channel="led2", mode="off", colors=iter(colors))
assert len(mockAuraLed_19AFDevice.device.sent) == 5
data1 = mockAuraLed_19AFDevice.device.sent[0].data
data2 = mockAuraLed_19AFDevice.device.sent[1].data
assert data1[1] == 0x01 # key for led2
assert data1[4] == 0x00 # off
assert data2[2] == 0x02 # channel led2
assert data2[7:10] == [0x00, 0x00, 0x00]
def test_aura_led_19AF_static_with_some_channel(mockAuraLed_19AFDevice):
colors = [[0xFF, 0, 0x80], [0x30, 0x30, 0x30]] # second color should be ignored
mockAuraLed_19AFDevice.set_color(channel="led2", mode="static", colors=iter(colors))
assert len(mockAuraLed_19AFDevice.device.sent) == 5
data1 = mockAuraLed_19AFDevice.device.sent[0].data
data2 = mockAuraLed_19AFDevice.device.sent[1].data
assert data1[1] == 0x01 # key for led2
assert data1[4] == 0x01 # static mode
assert data2[2] == 0x02 # channel led2
assert data2[7:10] == [0xFF, 0x00, 0x80]
def test_aura_led_19AF_spectrum_cycle_with_some_channel(mockAuraLed_19AFDevice):
colors = [[0xFF, 0, 0x80], [0x30, 0x30, 0x30]] # second color should be ignored
mockAuraLed_19AFDevice.set_color(channel="led3", mode="spectrum_cycle", colors=iter(colors))
assert len(mockAuraLed_19AFDevice.device.sent) == 5
data1 = mockAuraLed_19AFDevice.device.sent[0].data
data2 = mockAuraLed_19AFDevice.device.sent[1].data
assert data1[1] == 0x01 # key for led3
assert data1[4] == 0x04 # spectrum cycle
assert data2[2] == 0x04 # channel led3
assert data2[7:10] == [0x00, 0x00, 0x00]
def test_aura_led_19AF_device_sync_channel(mockAuraLed_19AFDevice):
colors = [[0xFF, 0, 0x80]]
mockAuraLed_19AFDevice.set_color(channel="sync", mode="static", colors=iter(colors))
assert len(mockAuraLed_19AFDevice.device.sent) == 14 # 14 writes
def test_aura_led_19AF_device_invalid_set_color_arguments(mockAuraLed_19AFDevice):
with pytest.raises(KeyError):
mockAuraLed_19AFDevice.set_color("invalid", "off", [])
with pytest.raises(KeyError):
mockAuraLed_19AFDevice.set_color("led2", "invalid", [])
with pytest.raises(ValueError):
mockAuraLed_19AFDevice.set_color("led3", "static", [])
def test_aura_led_19AF_device_initialize_status(mockAuraLed_19AFDevice):
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_FIRMWARE)
mockAuraLed_19AFDevice.device.preload_read(_INIT_19AF_CONFIG)
status_list = mockAuraLed_19AFDevice.initialize()
firmware_tuple = status_list[0]
assert firmware_tuple[1] == "AULA3-AR32-0207" | null |
5,686 | from typing import Dict, List, Optional
from fastapi import FastAPI
from pydantic import BaseModel, Field
from starlette.testclient import TestClient
app = FastAPI()
class Item(BaseModel):
name: str = Field(alias="aliased_name")
price: Optional[float] = None
owner_ids: Optional[List[int]] = None
@app.get("/items/valid", response_model=Item)
def METHOD_NAME():
return Item(aliased_name="valid", price=1.0)
@app.get("/items/coerce", response_model=Item)
def get_coerce():
return Item(aliased_name="coerce", price="1.0")
@app.get("/items/validlist", response_model=List[Item])
def get_validlist():
return [
Item(aliased_name="foo"),
Item(aliased_name="bar", price=1.0),
Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
]
@app.get("/items/validdict", response_model=Dict[str, Item])
def get_validdict():
return {
"k1": Item(aliased_name="foo"),
"k2": Item(aliased_name="bar", price=1.0),
"k3": Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
}
@app.get(
"/items/valid-exclude-unset", response_model=Item, response_model_exclude_unset=True
)
def get_valid_exclude_unset():
return Item(aliased_name="valid", price=1.0)
@app.get(
"/items/coerce-exclude-unset",
response_model=Item,
response_model_exclude_unset=True,
)
def get_coerce_exclude_unset():
return Item(aliased_name="coerce", price="1.0")
@app.get(
"/items/validlist-exclude-unset",
response_model=List[Item],
response_model_exclude_unset=True,
)
def get_validlist_exclude_unset():
return [
Item(aliased_name="foo"),
Item(aliased_name="bar", price=1.0),
Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
]
@app.get(
"/items/validdict-exclude-unset",
response_model=Dict[str, Item],
response_model_exclude_unset=True,
)
def get_validdict_exclude_unset():
return {
"k1": Item(aliased_name="foo"),
"k2": Item(aliased_name="bar", price=1.0),
"k3": Item(aliased_name="baz", price=2.0, owner_ids=[1, 2, 3]),
}
client = TestClient(app)
def test_valid():
response = client.get("/items/valid")
response.raise_for_status()
assert response.json() == {"aliased_name": "valid", "price": 1.0, "owner_ids": None}
def test_coerce():
response = client.get("/items/coerce")
response.raise_for_status()
assert response.json() == {
"aliased_name": "coerce",
"price": 1.0,
"owner_ids": None,
}
def test_validlist():
response = client.get("/items/validlist")
response.raise_for_status()
assert response.json() == [
{"aliased_name": "foo", "price": None, "owner_ids": None},
{"aliased_name": "bar", "price": 1.0, "owner_ids": None},
{"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
def test_validdict():
response = client.get("/items/validdict")
response.raise_for_status()
assert response.json() == {
"k1": {"aliased_name": "foo", "price": None, "owner_ids": None},
"k2": {"aliased_name": "bar", "price": 1.0, "owner_ids": None},
"k3": {"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
}
def test_valid_exclude_unset():
response = client.get("/items/valid-exclude-unset")
response.raise_for_status()
assert response.json() == {"aliased_name": "valid", "price": 1.0}
def test_coerce_exclude_unset():
response = client.get("/items/coerce-exclude-unset")
response.raise_for_status()
assert response.json() == {"aliased_name": "coerce", "price": 1.0}
def test_validlist_exclude_unset():
response = client.get("/items/validlist-exclude-unset")
response.raise_for_status()
assert response.json() == [
{"aliased_name": "foo"},
{"aliased_name": "bar", "price": 1.0},
{"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
]
def test_validdict_exclude_unset():
response = client.get("/items/validdict-exclude-unset")
response.raise_for_status()
assert response.json() == {
"k1": {"aliased_name": "foo"},
"k2": {"aliased_name": "bar", "price": 1.0},
"k3": {"aliased_name": "baz", "price": 2.0, "owner_ids": [1, 2, 3]},
} | null |
5,687 | __author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
import unittest
from nose.plugins.attrib import attr
from mock import patch, MagicMock
import os
import sys
from jnpr.junos import Device
from jnpr.junos.ofacts.routing_engines import facts_routing_engines as routing_engines
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
@attr("unit")
class TestRoutingEngines(unittest.TestCase):
@patch("ncclient.manager.connect")
@patch("jnpr.junos.device.warnings")
def setUp(self, mock_warnings, mock_connect):
mock_connect.side_effect = self._mock_manager
self.dev = Device(
host="1.1.1.1",
user="rick",
password="password123",
gather_facts=False,
fact_style="old",
)
self.dev.open()
self.facts = {}
self.mode = ""
self.vc = False
self.vct = False
self.vcf = False
@patch("jnpr.junos.Device.execute")
@unittest.skipIf(sys.platform == "win32", "will work for windows in coming days")
def test_multi_re_vc(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.mode = "multi"
self.vc = True
routing_engines(self.dev, self.facts)
self.assertTrue(self.facts["vc_capable"])
self.assertTrue(self.facts["2RE"])
self.assertEqual(self.facts["RE0-RE1"]["mastership_state"], "backup")
    # This test is for an issue where an MX may return nothing for the vc rpc
@patch("jnpr.junos.Device.execute")
def METHOD_NAME(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.mode = "multi"
self.vc = True
self.vct = True
routing_engines(self.dev, self.facts)
self.assertFalse(self.facts["vc_capable"])
self.assertTrue(self.facts["2RE"])
self.assertEqual(self.facts["RE1"]["mastership_state"], "backup")
@patch("jnpr.junos.Device.execute")
@unittest.skipIf(sys.platform == "win32", "will work for windows in coming days")
def test_mixed_mode_vcf(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.mode = "multi"
self.vc = True
self.vcf = True
routing_engines(self.dev, self.facts)
self.assertTrue(self.facts["vc_fabric"])
self.assertEqual(self.facts["vc_mode"], "Mixed")
@patch("jnpr.junos.Device.execute")
def test_multi_instance(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.mode = "multi"
routing_engines(self.dev, self.facts)
self.assertTrue(self.facts["2RE"])
@patch("jnpr.junos.Device.execute")
def test_master(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.mode = "master"
routing_engines(self.dev, self.facts)
self.assertEqual(self.facts["RE0"]["mastership_state"], "master")
@patch("jnpr.junos.Device.execute")
def test_routing_engine_exception_ret_none(self, mock_execute):
mock_execute.side_effect = self._mock_manager
self.dev.rpc.get_route_engine_information = MagicMock(side_effect=ValueError)
self.assertEqual(routing_engines(self.dev, self.facts), None)
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__), "rpc-reply", fname)
foo = open(fpath).read()
rpc_reply = NCElement(
foo, self.dev._conn._device_handler.transform_reply()
)._NCElement__doc[0]
return rpc_reply
def _mock_manager(self, *args, **kwargs):
if kwargs:
device_params = kwargs["device_params"]
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
if args:
if self.vc is True and args[0].tag == "get-virtual-chassis-information":
if self.vct is True:
return True
elif self.vcf is True:
return self._read_file("get-virtual-chassis-information_mmvcf.xml")
else:
return self._read_file("get-virtual-chassis-information.xml")
return self._read_file(args[0].tag + "_" + self.mode + ".xml") | null |
5,688 | """
- A linked list is similar to an array in that it holds values. However, links in a
  linked list do not have indexes.
- This is an example of a double-ended, doubly linked list.
- Each link references the next link and the previous one.
- A Doubly Linked List (DLL) contains an extra pointer, typically called the previous
  pointer, together with the next pointer and data which are present in a singly
  linked list.
- Advantages over an SLL: it can be traversed in both forward and backward directions,
  and the delete operation is more efficient.
"""
class Node:
def __init__(self, data: int, previous=None, next_node=None):
self.data = data
self.previous = previous
self.next = next_node
def __str__(self) -> str:
return f"{self.data}"
def get_data(self) -> int:
return self.data
def get_next(self):
return self.next
def get_previous(self):
return self.previous
class LinkedListIterator:
def __init__(self, head):
self.current = head
def __iter__(self):
return self
def __next__(self):
if not self.current:
raise StopIteration
else:
value = self.current.get_data()
self.current = self.current.get_next()
return value
class LinkedList:
def __init__(self):
self.head = None # First node in list
self.tail = None # Last node in list
def __str__(self):
current = self.head
nodes = []
while current is not None:
nodes.append(current.get_data())
current = current.get_next()
return " ".join(str(node) for node in nodes)
def __contains__(self, value: int):
current = self.head
while current:
if current.get_data() == value:
return True
current = current.get_next()
return False
def __iter__(self):
return LinkedListIterator(self.head)
def get_head_data(self):
if self.head:
return self.head.get_data()
return None
def METHOD_NAME(self):
if self.tail:
return self.tail.get_data()
return None
def set_head(self, node: Node) -> None:
if self.head is None:
self.head = node
self.tail = node
else:
self.insert_before_node(self.head, node)
def set_tail(self, node: Node) -> None:
if self.head is None:
self.set_head(node)
else:
self.insert_after_node(self.tail, node)
def insert(self, value: int) -> None:
node = Node(value)
if self.head is None:
self.set_head(node)
else:
self.set_tail(node)
def insert_before_node(self, node: Node, node_to_insert: Node) -> None:
node_to_insert.next = node
node_to_insert.previous = node.previous
if node.get_previous() is None:
self.head = node_to_insert
else:
node.previous.next = node_to_insert
node.previous = node_to_insert
def insert_after_node(self, node: Node, node_to_insert: Node) -> None:
node_to_insert.previous = node
node_to_insert.next = node.next
if node.get_next() is None:
self.tail = node_to_insert
else:
node.next.previous = node_to_insert
node.next = node_to_insert
def insert_at_position(self, position: int, value: int) -> None:
current_position = 1
new_node = Node(value)
node = self.head
while node:
if current_position == position:
self.insert_before_node(node, new_node)
return
current_position += 1
node = node.next
self.insert_after_node(self.tail, new_node)
def get_node(self, item: int) -> Node:
node = self.head
while node:
if node.get_data() == item:
return node
node = node.get_next()
raise Exception("Node not found")
def delete_value(self, value):
if (node := self.get_node(value)) is not None:
if node == self.head:
self.head = self.head.get_next()
if node == self.tail:
self.tail = self.tail.get_previous()
self.remove_node_pointers(node)
@staticmethod
def remove_node_pointers(node: Node) -> None:
if node.get_next():
node.next.previous = node.previous
if node.get_previous():
node.previous.next = node.next
node.next = None
node.previous = None
def is_empty(self):
return self.head is None
def create_linked_list() -> None:
"""
>>> new_linked_list = LinkedList()
>>> new_linked_list.get_head_data() is None
True
>>> new_linked_list.get_tail_data() is None
True
>>> new_linked_list.is_empty()
True
>>> new_linked_list.insert(10)
>>> new_linked_list.get_head_data()
10
>>> new_linked_list.get_tail_data()
10
>>> new_linked_list.insert_at_position(position=3, value=20)
>>> new_linked_list.get_head_data()
10
>>> new_linked_list.get_tail_data()
20
>>> new_linked_list.set_head(Node(1000))
>>> new_linked_list.get_head_data()
1000
>>> new_linked_list.get_tail_data()
20
>>> new_linked_list.set_tail(Node(2000))
>>> new_linked_list.get_head_data()
1000
>>> new_linked_list.get_tail_data()
2000
>>> for value in new_linked_list:
... print(value)
1000
10
20
2000
>>> new_linked_list.is_empty()
False
>>> for value in new_linked_list:
... print(value)
1000
10
20
2000
>>> 10 in new_linked_list
True
>>> new_linked_list.delete_value(value=10)
>>> 10 in new_linked_list
False
>>> new_linked_list.delete_value(value=2000)
>>> new_linked_list.get_tail_data()
20
>>> new_linked_list.delete_value(value=1000)
>>> new_linked_list.get_tail_data()
20
>>> new_linked_list.get_head_data()
20
>>> for value in new_linked_list:
... print(value)
20
>>> new_linked_list.delete_value(value=20)
>>> for value in new_linked_list:
... print(value)
>>> for value in range(1,10):
... new_linked_list.insert(value=value)
>>> for value in new_linked_list:
... print(value)
1
2
3
4
5
6
7
8
9
"""
if __name__ == "__main__":
import doctest
doctest.testmod() | null |
5,689 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config management."""
import os
import posixpath
from common import benchmark_utils
from common import environment
from common import experiment_path as exp_path
DEFAULT_SNAPSHOT_SECONDS = 15 * 60 # Seconds.
CONFIG_DIR = 'config'
def get_internal_experiment_config_relative_path():
"""Returns the path of the internal config file relative to the data
directory of an experiment."""
return os.path.join(CONFIG_DIR, 'experiment.yaml')
def get_snapshot_seconds():
"""Returns the amount of time in seconds between snapshots of a
fuzzer's corpus during an experiment."""
return environment.get('SNAPSHOT_PERIOD', DEFAULT_SNAPSHOT_SECONDS)
def get_cycle_time(cycle):
"""Return time elapsed for a cycle."""
return cycle * get_snapshot_seconds()
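# Worked example: with the default snapshot period of 15 * 60 = 900 seconds and
# no SNAPSHOT_PERIOD override, get_cycle_time(4) evaluates to 4 * 900 = 3600 s.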
def get_work_dir():
"""Returns work directory."""
return os.environ['WORK']
def get_experiment_name():
"""Returns experiment name."""
return os.environ['EXPERIMENT']
def get_experiment_folders_dir():
"""Returns experiment folders directory."""
return exp_path.path('experiment-folders')
def get_experiment_type(benchmarks):
"""Returns the experiment type based on the type of |benchmarks|, i.e.,
'code' or 'bug'.
Raises ValueError if the benchmark types are mixed.
"""
for benchmark_type in benchmark_utils.BenchmarkType:
type_value = benchmark_type.value
if all(
benchmark_utils.get_type(benchmark) == type_value
for benchmark in benchmarks):
return type_value
benchmark_types = ';'.join(
[f'{b}: {benchmark_utils.get_type(b)}' for b in benchmarks])
raise ValueError('Cannot mix bug benchmarks with code coverage benchmarks: '
f'{benchmark_types}.')
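# Illustrative (benchmark names are hypothetical): get_experiment_type(
# ['libpng_code', 'zlib_code']) returns 'code' only if every benchmark's type
# resolves to 'code'; mixing in a 'bug' benchmark raises the ValueError above.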
def get_cloud_project():
"""Returns the cloud project."""
return os.environ['CLOUD_PROJECT']
def get_experiment_filestore_path():
"""Returns experiment filestore path."""
experiment_filestore = os.environ['EXPERIMENT_FILESTORE']
experiment_name = get_experiment_name()
return posixpath.join(experiment_filestore, experiment_name)
def get_oss_fuzz_corpora_filestore_path():
"""Returns path containing OSS-Fuzz corpora for various fuzz targets."""
return posixpath.join(get_experiment_filestore_path(), 'oss_fuzz_corpora')
def get_custom_seed_corpora_filestore_path():
"""Returns path containing the user-provided seed corpora."""
return posixpath.join(get_experiment_filestore_path(),
'custom_seed_corpora')
def get_dispatcher_instance_name(experiment: str) -> str:
"""Returns a dispatcher instance name for an experiment."""
return f'd-{experiment}'
def get_trial_instance_name(experiment: str, trial_id: int) -> str:
"""Returns a unique instance name for each trial of an experiment."""
return f'r-{experiment}-{trial_id}'
def get_cycle_filename(basename: str, cycle: int) -> str:
"""Returns a filename for a file that is relevant to a particular snapshot
cycle."""
return f'{basename}-{cycle:04d}'
def get_corpus_archive_name(cycle: int) -> str:
"""Returns a corpus archive name given a cycle."""
return get_cycle_filename('corpus-archive', cycle) + '.tar.gz'
def get_stats_filename(cycle: int) -> str:
"""Returns a corpus archive name given a cycle."""
return get_cycle_filename('stats', cycle) + '.json'
def get_crash_metadata_filename(cycle: int) -> str:
"""Returns a crash metadata name given a cycle."""
return get_cycle_filename('crashes', cycle) + '.json'
def get_crashes_archive_name(cycle: int) -> str:
"""Returns a crashes archive name given a cycle."""
return get_cycle_filename('crashes', cycle) + '.tar.gz'
def METHOD_NAME():
"""Returns True if running a local experiment."""
return bool(environment.get('LOCAL_EXPERIMENT'))
def get_trial_dir(fuzzer, benchmark, trial_id):
"""Returns the unique directory for |fuzzer|, |benchmark|, and
|trial_id|."""
benchmark_fuzzer_directory = get_benchmark_fuzzer_dir(benchmark, fuzzer)
trial_subdir = f'trial-{trial_id}'
return posixpath.join(benchmark_fuzzer_directory, trial_subdir)
def get_benchmark_fuzzer_dir(benchmark, fuzzer):
"""Returns the directory for |benchmark| and |fuzzer|."""
return f'{benchmark}-{fuzzer}'
def get_trial_bucket_dir(fuzzer, benchmark, trial_id):
"""Returns the unique directory in experiment-folders int the bucket for
|fuzzer|, |benchmark|, and |trial_id|."""
bucket = os.environ['EXPERIMENT_FILESTORE']
return posixpath.join(bucket, get_experiment_name(), 'experiment-folders',
get_trial_dir(fuzzer, benchmark, trial_id)) | null |
5,690 | # pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103
import pytest
from unittest.mock import MagicMock
from ..data import Data
from ..data_collection import DataCollection
from ..exceptions import InvalidSubscriber, InvalidMessage
from ..hub import Hub, HubListener
from ..message import SubsetMessage, Message
from ..subset import Subset
class TestHub(object):
def setup_method(self, method):
self.hub = Hub()
def get_subscription(self):
msg = Message
handler = MagicMock()
subscriber = MagicMock(spec_set=HubListener)
return msg, handler, subscriber
def test_subscribe(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
assert self.hub.is_subscribed(subscriber, msg)
assert self.hub.get_handler(subscriber, msg) == handler
def test_get_handler(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
assert self.hub.get_handler(subscriber, msg) == handler
assert self.hub.get_handler(subscriber, None) is None
assert self.hub.get_handler(None, msg) is None
def test_unsubscribe(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.unsubscribe(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg)
assert self.hub.get_handler(subscriber, msg) is None
def test_unsubscribe_all(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler)
self.hub.unsubscribe_all(subscriber)
assert not self.hub.is_subscribed(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg2)
def test_unsubscribe_specific_to_message(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler)
self.hub.unsubscribe(subscriber, msg)
assert not self.hub.is_subscribed(subscriber, msg)
assert self.hub.is_subscribed(subscriber, msg2)
def test_broadcast(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
def test_unsubscribe_halts_broadcast(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.unsubscribe(subscriber, msg)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
assert handler.call_count == 0
def test_unsubscribe_spec_setific_to_message(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg2, handler)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
assert handler.call_count == 0
def test_subscription_catches_message_subclasses(self):
msg, handler, subscriber = self.get_subscription()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
msg_instance = msg2(MagicMock(spec_set=Subset))
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
def test_handler_ignored_if_subset_handler_present(self):
msg, handler, subscriber = self.get_subscription()
handler2 = MagicMock()
msg2 = SubsetMessage
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber, msg2, handler2)
msg_instance = SubsetMessage(Subset(None))
self.hub.broadcast(msg_instance)
handler2.assert_called_once_with(msg_instance)
assert handler.call_count == 0
def test_filter(self):
msg, handler, subscriber = self.get_subscription()
filter = lambda x: False
self.hub.subscribe(subscriber, msg, handler)
msg_instance = msg("Test")
self.hub.broadcast(msg)
assert handler.call_count == 0
    def test_broadcast_sends_to_all_subscribers(self):
msg, handler, subscriber = self.get_subscription()
msg, handler2, subscriber2 = self.get_subscription()
self.hub.subscribe(subscriber, msg, handler)
self.hub.subscribe(subscriber2, msg, handler2)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
handler.assert_called_once_with(msg_instance)
handler2.assert_called_once_with(msg_instance)
def test_invalid_unsubscribe_ignored(self):
msg, handler, subscriber = self.get_subscription()
self.hub.unsubscribe(handler, subscriber)
def test_invalid_subscribe(self):
msg, handler, subscriber = self.get_subscription()
with pytest.raises(InvalidSubscriber) as exc:
self.hub.subscribe(None, msg, handler)
assert exc.value.args[0].startswith("Subscriber must be a HubListener")
with pytest.raises(InvalidMessage) as exc:
self.hub.subscribe(subscriber, None, handler)
assert exc.value.args[0].startswith("message class must be "
"a subclass of glue.Message")
def test_default_handler(self):
msg, handler, subscriber = self.get_subscription()
self.hub.subscribe(subscriber, msg)
msg_instance = msg("Test")
self.hub.broadcast(msg_instance)
subscriber.notify.assert_called_once_with(msg_instance)
def test_autosubscribe(self):
l = MagicMock(spec_set=HubListener)
d = MagicMock(spec_set=Data)
s = MagicMock(spec_set=Subset)
dc = MagicMock(spec_set=DataCollection)
hub = Hub(l, d, s, dc)
l.register_to_hub.assert_called_once_with(hub)
d.register_to_hub.assert_called_once_with(hub)
dc.register_to_hub.assert_called_once_with(hub)
s.register.assert_called_once_with()
def METHOD_NAME(self):
with pytest.raises(TypeError) as exc:
Hub(None)
assert exc.value.args[0] == ("Inputs must be HubListener, data, "
"subset, or data collection objects")
class TestHubListener(object):
"""This is a dumb test, I know. Fixated on code coverage"""
def test_unimplemented(self):
hl = HubListener()
with pytest.raises(NotImplementedError):
hl.register_to_hub(None)
with pytest.raises(NotImplementedError):
hl.notify(None) | null |
5,691 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.orchestration.experimental.core.task_schedulers.importer_task_scheduler."""
import os
from unittest import mock
import uuid
import tensorflow as tf
from tfx.dsl.compiler import constants
from tfx.orchestration.experimental.core import post_execution_utils
from tfx.orchestration.experimental.core import sync_pipeline_task_gen as sptg
from tfx.orchestration.experimental.core import task_queue as tq
from tfx.orchestration.experimental.core import task_scheduler
from tfx.orchestration.experimental.core import test_utils
from tfx.orchestration.experimental.core.task_schedulers import importer_task_scheduler
from tfx.orchestration.experimental.core.testing import test_pipeline_with_importer
from tfx.orchestration import mlmd_connection_manager as mlmd_cm
from tfx.orchestration.portable import runtime_parameter_utils
from tfx.utils import status as status_lib
class ImporterTaskSchedulerTest(test_utils.TfxTest):
def setUp(self):
super().setUp()
self.addCleanup(mock.patch.stopall)
# Set a constant version for artifact version tag.
mock.patch('tfx.version.__version__', '0.123.4.dev').start()
pipeline_root = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self.id())
metadata_path = os.path.join(pipeline_root, 'metadata', 'metadata.db')
self._mlmd_cm = mlmd_cm.MLMDConnectionManager.sqlite(metadata_path)
self.enter_context(self._mlmd_cm)
self._mlmd_connection = self._mlmd_cm.primary_mlmd_handle
pipeline = self._make_pipeline(pipeline_root, str(uuid.uuid4()))
self._pipeline = pipeline
self._importer_node = self._pipeline.nodes[0].pipeline_node
self._task_queue = tq.TaskQueue()
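# Run the sync task generator once so it emits the single importer-node task that the scheduler test consumes.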
[importer_task] = test_utils.run_generator_and_test(
test_case=self,
mlmd_connection_manager=self._mlmd_cm,
generator_class=sptg.SyncPipelineTaskGenerator,
pipeline=self._pipeline,
task_queue=self._task_queue,
use_task_queue=True,
service_job_manager=None,
num_initial_executions=0,
num_tasks_generated=1,
num_new_executions=1,
num_active_executions=1,
expected_exec_nodes=[self._importer_node],
ignore_update_node_state_tasks=True)
self._importer_task = importer_task
def _make_pipeline(self, pipeline_root, pipeline_run_id):
pipeline = test_pipeline_with_importer.create_pipeline()
runtime_parameter_utils.substitute_runtime_parameter(
pipeline, {
constants.PIPELINE_ROOT_PARAMETER_NAME: pipeline_root,
constants.PIPELINE_RUN_ID_PARAMETER_NAME: pipeline_run_id,
})
return pipeline
def METHOD_NAME(self):
with self._mlmd_connection as m:
ts_result = importer_task_scheduler.ImporterTaskScheduler(
mlmd_handle=m, pipeline=self._pipeline,
task=self._importer_task).schedule()
self.assertEqual(status_lib.Code.OK, ts_result.status.code)
self.assertIsInstance(ts_result.output, task_scheduler.ImporterNodeOutput)
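# Publish the scheduler result so the imported artifact and its execution are recorded in MLMD.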
post_execution_utils.publish_execution_results_for_task(
m, self._importer_task, ts_result)
[artifact] = m.store.get_artifacts_by_type('Schema')
self.assertProtoPartiallyEquals(
"""
uri: "my_url"
custom_properties {
key: "int_custom_property"
value {
int_value: 123
}
}
custom_properties {
key: "is_external"
value {
int_value: 1
}
}
custom_properties {
key: "str_custom_property"
value {
string_value: "abc"
}
}
custom_properties {
key: "tfx_version"
value {
string_value: "0.123.4.dev"
}
}
state: LIVE""",
artifact,
ignored_fields=[
'id',
'type_id',
'type',
'create_time_since_epoch',
'last_update_time_since_epoch',
],
)
[execution
] = m.store.get_executions_by_id([self._importer_task.execution_id])
self.assertProtoPartiallyEquals(
"""
last_known_state: COMPLETE
custom_properties {
key: "__external_execution_index__"
value {
int_value: 0
}
}
custom_properties {
key: "artifact_uri"
value {
string_value: "my_url"
}
}
custom_properties {
key: "output_key"
value {
string_value: "result"
}
}
custom_properties {
key: "reimport"
value {
int_value: 1
}
}
""",
execution,
ignored_fields=[
'id',
'type_id',
'type',
'create_time_since_epoch',
'last_update_time_since_epoch',
'name',
],
)
if __name__ == '__main__':
tf.test.main() | null |
5,692 | from django.test import TransactionTestCase
from aiarena.core.tests.test_mixins import FullDataSetMixin
class SessionBasedAuthTestCase(FullDataSetMixin, TransactionTestCase):
def METHOD_NAME(self):
self.client.logout() # ensure we're not already logged in
# Should be no current user
response = self.client.get('/api/auth/')
self.assertEqual(response.status_code, 200)
self.assertIsNone(response.data['current_user'])
# Login
response = self.client.post('/api/auth/login/', {'username': 'staff_user', 'password': 'x'})
self.assertEqual(response.status_code, 202)
# Current user should be staff_user
response = self.client.get('/api/auth/')
self.assertEqual(response.data['current_user']['username'], 'staff_user')
# Logout
response = self.client.post('/api/auth/logout/')
self.assertEqual(response.status_code, 204)
# Should be no current user
response = self.client.get('/api/auth/')
self.assertEqual(response.status_code, 200)
self.assertIsNone(response.data['current_user'])
class ApiReadPrivatePagesTestCase(FullDataSetMixin, TransactionTestCase):
"""
Tests to ensure private API endpoint pages don't break.
"""
def test_get_api_discord_users_page(self):
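# A regular (non-staff) user should be denied access to these endpoints; a staff user should be allowed.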
self.client.login(username='regular_user1', password='x')
response = self.client.get('/api/discord-users/')
self.assertEqual(response.status_code, 403)
response = self.client.get('/api/stream/next-replay/')
self.assertEqual(response.status_code, 403)
self.client.login(username='staff_user', password='x')
response = self.client.get('/api/discord-users/')
self.assertEqual(response.status_code, 200)
response = self.client.get('/api/stream/next-replay/')
self.assertEqual(response.status_code, 200)
class ApiReadPublicPagesTestCase(FullDataSetMixin, TransactionTestCase):
"""
Tests to ensure public API endpoint pages don't break.
"""
def setUp(self):
super().setUp()
self.client.login(username='regular_user1', password='x')
def test_get_api_index_page(self):
response = self.client.get('/api/')
self.assertEqual(response.status_code, 200)
def test_get_api_bots_page(self):
response = self.client.get('/api/bots/')
self.assertEqual(response.status_code, 200)
def test_get_api_bot_races_page(self):
response = self.client.get('/api/bot-races/')
self.assertEqual(response.status_code, 200)
def test_get_api_competitions_page(self):
response = self.client.get('/api/competitions/')
self.assertEqual(response.status_code, 200)
def test_get_api_competitionmatchupstat_page(self):
response = self.client.get('/api/competition-bot-matchup-stats/')
self.assertEqual(response.status_code, 200)
def test_get_api_competitionmapstats_page(self):
response = self.client.get('/api/competition-bot-map-stats/')
self.assertEqual(response.status_code, 200)
def test_get_api_competitionparticipations_page(self):
response = self.client.get('/api/competition-participations/')
self.assertEqual(response.status_code, 200)
def test_get_api_games_page(self):
response = self.client.get('/api/games/')
self.assertEqual(response.status_code, 200)
def test_get_api_gamemodes_page(self):
response = self.client.get('/api/game-modes/')
self.assertEqual(response.status_code, 200)
def test_get_api_maps_page(self):
response = self.client.get('/api/maps/')
self.assertEqual(response.status_code, 200)
def test_get_api_mappools_page(self):
response = self.client.get('/api/map-pools/')
self.assertEqual(response.status_code, 200)
def test_get_api_matches_page(self):
response = self.client.get('/api/matches/')
self.assertEqual(response.status_code, 200)
def test_get_api_matchparticipations_page(self):
response = self.client.get('/api/match-participations/')
self.assertEqual(response.status_code, 200)
def test_get_api_news_page(self):
response = self.client.get('/api/news/')
self.assertEqual(response.status_code, 200)
def test_get_api_results_page(self):
response = self.client.get('/api/results/')
self.assertEqual(response.status_code, 200)
def test_get_api_rounds_page(self):
response = self.client.get('/api/rounds/')
self.assertEqual(response.status_code, 200)
def test_get_api_users_page(self):
response = self.client.get('/api/users/')
self.assertEqual(response.status_code, 200) | null |
5,693 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2022] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from unittest import TestCase
import mock
from hpeOneView.connection import connection
from hpeOneView.resources.servers.rack_manager import RackManager
from hpeOneView.resources.resource import (Resource, ResourceHelper,
ResourcePatchMixin)
class RackManagerTest(TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host, 800)
self._rack_manager = RackManager(self.connection)
self.uri = '/rest/rack-manager/1224242424'
self._rack_manager.data = {"uri": self.uri}
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once_with_default_values(self, mock_get_all):
self._rack_manager.get_all()
mock_get_all.assert_called_once_with(start=0, count=-1, filter='', sort='')
@mock.patch.object(ResourceHelper, 'get_all')
def test_get_all_called_once(self, mock_get_all):
filter = 'name=TestName'
sort = 'name:ascending'
self._rack_manager.get_all(2, 500, filter, sort)
mock_get_all.assert_called_once_with(start=2, count=500, filter=filter, sort=sort)
@mock.patch.object(Resource, 'get_by')
def test_get_by_called_once(self, mock_get_by):
self._rack_manager.get_by('name', 'OneViewSDK-Test-RackManager')
mock_get_by.assert_called_once_with('name', 'OneViewSDK-Test-RackManager')
@mock.patch.object(ResourceHelper, 'create')
def test_add_called_once(self, mock_create):
options = {
"hostname": "testhost.com",
"username": "test_user",
"password": "test_pass",
"force": False
}
mock_create.return_value = {}
self._rack_manager.add(options)
mock_create.assert_called_once_with(options.copy(), None, -1, None, False)
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_environmental_configuration(self, mock_get):
self._rack_manager.get_environmental_configuration()
mock_get.assert_called_once_with("{}/environmentalConfiguration".format(self.uri))
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_remote_support_settings(self, mock_get):
self._rack_manager.get_remote_support_settings()
mock_get.assert_called_once_with("{}/remoteSupportSettings".format(self.uri))
@mock.patch.object(ResourceHelper, 'delete')
def test_remove_called_once(self, mock_delete):
self._rack_manager.remove(force=False)
mock_delete.assert_called_once_with(self.uri, force=False,
custom_headers=None, timeout=-1)
@mock.patch.object(ResourceHelper, 'delete')
def test_remove_called_once_with_force(self, mock_delete):
self._rack_manager.remove(force=True)
mock_delete.assert_called_once_with(self.uri, force=True,
custom_headers=None,
timeout=-1)
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_all_chassis(self, mock_get):
self._rack_manager.get_all_chassis()
mock_get.assert_called_once_with("/rest/rack-managers/chassis")
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_all_managers(self, mock_get):
self._rack_manager.get_all_managers()
mock_get.assert_called_once_with("/rest/rack-managers/managers")
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_all_partitions(self, mock_get):
self._rack_manager.get_all_partitions()
mock_get.assert_called_once_with("/rest/rack-managers/partitions")
@mock.patch.object(ResourceHelper, 'do_get')
def METHOD_NAME(self, mock_get):
self._rack_manager.get_associated_chassis()
mock_get.assert_called_once_with("{}/chassis".format(self.uri))
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_associated_managers(self, mock_get):
self._rack_manager.get_associated_managers()
mock_get.assert_called_once_with("{}/managers".format(self.uri))
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_associated_partitions(self, mock_get):
self._rack_manager.get_associated_partitions()
mock_get.assert_called_once_with("{}/partitions".format(self.uri))
@mock.patch.object(ResourceHelper, 'do_get')
def test_get_a_specific_resource(self, mock_get):
uri = '/rest/rack-managers/12345678/managers/abcdefgh/'
self._rack_manager.get_a_specific_resource(uri)
mock_get.assert_called_once_with(uri)
@mock.patch.object(ResourcePatchMixin, 'patch_request')
def test_patch_called_once(self, mock_patch):
self._rack_manager.patch('RefreshRackManagerOp', '', '')
mock_patch.assert_called_once_with(self.uri,
body=[{'op': 'RefreshRackManagerOp',
'path': '',
'value': ''}],
custom_headers=None,
timeout=-1) | null |
5,694 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetInstanceResult',
'AwaitableGetInstanceResult',
'get_instance',
'get_instance_output',
]
@pulumi.output_type
class GetInstanceResult:
"""
A collection of values returned by getInstance.
"""
def __init__(__self__, host_name=None, METHOD_NAME=None, location=None, name=None, resource_group_name=None, tags=None):
if host_name and not isinstance(host_name, str):
raise TypeError("Expected argument 'host_name' to be a str")
pulumi.set(__self__, "host_name", host_name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="hostName")
def host_name(self) -> str:
"""
The API endpoint to work with this Digital Twins instance.
"""
return pulumi.get(self, "host_name")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The Azure Region where the Digital Twins instance exists.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the Digital Twins instance.
"""
return pulumi.get(self, "tags")
class AwaitableGetInstanceResult(GetInstanceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetInstanceResult(
host_name=self.host_name,
METHOD_NAME=self.METHOD_NAME,
location=self.location,
name=self.name,
resource_group_name=self.resource_group_name,
tags=self.tags)
def get_instance(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetInstanceResult:
"""
Use this data source to access information about an existing Digital Twins instance.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.digitaltwins.get_instance(name="existing-digital-twins",
resource_group_name="existing-resgroup")
pulumi.export("id", example.id)
```
:param str name: The name of this Digital Twins instance.
:param str resource_group_name: The name of the Resource Group where the Digital Twins instance exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:digitaltwins/getInstance:getInstance', __args__, opts=opts, typ=GetInstanceResult).value
return AwaitableGetInstanceResult(
host_name=pulumi.get(__ret__, 'host_name'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_instance)
def get_instance_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetInstanceResult]:
"""
Use this data source to access information about an existing Digital Twins instance.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.digitaltwins.get_instance(name="existing-digital-twins",
resource_group_name="existing-resgroup")
pulumi.export("id", example.id)
```
:param str name: The name of this Digital Twins instance.
:param str resource_group_name: The name of the Resource Group where the Digital Twins instance exists.
"""
... | null |
5,695 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing AdjustHue op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.transforms
import mindspore.dataset.vision as vision
from mindspore import log as logger
from util import diff_mse, visualize_image
DATA_DIR = "../data/dataset/testImageNetData/train/"
MNIST_DATA_DIR = "../data/dataset/testMnistData"
DATA_DIR_2 = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def METHOD_NAME(shape):
"""
Only generate floating points that are fractions like n / 256, since they
are RGB pixels. Some low-precision floating point types in this test can't
handle arbitrary precision floating points well.
"""
return np.random.randint(0, 256, shape) / 255.
def test_adjust_hue_eager(plot=False):
"""
Feature: AdjustHue op
Description: Test eager support for AdjustHue implementation
Expectation: Output is the same as expected output
"""
# Eager 3-channel
image_file = "../data/dataset/testImageNetData/train/class1/1_1.jpg"
img = np.fromfile(image_file, dtype=np.uint8)
logger.info("Image.type: {}, Image.shape: {}".format(type(img), img.shape))
img = vision.Decode()(img)
img_adjusthue = vision.AdjustHue(0)(img)
if plot:
visualize_image(img, img_adjusthue)
logger.info("Image.type: {}, Image.shape: {}".format(type(img_adjusthue),
img_adjusthue.shape))
mse = diff_mse(img_adjusthue, img)
logger.info("MSE= {}".format(str(mse)))
assert mse < 0.001
def test_adjust_hue_invalid_hue_factor_param():
"""
Feature: AdjustHue op
Description: Test improper parameters for AdjustHue implementation
Expectation: Throw ValueError exception and TypeError exception
"""
logger.info("Test AdjustHue implementation with invalid ignore parameter")
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
trans = mindspore.dataset.transforms.transforms.Compose([
vision.Decode(True),
vision.Resize((224, 224)),
vision.AdjustHue(hue_factor=-1.0),
vision.ToTensor()
])
data_set = data_set.map(operations=[trans], input_columns=["image"])
except ValueError as error:
logger.info("Got an exception in AdjustHue: {}".format(str(error)))
assert "Input hue_factor is not within the required interval of " in str(error)
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
trans = mindspore.dataset.transforms.transforms.Compose([
vision.Decode(True),
vision.Resize((224, 224)),
vision.AdjustHue(hue_factor=[1, 2]),
vision.ToTensor()
])
data_set = data_set.map(operations=[trans], input_columns=["image"])
except TypeError as error:
logger.info("Got an exception in AdjustHue: {}".format(str(error)))
assert "is not of type [<class 'float'>, <class 'int'>], but got" in str(error)
def test_adjust_hue_pipeline():
"""
Feature: AdjustHue op
Description: Test AdjustHue implementation Pipeline
Expectation: Output is equal to the expected output
"""
# First dataset
transforms1 = [vision.Decode(), vision.Resize([64, 64]), vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
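# AdjustHue with hue_factor=0 is a no-op, so the second pipeline should reproduce the first one's images.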
transforms2 = [
vision.Decode(),
vision.Resize([64, 64]),
vision.AdjustHue(0),
vision.ToTensor()
]
transform2 = mindspore.dataset.transforms.transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR_2,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transform2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
mse = diff_mse(ori_img, cvt_img)
logger.info("MSE= {}".format(str(mse)))
assert mse < 0.001
if __name__ == "__main__":
test_adjust_hue_eager()
test_adjust_hue_invalid_hue_factor_param()
test_adjust_hue_pipeline() | null |
5,696 | import uuid
from multiprocessing import Queue
from threading import Thread, currentThread
from azurelinuxagent.common.singletonperthread import SingletonPerThread
from tests.tools import AgentTestCase, clear_singleton_instances
class TestClassToTestSingletonPerThread(SingletonPerThread):
"""
Since these tests run in a multithreaded environment,
we use multiprocessing.Queue() to ensure that the data is consistent.
This test class uses a uuid to identify an object instead of using the object reference directly, because
Queue.get() returns a different object reference than the one put in, even though the object is the same
(which is verified using the uuid in this test class).
Eg:
obj1 = WireClient("obj1")
obj1
<__main__.WireClient object at 0x7f5e78476198>
q = Queue()
q.put(obj1)
test1 = q.get()
test1
<__main__.WireClient object at 0x7f5e78430630>
test1.endpoint == obj1.endpoint
True
"""
def __init__(self):
# Set the name of the object to the current thread name
self.name = currentThread().getName()
# Unique identifier for a class object
self.uuid = str(uuid.uuid4())
class TestSingletonPerThread(AgentTestCase):
THREAD_NAME_1 = 'thread-1'
THREAD_NAME_2 = 'thread-2'
def METHOD_NAME(self):
super(TestSingletonPerThread, self).METHOD_NAME()
# In a multi-threaded environment, exceptions thrown in the child thread will not be propagated to the parent
# thread. In order to achieve that, adding all exceptions to a Queue and then checking that in parent thread.
self.errors = Queue()
clear_singleton_instances(TestClassToTestSingletonPerThread)
def _setup_multithread_and_execute(self, func1, args1, func2, args2, t1_name=None, t2_name=None):
t1 = Thread(target=func1, args=args1)
t2 = Thread(target=func2, args=args2)
t1.setName(t1_name if t1_name else self.THREAD_NAME_1)
t2.setName(t2_name if t2_name else self.THREAD_NAME_2)
t1.start()
t2.start()
t1.join()
t2.join()
errs = []
while not self.errors.empty():
errs.append(self.errors.get())
if len(errs) > 0:
raise Exception("Errors: %s" % ' , '.join(errs))
@staticmethod
def _get_test_class_instance(q, err):
try:
obj = TestClassToTestSingletonPerThread()
q.put(obj)
except Exception as e:
err.put(str(e))
def _parse_instances_and_return_thread_objects(self, instances, t1_name=None, t2_name=None):
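# Match each queued object back to its originating thread using the name recorded in __init__.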
obj1, obj2 = instances.get(), instances.get()
def check_obj(name):
if obj1.name == name:
return obj1
elif obj2.name == name:
return obj2
else:
return None
t1_object = check_obj(t1_name if t1_name else self.THREAD_NAME_1)
t2_object = check_obj(t2_name if t2_name else self.THREAD_NAME_2)
return t1_object, t2_object
def test_it_should_have_only_one_instance_for_same_thread(self):
obj1 = TestClassToTestSingletonPerThread()
obj2 = TestClassToTestSingletonPerThread()
self.assertEqual(obj1.uuid, obj2.uuid)
def test_it_should_have_multiple_instances_for_multiple_threads(self):
instances = Queue()
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors))
self.assertEqual(2, instances.qsize()) # Assert that there are 2 objects in the queue
obj1, obj2 = instances.get(), instances.get()
self.assertNotEqual(obj1.uuid, obj2.uuid)
def test_it_should_return_existing_instance_for_new_thread_with_same_name(self):
instances = Queue()
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors))
t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances)
new_instances = Queue()
# The 2nd call is to get new objects with the same thread name to verify if the objects are same
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(new_instances, self.errors),
func2=self._get_test_class_instance,
args2=(new_instances, self.errors))
new_t1_obj, new_t2_obj = self._parse_instances_and_return_thread_objects(new_instances)
self.assertEqual(t1_obj.name, new_t1_obj.name)
self.assertEqual(t1_obj.uuid, new_t1_obj.uuid)
self.assertEqual(t2_obj.name, new_t2_obj.name)
self.assertEqual(t2_obj.uuid, new_t2_obj.uuid)
def test_singleton_object_should_match_thread_name(self):
instances = Queue()
t1_name = str(uuid.uuid4())
t2_name = str(uuid.uuid4())
test_class_obj_name = lambda t_name: "%s__%s" % (TestClassToTestSingletonPerThread.__name__, t_name)
self._setup_multithread_and_execute(func1=self._get_test_class_instance,
args1=(instances, self.errors),
func2=self._get_test_class_instance,
args2=(instances, self.errors),
t1_name=t1_name,
t2_name=t2_name)
singleton_instances = TestClassToTestSingletonPerThread._instances # pylint: disable=no-member
# Assert instance names are consistent with the thread names
self.assertIn(test_class_obj_name(t1_name), singleton_instances)
self.assertIn(test_class_obj_name(t2_name), singleton_instances)
# Assert that the objects match their respective threads
# This function matches objects with their thread names and returns the respective object or None if not found
t1_obj, t2_obj = self._parse_instances_and_return_thread_objects(instances, t1_name, t2_name)
# Ensure that objects for both the threads were found
self.assertIsNotNone(t1_obj)
self.assertIsNotNone(t2_obj)
# Ensure that the objects match with their respective thread objects
self.assertEqual(singleton_instances[test_class_obj_name(t1_name)].uuid, t1_obj.uuid)
self.assertEqual(singleton_instances[test_class_obj_name(t2_name)].uuid, t2_obj.uuid)
| null |
5,697 | import inspect
import os
import sys
import unittest
# TODO: Get rid of this and use automatic discovery when Trick requires Python 2.7
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(inspect.getsourcefile(lambda:0))), '..')))
from variable_server import *
class TestVariableServer(unittest.TestCase):
def setUp(self):
self.variable_server = VariableServer('localhost', 7000)
self.variables = [
Variable('ball.obj.state.input.position[0]', type_=int),
Variable('ball.obj.state.input.mass', units='g', type_=float)
]
def tearDown(self):
self.variable_server.close()
def test_get_value(self):
variable = 'ball.obj.state.input.mass'
self.assertEqual('10',
self.variable_server.get_value(variable))
self.assertEqual(10,
self.variable_server.get_value(variable, type_=int))
self.assertEqual(10000.0,
self.variable_server.get_value(variable, units='g', type_=float))
self.assertEqual(20,
self.variable_server.get_value(variable, type_=lambda x: int(x) * 2))
# bad units
self.assertRaises(
UnitsConversionError,
self.variable_server.get_value,
variable,
units='fjarnskaggl')
# bad type
self.assertRaises(
ValueError,
self.variable_server.get_value,
variable,
type_=dict)
def test_set_value(self):
variable = 'ball.obj.state.input.position[1]'
self.variable_server.set_value(variable, 1337)
self.assertEqual('1337', self.variable_server.get_value(variable))
self.variable_server.set_value(variable, 1337, 'km')
self.assertEqual('1337000', self.variable_server.get_value(variable))
def test_get_values(self):
# empty call
self.assertEqual([], self.variable_server.get_values())
# multi-arg call
self.assertEqual(self.variable_server.get_values(*self.variables),
[variable.value for variable in self.variables])
self.assertEqual(5, self.variables[0].value)
self.assertEqual('m', self.variables[0].units)
self.assertEqual(10000.0, self.variables[1].value)
self.assertEqual('g', self.variables[1].units)
def test_add_variables(self):
# empty call
self.variable_server.add_variables()
# repeated call
for _ in range(2):
self.variable_server.add_variables(*self.variables)
self.assertEqual(self.variables, self.variable_server._variables)
# bad units
self.assertRaises(
UnitsConversionError,
self.variable_server.add_variables,
Variable('ball.obj.state.input.mass', units='fjarnskaggl'))
# bad type
self.assertRaises(
ValueError,
self.variable_server.add_variables,
Variable('ball.obj.state.input.mass', type_=dict))
def test_remove_variables(self):
self.variable_server.add_variables(*self.variables)
# empty call
self.variable_server.remove_variables()
self.assertEqual(self.variables, self.variable_server._variables)
# repeated call
for _ in range(2):
self.variable_server.remove_variables(*self.variables)
self.assertFalse(self.variable_server._variables)
def test_remove_all_variables(self):
self.variable_server.add_variables(*self.variables)
# repeated call
for _ in range(2):
self.variable_server.remove_all_variables()
self.assertFalse(self.variable_server._variables)
def test_set_units(self):
self.variable_server.set_units(self.variables[0], 'cm')
def test_set_period(self):
self.variable_server.set_period(10)
# We would like to verify that VariableServerSessionThread::update_rate
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_register_callback(self):
def foo():
pass
self.variable_server.register_callback(foo)
self.assertEqual({foo: ([], {})}, self.variable_server._callbacks)
args = 1, 2
kwargs = {'a': 3, 'b': 4}
self.variable_server.register_callback(foo, args, kwargs)
self.assertEqual({foo: (args, kwargs)},
self.variable_server._callbacks)
def test_deregister_callback(self):
def foo():
pass
self.variable_server.register_callback(foo)
# repeated call
for _ in range(2):
self.variable_server.deregister_callback(foo)
self.assertFalse(self.variable_server._callbacks)
def test_pause(self):
self.variable_server.pause(True)
self.variable_server.pause(False)
# We would like to verify that VariableServerSessionThread::pause_cmd
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_set_debug(self):
self.variable_server.set_debug(3)
# We would like to verify that VariableServerSessionThread::debug
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_set_tag(self):
self.variable_server.set_tag('test')
# We would like to verify that
# VariableServerSessionThread::connection.client_tag was modified, but
# variable server threads are not registered with the memory
# manager, so we can't.
def METHOD_NAME(self):
self.variable_server.set_copy_mode()
# We would like to verify that VariableServerSessionThread::copy_mode
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_send_on_copy(self):
self.variable_server.send_on_copy()
# We would like to verify that VariableServerSessionThread::write_mode
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_validate_addresses(self):
self.variable_server.validate_addresses()
# We would like to verify that VariableServerSessionThread::validate_addresses
# was modified, but variable server threads are not registered
# with the memory manager, so we can't.
def test_variable_exists(self):
self.assertTrue(
self.variable_server.variable_exists(self.variables[0].name))
self.assertFalse(
self.variable_server.variable_exists('fjarnskaggl'))
def test_freeze(self):
self.variable_server.freeze(True)
self.variable_server.freeze(False)
# We would like to verify that Executive::async_freeze_command
# was modified, but it's **'d out, so we can't.
def test_enable_real_time(self):
self.variable_server.enable_real_time(False)
self.assertTrue(self.variable_server.get_value(
'trick_real_time.rt_sync.disable_flag', type_=bool))
self.variable_server.enable_real_time(True)
self.assertTrue(self.variable_server.get_value(
'trick_real_time.rt_sync.enable_flag', type_=bool))
# TODO: Get rid of this and use automatic discovery when Trick requires Python 2.7
if __name__ == '__main__':
unittest.main() | null |
5,698 | from django.db import models
from django.db.models.signals import post_save
from .. import utils
from .core import CacheClearingModel
from .core import DataSet
from .mixins import CloneableModelMixin
class DataPermissionManager (models.Manager):
use_for_related_fields = True
@utils.memo
def all_permissions(self):
return self.all()
def add_permission(self, submission_set, can_create, can_retrieve, can_update, can_destroy, can_access_protected=False, priority=None):
PermModel = self.model
return self.add(PermModel(
submission_set=submission_set,
can_create=can_create,
can_retrieve=can_retrieve,
can_update=can_update,
can_destroy=can_destroy,
can_access_protected=can_access_protected,
priority=priority), bulk=False)
class DataPermission (CloneableModelMixin, CacheClearingModel, models.Model):
"""
Rules for what permissions a given authentication method affords.
"""
submission_set = models.CharField(max_length=128, blank=True, help_text='Either the name of a submission set (e.g., "comments"), or "places". Leave blank to refer to all things.')
can_retrieve = models.BooleanField(default=True)
can_create = models.BooleanField(default=False)
can_update = models.BooleanField(default=False)
can_destroy = models.BooleanField(default=False)
can_access_protected = models.BooleanField(default=False)
priority = models.PositiveIntegerField(blank=True)
objects = DataPermissionManager()
class Meta:
abstract = True
ordering = ('priority',)
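# The property factories below expose the concrete parent relation (dataset, group, key, or origin) named by parent_attr on each subclass.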
def parent():
def fget(self): return getattr(self, self.parent_attr)
def METHOD_NAME(self, value): setattr(self, self.parent_attr, value)
return locals()
parent = property(**parent())
def siblings():
def fget(self): return self.parent.permissions.all()
return locals()
siblings = property(**siblings())
def dataset():
def fget(self): return self.parent.dataset
return locals()
dataset = property(**dataset())
def abilities(self):
abilities = []
if self.can_create: abilities.append('create')
if self.can_retrieve: abilities.append('retrieve')
if self.can_update: abilities.append('update')
if self.can_destroy: abilities.append('destroy')
things = self.submission_set if self.submission_set.strip() not in ('', '*') else 'anything'
if abilities:
if len(abilities) > 1: abilities[-1] = 'or ' + abilities[-1]
return 'can ' + ', '.join(abilities) + ' ' + things
else:
return 'can not create, retrieve, update, or destroy ' + things + ' at all'
def clear_instance_cache(self):
return self.dataset.clear_instance_cache()
def save(self, *args, **kwargs):
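# When no priority is given, assign one past the current largest value so the new permission sorts last.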
if self.priority is None:
try:
lowest = self.siblings.order_by('-priority')[0]
self.priority = lowest.priority + 1
except IndexError:
self.priority = 0
return super(DataPermission, self).save(*args, **kwargs)
class DataSetPermission (DataPermission):
dataset = models.ForeignKey('DataSet', on_delete=models.CASCADE, related_name='permissions')
parent_attr = 'dataset'
class Meta:
app_label = 'sa_api_v2'
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return '%s %s' % ('submitters', self.abilities())
class GroupPermission (DataPermission):
group = models.ForeignKey('Group', on_delete=models.CASCADE, related_name='permissions')
parent_attr = 'group'
class Meta:
app_label = 'sa_api_v2'
def __str__(self):
return '%s %s' % (self.group, self.abilities())
class KeyPermission (DataPermission):
key = models.ForeignKey('sa_api_v2.ApiKey', on_delete=models.CASCADE, related_name='permissions')
parent_attr = 'key'
class Meta:
app_label = 'sa_api_v2'
def __str__(self):
return 'submitters %s' % (self.abilities(),)
class OriginPermission (DataPermission):
origin = models.ForeignKey('sa_api_v2.Origin', on_delete=models.CASCADE, related_name='permissions')
parent_attr = 'origin'
class Meta:
app_label = 'sa_api_v2'
def __str__(self):
return 'submitters %s' % (self.abilities(),)
def create_data_permissions(sender, instance, created, **kwargs):
"""
Create a default permission instance for a new dataset.
"""
cloned = hasattr(instance, '_cloned_from')
if created and not cloned:
DataSetPermission.objects.create(dataset=instance, submission_set='*',
can_retrieve=True, can_create=False, can_update=False, can_destroy=False)
post_save.connect(create_data_permissions, sender=DataSet, dispatch_uid="dataset-create-permissions")
def any_allow(permissions, do_action, submission_set, protected=False):
"""
Check whether any of the data permissions in the managed set allow the
action on a submission set with the given name. Specify whether the action
is on protected data.
"""
for permission in permissions:
if (permission.submission_set in (submission_set, '*')
and getattr(permission, 'can_' + do_action, False)
and (permission.can_access_protected or not protected)):
return True
return False
def check_data_permission(user, client, do_action, dataset, submission_set, protected=False):
"""
Check whether the given user has permission on the submission_set in
the context of the given client (e.g., an API key or an origin). Specify
whether the permission is for protected data.
"""
if do_action not in ('retrieve', 'create', 'update', 'destroy'):
raise ValueError
if user and user.is_superuser:
return True
# Owner can do anything
if user and dataset and user.id == dataset.owner_id:
return True
# Start with the dataset permission
if dataset and any_allow(dataset.permissions.all(), do_action, submission_set, protected):
return True
# Then the client permission
if client is not None:
if (client.dataset == dataset and
any_allow(client.permissions.all(), do_action, submission_set, protected)):
return True
# Next, check the user's groups
if user is not None and user.is_authenticated:
for group in user._groups.all():
if (dataset and group.dataset_id == dataset.id and
any_allow(group.permissions.all(), do_action, submission_set, protected)):
return True
return False
| null |
5,699 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
from qtpy.QtWidgets import QDialog, QFileDialog
from mantid.kernel import Logger
try:
from mantidqt.utils.qt import load_ui
except ImportError:
Logger("HFIR_4Circle_Reduction").information("Using legacy ui importer")
from mantidplot import load_ui
from qtpy.QtWidgets import QVBoxLayout
from mantidqtinterfaces.HFIR_4Circle_Reduction.hfctables import PeaksIntegrationSpreadSheet
class PeaksIntegrationReportDialog(QDialog):
"""
Dialog to report the details of peaks integration
"""
def __init__(self, parent):
"""
initialization
:param parent:
"""
super(PeaksIntegrationReportDialog, self).__init__(parent)
# set up UI
ui_path = "PeakIntegrationSpreadSheet.ui"
self.ui = load_ui(__file__, ui_path, baseinstance=self)
self._promote_widgets()
# initialize widget
self.ui.tableWidget_spreadsheet.setup()
# set up handlers
self.ui.pushButton_exportTable.clicked.connect(self.do_export_table)
self.ui.pushButton_quit.clicked.connect(self.METHOD_NAME)
return
def _promote_widgets(self):
tableWidget_spreadsheet_layout = QVBoxLayout()
self.ui.frame_tableWidget_spreadsheet.setLayout(tableWidget_spreadsheet_layout)
self.ui.tableWidget_spreadsheet = PeaksIntegrationSpreadSheet(self)
tableWidget_spreadsheet_layout.addWidget(self.ui.tableWidget_spreadsheet)
return
def do_export_table(self):
"""
export table to a file
:return:
"""
default_dir = os.getcwd()
output_file = QFileDialog.getSaveFileName(self, "Export table to csv file", default_dir, "Data Files (*.dat);;All Files (*.*)")
if not output_file:
return
if isinstance(output_file, tuple):
output_file = output_file[0]
# write
self.ui.tableWidget_spreadsheet.export_table_csv(output_file)
def METHOD_NAME(self):
"""
:return:
"""
self.close()
return
def set_report(self, peak_integration_summary):
"""
:param peak_integration_summary: dictionary of dictionary; key is scan number
:return:
"""
# check input
assert isinstance(peak_integration_summary, dict)
if len(peak_integration_summary) == 0:
print("[WARNING] There is no peak integration summary given for the report.")
return
scan_number_list = sorted(peak_integration_summary.keys())
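# Add one table row per scan; scans with missing summary keys are reported and skipped.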
for scan_number in scan_number_list:
try:
spice_hkl = peak_integration_summary[scan_number]["SPICE HKL"]
calculated_hkl = peak_integration_summary[scan_number]["Mantid HKL"]
mask_name = peak_integration_summary[scan_number]["Mask"]
intensity1 = peak_integration_summary[scan_number]["Raw Intensity"]
error1 = peak_integration_summary[scan_number]["Raw Intensity Error"]
intensity2 = peak_integration_summary[scan_number]["Intensity 2"]
error2 = peak_integration_summary[scan_number]["Intensity 2 Error"]
intensity3 = peak_integration_summary[scan_number]["Gauss Intensity"]
error3 = peak_integration_summary[scan_number]["Gauss Error"]
lorentz_factor = peak_integration_summary[scan_number]["Lorentz"]
estimated_bkgd = peak_integration_summary[scan_number]["Estimated Background"]
gauss_bkgd = peak_integration_summary[scan_number]["Fitted Background"]
gauss_a = peak_integration_summary[scan_number]["Fitted A"]
gauss_sigma = peak_integration_summary[scan_number]["Fitted Sigma"]
motor_name = peak_integration_summary[scan_number]["Motor"]
motor_step = peak_integration_summary[scan_number]["Motor Step"]
k_shift = peak_integration_summary[scan_number]["K-vector"]
absorption_correction = peak_integration_summary[scan_number]["Absorption Correction"]
self.ui.tableWidget_spreadsheet.add_scan_information(
scan_number,
spice_hkl,
calculated_hkl,
mask_name,
intensity1,
error1,
intensity2,
error2,
intensity3,
error3,
lorentz_factor,
estimated_bkgd,
gauss_bkgd,
gauss_sigma,
gauss_a,
motor_name,
motor_step,
k_shift,
absorption_correction,
)
except KeyError as key_err:
print("ERROR: Unable to add scan {0} to report due to {1}".format(scan_number, key_err))
# END-FOR
return | null |