id | code | code_compressed
---|---|---|
5,700 | """
Collaborate with RestApiClient to make remote anonymous and authenticated calls.
Uses user_input to request user's login and password and obtain a token for calling authenticated
methods if receives AuthenticationException from RestApiClient.
Flow:
Directly invoke a REST method in RestApiClient, example: get_conan.
if receives AuthenticationException (not open method) will ask user for login and password
and will invoke RestApiClient.get_token() (with LOGIN_RETRIES retries) and retry to call
get_conan with the new token.
"""
import hashlib
from uuid import getnode as get_mac
from conan.api.output import ConanOutput
from conans.client.cmd.user import update_localdb
from conans.client.rest.remote_credentials import RemoteCredentials
from conans.errors import AuthenticationException, ConanException, ForbiddenException
LOGIN_RETRIES = 3
class ConanApiAuthManager(object):
def __init__(self, rest_client_factory, cache):
self._cache = cache
self._rest_client_factory = rest_client_factory
self._localdb = cache.localdb
def METHOD_NAME(self, remote, method_name, *args, **kwargs):
"""Handles AuthenticationException and request user to input a user and a password"""
user, token, refresh_token = self._localdb.get_login(remote.url)
rest_client = self._get_rest_client(remote)
if method_name == "authenticate":
return self._authenticate(remote, *args, **kwargs)
try:
ret = getattr(rest_client, method_name)(*args, **kwargs)
return ret
except ForbiddenException as e:
raise ForbiddenException(f"Permission denied for user: '{user}': {e}")
except AuthenticationException:
# User valid but not enough permissions
if user is None or token is None:
# token is None when you change user with user command
# Anonymous is not enough, ask for a user
ConanOutput().info('Please log in to "%s" to perform this action. '
'Execute "conan remote login" command.' % remote.name)
return self._retry_with_new_token(user, remote, method_name, *args, **kwargs)
elif token and refresh_token:
# If we have a refresh token try to refresh the access token
try:
self._authenticate(remote, user, None)
except AuthenticationException:
# logger.info("Cannot refresh the token, cleaning and retrying: {}".format(exc))
self._clear_user_tokens_in_db(user, remote)
return self.METHOD_NAME(remote, method_name, *args, **kwargs)
else:
# Token expired or not valid, so clean the token and repeat the call
# (will be anonymous call but exporting who is calling)
# logger.info("Token expired or not valid, cleaning the saved token and retrying")
self._clear_user_tokens_in_db(user, remote)
return self.METHOD_NAME(remote, method_name, *args, **kwargs)
def _retry_with_new_token(self, user, remote, method_name, *args, **kwargs):
"""Try LOGIN_RETRIES to obtain a password from user input for which
we can get a valid token from api_client. If a token is returned,
credentials are stored in localdb and rest method is called"""
for _ in range(LOGIN_RETRIES):
creds = RemoteCredentials(self._cache)
input_user, input_password = creds.auth(remote.name)
try:
self._authenticate(remote, input_user, input_password)
except AuthenticationException:
out = ConanOutput()
if user is None:
out.error('Wrong user or password')
else:
out.error('Wrong password for user "%s"' % user)
out.info('You can change username with "conan remote login <remote> <username>"')
else:
return self.METHOD_NAME(remote, method_name, *args, **kwargs)
raise AuthenticationException("Too many failed login attempts, bye!")
def _get_rest_client(self, remote):
username, token, refresh_token = self._localdb.get_login(remote.url)
custom_headers = {'X-Client-Anonymous-Id': self._get_mac_digest(),
'X-Client-Id': str(username or "")}
return self._rest_client_factory.new(remote, token, refresh_token, custom_headers)
def _clear_user_tokens_in_db(self, user, remote):
try:
self._localdb.store(user, token=None, refresh_token=None, remote_url=remote.url)
except Exception as e:
out = ConanOutput()
out.error('Your credentials could not be stored in local cache\n')
out.debug(str(e) + '\n')
@staticmethod
def _get_mac_digest():
sha1 = hashlib.sha1()
sha1.update(str(get_mac()).encode())
return str(sha1.hexdigest())
def _authenticate(self, remote, user, password):
rest_client = self._get_rest_client(remote)
if user is None: # The user is already in DB, just need the password
prev_user = self._localdb.get_username(remote.url)
if prev_user is None:
raise ConanException("User for remote '%s' is not defined" % remote.name)
else:
user = prev_user
try:
token, refresh_token = rest_client.authenticate(user, password)
except UnicodeDecodeError:
raise ConanException("Password contains not allowed symbols")
# Store result in DB
remote_name, prev_user, user = update_localdb(self._localdb, user, token, refresh_token,
remote)
return remote_name, prev_user, user | null |
5,701 | # pylint: disable=no-member
import struct
from io import BytesIO
try:
import pefile
PEFILE_AVAILABLE = True
except ImportError:
pefile = None
PEFILE_AVAILABLE = False
from PIL import Image
# From https://github.com/firodj/extract-icon-py
class ExtractIcon(object):
GRPICONDIRENTRY_format = ('GRPICONDIRENTRY',
('B,Width', 'B,Height', 'B,ColorCount', 'B,Reserved',
'H,Planes', 'H,BitCount', 'I,BytesInRes', 'H,ID'))
GRPICONDIR_format = ('GRPICONDIR',
('H,Reserved', 'H,Type', 'H,Count'))
RES_ICON = 1
RES_CURSOR = 2
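# Usage sketch (assumes pefile is installed and the target is a PE binary with icon
# resources; the file names are illustrative):
#
#   extractor = ExtractIcon("app.exe")
#   groups = extractor.get_group_icons()      # one list of GRPICONDIRENTRY per icon group
#   if groups:
#       image = extractor.export(groups[0])   # PIL.Image built from the first group
#       image.save("app_icon.png")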
def __init__(self, filepath):
self.pe = pefile.PE(filepath)
def find_resource_base(self, res_type):
try:
rt_base_idx = [entry.id for
entry in self.pe.DIRECTORY_ENTRY_RESOURCE.entries].index(
pefile.RESOURCE_TYPE[res_type]
)
except ValueError:
# list.index() raises ValueError when the resource type is absent
return None
return self.pe.DIRECTORY_ENTRY_RESOURCE.entries[rt_base_idx]
def METHOD_NAME(self, res_type, res_index):
rt_base_dir = self.find_resource_base(res_type)
if res_index < 0:
try:
idx = [entry.id for entry in rt_base_dir.directory.entries].index(-res_index)
except ValueError:
return None
else:
idx = res_index if res_index < len(rt_base_dir.directory.entries) else None
if idx is None:
return None
test_res_dir = rt_base_dir.directory.entries[idx]
res_dir = test_res_dir
if test_res_dir.struct.DataIsDirectory:
# another Directory
# probably a language directory; take the first one
res_dir = test_res_dir.directory.entries[0]
if res_dir.struct.DataIsDirectory:
# unexpectedly nested yet another directory level; give up
return None
return res_dir
def get_group_icons(self):
rt_base_dir = self.find_resource_base('RT_GROUP_ICON')
groups = []
for res_index in range(0, len(rt_base_dir.directory.entries)):
grp_icon_dir_entry = self.METHOD_NAME('RT_GROUP_ICON', res_index)
if not grp_icon_dir_entry:
continue
data_rva = grp_icon_dir_entry.data.struct.OffsetToData
size = grp_icon_dir_entry.data.struct.Size
data = self.pe.get_memory_mapped_image()[data_rva:data_rva + size]
file_offset = self.pe.get_offset_from_rva(data_rva)
grp_icon_dir = pefile.Structure(self.GRPICONDIR_format, file_offset=file_offset)
grp_icon_dir.__unpack__(data)
if grp_icon_dir.Reserved != 0 or grp_icon_dir.Type != self.RES_ICON:
continue
offset = grp_icon_dir.sizeof()
entries = []
for _idx in range(0, grp_icon_dir.Count):
grp_icon = pefile.Structure(self.GRPICONDIRENTRY_format, file_offset=file_offset + offset)
grp_icon.__unpack__(data[offset:])
offset += grp_icon.sizeof()
entries.append(grp_icon)
groups.append(entries)
return groups
def get_icon(self, index):
icon_entry = self.METHOD_NAME('RT_ICON', -index)
if not icon_entry:
return None
data_rva = icon_entry.data.struct.OffsetToData
size = icon_entry.data.struct.Size
data = self.pe.get_memory_mapped_image()[data_rva:data_rva + size]
return data
def export_raw(self, entries, index=None):
if index is not None:
entries = entries[index:index + 1]
ico = struct.pack('<HHH', 0, self.RES_ICON, len(entries))
data_offset = None
data = []
info = []
for grp_icon in entries:
if data_offset is None:
data_offset = len(ico) + ((grp_icon.sizeof() + 2) * len(entries))
nfo = grp_icon.__pack__()[:-2] + struct.pack('<L', data_offset)
info.append(nfo)
raw_data = self.get_icon(grp_icon.ID)
if not raw_data:
continue
data.append(raw_data)
data_offset += len(raw_data)
raw = ico + b''.join(info + data)
return raw
def export(self, entries, index=None):
raw = self.export_raw(entries, index)
return Image.open(BytesIO(raw))
def _get_bmp_header(self, data):
if data[0:4] == b'\x89PNG':
header = b''
else:
dib_size = struct.unpack('<L', data[0:4])[0]
header = b'BM' + struct.pack('<LLL', len(data) + 14, 0, 14 + dib_size)
return header | null |
5,702 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines the handler for resolver node."""
import sys
import traceback
from typing import Any, Dict, Optional
from absl import logging
import grpc
from tfx.orchestration import data_types_utils
from tfx.orchestration import metadata
from tfx.orchestration.portable import data_types
from tfx.orchestration.portable import execution_publish_utils
from tfx.orchestration.portable import inputs_utils
from tfx.orchestration.portable import system_node_handler
from tfx.orchestration.portable.input_resolution import exceptions
from tfx.orchestration.portable.mlmd import context_lib
from tfx.proto.orchestration import execution_result_pb2
from tfx.proto.orchestration import pipeline_pb2
_ERROR_CODE_UNIMPLEMENTED: int = grpc.StatusCode.UNIMPLEMENTED.value[0]
class ResolverNodeHandler(system_node_handler.SystemNodeHandler):
"""The handler for the system Resolver node."""
def METHOD_NAME(
self,
# The actual type of proto message of map<str, pipeline_pb2.Value>.
proto_map: Any) -> Dict[str, Any]:
extract_mlmd_value = lambda v: getattr(v, v.WhichOneof('value'))
return {k: extract_mlmd_value(v.field_value) for k, v in proto_map.items()}
def run(
self, mlmd_connection: metadata.Metadata,
pipeline_node: pipeline_pb2.PipelineNode,
pipeline_info: pipeline_pb2.PipelineInfo,
pipeline_runtime_spec: pipeline_pb2.PipelineRuntimeSpec
) -> data_types.ExecutionInfo:
"""Runs Resolver specific logic.
Args:
mlmd_connection: ML metadata connection.
pipeline_node: The specification of the node that this launcher launches.
pipeline_info: The information of the pipeline that this node runs in.
pipeline_runtime_spec: The runtime information of the pipeline that this
node runs in.
Returns:
The execution of the run.
"""
logging.info('Running as a resolver node.')
with mlmd_connection as m:
# 1. Prepares all contexts.
contexts = context_lib.prepare_contexts(
metadata_handler=m, node_contexts=pipeline_node.contexts)
# 2. Resolves inputs and execution properties.
exec_properties = data_types_utils.build_parsed_value_dict(
inputs_utils.resolve_parameters_with_schema(
node_parameters=pipeline_node.parameters))
try:
resolved_inputs = inputs_utils.resolve_input_artifacts(
pipeline_node=pipeline_node,
metadata_handler=m)
logging.info('[%s] Resolved inputs: %s', pipeline_node.node_info.id,
resolved_inputs)
except exceptions.InputResolutionError as e:
logging.exception('[%s] Input resolution error: %s',
pipeline_node.node_info.id, e)
execution = execution_publish_utils.register_execution(
metadata_handler=m,
execution_type=pipeline_node.node_info.type,
contexts=contexts,
exec_properties=exec_properties)
execution_publish_utils.publish_failed_execution(
metadata_handler=m,
contexts=contexts,
execution_id=execution.id,
executor_output=self._build_error_output(code=e.grpc_code_value))
return data_types.ExecutionInfo(
execution_id=execution.id,
exec_properties=exec_properties,
pipeline_node=pipeline_node,
pipeline_info=pipeline_info)
# 2a. If Skip (i.e. inside conditional), no execution should be made.
# TODO(b/197907821): Publish special execution for Skip?
if isinstance(resolved_inputs, inputs_utils.Skip):
return data_types.ExecutionInfo()
# 3. Registers execution in metadata.
execution = execution_publish_utils.register_execution(
metadata_handler=m,
execution_type=pipeline_node.node_info.type,
contexts=contexts,
exec_properties=exec_properties)
# TODO(b/197741942): Support len > 1.
if len(resolved_inputs) > 1:
execution_publish_utils.publish_failed_execution(
metadata_handler=m,
contexts=contexts,
execution_id=execution.id,
executor_output=self._build_error_output(
_ERROR_CODE_UNIMPLEMENTED,
'Handling more than one input dict is not implemented yet.'))
return data_types.ExecutionInfo(
execution_id=execution.id,
exec_properties=exec_properties,
pipeline_node=pipeline_node,
pipeline_info=pipeline_info)
input_artifacts = resolved_inputs[0]
# 4. Publish the execution as a cached execution with
# resolved input artifact as the output artifacts.
execution_publish_utils.publish_internal_execution(
metadata_handler=m,
contexts=contexts,
execution_id=execution.id,
output_artifacts=input_artifacts)
return data_types.ExecutionInfo(
execution_id=execution.id,
input_dict=input_artifacts,
output_dict=input_artifacts,
exec_properties=exec_properties,
pipeline_node=pipeline_node,
pipeline_info=pipeline_info)
def _build_error_output(
self, code: int, msg: Optional[str] = None
) -> execution_result_pb2.ExecutorOutput:
if msg is None:
msg = '\n'.join(traceback.format_exception(*sys.exc_info()))
return execution_result_pb2.ExecutorOutput(
execution_result=execution_result_pb2.ExecutionResult(
code=code, result_message=msg)) | null |
5,703 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
import sys
from qtpy.QtWidgets import QApplication
from matplotlib.patches import Rectangle
from mantid.simpleapi import config
from mantid.api import mtd
from mantidqtinterfaces.simplescanviewer.rectangle_controller import RectangleController, RectanglesManager
app = QApplication(sys.argv)
class RectangleManagerTest(unittest.TestCase):
def setUp(self) -> None:
self.facility = config["default.facility"]
self.instrument = config["default.instrument"]
config["default.facility"] = "ILL"
config["default.instrument"] = "D16"
self.manager = RectanglesManager()
def tearDown(self) -> None:
config["default.facility"] = self.facility
config["default.instrument"] = self.instrument
mtd.clear()
def test_add_rectangle(self):
rect1 = Rectangle((0, 0), 1, 1)
rect2 = Rectangle((1, 1), 2, 2)
self.manager.add_rectangle(rect1)
self.assertEqual(len(self.manager.rectangles), 1)
self.assertEqual(self.manager.current_rectangle_index, 0)
self.assertEqual(self.manager.table.rowCount(), 5)
self.manager.add_rectangle(rect2)
self.assertEqual(len(self.manager.rectangles), 2)
self.assertEqual(self.manager.current_rectangle_index, 1)
self.assertEqual(self.manager.table.rowCount(), 10)
def test_remove_rectangle(self):
rect = mock.Mock()
rect.get_xy.return_value = (0, 0)
rect.get_width.return_value = 1
rect.get_height.return_value = 1
self.manager.add_rectangle(rect)
self.manager.delete_current()
self.assertEqual(len(self.manager.get_rectangles()), 0)
self.assertEqual(self.manager.current_rectangle_index, -1)
self.assertEqual(self.manager.table.rowCount(), 0)
rect.remove.assert_called_once()
def test_set_as_current(self):
rect1 = Rectangle((0, 0), 1, 1)
rect2 = Rectangle((1, 1), 2, 2)
self.manager.add_rectangle(rect1)
self.manager.add_rectangle(rect2)
self.manager.set_as_current_rectangle(rect1)
self.assertEqual(self.manager.current_rectangle_index, 0)
self.assertEqual(self.manager.get_current_rectangle(), rect1)
def test_clear(self):
rect1 = mock.Mock()
rect1.get_xy.return_value = (0, 0)
rect1.get_width.return_value = 1
rect1.get_height.return_value = 1
rect2 = mock.Mock()
rect2.get_xy.return_value = (0, 0)
rect2.get_width.return_value = 1
rect2.get_height.return_value = 1
self.manager.add_rectangle(rect1)
self.manager.add_rectangle(rect2)
self.manager.clear()
self.assertEqual(self.manager.current_rectangle_index, -1)
self.assertEqual(len(self.manager.get_rectangles()), 0)
self.assertEqual(self.manager.table.rowCount(), 0)
rect1.remove.assert_called_once()
rect2.remove.assert_called_once()
def test_find(self):
rect1 = Rectangle((0, 0), 1, 1)
rect2 = Rectangle((1, 1), 2, 2)
self.manager.add_rectangle(rect1)
self.manager.add_rectangle(rect2)
self.assertEqual(self.manager.find_controller(0, 0, 1, 1), 0)
self.assertEqual(self.manager.find_controller(1, 1, 3, 3), 1)
self.assertEqual(self.manager.find_controller(1, 2, 3, 4), -1)
def test_field_update(self):
rect1 = Rectangle((0, 0), 1, 1)
rect2 = Rectangle((1, 1), 2, 2)
self.manager.add_rectangle(rect1)
self.manager.add_rectangle(rect2)
trigger_check = mock.Mock()
self.manager.sig_controller_updated.connect(trigger_check)
self.manager.table.item(1, 1).setText("0.123") # set x0 of the first rectangle
self.assertEqual(self.manager.get_rectangles()[0].get_xy()[0], 0.123)
self.assertEqual(self.manager.get_rectangles()[0].get_width(), 0.877)
self.assertEqual(self.manager.current_rectangle_index, 1) # the current rectangle is still the second one
trigger_check.assert_called_once()
trigger_check.reset_mock()
self.manager.table.item(1, 1).setText("azerty")
# check the value reverts to the previous one when the input is ill-formed
self.assertEqual(self.manager.get_rectangles()[0].get_xy()[0], 0.123)
self.assertEqual(trigger_check.call_count, 2)
def test_insert_in(self):
controller = RectangleController(1, 2.1, 3.21, 4.321)
controller.insert_in(self.manager.table)
self.assertEqual(self.manager.table.rowCount(), 5)
self.assertEqual(self.manager.table.columnCount(), 2)
self.assertEqual(self.manager.table.item(0, 0).text(), "Parameter")
self.assertEqual(self.manager.table.item(0, 1).text(), "Value")
self.assertEqual(self.manager.table.item(1, 0).text(), "x0")
self.assertEqual(self.manager.table.item(2, 0).text(), "y0")
self.assertEqual(self.manager.table.item(3, 0).text(), "x1")
self.assertEqual(self.manager.table.item(4, 0).text(), "y1")
self.assertEqual(self.manager.table.item(1, 1).text(), " 1.00000")
self.assertEqual(self.manager.table.item(2, 1).text(), " 2.10000")
self.assertEqual(self.manager.table.item(3, 1).text(), " 3.21000")
self.assertEqual(self.manager.table.item(4, 1).text(), " 4.32100")
def METHOD_NAME(self):
controller1 = RectangleController(1, 2, 3, 4)
controller2 = RectangleController(6, 7, 8, 9)
controller1.insert_in(self.manager.table)
controller2.insert_in(self.manager.table)
self.assertEqual(self.manager.table.rowCount(), 10)
controller1.remove_from(self.manager.table)
self.assertEqual(self.manager.table.rowCount(), 5)
self.assertEqual(self.manager.table.item(0, 0).text(), "Parameter")
self.assertEqual(self.manager.table.item(1, 1).text(), " 6.00000")
self.assertEqual(self.manager.table.item(2, 1).text(), " 7.00000")
self.assertEqual(self.manager.table.item(3, 1).text(), " 8.00000")
self.assertEqual(self.manager.table.item(4, 1).text(), " 9.00000")
def test_update_values(self):
controller = RectangleController(1, 2, 3, 4)
controller.insert_in(self.manager.table)
controller.update_values(6, 7, 8, 9)
self.assertEqual(self.manager.table.item(1, 1).text(), " 6.00000")
self.assertEqual(self.manager.table.item(2, 1).text(), " 7.00000")
self.assertEqual(self.manager.table.item(3, 1).text(), " 8.00000")
self.assertEqual(self.manager.table.item(4, 1).text(), " 9.00000")
self.assertEqual(tuple(controller.get_values()), (6, 7, 8, 9))
if __name__ == "__main__":
unittest.main() | null |
5,704 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore import dtype
from mindspore.ops.functional import vmap
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
class NetCeil(nn.Cell):
def __init__(self):
super(NetCeil, self).__init__()
self.ceil = P.Ceil()
def construct(self, x):
return self.ceil(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ceil_fp32():
"""
Feature: Ceil gpu kernel
Description: test the ceil.
Expectation: match to np benchmark.
"""
ceil = NetCeil()
x = np.random.rand(3, 8).astype(np.float32)
output = ceil(Tensor(x, dtype=dtype.float32))
expect = np.ceil(x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Ceil gpu kernel
Description: test the ceil.
Expectation: match to np benchmark.
"""
ceil = NetCeil()
x = np.random.rand(3, 8).astype(np.float16)
output = ceil(Tensor(x, dtype=dtype.float16))
expect = np.ceil(x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_tensor_ceil():
"""
Feature: ALL TO ALL
Description: test cases for ceil in pynative mode gpu backend.
Expectation: the result matches numpy ceil
"""
context.set_context(mode=context.PYNATIVE_MODE)
x = Tensor(np.array([1.1, -2.1]).astype(np.float32))
np_x = np.array([1.1, -2.1]).astype(np.float32)
output = x.ceil()
expect = np.ceil(np_x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_func_ceil():
"""
Feature: ALL TO ALL
Description: test cases for ceil in pynative mode gpu backend.
Expectation: the result matches numpy ceil
"""
context.set_context(mode=context.PYNATIVE_MODE)
x = Tensor(np.array([1.1, -2.1]).astype(np.float32))
np_x = np.array([1.1, -2.1]).astype(np.float32)
output = F.ceil(x)
expect = np.ceil(np_x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_vmap():
"""
Feature: ceil vmap.
Description: test the rightness of ceil vmap feature.
Expectation: Success.
"""
def cal_ceil(x):
return P.Ceil()(x)
np_x = np.array([[[1.1, 0.9], [2.2, 1.8]], [[4.6, 1.3], [2.4, 2.6]],
[[1.0, 1.0], [2.0, 2.7]], [[1.3, 1.7], [2.9, 2.8]],
[[1.1, 1.4], [2.6, 2.0]], [[1.2, 1.4], [2.0, 2.4]],
[[1.5, 1.4], [2.3, 2.0]], [[1.8, 1.0], [2.9, 2.0]]]).astype(np.float32)
x = Tensor(np_x)
expect = np.ceil(np_x)
vmap_ceil = vmap(cal_ceil, in_axes=(0), out_axes=0)
output = vmap_ceil(x)
assert np.allclose(output.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_vmap2():
"""
Feature: ceil vmap.
Description: test the rightness of ceil vmap feature.
Expectation: Success.
"""
def cal_ceil(x):
return P.Ceil()(x)
np_x = np.array([[[1.1, 0.9], [2.2, 1.8]], [[4.6, 1.3], [2.4, 2.6]],
[[1.0, 1.0], [2.0, 2.7]], [[1.3, 1.7], [2.9, 2.8]],
[[1.1, 1.4], [2.6, 2.0]], [[1.2, 1.4], [2.0, 2.4]],
[[1.5, 1.4], [2.3, 2.0]], [[1.8, 1.0], [2.9, 2.0]]]).astype(np.float32)
x = Tensor(np_x)
expect = np.ceil(np_x)
vmap_ceil = vmap(vmap(cal_ceil, in_axes=(0), out_axes=0), in_axes=(0), out_axes=0)
output = vmap_ceil(x)
assert np.allclose(output.asnumpy(), expect) | null |
5,705 | import math
import os
import pymysql
from wtpy.apps.datahelper.DHDefs import DBHelper
class MysqlHelper(DBHelper):
def __init__(self, host: str, user: str, pwd: str, dbname: str, port: int = 3306):
self.params = {
"host": host,
"user": user,
"password": pwd,
"database": dbname,
"port": port,
}
self.conn: pymysql.Connection = None
def __get_conn__(self):
if self.conn is None:
self.conn = pymysql.connect(**self.params)
try:
self.conn.ping()
except Exception:
self.conn = pymysql.connect(**self.params)
return self.conn
def METHOD_NAME(self):
paths = os.path.split(__file__)
a = paths[:-1] + ("initdb_mysql.sql",)
_path = os.path.join(*a)
f = open(_path, "r", encoding="UTF-8")
content = f.read()
f.close()
conn = self.__get_conn__()
cursor = conn.cursor()
items = content.split(";")
for item in items:
item = item.strip()
if len(item) == 0:
continue
cursor.execute(item + ";")
conn.commit()
cursor.close()
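# Sketch of the bar dicts that writeBars() expects (field names taken from the code
# below; the exchange, code and numeric values are illustrative only):
#
#   bar = {"exchange": "SHFE", "code": "rb2305", "date": 20230103,
#          "open": 4100.0, "high": 4120.0, "low": 4090.0, "close": 4110.0,
#          "volume": 1200}   # settle/turnover/interest/diff_interest default to 0
#   helper.writeBars([bar], period="day")   # non-"day" periods also require a "time" field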
def writeBars(self, bars: list, period="day"):
count = 0
sql = ""
isDay = period == "day"
tbname = "tb_kline_%s" % (period)
for curBar in bars:
if count == 0:
if isDay:
sql = (
"REPLACE INTO %s(exchange,`code`,`date`,open,high,low,close,settle,volume,turnover,interest,diff_interest) VALUES"
% (tbname)
)
else:
sql = (
"REPLACE INTO %s(exchange,`code`,`date`,`time`,open,high,low,close,volume,turnover,interest,diff_interest) VALUES"
% (tbname)
)
if isDay:
subsql = "('%s','%s',%d,%f,%f,%f,%f," % (
curBar["exchange"],
curBar["code"],
curBar["date"],
curBar["open"],
curBar["high"],
curBar["low"],
curBar["close"],
)
if "settle" in curBar:
subsql += str(curBar["settle"]) + ","
else:
subsql += "0,"
if "volume" in curBar:
subsql += str(curBar["volume"]) + ","
else:
subsql += "0,"
if "turnover" in curBar:
subsql += str(curBar["turnover"]) + ","
else:
subsql += "0,"
if "interest" in curBar:
subsql += str(curBar["interest"]) + ","
else:
subsql += "0,"
if "diff_interest" in curBar:
subsql += str(curBar["diff_interest"]) + ","
else:
subsql += "0,"
subsql = subsql[:-1] + "),"
sql += subsql
else:
barTime = (curBar["date"] - 19900000) * 10000 + curBar["time"]
subsql = "('%s','%s',%d,%d,%f,%f,%f,%f," % (
curBar["exchange"],
curBar["code"],
curBar["date"],
barTime,
curBar["open"],
curBar["high"],
curBar["low"],
curBar["close"],
)
if "volume" in curBar:
subsql += str(curBar["volume"]) + ","
else:
subsql += "0,"
if "turnover" in curBar:
subsql += str(curBar["turnover"]) + ","
else:
subsql += "0,"
if "interest" in curBar:
subsql += str(curBar["interest"]) + ","
else:
subsql += "0,"
if "diff_interest" in curBar:
subsql += str(curBar["diff_interest"]) + ","
else:
subsql += "0,"
subsql = subsql[:-1] + "),"
sql += subsql
count += 1
if count == 500:
count = 0
sql = sql[:-1] + ";"
conn = self.__get_conn__()
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
cursor.close()
# after the loop, commit any remaining rows
if count > 0:
sql = sql[:-1] + ";"
conn = self.__get_conn__()
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
cursor.close()
def writeFactors(self, factors: dict):
for exchg in factors:
codelist = factors[exchg]
for code in codelist:
items = codelist[code]
sql = (
"REPLACE INTO tb_adj_factors(exchange,`code`,`date`,factor) VALUES"
)
for item in items:
sql += "('%s','%s',%d,%f)," % (
exchg,
code,
item["date"],
item["factor"],
)
sql = sql[:-1] + ";"
conn = self.__get_conn__()
cursor = conn.cursor()
cursor.execute(sql)
conn.commit()
cursor.close() | null |
5,706 | # Copyright (C) 2023- The University of Notre Dame
# This software is distributed under the GNU General Public License.
# See the file COPYING for details.
from uuid import uuid4
from collections import defaultdict
class DaskVineDag:
"""A directed graph that encodes the steps and state a computation needs.
Single computations are encoded as s-expressions, therefore it is 'upside-down',
in the sense that the children of a node are the nodes required to compute it.
E.g., for
dsk = {'x': 1,
'y': 2,
'z': (add, 'x', 'y'),
'w': (sum, ['x', 'y', 'z']),
'v': [(sum, ['w', 'z']), 2]
}
'z' has as children 'x' and 'y'.
Each node is referenced by its key. When the value of a key is list of
sexprs, like 'v' above, and low_memory_mode is True, then a key is automatically computed recursively
for each computation.
Computation is done lazily. The DaskVineDag is initialized from a task graph, but not
computation is decoded. To use the DaskVineDag:
- DaskVineDag.set_targets(keys): Request the computation associated with key to be decoded.
- DaskVineDag.get_ready(): A list of [key, sexpr] of expressions that are ready
to be executed.
- DaskVineDag.set_result(key, value): Sets the result of key to value.
- DaskVineDag.get_result(key): Get result associated with key. Raises DagNoResult
- DaskVineDag.has_result(key): Whether the key has a computed result. """
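# A rough driver sketch for a graph whose task arguments are plain keys or literals
# (illustrative only; the real executor lives in the surrounding DaskVine code):
#
#   dag = DaskVineDag({'x': 1, 'y': 2, 'z': (add, 'x', 'y')})
#   ready = dag.set_targets(['z'])
#   while ready:
#       key, (fn, *args) = ready.pop()
#       value = fn(*[dag.get_result(a) if dag.graph_keyp(a) else a for a in args])
#       ready.extend(dag.set_result(key, value))
#   assert dag.get_result('z') == 3   # assuming add is operator.add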
@staticmethod
def keyp(s):
return DaskVineDag.hashable(s) and not DaskVineDag.taskp(s)
@staticmethod
def taskp(s):
return isinstance(s, tuple) and len(s) > 0 and callable(s[0])
@staticmethod
def listp(s):
return isinstance(s, list)
@staticmethod
def symbolp(s):
return not (DaskVineDag.taskp(s) or DaskVineDag.listp(s))
@staticmethod
def hashable(s):
try:
hash(s)
return True
except TypeError:
return False
def __init__(self, dsk, low_memory_mode=False):
self._dsk = dsk
# child -> parents. I.e., which parents needs the result of child
self._parents_of = defaultdict(lambda: set())
# parent->children still waiting for result. A key is ready to be computed when children left is []
self._missing_of = {}
# parent->nchildren get the number of children for parent computation
self._children_of = {}
# key->value of its computation
self._result_of = {}
# key->depth. The shallowest level the key is found
self._depth_of = defaultdict(lambda: float('inf'))
# target keys that the dag should compute
self._targets = set()
# set of keys which functions are ready to be computed
# only tasks of the form (fn, arg1, arg2, ...) are ever added to the ready set.
self._ready = set()
# set of keys currently being computed.
self._computing = set()
self._working_graph = dict(dsk)
if low_memory_mode:
self._flatten_graph()
self.initialize_graph()
def graph_keyp(self, s):
if DaskVineDag.keyp(s):
return s in self._working_graph
return False
def depth_of(self, key):
return self._depth_of[key]
def initialize_graph(self):
for key, sexpr in self._working_graph.items():
self.set_relations(key, sexpr)
def find_dependencies(self, sexpr, depth=0):
dependencies = set()
if self.graph_keyp(sexpr):
dependencies.add(sexpr)
self._depth_of[sexpr] = min(depth, self._depth_of[sexpr])
elif not DaskVineDag.symbolp(sexpr):
for sub in sexpr:
dependencies.update(self.find_dependencies(sub, depth + 1))
return dependencies
def set_relations(self, key, sexpr):
sexpr = self._working_graph[key]
self._children_of[key] = self.find_dependencies(sexpr)
self._missing_of[key] = set(self._children_of[key])
for c in self._children_of[key]:
self._parents_of[c].add(key)
def METHOD_NAME(self):
""" List of [(key, sexpr),...] ready for computation.
This call should be used only for
bootstrapping. Further calls should use DaskVineDag.set_result to discover
the new computations that become ready to be executed. """
rs = []
for (key, cs) in self._missing_of.items():
if self.has_result(key) or cs:
continue
sexpr = self._working_graph[key]
if self.graph_keyp(sexpr):
rs.extend(self.set_result(key, self.get_result(sexpr)))
elif self.symbolp(sexpr):
rs.extend(self.set_result(key, sexpr))
else:
rs.append((key, sexpr))
return rs
def set_result(self, key, value):
""" Sets new result and propagates in the DaskVineDag. Returns a list of [(key, sexpr),...]
of computations that become ready to be executed """
rs = []
self._result_of[key] = value
for p in self._parents_of[key]:
self._missing_of[p].discard(key)
if self._missing_of[p]:
continue
sexpr = self._working_graph[p]
if self.graph_keyp(sexpr):
rs.extend(
self.set_result(p, self.get_result(sexpr))
) # case e.g, "x": "y", and we just set the value of "y"
elif self.symbolp(sexpr):
rs.extend(self.set_result(p, sexpr))
else:
rs.append((p, sexpr))
return rs
def _flatten_graph(self):
""" Recursively decomposes a sexpr associated with key, so that its arguments, if any
are keys. """
for key in list(self._working_graph.keys()):
self.flatten_rec(key, self._working_graph[key], toplevel=True)
def _add_second_targets(self, key):
if not DaskVineDag.listp(self._working_graph[key]):
return
for c in self._working_graph[key]:
if self.graph_keyp(c):
self._targets.add(c)
self._add_second_targets(c)
def flatten_rec(self, key, sexpr, toplevel=False):
if key in self._working_graph and not toplevel:
return
if DaskVineDag.symbolp(sexpr):
return
nargs = []
next_flat = []
cons = type(sexpr)
for arg in sexpr:
if DaskVineDag.symbolp(arg):
nargs.append(arg)
else:
next_key = uuid4()
nargs.append(next_key)
next_flat.append((next_key, arg))
self._working_graph[key] = cons(nargs)
for (n, a) in next_flat:
self.flatten_rec(n, a)
def has_result(self, key):
return key in self._result_of
def get_result(self, key):
try:
return self._result_of[key]
except KeyError:
raise DaskVineNoResult(key)
def get_children(self, key):
""" Sets new result and propagates in the DaskVineDag. Returns a list of [key, (fn, *args)]
of computations that become ready to be executed """
try:
return self._children_of[key]
except KeyError:
raise DaskVineNoResult(key)
def set_targets(self, keys):
""" Values of keys that need to be computed. """
self._targets.update(keys)
for k in keys:
self._add_second_targets(k)
return self.METHOD_NAME()
def get_targets(self):
return self._targets
class DaskVineNoResult(Exception):
"""Exception raised when asking for a result from a computation that has not been performed."""
pass
# vim: set sts=4 sw=4 ts=4 expandtab ft=python: | null |
5,707 | import pytest
from pybind11_tests import ConstructorStats
def test_regressions():
from pybind11_tests.issues import print_cchar, print_char
# #137: const char* isn't handled properly
assert print_cchar("const char *") == "const char *"
# #150: char bindings broken
assert print_char("c") == "c"
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
from pybind11_tests.issues import DispatchIssue, dispatch_issue_go
class PyClass1(DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return dispatch_issue_go(p)
b = PyClass2()
assert dispatch_issue_go(b) == "Yay.."
def test_reference_wrapper():
"""#171: Can't return reference wrappers (or STL data structures containing them)"""
from pybind11_tests.issues import Placeholder, return_vec_of_reference_wrapper
assert str(return_vec_of_reference_wrapper(Placeholder(4))) == \
"[Placeholder[1], Placeholder[2], Placeholder[3], Placeholder[4]]"
def test_iterator_passthrough():
"""#181: iterator passthrough did not compile"""
from pybind11_tests.issues import iterator_passthrough
assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]
def test_shared_ptr_gc():
"""// #187: issue involving std::shared_ptr<> return value policy & garbage collection"""
from pybind11_tests.issues import ElementList, ElementA
el = ElementList()
for i in range(10):
el.add(ElementA(i))
pytest.gc_collect()
for i, v in enumerate(el.get()):
assert i == v.value()
def test_no_id(msg):
from pybind11_tests.issues import get_element, expect_float, expect_int
with pytest.raises(TypeError) as excinfo:
get_element(None)
assert msg(excinfo.value) == """
get_element(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.issues.ElementA) -> int
Invoked with: None
"""
with pytest.raises(TypeError) as excinfo:
expect_int(5.2)
assert msg(excinfo.value) == """
expect_int(): incompatible function arguments. The following argument types are supported:
1. (arg0: int) -> int
Invoked with: 5.2
"""
assert expect_float(12) == 12
def test_str_issue(msg):
"""Issue #283: __str__ called on uninitialized instance when constructor arguments invalid"""
from pybind11_tests.issues import StrIssue
assert str(StrIssue(3)) == "StrIssue[3]"
with pytest.raises(TypeError) as excinfo:
str(StrIssue("no", "such", "constructor"))
assert msg(excinfo.value) == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.issues.StrIssue(arg0: int)
2. m.issues.StrIssue()
Invoked with: 'no', 'such', 'constructor'
"""
def test_nested():
""" #328: first member in a class can't be used in operators"""
from pybind11_tests.issues import NestA, NestB, NestC, get_NestA, get_NestB, get_NestC
a = NestA()
b = NestB()
c = NestC()
a += 10
assert get_NestA(a) == 13
b.a += 100
assert get_NestA(b.a) == 103
c.b.a += 1000
assert get_NestA(c.b.a) == 1003
b -= 1
assert get_NestB(b) == 3
c.b -= 3
assert get_NestB(c.b) == 1
c *= 7
assert get_NestC(c) == 35
abase = a.as_base()
assert abase.value == -2
a.as_base().value += 44
assert abase.value == 42
assert c.b.a.as_base().value == -2
c.b.a.as_base().value += 44
assert c.b.a.as_base().value == 42
del c
pytest.gc_collect()
del a # Shouldn't delete while abase is still alive
pytest.gc_collect()
assert abase.value == 42
del abase, b
pytest.gc_collect()
def test_move_fallback():
from pybind11_tests.issues import get_moveissue1, get_moveissue2
m2 = get_moveissue2(2)
assert m2.value == 2
m1 = get_moveissue1(1)
assert m1.value == 1
def test_override_ref():
from pybind11_tests.issues import OverrideTest
o = OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_operators_notimplemented(capture):
from pybind11_tests.issues import OpTest1, OpTest2
with capture:
c1, c2 = OpTest1(), OpTest2()
c1 + c1
c2 + c2
c2 + c1
c1 + c2
assert capture == """
Add OpTest1 with OpTest1
Add OpTest2 with OpTest2
Add OpTest2 with OpTest1
Add OpTest2 with OpTest1
"""
def METHOD_NAME():
""" Issue 388: Can't make iterators via make_iterator() with different r/v policies """
from pybind11_tests.issues import make_iterator_1
from pybind11_tests.issues import make_iterator_2
assert list(make_iterator_1()) == [1, 2, 3]
assert list(make_iterator_2()) == [1, 2, 3]
assert not isinstance(make_iterator_1(), type(make_iterator_2()))
def test_dupe_assignment():
""" Issue 461: overwriting a class with a function """
from pybind11_tests.issues import dupe_exception_failures
assert dupe_exception_failures() == []
def test_enable_shared_from_this_with_reference_rvp():
""" Issue #471: shared pointer instance not dellocated """
from pybind11_tests import SharedParent, SharedChild
parent = SharedParent()
child = parent.get_child()
cstats = ConstructorStats.get(SharedChild)
assert cstats.alive() == 1
del child, parent
assert cstats.alive() == 0
def test_non_destructed_holders():
""" Issue #478: unique ptrs constructed and freed without destruction """
from pybind11_tests import SpecialHolderObj
a = SpecialHolderObj(123)
b = a.child()
assert a.val == 123
assert b.val == 124
cstats = SpecialHolderObj.holder_cstats()
assert cstats.alive() == 1
del b
assert cstats.alive() == 1
del a
assert cstats.alive() == 0
def test_complex_cast(capture):
""" Issue #484: number conversion generates unhandled exceptions """
from pybind11_tests.issues import test_complex
with capture:
test_complex(1)
test_complex(2j)
assert capture == """
1.0
(0.0, 2.0)
"""
def test_inheritance_override_def_static():
from pybind11_tests.issues import MyBase, MyDerived
b = MyBase.make()
d1 = MyDerived.make2()
d2 = MyDerived.make()
assert isinstance(b, MyBase)
assert isinstance(d1, MyDerived)
assert isinstance(d2, MyDerived) | null |
5,708 | from __future__ import annotations
import functools
import logging
from typing import Coroutine
import zigpy.profiles
import zigpy.types as t
from zigpy.typing import AddressingMode
import zigpy.util
from . import types
LOGGER = logging.getLogger(__name__)
ZDO_ENDPOINT = 0
class ZDO(zigpy.util.CatchingTaskMixin, zigpy.util.ListenableMixin):
"""The ZDO endpoint of a device"""
class LeaveOptions(t.bitmap8):
"""ZDO Mgmt_Leave_req Options."""
NONE = 0
RemoveChildren = 1 << 6
Rejoin = 1 << 7
def __init__(self, device):
self._device = device
self._listeners = {}
def _serialize(self, command, *args):
schema = types.CLUSTERS[command][1]
data = t.serialize(args, schema)
return data
def deserialize(self, cluster_id, data):
if cluster_id not in types.CLUSTERS:
raise ValueError(f"Invalid ZDO cluster ID: 0x{cluster_id:04X}")
_, param_types = types.CLUSTERS[cluster_id]
hdr, data = types.ZDOHeader.deserialize(cluster_id, data)
args, data = t.deserialize(data, param_types)
if data:
# TODO: Seems sane to check, but what should we do?
self.warning("Data remains after deserializing ZDO frame: %r", data)
return hdr, args
def METHOD_NAME(self, command, *args, use_ieee=False):
data = self._serialize(command, *args)
tsn = self.device.application.get_sequence()
data = t.uint8_t(tsn).serialize() + data
return self._device.METHOD_NAME(0, command, 0, 0, tsn, data, use_ieee=use_ieee)
def reply(self, command, *args, tsn=None, use_ieee=False):
data = self._serialize(command, *args)
if tsn is None:
tsn = self.device.application.get_sequence()
data = t.uint8_t(tsn).serialize() + data
return self._device.reply(0, command, 0, 0, tsn, data, use_ieee=use_ieee)
def handle_message(
self,
profile: int,
cluster: int,
hdr: types.ZDOHeader,
args: list,
*,
dst_addressing: AddressingMode | None = None,
) -> None:
self.debug("ZDO request %s: %s", hdr.command_id, args)
handler = getattr(self, f"handle_{hdr.command_id.name.lower()}", None)
if handler is not None:
handler(hdr, *args, dst_addressing=dst_addressing)
else:
self.debug("No handler for ZDO request:%s(%s)", hdr.command_id, args)
self.listener_event(
f"zdo_{hdr.command_id.name.lower()}",
self._device,
dst_addressing,
hdr,
args,
)
def handle_nwk_addr_req(
self,
hdr: types.ZDOHeader,
ieee: t.EUI64,
request_type: int,
start_index: int | None = None,
dst_addressing: AddressingMode | None = None,
):
"""Handle ZDO NWK Address request."""
app = self._device.application
if ieee == app.state.node_info.ieee:
self.create_catching_task(
self.NWK_addr_rsp(
0,
app.state.node_info.ieee,
app.state.node_info.nwk,
0,
0,
[],
tsn=hdr.tsn,
)
)
def handle_ieee_addr_req(
self,
hdr: types.ZDOHeader,
nwk: t.NWK,
request_type: int,
start_index: int | None = None,
dst_addressing: AddressingMode | None = None,
):
"""Handle ZDO IEEE Address request."""
app = self._device.application
if nwk in (
t.BroadcastAddress.ALL_DEVICES,
t.BroadcastAddress.RX_ON_WHEN_IDLE,
t.BroadcastAddress.ALL_ROUTERS_AND_COORDINATOR,
app.state.node_info.nwk,
):
self.create_catching_task(
self.IEEE_addr_rsp(
0,
app.state.node_info.ieee,
app.state.node_info.nwk,
0,
0,
[],
tsn=hdr.tsn,
)
)
def handle_device_annce(
self,
hdr: types.ZDOHeader,
nwk: t.NWK,
ieee: t.EUI64,
capability: int,
dst_addressing: AddressingMode | None = None,
):
"""Handle ZDO device announcement request."""
self.listener_event("device_announce", self._device)
def handle_mgmt_permit_joining_req(
self,
hdr: types.ZDOHeader,
permit_duration: int,
tc_significance: int,
dst_addressing: AddressingMode | None = None,
):
"""Handle ZDO permit joining request."""
self.listener_event("permit_duration", permit_duration)
def handle_match_desc_req(
self,
hdr: types.ZDOHeader,
addr: t.NWK,
profile: int,
in_clusters: list,
out_cluster: list,
dst_addressing: AddressingMode | None = None,
):
"""Handle ZDO Match_desc_req request."""
local_addr = self._device.application.state.node_info.nwk
if profile != zigpy.profiles.zha.PROFILE_ID:
self.create_catching_task(
self.Match_Desc_rsp(0, local_addr, [], tsn=hdr.tsn)
)
return
self.create_catching_task(
self.Match_Desc_rsp(0, local_addr, [t.uint8_t(1)], tsn=hdr.tsn)
)
def bind(self, cluster):
return self.Bind_req(
self._device.ieee,
cluster.endpoint.endpoint_id,
cluster.cluster_id,
self.device.application.get_dst_address(cluster),
)
def unbind(self, cluster):
return self.Unbind_req(
self._device.ieee,
cluster.endpoint.endpoint_id,
cluster.cluster_id,
self.device.application.get_dst_address(cluster),
)
def leave(self, remove_children: bool = True, rejoin: bool = False) -> Coroutine:
opts = self.LeaveOptions.NONE
if remove_children:
opts |= self.LeaveOptions.RemoveChildren
if rejoin:
opts |= self.LeaveOptions.Rejoin
return self.Mgmt_Leave_req(self._device.ieee, opts)
def permit(self, duration=60, tc_significance=0):
return self.Mgmt_Permit_Joining_req(duration, tc_significance)
def log(self, lvl, msg, *args, **kwargs):
msg = "[0x%04x:zdo] " + msg
args = (self._device.nwk,) + args
return LOGGER.log(lvl, msg, *args, **kwargs)
@property
def device(self):
return self._device
def __getattr__(self, name):
try:
command = types.ZDOCmd[name]
except KeyError:
raise AttributeError(f"No such '{name}' ZDO command")
if command & 0x8000:
return functools.partial(self.reply, command)
return functools.partial(self.METHOD_NAME, command)
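# Note on the dynamic API: names such as Bind_req, Mgmt_Leave_req or NWK_addr_rsp used
# above are not defined as methods; __getattr__ looks them up in types.ZDOCmd and returns
# a partial of reply() for response commands (id & 0x8000 set) or of the request method
# otherwise.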
def broadcast(
app,
command,
grpid,
radius,
*args,
broadcast_address=t.BroadcastAddress.RX_ON_WHEN_IDLE,
**kwargs,
):
params, param_types = types.CLUSTERS[command]
named_args = dict(zip(params, args))
named_args.update(kwargs)
assert set(named_args.keys()) & set(params)
sequence = app.get_sequence()
data = bytes([sequence]) + t.serialize(named_args.values(), param_types)
return zigpy.device.broadcast(
app,
0,
command,
0,
0,
grpid,
radius,
sequence,
data,
broadcast_address=broadcast_address,
) | null |
5,709 | # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Dataset.
"""
from __future__ import annotations
from copy import deepcopy
from pathlib import Path
from typing import Dict, List
import numpy as np
import numpy.typing as npt
import torch
from jaxtyping import Float
from PIL import Image
from torch import Tensor
from torch.utils.data import Dataset
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.data.dataparsers.base_dataparser import DataparserOutputs
from nerfstudio.data.utils.data_utils import get_image_mask_tensor_from_path
class InputDataset(Dataset):
"""Dataset that returns images.
Args:
dataparser_outputs: description of where and how to read input images.
scale_factor: The scaling factor for the dataparser outputs
"""
exclude_batch_keys_from_device: List[str] = ["image", "mask"]
cameras: Cameras
def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):
super().__init__()
self._dataparser_outputs = dataparser_outputs
self.scale_factor = scale_factor
self.scene_box = deepcopy(dataparser_outputs.scene_box)
self.metadata = deepcopy(dataparser_outputs.metadata)
self.cameras = deepcopy(dataparser_outputs.cameras)
self.cameras.rescale_output_resolution(scaling_factor=scale_factor)
def __len__(self):
return len(self._dataparser_outputs.image_filenames)
def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:
"""Returns the image of shape (H, W, 3 or 4).
Args:
image_idx: The image index in the dataset.
"""
image_filename = self._dataparser_outputs.image_filenames[image_idx]
pil_image = Image.open(image_filename)
if self.scale_factor != 1.0:
width, height = pil_image.size
newsize = (int(width * self.scale_factor), int(height * self.scale_factor))
pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)
image = np.array(pil_image, dtype="uint8") # shape is (h, w) or (h, w, 3 or 4)
if len(image.shape) == 2:
image = image[:, :, None].repeat(3, axis=2)
assert len(image.shape) == 3
assert image.dtype == np.uint8
assert image.shape[2] in [3, 4], f"Image shape of {image.shape} is incorrect."
return image
def METHOD_NAME(self, image_idx: int) -> Float[Tensor, "image_height image_width num_channels"]:
"""Returns a 3 channel image.
Args:
image_idx: The image index in the dataset.
"""
image = torch.from_numpy(self.get_numpy_image(image_idx).astype("float32") / 255.0)
if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:
image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])
return image
def get_data(self, image_idx: int) -> Dict:
"""Returns the ImageDataset data as a dictionary.
Args:
image_idx: The image index in the dataset.
"""
image = self.METHOD_NAME(image_idx)
data = {"image_idx": image_idx, "image": image}
if self._dataparser_outputs.mask_filenames is not None:
mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]
data["mask"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)
assert (
data["mask"].shape[:2] == data["image"].shape[:2]
), f"Mask and image have different shapes. Got {data['mask'].shape[:2]} and {data['image'].shape[:2]}"
metadata = self.get_metadata(data)
data.update(metadata)
return data
def get_metadata(self, data: Dict) -> Dict:
"""Method that can be used to process any additional metadata that may be part of the model inputs.
Args:
image_idx: The image index in the dataset.
"""
del data
return {}
def __getitem__(self, image_idx: int) -> Dict:
data = self.get_data(image_idx)
return data
@property
def image_filenames(self) -> List[Path]:
"""
Returns image filenames for this dataset.
The order of filenames is the same as in the Cameras object for easy mapping.
"""
return self._dataparser_outputs.image_filenames | null |
5,710 | from django.template import Template, Context
from django.utils.translation import gettext_lazy
from devilry.apps.core import models as core_models
class UserInfo(object):
def __init__(self, groupuserlookup, user):
self.groupuserlookup = groupuserlookup
self.user = user
@property
def candidate(self):
if not hasattr(self, '_candidate'):
try:
self._candidate = core_models.Candidate.objects.get(
assignment_group=self.groupuserlookup.group,
relatedstudent__user=self.user)
except core_models.Candidate.DoesNotExist:
self._candidate = None
return self._candidate
@property
def relatedexaminer(self):
if not hasattr(self, '_relatedexaminer'):
try:
self._relatedexaminer = core_models.RelatedExaminer.objects.get(
period_id=self.groupuserlookup.assignment.parentnode_id,
user=self.user)
except core_models.RelatedExaminer.DoesNotExist:
self._relatedexaminer = None
return self._relatedexaminer
@property
def relatedstudent(self):
if not hasattr(self, '_relatedstudent'):
try:
self._relatedstudent = core_models.RelatedStudent.objects.get(
period_id=self.groupuserlookup.assignment.parentnode_id,
user=self.user)
except core_models.RelatedStudent.DoesNotExist:
self._relatedstudent = None
return self._relatedstudent
def _render_template(self, templatestring, **contextdata):
return Template(templatestring).render(Context(contextdata))
def _render_span(self, cssclass, content):
return self._render_template("""
<span class="{{ cssclass }}">
{{ content }}
</span>
""", cssclass=cssclass, content=content)
def METHOD_NAME(self, user, html=False):
if user is None:
fallback = gettext_lazy('Deleted user')
if html:
return self._render_span(cssclass='text-danger', content=fallback)
else:
return fallback
if html:
return self._render_template('{% load devilry_account_tags %}{% devilry_user_verbose_inline user %}', user=user)
else:
return user.get_displayname()
def get_unanonymized_short_name_from_user(self, user, html=False):
if user is None:
fallback = gettext_lazy('Deleted user')
if html:
return self._render_span(cssclass='text-danger', content=fallback)
else:
return fallback
return user.get_short_name()
def __get_anonymized_name_from_user(self, user, user_role):
if user_role == 'student':
if self.groupuserlookup.assignment.uses_custom_candidate_ids:
return self.candidate.get_anonymous_name(assignment=self.groupuserlookup.assignment)
elif self.relatedstudent:
return self.relatedstudent.get_anonymous_name()
elif user_role == 'examiner':
if self.relatedexaminer:
return self.relatedexaminer.get_anonymous_name()
else:
raise ValueError('Can only call __get_anonymized_name_from_user '
'with user_role "examiner" or "student".')
return gettext_lazy('User removed from semester')
def get_anonymized_name_from_user(self, user, user_role, html=False):
name = self.__get_anonymized_name_from_user(user=user, user_role=user_role)
if html:
if user_role == 'student':
return self._render_span(cssclass='devilry-core-candidate-anonymous-name',
content=name)
else:
return self._render_span(cssclass='devilry-core-examiner-anonymous-name',
content=name)
return name
class GroupUserLookup(object):
"""
"""
def __init__(self, assignment, group, requestuser_devilryrole, requestuser=None):
"""
Args:
group:
requestuser:
requestuser_devilryrole:
"""
assert assignment.id == group.parentnode_id
self.assignment = assignment
self.group = group
self.requestuser = requestuser
self.requestuser_devilryrole = requestuser_devilryrole
self._usercache = {}
def is_requestuser(self, user):
"""
"""
if not self.requestuser:
return False
return self.requestuser == user
def _get_userinfo(self, user):
if user.id not in self._usercache:
self._usercache[user.id] = UserInfo(groupuserlookup=self, user=user)
return self._usercache[user.id]
def get_long_name_from_user(self, user, user_role, html=False):
userinfo = self._get_userinfo(user=user)
if not self.is_requestuser(user=user):
if user_role == 'student' and self.assignment.students_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
elif user_role == 'examiner' and self.assignment.examiners_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
return userinfo.METHOD_NAME(user=user, html=html)
def get_plaintext_short_name_from_user(self, user, user_role, html=False):
userinfo = self._get_userinfo(user=user)
if not self.is_requestuser(user=user):
if user_role == 'student' and self.assignment.students_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
elif user_role == 'examiner' and self.assignment.examiners_must_be_anonymized_for_devilryrole(devilryrole=self.requestuser_devilryrole):
return userinfo.get_anonymized_name_from_user(user=user, user_role=user_role, html=html)
return userinfo.get_unanonymized_short_name_from_user(user=user) | null |
5,711 | #
# MIT No Attribution
#
# Copyright (C) 2010-2023 Joel Andersson, Joris Gillis, Moritz Diehl, KU Leuven.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from pylab import *
# End time
T = 10.
# Number of control intervals
N = 20
# Number of Runge-Kutta 4 steps per interval and step size
NK = 20
DT = T/(N*NK)
# Number of discrete control values
NU = 101
# Number of discrete state values
NX = 101
# System dynamics, can be called with matrices
def METHOD_NAME(x1,x2,u):
x1_dot = (1 - x2*x2)*x1 - x2 + u
x2_dot = x1
q_dot = x1*x1 + x2*x2 + u*u
return (x1_dot, x2_dot, q_dot)
# Control enumeration
U = linspace(-1,1,NU)
# State space enumeration
x1 = linspace(-1,1,NX)
x2 = linspace(-1,1,NX)
X1,X2 = meshgrid(x1,x2)
# For each control action and state, precalculate next state and stage cost
stage_J = []
next_x1 = []
next_x2 = []
for u in U:
# Take number of integration steps
X1_k = copy(X1)
X2_k = copy(X2)
Q_k = zeros(X1.shape)
for k in range(NK):
# RK4 integration for x1, x2 and q
k1_x1, k1_x2, k1_q = METHOD_NAME(X1_k, X2_k, u)
k2_x1, k2_x2, k2_q = METHOD_NAME(X1_k + DT/2 * k1_x1, X2_k + DT/2 * k1_x2, u)
k3_x1, k3_x2, k3_q = METHOD_NAME(X1_k + DT/2 * k2_x1, X2_k + DT/2 * k2_x2, u)
k4_x1, k4_x2, k4_q = METHOD_NAME(X1_k + DT * k3_x1, X2_k + DT * k3_x2, u)
X1_k += DT/6*(k1_x1 + 2*k2_x1 + 2*k3_x1 + k4_x1)
X2_k += DT/6*(k1_x2 + 2*k2_x2 + 2*k3_x2 + k4_x2)
Q_k += DT/6*(k1_q + 2*k2_q + 2*k3_q + k4_q )
# Find out which state comes next (index)
X1_k = matrix.round((X1_k+1)/2*(NX-1)).astype(int)
X2_k = matrix.round((X2_k+1)/2*(NX-1)).astype(int)
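    # (x + 1)/2 maps [-1, 1] onto [0, 1], so scaling by NX - 1 and rounding gives the
    # nearest grid index; out-of-range indices are handled just below.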
# Infinite cost if state gets out-of-bounds
I = X1_k < 0; Q_k[I]=inf; X1_k[I]=0
I = X2_k < 0; Q_k[I]=inf; X2_k[I]=0
I = X1_k >= NX; Q_k[I]=inf; X1_k[I]=0
I = X2_k >= NX; Q_k[I]=inf; X2_k[I]=0
# Save the stage cost and next state
next_x1.append(X1_k)
next_x2.append(X2_k)
stage_J.append(Q_k)
# Calculate cost-to-go (no end cost) and optimal control
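# Backward (Bellman) recursion: at each stage k,
#   J_k(x) = min_u [ stage_J(x, u) + J_{k+1}(next_state(x, u)) ]
# evaluated on the whole state grid at once; u_prev stores the argmin for each state.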
J = zeros(X1.shape)
U_opt = []
for k in reversed(list(range(N))):
# Cost to go for the previous step, optimal control action
J_prev = inf*ones(X1.shape)
u_prev = -ones(X1.shape,dtype=int)
# Test all control actions
for uind in range(NU):
J_prev_test = J[next_x2[uind],next_x1[uind]]+stage_J[uind]
better = J_prev_test<J_prev
u_prev[better] = uind
J_prev[better] = J_prev_test[better]
# Update cost-to-go and save optimal control
J = J_prev
U_opt.append(u_prev)
# Reorder U_opt by stage
U_opt.reverse()
# Find optimal control starting at x1=0, x2=1
i1 = NX//2
i2 = NX-1
u_opt = []
x1_opt = [x1[i1]]
x2_opt = [x2[i2]]
cost = 0
for k in range(N):
# Get the optimal control and go to next step
u_ind = U_opt[k][i2,i1]
cost += stage_J[u_ind][i2,i1]
i1, i2 = next_x1[u_ind][i2,i1], next_x2[u_ind][i2,i1]
# Save the trajectories
u_opt.append(U[u_ind])
x1_opt.append(x1[i1])
x2_opt.append(x2[i2])
# Optimal cost
print("Minimal cost: ", cost)
assert abs(cost-J[NX-1,NX//2])<1e-8 # Consistency check
# Plot
figure(1)
clf()
# Plot optimal cost-to-go
subplot(121)
contourf(X1,X2,J)
colorbar()
xlabel('x1')
ylabel('x2')
title('Cost-to-go')
subplot(122)
plot(linspace(0,T,N+1),x1_opt,'--')
plot(linspace(0,T,N+1),x2_opt,'-.')
step(linspace(0,T,N),u_opt,'-')
plt.title("Dynamic programming solution")
plt.xlabel('time')
plt.legend(['x1 trajectory','x2 trajectory','u trajectory'])
grid(True)
show() | null |
5,712 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
from mantid.plots.legend import LegendProperties
from mantidqt.utils.qt import load_ui
from mantidqt.widgets.plotconfigdialog.colorselector import ColorSelector
from mantidqt.widgets.plotconfigdialog.legendtabwidget.advancedlegendoptionsdialog.view import AdvancedLegendOptionsView
class LegendTabWidgetView(QWidget):
def __init__(self, parent=None):
super(LegendTabWidgetView, self).__init__(parent=parent)
self.ui = load_ui(__file__, "legend_tab.ui", baseinstance=self)
self.background_color_selector_widget = ColorSelector(parent=self)
self.edge_color_selector_widget = ColorSelector(parent=self)
self.title_color_selector_widget = ColorSelector(parent=self)
self.entries_color_selector_widget = ColorSelector(parent=self)
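        # The .ui file only contains placeholder widgets for the colour pickers; swap
        # them out for real ColorSelector widgets in the grid layout.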
self.grid_layout.replaceWidget(self.background_color_selector_dummy_widget, self.background_color_selector_widget)
self.grid_layout.replaceWidget(self.edge_color_selector_dummy_widget, self.edge_color_selector_widget)
self.grid_layout.replaceWidget(self.entries_color_selector_dummy_widget, self.entries_color_selector_widget)
self.grid_layout.replaceWidget(self.title_color_selector_dummy_widget, self.title_color_selector_widget)
self.setAttribute(Qt.WA_DeleteOnClose, True)
self.advanced_options = AdvancedLegendOptionsView(self)
def set_transparency_slider(self, transparency):
self.transparency_slider.setValue(transparency)
def get_transparency_slider_value(self):
return self.transparency_slider.value()
def set_transparency_spin_box(self, transparency):
self.transparency_spin_box.setValue(transparency)
def get_transparency_spin_box_value(self):
return self.transparency_spin_box.value()
def hide_transparency(self):
self.transparency_label.setHidden(True)
self.transparency_slider.setHidden(True)
self.transparency_spin_box.setHidden(True)
def set_title(self, title):
self.title_line_edit.setText(title)
def get_title(self):
return self.title_line_edit.text()
def set_background_color(self, color):
self.background_color_selector_widget.set_color(color)
def get_background_color(self):
return self.background_color_selector_widget.get_color()
def METHOD_NAME(self, color):
self.edge_color_selector_widget.set_color(color)
def get_edge_color(self):
return self.edge_color_selector_widget.get_color()
def set_entries_font(self, font):
self.entries_font_combo_box.setCurrentText(font)
def get_entries_font(self):
return self.entries_font_combo_box.currentText()
def set_entries_size(self, size):
self.entries_size_spin_box.setValue(size)
def get_entries_size(self):
return self.entries_size_spin_box.value()
def set_entries_color(self, color):
self.entries_color_selector_widget.set_color(color)
def get_entries_color(self):
return self.entries_color_selector_widget.get_color()
def set_title_font(self, font):
self.title_font_combo_box.setCurrentText(font)
def get_title_font(self):
return self.title_font_combo_box.currentText()
def set_title_size(self, size):
self.title_size_spin_box.setValue(size)
def get_title_size(self):
return self.title_size_spin_box.value()
def set_title_color(self, color):
self.title_color_selector_widget.set_color(color)
def get_title_color(self):
return self.title_color_selector_widget.get_color()
def set_marker_size(self, size):
self.marker_size_spin_box.setValue(size)
def get_marker_size(self):
return self.marker_size_spin_box.value()
def get_hide_box(self):
return self.hide_box_check_box.isChecked()
def set_hide_box(self, hide):
self.hide_box_check_box.setChecked(hide)
def get_hide_legend(self):
return self.hide_legend_check_box.isChecked()
def set_hide_legend(self, hide):
self.hide_legend_check_box.setChecked(hide)
def get_properties(self):
props = LegendProperties.from_view(self)
advanced_props = self.advanced_options.get_properties()
props.update(advanced_props)
return props
def hide_box_properties(self):
self.box_label.setHidden(True)
self.hide_box_check_box.setHidden(True)
self.background_color_label.setHidden(True)
self.background_color_selector_widget.setHidden(True)
self.edge_color_label.setHidden(True)
self.edge_color_selector_widget.setHidden(True)
self.hide_transparency() | null |
5,713 | """
When you need to use random numbers in SymPy library code, import from here
so there is only one generator working for SymPy. Imports from here should
behave the same as if they were being imported from Python's random module.
But only the routines currently used in SymPy are included here. To use others
import ``rng`` and access the method directly. For example, to capture the
current state of the generator use ``rng.getstate()``.
There is intentionally no Random to import from here. If you want
to control the state of the generator, import ``seed`` and call it
with or without an argument to set the state.
Examples
========
>>> from sympy.core.random import random, seed
>>> assert random() < 1
>>> seed(1); a = random()
>>> b = random()
>>> seed(1); c = random()
>>> assert a == c
>>> assert a != b # remote possibility this will fail
"""
from sympy.utilities.iterables import is_sequence
from sympy.utilities.misc import as_int
import random as _random
rng = _random.Random()
choice = rng.choice
random = rng.random
randint = rng.randint
randrange = rng.randrange
sample = rng.sample
# seed = rng.seed
shuffle = rng.shuffle
uniform = rng.uniform
_assumptions_rng = _random.Random()
_assumptions_shuffle = _assumptions_rng.shuffle
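# A dedicated generator for the assumptions system, presumably so that assumption
# queries do not disturb the state (and hence reproducibility) of ``rng`` above.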
def seed(a=None, version=2):
rng.seed(a=a, version=version)
_assumptions_rng.seed(a=a, version=version)
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False, tolerance=None):
"""
Return a random complex number.
To reduce chance of hitting branch cuts or anything, we guarantee
b <= Im z <= d, a <= Re z <= c
When rational is True, a rational approximation to a random number
is obtained within specified tolerance, if any.
"""
from sympy.core.numbers import I
from sympy.simplify.simplify import nsimplify
A, B = uniform(a, c), uniform(b, d)
if not rational:
return A + I*B
return (nsimplify(A, rational=True, tolerance=tolerance) +
I*nsimplify(B, rational=True, tolerance=tolerance))
def verify_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that f and g agree when evaluated in the argument z.
If z is None, all symbols will be tested. This routine does not test
whether there are Floats present with precision higher than 15 digits
so if there are, your results may not be what you expect due to round-
off errors.
Examples
========
>>> from sympy import sin, cos
>>> from sympy.abc import x
>>> from sympy.core.random import verify_numerically as tn
>>> tn(sin(x)**2 + cos(x)**2, 1, x)
True
"""
from sympy.core.symbol import Symbol
from sympy.core.sympify import sympify
from sympy.core.numbers import comp
f, g = (sympify(i) for i in (f, g))
if z is None:
z = f.free_symbols | g.free_symbols
elif isinstance(z, Symbol):
z = [z]
reps = list(zip(z, [random_complex_number(a, b, c, d) for _ in z]))
z1 = f.subs(reps).n()
z2 = g.subs(reps).n()
return comp(z1, z2, tol)
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
"""
Test numerically that the symbolically computed derivative of f
with respect to z is correct.
This routine does not test whether there are Floats present with
precision higher than 15 digits so if there are, your results may
not be what you expect due to round-off errors.
Examples
========
>>> from sympy import sin
>>> from sympy.abc import x
>>> from sympy.core.random import test_derivative_numerically as td
>>> td(sin(x), x)
True
"""
from sympy.core.numbers import comp
from sympy.core.function import Derivative
z0 = random_complex_number(a, b, c, d)
f1 = f.diff(z).subs(z, z0)
f2 = Derivative(f, z).doit_numerically(z0)
return comp(f1.n(), f2.n(), tol)
def METHOD_NAME(seed=None):
"""Return a randrange generator.
``seed`` can be
* None - return randomly seeded generator
* int - return a generator seeded with the int
* list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.core.random import _randrange
>>> rr = _randrange()
>>> rr(1000) # doctest: +SKIP
999
>>> rr = _randrange(3)
>>> rr(1000) # doctest: +SKIP
238
>>> rr = _randrange([0, 5, 1, 3, 4])
>>> rr(3), rr(3)
(0, 1)
"""
if seed is None:
return randrange
elif isinstance(seed, int):
rng.seed(seed)
return randrange
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b=None, seq=seed):
if b is None:
a, b = 0, a
a, b = as_int(a), as_int(b)
w = b - a
if w < 1:
raise ValueError('_randrange got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randrange sequence was too short')
if a <= x < b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randrange got an unexpected seed')
def _randint(seed=None):
"""Return a randint generator.
``seed`` can be
* None - return randomly seeded generator
* int - return a generator seeded with the int
* list - the values to be returned will be taken from the list
in the order given; the provided list is not modified.
Examples
========
>>> from sympy.core.random import _randint
>>> ri = _randint()
>>> ri(1, 1000) # doctest: +SKIP
999
>>> ri = _randint(3)
>>> ri(1, 1000) # doctest: +SKIP
238
>>> ri = _randint([0, 5, 1, 2, 4])
>>> ri(1, 3), ri(1, 3)
(1, 2)
"""
if seed is None:
return randint
elif isinstance(seed, int):
rng.seed(seed)
return randint
elif is_sequence(seed):
seed = list(seed) # make a copy
seed.reverse()
def give(a, b, seq=seed):
a, b = as_int(a), as_int(b)
w = b - a
if w < 0:
raise ValueError('_randint got empty range')
try:
x = seq.pop()
except IndexError:
raise ValueError('_randint sequence was too short')
if a <= x <= b:
return x
else:
return give(a, b, seq)
return give
else:
raise ValueError('_randint got an unexpected seed') | null |
5,714 | """Benchmark for undo/redo.
Run this program without parameters for mode of use."""
from time import perf_counter as clock
import numpy as np
import tables as tb
verbose = 0
class BasicBenchmark:
def __init__(self, filename, testname, vecsize, nobjects, niter):
self.file = filename
self.test = testname
self.vecsize = vecsize
self.nobjects = nobjects
self.niter = niter
# Initialize the arrays
self.a1 = np.arange(0, 1 * self.vecsize)
self.a2 = np.arange(1 * self.vecsize, 2 * self.vecsize)
self.a3 = np.arange(2 * self.vecsize, 3 * self.vecsize)
def setUp(self):
# Create an HDF5 file
self.fileh = tb.open_file(self.file, mode="w")
# open the do/undo
self.fileh.enable_undo()
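        # enable_undo() activates PyTables' do/undo log, so the node operations made
        # below can be unwound with undo() and replayed with redo().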
def tearDown(self):
self.fileh.disable_undo()
self.fileh.close()
# Remove the temporary file
# os.remove(self.file)
def createNode(self):
"""Checking a undo/redo create_array."""
for i in range(self.nobjects):
# Create a new array
self.fileh.create_array('/', 'array' + str(i), self.a1)
# Put a mark
self.fileh.mark()
# Unwind all marks sequentially
for i in range(self.niter):
t1 = clock()
for i in range(self.nobjects):
self.fileh.undo()
if verbose:
print("u", end=' ')
if verbose:
print()
undo = clock() - t1
# Rewind all marks sequentially
t1 = clock()
for i in range(self.nobjects):
self.fileh.redo()
if verbose:
print("r", end=' ')
if verbose:
print()
redo = clock() - t1
print("Time for Undo, Redo (createNode):", undo, "s, ", redo, "s")
def METHOD_NAME(self):
"""Checking a undo/redo copy_children."""
# Create a group
self.fileh.create_group('/', 'agroup')
# Create several objects there
for i in range(10):
# Create a new array
self.fileh.create_array('/agroup', 'array' + str(i), self.a1)
        # Exercise copy_children
for i in range(self.nobjects):
# Create another group for destination
self.fileh.create_group('/', 'anothergroup' + str(i))
# Copy children from /agroup to /anothergroup+i
self.fileh.METHOD_NAME('/agroup', '/anothergroup' + str(i))
# Put a mark
self.fileh.mark()
# Unwind all marks sequentially
for i in range(self.niter):
t1 = clock()
for i in range(self.nobjects):
self.fileh.undo()
if verbose:
print("u", end=' ')
if verbose:
print()
undo = clock() - t1
# Rewind all marks sequentially
t1 = clock()
for i in range(self.nobjects):
self.fileh.redo()
if verbose:
print("r", end=' ')
if verbose:
print()
redo = clock() - t1
print(("Time for Undo, Redo (copy_children):", undo, "s, ",
redo, "s"))
def set_attr(self):
"""Checking a undo/redo for setting attributes."""
# Create a new array
self.fileh.create_array('/', 'array', self.a1)
for i in range(self.nobjects):
# Set an attribute
setattr(self.fileh.root.array.attrs, "attr" + str(i), str(self.a1))
# Put a mark
self.fileh.mark()
# Unwind all marks sequentially
for i in range(self.niter):
t1 = clock()
for i in range(self.nobjects):
self.fileh.undo()
if verbose:
print("u", end=' ')
if verbose:
print()
undo = clock() - t1
# Rewind all marks sequentially
t1 = clock()
for i in range(self.nobjects):
self.fileh.redo()
if verbose:
print("r", end=' ')
if verbose:
print()
redo = clock() - t1
print("Time for Undo, Redo (set_attr):", undo, "s, ", redo, "s")
def runall(self):
        if self.test == "all":
            tests = [self.createNode, self.METHOD_NAME, self.set_attr]
        elif self.test == "createNode":
            tests = [self.createNode]
        elif self.test == "copy_children":
            tests = [self.METHOD_NAME]
        elif self.test == "set_attr":
            tests = [self.set_attr]
for meth in tests:
self.setUp()
meth()
self.tearDown()
if __name__ == '__main__':
import sys
import getopt
usage = """usage: %s [-v] [-p] [-t test] [-s vecsize] [-n niter] datafile
-v verbose (total dump of profiling)
-p do profiling
-t {createNode|copy_children|set_attr|all} run the specified test
-s the size of vectors that are undone/redone
-n number of objects in operations
-i number of iterations for reading\n""" % sys.argv[0]
try:
opts, pargs = getopt.getopt(sys.argv[1:], 'vpt:s:n:i:')
except:
sys.stderr.write(usage)
sys.exit(0)
# if we pass too much parameters, abort
if len(pargs) != 1:
sys.stderr.write(usage)
sys.exit(0)
# default options
verbose = 0
profile = 0
testname = "all"
vecsize = 10
nobjects = 1
niter = 1
# Get the options
for option in opts:
if option[0] == '-v':
verbose = 1
elif option[0] == '-p':
profile = 1
elif option[0] == '-t':
testname = option[1]
if testname not in ['createNode', 'copy_children', 'set_attr',
'all']:
sys.stderr.write(usage)
sys.exit(0)
elif option[0] == '-s':
vecsize = int(option[1])
elif option[0] == '-n':
nobjects = int(option[1])
elif option[0] == '-i':
niter = int(option[1])
filename = pargs[0]
bench = BasicBenchmark(filename, testname, vecsize, nobjects, niter)
if profile:
import hotshot
import hotshot.stats
prof = hotshot.Profile("do_undo.prof")
prof.runcall(bench.runall)
prof.close()
stats = hotshot.stats.load("do_undo.prof")
stats.strip_dirs()
stats.sort_stats('time', 'calls')
if verbose:
stats.print_stats()
else:
stats.print_stats(20)
else:
bench.runall()
# Local Variables:
# mode: python
# End: | null |
5,715 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.api import mtd, ITableWorkspace
from mantid.simpleapi import config, GenerateLogbook
import os
from tempfile import gettempdir
class GenerateLogbookTest(unittest.TestCase):
_data_directory = None
def setUp(self):
data_dirs = config["datasearch.directories"].split(";")
unit_test_data_dir = [p for p in data_dirs if "UnitTest" in p][0]
d7_dir = "ILL/D7"
if "ILL" in unit_test_data_dir:
d7_dir = "D7"
self._data_directory = os.path.abspath(os.path.join(unit_test_data_dir, d7_dir))
def tearDown(self):
mtd.clear()
@classmethod
def tearDownClass(cls):
mtd.clear()
if os.path.exists(os.path.join(gettempdir(), "logbook.csv")):
os.remove(os.path.join(gettempdir(), "logbook.csv"))
def test_instrument_does_not_exist(self):
self.assertTrue(os.path.exists(self._data_directory))
with self.assertRaisesRegex(RuntimeError, "There is no parameter file for nonexistent instrument."):
GenerateLogbook(Directory=self._data_directory, OutputWorkspace="__unused", Facility="ISIS", Instrument="nonexistent")
def test_d7_default(self):
self.assertTrue(os.path.exists(self._data_directory))
GenerateLogbook(
Directory=self._data_directory, OutputWorkspace="default_logbook", Facility="ILL", Instrument="D7", NumorRange="396990:396993"
)
self._check_output("default_logbook", numberEntries=3, numberColumns=7)
def test_d7_optional(self):
self.assertTrue(os.path.exists(self._data_directory))
GenerateLogbook(
Directory=self._data_directory,
OutputWorkspace="optional_logbook",
Facility="ILL",
Instrument="D7",
NumorRange="396990:396993",
OptionalHeaders="TOF",
)
self._check_output("optional_logbook", numberEntries=3, numberColumns=8)
def test_d7_custom(self):
self.assertTrue(os.path.exists(self._data_directory))
GenerateLogbook(
Directory=self._data_directory,
OutputWorkspace="custom_logbook",
Facility="ILL",
Instrument="D7",
NumorRange="396990:396993",
CustomEntries="/entry0/acquisition_mode",
)
self._check_output("custom_logbook", numberEntries=3, numberColumns=8)
def test_d7_custom_with_summing(self):
self.assertTrue(os.path.exists(self._data_directory))
GenerateLogbook(
Directory=self._data_directory,
OutputWorkspace="custom_logbook_w_summing",
Facility="ILL",
Instrument="D7",
NumorRange="396990:396993",
CustomEntries="/entry0/acquisition_mode",
OptionalHeaders="wavelength",
)
self._check_output("custom_logbook_w_summing", numberEntries=3, numberColumns=9)
def METHOD_NAME(self):
self.assertTrue(os.path.exists(self._data_directory))
GenerateLogbook(
Directory=self._data_directory,
OutputWorkspace="__unused",
Facility="ILL",
Instrument="D7",
NumorRange="396990:396993",
OutputFile=os.path.join(gettempdir(), "logbook.csv"),
)
self.assertTrue(os.path.join(gettempdir(), "logbook.csv"))
def _check_output(self, ws, numberEntries, numberColumns):
self.assertTrue(mtd[ws])
self.assertTrue(isinstance(mtd[ws], ITableWorkspace))
self.assertEqual(len(mtd[ws].row(0)), numberColumns)
self.assertEqual(len(mtd[ws].column(0)), numberEntries)
if __name__ == "__main__":
unittest.main() | null |
5,716 | #!/usr/bin/env python3
# Copyright (c) 2017-2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various fingerprinting protections.
If a stale block more than a month old or its header are requested by a peer,
the node should pretend that it does not have it to avoid fingerprinting.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.messages import CInv, MSG_BLOCK
from test_framework.p2p import (
P2PInterface,
msg_headers,
msg_block,
msg_getdata,
msg_getheaders,
)
from test_framework.test_framework import PocketcoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PFingerprintTest(PocketcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
# Build a chain of blocks on top of given one
def build_chain(self, nblocks, prev_hash, prev_height, prev_median_time):
blocks = []
for _ in range(nblocks):
coinbase = create_coinbase(prev_height + 1)
block_time = prev_median_time + 1
block = create_block(int(prev_hash, 16), coinbase, block_time)
block.solve()
blocks.append(block)
prev_hash = block.hash
prev_height += 1
prev_median_time = block_time
return blocks
# Send a getdata request for a given block hash
def METHOD_NAME(self, block_hash, node):
msg = msg_getdata()
msg.inv.append(CInv(MSG_BLOCK, block_hash))
node.send_message(msg)
# Send a getheaders request for a given single block hash
def send_header_request(self, block_hash, node):
msg = msg_getheaders()
msg.hashstop = block_hash
node.send_message(msg)
# Check whether last block received from node has a given hash
def last_block_equals(self, expected_hash, node):
block_msg = node.last_message.get("block")
return block_msg and block_msg.block.rehash() == expected_hash
# Check whether last block header received from node has a given hash
def last_header_equals(self, expected_hash, node):
headers_msg = node.last_message.get("headers")
return (headers_msg and
headers_msg.headers and
headers_msg.headers[0].rehash() == expected_hash)
# Checks that stale blocks timestamped more than a month ago are not served
# by the node while recent stale blocks and old active chain blocks are.
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
node0 = self.nodes[0].add_p2p_connection(P2PInterface())
# Set node time to 60 days ago
self.nodes[0].setmocktime(int(time.time()) - 60 * 24 * 60 * 60)
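        # With node time 60 days in the past, the blocks mined next are timestamped
        # long ago, so the stale branch created below ends up more than a month old.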
# Generating a chain of 10 blocks
block_hashes = self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)
# Create longer chain starting 2 blocks before current tip
height = len(block_hashes) - 2
block_hash = block_hashes[height - 1]
block_time = self.nodes[0].getblockheader(block_hash)["mediantime"] + 1
new_blocks = self.build_chain(5, block_hash, height, block_time)
# Force reorg to a longer chain
node0.send_message(msg_headers(new_blocks))
node0.wait_for_getdata([x.sha256 for x in new_blocks])
for block in new_blocks:
node0.send_and_ping(msg_block(block))
# Check that reorg succeeded
assert_equal(self.nodes[0].getblockcount(), 13)
stale_hash = int(block_hashes[-1], 16)
# Check that getdata request for stale block succeeds
self.METHOD_NAME(stale_hash, node0)
test_function = lambda: self.last_block_equals(stale_hash, node0)
self.wait_until(test_function, timeout=3)
# Check that getheader request for stale block header succeeds
self.send_header_request(stale_hash, node0)
test_function = lambda: self.last_header_equals(stale_hash, node0)
self.wait_until(test_function, timeout=3)
# Longest chain is extended so stale is much older than chain tip
self.nodes[0].setmocktime(0)
tip = self.nodes[0].generatetoaddress(1, self.nodes[0].get_deterministic_priv_key().address)[0]
assert_equal(self.nodes[0].getblockcount(), 14)
# Send getdata & getheaders to refresh last received getheader message
block_hash = int(tip, 16)
self.METHOD_NAME(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
# Request for very old stale block should now fail
self.METHOD_NAME(stale_hash, node0)
time.sleep(3)
assert not self.last_block_equals(stale_hash, node0)
# Request for very old stale block header should now fail
self.send_header_request(stale_hash, node0)
time.sleep(3)
assert not self.last_header_equals(stale_hash, node0)
# Verify we can fetch very old blocks and headers on the active chain
block_hash = int(block_hashes[2], 16)
self.METHOD_NAME(block_hash, node0)
self.send_header_request(block_hash, node0)
node0.sync_with_ping()
self.METHOD_NAME(block_hash, node0)
test_function = lambda: self.last_block_equals(block_hash, node0)
self.wait_until(test_function, timeout=3)
self.send_header_request(block_hash, node0)
test_function = lambda: self.last_header_equals(block_hash, node0)
self.wait_until(test_function, timeout=3)
if __name__ == '__main__':
P2PFingerprintTest().main() | null |
5,717 | import unittest
import numpy as np
from tinygrad.tensor import Tensor, Device
import pytest
pytestmark = [pytest.mark.exclude_cuda]
class TestConv(unittest.TestCase):
def test_simple(self):
x = Tensor.ones(1,12,128,256).contiguous().realize()
w = Tensor.ones(32,12,3,3).contiguous().realize()
ret = x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
# it's not 108 around the padding
assert (ret[:, :, 1:-1, 1:-1] == 108).all()
assert ret[0,0,0,0] == 48
assert ret[0,0,0,1] == 72
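    # With all-ones input and weights, each output value is just the number of input
    # elements under the kernel: 12*3*3 = 108 in the interior, 12*2*2 = 48 at the
    # padded corner and 12*2*3 = 72 along the padded edge.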
def test_simple_rand(self):
x = Tensor.rand(1,12,128,256)
w = Tensor.rand(32,12,3,3)
ret = x.conv2d(w, stride=(2,2), padding=(1,1)).numpy()
def test_many_simple(self):
x = Tensor(np.arange(8*2*8).reshape(1,8,2,8).astype(np.float32))
#w = Tensor(np.arange(8*8*1*1).reshape(8,8,1,1).astype(np.float32))
w = Tensor.eye(8).reshape((8,8,1,1))
ret = x.conv2d(w, stride=(1,2), padding=(0,0)).numpy()
print(ret)
def test_lazycache(self):
Tensor.no_grad = True
x = Tensor.rand(1, 32)
y = Tensor.rand(32)
out = x + y.reshape((1,32,1)).reshape((1,32)) + y.reshape((1,32,1)).reshape((1,32))
out.numpy()
Tensor.no_grad = False
def test_simple_biased(self):
C = 8
x = Tensor.rand(1,C,5,5)
w = Tensor.eye(C).reshape((C,C,1,1))
b = Tensor(np.arange(C).astype(np.float32))
ret = Tensor.conv2d(x,w,b).relu().conv2d(w,b)
print(ret.numpy())
def METHOD_NAME(self):
Tensor.no_grad = True
x = Tensor.randn(1,12,128,256)
w = Tensor.randn(32,12,3,3)
out = x.conv2d(w, stride=(2,2), padding=(1,1))
r1, r2 = out.relu(), (out-1)
np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
np.testing.assert_allclose(r2.numpy(), out.numpy() - 1)
Tensor.no_grad = False
def test_two_overlapping_binops_no_rerun(self):
Tensor.no_grad = True
x = Tensor.randn(1,12,128,256)
w = Tensor.randn(32,12,3,3)
out = x.conv2d(w, stride=(2,2), padding=(1,1))
r1, r2 = out.relu(), out.elu()
np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
Tensor.no_grad = False
@unittest.skipIf(Device.DEFAULT != "TORCH", "Takes too long to compile for Compiled backends")
def test_two_overlapping_binops_no_rerun_wino(self):
Tensor.no_grad = True
Tensor.wino = True
x = Tensor.randn(1,4,16,16)
w = Tensor.randn(6,4,3,3)
out = x.conv2d(w, padding=(1,1))
r1, r2 = out.relu(), out.elu()
np.testing.assert_allclose(r1.numpy(), np.maximum(out.numpy(), 0))
np.testing.assert_allclose(r2.numpy(), np.where(out.numpy() > 0, out.numpy(), (np.exp(out.numpy()) - 1)), atol=1e-5)
Tensor.wino = False
Tensor.no_grad = False
def test_first_three(self):
Tensor.no_grad = True
x = Tensor.rand(1,12,128,256)
w = Tensor.rand(32,12,3,3)
x = x.conv2d(w, stride=(2,2), padding=(1,1)).elu()
w = Tensor.rand(32,1,3,3)
x = x.conv2d(w, padding=(1,1), groups=32).elu()
w = Tensor.rand(16,32,1,1)
x = x.conv2d(w).elu()
x = x.numpy()
print(x.shape)
Tensor.no_grad = False
def test_elu(self):
Tensor.no_grad = True
x = Tensor.rand(1,12,128,256)
w = Tensor.rand(32,12,3,3)
x = x.conv2d(w, stride=(2,2), padding=(1,1))
x = x.elu()
w = Tensor.rand(32,1,3,3)
x = x.conv2d(w, padding=(1,1), groups=32)
out = x.numpy()
Tensor.no_grad = False
def test_reduce_relu(self):
Tensor.no_grad = True
x = Tensor.rand(1,12,128,256)
x = x.sum(keepdim=True).relu()
out = x.numpy()
Tensor.no_grad = False
def test_bias(self):
Tensor.no_grad = True
from tinygrad.nn import Conv2d
x = Tensor.rand(1,12,128,256)
c = Conv2d(12, 32, 3)
x = c(x).relu()
w = Tensor.uniform(32, 1, 3, 3)
x = x.conv2d(w, groups=32)
out = x.numpy()
Tensor.no_grad = False
def test_multiadd(self):
w = Tensor.rand(32)
x = Tensor.rand(32).relu()
(w+x).numpy()
def test_reorder(self):
x = Tensor.rand(1,12,128,256)
w = Tensor.rand(12,12,3,3)
x = x.conv2d(w, padding=(1,1))
print(x.shape)
x = x.reshape((1, 12, 256, 128))
x += 1
x += 1
x = x.reshape((1, 12, 128, 256))
x.numpy()
if __name__ == '__main__':
    unittest.main() | null
5,718 | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to run on the dispatcher. Builds each benchmark with each fuzzing
configuration, spawns a runner VM for each benchmark-fuzzer combo, and then
records coverage data received from the runner VMs."""
import datetime
import multiprocessing
import os
import sys
import threading
import time
from typing import List
from common import experiment_path as exp_path
from common import experiment_utils
from common import logs
from common import yaml_utils
from database import models
from database import utils as db_utils
from experiment.build import builder
from experiment.measurer import measure_manager
from experiment import reporter
from experiment import scheduler
from experiment import stop_experiment
LOOP_WAIT_SECONDS = 5 * 60
# TODO(metzman): Convert more uses of os.path.join to exp_path.path.
def _get_config_file_path():
"""Return config file path."""
return exp_path.path(
experiment_utils.get_internal_experiment_config_relative_path())
def METHOD_NAME(subdirs: List[str]):
"""Create |subdirs| in work directory."""
for subdir in subdirs:
os.mkdir(os.path.join(experiment_utils.get_work_dir(), subdir))
def _initialize_experiment_in_db(experiment_config: dict):
"""Initializes |experiment| in the database by creating the experiment
entity."""
with db_utils.session_scope() as session:
experiment_exists = session.query(models.Experiment).filter(
models.Experiment.name == experiment_config['experiment']).first()
if experiment_exists:
raise Exception('Experiment already exists in database.')
db_utils.add_all([
db_utils.get_or_create(
models.Experiment,
name=experiment_config['experiment'],
git_hash=experiment_config['git_hash'],
private=experiment_config.get('private', True),
experiment_filestore=experiment_config['experiment_filestore'],
description=experiment_config['description']),
])
def _record_experiment_time_ended(experiment_name: str):
"""Record |experiment| end time in the database."""
with db_utils.session_scope() as session:
experiment = session.query(models.Experiment).filter(
models.Experiment.name == experiment_name).one()
experiment.time_ended = datetime.datetime.utcnow()
db_utils.add_all([experiment])
def _initialize_trials_in_db(trials: List[models.Trial]):
"""Initializes entities for each trial in the experiment."""
# TODO(metzman): Consider doing this without sqlalchemy. This can get
# slow with SQLalchemy (it's much worse with add_all).
db_utils.bulk_save(trials)
class Experiment:
"""Class representing an experiment."""
def __init__(self, experiment_config_filepath: str):
self.config = yaml_utils.read(experiment_config_filepath)
self.benchmarks = self.config['benchmarks']
self.fuzzers = self.config['fuzzers']
self.num_trials = self.config['trials']
self.experiment_name = self.config['experiment']
self.git_hash = self.config['git_hash']
self.preemptible = self.config.get('preemptible_runners')
def build_images_for_trials(fuzzers: List[str], benchmarks: List[str],
num_trials: int,
preemptible: bool) -> List[models.Trial]:
"""Builds the images needed to run |experiment| and returns a list of trials
that can be run for experiment. This is the number of trials specified in
experiment times each pair of fuzzer+benchmark that builds successfully."""
# This call will raise an exception if the images can't be built which will
# halt the experiment.
builder.build_base_images()
# Only build fuzzers for benchmarks whose measurers built successfully.
benchmarks = builder.build_all_measurers(benchmarks)
build_successes = builder.build_all_fuzzer_benchmarks(fuzzers, benchmarks)
experiment_name = experiment_utils.get_experiment_name()
trials = []
for fuzzer, benchmark in build_successes:
fuzzer_benchmark_trials = [
models.Trial(fuzzer=fuzzer,
experiment=experiment_name,
benchmark=benchmark,
preemptible=preemptible) for _ in range(num_trials)
]
trials.extend(fuzzer_benchmark_trials)
return trials
def dispatcher_main():
"""Do the experiment and report results."""
logs.info('Starting experiment.')
# Set this here because we get failures if we do it in measurer for some
# reason.
multiprocessing.set_start_method('spawn')
db_utils.initialize()
if experiment_utils.is_local_experiment():
models.Base.metadata.create_all(db_utils.engine)
experiment_config_file_path = _get_config_file_path()
experiment = Experiment(experiment_config_file_path)
_initialize_experiment_in_db(experiment.config)
trials = build_images_for_trials(experiment.fuzzers, experiment.benchmarks,
experiment.num_trials,
experiment.preemptible)
_initialize_trials_in_db(trials)
METHOD_NAME(['experiment-folders', 'measurement-folders'])
    # Start measurer and scheduler in separate threads/processes.
scheduler_loop_thread = threading.Thread(target=scheduler.schedule_loop,
args=(experiment.config,))
scheduler_loop_thread.start()
measurer_main_process = multiprocessing.Process(
target=measure_manager.measure_main, args=(experiment.config,))
measurer_main_process.start()
is_complete = False
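    # Poll until the scheduler thread has finished; once it has, the experiment is
    # treated as complete when the measurer process has also exited. A report is
    # regenerated on every iteration so progress is visible throughout the run.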
while True:
time.sleep(LOOP_WAIT_SECONDS)
if not scheduler_loop_thread.is_alive():
is_complete = not measurer_main_process.is_alive()
# Generate periodic output reports.
reporter.output_report(experiment.config,
in_progress=not is_complete,
coverage_report=is_complete)
if is_complete:
# Experiment is complete, bail out.
break
scheduler_loop_thread.join()
measurer_main_process.join()
_record_experiment_time_ended(experiment.experiment_name)
logs.info('Experiment ended.')
def main():
"""Do the experiment and report results."""
logs.initialize(default_extras={
'component': 'dispatcher',
})
try:
dispatcher_main()
except Exception as error:
logs.error('Error conducting experiment.')
raise error
if experiment_utils.is_local_experiment():
return 0
experiment_config_file_path = _get_config_file_path()
if stop_experiment.stop_experiment(experiment_utils.get_experiment_name(),
experiment_config_file_path):
return 0
return 1
if __name__ == '__main__':
sys.exit(main()) | null |
5,719 | import unittest
from nose.plugins.attrib import attr
from jnpr.junos.exception import (
RpcError,
CommitError,
ConnectError,
ConfigLoadError,
RpcTimeoutError,
SwRollbackError,
JSONLoadError,
)
from jnpr.junos import Device
from lxml import etree
__author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
commit_xml = """
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/12.1X46/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<source-daemon>dcd</source-daemon>
<error-path>[edit interfaces ge-0/0/1]</error-path>
<error-info>
<bad-element>unit 2</bad-element>
</error-info>
<error-message>
Only unit 0 is valid for this encapsulation
</error-message>
</rpc-error>
"""
rpc_xml = """
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/12.1X47/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>error</error-severity>
<error-info>
<bad-element>bgp</bad-element>
</error-info>
<error-message>syntax error</error-message>
</rpc-error>
"""
conf_xml = """
<rpc-error>
<error-severity>error</error-severity>
<error-info>
<bad-element>system1</bad-element>
</error-info>
<error-message>syntax error</error-message>
</rpc-error>
"""
multi_warning_xml = """
<rpc-reply xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/16.1I0/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<load-configuration-results>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<rpc-error>
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>
<ok/>
</load-configuration-results>
</rpc-reply>
"""
config_json = """{
"configuration" : {
"system" : {
"services" : {
"telnet" : [nul]
}
}
}
}"""
@attr("unit")
class Test_RpcError(unittest.TestCase):
def test_rpcerror_repr(self):
rsp = etree.XML(rpc_xml)
obj = RpcError(rsp=rsp)
err = "RpcError(severity: error, bad_element: bgp, " "message: syntax error)"
self.assertEqual(str, type(obj.__repr__()))
self.assertEqual(obj.__repr__(), err)
def METHOD_NAME(self):
# this test is intended to hit jxml code
rsp = etree.XML(commit_xml)
obj = CommitError(rsp=rsp)
self.assertEqual(obj.rpc_error["bad_element"], "unit 2")
def test_ConnectError(self):
self.dev = Device(host="1.1.1.1", user="rick")
obj = ConnectError(self.dev)
self.assertEqual(obj.user, "rick")
self.assertEqual(obj.host, "1.1.1.1")
self.assertEqual(obj.port, 830)
self.assertEqual(repr(obj), "ConnectError(1.1.1.1)")
def test_ConnectError_msg(self):
self.dev = Device(host="1.1.1.1", user="rick")
obj = ConnectError(self.dev, msg="underlying exception info")
self.assertEqual(obj.msg, "underlying exception info")
self.assertEqual(
repr(obj), "ConnectError(host: 1.1.1.1, msg: underlying exception info)"
)
def test_CommitError_repr(self):
rsp = etree.XML(commit_xml)
obj = CommitError(rsp=rsp)
err = (
"CommitError(edit_path: [edit interfaces ge-0/0/1], "
"bad_element: unit 2, message: Only unit 0 is valid "
"for this encapsulation)"
)
self.assertEqual(obj.__repr__(), err)
def test_ConfigLoadError_repr(self):
rsp = etree.XML(conf_xml)
obj = ConfigLoadError(rsp=rsp)
err = (
"ConfigLoadError(severity: error, bad_element: "
"system1, message: syntax error)"
)
self.assertEqual(obj.__repr__(), err)
def test_RpcTimeoutError_repr(self):
dev = Device("test")
obj = RpcTimeoutError(dev=dev, cmd="test", timeout=50)
err = "RpcTimeoutError(host: test, cmd: test, timeout: 50)"
self.assertEqual(obj.__repr__(), err)
def test_SwRollbackError_repr(self):
obj = SwRollbackError(rsp="Single RE exception")
err = "SwRollbackError(output: Single RE exception)"
self.assertEqual(obj.__repr__(), err)
def test_SwRollbackError_repr_multi(self):
obj = SwRollbackError(re="test1", rsp="Multi RE exception")
err = "SwRollbackError(re: test1, output: Multi RE exception)"
self.assertEqual(obj.__repr__(), err)
def test_repr_multi_warning(self):
rsp = etree.XML(multi_warning_xml)
from ncclient.operations import RPCError
warn_msg = """
<rpc-error xmlns="urn:ietf:params:xml:ns:netconf:base:1.0" xmlns:junos="http://xml.juniper.net/junos/16.1I0/junos" xmlns:nc="urn:ietf:params:xml:ns:netconf:base:1.0">
<error-severity>warning</error-severity>
<error-message>
statement not found
</error-message>
</rpc-error>"""
errs = RPCError(etree.XML(warn_msg))
errs.errors = [errs, errs]
obj = RpcError(rsp=rsp, errs=errs)
self.assertEqual(obj.rpc_error["severity"], "warning")
def test_json_error(self):
err = "ValueError: No JSON object could be decoded"
obj = JSONLoadError(err, config_json)
errs = "JSONLoadError(reason: ValueError: No JSON object could be decoded)"
self.assertEqual(obj.__repr__(), errs)
def test_json_error_offending_line(self):
err = "ValueError: No"
obj = JSONLoadError(err, config_json)
obj.offending_line = "Value"
errs = (
"JSONLoadError(reason: ValueError: No, "
"\nThe offending config appears to be: "
"\nValue)"
)
self.assertEqual(obj.__repr__(), errs) | null |
5,720 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sync pipeline for testing."""
from tfx.dsl.compiler import compiler
from tfx.dsl.component.experimental.annotations import InputArtifact
from tfx.dsl.component.experimental.annotations import OutputArtifact
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.control_flow.for_each import ForEach
from tfx.dsl.experimental.conditionals import conditional
from tfx.orchestration import pipeline as pipeline_lib
from tfx.proto.orchestration import pipeline_pb2
from tfx.types import standard_artifacts
@component
def METHOD_NAME(examples: OutputArtifact[standard_artifacts.Examples]):
del examples
@component
def _statistics_gen(
examples: InputArtifact[standard_artifacts.Examples],
statistics: OutputArtifact[standard_artifacts.ExampleStatistics]):
del examples, statistics
@component
def _schema_gen(statistics: InputArtifact[standard_artifacts.ExampleStatistics],
schema: OutputArtifact[standard_artifacts.Schema]):
del statistics, schema
@component
def _example_validator(
statistics: InputArtifact[standard_artifacts.ExampleStatistics],
schema: InputArtifact[standard_artifacts.Schema],
anomalies: OutputArtifact[standard_artifacts.ExampleAnomalies]):
del (
statistics,
schema,
anomalies,
)
@component
def _transform(
examples: InputArtifact[standard_artifacts.Examples],
schema: InputArtifact[standard_artifacts.Schema],
transform_graph: OutputArtifact[standard_artifacts.TransformGraph]):
del examples, schema, transform_graph
@component
def _trainer(examples: InputArtifact[standard_artifacts.Examples],
schema: InputArtifact[standard_artifacts.Schema],
transform_graph: InputArtifact[standard_artifacts.TransformGraph],
model: OutputArtifact[standard_artifacts.Model]):
del examples, schema, transform_graph, model
@component
def _evaluator(model: InputArtifact[standard_artifacts.Model],
evals: OutputArtifact[standard_artifacts.ModelEvaluation]):
del model, evals
@component
def _chore():
pass
def create_pipeline() -> pipeline_pb2.Pipeline:
"""Builds a test pipeline.
┌───────────┐
│example_gen│
└┬─┬─┬──────┘
│ │┌▽──────────────┐
│ ││stats_gen │
│ │└┬─────────────┬─┘
│ │┌▽───────────┐│
│ ││schema_gen ││
│ │└┬───────┬─┬──┘│
│┌▽─▽────┐│┌▽──▽─────────────┐
││transform │││example_validator │
│└┬────────┘│└───────────────────┘
┌▽─▽───────▽┐
│trainer │
└┬─────────┬───┘
┌▽─────┐┌▽─────────┐
│chore_a││evaluator │
└┬──────┘└───────────┘
┌▽──────┐
│chore_b │
└────────┘
Returns:
A pipeline proto for the above DAG
"""
# pylint: disable=no-value-for-parameter
example_gen = METHOD_NAME().with_id('my_example_gen')
stats_gen = _statistics_gen(
examples=example_gen.outputs['examples']).with_id('my_statistics_gen')
schema_gen = _schema_gen(
statistics=stats_gen.outputs['statistics']).with_id('my_schema_gen')
example_validator = _example_validator(
statistics=stats_gen.outputs['statistics'],
schema=schema_gen.outputs['schema']).with_id('my_example_validator')
transform = _transform(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema']).with_id('my_transform')
trainer = _trainer(
examples=example_gen.outputs['examples'],
schema=schema_gen.outputs['schema'],
transform_graph=transform.outputs['transform_graph']).with_id(
'my_trainer')
# Nodes with no input or output specs for testing task only dependencies.
chore_a = _chore().with_id('chore_a')
chore_a.add_upstream_node(trainer)
chore_b = _chore().with_id('chore_b')
chore_b.add_upstream_node(chore_a)
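  # The evaluator only runs when the trained model's 'evaluate' custom property
  # equals 1; otherwise the conditional branch is skipped at run time.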
with conditional.Cond(
trainer.outputs['model'].future()[0].custom_property('evaluate') == 1):
evaluator = _evaluator(
model=trainer.outputs['model']).with_id('my_evaluator')
# pylint: enable=no-value-for-parameter
pipeline = pipeline_lib.Pipeline(
pipeline_name='my_pipeline',
pipeline_root='/path/to/root',
components=[
example_gen,
stats_gen,
schema_gen,
example_validator,
transform,
trainer,
evaluator,
chore_a,
chore_b,
],
enable_cache=True)
dsl_compiler = compiler.Compiler()
return dsl_compiler.compile(pipeline)
def create_pipeline_with_foreach() -> pipeline_pb2.Pipeline:
"""Builds a test pipeline with ForEach."""
# pylint: disable=no-value-for-parameter
example_gen = METHOD_NAME().with_id('my_example_gen')
with ForEach(example_gen.outputs['examples']) as examples:
stats_gen = _statistics_gen(examples=examples).with_id(
'my_statistics_gen_in_foreach'
)
pipeline = pipeline_lib.Pipeline(
pipeline_name='my_pipeline',
pipeline_root='/path/to/root',
components=[
example_gen,
stats_gen,
],
enable_cache=True,
)
dsl_compiler = compiler.Compiler()
return dsl_compiler.compile(pipeline)
def create_chore_pipeline() -> pipeline_pb2.Pipeline:
"""Creates a pipeline full of chores.
┌─────────────┐┌──────────────┐
│example_gen_1││example_gen_2 │
└┬────────────┘└┬───────┬─────┘
┌▽──────┐┌──────▽───┐┌▽──────┐
│chore_a ││chore_d ││chore_e │
└┬───────┘└┬─────────┬┘└┬───────┘
┌▽──────┐┌▽──────┐┌▽──▽───┐
│chore_b ││chore_f││chore_g │
└┬───────┘└┬───────┘└─────────┘
┌▽────────▽┐
│chore_c │
└────────────┘
Returns:
A pipeline for the above DAG
"""
# pylint: disable=no-value-for-parameter
example_gen_1 = METHOD_NAME().with_id('my_example_gen_1')
example_gen_2 = METHOD_NAME().with_id('my_example_gen_2')
chore_a = _chore().with_id('chore_a')
chore_a.add_upstream_node(example_gen_1)
chore_b = _chore().with_id('chore_b')
chore_b.add_upstream_node(chore_a)
chore_c = _chore().with_id('chore_c')
chore_c.add_upstream_node(chore_b)
chore_d = _chore().with_id('chore_d')
chore_d.add_upstream_node(example_gen_2)
chore_e = _chore().with_id('chore_e')
chore_e.add_upstream_node(example_gen_2)
chore_f = _chore().with_id('chore_f')
chore_f.add_upstream_node(chore_d)
chore_g = _chore().with_id('chore_g')
chore_g.add_upstream_node(chore_d)
chore_g.add_upstream_node(chore_e)
chore_f.add_downstream_node(chore_c)
pipeline = pipeline_lib.Pipeline(
pipeline_name='my_pipeline',
pipeline_root='/path/to/root',
components=[
example_gen_1,
example_gen_2,
chore_a,
chore_b,
chore_d,
chore_e,
chore_f,
chore_g,
chore_c,
],
enable_cache=True,
)
dsl_compiler = compiler.Compiler()
return dsl_compiler.compile(pipeline) | null |
5,721 | #!/usr/bin/python
# Copyright 2011 Steven Watanabe
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
import BoostBuild
import os
def basic():
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 ;
DEPENDS all : target1 ;
""")
t.run_build_system(["-ffile.jam"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
updating target1
...updated 1 target...
...found 1 target...
""")
t.cleanup()
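# With -n, b2 normally only prints the actions it would run; passing ignore-minus-n to
# UPDATE_NOW forces the named targets to be updated for real, which is what the test
# below verifies (the action is both echoed and actually executed).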
def ignore_minus_n():
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 ;
""")
t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
echo updating target1
updating target1
...updated 1 target...
...found 1 target...
""")
t.cleanup()
def failed_target():
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions fail
{
exit 1
}
NOTFILE target1 ;
ALWAYS target1 ;
fail target1 ;
actions do-print
{
echo updating $(<)
}
NOTFILE target2 ;
do-print target2 ;
DEPENDS target2 : target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 target2 ;
""")
t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
...found 2 targets...
...updating 1 target...
do-print target2
echo updating target2
...updated 1 target...
""")
t.cleanup()
def missing_target():
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target2 ;
do-print target2 ;
DEPENDS target2 : target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 target2 ;
""")
t.run_build_system(["-ffile.jam", "-n"], status=1, stdout="""\
don't know how to make target1
...found 1 target...
...can't find 1 target...
...found 2 targets...
...can't make 1 target...
""")
t.cleanup()
def build_once():
"""
Make sure that if we call UPDATE_NOW with ignore-minus-n, the target gets
updated exactly once regardless of previous calls to UPDATE_NOW with -n in
effect.
"""
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions do-print
{
echo updating $(<)
}
NOTFILE target1 ;
ALWAYS target1 ;
do-print target1 ;
UPDATE_NOW target1 ;
UPDATE_NOW target1 : : ignore-minus-n ;
UPDATE_NOW target1 : : ignore-minus-n ;
DEPENDS all : target1 ;
""")
t.run_build_system(["-ffile.jam", "-n"], stdout="""\
...found 1 target...
...updating 1 target...
do-print target1
echo updating target1
...updated 1 target...
do-print target1
echo updating target1
updating target1
...updated 1 target...
...found 1 target...
""")
t.cleanup()
def return_status():
"""
Make sure that UPDATE_NOW returns a failure status if
the target failed in a previous call to UPDATE_NOW
"""
t = BoostBuild.Tester(pass_toolset=0)
t.write("file.jam", """\
actions fail
{
exit 1
}
NOTFILE target1 ;
ALWAYS target1 ;
fail target1 ;
ECHO "update1:" [ UPDATE_NOW target1 ] ;
ECHO "update2:" [ UPDATE_NOW target1 ] ;
DEPENDS all : target1 ;
""")
t.run_build_system(["-ffile.jam"], status=1, stdout="""\
...found 1 target...
...updating 1 target...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
update1:
update2:
...found 1 target...
""")
t.cleanup()
def METHOD_NAME():
"""Tests that ignore-minus-n and ignore-minus-q are
local to the call to UPDATE_NOW"""
t = BoostBuild.Tester(pass_toolset=0)
t.write("actions.jam", """\
rule fail
{
NOTFILE $(<) ;
ALWAYS $(<) ;
}
actions fail
{
exit 1
}
rule pass
{
NOTFILE $(<) ;
ALWAYS $(<) ;
}
actions pass
{
echo updating $(<)
}
""")
t.write("file.jam", """
include actions.jam ;
fail target1 ;
fail target2 ;
UPDATE_NOW target1 target2 : : $(IGNORE_MINUS_N) : $(IGNORE_MINUS_Q) ;
fail target3 ;
fail target4 ;
UPDATE_NOW target3 target4 ;
UPDATE ;
""")
t.run_build_system(['-n', '-sIGNORE_MINUS_N=1', '-ffile.jam'],
stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
fail target2
exit 1
...failed fail target2...
...failed updating 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
fail target4
exit 1
...updated 2 targets...
''')
t.run_build_system(['-q', '-sIGNORE_MINUS_N=1', '-ffile.jam'],
status=1, stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
...failed updating 1 target...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
...failed fail target3...
...failed updating 1 target...
''')
t.run_build_system(['-n', '-sIGNORE_MINUS_Q=1', '-ffile.jam'],
stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
fail target2
exit 1
...updated 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
fail target4
exit 1
...updated 2 targets...
''')
t.run_build_system(['-q', '-sIGNORE_MINUS_Q=1', '-ffile.jam'],
status=1, stdout='''...found 2 targets...
...updating 2 targets...
fail target1
exit 1
...failed fail target1...
fail target2
exit 1
...failed fail target2...
...failed updating 2 targets...
...found 2 targets...
...updating 2 targets...
fail target3
exit 1
...failed fail target3...
...failed updating 1 target...
''')
t.cleanup()
basic()
ignore_minus_n()
failed_target()
missing_target()
build_once()
return_status()
METHOD_NAME() | null |
5,722 | from conans import CHECKSUM_DEPLOY, REVISIONS, OAUTH_TOKEN
from conans.client.rest.rest_client_v2 import RestV2Methods
from conans.errors import AuthenticationException, ConanException
class RestApiClientFactory(object):
def __init__(self, requester, config):
self._requester = requester
self._config = config
self._cached_capabilities = {}
def new(self, remote, token, refresh_token, custom_headers):
tmp = RestApiClient(remote, token, refresh_token, custom_headers,
self._requester, self._config,
self._cached_capabilities)
return tmp
class RestApiClient(object):
"""
Rest Api Client for handle remote.
"""
def __init__(self, remote, token, refresh_token, custom_headers, requester,
config, cached_capabilities):
# Set to instance
self._token = token
self._refresh_token = refresh_token
self._remote_url = remote.url
self._custom_headers = custom_headers
self._requester = requester
self._verify_ssl = remote.verify_ssl
self._config = config
# This dict is shared for all the instances of RestApiClient
self._cached_capabilities = cached_capabilities
def _capable(self, capability, user=None, password=None):
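        # Server capabilities are only queried once per remote URL; the result is
        # cached in a dict shared by every RestApiClient created by the same factory.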
capabilities = self._cached_capabilities.get(self._remote_url)
if capabilities is None:
tmp = RestV2Methods(self._remote_url, self._token, self._custom_headers,
self._requester, self._config, self._verify_ssl)
capabilities = tmp.server_capabilities(user, password)
self._cached_capabilities[self._remote_url] = capabilities
return capability in capabilities
def _get_api(self):
revisions = self._capable(REVISIONS)
if not revisions:
raise ConanException("The remote doesn't support revisions. "
"Conan 2.0 is no longer compatible with "
"remotes that don't accept revisions.")
checksum_deploy = self._capable(CHECKSUM_DEPLOY)
return RestV2Methods(self._remote_url, self._token, self._custom_headers,
self._requester, self._config, self._verify_ssl,
checksum_deploy)
def get_recipe(self, ref, dest_folder, metadata, only_metadata):
return self._get_api().get_recipe(ref, dest_folder, metadata, only_metadata)
def get_recipe_sources(self, ref, dest_folder):
return self._get_api().get_recipe_sources(ref, dest_folder)
def get_package(self, pref, dest_folder, metadata, only_metadata):
return self._get_api().get_package(pref, dest_folder, metadata, only_metadata)
def upload_recipe(self, ref, files_to_upload):
return self._get_api().upload_recipe(ref, files_to_upload)
def upload_package(self, pref, files_to_upload):
return self._get_api().upload_package(pref, files_to_upload)
def authenticate(self, user, password):
api_v2 = RestV2Methods(self._remote_url, self._token, self._custom_headers,
self._requester, self._config, self._verify_ssl)
if self._refresh_token and self._token:
token, refresh_token = api_v2.refresh_token(self._token, self._refresh_token)
else:
try:
# Check capabilities can raise also 401 until the new Artifactory is released
oauth_capable = self._capable(OAUTH_TOKEN, user, password)
except AuthenticationException:
oauth_capable = False
if oauth_capable:
# Artifactory >= 6.13.X
token, refresh_token = api_v2.authenticate_oauth(user, password)
else:
token = api_v2.authenticate(user, password)
refresh_token = None
return token, refresh_token
def check_credentials(self):
return self._get_api().check_credentials()
def search(self, pattern=None, ignorecase=True):
return self._get_api().search(pattern, ignorecase)
def search_packages(self, reference):
return self._get_api().search_packages(reference)
def remove_recipe(self, ref):
return self._get_api().remove_recipe(ref)
def remove_all_packages(self, ref):
return self._get_api().remove_all_packages(ref)
def remove_packages(self, prefs):
return self._get_api().remove_packages(prefs)
def server_capabilities(self):
return self._get_api().server_capabilities()
def get_recipe_revisions_references(self, ref):
return self._get_api().get_recipe_revisions_references(ref)
def get_package_revisions_references(self, pref, headers=None):
return self._get_api().get_package_revisions_references(pref, headers=headers)
def get_latest_recipe_reference(self, ref):
return self._get_api().get_latest_recipe_reference(ref)
def METHOD_NAME(self, pref, headers):
return self._get_api().METHOD_NAME(pref, headers=headers)
def get_recipe_revision_reference(self, ref):
return self._get_api().get_recipe_revision_reference(ref)
def get_package_revision_reference(self, pref):
return self._get_api().get_package_revision_reference(pref) | null |
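# A minimal, framework-free sketch of the capability-cache idea used by RestApiClient above:
# the cache is just a dict keyed by remote URL and shared across client instances, so the
# server_capabilities() call happens at most once per remote. FakeServer and CapabilityCache
# are invented for this illustration and are not part of the Conan sources.
class FakeServer:
    def __init__(self, capabilities):
        self.calls = 0
        self._capabilities = capabilities

    def server_capabilities(self):
        self.calls += 1
        return self._capabilities


class CapabilityCache:
    def __init__(self):
        self._cached = {}  # remote URL -> list of capability strings

    def capable(self, server, url, capability):
        caps = self._cached.get(url)
        if caps is None:
            caps = server.server_capabilities()  # only hit the server on a cache miss
            self._cached[url] = caps
        return capability in caps


if __name__ == "__main__":
    server = FakeServer(["revisions", "checksum_deploy"])
    cache = CapabilityCache()
    assert cache.capable(server, "https://remote.example", "revisions")
    assert cache.capable(server, "https://remote.example", "checksum_deploy")
    assert server.calls == 1  # the second lookup is answered from the cache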
5,723 | """
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework import status
from mock import patch
from storageadmin.tests.test_api import APITestMixin
class DiskSmartTests(APITestMixin):
# Fixture requires a single unpartitioned disk with id 2 of a type supported by SMART.
# No pool association means we can keep our fixture to a minimum.
# The required tables are created/populated upon smart "Refresh" button use.
# Fixture model content:
# - storageadmin.disk (disk to which storageadmin.smartinfo is linked)
# - storageadmin.smartcapability
# - storageadmin.smartattribute
# - storageadmin.smarterrorlog
# - storageadmin.smarttestlogdetail
# - storageadmin.smartidentity
# - storageadmin.smartinfo (links storageadmin.smart*.info to storageadmin.disk.id)
# Note storageadmin.smartinfo.pk is associated with storageadmin.smart*.info
#
# bin/django dumpdata storageadmin.disk storageadmin.smartcapability
# storageadmin.smartattribute storageadmin.smarterrorlog
# storageadmin.smarttestlogdetail storageadmin.smartidentity storageadmin.smartinfo
# --natural-foreign --indent 4 >
# src/rockstor/storageadmin/fixtures/test_disk_smart.json
#
# Proposed fixture = "test_disk_smart.json" was "fix1.json"
# ./bin/test -v 2 -p test_disk_smart.py
fixtures = ["test_api.json", "test_disk_smart.json"]
BASE_URL = "/api/disks/smart"
@classmethod
def setUpClass(cls):
super(DiskSmartTests, cls).setUpClass()
# Contextual mock of run_command to return nothing.
# Here we test our API end points against our existing fixture info.
# TODO Create system.test.test_smart for lower level smartctl output parsing.
cls.patch_run_test = patch("system.smart.run_command")
cls.mock_run_test = cls.patch_run_test.start()
cls.mock_run_test.return_value = [""], [""], 0
@classmethod
def METHOD_NAME(cls):
super(DiskSmartTests, cls).METHOD_NAME()
def test_get(self):
# get with disk id
response = self.client.get("{}/info/2".format(self.BASE_URL))
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.data)
def test_post_requests_1(self):
# invalid disk id
diskId = 99999
response = self.client.post("{}/info/{}".format(self.BASE_URL, diskId))
self.assertEqual(
response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data,
)
e_msg = "Disk id ({}) does not exist.".format(diskId)
self.assertEqual(response.data[0], e_msg)
def test_post_requests_2(self):
# invalid command
diskId = 2
response = self.client.post("{}/invalid/{}".format(self.BASE_URL, diskId))
self.assertEqual(
response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data,
)
e_msg = (
"Unknown command: (invalid). The only valid commands are " "info and test."
)
self.assertEqual(response.data[0], e_msg)
# unsupported self test
data = {"test_type": "invalid"}
response = self.client.post(
"{}/test/{}".format(self.BASE_URL, diskId), data=data
)
self.assertEqual(
response.status_code,
status.HTTP_500_INTERNAL_SERVER_ERROR,
msg=response.data,
)
e_msg = "Unsupported Self-Test: (invalid)."
self.assertEqual(response.data[0], e_msg)
# test command
data = {"test_type": "short"}
response = self.client.post(
"{}/test/{}".format(self.BASE_URL, diskId), data=data
)
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.data)
# happy path
response = self.client.post("{}/info/{}".format(self.BASE_URL, diskId))
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=response.data) | null |
5,724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import imp
from charmhelpers import osplatform
from mock import patch
from tests.helpers import patch_open
from charmhelpers.core import kernel
class TestKernel(unittest.TestCase):
@patch('subprocess.check_call')
@patch.object(osplatform, 'get_platform')
def test_modprobe_persistent_ubuntu(self, platform, check_call):
platform.return_value = 'ubuntu'
imp.reload(kernel)
with patch_open() as (_open, _file):
_file.read.return_value = 'anothermod\n'
with patch("charmhelpers.core.kernel.log"):
kernel.modprobe('mymod')
_open.assert_called_with('/etc/modules', 'r+')
_file.read.assert_called_with()
_file.write.assert_called_with('mymod\n')
check_call.assert_called_with(['modprobe', 'mymod'])
@patch('os.chmod')
@patch('subprocess.check_call')
@patch.object(osplatform, 'get_platform')
def METHOD_NAME(self, platform, check_call, os):
platform.return_value = 'centos'
imp.reload(kernel)
with patch_open() as (_open, _file):
_file.read.return_value = 'anothermod\n'
with patch("charmhelpers.core.kernel.log"):
kernel.modprobe('mymod')
_open.assert_called_with('/etc/rc.modules', 'r+')
os.assert_called_with('/etc/rc.modules', 111)
_file.read.assert_called_with()
_file.write.assert_called_with('modprobe mymod\n')
check_call.assert_called_with(['modprobe', 'mymod'])
@patch('subprocess.check_call')
@patch.object(osplatform, 'get_platform')
def test_modprobe_not_persistent_ubuntu(self, platform, check_call):
platform.return_value = 'ubuntu'
imp.reload(kernel)
with patch_open() as (_open, _file):
_file.read.return_value = 'anothermod\n'
with patch("charmhelpers.core.kernel.log"):
kernel.modprobe('mymod', persist=False)
assert not _open.called
check_call.assert_called_with(['modprobe', 'mymod'])
@patch('subprocess.check_call')
@patch.object(osplatform, 'get_platform')
def test_modprobe_not_persistent_centos(self, platform, check_call):
platform.return_value = 'centos'
imp.reload(kernel)
with patch_open() as (_open, _file):
_file.read.return_value = 'anothermod\n'
with patch("charmhelpers.core.kernel.log"):
kernel.modprobe('mymod', persist=False)
assert not _open.called
check_call.assert_called_with(['modprobe', 'mymod'])
@patch.object(kernel, 'log')
@patch('subprocess.check_call')
def test_rmmod_not_forced(self, check_call, log):
kernel.rmmod('mymod')
check_call.assert_called_with(['rmmod', 'mymod'])
@patch.object(kernel, 'log')
@patch('subprocess.check_call')
def test_rmmod_forced(self, check_call, log):
kernel.rmmod('mymod', force=True)
check_call.assert_called_with(['rmmod', '-f', 'mymod'])
@patch.object(kernel, 'log')
@patch('subprocess.check_output')
def test_lsmod(self, check_output, log):
kernel.lsmod()
check_output.assert_called_with(['lsmod'],
universal_newlines=True)
@patch('charmhelpers.core.kernel.lsmod')
def test_is_module_loaded(self, lsmod):
lsmod.return_value = "ip6_tables 28672 1 ip6table_filter"
self.assertTrue(kernel.is_module_loaded("ip6_tables"))
@patch.object(osplatform, 'get_platform')
@patch('subprocess.check_call')
def test_update_initramfs_ubuntu(self, check_call, platform):
platform.return_value = 'ubuntu'
imp.reload(kernel)
kernel.update_initramfs()
check_call.assert_called_with(["update-initramfs", "-k", "all", "-u"])
@patch.object(osplatform, 'get_platform')
@patch('subprocess.check_call')
def test_update_initramfs_centos(self, check_call, platform):
platform.return_value = 'centos'
imp.reload(kernel)
kernel.update_initramfs()
check_call.assert_called_with(['dracut', '-f', 'all']) | null |
5,725 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import mantid.api
import os
import random
import string
import tempfile
import unittest
import warnings
from isis_powder.routines import run_details
from isis_powder.routines import common, yaml_parser
class ISISPowderInstrumentRunDetailsTest(unittest.TestCase):
def setup_mock_inst_settings(self, yaml_file_path):
calibration_dir = tempfile.mkdtemp()
# Keep track of list of folders to remove
self._folders_to_remove = [calibration_dir]
# Check the required unit test files could be found
test_configuration_path = mantid.api.FileFinder.getFullPath(yaml_file_path)
if not test_configuration_path or len(test_configuration_path) <= 0:
self.fail("Could not find the unit test input file called: " + str(yaml_file_path))
return MockInstSettings(cal_file_path=test_configuration_path, calibration_dir=calibration_dir)
def tearDown(self):
for folder in self._folders_to_remove:
try:
os.rmdir(folder)
except OSError:
warnings.warn("Could not remove the folder at the following path:\n" + str(folder))
def test_create_run_details_object(self):
# These attributes are based on a flat YAML file at the specified path
expected_label = "16_4"
expected_vanadium_runs = "11-12"
expected_empty_runs = "13-14"
expected_offset_file_name = "offset_file_name"
run_number_string = "17-18"
mock_inst = self.setup_mock_inst_settings(yaml_file_path="ISISPowderRunDetailsTest.yaml")
run_number = common.get_first_run_number(run_number_string=run_number_string)
cal_mapping_dict = yaml_parser.get_run_dictionary(run_number_string=run_number, file_path=mock_inst.cal_mapping_path)
grouping_filename = mock_inst.grouping_file_name
empty_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="empty_run_numbers")
vanadium_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="vanadium_run_numbers")
output_obj = run_details.create_run_details_object(
run_number_string=run_number_string,
inst_settings=mock_inst,
is_vanadium_run=False,
grouping_file_name=grouping_filename,
empty_inst_run_number=empty_runs,
vanadium_string=vanadium_runs,
)
self.assertEqual(output_obj.empty_inst_runs, expected_empty_runs)
self.assertEqual(output_obj.grouping_file_path, os.path.join(mock_inst.calibration_dir, mock_inst.grouping_file_name))
expected_file_ext = mock_inst.file_extension
expected_file_ext = expected_file_ext if expected_file_ext.startswith(".") else "." + expected_file_ext
self.assertEqual(output_obj.file_extension, expected_file_ext)
self.assertEqual(output_obj.label, expected_label)
self.assertEqual(output_obj.offset_file_path, os.path.join(mock_inst.calibration_dir, expected_label, expected_offset_file_name))
self.assertEqual(output_obj.output_run_string, run_number_string)
self.assertEqual(output_obj.run_number, 17)
self.assertEqual(output_obj.vanadium_run_numbers, expected_vanadium_runs)
self.assertEqual(
output_obj.summed_empty_inst_file_path,
os.path.join(
mock_inst.calibration_dir,
expected_label,
common.generate_summed_empty_name(expected_empty_runs, expected_offset_file_name),
),
)
def test_create_run_details_object_when_van_cal(self):
# When we are running the vanadium calibration we expect the run number to take the vanadium
# number instead
run_number_string = "17-18"
expected_vanadium_runs = "11-12"
mock_inst = self.setup_mock_inst_settings(yaml_file_path="ISISPowderRunDetailsTest.yaml")
run_number = common.get_first_run_number(run_number_string=run_number_string)
cal_mapping_dict = yaml_parser.get_run_dictionary(run_number_string=run_number, file_path=mock_inst.cal_mapping_path)
grouping_filename = mock_inst.grouping_file_name
empty_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="empty_run_numbers")
vanadium_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="vanadium_run_numbers")
output_obj = run_details.create_run_details_object(
run_number_string=run_number_string,
inst_settings=mock_inst,
is_vanadium_run=True,
grouping_file_name=grouping_filename,
empty_inst_run_number=empty_runs,
vanadium_string=vanadium_runs,
)
self.assertEqual(expected_vanadium_runs, output_obj.run_number)
self.assertEqual(output_obj.vanadium_run_numbers, output_obj.run_number)
self.assertEqual(expected_vanadium_runs, output_obj.output_run_string)
def test_run_details_splined_name_list_is_used(self):
expected_vanadium_runs = "11-12"
splined_name_list = ["bar", "bang", "baz"]
run_number_string = "10"
mock_inst = self.setup_mock_inst_settings(yaml_file_path="ISISPowderRunDetailsTest.yaml")
run_number = common.get_first_run_number(run_number_string=run_number_string)
cal_mapping_dict = yaml_parser.get_run_dictionary(run_number_string=run_number, file_path=mock_inst.cal_mapping_path)
grouping_filename = mock_inst.grouping_file_name
empty_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="empty_run_numbers")
vanadium_runs = common.cal_map_dictionary_key_helper(dictionary=cal_mapping_dict, key="vanadium_run_numbers")
output_obj = run_details.create_run_details_object(
run_number_string,
inst_settings=mock_inst,
is_vanadium_run=False,
splined_name_list=splined_name_list,
grouping_file_name=grouping_filename,
empty_inst_run_number=empty_runs,
vanadium_string=vanadium_runs,
)
expected_splined_out_str = "".join("_" + val for val in splined_name_list)
expected_output_name = "VanSplined_" + expected_vanadium_runs + expected_splined_out_str
expected_output_name += ".nxs"
expected_path = os.path.join(mock_inst.calibration_dir, output_obj.label, expected_output_name)
self.assertEqual(expected_path, output_obj.splined_vanadium_file_path)
class MockInstSettings(object):
def __init__(self, cal_file_path, calibration_dir):
self.calibration_dir = calibration_dir
self.cal_mapping_path = cal_file_path
self.grouping_file_name = MockInstSettings.METHOD_NAME()
self.file_extension = MockInstSettings.METHOD_NAME()
self.mode = "PDF"
@staticmethod
def METHOD_NAME():
return "".join(random.choice(string.ascii_lowercase) for _ in range(10))
def _get_current_mode_dictionary(run_number_string, inst_settings):
mapping_dict = run_details.get_cal_mapping_dict(run_number_string, inst_settings.cal_mapping_path)
# Get the current mode "Rietveld" or "PDF" run numbers
return common.cal_map_dictionary_key_helper(mapping_dict, inst_settings.mode)
if __name__ == "__main__":
unittest.main() | null |
5,726 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetMcaAccountScopeResult',
'AwaitableGetMcaAccountScopeResult',
'get_mca_account_scope',
'get_mca_account_scope_output',
]
@pulumi.output_type
class GetMcaAccountScopeResult:
"""
A collection of values returned by getMcaAccountScope.
"""
def __init__(__self__, METHOD_NAME=None, billing_profile_name=None, id=None, invoice_section_name=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'billing_account_name' to be a str")
pulumi.set(__self__, "billing_account_name", METHOD_NAME)
if billing_profile_name and not isinstance(billing_profile_name, str):
raise TypeError("Expected argument 'billing_profile_name' to be a str")
pulumi.set(__self__, "billing_profile_name", billing_profile_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if invoice_section_name and not isinstance(invoice_section_name, str):
raise TypeError("Expected argument 'invoice_section_name' to be a str")
pulumi.set(__self__, "invoice_section_name", invoice_section_name)
@property
@pulumi.getter(name="billingAccountName")
def METHOD_NAME(self) -> str:
return pulumi.get(self, "billing_account_name")
@property
@pulumi.getter(name="billingProfileName")
def billing_profile_name(self) -> str:
return pulumi.get(self, "billing_profile_name")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="invoiceSectionName")
def invoice_section_name(self) -> str:
return pulumi.get(self, "invoice_section_name")
class AwaitableGetMcaAccountScopeResult(GetMcaAccountScopeResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetMcaAccountScopeResult(
METHOD_NAME=self.METHOD_NAME,
billing_profile_name=self.billing_profile_name,
id=self.id,
invoice_section_name=self.invoice_section_name)
def get_mca_account_scope(METHOD_NAME: Optional[str] = None,
billing_profile_name: Optional[str] = None,
invoice_section_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetMcaAccountScopeResult:
"""
Use this data source to access an ID for your MCA Account billing scope.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
billing_profile_name="PE2Q-NOIT-BG7-TGB",
invoice_section_name="MTT4-OBS7-PJA-TGB")
pulumi.export("id", example.id)
```
:param str billing_account_name: The Billing Account Name of the MCA account.
:param str billing_profile_name: The Billing Profile Name in the above Billing Account.
:param str invoice_section_name: The Invoice Section Name in the above Billing Profile.
"""
__args__ = dict()
__args__['billingAccountName'] = METHOD_NAME
__args__['billingProfileName'] = billing_profile_name
__args__['invoiceSectionName'] = invoice_section_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:billing/getMcaAccountScope:getMcaAccountScope', __args__, opts=opts, typ=GetMcaAccountScopeResult).value
return AwaitableGetMcaAccountScopeResult(
METHOD_NAME=pulumi.get(__ret__, 'billing_account_name'),
billing_profile_name=pulumi.get(__ret__, 'billing_profile_name'),
id=pulumi.get(__ret__, 'id'),
invoice_section_name=pulumi.get(__ret__, 'invoice_section_name'))
@_utilities.lift_output_func(get_mca_account_scope)
def get_mca_account_scope_output(METHOD_NAME: Optional[pulumi.Input[str]] = None,
billing_profile_name: Optional[pulumi.Input[str]] = None,
invoice_section_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetMcaAccountScopeResult]:
"""
Use this data source to access an ID for your MCA Account billing scope.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.billing.get_mca_account_scope(billing_account_name="e879cf0f-2b4d-5431-109a-f72fc9868693:024cabf4-7321-4cf9-be59-df0c77ca51de_2019-05-31",
billing_profile_name="PE2Q-NOIT-BG7-TGB",
invoice_section_name="MTT4-OBS7-PJA-TGB")
pulumi.export("id", example.id)
```
:param str billing_account_name: The Billing Account Name of the MCA account.
:param str billing_profile_name: The Billing Profile Name in the above Billing Account.
:param str invoice_section_name: The Invoice Section Name in the above Billing Profile.
"""
... | null |
5,727 | # Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation for internal polymorphism `not in` operations."""
from __future__ import absolute_import
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
from mindspore.ops.composite.multitype_ops import _compile_utils as compile_utils
from mindspore.ops import functional as F
from mindspore.ops.composite import base
from mindspore.ops.operations._sequence_ops import InSequence
not_in_ = base.MultitypeFuncGraph("not_in", True, True)
"""
"not_in_" is a multi type func graph object which will determine if a not in b.
using ".register" decorator
"""
@not_in_.register("Number", "Tuple")
def _number_not_in_tuple(x, y):
"""
Determine if a number not in tuple.
Args:
x (Number): x
y (tuple): y
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y) or not F.isconstant(x):
return not InSequence()(x, y)
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("Number", "List")
def _number_not_in_list(x, y):
"""
Determine if a number not in list.
Args:
x (Number): x
y (list): y
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y) or not F.isconstant(x):
return not InSequence()(x, y)
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("String", "String")
def METHOD_NAME(x, y):
"""
Determine if a str not in another str.
Args:
x (str): x
y (str): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not inner.string_in(x, y)
@not_in_.register("String", "Tuple")
def _string_not_in_tuple(x, y):
"""
Determine if a str not in a tuple.
Args:
x (str): x
y (tuple): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("String", "List")
def _string_not_in_list(x, y):
"""
Determine if a str not in a list.
Args:
x (str): x
y (list): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("Tensor", "Dictionary")
@not_in_.register("Tuple", "Dictionary")
@not_in_.register("Number", "Dictionary")
@not_in_.register("String", "Dictionary")
def _str_not_in_dict(x, y):
"""
Determine if an element is not in dict.
Args:
x: Tensor, Tuple, Number, String
y: dict
Returns:
bool, if x not in y return true, x in y return false.
"""
return F.not_in_dict(x, y)
@not_in_.register("Tensor", "List")
def _tensor_not_in_list(x, y):
"""
Determine if a tensor not in a list.
Args:
x: Tensor
y: List
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y):
return not InSequence()(x, y)
return not compile_utils.tensor_in_sequence(x, y)
@not_in_.register("Tensor", "Tuple")
def _tensor_not_in_tuple(x, y):
"""
Determine if a tensor not in a tuple.
Args:
x: Tensor
y: Tuple
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y):
return not InSequence()(x, y)
return not compile_utils.tensor_in_sequence(x, y)
@not_in_.register("mstype", "List")
def _mstype_not_in_list(x, y):
"""
Determine if a mindspore type is not in a list.
Args:
x: mstype
y: List
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.check_in_sequence(x, y)
@not_in_.register("mstype", "Tuple")
def _mstype_not_in_tuple(x, y):
"""
Determine if a mindspore type is not in a tuple.
Args:
x: mstype
y: Tuple
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.check_in_sequence(x, y) | null |
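# A framework-free sketch of the registration idea the ".register" decorator expresses above:
# dispatch an implementation of "x not in y" on the argument types. make_dispatcher and the
# demo functions are invented for this illustration and are not MindSpore APIs.
def make_dispatcher():
    table = {}

    def register(*type_names):
        def decorator(func):
            table[type_names] = func
            return func
        return decorator

    def call(x, y):
        key = (type(x).__name__, type(y).__name__)
        return table[key](x, y)

    return register, call


demo_register, demo_not_in = make_dispatcher()


@demo_register("int", "tuple")
def _demo_number_not_in_tuple(x, y):
    return x not in y


@demo_register("str", "str")
def _demo_string_not_in_string(x, y):
    return x not in y


if __name__ == "__main__":
    assert demo_not_in(3, (1, 2)) is True
    assert demo_not_in("ab", "xabz") is False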
5,728 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.extensions.tests import \
utils as extension_utils
from pgadmin.browser.server_groups.servers.databases.foreign_data_wrappers.\
tests import utils as fdw_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fsrv_utils
from unittest.mock import patch
class ForeignServerAddTestCase(BaseTestGenerator):
"""
This class will add a foreign server under the database node.
"""
scenarios = utils.generate_scenarios('foreign_server_create',
fsrv_utils.test_cases)
def setUp(self):
""" This function will create extension and foreign data wrapper."""
super().setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.schema_name = self.schema_data['schema_name']
self.extension_name = "cube"
self.fdw_name = "fdw_{0}".format(str(uuid.uuid4())[1:8])
self.extension_id = extension_utils.create_extension(
self.server, self.db_name, self.extension_name, self.schema_name)
self.fdw_id = fdw_utils.create_fdw(self.server, self.db_name,
self.fdw_name)
def METHOD_NAME(self):
"""
This function creates a foreign server and returns the created
foreign server response
:return: created foreign server response
"""
return self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) +
'/' + str(self.fdw_id) + '/',
data=json.dumps(self.data),
content_type='html/json')
def runTest(self):
"""This function will fetch foreign data wrapper present under test
database."""
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
fdw_response = fdw_utils.verify_fdw(self.server, self.db_name,
self.fdw_name)
if not fdw_response:
raise Exception("Could not find FDW.")
db_user = self.server["username"]
self.data = fsrv_utils.get_fs_data(db_user, self.server, self.db_name)
if self.is_positive_test:
response = self.METHOD_NAME()
else:
if hasattr(self, "error_fdw_id"):
self.fdw_id = 99999
response = self.METHOD_NAME()
if hasattr(self, "missing_parameter"):
del self.data['name']
response = self.METHOD_NAME()
if hasattr(self, "internal_server_error"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.METHOD_NAME()
if hasattr(self, "error_in_db"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.METHOD_NAME()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function disconnect the test database and drop added foreign
data wrapper."""
extension_utils.drop_extension(self.server, self.db_name,
self.extension_name)
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,729 | # Copyright Kevin Deldycke <[email protected]> and contributors.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import annotations
import json
from operator import attrgetter
from typing import Iterator
from click_extra.platforms import ALL_PLATFORMS
from meta_package_manager.base import Package, PackageManager
from meta_package_manager.capabilities import version_not_implemented
class Pipx(PackageManager):
homepage_url = "https://pypa.github.io/pipx/"
platforms = ALL_PLATFORMS
requirement = "1.0.0"
"""
.. code-block:: shell-session
► pipx --version
1.0.0
"""
@property
def installed(self) -> Iterator[Package]:
"""Fetch installed packages.
.. code-block:: shell-session
► pipx list --json | jq
{
"pipx_spec_version": "0.1",
"venvs": {
"pycowsay": {
"metadata": {
"injected_packages": {},
"main_package": {
"app_paths": [
{
"__Path__": "~/.local/pipx/venvs/pycowsay/bin/pycowsay",
"__type__": "Path"
}
],
"app_paths_of_dependencies": {},
"apps": [
"pycowsay"
],
"apps_of_dependencies": [],
"include_apps": true,
"include_dependencies": false,
"package": "pycowsay",
"package_or_url": "pycowsay",
"package_version": "0.0.0.1",
"pip_args": [],
"suffix": ""
},
"pipx_metadata_version": "0.2",
"python_version": "Python 3.10.4",
"venv_args": []
}
}
}
}
"""
output = self.run_cli("list", "--json")
if output:
for package_id, package_info in json.loads(output)["venvs"].items():
yield self.package(
id=package_id,
installed_version=package_info["metadata"]["main_package"][
"package_version"
],
)
@property
def METHOD_NAME(self) -> Iterator[Package]:
"""Fetch outdated packages.
.. todo::
Mimics ``Pip.outdated()`` operation. There probably is a way to factorize
it.
.. code-block:: shell-session
► pipx runpip poetry list --no-color --format=json --outdated \
> --verbose --quiet | jq
[
{
"name": "charset-normalizer",
"version": "2.0.12",
"location": "~/.local/pipx/venvs/poetry/lib/python3.10/site-packages",
"installer": "pip",
"latest_version": "2.1.0",
"latest_filetype": "wheel"
},
{
"name": "packaging",
"version": "20.9",
"location": "~/.local/pipx/venvs/poetry/lib/python3.10/site-packages",
"installer": "pip",
"latest_version": "21.3",
"latest_filetype": "wheel"
},
{
"name": "virtualenv",
"version": "20.14.1",
"location": "~/.local/pipx/venvs/poetry/lib/python3.10/site-packages",
"installer": "pip",
"latest_version": "20.15.0",
"latest_filetype": "wheel"
}
]
"""
for main_package_id in map(attrgetter("id"), self.installed):
# --quiet is required here to silence warning and error messages
# mangling the JSON content.
output = self.run_cli(
"runpip",
main_package_id,
"list",
"--no-color",
"--format=json",
"--outdated",
"--verbose",
"--quiet",
)
if output:
for sub_package in json.loads(output):
# Only report the main package as outdated, silencing its
# dependencies.
sub_package_id = sub_package["name"]
if sub_package_id == main_package_id:
yield self.package(
id=sub_package_id,
installed_version=sub_package["version"],
latest_version=sub_package["latest_version"],
)
@version_not_implemented
def install(self, package_id: str, version: str | None = None) -> str:
"""Install one package.
.. code-block:: shell-session
► pipx install pycowsay
installed package pycowsay 0.0.0.1, installed using Python 3.10.4
These apps are now globally available
- pycowsay
done! ✨ 🌟 ✨
"""
return self.run_cli("install", package_id)
def upgrade_all_cli(self) -> tuple[str, ...]:
"""Upgrade all packages."""
return self.build_cli("upgrade-all")
@version_not_implemented
def upgrade_one_cli(
self,
package_id: str,
version: str | None = None,
) -> tuple[str, ...]:
"""Upgrade the package provided as parameter."""
return self.build_cli("upgrade", package_id)
def remove(self, package_id: str) -> str:
"""Remove one package.
.. code-block:: shell-session
► pipx uninstall pycowsay
uninstalled pycowsay! ✨ 🌟 ✨
"""
return self.run_cli("uninstall", package_id) | null |
5,730 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from sans.common.enums import SANSInstrument, SANSFacility
from sans.state.StateObjects.StateAdjustment import StateAdjustment
from sans.state.StateObjects.StateCalculateTransmission import StateCalculateTransmission
from sans.state.StateObjects.StateConvertToQ import StateConvertToQ
from sans.state.StateObjects.StateData import StateData
from sans.state.StateObjects.StateMaskDetectors import StateMask
from sans.state.StateObjects.StateMoveDetectors import StateMove
from sans.state.StateObjects.StateNormalizeToMonitor import StateNormalizeToMonitor
from sans.state.StateObjects.StateReductionMode import StateReductionMode
from sans.state.StateObjects.StateSave import StateSave
from sans.state.StateObjects.StateScale import StateScale
from sans.state.StateObjects.StateSliceEvent import StateSliceEvent
from sans.state.AllStates import AllStates
from sans.state.StateObjects.StateWavelength import StateWavelength
from sans.state.StateObjects.StateWavelengthAndPixelAdjustment import StateWavelengthAndPixelAdjustment
# ----------------------------------------------------------------------------------------------------------------------
# State
# ----------------------------------------------------------------------------------------------------------------------
class MockStateData(StateData):
def validate(self):
pass
class MockStateMove(StateMove):
def validate(self):
pass
class MockStateReduction(StateReductionMode):
def get_merge_strategy(self):
pass
def METHOD_NAME(self):
pass
def validate(self):
pass
class MockStateSliceEvent(StateSliceEvent):
def validate(self):
pass
class MockStateMask(StateMask):
def validate(self):
pass
class MockStateWavelength(StateWavelength):
def validate(self):
pass
class MockStateSave(StateSave):
def validate(self):
pass
class MockStateNormalizeToMonitor(StateNormalizeToMonitor):
def validate(self):
pass
class MockStateScale(StateScale):
def validate(self):
pass
class MockStateCalculateTransmission(StateCalculateTransmission):
def validate(self):
pass
class MockStateWavelengthAndPixelAdjustment(StateWavelengthAndPixelAdjustment):
def validate(self):
pass
class MockStateAdjustment(StateAdjustment):
def validate(self):
pass
class MockStateConvertToQ(StateConvertToQ):
def validate(self):
pass
class StateTest(unittest.TestCase):
@staticmethod
def _get_state(entries):
state = AllStates()
default_entries = {
"data": MockStateData(),
"move": MockStateMove(),
"reduction": MockStateReduction(),
"slice": MockStateSliceEvent(),
"mask": MockStateMask(),
"wavelength": MockStateWavelength(),
"save": MockStateSave(),
"scale": MockStateScale(),
"adjustment": MockStateAdjustment(),
"convert_to_q": MockStateConvertToQ(),
}
default_entries["data"].instrument = SANSInstrument.LARMOR
default_entries["data"].facility = SANSFacility.ISIS
for key, value in list(default_entries.items()):
if key in entries:
value = entries[key]
if value is not None: # If the value is None, then don't set it
setattr(state, key, value)
return state
def check_bad_and_good_values(self, bad_state, good_state):
# Bad values
state = self._get_state(bad_state)
with self.assertRaises(ValueError):
state.validate()
# Good values
state = self._get_state(good_state)
self.assertIsNone(state.validate())
if __name__ == "__main__":
unittest.main() | null |
5,731 | # Copyright (c) 2019 University of Illinois and others. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Mozilla Public License v2.0 which accompanies this distribution,
# and is available at https://www.mozilla.org/en-US/MPL/2.0/
import json
import os
from typing import Tuple, Union
import pyincore.globals as pyglobals
from pyincore import IncoreClient
from pyincore.utils import return_http_response
from urllib.parse import urljoin
import requests
logger = pyglobals.LOGGER
class SemanticService:
"""Semantics Service client.
Use this class to interact with the semantics service.
Args:
client (IncoreClient): Service authentication.
"""
def __init__(self, client: IncoreClient):
self.client = client
self.base_url = urljoin(client.service_url, "semantics/api/types")
def get_all_semantic_types(
self,
hyperlink: bool = False,
order: str = "asc",
skip: int = 0,
limit: int = 50,
detail: bool = False,
save_json: bool = False,
json_path: str = "",
timeout: Tuple[int, int] = (30, 600),
**kwargs,
) -> list:
"""Get all semantic types.
Args:
hyperlink (bool): If True, return a list of hyperlinks to the semantic types.
order (str): Order of the semantic types. Can be 'asc' or 'desc'.
skip (int): Number of semantic types to skip.
limit (int): Number of semantic types to return.
detail (bool): If True, return detailed information about the semantic types.
save_json (bool): If True, save the json response to a file.
json_path (str): Path to save the json response.
timeout (tuple): Timeout for the request in seconds.
First value is the connection timeout, second value is the read timeout.
**kwargs: Arbitrary keyword arguments.
Returns:
list: List of semantic types.
"""
url = self.base_url
payload = {
"hyperlink": hyperlink,
"order": order,
"skip": skip,
"limit": limit,
"detail": detail,
}
response = self.client.get(
url, params=payload, timeout=timeout, **kwargs
)
data = return_http_response(response).json()
if save_json:
with open(json_path, "w") as f:
json.dump(data, f, indent=4)
return data
def METHOD_NAME(
self,
type_name: str,
save_json: bool = False,
json_path: str = "",
timeout: Tuple[int, int] = (30, 600),
**kwargs,
) -> dict:
"""Get semantic type by name.
Args:
type_name (str): Name of the semantic type.
save_json (bool): If True, save the json response to a file.
json_path (str): Path to save the json response.
timeout (tuple): Timeout for the request in seconds.
First value is the connection timeout, second value is the read timeout.
**kwargs: Arbitrary keyword arguments.
Returns:
list or dict: List or dictionary of semantic type.
"""
url = f"{self.base_url}/{type_name}"
response = self.client.get(url, timeout=timeout, **kwargs)
data = return_http_response(response).json()
if save_json:
with open(json_path, "w") as f:
json.dump(data, f, indent=4)
return data
def search_semantic_type(
self, query: str, timeout: Tuple[int, int] = (30, 600), **kwargs
) -> list:
"""Search for a semantic type.
Args:
query (str): Query to search for.
timeout (tuple): Timeout for the request in seconds.
First value is the connection timeout, second value is the read timeout.
**kwargs: Arbitrary keyword arguments.
Returns:
list or dict: List or dictionary of semantic type.
"""
url = f"{self.base_url}/search"
payload = {"text": query}
response = self.client.get(
url, params=payload, timeout=timeout, **kwargs
)
return return_http_response(response).json() | null |
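# A small standalone sketch of the save_json/json_path pattern shared by the SemanticService
# methods above: return the parsed payload and optionally persist it to disk. The helper name
# and the demo payload are invented for this illustration and are not pyincore APIs.
import json
import os
import tempfile


def return_and_optionally_save(payload: dict, save_json: bool = False, json_path: str = ""):
    if save_json:
        with open(json_path, "w") as f:
            json.dump(payload, f, indent=4)
    return payload


if __name__ == "__main__":
    data = {"name": "ergo:exampleType", "description": "demo payload"}
    path = os.path.join(tempfile.mkdtemp(), "type.json")
    assert return_and_optionally_save(data, save_json=True, json_path=path) == data
    with open(path) as f:
        assert json.load(f) == data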
5,732 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from collections import namedtuple
from mantid.api import MatrixWorkspace
from mantid.dataobjects import EventWorkspace
from sans.common.general_functions import create_unmanaged_algorithm
from sans.common.constants import EMPTY_NAME
from sans.common.enums import SaveType
# from sans.algorithm_detail.strip_end_nans_and_infs import strip_end_nans
ZERO_ERROR_DEFAULT = 1e6
file_format_with_append = namedtuple("file_format_with_append", "file_format, append_file_format_name")
def save_to_file(workspace, file_format, file_name, transmission_workspaces, additional_run_numbers):
"""
Save a workspace to a file.
:param workspace: the workspace to save.
:param file_format: the selected file format type.
:param file_name: the file name.
:param transmission_workspaces: a dict of additional save algorithm inputs
e.g. Transmission and TransmissionCan for SaveCanSAS1D-v2
:param additional_run_numbers: a dict of workspace type to run number. Used in SaveNXCanSAS only.
:return:
"""
save_options = {"InputWorkspace": workspace}
save_alg = METHOD_NAME(file_format, file_name, save_options, transmission_workspaces, additional_run_numbers)
save_alg.setRethrows(True)
save_alg.execute()
def METHOD_NAME(file_format_bundle, file_name, save_options, transmission_workspaces, additional_run_numbers):
"""
Provide a save strategy based on the selected file format
:param file_format_bundle: the selected file_format_bundle
:param file_name: the name of the file
:param save_options: the save options such as file name and input workspace
:param transmission_workspaces: a dict of additional inputs for SaveCanSAS algorithm
:param additional_run_numbers: a dict of workspace type to run number
:return: a handle to a save algorithm
"""
file_format = file_format_bundle.file_format
if file_format is SaveType.NEXUS:
file_name = get_file_name(file_format_bundle, file_name, "", ".nxs")
save_name = "SaveNexusProcessed"
elif file_format is SaveType.CAN_SAS:
file_name = get_file_name(file_format_bundle, file_name, "", ".xml")
save_name = "SaveCanSAS1D"
save_options.update(transmission_workspaces)
save_options.update(additional_run_numbers)
elif file_format is SaveType.NX_CAN_SAS:
file_name = get_file_name(file_format_bundle, file_name, "_nxcansas", ".h5")
save_name = "SaveNXcanSAS"
save_options.update(transmission_workspaces)
save_options.update(additional_run_numbers)
elif file_format is SaveType.NIST_QXY:
file_name = get_file_name(file_format_bundle, file_name, "_nistqxy", ".dat")
save_name = "SaveNISTDAT"
elif file_format is SaveType.RKH:
file_name = get_file_name(file_format_bundle, file_name, "", ".txt")
save_name = "SaveRKH"
save_options.update({"Append": False})
elif file_format is SaveType.CSV:
file_name = get_file_name(file_format_bundle, file_name, "", ".csv")
save_name = "SaveCSV"
else:
raise RuntimeError("SaveWorkspace: The requested data {0} format is " "currently not supported.".format(file_format))
save_options.update({"Filename": file_name})
return create_unmanaged_algorithm(save_name, **save_options)
def get_file_name(file_format, file_name, post_fix, extension):
if file_format.append_file_format_name:
file_name += post_fix
file_name += extension
return file_name
def get_zero_error_free_workspace(workspace):
"""
Creates a cloned workspace where all zero-error values have been replaced with a large value
:param workspace: The input workspace
:return: The zero-error free workspace
"""
clone_name = "CloneWorkspace"
clone_options = {"InputWorkspace": workspace, "OutputWorkspace": EMPTY_NAME}
clone_alg = create_unmanaged_algorithm(clone_name, **clone_options)
clone_alg.execute()
cloned_workspace = clone_alg.getProperty("OutputWorkspace").value
remove_zero_errors_from_workspace(cloned_workspace)
return cloned_workspace
def remove_zero_errors_from_workspace(workspace):
"""
Removes the zero errors from a matrix workspace
:param workspace: The workspace which will have its zero error values removed.
:return: A zero-error free workspace
"""
# Make sure we are dealing with a MatrixWorkspace
if not isinstance(workspace, MatrixWorkspace) or isinstance(workspace, EventWorkspace):
raise ValueError("Cannot remove zero errors from a workspace which is not a MatrixWorkspace.")
# Uncommenting the next line makes the tests fail that check error values are not zero and that
# compare the loaded workspace to the calculated workspace. If we want to remove the RuntimeWarning
# for NaN values, strip_end_nans should be moved up the workflow.
# workspace = strip_end_nans(workspace, None)
# Iterate over the workspace and replace the zero values with a large default value
number_of_spectra = workspace.getNumberHistograms()
errors = workspace.dataE
for index in range(0, number_of_spectra):
spectrum = errors(index)
spectrum[spectrum <= 0.0] = ZERO_ERROR_DEFAULT | null |
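# A NumPy-only sketch (no Mantid workspaces) of the replacement rule implemented in
# remove_zero_errors_from_workspace above: any error value <= 0 is replaced by a large default,
# mirroring ZERO_ERROR_DEFAULT = 1e6.
import numpy as np


def replace_zero_errors(errors: np.ndarray, default: float = 1e6) -> np.ndarray:
    cleaned = errors.copy()
    cleaned[cleaned <= 0.0] = default
    return cleaned


if __name__ == "__main__":
    e = np.array([0.0, 0.5, -1.0, 2.0])
    expected = np.array([1e6, 0.5, 1e6, 2.0])
    assert np.array_equal(replace_zero_errors(e), expected)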
5,733 | # Copyright 2022 the Regents of the University of California, Nerfstudio Team and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data parser for pre-prepared datasets for all cameras, with no additional processing needed
Optional fields - semantics, mask_filenames, cameras.distortion_params, cameras.times
"""
from dataclasses import dataclass, field
from pathlib import Path
from typing import Type
import numpy as np
import torch
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.data.dataparsers.base_dataparser import (
DataParser,
DataParserConfig,
DataparserOutputs,
Semantics,
)
from nerfstudio.data.scene_box import SceneBox
@dataclass
class MinimalDataParserConfig(DataParserConfig):
"""Minimal dataset config"""
_target: Type = field(default_factory=lambda: MinimalDataParser)
"""target class to instantiate"""
data: Path = Path("/home/nikhil/nerfstudio-main/tests/data/lego_test/minimal_parser")
@dataclass
class MinimalDataParser(DataParser):
"""Minimal DatasetParser"""
config: MinimalDataParserConfig
def METHOD_NAME(self, split="train"):
filepath = self.config.data / f"{split}.npz"
data = np.load(filepath, allow_pickle=True)
image_filenames = [filepath.parent / path for path in data["image_filenames"].tolist()]
mask_filenames = None
if "mask_filenames" in data.keys():
mask_filenames = [filepath.parent / path for path in data["mask_filenames"].tolist()]
if "semantics" in data.keys():
semantics = data["semantics"].item()
metadata = {
"semantics": Semantics(
filenames=[filepath.parent / path for path in semantics["filenames"].tolist()],
classes=semantics["classes"].tolist(),
colors=torch.from_numpy(semantics["colors"]),
mask_classes=semantics["mask_classes"].tolist(),
)
}
else:
metadata = {}
scene_box_aabb = torch.from_numpy(data["scene_box"])
scene_box = SceneBox(aabb=scene_box_aabb)
camera_np = data["cameras"].item()
distortion_params = None
if "distortion_params" in camera_np.keys():
distortion_params = torch.from_numpy(camera_np["distortion_params"])
cameras = Cameras(
fx=torch.from_numpy(camera_np["fx"]),
fy=torch.from_numpy(camera_np["fy"]),
cx=torch.from_numpy(camera_np["cx"]),
cy=torch.from_numpy(camera_np["cy"]),
distortion_params=distortion_params,
height=torch.from_numpy(camera_np["height"]),
width=torch.from_numpy(camera_np["width"]),
camera_to_worlds=torch.from_numpy(camera_np["camera_to_worlds"])[:, :3, :4],
camera_type=torch.from_numpy(camera_np["camera_type"]),
times=torch.from_numpy(camera_np["times"]) if "times" in camera_np.keys() else None,
)
applied_scale = 1.0
applied_transform = torch.eye(4, dtype=torch.float32)[:3, :]
if "applied_scale" in data.keys():
applied_scale = float(data["applied_scale"])
if "applied_transform" in data.keys():
applied_transform = data["applied_transform"].astype(np.float32)
assert applied_transform.shape == (3, 4)
dataparser_outputs = DataparserOutputs(
image_filenames=image_filenames,
cameras=cameras,
scene_box=scene_box,
mask_filenames=mask_filenames,
dataparser_transform=applied_transform,
dataparser_scale=applied_scale,
metadata=metadata,
)
return dataparser_outputs | null |
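# A rough NumPy-only sketch of the minimal train.npz layout that MinimalDataParser reads above.
# The key names mirror the parser (image_filenames, scene_box, cameras), but the single toy camera,
# its shapes and its values are assumptions made for this illustration, not a verified nerfstudio fixture.
import numpy as np


def write_minimal_split(path: str = "train.npz") -> None:
    cameras = {
        "fx": np.array([[500.0]]),
        "fy": np.array([[500.0]]),
        "cx": np.array([[320.0]]),
        "cy": np.array([[240.0]]),
        "height": np.array([[480]]),
        "width": np.array([[640]]),
        "camera_to_worlds": np.eye(4, dtype=np.float32)[None, ...],  # sliced to [:, :3, :4] by the parser
        "camera_type": np.array([1]),
    }
    np.savez(
        path,
        image_filenames=np.array(["images/frame_0000.png"]),
        scene_box=np.array([[-1.0, -1.0, -1.0], [1.0, 1.0, 1.0]], dtype=np.float32),
        cameras=np.array(cameras, dtype=object),  # stored pickled; read back with data["cameras"].item()
    )


if __name__ == "__main__":
    write_minimal_split()
    loaded = np.load("train.npz", allow_pickle=True)
    assert loaded["cameras"].item()["fx"].shape == (1, 1)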
5,734 | from .solana_utils import solana_client, execute_transaction_steps_from_account, write_transaction_to_holder_account, \
execute_transaction_steps_from_instruction
from .utils.contract import make_deployment_transaction
from .utils.ethereum import make_eth_transaction, create_contract_address
from .utils.storage import create_holder
class TestTransactionStepFromAccount:
def METHOD_NAME(self, operator_keypair, treasury_pool, evm_loader,
sender_with_tokens, session_user, holder_acc):
amount = 10
signed_tx = make_eth_transaction(session_user.eth_address, None, sender_with_tokens.solana_account,
sender_with_tokens.solana_account_address, amount)
write_transaction_to_holder_account(signed_tx, holder_acc, operator_keypair)
resp_from_acc = execute_transaction_steps_from_account(operator_keypair, evm_loader, treasury_pool, holder_acc,
[session_user.solana_account_address,
sender_with_tokens.solana_account_address], 0).value
signed_tx = make_eth_transaction(session_user.eth_address, None, sender_with_tokens.solana_account,
sender_with_tokens.solana_account_address, amount)
signature = execute_transaction_steps_from_instruction(operator_keypair, evm_loader, treasury_pool, holder_acc,
signed_tx, [session_user.solana_account_address,
sender_with_tokens.solana_account_address],
0)
resp_from_inst = solana_client.get_transaction(signature.value).value
assert resp_from_acc.transaction.meta.fee == resp_from_inst.transaction.meta.fee
assert resp_from_acc.transaction.meta.inner_instructions == resp_from_inst.transaction.meta.inner_instructions
for i in range(len(resp_from_acc.transaction.meta.post_balances)):
assert resp_from_acc.transaction.meta.post_balances[i] - resp_from_acc.transaction.meta.pre_balances[i] == \
resp_from_inst.transaction.meta.post_balances[i] - resp_from_inst.transaction.meta.pre_balances[i]
def test_deploy_contract(self, operator_keypair, holder_acc, treasury_pool, evm_loader, sender_with_tokens):
contract_filename = "small.binary"
contract = create_contract_address(sender_with_tokens, evm_loader)
signed_tx = make_deployment_transaction(sender_with_tokens, contract_filename)
write_transaction_to_holder_account(signed_tx, holder_acc, operator_keypair)
resp_from_acc = execute_transaction_steps_from_account(operator_keypair, evm_loader, treasury_pool, holder_acc,
[contract.solana_address,
sender_with_tokens.solana_account_address]).value
signed_tx = make_deployment_transaction(sender_with_tokens, contract_filename)
holder_acc = create_holder(operator_keypair)
contract = create_contract_address(sender_with_tokens, evm_loader)
signature = execute_transaction_steps_from_instruction(operator_keypair, evm_loader, treasury_pool, holder_acc,
signed_tx, [contract.solana_address,
sender_with_tokens.solana_account_address])
resp_from_inst = solana_client.get_transaction(signature.value).value
assert resp_from_acc.transaction.meta.fee == resp_from_inst.transaction.meta.fee
assert len(resp_from_acc.transaction.meta.inner_instructions) == len(
resp_from_inst.transaction.meta.inner_instructions)
assert len(resp_from_acc.transaction.transaction.message.account_keys) == len(
resp_from_inst.transaction.transaction.message.account_keys)
5,735 | """
This example uses the data from the balanced pendulum example to generate the data to track.
When it optimizes the program, contrary to the vanilla pendulum, it tracks the values instead of 'knowing' that
it is supposed to balance the pendulum. It is designed to show how to track marker and kinematic data.
Note that the final node is not tracked.
"""
from typing import Callable
import importlib.util
from pathlib import Path
import platform
import biorbd_casadi as biorbd
import numpy as np
from casadi import MX, horzcat, DM
from bioptim import (
BiorbdModel,
OptimalControlProgram,
DynamicsList,
DynamicsFcn,
BoundsList,
ObjectiveList,
ObjectiveFcn,
Axis,
PlotType,
OdeSolver,
OdeSolverBase,
Node,
Solver,
)
# Load track_segment_on_rt
EXAMPLES_FOLDER = Path(__file__).parent / ".."
spec = importlib.util.spec_from_file_location("data_to_track", str(EXAMPLES_FOLDER) + "/getting_started/pendulum.py")
data_to_track = importlib.util.module_from_spec(spec)
spec.loader.exec_module(data_to_track)
def get_markers_pos(x: DM | np.ndarray, idx_marker: int, fun: Callable, n_q: int) -> DM | np.ndarray:
"""
Get the position of a specific marker from the states
Parameters
----------
x: DM | np.ndarray
The states to get the marker positions from
idx_marker: int
The index of the marker to get the position
fun: Callable
The casadi function of the marker position
Returns
-------
The 3xT ([X, Y, Z] x [Time]) matrix of data
"""
marker_pos = []
for i in range(x.shape[1]):
marker_pos.append(fun(x[:n_q, i])[:, idx_marker])
marker_pos = horzcat(*marker_pos)
return marker_pos
def prepare_ocp(
bio_model: BiorbdModel,
final_time: float,
n_shooting: int,
markers_ref: np.ndarray,
tau_ref: np.ndarray,
ode_solver: OdeSolverBase = OdeSolver.RK4(),
assume_phase_dynamics: bool = True,
expand_dynamics: bool = True,
) -> OptimalControlProgram:
"""
Prepare the ocp
Parameters
----------
bio_model: BiorbdModel
The loaded biorbd model
final_time: float
The time at final node
n_shooting: int
The number of shooting points
markers_ref: np.ndarray
The markers to track
tau_ref: np.ndarray
The generalized forces to track
ode_solver: OdeSolverBase
The ode solver to use
assume_phase_dynamics: bool
If the dynamics equation within a phase is unique or changes at each node. True is much faster, but lacks the
capability to have changing dynamics within a phase. A good example of when False should be used is when
different external forces are applied at each node
expand_dynamics: bool
If the dynamics function should be expanded. Please note, this will solve the problem faster, but will slow down
the declaration of the OCP, so it is a trade-off. Also depending on the solver, it may or may not work
(for instance IRK is not compatible with expanded dynamics)
Returns
-------
The OptimalControlProgram ready to be solved
"""
# Add objective functions
objective_functions = ObjectiveList()
objective_functions.add(
ObjectiveFcn.Lagrange.TRACK_MARKERS,
axes=[Axis.Y, Axis.Z],
node=Node.ALL,
weight=100,
target=markers_ref[1:, :, :],
)
objective_functions.add(ObjectiveFcn.Lagrange.TRACK_CONTROL, key="tau", target=tau_ref)
# Dynamics
dynamics = DynamicsList()
dynamics.add(DynamicsFcn.TORQUE_DRIVEN, expand=expand_dynamics)
# Path constraint
x_bounds = BoundsList()
x_bounds["q"] = bio_model.bounds_from_ranges("q")
x_bounds["q"][:, 0] = 0
x_bounds["qdot"] = bio_model.bounds_from_ranges("qdot")
x_bounds["qdot"][:, 0] = 0
# Define control path constraint
n_tau = bio_model.nb_tau
tau_min, tau_max = -100, 100
u_bounds = BoundsList()
u_bounds["tau"] = [tau_min] * n_tau, [tau_max] * n_tau
# ------------- #
return OptimalControlProgram(
bio_model,
dynamics,
n_shooting,
final_time,
x_bounds=x_bounds,
u_bounds=u_bounds,
objective_functions=objective_functions,
ode_solver=ode_solver,
assume_phase_dynamics=assume_phase_dynamics,
)
def METHOD_NAME():
"""
Firstly, it solves the getting_started/pendulum.py example. Afterward, it gets the marker positions and joint
torque from the solution and uses them as tracking targets. It then creates and solves this ocp and shows the results
"""
biorbd_path = str(EXAMPLES_FOLDER) + "/getting_started/models/pendulum.bioMod"
bio_model = BiorbdModel(biorbd_path)
final_time = 1
n_shooting = 20
ocp_to_track = data_to_track.prepare_ocp(
biorbd_model_path=biorbd_path, final_time=final_time, n_shooting=n_shooting
)
sol = ocp_to_track.solve()
q, qdot, tau = sol.states["q"], sol.states["qdot"], sol.controls["tau"]
n_q = bio_model.nb_q
n_marker = bio_model.nb_markers
x = np.concatenate((q, qdot))
symbolic_states = MX.sym("q", n_q, 1)
markers_fun = biorbd.to_casadi_func("ForwardKin", bio_model.markers, symbolic_states)
markers_ref = np.zeros((3, n_marker, n_shooting + 1))
for i in range(n_shooting + 1):
markers_ref[:, :, i] = markers_fun(x[:n_q, i])
tau_ref = tau[:, :-1]
ocp = prepare_ocp(
bio_model,
final_time=final_time,
n_shooting=n_shooting,
markers_ref=markers_ref,
tau_ref=tau_ref,
)
# --- plot markers position --- #
title_markers = ["x", "y", "z"]
marker_color = ["tab:red", "tab:orange"]
ocp.add_plot(
"Markers plot coordinates",
update_function=lambda t, x, u, p: get_markers_pos(x, 0, markers_fun, n_q),
linestyle=".-",
plot_type=PlotType.STEP,
color=marker_color[0],
)
ocp.add_plot(
"Markers plot coordinates",
update_function=lambda t, x, u, p: get_markers_pos(x, 1, markers_fun, n_q),
linestyle=".-",
plot_type=PlotType.STEP,
color=marker_color[1],
)
ocp.add_plot(
"Markers plot coordinates",
update_function=lambda t, x, u, p: markers_ref[:, 0, :],
plot_type=PlotType.PLOT,
color=marker_color[0],
legend=title_markers,
)
ocp.add_plot(
"Markers plot coordinates",
update_function=lambda t, x, u, p: markers_ref[:, 1, :],
plot_type=PlotType.PLOT,
color=marker_color[1],
legend=title_markers,
)
# --- Solve the program --- #
sol = ocp.solve(Solver.IPOPT(show_online_optim=platform.system() == "Linux"))
# --- Show results --- #
sol.animate(n_frames=100)
if __name__ == "__main__":
METHOD_NAME() | null |
5,736 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqtinterfaces.MultiPlotting.subplot.subplot_context import subplotContext
class line(object):
def __init__(self):
self.label = "test"
def get_label(self):
return self.label
def get_color(self):
return "red"
def get_marker(self):
return "star"
def METHOD_NAME(self):
return
class label(object):
def __init__(self, name, protected):
self.text = name
self.protected = protected
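# Stand-in for an errorbar return value: (data line, cap lines, bar line collections)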
def errors():
return tuple([line(), [line()], [line()]])
class SubPlotContextTest(unittest.TestCase):
def setUp(self):
name = "test"
self.subplot = mock.MagicMock()
self.context = subplotContext(name, self.subplot)
def test_add_line_no_errors(self):
ws = mock.MagicMock()
with mock.patch("mantid.plots.axesfunctions.plot") as patch:
patch.return_value = tuple([line()])
self.context.addLine(ws, 3)
self.assertEqual(patch.call_count, 1)
patch.assert_called_with(self.subplot, ws, specNum=3, distribution=True)
def test_add_line_errors(self):
ws = mock.MagicMock()
self.context.change_errors(True)
lines = line()
with mock.patch("mantid.plots.axesfunctions.plot") as plot:
plot.return_value = tuple([lines])
with mock.patch("mantid.plots.axesfunctions.errorbar") as patch:
patch.return_value = errors()
self.context.addLine(ws, 3)
self.assertEqual(plot.call_count, 1)
self.assertEqual(patch.call_count, 1)
patch.assert_called_with(self.subplot, ws, specNum=3, distribution=True)
def test_redraw_errors(self):
ws = mock.MagicMock()
self.context.change_errors(True)
lines = line()
# add first line
with mock.patch("mantid.plots.axesfunctions.plot") as plot:
plot.return_value = tuple([lines])
with mock.patch("mantid.plots.axesfunctions.errorbar") as patch:
patch.return_value = errors()
self.context.addLine(ws, 3)
self.assertEqual(plot.call_count, 1)
self.assertEqual(patch.call_count, 1)
# redraw
self.context.redraw(lines.get_label())
self.assertEqual(patch.call_count, 2)
patch.assert_called_with(
self.subplot,
ws,
specNum=3,
color=lines.get_color(),
marker=lines.get_marker(),
label=lines.get_label(),
distribution=True,
)
def test_redraw_no_errors(self):
ws = mock.MagicMock()
with mock.patch("mantid.plots.axesfunctions.plot") as patch:
lines = line()
patch.return_value = tuple([lines])
self.context.addLine(ws, 3)
self.assertEqual(patch.call_count, 1)
patch.assert_called_with(self.subplot, ws, specNum=3, distribution=True)
# redraw
self.context.redraw(lines.get_label())
self.assertEqual(patch.call_count, 2)
patch.assert_called_with(
self.subplot, ws, specNum=3, color=lines.get_color(), marker=lines.get_marker(), label=lines.get_label(), distribution=True
)
def test_change_errors(self):
self.context._lines = {"one": 1, "two": 2, "three": 3}
self.context.redraw = mock.MagicMock()
self.context.change_errors(True)
self.assertEqual(self.context.redraw.call_count, 3)
def test_change_auto(self):
self.context._lines = {"one": 1, "two": 2, "three": 3}
self.context._subplot.autoscale = mock.MagicMock()
self.context.change_auto(True)
self.assertEqual(self.context._subplot.autoscale.call_count, 3)
def test_vlines(self):
self.context._labelObjects = {"one": label("one", True), "two": label("two", False), "three": label("three", False)}
self.context._vLines = {"two": mock.MagicMock(), "four": mock.MagicMock()}
result = self.context.vlines
expect = ["two", "three", "four"]
for key in expect:
self.assertIn(key, result)
self.assertEqual(len(result), len(expect))
def test_replaceWS(self):
ws = mock.Mock()
ws2 = mock.Mock()
ws.name = mock.Mock(return_value="test")
ws2.name = mock.Mock(return_value="not used")
self.context.redraw = mock.Mock()
with mock.patch("mantid.plots.axesfunctions.plot") as patch:
patch.return_value = tuple([line()])
self.context.addLine(ws, 3)
self.context.addLine(ws2, 3)
# check initial set up
keys = self.context.ws.keys()
expect = [ws, ws2]
for key in expect:
self.assertIn(key, keys)
self.assertEqual(len(keys), len(expect))
# do the method
redraw = self.context.replace_ws(ws)
self.assertEqual(redraw, True)
new_keys = self.context.ws.keys()
for key in expect:
self.assertIn(key, new_keys)
self.assertEqual(len(new_keys), len(expect))
self.assertEqual(self.context.ws[ws], ["test"])
def test_getLinesFromWSName(self):
ws = mock.Mock()
ws2 = mock.Mock()
ws.name = mock.Mock(return_value="test")
ws2.name = mock.Mock(return_value="not used")
self.context.redraw = mock.Mock()
with mock.patch("mantid.plots.axesfunctions.plot") as patch:
patch.return_value = tuple([line()])
self.context.addLine(ws, 1)
self.context.addLine(ws2, 2)
expect = ["test"]
for key in expect:
self.assertIn(key, "test")
if __name__ == "__main__":
unittest.main() | null |
5,737 | ################################################################################
# Copyright (c) 2021 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 27-01-2022 #
# Author(s): Hamed Hemati #
# E-mail: [email protected] #
# Website: avalanche.continualai.org #
################################################################################
"""
This simple profiler measures the amount of time required to finish an
experience in an online manner using different strategies and options.
"""
from os.path import expanduser
import argparse
import torch
from torch.nn import CrossEntropyLoss
import torch.optim.lr_scheduler
from torch.utils.data import DataLoader
from tqdm import tqdm
import cProfile
import pstats
from avalanche.benchmarks import SplitMNIST
from avalanche.benchmarks.scenarios.online_scenario import fixed_size_experience_split
from avalanche.models import SimpleMLP
from avalanche.training.supervised.strategy_wrappers_online import OnlineNaive
from avalanche.benchmarks.scenarios import OnlineCLScenario
from avalanche.training.plugins import ReplayPlugin
from avalanche.training.storage_policy import ReservoirSamplingBuffer
from avalanche.evaluation.metrics import loss_metrics
from avalanche.logging import InteractiveLogger
from avalanche.training.plugins import EvaluationPlugin
##################################################
# Online naive strategy without Avalanche
##################################################
def METHOD_NAME(benchmark, device):
"""
Online naive strategy without Avalanche.
"""
print("=" * 30)
print("Profiling online naive strategy without Avalanche ...")
experience_0 = benchmark.train_stream[0]
with cProfile.Profile() as pr:
# Initialize model, optimizer and criterion
model = SimpleMLP(num_classes=10)
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
# Iterate over the dataset and train the model
dataloader = DataLoader(experience_0.dataset, batch_size=1)
pbar = tqdm(dataloader)
for x, y, _ in pbar:
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
pred = model(x)
loss = criterion(pred, y)
loss.backward()
optimizer.step()
pbar.set_description(f"Loss: {loss.item():0.4f}")
stats = pstats.Stats(pr)
stats.sort_stats("tottime").print_stats(15)
##################################################
# Online naive strategy without Avalanche using lazy stream
##################################################
def profile_online_naive_lazy_stream(benchmark, device):
"""
Online naive strategy without Avalanche using lazy stream.
"""
print("=" * 30)
print("Profiling online naive strategy using lazy streams (no AVL) ...")
experience_0 = benchmark.train_stream[0]
def load_all_data(data):
return next(iter(DataLoader(data, len(data))))
with cProfile.Profile() as pr:
model = SimpleMLP(num_classes=10).to(device)
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
criterion = torch.nn.CrossEntropyLoss()
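# Lazily split the first experience into single-sample sub-experiences, mimicking an online stream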
for exp in tqdm(fixed_size_experience_split(experience_0, 1)):
x, y, _ = load_all_data(exp.dataset)
x, y = x.to(device), torch.tensor([y]).to(device)
optimizer.zero_grad()
pred = model(x)
loss = criterion(pred, y)
loss.backward()
optimizer.step()
stats = pstats.Stats(pr)
stats.sort_stats("tottime").print_stats(15)
##################################################
# Online strategy using Avalanche
##################################################
def profile_online_avl(
benchmark, device, strategy="naive", use_interactive_logger: bool = True
):
"""
Online strategy using Avalanche.
"""
print("=" * 30)
print(f"Profiling online {strategy} strategy using Avalanche ...")
experience_0 = benchmark.train_stream[0]
# Create list of loggers to be used
loggers = []
if use_interactive_logger:
interactive_logger = InteractiveLogger()
loggers.append(interactive_logger)
# Evaluation plugin
eval_plugin = EvaluationPlugin(
loss_metrics(minibatch=True, epoch=True, experience=True, stream=True),
loggers=loggers,
)
with cProfile.Profile() as pr:
# Model
model = SimpleMLP(num_classes=10)
plugins = []
if strategy == "er":
# CREATE THE STRATEGY INSTANCE (ONLINE-REPLAY)
storage_policy = ReservoirSamplingBuffer(max_size=100)
replay_plugin = ReplayPlugin(
mem_size=100, batch_size=1, storage_policy=storage_policy
)
plugins.append(replay_plugin)
# Create OnlineNaive strategy
cl_strategy = OnlineNaive(
model,
torch.optim.SGD(model.parameters(), lr=0.01),
CrossEntropyLoss(),
train_passes=1,
train_mb_size=1,
device=device,
evaluator=eval_plugin,
plugins=plugins,
)
online_cl_scenario = OnlineCLScenario(benchmark.streams.values(), experience_0)
# Train on the first experience only
cl_strategy.train(online_cl_scenario.train_stream)
stats = pstats.Stats(pr)
stats.sort_stats("tottime").print_stats(40)
def main(args):
# Compute device
device = "cuda" if args.cuda >= 0 and torch.cuda.is_available() else "cpu"
print("Using ", device)
# Benchmark
benchmark = SplitMNIST(
n_experiences=5,
dataset_root=expanduser("~") + "/.avalanche/data/mnist/",
)
# Profilers:
METHOD_NAME(benchmark, device)
profile_online_naive_lazy_stream(benchmark, device)
profile_online_avl(benchmark, device, strategy="naive")
profile_online_avl(benchmark, device, strategy="er")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cuda",
type=int,
default=0,
help="Select zero-indexed cuda device. -1 to use CPU.",
)
args = parser.parse_args()
main(args) | null |
5,738 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018, 2020 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""A module for common maintenance scripts."""
from __future__ import absolute_import, print_function
from flask import current_app
from invenio_db import db
from cds.modules.deposit.api import deposit_video_resolver
from cds.modules.records.api import CDSVideosFilesIterator
from cds.modules.records.resolver import record_resolver
from ..flows.api import FlowService
from ..flows.deposit import index_deposit_project
from ..flows.models import FlowMetadata
from ..flows.tasks import TranscodeVideoTask
from ..opencast.utils import can_be_transcoded
id_types = ["recid", "depid"]
def METHOD_NAME(id_type, id_value):
"""Create all missing subformats."""
_validate(id_type=id_type)
depid, video_deposit = _resolve_deposit(id_type, id_value)
master, w, h = _get_master_video(video_deposit)
subformats = CDSVideosFilesIterator.get_video_subformats(master)
dones = [subformat["tags"]["preset_quality"] for subformat in subformats]
missing = set(current_app.config["CDS_OPENCAST_QUALITIES"].keys()) - set(
dones
)
transcodables_qualities = list(
filter(
lambda q: can_be_transcoded(q, w, h),
missing,
)
)
flow_metadata = FlowMetadata.get_by_deposit(depid)
assert flow_metadata, "Cannot find Flow for given deposit id {0}".format(
depid
)
if transcodables_qualities:
_run_transcoding_for(flow_metadata, transcodables_qualities)
return transcodables_qualities
def create_subformat(id_type, id_value, quality):
"""Recreate a given subformat."""
_validate(id_type=id_type, quality=quality)
depid, video_deposit = _resolve_deposit(id_type, id_value)
master, w, h = _get_master_video(video_deposit)
subformat = can_be_transcoded(quality, w, h)
if subformat:
flow_metadata = FlowMetadata.get_by_deposit(depid)
assert (
flow_metadata
), "Cannot find Flow for given deposit id {0}".format(depid)
_run_transcoding_for(flow_metadata, [quality])
return subformat["preset_quality"] if subformat else None
def create_all_subformats(id_type, id_value):
"""Recreate all subformats."""
_validate(id_type=id_type)
depid, video_deposit = _resolve_deposit(id_type, id_value)
master, w, h = _get_master_video(video_deposit)
transcodables_qualities = list(
filter(
lambda q: can_be_transcoded(q, w, h),
current_app.config["CDS_OPENCAST_QUALITIES"].keys(),
)
)
flow_metadata = FlowMetadata.get_by_deposit(depid)
assert flow_metadata, "Cannot find Flow for given deposit id {0}".format(
depid
)
_run_transcoding_for(flow_metadata, transcodables_qualities)
return transcodables_qualities
def _run_transcoding_for(flow_metadata, qualities=None):
"""Run transcoding for the given qualities."""
payload = flow_metadata.payload
payload = dict(
deposit_id=payload["deposit_id"],
flow_id=payload["flow_id"],
bucket_id=payload["bucket_id"],
key=payload["key"],
version_id=payload["version_id"],
)
TranscodeVideoTask.create_flow_tasks(payload, qualities=qualities)
db.session.commit()
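# Dispatch the actual transcoding asynchronously (presumably a Celery task signature)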
TranscodeVideoTask().s(**payload).apply_async()
db.session.commit()
index_deposit_project(payload["deposit_id"])
def _resolve_deposit(id_type, id_value):
"""Return the deposit video."""
depid = id_value
if id_type == "recid":
_, record = record_resolver.resolve(id_value)
depid = record["_deposit"]["id"]
return depid, deposit_video_resolver(depid)
def _get_master_video(video_deposit):
"""Return master video."""
master = CDSVideosFilesIterator.get_master_video_file(video_deposit)
if not master:
raise Exception("No master video found for the given record")
return (
master,
int(master["tags"]["width"]),
int(master["tags"]["height"]),
)
def _validate(id_type=None, quality=None):
"""Validate input parameters."""
if id_type not in id_types:
raise Exception("`id_type` param must be one of {0}".format(id_types))
all_possible_qualities = current_app.config[
"CDS_OPENCAST_QUALITIES"
].keys()
if quality and quality not in all_possible_qualities:
raise Exception(
"`quality` param must be one of {0}".format(all_possible_qualities)
) | null |
5,739 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantid.simpleapi import CreateSampleWorkspace, CreatePeaksWorkspace, CreateWorkspace, SetSampleMaterial, SetUB
from mantid.geometry import CrystalStructure, CSGObject, OrientedLattice
from mantid.api import Sample
from numpy import pi
import copy
class SampleTest(unittest.TestCase):
def test_lattice_accessors(self):
instrument_ws = CreateSampleWorkspace()
peaks = CreatePeaksWorkspace(instrument_ws, 0)
SetUB(peaks, 1, 1, 1, 90, 90, 90)
sample = peaks.sample()
self.assertTrue(sample.hasOrientedLattice())
self.assertTrue(isinstance(sample.getOrientedLattice(), OrientedLattice))
sample.clearOrientedLattice()
self.assertFalse(sample.hasOrientedLattice())
def test_geometry_getters_and_setters(self):
sample = Sample()
sample.setThickness(12.5)
self.assertEqual(sample.getThickness(), 12.5)
sample.setHeight(10.2)
self.assertEqual(sample.getHeight(), 10.2)
sample.setWidth(5.9)
self.assertEqual(sample.getWidth(), 5.9)
def test_crystal_structure_handling(self):
sample = Sample()
self.assertEqual(sample.hasCrystalStructure(), False)
self.assertRaises(RuntimeError, sample.getCrystalStructure)
cs = CrystalStructure("5.43 5.43 5.43", "F d -3 m", "Si 0 0 0 1.0 0.01")
sample.setCrystalStructure(cs)
self.assertEqual(sample.hasCrystalStructure(), True)
cs_from_sample = sample.getCrystalStructure()
self.assertEqual(cs.getSpaceGroup().getHMSymbol(), cs_from_sample.getSpaceGroup().getHMSymbol())
self.assertEqual(cs.getUnitCell().a(), cs_from_sample.getUnitCell().a())
self.assertEqual(len(cs.getScatterers()), len(cs_from_sample.getScatterers()))
self.assertEqual(cs.getScatterers()[0], cs_from_sample.getScatterers()[0])
sample.clearCrystalStructure()
self.assertEqual(sample.hasCrystalStructure(), False)
self.assertRaises(RuntimeError, sample.getCrystalStructure)
def test_material(self):
ws = CreateWorkspace(DataX=[1], DataY=[1], StoreInADS=False)
sample = ws.sample()
SetSampleMaterial(ws, "Al2 O3", SampleMassDensity=4, StoreInADS=False)
material = sample.getMaterial()
self.assertAlmostEqual(material.numberDensity, 0.1181, places=4)
self.assertAlmostEqual(material.relativeMolecularMass(), 101.961, places=3)
atoms, numatoms = material.chemicalFormula()
self.assertEqual(len(atoms), len(numatoms))
self.assertEqual(len(atoms), 2)
self.assertEqual(numatoms[0], 2)
self.assertEqual(numatoms[1], 3)
xs0 = atoms[0].neutron()
xs1 = atoms[1].neutron()
# the correct way to calculate the coherent cross section
# is to average the scattering lengths, then convert to a cross section
b_real = (xs0["coh_scatt_length_real"] * 2 + xs1["coh_scatt_length_real"] * 3) / 5
b_imag = (xs0["coh_scatt_length_img"] * 2 + xs1["coh_scatt_length_img"] * 3) / 5
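# sigma = 4*pi*|b|^2 with b in fm; dividing by 100 converts fm^2 to barns, hence the 0.04*pi factor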
xs = 0.04 * pi * (b_real * b_real + b_imag * b_imag)
self.assertAlmostEqual(material.cohScatterXSection(), xs, places=4)
def METHOD_NAME(self):
sample = Sample()
self.assertEqual(type(sample.getShape()), CSGObject)
def test_get_shape_xml(self):
sample = Sample()
shape = sample.getShape()
xml = shape.getShapeXML()
self.assertEqual(type(xml), str)
def do_test_copyable(self, copy_op):
original = Sample()
width = 1.0
height = 2.0
thickness = 3.0
original.setThickness(thickness)
original.setHeight(height)
original.setWidth(width)
# make copy
cp = copy_op(original)
# Check identity different
self.assertNotEqual(id(original), id(cp))
# Simple tests that cp is equal to original
self.assertEqual(original.getHeight(), cp.getHeight())
self.assertEqual(original.getWidth(), cp.getWidth())
self.assertEqual(original.getThickness(), cp.getThickness())
# Check really did succeed and is not tied in any way to original
del original
self.assertTrue(id(cp) > 0)
self.assertEqual(height, cp.getHeight())
self.assertEqual(width, cp.getWidth())
self.assertEqual(thickness, cp.getThickness())
def test_shallow_copyable(self):
self.do_test_copyable(copy.copy)
def test_deep_copyable(self):
self.do_test_copyable(copy.deepcopy)
def test_equals(self):
a = Sample()
b = Sample()
self.assertEqual(a, b)
b.setThickness(10)
self.assertNotEqual(a, b)
if __name__ == "__main__":
unittest.main() | null |
5,740 | from decimal import Decimal
import decimal
import math
from contracting.stdlib.bridge.decimal import ContractingDecimal, fix_precision, should_round, MAX_DECIMAL, neg_sci_not
from unittest import TestCase
class TestDecimal(TestCase):
def test_init(self):
ContractingDecimal('1.1')
def test_init_float(self):
ContractingDecimal(1.2)
def test_init_int(self):
ContractingDecimal(1)
def test_bool_true(self):
self.assertTrue(ContractingDecimal(1))
def test_bool_false(self):
self.assertFalse(ContractingDecimal(0))
def test_eq_whole_numbers(self):
self.assertEqual(ContractingDecimal(1), ContractingDecimal(1))
def test_eq_floats(self):
self.assertEqual(ContractingDecimal(1.234), ContractingDecimal(1.234))
def test_lt(self):
self.assertLess(ContractingDecimal(1), ContractingDecimal(2))
self.assertLess(ContractingDecimal(1.1), ContractingDecimal(2.2))
def test_lte(self):
self.assertLessEqual(ContractingDecimal(1), ContractingDecimal(2))
self.assertLessEqual(ContractingDecimal(1.1), ContractingDecimal(2.2))
self.assertLessEqual(ContractingDecimal(2.2), ContractingDecimal(2.2))
def test_gt(self):
self.assertGreater(ContractingDecimal(10), ContractingDecimal(2))
self.assertGreater(ContractingDecimal(10.1), ContractingDecimal(2.2))
def test_gte(self):
self.assertGreaterEqual(ContractingDecimal(10), ContractingDecimal(2))
self.assertGreaterEqual(ContractingDecimal(10.1), ContractingDecimal(2.2))
self.assertGreaterEqual(ContractingDecimal(2.2), ContractingDecimal(2.2))
def test_str(self):
f = ContractingDecimal(1.23445)
self.assertEqual(str(f), '1.23445')
def test_neg(self):
self.assertEqual(-ContractingDecimal(1), ContractingDecimal(-1))
def test_pos(self):
self.assertEqual(+ContractingDecimal(1), ContractingDecimal(1))
def test_other_equality(self):
self.assertEqual(ContractingDecimal(1), 1)
self.assertEqual(ContractingDecimal(1), 1.0)
def test_abs(self):
self.assertEqual(abs(ContractingDecimal(1)), 1)
self.assertEqual(abs(ContractingDecimal(-1)), 1)
def test_add(self):
self.assertEqual(ContractingDecimal(1) + ContractingDecimal(1), 2)
self.assertEqual(ContractingDecimal(1) + ContractingDecimal(10), 11)
self.assertEqual(ContractingDecimal(1.23456) + ContractingDecimal(6.54321), ContractingDecimal(7.77777))
def test_arbitrarily_large_number(self):
a = ContractingDecimal('38327950288419716939937510.582097494459230781640628620899')
b = ContractingDecimal('67523846748184676694051320.005681271452635608277857713427')
c = ContractingDecimal('105851797036604393633988830.587778765911866389918486334326')
self.assertEqual(a + b, c)
def test_zero_equality(self):
self.assertEqual(ContractingDecimal(0), 0)
def test_sub(self):
self.assertEqual(ContractingDecimal(1) - ContractingDecimal(1), 0)
self.assertEqual(ContractingDecimal(1) - ContractingDecimal(10), -9)
self.assertEqual(ContractingDecimal(1.23456) - ContractingDecimal(6.54321), ContractingDecimal(-5.30865))
def test_add_negs(self):
self.assertEqual(ContractingDecimal(1) + ContractingDecimal(-1), 0)
def test_radd(self):
self.assertEqual(1 + ContractingDecimal(1), 2)
self.assertEqual(1 + ContractingDecimal(10), 11)
self.assertEqual(1.23456 + ContractingDecimal(6.54321), ContractingDecimal(7.77777))
def test_rsub(self):
self.assertEqual(1 - ContractingDecimal(1), 0)
self.assertEqual(1 - ContractingDecimal(10), -9)
self.assertEqual(1.23456 - ContractingDecimal(6.54321), ContractingDecimal(-5.30865))
def test_mul(self):
self.assertEqual(ContractingDecimal(5) * ContractingDecimal(42), 210)
self.assertEqual(ContractingDecimal(0) * ContractingDecimal(100), 0)
self.assertEqual(ContractingDecimal(-5) * ContractingDecimal(42), -210)
self.assertEqual(ContractingDecimal(5.1234) * ContractingDecimal(2.3451), ContractingDecimal('12.01488534'))
def test_rmul(self):
self.assertEqual(5 * ContractingDecimal(42), 210)
self.assertEqual(0 * ContractingDecimal(100), 0)
self.assertEqual(-5 * ContractingDecimal(42), -210)
self.assertEqual(5.1234 * ContractingDecimal(2.3451), ContractingDecimal('12.01488534'))
def test_div(self):
self.assertEqual((ContractingDecimal(1) / ContractingDecimal(3)), ContractingDecimal('0.333333333333333333333333333333'))
self.assertEqual(ContractingDecimal(3) / ContractingDecimal(1), 3)
def test_div_large_decimals(self):
a = '0.78164062862089986280348253421170'
b = '0.53642401735797937714409102114816'
c = ContractingDecimal(a) / ContractingDecimal(b)
print(c)
def test_should_round_false_for_lower_number(self):
d = Decimal('1.12345678901234567890123456789')
self.assertFalse(should_round(d))
def test_should_round_true_for_too_lower_number(self):
d = Decimal('1.123456789012345678901234567890123')
self.assertTrue(should_round(d))
def test_fix_precision_cuts_too_low(self):
d = Decimal('1.123456789012345678901234567890123')
e = Decimal('1.12345678901234567890123456789')
self.assertEqual(fix_precision(d), e)
def METHOD_NAME(self):
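# More integer digits than the allowed precision: fix_precision should clamp the value to MAX_DECIMAL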
e = Decimal('123456789012345678901234567890')
self.assertEqual(fix_precision(e), MAX_DECIMAL)
def test_fix_precision_doesnt_cut_high(self):
e = Decimal('12345678901234567890123456789')
self.assertEqual(fix_precision(e), e)
def test_fix_precision_cuts_all_decimals_if_too_high(self):
e = Decimal('123456789012345678901234567890.123456')
self.assertEqual(fix_precision(e), MAX_DECIMAL)
def test_fix_precision_cuts_decimals_if_high_but_not_too_high(self):
e = Decimal('12345678901234567890123456789.123456789012345678901234567890')
f = Decimal('12345678901234567890123456789.12345678901234567890123456789')
self.assertEqual(fix_precision(e), f)
def test_contracting_decimal_can_round(self):
s = '12345678901234567890123456789.123456789012345678901234567890'
self.assertEqual(round(Decimal(s), 10), round(ContractingDecimal(s), 10))
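# neg_sci_not expands negative-exponent scientific notation strings into plain decimal strings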
def test_sci_not_whole_number(self):
s = '2e-5'
expected = '0.00002'
self.assertEqual(neg_sci_not(s), expected)
def test_sci_not_decimal(self):
s = '2.2e-7'
expected = '0.00000022'
self.assertEqual(neg_sci_not(s), expected)
def test_sci_not_e0(self):
s = '2e-0'
expected = '2'
self.assertEqual(neg_sci_not(s), expected)
def test_sci_not_extra_precision(self):
s = '20e-5'
expected = '20e-5'
self.assertEqual(neg_sci_not(s), expected) | null |
5,741 | import sqlparse
from collections import namedtuple
from sqlparse.sql import IdentifierList, Identifier, Function
from sqlparse.tokens import Keyword, DML, Punctuation
TableReference = namedtuple(
"TableReference", ["schema", "name", "alias", "is_function"]
)
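# .ref prefers the alias; otherwise the table name, double-quoted unless it is already lowercase or quoted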
TableReference.ref = property(
lambda self: self.alias or (
self.name
if self.name.islower() or self.name[0] == '"'
else '"' + self.name + '"'
)
)
# This code is borrowed from sqlparse example script.
# <url>
def is_subselect(parsed):
if not parsed.is_group:
return False
for item in parsed.tokens:
if item.ttype is DML and item.value.upper() in (
"SELECT",
"INSERT",
"UPDATE",
"CREATE",
"DELETE",
):
return True
return False
def _identifier_is_function(identifier):
return any(isinstance(t, Function) for t in identifier.tokens)
def extract_from_part(parsed, stop_at_punctuation=True):
tbl_prefix_seen = False
for item in parsed.tokens:
if tbl_prefix_seen:
if is_subselect(item):
yield from extract_from_part(item, stop_at_punctuation)
elif stop_at_punctuation and item.ttype is Punctuation:
return
# An incomplete nested select won't be recognized correctly as a
# sub-select. eg: 'SELECT * FROM (SELECT id FROM user'. This causes
# the second FROM to trigger this elif condition resulting in a
# `return`. So we need to ignore the keyword if the keyword
# is FROM.
# Also 'SELECT * FROM abc JOIN def' will trigger this elif
# condition. So we need to ignore the keyword JOIN and its variants
# INNER JOIN, FULL OUTER JOIN, etc.
elif (
item.ttype is Keyword and
item.value.upper() != "FROM" and
(not item.value.upper().endswith("JOIN"))
):
tbl_prefix_seen = False
else:
yield item
elif item.ttype is Keyword or item.ttype is Keyword.DML:
item_val = item.value.upper()
if item_val in (
"COPY",
"FROM",
"INTO",
"UPDATE",
"TABLE",
) or item_val.endswith("JOIN"):
tbl_prefix_seen = True
# 'SELECT a, FROM abc' will detect FROM as part of the column list.
# So this check here is necessary.
elif isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
if identifier.ttype is Keyword and \
identifier.value.upper() == "FROM":
tbl_prefix_seen = True
break
def extract_table_identifiers(token_stream, allow_functions=True):
"""yields tuples of TableReference namedtuples"""
# We need to do some massaging of the names because postgres is case-
# insensitive and '"Foo"' is not the same table as 'Foo' (while 'foo' is)
def METHOD_NAME(item):
name = item.get_real_name()
schema_name = item.get_parent_name()
alias = item.get_alias()
if not name:
schema_name = None
name = item.get_name()
alias = alias or name
schema_quoted = schema_name and item.value[0] == '"'
if schema_name and not schema_quoted:
schema_name = schema_name.lower()
quote_count = item.value.count('"')
name_quoted = quote_count > 2 or (quote_count and not schema_quoted)
alias_quoted = alias and item.value[-1] == '"'
if alias_quoted or name_quoted and not alias and name.islower():
alias = '"' + (alias or name) + '"'
if name and not name_quoted and not name.islower():
if not alias:
alias = name
name = name.lower()
return schema_name, name, alias
try:
for item in token_stream:
if isinstance(item, IdentifierList):
for identifier in item.get_identifiers():
# Sometimes Keywords (such as FROM ) are classified as
# identifiers which don't have the get_real_name() method.
try:
schema_name = identifier.get_parent_name()
real_name = identifier.get_real_name()
is_function = allow_functions and \
_identifier_is_function(identifier)
except AttributeError:
continue
if real_name:
yield TableReference(
schema_name, real_name, identifier.get_alias(),
is_function
)
elif isinstance(item, Identifier):
schema_name, real_name, alias = METHOD_NAME(item)
is_function = allow_functions and _identifier_is_function(item)
yield TableReference(schema_name, real_name, alias,
is_function)
elif isinstance(item, Function):
schema_name, real_name, alias = METHOD_NAME(item)
yield TableReference(None, real_name, alias, allow_functions)
except StopIteration:
return
# extract_tables is inspired by examples in the sqlparse lib.
def extract_tables(sql):
"""Extract the table names from an SQL statment.
Returns a list of TableReference namedtuples
"""
parsed = sqlparse.parse(sql)
if not parsed:
return ()
# INSERT statements must stop looking for tables at the sign of first
# Punctuation. eg: INSERT INTO abc (col1, col2) VALUES (1, 2)
# abc is the table name, but if we don't stop at the first lparen, then
# we'll identify abc, col1 and col2 as table names.
insert_stmt = parsed[0].token_first().value.lower() == "insert"
stream = extract_from_part(parsed[0], stop_at_punctuation=insert_stmt)
# Kludge: sqlparse mistakenly identifies insert statements as
# function calls due to the parenthesized column list, e.g. interprets
# "insert into foo (bar, baz)" as a function call to foo with arguments
# (bar, baz). So don't allow any identifiers in insert statements
# to have is_function=True
identifiers = extract_table_identifiers(stream,
allow_functions=not insert_stmt)
# In the case 'sche.<cursor>', we get an empty TableReference; remove that
return tuple(i for i in identifiers if i.name) | null |
5,742 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
resnet50 example
"""
import numpy as np
import mindspore.nn as nn # pylint: disable=C0414
from mindspore import Tensor
from mindspore.common.api import _cell_graph_executor
from mindspore.ops.operations import Add
from ...train_step_wrap import train_step_with_loss_warp
def conv3x3(in_channels, out_channels, stride=1, padding=1, pad_mode='pad'):
"""3x3 convolution """
return nn.Conv2d(in_channels, out_channels,
kernel_size=3, stride=stride, padding=padding, pad_mode=pad_mode)
def conv1x1(in_channels, out_channels, stride=1, padding=0, pad_mode='pad'):
"""1x1 convolution"""
return nn.Conv2d(in_channels, out_channels,
kernel_size=1, stride=stride, padding=padding, pad_mode=pad_mode)
class ResidualBlock(nn.Cell):
"""
residual Block
"""
expansion = 4
def __init__(self,
in_channels,
out_channels,
stride=1,
down_sample=False):
super(ResidualBlock, self).__init__()
out_chls = out_channels // self.expansion
self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0)
self.bn1 = nn.BatchNorm2d(out_chls)
self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=1)
self.bn2 = nn.BatchNorm2d(out_chls)
self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
self.bn3 = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
self.downsample = down_sample
self.conv_down_sample = conv1x1(in_channels, out_channels,
stride=stride, padding=0)
self.bn_down_sample = nn.BatchNorm2d(out_channels)
self.add = Add()
def construct(self, x):
"""
:param x:
:return:
"""
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample:
identity = self.conv_down_sample(identity)
identity = self.bn_down_sample(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class ResNet18(nn.Cell):
"""
resnet nn.Cell
"""
def __init__(self, block, num_classes=100):
super(ResNet18, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, pad_mode='pad')
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode='same')
self.layer1 = self.MakeLayer(
block, 2, in_channels=64, out_channels=256, stride=1)
self.layer2 = self.MakeLayer(
block, 2, in_channels=256, out_channels=512, stride=2)
self.layer3 = self.MakeLayer(
block, 2, in_channels=512, out_channels=1024, stride=2)
self.layer4 = self.MakeLayer(
block, 2, in_channels=1024, out_channels=2048, stride=2)
self.avgpool = nn.AvgPool2d(7, 1)
self.flatten = nn.Flatten()
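# 512 * block.expansion (2048 for ResidualBlock) matches the channel count coming out of layer4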
self.fc = nn.Dense(512 * block.expansion, num_classes)
def MakeLayer(self, block, layer_num, in_channels, out_channels, stride):
"""
make block layer
:param block:
:param layer_num:
:param in_channels:
:param out_channels:
:param stride:
:return:
"""
layers = []
resblk = block(in_channels, out_channels,
stride=stride, down_sample=True)
layers.append(resblk)
for _ in range(1, layer_num):
resblk = block(out_channels, out_channels, stride=1)
layers.append(resblk)
return nn.SequentialCell(layers)
def construct(self, x):
"""
:param x:
:return:
"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = self.flatten(x)
x = self.fc(x)
return x
class ResNet9(nn.Cell):
"""
resnet nn.Cell
"""
def __init__(self, block, num_classes=100):
super(ResNet9, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2)
self.layer1 = self.MakeLayer(
block, 1, in_channels=64, out_channels=256, stride=1)
self.layer2 = self.MakeLayer(
block, 1, in_channels=256, out_channels=512, stride=2)
self.layer3 = self.MakeLayer(
block, 1, in_channels=512, out_channels=1024, stride=2)
self.layer4 = self.MakeLayer(
block, 1, in_channels=1024, out_channels=2048, stride=2)
self.avgpool = nn.AvgPool2d(7, 1)
self.flatten = nn.Flatten()
self.fc = nn.Dense(512 * block.expansion, num_classes)
def MakeLayer(self, block, layer_num, in_channels, out_channels, stride):
"""
make block layer
:param block:
:param layer_num:
:param in_channels:
:param out_channels:
:param stride:
:return:
"""
layers = []
resblk = block(in_channels, out_channels,
stride=stride, down_sample=True)
layers.append(resblk)
for _ in range(1, layer_num):
resblk = block(out_channels, out_channels, stride=1)
layers.append(resblk)
return nn.SequentialCell(layers)
def construct(self, x):
"""
:param x:
:return:
"""
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = self.flatten(x)
x = self.fc(x)
return x
def resnet18():
return ResNet18(ResidualBlock, 10)
def METHOD_NAME():
return ResNet9(ResidualBlock, 10)
def test_compile():
net = resnet18()
input_data = Tensor(np.ones([1, 3, 224, 224]))
_cell_graph_executor.compile(net, input_data)
def test_train_step():
net = train_step_with_loss_warp(METHOD_NAME())
input_data = Tensor(np.ones([1, 3, 224, 224]))
label = Tensor(np.zeros([1, 10]))
_cell_graph_executor.compile(net, input_data, label)
def test_train_step_training():
net = train_step_with_loss_warp(METHOD_NAME())
input_data = Tensor(np.ones([1, 3, 224, 224]))
label = Tensor(np.zeros([1, 10]))
net.set_train()
_cell_graph_executor.compile(net, input_data, label) | null |
5,743 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops import operations as P
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
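# Reference implementation used to check the kernel: atanh(x) = 0.5 * ln((1 + x) / (1 - x))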
def atanh(x):
return 0.5 * np.log((1. + x) / (1. - x))
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: Gpu Atanh kernel.
Description: Double dtype input.
Expectation: success.
"""
x_np = np.array([[-0.16595599, 0.44064897, -0.99977124, -0.39533487],
[-0.7064882, -0.8153228, -0.62747955, -0.30887854],
[-0.20646505, 0.07763347, -0.16161098, 0.370439]]).astype(np.float64)
output_ms = P.Atanh()(Tensor(x_np))
expect = atanh(x_np)
assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atanh_fp32():
"""
Feature: Gpu Atanh kernel.
Description: Float32 dtype input.
Expectation: success.
"""
x_np = np.array([[-0.16595599, 0.44064897, -0.99977124, -0.39533487],
[-0.7064882, -0.8153228, -0.62747955, -0.30887854],
[-0.20646505, 0.07763347, -0.16161098, 0.370439]]).astype(np.float32)
output_ms = P.Atanh()(Tensor(x_np))
expect = atanh(x_np)
assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atanh_fp16():
"""
Feature: Gpu Atanh kernel.
Description: Float16 dtype input.
Expectation: success.
"""
x_np = np.array([[-0.16595599, 0.44064897, -0.99977124, -0.39533487],
[-0.7064882, -0.8153228, -0.62747955, -0.30887854],
[-0.20646505, 0.07763347, -0.16161098, 0.370439]]).astype(np.float16)
output_ms = P.Atanh()(Tensor(x_np))
expect = atanh(x_np)
assert np.allclose(output_ms.asnumpy(), expect, 1e-3, 1e-3)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atanh_complex64():
"""
Feature: Gpu Atanh kernel.
Description: Complex64 dtype input.
Expectation: success.
"""
x_np = np.array([[2+3j, 4+5j, 6-7j, 8+9j],
[1+3j, 2+5j, 5-7j, 7+9j],
[3+3j, 4+5j, 4-7j, 6+9j]]).astype(np.complex64)
output_ms = P.Atanh()(Tensor(x_np))
expect = atanh(x_np)
assert np.allclose(output_ms.asnumpy(), expect)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atanh_complex128():
"""
Feature: Gpu Atanh kernel.
Description: Complex128 dtype input.
Expectation: success.
"""
x_np = np.array([[2+3j, 4+5j, 6-7j, 8+9j],
[1+3j, 2+5j, 5-7j, 7+9j],
[3+3j, 4+5j, 4-7j, 6+9j]]).astype(np.complex128)
output_ms = P.Atanh()(Tensor(x_np))
expect = atanh(x_np)
assert np.allclose(output_ms.asnumpy(), expect)
def test_atanh_forward_tensor_api(nptype):
"""
Feature: test atanh forward tensor api for given input dtype.
Description: test inputs for given input dtype.
Expectation: the result match with expected result.
"""
x = Tensor(np.array([0, -0.5]).astype(nptype))
output = x.atanh()
expected = np.array([0.0, -0.54930615]).astype(nptype)
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_atanh_forward_float32_tensor_api():
"""
Feature: test atanh forward tensor api.
Description: test float32 inputs.
Expectation: the result match with expected result.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
test_atanh_forward_tensor_api(np.float32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
test_atanh_forward_tensor_api(np.float32) | null |
5,744 | # SPDX-License-Identifier: GPL-2.0-or-later
#
# Copyright (C) 2010-2016 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <[email protected]>
"""Functions for NetworkManager interaction"""
import gi
from gi.repository import GLib
try:
gi.require_version('NM', '1.0')
except ValueError:
_nm_imported = False
else:
try:
from gi.repository import NM
_nm_imported = True
except (ImportError, ValueError, GLib.Error):
_nm_imported = False
_nm_client = None
from firewall import errors
from firewall.errors import FirewallError
from firewall.core.logger import log
import dbus
def check_nm_imported():
"""Check function to raise a MISSING_IMPORT error if the import of NM failed
"""
if not _nm_imported:
raise FirewallError(errors.MISSING_IMPORT, "gi.repository.NM = 1.0")
def METHOD_NAME():
"""Returns true if NM has been properly imported
@return True if import was successful, False otherwirse
"""
return _nm_imported
def nm_get_client():
"""Returns the NM client object or None if the import of NM failed
@return NM.Client instance if import was successful, None otherwise
"""
global _nm_client
if not _nm_client:
_nm_client = NM.Client.new(None)
return _nm_client
def nm_get_zone_of_connection(connection):
"""Get zone of connection from NM
@param connection name
@return zone string setting of connection, empty string if not set, None if connection is unknown
"""
check_nm_imported()
con = nm_get_client().get_connection_by_uuid(connection)
if con is None:
return None
setting_con = con.get_setting_connection()
if setting_con is None:
return None
try:
if con.get_flags() & (NM.SettingsConnectionFlags.NM_GENERATED
| NM.SettingsConnectionFlags.NM_VOLATILE):
return ""
except AttributeError:
# Prior to NetworkManager 1.12, we can only guess
# that a connection was generated/volatile.
if con.get_unsaved():
return ""
zone = setting_con.get_zone()
if zone is None:
zone = ""
return zone
def nm_set_zone_of_connection(zone, connection):
"""Set the zone for a connection
@param zone name
@param connection name
@return True if zone was set, else False
"""
check_nm_imported()
con = nm_get_client().get_connection_by_uuid(connection)
if con is None:
return False
setting_con = con.get_setting_connection()
if setting_con is None:
return False
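# An empty zone string means "use the default zone"; NetworkManager stores that as None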
if zone == "":
zone = None
setting_con.set_property("zone", zone)
return con.commit_changes(True, None)
def nm_get_connections(connections, connections_name):
"""Get active connections from NM
@param connections return dict
@param connections_name return dict
"""
connections.clear()
connections_name.clear()
check_nm_imported()
active_connections = nm_get_client().get_active_connections()
for active_con in active_connections:
# ignore vpn devices for now
if active_con.get_vpn():
continue
name = active_con.get_id()
uuid = active_con.get_uuid()
devices = active_con.get_devices()
connections_name[uuid] = name
for dev in devices:
ip_iface = dev.get_ip_iface()
if ip_iface:
connections[ip_iface] = uuid
def nm_get_interfaces():
"""Get active interfaces from NM
@returns list of interface names
"""
check_nm_imported()
active_interfaces = []
for active_con in nm_get_client().get_active_connections():
# ignore vpn devices for now
if active_con.get_vpn():
continue
try:
con = active_con.get_connection()
if con.get_flags() & (NM.SettingsConnectionFlags.NM_GENERATED
| NM.SettingsConnectionFlags.NM_VOLATILE):
continue
except AttributeError:
# Prior to NetworkManager 1.12, we can only guess
# that a connection was generated/volatile.
if con.get_unsaved():
continue
for dev in active_con.get_devices():
ip_iface = dev.get_ip_iface()
if ip_iface:
active_interfaces.append(ip_iface)
return active_interfaces
def nm_get_interfaces_in_zone(zone):
interfaces = []
for interface in nm_get_interfaces():
conn = nm_get_connection_of_interface(interface)
if zone == nm_get_zone_of_connection(conn):
interfaces.append(interface)
return interfaces
def nm_get_device_by_ip_iface(interface):
"""Get device from NM which has the given IP interface
@param interface name
@returns NM.Device instance or None
"""
check_nm_imported()
for device in nm_get_client().get_devices():
ip_iface = device.get_ip_iface()
if ip_iface is None:
continue
if ip_iface == interface:
return device
return None
def nm_get_connection_of_interface(interface):
"""Get connection from NM that is using the interface
@param interface name
@returns connection that is using interface or None
"""
check_nm_imported()
device = nm_get_device_by_ip_iface(interface)
if device is None:
return None
active_con = device.get_active_connection()
if active_con is None:
return None
try:
con = active_con.get_connection()
if con.get_flags() & NM.SettingsConnectionFlags.NM_GENERATED:
return None
except AttributeError:
# Prior to NetworkManager 1.12, we can only guess
# that a connection was generated.
if con.get_unsaved():
return None
return active_con.get_uuid()
def nm_get_bus_name():
if not _nm_imported:
return None
try:
bus = dbus.SystemBus()
obj = bus.get_object(NM.DBUS_INTERFACE, NM.DBUS_PATH)
name = obj.bus_name
del obj, bus
return name
except Exception:
log.debug2("Failed to get bus name of NetworkManager")
return None
def nm_get_dbus_interface():
if not _nm_imported:
return ""
return NM.DBUS_INTERFACE | null |
5,745 | """
Implements an object to handle manual object extraction.
.. include:: ../include/links.rst
"""
import inspect
from IPython import embed
import numpy as np
from pypeit import datamodel, msgs
class ManualExtractionObj(datamodel.DataContainer):
"""
A data container holding the arguments for how to perform the
manual extraction of a spectrum.
A list of these objects is generated in pypeit.py
to perform a set of user-defined extractions.
For an example of how to define a series of manual extractions in
the pypeit input file, see :ref:`pypeit_file`.
The datamodel attributes are:
.. include:: ../include/class_datamodel_manualextractionobj.rst
Args:
frame (:obj:`str`):
The name of the fits file for a manual extraction
spat (`numpy.ndarray`_): Array of spatial positions to hand extract
spec (`numpy.ndarray`_): Array of spectral positions to hand extract
det (`numpy.ndarray`_): Array of detectors for hand extraction.
This must be aligned with spec and spat.
The values can be negative (for negative images)
fwhm (`numpy.ndarray`_): Array of FWHM for hand extraction.
This must be aligned with spec and spat.
boxcar_rad (`numpy.ndarray`_, optional): Array of boxcar_radii for hand extraction.
This must be aligned with spec and spat.
It is to be in *pixels*, not arcsec.
This is only intended for multi-slit reductions (not Echelle)
"""
version = '1.1.0'
datamodel = {
'frame': dict(otype=str,
descr='The name of the fits file for a manual extraction'),
'detname': dict(otype=np.ndarray, atype=str,
descr='detectors name for hand extraction.'),
'spec': dict(otype=np.ndarray, atype=np.floating,
descr='spectral positions to hand extract'),
'spat': dict(otype=np.ndarray, atype=np.floating,
descr='spatial positions to hand extract'),
'fwhm': dict(otype=np.ndarray, atype=np.floating,
descr='FWHMs for hand extractions'),
'neg': dict(otype=np.ndarray, atype=np.bool_,
descr='Flags indicating which hand extract is a negative trace'),
'boxcar_rad': dict(otype=np.ndarray, atype=np.floating,
descr='Boxcar radius for hand extractions (optional)'),
}
@classmethod
def by_fitstbl_input(cls, frame: str, inp: str, spectrograph):
"""Generate the object from an entry in the fitstbl
Args:
frame (str):
filename
inp (str):
String specifying the manual aperture: ``det:spat:spec:fwhm``;
e.g., ``1:1181.8:3820.6:3.``
spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`):
The `Spectrograph` instance that sets the instrument used to
take the observations. Used to set check that the input value
of the mosaic detectors are allowed for this spectrograph
Returns:
ManualExtractionObj:
"""
# Generate a dict
idict = dict(spat=[], spec=[], detname=[], fwhm=[], neg=[], boxcar_rad=[])
m_es = inp.split(';')
for m_e in m_es:
parse = m_e.split(':')
# det_strip will be a list of a single number (no mosaic) or 2 numbers (mosaic)
det_strip = [int(d) for d in parse[0].strip('()').split(',')]
# check if it's negative object (i.e., if the det number is negative)
if np.all([item < 0 for item in det_strip]):
idict['neg'] += [True]
det_strip = [item * -1 for item in det_strip]
else:
idict['neg'] += [False]
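# Two comma-separated detector numbers denote a mosaic; a single number is an individual detector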
if len(det_strip) == 2 and tuple(det_strip) in spectrograph.allowed_mosaics:
# we use detname, which is a string (e.g., 'DET01', 'MSC01')
idict['detname'] += [spectrograph.get_det_name(tuple(det_strip))]
elif len(det_strip) == 1:
idict['detname'] += [spectrograph.get_det_name(det_strip[0])]
else:
msgs.error(f'Wrong input for detectors in the manual extraction parameters: {parse[0]}')
idict['spat'] += [float(parse[1])]
idict['spec'] += [float(parse[2])]
idict['fwhm'] += [float(parse[3])]
# Boxcar?
if len(parse) >= 5:
idict['boxcar_rad'] += [float(parse[4])]
else:
idict['boxcar_rad'] += [-1.]
# Build me
return cls(frame=frame, spat=np.array(idict['spat']),
spec=np.array(idict['spec']),
fwhm=np.array(idict['fwhm']),
detname=np.array(idict['detname']),
neg=np.array(idict['neg']),
boxcar_rad=np.array(idict['boxcar_rad']))
def __init__(self, frame=None, spat=None, spec=None, detname=None,
fwhm=None, neg=None, boxcar_rad=None):
# Parse
args, _, _, values = inspect.getargvalues(inspect.currentframe())
d = dict([(k,values[k]) for k in args[1:]])
# Setup the DataContainer
datamodel.DataContainer.__init__(self, d=d)
def METHOD_NAME(self):
"""Validate
A couple of quick checks..
Raises:
ValueError: Raised if one of the arrays is not set or if they don't have the same length
"""
if len(self.spec) != len(self.spat):
raise ValueError("spec and spat not of the same length")
if len(self.fwhm) != len(self.detname):
raise ValueError("FWHM and not det not of the same length")
def dict_for_objfind(self, detname, neg=False):
"""
Repackage into a dict for the extraction code
Args:
detname (str):
Detector name under consideration
neg (bool, optional):
If True, return the negative image requests
Returns:
dict or None: To be passed into reduce.find_objects()
"""
# Find the ones we want
if neg:
gd_det = (self.neg == True) & (self.detname == detname)
else:
gd_det = (self.neg == False) & (self.detname == detname)
# None?
if not np.any(gd_det):
return None
# Fill
manual_extract_dict = {}
for key in ['spec', 'spat', 'detname', 'fwhm', 'boxcar_rad']:
manual_extract_dict[key] = self[key][gd_det]
# Return
return manual_extract_dict
| null |
5,746 | import pytest
import torch
from kornia.filters import BoxBlur, box_blur
from kornia.testing import BaseTester, tensor_to_gradcheck_var
class TestBoxBlur(BaseTester):
@pytest.mark.parametrize('kernel_size', [5, (3, 5)])
def test_smoke(self, kernel_size, device, dtype):
inpt = torch.rand(1, 1, 10, 10, device=device, dtype=dtype)
bb = BoxBlur(kernel_size, 'reflect')
actual = bb(inpt)
assert actual.shape == (1, 1, 10, 10)
@pytest.mark.parametrize('kernel_size', [5, (3, 5)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_separable(self, batch_size, kernel_size, device, dtype):
inpt = torch.randn(batch_size, 3, 10, 10, device=device, dtype=dtype)
out1 = box_blur(inpt, kernel_size, separable=False)
out2 = box_blur(inpt, kernel_size, separable=True)
self.assert_close(out1, out2)
def METHOD_NAME(self):
inpt = torch.rand(1, 1, 3, 3)
with pytest.raises(Exception) as errinfo:
box_blur(inpt, (1,))
assert '2D Kernel size should have a length of 2.' in str(errinfo)
@pytest.mark.parametrize('kernel_size', [(3, 3), 5, (5, 7)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_cardinality(self, batch_size, kernel_size, device, dtype):
inp = torch.zeros(batch_size, 3, 4, 4, device=device, dtype=dtype)
blur = BoxBlur(kernel_size)
actual = blur(inp)
expected = (batch_size, 3, 4, 4)
assert actual.shape == expected
@pytest.mark.parametrize('batch_size', [1, 2])
def test_kernel_3x3(self, batch_size, device, dtype):
inp = torch.tensor(
[
[
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0, 2.0],
[2.0, 2.0, 2.0, 2.0, 2.0],
]
]
],
device=device,
dtype=dtype,
).repeat(batch_size, 1, 1, 1)
kernel_size = (3, 3)
actual = box_blur(inp, kernel_size)
expected = torch.tensor(35.0 * batch_size, device=device, dtype=dtype)
self.assert_close(actual.sum(), expected)
@pytest.mark.parametrize('batch_size', [None, 1, 3])
def test_kernel_5x5(self, batch_size, device, dtype):
inp = torch.tensor(
[
[
[
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0, 2.0],
[2.0, 2.0, 2.0, 2.0, 2.0],
]
]
],
device=device,
dtype=dtype,
)
if batch_size:
inp = inp.repeat(batch_size, 1, 1, 1)
kernel_size = (5, 5)
actual = box_blur(inp, kernel_size)
expected = inp.sum((1, 2, 3)) / torch.mul(*kernel_size)
self.assert_close(actual[:, 0, 2, 2], expected)
def test_kernel_3x1(self, device, dtype):
inp = torch.arange(16, device=device, dtype=dtype).view(1, 1, 4, 4)
ky, kx = 3, 1
actual = box_blur(inp, (ky, kx))
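# box_blur pads with 'reflect' by default, so the row above row 0 mirrors row 1: (4 + 0 + 4) / 3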
self.assert_close(actual[0, 0, 0, 0], torch.tensor((4 + 0 + 4) / 3, device=device, dtype=dtype))
self.assert_close(actual[0, 0, 1, 0], torch.tensor((0 + 4 + 8) / 3, device=device, dtype=dtype))
@pytest.mark.parametrize('separable', [False, True])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_noncontiguous(self, batch_size, separable, device, dtype):
inp = torch.rand(3, 5, 5, device=device, dtype=dtype).expand(batch_size, -1, -1, -1)
actual = box_blur(inp, 3, separable=separable)
assert actual.is_contiguous()
@pytest.mark.parametrize('kernel_size', [(3, 3), 5, (5, 7)])
def test_gradcheck(self, kernel_size, device, dtype):
batch_size, channels, height, width = 1, 2, 5, 4
img = torch.rand(batch_size, channels, height, width, device=device, dtype=dtype)
img = tensor_to_gradcheck_var(img) # to var
fast_mode = 'cpu' in str(device) # Disable fast mode for GPU
self.gradcheck(box_blur, (img, kernel_size), fast_mode=fast_mode)
@pytest.mark.parametrize('kernel_size', [(3, 3), 5, (5, 7)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_module(self, kernel_size, batch_size, device, dtype):
op = box_blur
op_module = BoxBlur
img = torch.rand(batch_size, 3, 4, 5, device=device, dtype=dtype)
actual = op_module(kernel_size)(img)
expected = op(img, kernel_size)
self.assert_close(actual, expected)
@pytest.mark.parametrize('separable', [False, True])
@pytest.mark.parametrize('kernel_size', [5, (5, 7)])
@pytest.mark.parametrize('batch_size', [1, 2])
def test_dynamo(self, batch_size, kernel_size, separable, device, dtype, torch_optimizer):
inpt = torch.ones(batch_size, 3, 10, 10, device=device, dtype=dtype)
op = BoxBlur(kernel_size, separable=separable)
op_optimized = torch_optimizer(op)
self.assert_close(op(inpt), op_optimized(inpt)) | null |
5,747 | from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from jaseci.utils.utils import TestCaseHelper
import jaseci.jsorc.live_actions as lact
from django.test import TestCase
import uuid
import base64
import os
class TestLLWall(TestCaseHelper, TestCase):
"""Test the authorized user node API"""
def METHOD_NAME(self):
super().METHOD_NAME()
self.user = get_user_model().objects.create_user(
"[email protected]", "password"
)
self.master = self.user.get_master()
self.client = APIClient()
self.client.force_authenticate(self.user)
ll_loc = os.path.dirname(__file__) + "/ll_wall.jac"
ll_file = base64.b64encode(open(ll_loc).read().encode()).decode()
payload = {
"op": "sentinel_register",
"name": "Something",
"code": ll_file,
"encoded": True,
}
lact.load_local_actions(os.path.dirname(__file__) + "/infer.py")
res = self.client.post(reverse(f'jac_api:{payload["op"]}'), payload)
self.snt = self.master._h.get_obj(self.master.jid, res.data[0]["jid"])
self.gph = self.master._h.get_obj(self.master.jid, res.data[1]["jid"])
def tearDown(self):
super().tearDown()
def run_walker(self, w_name, ctx, prime=None):
"""Helper to make calls to execute walkers"""
if not prime:
payload = {
"snt": self.snt.jid,
"name": w_name,
"nd": self.gph.jid,
"ctx": ctx,
}
else:
payload = {"snt": self.snt.jid, "name": w_name, "nd": prime, "ctx": ctx}
res = self.client.post(reverse("jac_api:walker_run"), payload, format="json")
return res.data
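    # Illustrative note (mirrors calls made in the tests below): walkers are
    # executed against the sentinel/graph registered during setup, e.g.
    #   report = self.run_walker("get_latest_day", {"show_report": 1})["report"]
    # and a specific node can be targeted by passing its jid via ``prime``.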
def graph_node_set(self, nd_id, ctx):
"""Helper to set node context"""
payload = {"snt": self.snt.jid, "nd": nd_id, "ctx": ctx}
res = self.client.post(
reverse("jac_api:graph_node_set"), payload, format="json"
)
return res.data
def test_ll_wall_get_gen_day(self):
"""Test get_gen_day walker response time after cerify day"""
num_workettes = 3
# generate random day workettes
self.run_walker(
"gen_day_workettes", {"date": "2021-07-12", "num_workettes": num_workettes}
)
data = self.run_walker("get_latest_day", {"show_report": 1})["report"]
day_id = data[0][1]["jid"]
day_date = data[0][1]["context"]["day"]
day_note = data[0][1]["context"]["note"]
data = self.run_walker("get_workettes_deep", {"show_report": 1}, prime=day_id)[
"report"
]
self.assertEqual(len(data[0]), num_workettes)
# certify day, should return day highlights
data = self.run_walker(
"set_day_highlight",
{
"highlight_items": [
{
"id": data[0][0],
"type": "Most proud accomplishment",
"color": "#464ff6",
"icon": "0x1F3C6",
},
{
"id": data[0][1],
"type": "Made You Happiest",
"color": "#6e30dd",
"icon": "0x1F604",
},
{
"id": data[0][2],
"type": "Required the Most Work",
"color": "#b926df",
"icon": "0x1F4AA",
},
]
},
prime=day_id,
)["report"]
self.assertEqual(data[0][0][0][0][0]["jid"], day_id)
self.assertEqual(data[0][0][0][0][1], day_date)
self.assertEqual(data[0][0][0][1], day_note)
# data[0][0][0][2] is the highlight items
self.assertEqual(len(data[0][0][0][2]), 3)
def test_check_deep_write(self):
"""Test get_gen_day walker response time after cerify day"""
ret = self.run_walker("check_deep_write_start", {})
self.snt._h.clear_cache()
ret = self.run_walker("check_deep_write_update", {})
self.snt._h.clear_cache()
ret = self.run_walker("check_deep_write_report", {})
self.assertEqual(ret["report"], [{"a": {"b": {"c": [5, 6]}}}]) | null |
5,748 | import os
import unittest
from unittest.mock import patch
from biokbase.narrative.staging.helper import Helper
class StagingHelperTest(unittest.TestCase):
def METHOD_NAME(self):
self.good_fake_token = "good_fake_token"
os.environ["KB_AUTH_TOKEN"] = self.good_fake_token
self.staging_helper = Helper()
def test_missing_token(self):
os.environ["KB_AUTH_TOKEN"] = ""
with self.assertRaises(ValueError) as context:
Helper()
self.assertEqual("Cannot retrieve auth token", str(context.exception))
def test_token(self):
self.assertEqual(self.good_fake_token, self.staging_helper._token)
def test_staging_url(self):
self.assertTrue(
"kbase.us/services/staging_service" in self.staging_helper._staging_url
)
@unittest.skip("Skipped: test contacts the staging service, but should not")
def test_unauthorized_token(self):
with self.assertRaises(ValueError) as context:
self.staging_helper.list()
self.assertTrue("Reason: Unauthorized" in str(context.exception))
self.assertTrue("Error code: 401" in str(context.exception))
def mock_fetch_url(
end_point, values=None, headers=None, method="GET", save_path=None
):
if "list" in end_point:
print("mocking __fetch_url list endpoint")
return '[{"path": "tgu/test_file_1", "isFolder": false},\
{"path": "tgu/test_dir", "isFolder": true},\
{"path": "tgu/test_dir/test_file_2", "isFolder": false}]'
elif "jgi-metadata" in end_point:
print("mocking __fetch_url jgi-metadata endpoint")
return '{"file_name": "test_file", "file_status": "BACKUP_COMPLETE"}'
elif "metadata" in end_point:
print("mocking __fetch_url metadata endpoint")
return '{"head": "head_line", "tail": "tail_line", "lineCount": 10}'
elif "search" in end_point:
print("mocking __fetch_url search endpoint")
return '[{"isFolder": false, "mtime": 1515526154896, "name": "LMS-PROC-315.pdf"}]'
elif "delete" in end_point:
print("mocking __fetch_url delete endpoint")
return "successfully deleted tgu2/test.pdf"
elif "download" in end_point:
print("mocking __fetch_url download endpoint")
elif "mv" in end_point:
print("mocking __fetch_url mv endpoint")
return "successfully moved tgu2/test.pdf to tgu2/test_1.pdf"
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_list(self, _fetch_url):
file_list = self.staging_helper.list()
self.assertTrue("tgu/test_file_1" in file_list)
self.assertTrue("tgu/test_dir/test_file_2" in file_list)
self.assertTrue("tgu/test_dir" not in file_list)
def test_missing_path(self):
with self.assertRaises(ValueError) as context:
self.staging_helper.metadata()
self.assertEqual("Must provide path argument", str(context.exception))
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_metadata(self, _fetch_url):
metadata = self.staging_helper.metadata("test_fake_file")
self.assertTrue("head" in metadata)
self.assertEqual(metadata.get("head"), "head_line")
self.assertTrue("tail" in metadata)
self.assertEqual(metadata.get("tail"), "tail_line")
self.assertTrue("lineCount" in metadata)
self.assertEqual(metadata.get("lineCount"), 10)
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_jgi_metadata(self, _fetch_url):
metadata = self.staging_helper.jgi_metadata("test_fake_file")
self.assertTrue("file_name" in metadata)
self.assertEqual(metadata.get("file_name"), "test_file")
self.assertTrue("file_status" in metadata)
self.assertEqual(metadata.get("file_status"), "BACKUP_COMPLETE")
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_search(self, _fetch_url):
search_ret = self.staging_helper.search("test_fake_file")
self.assertTrue(isinstance(search_ret, (list)))
element = search_ret[0]
self.assertTrue("isFolder" in element)
self.assertFalse(element.get("isFolder"))
self.assertTrue("name" in element)
self.assertEqual(element.get("name"), "LMS-PROC-315.pdf")
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_delete(self, _fetch_url):
delete_ret = self.staging_helper.delete("test_fake_file")
self.assertTrue("server_response" in delete_ret)
self.assertEqual(
delete_ret.get("server_response"), "successfully deleted tgu2/test.pdf"
)
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_download(self, _fetch_url):
download_ret = self.staging_helper.download("test_fake_file")
self.assertTrue("test_fake_file" in download_ret)
@patch.object(Helper, "_Helper__fetch_url", side_effect=mock_fetch_url)
def test_mv(self, _fetch_url):
mv_ret = self.staging_helper.mv("test.pdf ", "test_1.pdf")
self.assertTrue("server_response" in mv_ret)
self.assertEqual(
mv_ret.get("server_response"),
"successfully moved tgu2/test.pdf to tgu2/test_1.pdf",
)
if __name__ == "__main__":
unittest.main() | null |
5,749 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for taxi_utils_bqml.py."""
import os
import types
import apache_beam as beam
import tensorflow as tf
from tensorflow import estimator as tf_estimator
import tensorflow_model_analysis as tfma
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import schema_utils
from tfx.components.trainer import executor as trainer_executor
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.util import tfxio_utils
from tfx.dsl.io import fileio
from tfx.examples.bigquery_ml import taxi_utils_bqml
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_utils
from tfx_bsl.tfxio import tf_example_record
from tensorflow_metadata.proto.v0 import schema_pb2
class TaxiUtilsTest(tf.test.TestCase):
def setUp(self):
super().setUp()
self._testdata_path = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
'components/testdata')
def testUtils(self):
key = 'fare'
xfm_key = taxi_utils_bqml._transformed_name(key)
self.assertEqual(xfm_key, 'fare_xf')
def testPreprocessingFn(self):
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
feature_spec = taxi_utils_bqml._get_raw_feature_spec(schema)
working_dir = self.get_temp_dir()
transform_graph_path = os.path.join(working_dir, 'transform_graph')
transformed_examples_path = os.path.join(
working_dir, 'transformed_examples')
# Run very simplified version of executor logic.
# TODO(kestert): Replace with tft_unit.assertAnalyzeAndTransformResults.
# Generate legacy `DatasetMetadata` object. Future version of Transform
# will accept the `Schema` proto directly.
legacy_metadata = dataset_metadata.DatasetMetadata(
schema_utils.schema_from_feature_spec(feature_spec))
tfxio = tf_example_record.TFExampleRecord(
file_pattern=os.path.join(self._testdata_path,
'csv_example_gen/Split-train/*'),
telemetry_descriptors=['Tests'],
schema=legacy_metadata.schema)
with beam.Pipeline() as p:
with tft_beam.Context(temp_dir=os.path.join(working_dir, 'tmp')):
examples = p | 'ReadTrainData' >> tfxio.BeamSource()
(transformed_examples, transformed_metadata), transform_fn = (
(examples, tfxio.TensorAdapterConfig())
| 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
taxi_utils_bqml.preprocessing_fn))
# WriteTransformFn writes transform_fn and metadata to subdirectories
# tensorflow_transform.SAVED_MODEL_DIR and
# tensorflow_transform.TRANSFORMED_METADATA_DIR respectively.
# pylint: disable=expression-not-assigned
(transform_fn
| 'WriteTransformFn' >> tft_beam.WriteTransformFn(
transform_graph_path))
encoder = tft.coders.ExampleProtoCoder(transformed_metadata.schema)
(transformed_examples
| 'EncodeTrainData' >> beam.Map(encoder.encode)
| 'WriteTrainData' >> beam.io.WriteToTFRecord(
os.path.join(transformed_examples_path,
'Split-train/transformed_examples.gz'),
coder=beam.coders.BytesCoder()))
# pylint: enable=expression-not-assigned
# Verify the output matches golden output.
# NOTE: we don't verify that transformed examples match golden output.
expected_transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(
self._testdata_path,
'transform/transform_graph/transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
transformed_schema = io_utils.parse_pbtxt_file(
os.path.join(transform_graph_path,
'transformed_metadata/schema.pbtxt'),
schema_pb2.Schema())
# Clear annotations so we only have to test main schema.
for feature in transformed_schema.feature:
feature.ClearField('annotation')
transformed_schema.ClearField('annotation')
self.assertEqual(transformed_schema, expected_transformed_schema)
def METHOD_NAME(self):
temp_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
schema_file = os.path.join(self._testdata_path, 'schema_gen/schema.pbtxt')
trainer_fn_args = trainer_executor.TrainerFnArgs(
train_files=os.path.join(
self._testdata_path,
'transform/transformed_examples/Split-train/*.gz'),
transform_output=os.path.join(self._testdata_path,
'transform/transform_graph/'),
serving_model_dir=os.path.join(temp_dir, 'serving_model_dir'),
eval_files=os.path.join(
self._testdata_path,
'transform/transformed_examples/Split-eval/*.gz'),
schema_file=schema_file,
train_steps=1,
eval_steps=1,
base_model=os.path.join(self._testdata_path,
'trainer/previous/Format-Serving'),
data_accessor=DataAccessor(
tf_dataset_factory=tfxio_utils.get_tf_dataset_factory_from_artifact(
[standard_artifacts.Examples()], []),
record_batch_factory=None,
data_view_decode_fn=None))
schema = io_utils.parse_pbtxt_file(schema_file, schema_pb2.Schema())
training_spec = taxi_utils_bqml.trainer_fn(trainer_fn_args, schema)
estimator = training_spec['estimator']
train_spec = training_spec['train_spec']
eval_spec = training_spec['eval_spec']
eval_input_receiver_fn = training_spec['eval_input_receiver_fn']
self.assertIsInstance(estimator, tf_estimator.Estimator)
self.assertIsInstance(train_spec, tf_estimator.TrainSpec)
self.assertIsInstance(eval_spec, tf_estimator.EvalSpec)
self.assertIsInstance(eval_input_receiver_fn, types.FunctionType)
# Train for one step, then eval for one step.
eval_result, exports = tf_estimator.train_and_evaluate(
estimator, train_spec, eval_spec)
print(eval_result, exports)
self.assertGreater(eval_result['loss'], 0.0)
self.assertEqual(len(exports), 1)
self.assertGreaterEqual(len(fileio.listdir(exports[0])), 1)
# Export the eval saved model.
eval_savedmodel_path = tfma.export.export_eval_savedmodel(
estimator=estimator,
export_dir_base=path_utils.eval_model_dir(temp_dir),
eval_input_receiver_fn=eval_input_receiver_fn)
self.assertGreaterEqual(len(fileio.listdir(eval_savedmodel_path)), 1)
# Test exported serving graph.
with tf.compat.v1.Session() as sess:
metagraph_def = tf.compat.v1.saved_model.loader.load(
sess, [tf.saved_model.SERVING], exports[0])
self.assertIsInstance(metagraph_def, tf.compat.v1.MetaGraphDef)
if __name__ == '__main__':
tf.test.main() | null |
5,750 | """Test snux platform."""
from mpf.tests.MpfFakeGameTestCase import MpfFakeGameTestCase, MagicMock
from mpf.platforms.interfaces.driver_platform_interface import PulseSettings, HoldSettings
class TestSnux(MpfFakeGameTestCase):
def get_config_file(self):
return 'config.yaml'
def METHOD_NAME(self):
return 'tests/machine_files/snux/'
def get_platform(self):
# no force platform. we are testing virtual + snux
return False
def _get_snux_platform(self):
return self.machine.hardware_platforms['snux']
def _get_driver(self, number):
return self._get_snux_platform().drivers[number]
def test_ac_relay_default(self):
# outside game it should be default off
c_ac_relay = self.machine.coils["c_ac_relay"]
self.assertEqual("disabled", c_ac_relay.hw_driver.state)
self.start_game()
# during a game it should be default on to allow fast flashers
self.assertEqual("enabled", c_ac_relay.hw_driver.state)
# after the game ended it should turn back to default off
self.drain_all_balls()
self.drain_all_balls()
self.drain_all_balls()
self.assertGameIsNotRunning()
self.assertEqual("disabled", c_ac_relay.hw_driver.state)
def test_ac_switch_and_pulse(self):
# test diag led flashing. otherwise snux is not running
c_diag_led_driver = self.machine.coils["c_diag_led_driver"]
c_diag_led_driver.pulse = MagicMock()
self.advance_time_and_run(1)
c_diag_led_driver.pulse.assert_called_with(250)
# test if a and c side relays were properly loaded
self.assertEqual(2, len(self._get_snux_platform().drivers))
driver_11 = self._get_driver("c11")
driver_12 = self._get_driver("c12")
c_ac_relay = self.machine.coils["c_ac_relay"]
c_ac_relay.enable = MagicMock()
c_ac_relay.disable = MagicMock()
driver_11.pulse = MagicMock(return_value=0)
driver_11.enable = MagicMock()
driver_11.disable = MagicMock()
driver_12.pulse = MagicMock(return_value=0)
driver_12.enable = MagicMock()
driver_12.disable = MagicMock()
# a side should be triggered first. c side should wait
self.machine.coils["c_side_a1"].pulse(50)
self.machine.coils["c_side_c1"].pulse(50)
self.advance_time_and_run(0.001)
driver_11.pulse.assert_called_with(PulseSettings(power=1.0, duration=50))
driver_11.pulse = MagicMock()
assert not driver_12.pulse.called
assert not c_ac_relay.enable.called
# after 50ms + 75ms transition c side should get triggered
self.advance_time_and_run(0.075)
c_ac_relay.enable.assert_called_with()
c_ac_relay.enable = MagicMock()
assert not driver_11.pulse.called
# after the relay switches. pulse the other coil
self.advance_time_and_run(0.075)
driver_11.pulse.assert_called_with(PulseSettings(power=1.0, duration=50))
# it should switch back to a side when idle
self.advance_time_and_run(0.052)
c_ac_relay.disable.assert_called_with()
c_ac_relay.disable = MagicMock()
def test_ac_switch_and_enable(self):
driver_12 = self._get_driver("c12")
driver_12.enable = MagicMock()
driver_12.disable = MagicMock()
c_ac_relay = self.machine.coils["c_ac_relay"].hw_driver
c_ac_relay.enable = MagicMock()
c_ac_relay.disable = MagicMock()
self.advance_time_and_run(0.10)
# test enable on c side
self.machine.coils["c_side_c2"].enable()
self.machine_run()
c_ac_relay.enable.assert_called_with(PulseSettings(power=1.0, duration=10), HoldSettings(power=1.0, duration=None))
c_ac_relay.enable = MagicMock()
assert not driver_12.enable.called
self.advance_time_and_run(0.1)
driver_12.enable.assert_called_with(PulseSettings(power=1.0, duration=10), HoldSettings(power=0.5, duration=None))
driver_12.enable = MagicMock()
# a side has preference. it should transition
self.machine.coils["c_side_a2"].enable()
self.machine_run()
driver_12.disable.assert_called_with()
c_ac_relay.disable.assert_called_with()
c_ac_relay.disable = MagicMock()
assert not driver_12.enable.called
# it should enable a side coils now
self.advance_time_and_run(0.075)
driver_12.enable.assert_called_with(PulseSettings(power=1.0, duration=10), HoldSettings(power=0.5, duration=None))
# disable driver on a side.
self.machine.coils["c_side_a2"].disable()
self.advance_time_and_run(0.2)
def test_flippers(self):
self.machine.flippers["f_test_single"].enable() | null |
5,751 | import json
import os
from jinja2 import Template, select_autoescape
from conan.api.output import cli_out_write
from conan.cli.formatters.graph.graph_info_text import filter_graph
from conan.cli.formatters.graph.info_graph_dot import graph_info_dot
from conan.cli.formatters.graph.info_graph_html import graph_info_html
from conans.client.graph.graph import BINARY_CACHE, \
BINARY_DOWNLOAD, BINARY_BUILD, BINARY_MISSING, BINARY_UPDATE
from conans.client.graph.graph_error import GraphConflictError
from conans.client.installer import build_id
from conans.util.files import load
# FIXME: Check all this code when format_graph_[html/dot] use serialized graph
class _PrinterGraphItem(object):
def __init__(self, _id, node, is_build_time_node):
self.id = _id
self._ref = node.ref
self._conanfile = node.conanfile
self._is_build_time_node = is_build_time_node
self.package_id = node.package_id
self.binary = node.binary
@property
def label(self):
return self._conanfile.display_name
@property
def short_label(self):
if self._ref and self._ref.name:
return "{}/{}".format(self._ref.name, self._ref.version)
else:
return self.label
@property
def METHOD_NAME(self):
return self._is_build_time_node
def data(self):
return {
'build_id': build_id(self._conanfile),
'url': self._conanfile.url,
'homepage': self._conanfile.homepage,
'license': self._conanfile.license,
'author': self._conanfile.author,
'topics': self._conanfile.topics
}
class _Grapher(object):
def __init__(self, deps_graph):
self._deps_graph = deps_graph
self.nodes, self.edges = self._build_graph()
def _build_graph(self):
graph_nodes = self._deps_graph.by_levels()
build_time_nodes = self._deps_graph.build_time_nodes()
graph_nodes = reversed([n for level in graph_nodes for n in level])
_node_map = {}
for i, node in enumerate(graph_nodes):
n = _PrinterGraphItem(i, node, bool(node in build_time_nodes))
_node_map[node] = n
edges = []
for node in self._deps_graph.nodes:
for node_to in node.neighbors():
src = _node_map[node]
dst = _node_map[node_to]
edges.append((src, dst))
return _node_map.values(), edges
@staticmethod
def binary_color(node):
assert isinstance(node, _PrinterGraphItem), "Wrong type '{}'".format(type(node))
color = {BINARY_CACHE: "SkyBlue",
BINARY_DOWNLOAD: "LightGreen",
BINARY_BUILD: "Khaki",
BINARY_MISSING: "OrangeRed",
BINARY_UPDATE: "SeaGreen"}.get(node.binary, "White")
return color
def _render_graph(graph, error, template, template_folder):
graph = _Grapher(graph)
from conans import __version__ as client_version
template = Template(template, autoescape=select_autoescape(['html', 'xml']))
return template.render(graph=graph, error=error, base_template_path=template_folder,
version=client_version)
def format_graph_html(result):
graph = result["graph"]
conan_api = result["conan_api"]
package_filter = result["package_filter"]
serial = graph.serialize()
# TODO: This is not used, it is necessary to update the renderings to use the serialized graph
# instead of the native graph
serial = filter_graph(serial, package_filter)
template_folder = os.path.join(conan_api.cache_folder, "templates")
user_template = os.path.join(template_folder, "graph.html")
template = load(user_template) if os.path.isfile(user_template) else graph_info_html
error = {
"type": "unknown",
"context": graph.error,
"should_highlight_node": lambda node: False
}
if isinstance(graph.error, GraphConflictError):
error["type"] = "conflict"
error["should_highlight_node"] = lambda node: node.id == graph.error.node.id
cli_out_write(_render_graph(graph, error, template, template_folder))
if graph.error:
raise graph.error
def format_graph_dot(result):
graph = result["graph"]
conan_api = result["conan_api"]
package_filter = result["package_filter"]
serial = graph.serialize()
# TODO: This is not used, it is necessary to update the renderings to use the serialized graph
# instead of the native graph
serial = filter_graph(serial, package_filter)
template_folder = os.path.join(conan_api.cache_folder, "templates")
user_template = os.path.join(template_folder, "graph.dot")
template = load(user_template) if os.path.isfile(user_template) else graph_info_dot
cli_out_write(_render_graph(graph, None, template, template_folder))
if graph.error:
raise graph.error
def format_graph_json(result):
graph = result["graph"]
field_filter = result.get("field_filter")
package_filter = result.get("package_filter")
serial = graph.serialize()
serial = filter_graph(serial, package_filter=package_filter, field_filter=field_filter)
json_result = json.dumps({"graph": serial}, indent=4)
cli_out_write(json_result)
if graph.error:
raise graph.error | null |
5,752 | from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework.test import APIClient
from rest_framework import status
from jaseci.utils.utils import TestCaseHelper
from django.test import TestCase
class PrivateJacAdminApiTests(TestCaseHelper, TestCase):
"""Test the authorized user node API"""
def setUp(self):
super().setUp()
# First user is always super,
self.user = get_user_model().objects.create_user(
"[email protected]", "password"
)
self.nonadmin = get_user_model().objects.create_user(
"[email protected]", "password"
)
self.client = APIClient()
self.client.force_authenticate(self.user)
self.notadminc = APIClient()
self.notadminc.force_authenticate(self.nonadmin)
self.master = self.user.get_master()
def tearDown(self):
super().tearDown()
def test_jac_api_config_index_has_core(self):
payload = {"op": "config_index"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertGreater(len(res.data), 2)
self.assertIn("ACTION_SETS", res.data)
def test_jac_api_create_config(self):
"""Test API for creating a config"""
payload = {
"op": "config_set",
"name": "EMAIL_HOST_USER",
"value": "val1",
"do_check": False,
}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {"op": "config_get", "name": "EMAIL_HOST_USER"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, "val1")
def METHOD_NAME(self):
"""Test API for creating a config"""
payload = {"op": "config_delete", "name": "TEST"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
payload = {"op": "config_set", "name": "TEST", "value": "val1"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {"op": "config_get", "name": "TEST"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertNotEqual(res.data, "val1")
def test_jac_api_create_config_nonadmin_fails(self):
"""Test API for creating a config"""
payload = {"op": "config_set", "name": "EMAIL_HOST_USER", "value": "val1"}
res = self.notadminc.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)
def test_jac_api_create_config_list(self):
"""Test API for creating a config"""
payload = {
"op": "config_set",
"name": "EMAIL_HOST_USER",
"value": "val1",
"do_check": False,
}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {
"op": "config_set",
"name": "EMAIL_HOST_PASSWORD",
"value": "val2",
"do_check": False,
}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {
"op": "config_set",
"name": "EMAIL_DEFAULT_FROM",
"value": "val3",
"do_check": False,
}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {
"op": "config_set",
"name": "EMAIL_BACKEND",
"value": "val4",
"do_check": False,
}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
payload = {"op": "config_list"}
res = self.client.post(
reverse(f'jac_api:{payload["op"]}'), payload, format="json"
)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 4)
self.assertIn("EMAIL_DEFAULT_FROM", res.data) | null |
5,753 | from unittest import TestCase, main
import numpy as np
from numpy.testing import assert_allclose
from cogent3.maths.stats.jackknife import JackknifeStats
def pmcc(data, axis=1):
"""Compute the Product-moment correlation coefficient.
Expression 15.3 from Biometry by Sokal/Rohlf
    This implementation assumes the provided data is two-dimensional,
    [[Y1], [Y2]], i.e. it determines the correlation coefficient between the
    data sets Y1 and Y2 and returns its Fisher z-transform (arctanh)."""
if axis == 0:
data = data.transpose()
axis = 1
other_axis = 0
mean = data.mean(axis=axis)
data_less_mean = np.array([data[0] - mean[0], data[1] - mean[1]])
sum_squares = np.sum(np.square(data_less_mean), axis=axis)
sum_products = np.sum(np.prod(data_less_mean, axis=other_axis))
pmcc = np.divide(sum_products, np.sqrt(np.prod(sum_squares)))
z_trans = np.arctanh(pmcc)
return z_trans
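# Illustrative sketch (not part of the original test data): since pmcc returns
# the Fisher z-transform arctanh(r) rather than Pearson's r itself, r can be
# recovered with tanh, e.g.
#   demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 3.9, 6.1, 8.0]])
#   r = np.tanh(pmcc(demo))   # expected to be close to 1.0 for correlated rows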
# test data from Box 15.2; Biometry by Sokal/Rohlf
data = np.array(
[
[159, 179, 100, 45, 384, 230, 100, 320, 80, 220, 320, 210],
[
14.40,
15.20,
11.30,
2.50,
22.70,
14.90,
1.41,
15.81,
4.19,
15.39,
17.25,
9.52,
],
]
)
# factory function generator for the statistical function of interest
def stat_maker(func, data, axis):
def calc_stat(coords):
subset_data = data.take(coords, axis)
return func(subset_data, axis)
return calc_stat
# function to compute mean of a np array
def mean(data, axis):
return data.mean(axis=axis)
class JackknifeTests(TestCase):
def METHOD_NAME(self):
"""jackknife should initialise correctly"""
# Scalar
pmcc_stat = stat_maker(pmcc, data, 1)
test_knife = JackknifeStats(data.shape[1], pmcc_stat)
self.assertEqual(test_knife.n, data.shape[1])
self.assertEqual(test_knife._jackknifed_stat, None)
# Vector
mean_stat = stat_maker(mean, data, 1)
test_knife = JackknifeStats(data.shape[1], mean_stat)
self.assertEqual(test_knife.n, data.shape[1])
self.assertEqual(test_knife._jackknifed_stat, None)
def test_jackknife_stats(self):
"""jackknife results should match Sokal & Rolf example"""
# Scalar
pmcc_stat = stat_maker(pmcc, data, 1)
test_knife = JackknifeStats(data.shape[1], pmcc_stat)
assert_allclose(test_knife.jackknifed_stat, 1.2905845)
assert_allclose(test_knife.standard_error, 0.2884490)
        self.assertIsNotNone(test_knife._jackknifed_stat)
# Vector
mean_stat = stat_maker(mean, data, 1)
test_knife = JackknifeStats(data.shape[1], mean_stat)
expected_jk_stat = data.mean(axis=1)
got_jk_stat = test_knife.jackknifed_stat
expected_standard_err = [30.69509346, 1.87179671]
got_standard_err = test_knife.standard_error
for index in [0, 1]:
assert_allclose(got_jk_stat[index], expected_jk_stat[index])
assert_allclose(got_standard_err[index], expected_standard_err[index])
def test_tables(self):
"""jackknife should work for calculators return scalars or vectors"""
# Scalar
pmcc_stat = stat_maker(pmcc, data, 1)
test_knife = JackknifeStats(data.shape[1], pmcc_stat)
expected_subsample_stats = [
1.4151,
1.3946,
1.4314,
1.1889,
1.1323,
1.3083,
1.3561,
1.3453,
1.2412,
1.3216,
1.2871,
1.3664,
]
expected_pseudovalues = [
0.1968,
0.4224,
0.0176,
2.6852,
3.3084,
1.3718,
0.8461,
0.9650,
2.1103,
1.2253,
1.6049,
0.7333,
]
test_knife.jackknife()
got_subsample_stats = test_knife._subset_statistics
got_pseudovalues = test_knife._pseudovalues
for index in range(data.shape[1]):
np.testing.assert_almost_equal(
got_subsample_stats[index], expected_subsample_stats[index], 4
)
np.testing.assert_approx_equal(
got_pseudovalues[index], expected_pseudovalues[index], 4
)
# Vector
mean_stat = stat_maker(mean, data, 1)
test_knife = JackknifeStats(data.shape[1], mean_stat)
test_knife.jackknife()
expected_pseudovalues = data.transpose()
expected_subsample_stats = [
[198.9091, 11.8336],
[197.0909, 11.7609],
[204.2727, 12.1155],
[209.2727, 12.9155],
[178.4545, 11.0791],
[192.4545, 11.7882],
[204.2727, 13.0145],
[184.2727, 11.7055],
[206.0909, 12.7618],
[193.3636, 11.7436],
[184.2727, 11.5745],
[194.2727, 12.2773],
]
got_subsample_stats = test_knife._subset_statistics
got_pseudovalues = test_knife._pseudovalues
for index1 in range(data.shape[1]):
for index2 in range(data.shape[0]):
np.testing.assert_almost_equal(
got_subsample_stats[index1][index2],
expected_subsample_stats[index1][index2],
4,
)
np.testing.assert_almost_equal(
got_pseudovalues[index1][index2],
expected_pseudovalues[index1][index2],
4,
)
def test_tabular_properties(self):
"""constructs tabular properties"""
pmcc_stat = stat_maker(pmcc, data, 1)
test_knife = JackknifeStats(data.shape[1], pmcc_stat)
ss = test_knife.sub_sample_stats
self.assertEqual(ss.shape, (12, 2))
ss = test_knife.sample_stat
pvs = test_knife.pseudovalues
self.assertEqual(pvs.shape, (12, 2))
ss = test_knife.summary_stats
self.assertEqual(ss.shape, (1, 3))
if __name__ == "__main__":
main() | null |
5,754 | import pytest
from wemake_python_styleguide.visitors.ast.complexity.offset import (
OffsetVisitor,
TooDeepNestingViolation,
)
nested_if = """
def container():
if True:
x = 1
"""
nested_if2 = """
def container():
if some_value:
call_other()
"""
nested_for = """
def container():
for i in '123':
return 0
"""
nested_try = """
def container():
try:
some_call()
except Exception:
raise
"""
nested_try2 = """
def container():
if some_call:
try:
some_call()
except Exception:
raise
"""
nested_with = """
def container():
with open('some') as temp:
temp.read()
"""
nested_while = """
def container():
while True:
continue
"""
real_nested_values = """
def container():
if some > 1:
if some > 2:
if some > 3:
if some > 4:
if some > 5:
print(some)
"""
# Regression for #320:
real_await_nested_values = """
async def update_control():
current_control = await too_long_name_please_find_one({'line': 1,
'point': 1})
"""
@pytest.mark.parametrize('code', [
nested_if,
nested_if2,
nested_for,
nested_try,
nested_try2,
nested_with,
nested_while,
])
def test_nested_offset(
assert_errors,
parse_ast_tree,
code,
default_options,
mode,
):
"""Testing that nested expression with default options works well."""
tree = parse_ast_tree(mode(code))
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
def test_nested_offset_regression320(
assert_errors,
parse_ast_tree,
default_options,
):
"""
Testing that await works well with long lines.
See: https://github.com/wemake-services/wemake-python-styleguide/issues/320
"""
tree = parse_ast_tree(real_await_nested_values)
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize(('code', 'number_of_errors'), [
(nested_if, 1),
(nested_if2, 1),
(nested_for, 1),
(nested_try, 2),
(nested_try2, 4),
(nested_with, 1),
(nested_while, 1),
])
def test_nested_offset_errors(
monkeypatch,
assert_errors,
parse_ast_tree,
code,
number_of_errors,
default_options,
mode,
):
"""Testing that nested expressions are restricted."""
tree = parse_ast_tree(mode(code))
monkeypatch.setattr(OffsetVisitor, '_max_offset_blocks', 1)
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
errors = [TooDeepNestingViolation for _ in range(number_of_errors)]
assert_errors(visitor, errors)
@pytest.mark.parametrize('code', [
nested_if,
nested_if2,
nested_for,
nested_with,
nested_while,
])
def METHOD_NAME(
monkeypatch,
assert_errors,
assert_error_text,
parse_ast_tree,
code,
default_options,
mode,
):
"""Testing that nested expressions are restricted."""
tree = parse_ast_tree(mode(code))
monkeypatch.setattr(OffsetVisitor, '_max_offset_blocks', 1)
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [TooDeepNestingViolation])
assert_error_text(visitor, '8', 4)
def test_real_nesting_config(
assert_errors,
assert_error_text,
parse_ast_tree,
default_options,
mode,
):
"""Ensures that real configuration works."""
tree = parse_ast_tree(mode(real_nested_values))
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [TooDeepNestingViolation])
assert_error_text(visitor, '24', 10 * 2)
def test_regression282(
monkeypatch,
assert_errors,
parse_ast_tree,
default_options,
):
"""
Testing that issue-282 will not happen again.
See: https://github.com/wemake-services/wemake-python-styleguide/issues/282
"""
code = """
async def no_offset():
...
"""
tree = parse_ast_tree(code)
monkeypatch.setattr(OffsetVisitor, '_max_offset_blocks', 1)
visitor = OffsetVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, []) | null |
5,755 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test_dtype"""
from dataclasses import dataclass
import numpy as np
import pytest
import mindspore as ms
from mindspore.common import dtype
def test_dtype_to_nptype():
"""test_dtype2nptype"""
assert ms.dtype_to_nptype(ms.bool_) == np.bool_
assert ms.dtype_to_nptype(ms.int8) == np.int8
assert ms.dtype_to_nptype(ms.int16) == np.int16
assert ms.dtype_to_nptype(ms.int32) == np.int32
assert ms.dtype_to_nptype(ms.int64) == np.int64
assert ms.dtype_to_nptype(ms.uint8) == np.uint8
assert ms.dtype_to_nptype(ms.uint16) == np.uint16
assert ms.dtype_to_nptype(ms.uint32) == np.uint32
assert ms.dtype_to_nptype(ms.uint64) == np.uint64
assert ms.dtype_to_nptype(ms.float16) == np.float16
assert ms.dtype_to_nptype(ms.float32) == np.float32
assert ms.dtype_to_nptype(ms.float64) == np.float64
assert ms.dtype_to_nptype(ms.complex64) == np.complex64
assert ms.dtype_to_nptype(ms.complex128) == np.complex128
def test_dtype_to_pytype():
"""test_dtype_to_pytype"""
assert ms.dtype_to_pytype(ms.bool_) == bool
assert ms.dtype_to_pytype(ms.int8) == int
assert ms.dtype_to_pytype(ms.int16) == int
assert ms.dtype_to_pytype(ms.int32) == int
assert ms.dtype_to_pytype(ms.int64) == int
assert ms.dtype_to_pytype(ms.uint8) == int
assert ms.dtype_to_pytype(ms.uint16) == int
assert ms.dtype_to_pytype(ms.uint32) == int
assert ms.dtype_to_pytype(ms.uint64) == int
assert ms.dtype_to_pytype(ms.float16) == float
assert ms.dtype_to_pytype(ms.float32) == float
assert ms.dtype_to_pytype(ms.float64) == float
assert ms.dtype_to_pytype(ms.complex64) == complex
assert ms.dtype_to_pytype(ms.complex128) == complex
assert ms.dtype_to_pytype(ms.list_) == list
assert ms.dtype_to_pytype(ms.tuple_) == tuple
assert ms.dtype_to_pytype(ms.string) == str
assert ms.dtype_to_pytype(ms.type_none) == type(None)
@dataclass
class Foo:
x: int
def inf(self):
return self.x
def METHOD_NAME(cls):
"""
    Get the attribute (field) types declared on a dataclass.
"""
fields = cls.__dataclass_fields__
attr_type = [field.type for name, field in fields.items()]
return attr_type
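# Illustrative expectation (mirrors the assertions in test_dtype below): the
# helper returns the declared field annotations, not the runtime values, so
#   METHOD_NAME(Foo)        -> [int]
#   METHOD_NAME(Foo(3.1))   -> [int]   # still int, because the field is typed int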
def test_dtype():
"""test_dtype"""
x = 1.5
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.float64
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.float64
x = 100
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.int64
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.int64
x = False
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.bool_
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.bool_
x = 0.1+3j
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.complex128
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.complex128
# support str
# x = "string type"
x = [1, 2, 3]
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.list_
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.list_
x = (2, 4, 5)
me_type = dtype.get_py_obj_dtype(x)
assert me_type == ms.tuple_
me_type = dtype.get_py_obj_dtype(type(x))
assert me_type == ms.tuple_
y = Foo(3)
me_type = dtype.get_py_obj_dtype(y.x)
assert me_type == ms.int64
me_type = dtype.get_py_obj_dtype(type(y.x))
assert me_type == ms.int64
y = Foo(3.1)
me_type = dtype.get_py_obj_dtype(y.x)
assert me_type == ms.float64
me_type = dtype.get_py_obj_dtype(type(y.x))
assert me_type == ms.float64
fields = METHOD_NAME(y)
assert len(fields) == 1
me_type = dtype.get_py_obj_dtype(fields[0])
assert me_type == ms.int64
fields = METHOD_NAME(Foo)
assert len(fields) == 1
me_type = dtype.get_py_obj_dtype(fields[0])
assert me_type == ms.int64
with pytest.raises(NotImplementedError):
x = 1.5
dtype.get_py_obj_dtype(type(type(x)))
def test_type_equal():
t1 = (dtype.int32, dtype.int32)
valid_types = [dtype.float16, dtype.float32]
assert t1 not in valid_types
assert dtype.int32 not in valid_types
assert dtype.float32 in valid_types | null |
5,756 | import warnings
from typing import List, Optional, TypeVar
from docarray.typing.bytes.video_bytes import VideoBytes, VideoLoadResult
from docarray.typing.proto_register import _register_proto
from docarray.typing.url.any_url import AnyUrl
from docarray.typing.url.mimetypes import VIDEO_MIMETYPE
from docarray.utils._internal.misc import is_notebook
T = TypeVar('T', bound='VideoUrl')
@_register_proto(proto_type_name='video_url')
class VideoUrl(AnyUrl):
"""
URL to a video file.
Can be remote (web) URL, or a local file path.
"""
@classmethod
def mime_type(cls) -> str:
return VIDEO_MIMETYPE
@classmethod
def METHOD_NAME(cls) -> List[str]:
"""
Returns a list of additional file extensions that are valid for this class
but cannot be identified by the mimetypes library.
"""
return []
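    # Illustrative note (hypothetical values): a subclass whose container format
    # is not known to the stdlib ``mimetypes`` table could whitelist it here,
    # e.g. ``return ['mkv']``, so that URL validation still accepts such paths.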
def load(self: T, **kwargs) -> VideoLoadResult:
"""
Load the data from the url into a `NamedTuple` of
[`VideoNdArray`][docarray.typing.VideoNdArray],
[`AudioNdArray`][docarray.typing.AudioNdArray]
and [`NdArray`][docarray.typing.NdArray].
---
```python
from typing import Optional
from docarray import BaseDoc
from docarray.typing import VideoUrl, VideoNdArray, AudioNdArray, NdArray
class MyDoc(BaseDoc):
video_url: VideoUrl
video: Optional[VideoNdArray]
audio: Optional[AudioNdArray]
key_frame_indices: Optional[NdArray]
doc = MyDoc(
video_url='https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true'
)
doc.video, doc.audio, doc.key_frame_indices = doc.video_url.load()
assert isinstance(doc.video, VideoNdArray)
assert isinstance(doc.audio, AudioNdArray)
assert isinstance(doc.key_frame_indices, NdArray)
```
---
You can load only the key frames (or video, audio respectively):
---
```python
from pydantic import parse_obj_as
from docarray.typing import NdArray, VideoUrl
url = parse_obj_as(
VideoUrl,
'https://github.com/docarray/docarray/blob/main/tests/toydata/mov_bbb.mp4?raw=true',
)
key_frame_indices = url.load().key_frame_indices
assert isinstance(key_frame_indices, NdArray)
```
---
:param kwargs: supports all keyword arguments that are being supported by
av.open() as described [here](https://pyav.org/docs/stable/api/_globals.html?highlight=open#av.open)
:return: [`AudioNdArray`][docarray.typing.AudioNdArray] representing the audio content,
[`VideoNdArray`][docarray.typing.VideoNdArray] representing the images of the video,
[`NdArray`][docarray.typing.NdArray] of the key frame indices.
"""
buffer = self.load_bytes(**kwargs)
return buffer.load()
def load_bytes(self, timeout: Optional[float] = None) -> VideoBytes:
"""
Convert url to [`VideoBytes`][docarray.typing.VideoBytes]. This will either load or download
the file and save it into an [`VideoBytes`][docarray.typing.VideoBytes] object.
:param timeout: timeout for urlopen. Only relevant if url is not local
:return: [`VideoBytes`][docarray.typing.VideoBytes] object
"""
bytes_ = super().load_bytes(timeout=timeout)
return VideoBytes(bytes_)
def display(self):
"""
Play video from url in notebook.
"""
if is_notebook():
from IPython.display import display
            remote_url = self.startswith('http')
if remote_url:
from IPython.display import Video
b = self.load_bytes()
display(Video(data=b, embed=True, mimetype='video/mp4'))
else:
import os
from IPython.display import HTML
path = os.path.relpath(self)
src = f'''
<body>
<video width="320" height="240" autoplay muted controls>
<source src="{path}">
Your browser does not support the video tag.
</video>
</body>
'''
display(HTML(src))
else:
warnings.warn('Display of video is only possible in a notebook.') | null |
5,757 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import time
import os
import sys
import codecs
import fastdeploy as fd
# triton_python_backend_utils is available in every Triton Python model. You
# need to use this module to create inference requests and responses. It also
# contains some utility functions for extracting information from model_config
# and converting Triton input/output types to numpy types.
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
"""Your Python model must use the same class name. Every Python model
that is created must have "TritonPythonModel" as the class name.
"""
def METHOD_NAME(self, args):
"""`initialize` is called only once when the model is being loaded.
Implementing `initialize` function is optional. This function allows
        the model to initialize any state associated with this model.
Parameters
----------
args : dict
Both keys and values are strings. The dictionary keys and values are:
* model_config: A JSON string containing the model configuration
* model_instance_kind: A string containing model instance kind
* model_instance_device_id: A string containing model instance device ID
* model_repository: Model repository path
* model_version: Model version
* model_name: Model name
"""
# You must parse model_config. JSON string is not parsed here
self.model_config = json.loads(args['model_config'])
print("model_config:", self.model_config)
self.input_names = []
for input_config in self.model_config["input"]:
self.input_names.append(input_config["name"])
print("postprocess input names:", self.input_names)
self.output_names = []
self.output_dtype = []
for output_config in self.model_config["output"]:
self.output_names.append(output_config["name"])
dtype = pb_utils.triton_string_to_numpy(output_config["data_type"])
self.output_dtype.append(dtype)
print("postprocess output names:", self.output_names)
dir_name = os.path.dirname(os.path.realpath(__file__)) + "/"
file_name = dir_name + "ppocr_keys_v1.txt"
#self.label_list = load_dict()
self.postprocessor = fd.vision.ocr.RecognizerPostprocessor(file_name)
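        # Illustrative note (assumed values, not from the original deployment):
        # ``args`` arrives with every value as a plain string, e.g.
        #   {"model_config": "{...json...}", "model_instance_kind": "GPU",
        #    "model_instance_device_id": "0", "model_name": "rec_postprocess", ...}
        # which is why the config is parsed with json.loads() at the top of
        # this method before its inputs and outputs can be inspected.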
def execute(self, requests):
"""`execute` must be implemented in every Python model. `execute`
function receives a list of pb_utils.InferenceRequest as the only
argument. This function is called when an inference is requested
for this model. Depending on the batching configuration (e.g. Dynamic
Batching) used, `requests` may contain multiple requests. Every
Python model, must create one pb_utils.InferenceResponse for every
pb_utils.InferenceRequest in `requests`. If there is an error, you can
set the error argument when creating a pb_utils.InferenceResponse.
Parameters
----------
requests : list
A list of pb_utils.InferenceRequest
Returns
-------
list
A list of pb_utils.InferenceResponse. The length of this list must
be the same as `requests`
"""
responses = []
for request in requests:
infer_outputs = pb_utils.get_input_tensor_by_name(
request, self.input_names[0])
infer_outputs = infer_outputs.as_numpy()
results = self.postprocessor.run([infer_outputs])
out_tensor_0 = pb_utils.Tensor(
self.output_names[0], np.array(
results[0], dtype=np.object_))
out_tensor_1 = pb_utils.Tensor(self.output_names[1],
np.array(results[1]))
inference_response = pb_utils.InferenceResponse(
output_tensors=[out_tensor_0, out_tensor_1])
responses.append(inference_response)
return responses
def finalize(self):
"""`finalize` is called only once when the model is being unloaded.
Implementing `finalize` function is optional. This function allows
the model to perform any necessary clean ups before exit.
"""
print('Cleaning up...') | null |
5,758 | """
This is a very simple example aiming to show how variable scaling can be used.
Variable scaling is important for the conditioning of the problem and can therefore improve convergence.
This example is copied from the getting_started/pendulum.py example.
One scaling should be declared for each phase for the states and controls. The scaling of the parameters should be
declared in the parameter declaration like in the example getting_started/custom_parameters.py.
"""
from bioptim import (
BiorbdModel,
OptimalControlProgram,
DynamicsFcn,
Dynamics,
BoundsList,
ObjectiveFcn,
Objective,
OdeSolver,
OdeSolverBase,
CostType,
Solver,
VariableScalingList,
)
def METHOD_NAME(
biorbd_model_path: str,
final_time: float,
n_shooting: int,
ode_solver: OdeSolverBase = OdeSolver.RK4(),
use_sx: bool = True,
n_threads: int = 1,
assume_phase_dynamics: bool = True,
expand_dynamics: bool = True,
) -> OptimalControlProgram:
"""
The initialization of an ocp
Parameters
----------
biorbd_model_path: str
The path to the biorbd model
final_time: float
The time in second required to perform the task
n_shooting: int
        The number of shooting points to define in the direct multiple shooting program
ode_solver: OdeSolverBase = OdeSolver.RK4()
Which type of OdeSolver to use
use_sx: bool
If the SX variable should be used instead of MX (can be extensive on RAM)
n_threads: int
The number of threads to use in the paralleling (1 = no parallel computing)
assume_phase_dynamics: bool
If the dynamics equation within a phase is unique or changes at each node. True is much faster, but lacks the
capability to have changing dynamics within a phase. A good example of when False should be used is when
different external forces are applied at each node
expand_dynamics: bool
If the dynamics function should be expanded. Please note, this will solve the problem faster, but will slow down
the declaration of the OCP, so it is a trade-off. Also depending on the solver, it may or may not work
(for instance IRK is not compatible with expanded dynamics)
Returns
-------
The OptimalControlProgram ready to be solved
"""
biorbd_model = BiorbdModel(biorbd_model_path)
# Add objective functions
objective_functions = Objective(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key="tau")
# Dynamics
dynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN, expand=expand_dynamics)
# Path constraint
x_bounds = BoundsList()
x_bounds["q"] = biorbd_model.bounds_from_ranges("q")
x_bounds["q"][:, [0, -1]] = 0
x_bounds["q"][1, -1] = 3.14
x_bounds["qdot"] = [-3.14 * 100] * biorbd_model.nb_qdot, [3.14 * 100] * biorbd_model.nb_qdot
x_bounds["qdot"][:, [0, -1]] = 0
# Define control path constraint
n_tau = biorbd_model.nb_tau
tau_min, tau_max = -1000, 1000
u_bounds = BoundsList()
u_bounds["tau"] = [tau_min] * n_tau, [tau_max] * n_tau
u_bounds["tau"][1, :] = 0
# Variable scaling
x_scaling = VariableScalingList()
x_scaling["q"] = [1, 3]
x_scaling["qdot"] = [85, 85]
u_scaling = VariableScalingList()
u_scaling["tau"] = [900, 1]
return OptimalControlProgram(
biorbd_model,
dynamics,
n_shooting,
final_time,
x_bounds=x_bounds,
u_bounds=u_bounds,
x_scaling=x_scaling,
u_scaling=u_scaling,
objective_functions=objective_functions,
ode_solver=ode_solver,
use_sx=use_sx,
n_threads=n_threads,
assume_phase_dynamics=assume_phase_dynamics,
)
def main():
"""
If pendulum is run as a script, it will perform the optimization and animates it
"""
# --- Prepare the ocp --- #
ocp = METHOD_NAME(biorbd_model_path="models/pendulum.bioMod", final_time=1 / 10, n_shooting=30)
# Custom plots
ocp.add_plot_penalty(CostType.ALL)
# --- Print ocp structure --- #
ocp.print(to_console=False, to_graph=False)
# --- Solve the ocp --- #
sol = ocp.solve(Solver.IPOPT(show_online_optim=False))
sol.graphs()
# --- Show the results in a bioviz animation --- #
sol.print_cost()
sol.animate(n_frames=100)
if __name__ == "__main__":
main() | null |
5,759 | import os
import textwrap
import unittest
from conans.model.info import load_binary_info
from conans.model.recipe_ref import RecipeReference
from conans.paths import CONANFILE
from conans.test.utils.tools import TestClient
from conans.util.files import load, save
class SettingsTest(unittest.TestCase):
def _get_conaninfo(self, reference, client):
ref = client.cache.get_latest_recipe_reference(RecipeReference.loads(reference))
pkg_ids = client.cache.get_package_references(ref)
pref = client.cache.get_latest_package_reference(pkg_ids[0])
pkg_folder = client.cache.pkg_layout(pref).package()
return load_binary_info(client.load(os.path.join(pkg_folder, "conaninfo.txt")))
def test_wrong_settings(self):
settings = """os:
null:
subsystem: [null, msys]
"""
client = TestClient()
save(client.cache.settings_path, settings)
save(client.cache.default_profile_path, "")
conanfile = """from conan import ConanFile
class Pkg(ConanFile):
settings = "os", "compiler"
"""
client.save({"conanfile.py": conanfile})
client.run("create . --name=pkg --version=0.1 --user=lasote --channel=testing", assert_error=True)
self.assertIn("ERROR: settings.yml: null setting can't have subsettings", client.out)
def test_settings_constraint_error_type(self):
# https://github.com/conan-io/conan/issues/3022
conanfile = """from conan import ConanFile
class Test(ConanFile):
settings = "os"
def build(self):
self.output.info("OS!!: %s" % self.settings.os)
"""
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("create . --name=pkg --version=0.1 --user=user --channel=testing -s os=Linux")
self.assertIn("pkg/0.1@user/testing: OS!!: Linux", client.out)
def test_settings_as_a_str(self):
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = "os"
"""
client = TestClient()
client.save({CONANFILE: content})
client.run("create . -s os=Windows --build missing")
# Now read the conaninfo and verify that settings applied is only os and value is windows
conan_info = self._get_conaninfo("say/0.1@", client)
self.assertEqual(conan_info["settings"]["os"], "Windows")
client.run("remove say/0.1 -c")
client.run("create . -s os=Linux --build missing")
# Now read the conaninfo and verify that settings applied is only os and value is windows
conan_info = self._get_conaninfo("say/0.1@", client)
self.assertEqual(conan_info["settings"]["os"], "Linux")
def test_settings_as_a_list_conanfile(self):
# Now with conanfile as a list
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = "os", "arch"
"""
client = TestClient()
client.save({CONANFILE: content})
client.run("create . -s os=Windows --build missing")
conan_info = self._get_conaninfo("say/0.1@", client)
self.assertEqual(conan_info["settings"]["os"], "Windows")
def METHOD_NAME(self):
# Now with conanfile as a dict
        # XXX: this test only works on machines with default arch "x86", "x86_64", "sparc" or "sparcv9"
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = {"os", "arch"}
"""
client = TestClient()
client.save({CONANFILE: content})
client.run("create . -s os=Windows --build missing")
conan_info = self._get_conaninfo("say/0.1@", client)
self.assertEqual(conan_info["settings"]["os"], "Windows")
def test_invalid_settings3(self):
client = TestClient()
# Test wrong settings in conanfile
content = textwrap.dedent("""
from conan import ConanFile
class SayConan(ConanFile):
settings = "invalid"
""")
client.save({CONANFILE: content})
client.run("install . --build missing", assert_error=True)
self.assertIn("'settings.invalid' doesn't exist", client.out)
# Test wrong values in conanfile
def test_invalid_settings4(self):
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = "os"
"""
client = TestClient()
client.save({CONANFILE: content})
client.run("create . -s os=ChromeOS --build missing", assert_error=True)
assert "ERROR: Invalid setting 'ChromeOS' is not a valid 'settings.os' value." in client.out
assert "Possible values are ['Windows', 'WindowsStore', 'WindowsCE', 'Linux'" in client.out
# Now add new settings to config and try again
config = load(client.cache.settings_path)
config = config.replace("Windows:",
"Windows:\n ChromeOS:\n")
save(client.cache.settings_path, config)
client.run("create . -s os=ChromeOS --build missing")
# Settings is None
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = None
"""
client.save({CONANFILE: content})
client.run("remove say/0.1 -c")
client.run("create . --build missing")
conan_info = self._get_conaninfo("say/0.1", client)
self.assertEqual(conan_info.get("settings"), None)
# Settings is {}
content = """
from conan import ConanFile
class SayConan(ConanFile):
name = "say"
version = "0.1"
settings = {}
"""
client.save({CONANFILE: content})
client.run("remove say/0.1 -c")
client.run("create . --build missing")
conan_info = self._get_conaninfo("say/0.1", client)
self.assertEqual(conan_info.get("settings"), None) | null |
5,760 | import pytest
from pytest_bdd import given, scenario, then, when, parsers
from retrying import retry
import os
import subprocess
import time
from common.mayastor import container_mod, mayastor_mod
from common.volume import Volume
import grpc
import mayastor_pb2 as pb
def megabytes(n):
return n * 1024 * 1024
def find_child(nexus, uri):
for child in nexus.children:
if child.uri == uri:
return child
return None
def convert_nexus_state(state):
STATES = {
"UNKNOWN": pb.NexusState.NEXUS_UNKNOWN,
"ONLINE": pb.NexusState.NEXUS_ONLINE,
"DEGRADED": pb.NexusState.NEXUS_DEGRADED,
"FAULTED": pb.NexusState.NEXUS_FAULTED,
}
return STATES[state]
def convert_child_state(state):
STATES = {
"UNKNOWN": pb.ChildState.CHILD_UNKNOWN,
"ONLINE": pb.ChildState.CHILD_ONLINE,
"DEGRADED": pb.ChildState.CHILD_DEGRADED,
"FAULTED": pb.ChildState.CHILD_FAULTED,
}
return STATES[state]
def convert_child_action(state):
ACTIONS = {
"OFFLINE": pb.ChildAction.offline,
"ONLINE": pb.ChildAction.online,
}
return ACTIONS[state]
def lookup_nexus(mayastor_instance, nexus_uuid):
for nexus in mayastor_instance.ms.ListNexus(pb.Null()).nexus_list:
if nexus.uuid == nexus_uuid:
return nexus
return None
def lookup_nexus_child(mayastor_instance, nexus_uuid, child_uri):
nexus = lookup_nexus(mayastor_instance, nexus_uuid)
if nexus is None:
return None
for child in nexus.children:
if child.uri == child_uri:
return child
return None
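# Poll for the child to reach the expected state: the retry decorator below re-runs the
# check up to 5 times, 100 ms apart, before letting the assertion fail.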
@retry(wait_fixed=100, stop_max_attempt_number=5)
def METHOD_NAME(mayastor_instance, nexus_uuid, child_uri, state):
child = lookup_nexus_child(mayastor_instance, nexus_uuid, child_uri)
assert child is not None and child.state == convert_child_state(state)
@scenario("features/rebuild.feature", "running rebuild")
def test_running_rebuild():
"Running rebuild."
@scenario("features/rebuild.feature", "stopping rebuild")
def test_stopping_rebuild():
"Stopping rebuild."
@scenario("features/rebuild.feature", "pausing rebuild")
def test_pausing_rebuild():
"Pausing rebuild."
@scenario("features/rebuild.feature", "resuming rebuild")
def test_resuming_rebuild():
"Resuming rebuild."
@scenario("features/rebuild.feature", "setting a child ONLINE")
def test_setting_a_child_online():
"Setting a child ONLINE."
@scenario("features/rebuild.feature", "setting a child OFFLINE")
def test_setting_a_child_offline():
"Setting a child OFFLINE."
@pytest.fixture(scope="module")
def local_files():
files = [f"/tmp/disk-rebuild-{base}.img" for base in ["source", "target"]]
for path in files:
subprocess.run(
["sudo", "sh", "-c", f"rm -f '{path}' && truncate -s 64M '{path}'"],
check=True,
)
yield
for path in files:
subprocess.run(["sudo", "rm", "-f", path], check=True)
@pytest.fixture(scope="module")
def source_uri(local_files):
yield "aio:///tmp/disk-rebuild-source.img?blk_size=4096"
@pytest.fixture(scope="module")
def target_uri(local_files):
yield "aio:///tmp/disk-rebuild-target.img?blk_size=4096"
@pytest.fixture(scope="module")
def nexus_uuid():
yield "2c58c9f0-da89-4cb9-8097-dc67fa132493"
@pytest.fixture(scope="module")
def mayastor_instance(mayastor_mod):
yield mayastor_mod["ms0"]
@pytest.fixture(scope="module")
def find_nexus(mayastor_instance):
def find(uuid):
return lookup_nexus(mayastor_instance, uuid)
yield find
@pytest.fixture
def mayastor_nexus(mayastor_instance, nexus_uuid, source_uri):
nexus = mayastor_instance.ms.CreateNexus(
pb.CreateNexusRequest(
uuid=nexus_uuid, size=megabytes(64), children=[source_uri]
)
)
yield nexus
mayastor_instance.ms.DestroyNexus(pb.DestroyNexusRequest(uuid=nexus_uuid))
@pytest.fixture
def nexus_state(mayastor_nexus, find_nexus, nexus_uuid):
yield find_nexus(nexus_uuid)
@pytest.fixture
def rebuild_state(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
try:
yield mayastor_instance.ms.GetRebuildState(
pb.RebuildStateRequest(uuid=nexus_uuid, uri=target_uri)
).state
    except Exception:
yield None
@given("a mayastor instance")
@given(parsers.parse('a mayastor instance "{name}"'))
def get_instance(mayastor_instance):
pass
@given("a nexus")
@given("a nexus with a source child device")
def get_nexus(mayastor_nexus):
pass
@when("a target child is added to the nexus")
def add_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.AddChildNexus(
pb.AddChildNexusRequest(uuid=nexus_uuid, uri=target_uri, norebuild=True)
)
@when("the rebuild operation is started")
def start_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.StartRebuild(
pb.StartRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
@when("the rebuild operation is then paused")
def pause_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.PauseRebuild(
pb.PauseRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
time.sleep(0.5)
@when("the rebuild operation is then resumed")
def resume_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.ResumeRebuild(
pb.ResumeRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
@when("the rebuild operation is then stopped")
def stop_rebuild(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
mayastor_instance.ms.StopRebuild(
pb.StopRebuildRequest(uuid=nexus_uuid, uri=target_uri)
)
time.sleep(0.5)
@when("the rebuild statistics are requested", target_fixture="rebuild_statistics")
def rebuild_statistics(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri):
return mayastor_instance.ms.GetRebuildStats(
pb.RebuildStatsRequest(uuid=nexus_uuid, uri=target_uri)
)
@when(parsers.parse("the target child is set {state}"), target_fixture="set_child")
@when(parsers.parse("the target child is then set {state}"), target_fixture="set_child")
def set_child(mayastor_instance, mayastor_nexus, nexus_uuid, target_uri, state):
mayastor_instance.ms.ChildOperation(
pb.ChildNexusRequest(
uuid=nexus_uuid, uri=target_uri, action=convert_child_action(state)
)
)
# After offlining a child, it may take some time to close the device
# and reach DEGRADED state.
if state == "OFFLINE":
METHOD_NAME(mayastor_instance, nexus_uuid, target_uri, "DEGRADED")
@then(parsers.parse("the nexus state is {expected}"))
def check_nexus_state(nexus_state, expected):
assert nexus_state.state == convert_nexus_state(expected)
@then(parsers.parse("the source child state is {expected}"))
def check_source_child_state(nexus_state, source_uri, expected):
child = find_child(nexus_state, source_uri)
assert child.state == convert_child_state(expected)
@then(parsers.parse("the target child state is {expected}"))
def check_target_child_state(nexus_state, target_uri, expected):
child = find_child(nexus_state, target_uri)
assert child.state == convert_child_state(expected)
@then(parsers.parse("the rebuild count is {expected:d}"))
def check_rebuild_count(nexus_state, expected):
assert nexus_state.rebuilds == expected
@then(parsers.parse('the rebuild state is "{expected}"'))
def check_rebuild_state(rebuild_state, expected):
assert rebuild_state == expected
@then("the rebuild state is undefined")
def rebuild_state_is_undefined(rebuild_state):
assert rebuild_state is None
@then(parsers.parse('the rebuild statistics counter "{name}" is {expected}'))
def check_rebuild_statistics_counter(rebuild_statistics, name, expected):
assert (getattr(rebuild_statistics, name) == 0) == (expected == "zero") | null |
5,761 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for google.protobuf.proto_builder."""
import collections
import unittest
from google.protobuf import descriptor_pb2 # pylint: disable=g-import-not-at-top
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import proto_builder
from google.protobuf import text_format
class ProtoBuilderTest(unittest.TestCase):
def setUp(self):
self.ordered_fields = collections.OrderedDict([
('foo', descriptor_pb2.FieldDescriptorProto.TYPE_INT64),
('bar', descriptor_pb2.FieldDescriptorProto.TYPE_STRING),
])
self._fields = dict(self.ordered_fields)
def testMakeSimpleProtoClass(self):
"""Test that we can create a proto class."""
proto_cls = proto_builder.MakeSimpleProtoClass(
self._fields,
full_name='net.proto2.python.public.proto_builder_test.Test')
proto = proto_cls()
proto.foo = 12345
proto.bar = 'asdf'
self.assertMultiLineEqual(
'bar: "asdf"\nfoo: 12345\n', text_format.MessageToString(proto))
def testOrderedFields(self):
"""Test that the field order is maintained when given an OrderedDict."""
proto_cls = proto_builder.MakeSimpleProtoClass(
self.ordered_fields,
full_name='net.proto2.python.public.proto_builder_test.OrderedTest')
proto = proto_cls()
proto.foo = 12345
proto.bar = 'asdf'
self.assertMultiLineEqual(
'foo: 12345\nbar: "asdf"\n', text_format.MessageToString(proto))
def METHOD_NAME(self):
"""Test that the DescriptorPool is used."""
pool = descriptor_pool.DescriptorPool()
proto_cls1 = proto_builder.MakeSimpleProtoClass(
self._fields,
full_name='net.proto2.python.public.proto_builder_test.Test',
pool=pool)
proto_cls2 = proto_builder.MakeSimpleProtoClass(
self._fields,
full_name='net.proto2.python.public.proto_builder_test.Test',
pool=pool)
self.assertIs(proto_cls1.DESCRIPTOR, proto_cls2.DESCRIPTOR)
def testMakeLargeProtoClass(self):
"""Test that large created protos don't use reserved field numbers."""
num_fields = 123456
fields = {
'foo%d' % i: descriptor_pb2.FieldDescriptorProto.TYPE_INT64
for i in range(num_fields)
}
proto_cls = proto_builder.MakeSimpleProtoClass(
fields,
full_name='net.proto2.python.public.proto_builder_test.LargeProtoTest')
reserved_field_numbers = set(
range(descriptor.FieldDescriptor.FIRST_RESERVED_FIELD_NUMBER,
descriptor.FieldDescriptor.LAST_RESERVED_FIELD_NUMBER + 1))
proto_field_numbers = set(proto_cls.DESCRIPTOR.fields_by_number)
self.assertFalse(reserved_field_numbers.intersection(proto_field_numbers))
if __name__ == '__main__':
unittest.main() | null |
5,762 | """
Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import requests
import time
import json
import base64
from storageadmin.exceptions import RockStorAPIException
from storageadmin.models import OauthApp
from django.conf import settings
class APIWrapper(object):
def __init__(self, client_id=None, client_secret=None, url=None):
self.access_token = None
self.expiration = time.time()
self.client_id = client_id
self.client_secret = client_secret
# directly connect to gunicorn, bypassing nginx as we are on the same
# host.
self.url = "http://127.0.0.1:8000"
if url is not None:
# for remote urls.
self.url = url
def METHOD_NAME(self):
if self.client_id is None or self.client_secret is None:
app = OauthApp.objects.get(name=settings.OAUTH_INTERNAL_APP)
self.client_id = app.application.client_id
self.client_secret = app.application.client_secret
token_request_data = {
"grant_type": "client_credentials",
"client_id": self.client_id,
"client_secret": self.client_secret,
}
user_pass = "{0}:{1}".format(self.client_id, self.client_secret)
auth_string = base64.b64encode(user_pass.encode("utf-8"))
auth_headers = {
"HTTP_AUTHORIZATION": "Basic " + auth_string.decode("utf-8"),
}
content = None
try:
response = requests.post(
"%s/o/token/" % self.url,
data=token_request_data,
headers=auth_headers,
verify=False,
)
content = json.loads(response.content.decode("utf-8"))
self.access_token = content["access_token"]
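            # Consider the token expired 10 minutes (600 s) early so it is refreshed before it lapses.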
self.expiration = int(time.time()) + content["expires_in"] - 600
except Exception as e:
msg = (
"Exception while setting access_token for url(%s): %s. "
"content: %s" % (self.url, e.__str__(), content)
)
raise Exception(msg)
def api_call(self, url, data=None, calltype="get", headers=None, save_error=True):
if self.access_token is None or time.time() > self.expiration:
self.METHOD_NAME()
api_auth_header = {
"Authorization": "Bearer " + self.access_token,
}
call = getattr(requests, calltype)
url = "%s/api/%s" % (self.url, url)
try:
if headers is not None:
headers.update(api_auth_header)
if headers["content-type"] == "application/json":
r = call(url, verify=False, data=json.dumps(data), headers=headers)
else:
r = call(url, verify=False, data=data, headers=headers)
else:
r = call(url, verify=False, headers=api_auth_header, data=data)
except requests.exceptions.ConnectionError:
print("Error connecting to Rockstor. Is it running?")
raise
if r.status_code == 404:
msg = "Invalid api end point: %s" % url
raise RockStorAPIException(detail=msg)
if r.status_code != 200:
try:
error_d = json.loads(r.text)
if settings.DEBUG is True and save_error is True:
cur_time = str(int(time.time()))
err_file = "/tmp/err-%s.html" % cur_time
with open(err_file, "w") as efo:
for line in r.text.split("\n"):
efo.write("%s\n" % line)
print("Error detail is saved at %s" % err_file)
if "detail" in error_d:
if (
error_d["detail"]
== "Authentication credentials were not provided."
): # noqa E501
self.METHOD_NAME()
return self.api_call(
url,
data=data,
calltype=calltype,
headers=headers,
save_error=save_error,
)
raise RockStorAPIException(detail=error_d["detail"])
except ValueError as e:
raise RockStorAPIException(
detail="Internal Server Error: %s" % e.__str__()
)
r.raise_for_status()
try:
ret_val = r.json()
except ValueError:
ret_val = {}
return ret_val | null |
5,763 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""HPC generator"""
import sys
import os
import io
import argparse
from itertools import chain
def key_value_pair(line):
"""
split key and value
:param line:
:return:
"""
key = None
value = None
try:
key, value = line.split("=", 1)
except ValueError:
print("line must be format: key=value, but now is:", line)
sys.exit(1)
try:
value = int(value)
except ValueError:
print("Error: you input value must be integer, but now is:", value)
sys.exit(1)
return key, value
def METHOD_NAME(line):
"""
get indent length
:param line:
:return:
"""
index = 0
for i in line:
if i == " ":
index += 1
else:
break
return index
def print_line(line):
"""
Convert line to a python string
:param line:
:return:
"""
global PYTHON_INDENT
global GENERATE_CODE_INDENT
if line.strip()[0] == "}" or line.strip()[0] == ")":
PYTHON_INDENT = -1
split_str = line.split("@")
if line.strip()[0] != "@" and len(split_str) == 1:
if METHOD_NAME(line) == PYTHON_INDENT or PYTHON_INDENT == -1:
result = ["print(", line, ", file=OUT_STREAM)"]
PYTHON_INDENT = -1
if "{" in line or "asm volatile(" in line:
GENERATE_CODE_INDENT = METHOD_NAME(line)
if line.strip().startswith("}") and "{" not in line:
GENERATE_CODE_INDENT -= 4
if len(line) == 1 and line[0] == "}":
# modify next fun GENERATE_CODE_INDENT
GENERATE_CODE_INDENT = -4
return "\"".join(result)
if line.strip()[0] == '@':
# get python indent and first GENERATE_CODE_INDENT
if PYTHON_INDENT == -1:
GENERATE_CODE_INDENT = METHOD_NAME(line) - 4
PYTHON_INDENT = METHOD_NAME(line)
result = split_str[0][PYTHON_INDENT:] + split_str[1]
return result
index = METHOD_NAME(split_str[0])
result = [split_str[0][PYTHON_INDENT:index] + "print("]
prefix = " " * (GENERATE_CODE_INDENT + 4) + split_str[0].lstrip()
suffix = " %("
for str_tmp in split_str[1:]:
second = str_tmp.find("}")
suffix += str_tmp[1:second] + ', '
str_tmp = str_tmp.replace(str_tmp[0:second + 1], "%d")
prefix += str_tmp
result.append(prefix)
result.append(suffix + "), file=OUT_STREAM)")
return "\"".join(result)
def generate_code(template_file, exec_dict):
"""
generate hpc
:param template_file: template file path
:param exec_dict: dict
:return: hpc
"""
output_stream = io.StringIO()
with open(template_file, 'r') as f:
generate_code_lines = []
for line in f:
line = line.replace("\n", "")
if line.strip() and line.strip()[0] != "@":
line = line.replace("\"", "\\\"")
line = line.replace("%", "%%")
if "print" in line:
line = line.replace("%%", "%")
if not line:
generate_code_lines.append("print(" + "\"" + line + "\"" + ", file=OUT_STREAM)")
else:
str = print_line(line)
if "%(" not in str:
str = str.replace("%%[", "%[")
generate_code_lines.append(str)
c = compile('\n'.join(generate_code_lines), '', 'exec')
exec_dict["OUT_STREAM"] = output_stream
exec(c, exec_dict)
return output_stream.getvalue()
def check_python_version():
if sys.version_info < (3, 6):
sys.stdout.write("At least python 3.6 is required, but now is " + str(sys.version_info.major) + "." +
str(sys.version_info.minor) + "\n")
sys.exit(1)
GENERATE_CODE_INDENT = -4
PYTHON_INDENT = -1
parser = argparse.ArgumentParser(description="MSLite NNACL Code Generator")
parser.add_argument("-I", dest="Template_File", nargs=1, help="template file to generate code")
parser.add_argument("-A", dest="defines", metavar="KEY=VALUE", nargs="*", type=key_value_pair, action="append",
help="Custom Parameters")
parser.add_argument("-O", dest="Output_File", nargs=1, help="generate code output file path")
if __name__ == "__main__":
check_python_version()
parameters = parser.parse_args(sys.argv[1:])
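    # -A may be passed several times; flatten every collected KEY=VALUE pair into one dict
    # that becomes the execution namespace for the template code.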
exec_globals = dict(chain(*parameters.defines))
generate_code_str = generate_code(parameters.Template_File[0], exec_globals)
if os.path.exists(parameters.Output_File[0]):
os.remove(parameters.Output_File[0])
saveDir = os.path.dirname(parameters.Output_File[0])
if not os.path.exists(saveDir):
os.mkdir(saveDir)
with open(parameters.Output_File[0], "w", encoding='utf-8') as output_file:
output_file.write(generate_code_str) | null |
5,764 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import uuid
from unittest.mock import patch
from pgadmin.browser.server_groups.servers.databases.schemas \
.fts_configurations.tests import utils as fts_config_utils
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils import server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression import trigger_funcs_utils as fts_config_funcs_utils
from regression.python_test_utils import test_utils as utils
from . import utils as fts_configurations_utils
class FTSConfigurationDependencyDependentTestCase(BaseTestGenerator):
""" This class will get the dependency and dependents FTS configuration
under test schema. """
scenarios = utils.generate_scenarios(
'get_fts_configuration_get_dictionaries',
fts_configurations_utils.test_cases
)
def METHOD_NAME(self):
super().METHOD_NAME()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.extension_name = "postgres_fdw"
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.db_user = self.server["username"]
self.func_name = "fts_configuration_func_%s" % str(uuid.uuid4())[1:8]
self.fts_configuration_name = "fts_configuration_delete_%s" % (
str(uuid.uuid4())[1:8])
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add resource "
"groups.")
server_version = 0
if "type" in server_con["data"]:
if server_con["data"]["version"] < 90500:
message = "FTS Configuration are not supported by PG9.4 " \
"and PPAS9.4 and below."
self.skipTest(message)
self.function_info = fts_config_funcs_utils.create_trigger_function(
self.server, self.db_name, self.schema_name, self.func_name,
server_version)
self.fts_configuration_id = fts_configurations_utils. \
create_fts_configuration(
self.server, self.db_name, self.schema_name,
self.fts_configuration_name)
def get_fts_configuration_dictionaries(self):
"""
This functions returns the fts configuration dictionaries
:return: fts configuration dictionaries
"""
return self.tester.get(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' + str(self.schema_id) + '/',
content_type='html/json')
def runTest(self):
""" This function will add new FTS configuration under test schema. """
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
fts_conf_response = fts_configurations_utils.verify_fts_configuration(
self.server, self.db_name, self.fts_configuration_name
)
if not fts_conf_response:
raise Exception("Could not find the FTS Configuration.")
if self.is_positive_test:
response = self.get_fts_configuration_dictionaries()
else:
if hasattr(self, "error_fetching_fts_configuration"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.get_fts_configuration_dictionaries()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
"""This function delete the fts_config and disconnect the test
database."""
fts_config_utils.delete_fts_configurations(self.server, self.db_name,
self.schema_name,
self.fts_configuration_name)
database_utils.disconnect_database(self, self.server_id,
self.db_id) | null |
5,765 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import SANSUserFileParser as UserFileParser
class BackCommandParserTest(unittest.TestCase):
def test_can_parse_correctly_initial_command(self):
# Arrange
correct1 = "TImE /sdlf/sdf" # Correct MAIN
correct2 = "UAMp/sdlf/sdf" # Correct HAB
correct3 = "MON/RUN=1234/sdf/sdf" # Correct Mon/RUN=
parser = UserFileParser.BackCommandParser()
# Act and assert
self.assertTrue(parser.can_attempt_to_parse(correct1))
self.assertTrue(parser.can_attempt_to_parse(correct2))
self.assertTrue(parser.can_attempt_to_parse(correct3))
def test_cannot_parse_correctly_initial_command(self):
# Arrange
correct1 = "FRoNT=/sdlf/sdf" # Wrong specifier
correct2 = "MON/sdf/sdf/sdf" # No run number
correct3 = "Time/sdf" # Correct first but incorrect length
parser = UserFileParser.BackCommandParser()
# Act and assert
self.assertFalse(parser.can_attempt_to_parse(correct1))
self.assertFalse(parser.can_attempt_to_parse(correct2))
self.assertFalse(parser.can_attempt_to_parse(correct3))
def test_that_can_parse_TIME_MEAN_RUN(self):
argument = "TIME/ mEAN/RuN=SANS2D1111111"
uniform = True
mean = True
run_number = "SANS2D1111111"
is_mon = False
mon_number = None
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_UAMP_TOF_RUN(self):
argument = "Uamp/ToF /Run=2222"
uniform = False
mean = False
run_number = "2222"
is_mon = False
mon_number = None
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_TIME_LOQ_RUN(self):
argument = "TIME/tof/run=LOQ33333333"
uniform = True
mean = False
run_number = "LOQ33333333"
is_mon = False
mon_number = None
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_UAMP_MEAN_RUN(self):
argument = " UAMP/mean /RuN=444444444"
uniform = False
mean = True
run_number = "444444444"
is_mon = False
mon_number = None
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def test_that_can_parse_MON_RUN_TIME_MEAN(self):
argument = "MON/RUN=123124/time/mean"
uniform = True
mean = True
run_number = "123124"
is_mon = True
mon_number = None
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def test_rejects_bad_first_value(self):
argument = "GUN/RUN=123124/time/mean "
self.do_test_parsing_fails(argument)
def test_rejects_bad_value(self):
argument = "mean/UAMP//RuN=444444444"
self.do_test_parsing_fails(argument)
def test_rejects_bad_second_value(self):
argument = "UAMP/meanTT/RuN=444444444"
self.do_test_parsing_fails(argument)
def test_rejects_bad_third_value(self):
argument = "UAMP/mean/RuN 44444"
self.do_test_parsing_fails(argument)
def test_that_can_pars_M3_RUN_TIME_MEAN(self):
argument = "M3/RUN=123124/time/mean"
uniform = True
mean = True
run_number = "123124"
is_mon = True
mon_number = 3
self.METHOD_NAME(argument, uniform, mean, run_number, is_mon, mon_number)
def METHOD_NAME(self, arguments, expected_uniform, expected_mean, expected_run_number, is_mon, expected_mon_number):
# Arrange
parser = UserFileParser.BackCommandParser()
# Act
result = parser.parse_and_set(arguments)
# Assert
self.assertEqual(result.mean, expected_mean)
self.assertEqual(result.time, expected_uniform)
self.assertEqual(result.mon, is_mon)
self.assertEqual(result.run_number, expected_run_number)
self.assertEqual(result.mon_number, expected_mon_number)
def do_test_parsing_fails(self, arguments):
# Arrange
parser = UserFileParser.BackCommandParser()
# Act
args = [arguments]
self.assertRaises(RuntimeError, parser.parse_and_set, *args)
if __name__ == "__main__":
unittest.main() | null |
5,766 | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the custom scikit-learn Evaluator module."""
import os
import pickle
import apache_beam as beam
from apache_beam.testing import util
from sklearn import neural_network as nn
import tensorflow_model_analysis as tfma
from tfx.examples.penguin.experimental import sklearn_predict_extractor
from tfx_bsl.tfxio import tensor_adapter
from tfx_bsl.tfxio import test_util
from google.protobuf import text_format
from tensorflow_metadata.proto.v0 import schema_pb2
class SklearnPredictExtractorTest(tfma.test.TestCase):
def setUp(self):
super().setUp()
self._eval_export_dir = os.path.join(self._getTempDir(), 'eval_export')
self._create_sklearn_model(self._eval_export_dir)
self._eval_config = tfma.EvalConfig(model_specs=[tfma.ModelSpec()])
self._eval_shared_model = (
sklearn_predict_extractor.custom_eval_shared_model(
eval_saved_model_path=self._eval_export_dir,
model_name=None,
eval_config=self._eval_config))
self._schema = text_format.Parse(
"""
feature {
name: "age"
type: FLOAT
}
feature {
name: "language"
type: FLOAT
}
feature {
name: "label"
type: INT
}
""", schema_pb2.Schema())
self._tfx_io = test_util.InMemoryTFExampleRecord(
schema=self._schema,
raw_record_column_name=tfma.ARROW_INPUT_COLUMN)
self._tensor_adapter_config = tensor_adapter.TensorAdapterConfig(
arrow_schema=self._tfx_io.ArrowSchema(),
tensor_representations=self._tfx_io.TensorRepresentations())
self._examples = [
self._makeExample(age=3.0, language=1.0, label=1),
self._makeExample(age=3.0, language=0.0, label=0),
self._makeExample(age=4.0, language=1.0, label=1),
self._makeExample(age=5.0, language=0.0, label=0),
]
def testMakeSklearnPredictExtractor(self):
"""Tests that predictions are made from extracts for a single model."""
feature_extractor = tfma.extractors.FeaturesExtractor(self._eval_config)
prediction_extractor = (
sklearn_predict_extractor._make_sklearn_predict_extractor(
self._eval_shared_model))
with beam.Pipeline() as pipeline:
predict_extracts = (
pipeline
| 'Create' >> beam.Create(
[e.SerializeToString() for e in self._examples])
| 'BatchExamples' >> self._tfx_io.BeamSource()
| 'InputsToExtracts' >> tfma.BatchedInputsToExtracts() # pylint: disable=no-value-for-parameter
| feature_extractor.stage_name >> feature_extractor.ptransform
| prediction_extractor.stage_name >> prediction_extractor.ptransform
)
def check_result(actual):
try:
for item in actual:
self.assertEqual(item['labels'].shape, item['predictions'].shape)
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(predict_extracts, check_result)
def METHOD_NAME(self):
"""Tests that predictions are made from extracts for multiple models."""
eval_config = tfma.EvalConfig(model_specs=[
tfma.ModelSpec(name='model1'),
tfma.ModelSpec(name='model2'),
])
eval_export_dir_1 = os.path.join(self._eval_export_dir, '1')
self._create_sklearn_model(eval_export_dir_1)
eval_shared_model_1 = sklearn_predict_extractor.custom_eval_shared_model(
eval_saved_model_path=eval_export_dir_1,
model_name='model1',
eval_config=eval_config)
eval_export_dir_2 = os.path.join(self._eval_export_dir, '2')
self._create_sklearn_model(eval_export_dir_2)
eval_shared_model_2 = sklearn_predict_extractor.custom_eval_shared_model(
eval_saved_model_path=eval_export_dir_2,
model_name='model2',
eval_config=eval_config)
feature_extractor = tfma.extractors.FeaturesExtractor(self._eval_config)
prediction_extractor = (
sklearn_predict_extractor._make_sklearn_predict_extractor(
eval_shared_model={
'model1': eval_shared_model_1,
'model2': eval_shared_model_2,
}))
with beam.Pipeline() as pipeline:
predict_extracts = (
pipeline
| 'Create' >> beam.Create(
[e.SerializeToString() for e in self._examples])
| 'BatchExamples' >> self._tfx_io.BeamSource()
| 'InputsToExtracts' >> tfma.BatchedInputsToExtracts() # pylint: disable=no-value-for-parameter
| feature_extractor.stage_name >> feature_extractor.ptransform
| prediction_extractor.stage_name >> prediction_extractor.ptransform
)
def check_result(actual):
try:
for item in actual:
self.assertEqual(item['labels'].shape, item['predictions'].shape)
self.assertIn('model1', item['predictions'][0])
self.assertIn('model2', item['predictions'][0])
except AssertionError as err:
raise util.BeamAssertException(err)
util.assert_that(predict_extracts, check_result)
def test_custom_eval_shared_model(self):
"""Tests that an EvalSharedModel is created with a custom sklearn loader."""
model_file = os.path.basename(self._eval_shared_model.model_path)
self.assertEqual(model_file, 'model.pkl')
model = self._eval_shared_model.model_loader.construct_fn()
self.assertIsInstance(model, nn.MLPClassifier)
def test_custom_extractors(self):
"""Tests that the sklearn extractor is used when creating extracts."""
extractors = sklearn_predict_extractor.custom_extractors(
self._eval_shared_model, self._eval_config, self._tensor_adapter_config)
self.assertLen(extractors, 6)
self.assertIn(
'SklearnPredict', [extractor.stage_name for extractor in extractors])
def _create_sklearn_model(self, eval_export_dir):
"""Creates and pickles a toy scikit-learn model.
Args:
eval_export_dir: Directory to store a pickled scikit-learn model. This
directory is created if it does not exist.
"""
x_train = [[3, 0], [4, 1]]
y_train = [0, 1]
model = nn.MLPClassifier(max_iter=1)
model.feature_keys = ['age', 'language']
model.label_key = 'label'
model.fit(x_train, y_train)
os.makedirs(eval_export_dir)
model_path = os.path.join(eval_export_dir, 'model.pkl')
with open(model_path, 'wb+') as f:
pickle.dump(model, f) | null |
5,767 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2021] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpeOneView.resources.resource import Resource, ResourceSchemaMixin
class IdPools(Resource, ResourceSchemaMixin):
"""
Class for Id Pools API client.
"""
URI = '/rest/id-pools'
def __init__(self, connection, data=None):
super(IdPools, self).__init__(connection, data)
def get_pool_type(self, pool_type):
"""
Gets a pool along with the list of ranges present in it
Args:
pool_type: Id pool type
Returns:
dict: List of ranges
"""
uri = self._helper.build_uri(pool_type)
return super(IdPools, self).get_by_uri(uri)
def update_pool_type(self, data, pool_type, timeout=-1):
"""
Enables or disables the pool
Args:
data: List of ID ranges
pool_type: Id pool type
Returns:
dict: Updated Resource.
"""
uri = self._helper.build_uri(pool_type)
return self._helper.update(data, uri, timeout=timeout)
def validate_id_pool(self, pool_type, ids_pools):
"""
Validates an ID pool.
Args:
pool_type: Id pool type
ids_pools (list):
List of Id Pools.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._helper.build_uri(pool_type) + "/validate?idList=" + "&idList=".join(ids_pools)
return super(IdPools, self).get_by_uri(uri)
def validate(self, information, pool_type, timeout=-1):
"""
Validates a set of user specified IDs to reserve in the pool.
This API can be used to check if the specified IDs can be allocated.
Args:
information (dict):
Information to update. Can result in system specified IDs or the system reserving user-specified IDs.
pool_type: Id pool type
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._helper.build_uri(pool_type) + "/validate"
return self._helper.update(information, uri, timeout=timeout)
def METHOD_NAME(self, information, pool_type, timeout=-1):
"""
Allocates a set of IDs from range.
The allocator returned contains the list of IDs successfully allocated.
Args:
information (dict):
Information to update. Can result in system specified IDs or the system reserving user-specified IDs.
pool_type: Id pool type
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._helper.build_uri(pool_type) + "/allocator"
return self._helper.update(information, uri, timeout=timeout)
def collect(self, information, pool_type, timeout=-1):
"""
Collects one or more IDs to be returned to a pool.
Args:
information (dict):
The list of IDs to be collected
pool_type: Id pool type
timeout:
Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: Collector containing list of collected IDs successfully collected.
"""
uri = self._helper.build_uri(pool_type) + "/collector"
return self._helper.update(information, uri, timeout=timeout)
def get_check_range_availability(self, pool_type, ids_pools):
"""
Checks the range availability in the ID pool.
Args:
pool_type: Id pool type
ids_pools (list):
List of Id Pools.
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._helper.build_uri(pool_type) + "/checkrangeavailability?idList=" + "&idList=".join(ids_pools)
return super(IdPools, self).get_by_uri(uri)
def generate(self, pool_type):
"""
Generates and returns a random range.
Args:
pool_type: Id pool type
Returns:
dict: A dict containing a list with IDs.
"""
uri = self._helper.build_uri(pool_type) + "/generate"
return super(IdPools, self).get_by_uri(uri) | null |
5,768 | import binascii
from typing import Callable, Optional, Tuple
from .._crypto import AEAD, CryptoError, HeaderProtection
from ..tls import CipherSuite, cipher_suite_hash, hkdf_expand_label, hkdf_extract
from .packet import decode_packet_number, is_draft_version, is_long_header
CIPHER_SUITES = {
CipherSuite.AES_128_GCM_SHA256: (b"aes-128-ecb", b"aes-128-gcm"),
CipherSuite.AES_256_GCM_SHA384: (b"aes-256-ecb", b"aes-256-gcm"),
CipherSuite.CHACHA20_POLY1305_SHA256: (b"chacha20", b"chacha20-poly1305"),
}
INITIAL_CIPHER_SUITE = CipherSuite.AES_128_GCM_SHA256
INITIAL_SALT_DRAFT_29 = binascii.unhexlify("afbfec289993d24c9e9786f19c6111e04390a899")
INITIAL_SALT_VERSION_1 = binascii.unhexlify("38762cf7f55934b34d179ae6a4c80cadccbb7f0a")
SAMPLE_SIZE = 16
Callback = Callable[[str], None]
def NoCallback(trigger: str) -> None:
pass
class KeyUnavailableError(CryptoError):
pass
def derive_key_iv_hp(
cipher_suite: CipherSuite, secret: bytes
) -> Tuple[bytes, bytes, bytes]:
algorithm = cipher_suite_hash(cipher_suite)
if cipher_suite in [
CipherSuite.AES_256_GCM_SHA384,
CipherSuite.CHACHA20_POLY1305_SHA256,
]:
key_size = 32
else:
key_size = 16
return (
hkdf_expand_label(algorithm, secret, b"quic key", b"", key_size),
hkdf_expand_label(algorithm, secret, b"quic iv", b"", 12),
hkdf_expand_label(algorithm, secret, b"quic hp", b"", key_size),
)
class CryptoContext:
def __init__(
self,
key_phase: int = 0,
setup_cb: Callback = NoCallback,
teardown_cb: Callback = NoCallback,
) -> None:
self.aead: Optional[AEAD] = None
self.cipher_suite: Optional[CipherSuite] = None
self.hp: Optional[HeaderProtection] = None
self.key_phase = key_phase
self.secret: Optional[bytes] = None
self.version: Optional[int] = None
self._setup_cb = setup_cb
self._teardown_cb = teardown_cb
def decrypt_packet(
self, packet: bytes, encrypted_offset: int, expected_packet_number: int
) -> Tuple[bytes, bytes, int, bool]:
if self.aead is None:
raise KeyUnavailableError("Decryption key is not available")
# header protection
plain_header, packet_number = self.hp.remove(packet, encrypted_offset)
first_byte = plain_header[0]
# packet number
pn_length = (first_byte & 0x03) + 1
packet_number = decode_packet_number(
packet_number, pn_length * 8, expected_packet_number
)
# detect key phase change
crypto = self
if not is_long_header(first_byte):
key_phase = (first_byte & 4) >> 2
if key_phase != self.key_phase:
crypto = next_key_phase(self)
# payload protection
payload = crypto.aead.decrypt(
packet[len(plain_header) :], plain_header, packet_number
)
return plain_header, payload, packet_number, crypto != self
def encrypt_packet(
self, plain_header: bytes, plain_payload: bytes, packet_number: int
) -> bytes:
assert self.is_valid(), "Encryption key is not available"
# payload protection
protected_payload = self.aead.encrypt(
plain_payload, plain_header, packet_number
)
# header protection
return self.hp.apply(plain_header, protected_payload)
def is_valid(self) -> bool:
return self.aead is not None
def setup(self, cipher_suite: CipherSuite, secret: bytes, version: int) -> None:
hp_cipher_name, aead_cipher_name = CIPHER_SUITES[cipher_suite]
key, iv, hp = derive_key_iv_hp(cipher_suite, secret)
self.aead = AEAD(aead_cipher_name, key, iv)
self.cipher_suite = cipher_suite
self.hp = HeaderProtection(hp_cipher_name, hp)
self.secret = secret
self.version = version
# trigger callback
self._setup_cb("tls")
def teardown(self) -> None:
self.aead = None
self.cipher_suite = None
self.hp = None
self.secret = None
# trigger callback
self._teardown_cb("tls")
def apply_key_phase(self: CryptoContext, crypto: CryptoContext, trigger: str) -> None:
self.aead = crypto.aead
self.key_phase = crypto.key_phase
self.secret = crypto.secret
# trigger callback
self._setup_cb(trigger)
def next_key_phase(self: CryptoContext) -> CryptoContext:
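    # Derive the traffic secret for the next key generation ("quic ku" label) and
    # return a context with the opposite key phase bit.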
algorithm = cipher_suite_hash(self.cipher_suite)
crypto = CryptoContext(key_phase=int(not self.key_phase))
crypto.setup(
cipher_suite=self.cipher_suite,
secret=hkdf_expand_label(
algorithm, self.secret, b"quic ku", b"", algorithm.digest_size
),
version=self.version,
)
return crypto
class CryptoPair:
def __init__(
self,
recv_setup_cb: Callback = NoCallback,
recv_teardown_cb: Callback = NoCallback,
send_setup_cb: Callback = NoCallback,
send_teardown_cb: Callback = NoCallback,
) -> None:
self.aead_tag_size = 16
self.recv = CryptoContext(setup_cb=recv_setup_cb, teardown_cb=recv_teardown_cb)
self.send = CryptoContext(setup_cb=send_setup_cb, teardown_cb=send_teardown_cb)
self._update_key_requested = False
def decrypt_packet(
self, packet: bytes, encrypted_offset: int, expected_packet_number: int
) -> Tuple[bytes, bytes, int]:
plain_header, payload, packet_number, update_key = self.recv.decrypt_packet(
packet, encrypted_offset, expected_packet_number
)
if update_key:
self.METHOD_NAME("remote_update")
return plain_header, payload, packet_number
def encrypt_packet(
self, plain_header: bytes, plain_payload: bytes, packet_number: int
) -> bytes:
if self._update_key_requested:
self.METHOD_NAME("local_update")
return self.send.encrypt_packet(plain_header, plain_payload, packet_number)
def setup_initial(self, cid: bytes, is_client: bool, version: int) -> None:
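        # Initial secrets are derived from the destination connection ID and a version-specific
        # salt; the client encrypts with the "client in" secret and decrypts with "server in",
        # and the server does the reverse.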
if is_client:
recv_label, send_label = b"server in", b"client in"
else:
recv_label, send_label = b"client in", b"server in"
if is_draft_version(version):
initial_salt = INITIAL_SALT_DRAFT_29
else:
initial_salt = INITIAL_SALT_VERSION_1
algorithm = cipher_suite_hash(INITIAL_CIPHER_SUITE)
initial_secret = hkdf_extract(algorithm, initial_salt, cid)
self.recv.setup(
cipher_suite=INITIAL_CIPHER_SUITE,
secret=hkdf_expand_label(
algorithm, initial_secret, recv_label, b"", algorithm.digest_size
),
version=version,
)
self.send.setup(
cipher_suite=INITIAL_CIPHER_SUITE,
secret=hkdf_expand_label(
algorithm, initial_secret, send_label, b"", algorithm.digest_size
),
version=version,
)
def teardown(self) -> None:
self.recv.teardown()
self.send.teardown()
def update_key(self) -> None:
self._update_key_requested = True
@property
def key_phase(self) -> int:
if self._update_key_requested:
return int(not self.recv.key_phase)
else:
return self.recv.key_phase
def METHOD_NAME(self, trigger: str) -> None:
apply_key_phase(self.recv, next_key_phase(self.recv), trigger=trigger)
apply_key_phase(self.send, next_key_phase(self.send), trigger=trigger)
self._update_key_requested = False | null |
5,769 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
import os
from xml.dom import minidom, Node
from googletest.test import gtest_test_utils
from googletest.test import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" file="gtest_xml_outfile1_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyOne">
<properties>
<property name="SetUpProp" value="1"/>
<property name="TestSomeProperty" value="1"/>
<property name="TearDownProp" value="1"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" skipped="0" disabled="0" errors="0" time="*" timestamp="*">
<testcase name="TestSomeProperties" file="gtest_xml_outfile2_test_.cc" line="41" status="run" result="completed" time="*" timestamp="*" classname="PropertyTwo">
<properties>
<property name="SetUpProp" value="2"/>
<property name="TestSomeProperty" value="2"/>
<property name="TearDownProp" value="2"/>
</properties>
</testcase>
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
"""Unit test for Google Test's XML output functionality."""
def setUp(self):
# We want the trailing '/' that the last "" provides in os.path.join, for
# telling Google Test to create an output directory instead of a single file
# for xml output.
self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
GTEST_OUTPUT_SUBDIR, "")
self.DeleteFilesAndDir()
def tearDown(self):
self.DeleteFilesAndDir()
def DeleteFilesAndDir(self):
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
except os.error:
pass
try:
os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
except os.error:
pass
try:
os.rmdir(self.output_dir_)
except os.error:
pass
def testOutfile1(self):
self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)
def METHOD_NAME(self):
self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)
def _TestOutFile(self, test_name, expected_xml):
gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
p = gtest_test_utils.Subprocess(command,
working_dir=gtest_test_utils.GetTempDir())
self.assert_(p.exited)
self.assertEquals(0, p.exit_code)
output_file_name1 = test_name + ".xml"
output_file1 = os.path.join(self.output_dir_, output_file_name1)
output_file_name2 = 'lt-' + output_file_name1
output_file2 = os.path.join(self.output_dir_, output_file_name2)
self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
output_file1)
expected = minidom.parseString(expected_xml)
if os.path.isfile(output_file1):
actual = minidom.parse(output_file1)
else:
actual = minidom.parse(output_file2)
self.NormalizeXml(actual.documentElement)
self.AssertEquivalentNodes(expected.documentElement,
actual.documentElement)
expected.unlink()
actual.unlink()
if __name__ == "__main__":
os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
gtest_test_utils.Main() | null |
5,770 | # extracted from ac2
import logging
import uuid
from ipsw_parser.build_identity import BuildIdentity
logger = logging.getLogger(__name__)
SUPPORTED_DATA_TYPES = {
'BasebandBootData': False,
'BasebandData': False,
'BasebandStackData': False,
'BasebandUpdaterOutputData': False,
'BootabilityBundle': False,
'BuildIdentityDict': False,
'BuildIdentityDictV2': False,
'DataType': False,
'DiagData': False,
'EANData': False,
'FDRMemoryCommit': False,
'FDRTrustData': False,
'FUDData': False,
'FileData': False,
'FileDataDone': False,
'FirmwareUpdaterData': False,
'GrapeFWData': False,
'HPMFWData': False,
'HostSystemTime': True,
'KernelCache': False,
'NORData': False,
'NitrogenFWData': True,
'OpalFWData': False,
'OverlayRootDataCount': False,
'OverlayRootDataForKey': True,
'PeppyFWData': True,
'PersonalizedBootObjectV3': False,
'PersonalizedData': True,
'ProvisioningData': False,
'RamdiskFWData': True,
'RecoveryOSASRImage': True,
'RecoveryOSAppleLogo': True,
'RecoveryOSDeviceTree': True,
'RecoveryOSFileAssetImage': True,
'RecoveryOSIBEC': True,
'RecoveryOSIBootFWFilesImages': True,
'RecoveryOSImage': True,
'RecoveryOSKernelCache': True,
'RecoveryOSLocalPolicy': True,
'RecoveryOSOverlayRootDataCount': False,
'RecoveryOSRootTicketData': True,
'RecoveryOSStaticTrustCache': True,
'RecoveryOSVersionData': True,
'RootData': False,
'RootTicket': False,
'S3EOverride': False,
'SourceBootObjectV3': False,
'SourceBootObjectV4': False,
'SsoServiceTicket': False,
'StockholmPostflight': False,
'SystemImageCanonicalMetadata': False,
'SystemImageData': False,
'SystemImageRootHash': False,
'USBCFWData': False,
'USBCOverride': False,
'FirmwareUpdaterPreflight': True,
'ReceiptManifest': True,
'FirmwareUpdaterDataV2': False,
'RestoreLocalPolicy': True,
'AuthInstallCACert': True,
'OverlayRootDataForKeyIndex': True,
}
# extracted from ac2
SUPPORTED_MESSAGE_TYPES = {
'BBUpdateStatusMsg': False,
'CheckpointMsg': True,
'DataRequestMsg': False,
'FDRSubmit': True,
'MsgType': False,
'PreviousRestoreLogMsg': False,
'ProgressMsg': False,
'ProvisioningAck': False,
'ProvisioningInfo': False,
'ProvisioningStatusMsg': False,
'ReceivedFinalStatusMsg': False,
'RestoredCrash': True,
'StatusMsg': False,
}
class RestoreOptions:
def __init__(self, preflight_info=None, sep=None, macos_variant=None, build_identity: BuildIdentity = None,
restore_boot_args=None, spp=None, restore_behavior: str = None, msp=None):
self.AutoBootDelay = 0
if preflight_info is not None:
bbus = dict(preflight_info)
bbus.pop('FusingStatus')
bbus.pop('PkHash')
self.BBUpdaterState = bbus
nonce = preflight_info.get('Nonce')
if nonce is not None:
self.BasebandNonce = nonce
self.SupportedDataTypes = SUPPORTED_DATA_TYPES
self.SupportedMessageTypes = SUPPORTED_MESSAGE_TYPES
# FIXME: Should be adjusted for update behaviors
if macos_variant:
self.AddSystemPartitionPadding = True
self.AllowUntetheredRestore = False
self.AuthInstallEnableSso = False
macos_variant = build_identity.macos_variant
if macos_variant is not None:
self.AuthInstallRecoveryOSVariant = macos_variant
self.AuthInstallRestoreBehavior = restore_behavior
self.AutoBootDelay = 0
self.BasebandUpdaterOutputPath = True
self.DisableUserAuthentication = True
self.FitSystemPartitionToContent = True
self.FlashNOR = True
self.FormatForAPFS = True
self.FormatForLwVM = False
self.InstallDiags = False
self.InstallRecoveryOS = True
self.MacOSSwapPerformed = True
self.MacOSVariantPresent = True
self.MinimumBatteryVoltage = 0 # FIXME: Should be adjusted for M1 macbooks (if needed)
self.RecoveryOSUnpack = True
self.ShouldRestoreSystemImage = True
self.SkipPreflightPersonalization = False
self.UpdateBaseband = True
# FIXME: I don't know where this number comes from yet.
# It seems like it matches this part of the build identity:
# <key>OSVarContentSize</key>
# <integer>573751296</integer>
# It did work with multiple macOS versions
self.recoveryOSPartitionSize = 58201
if msp:
self.SystemPartitionSize = msp
else:
self.BootImageType = 'UserOrInternal'
self.DFUFileType = 'RELEASE'
self.DataImage = False
self.FirmwareDirectory = '.'
self.FlashNOR = True
self.KernelCacheType = 'Release'
self.NORImageType = 'production'
self.RestoreBundlePath = '/tmp/Per2.tmp'
self.SystemImageType = 'User'
self.UpdateBaseband = False
if sep is not None:
required_capacity = sep.get('RequiredCapacity')
if required_capacity:
logger.debug(f'TZ0RequiredCapacity: {required_capacity}')
self.TZ0RequiredCapacity = required_capacity
self.PersonalizedDuringPreflight = True
self.RootToInstall = False
self.UUID = str(uuid.uuid4()).upper()
self.CreateFilesystemPartitions = True
self.SystemImage = True
if restore_boot_args is not None:
self.RestoreBootArgs = restore_boot_args
if spp:
spp = dict(spp)
else:
spp = {'128': 1280, '16': 160, '32': 320, '64': 640, '8': 80}
self.SystemPartitionPadding = spp
def METHOD_NAME(self):
return self.__dict__ | null |
5,771 | #!/usr/bin/env python3
"""Analyze the test outcomes from a full CI run.
This script can also run on outcomes from a partial run, but the results are
less likely to be useful.
"""
import argparse
import re
import sys
import traceback
import check_test_cases
class Results:
"""Process analysis results."""
def __init__(self):
self.error_count = 0
self.warning_count = 0
@staticmethod
def log(fmt, *args, **kwargs):
sys.stderr.write((fmt + '\n').format(*args, **kwargs))
def error(self, fmt, *args, **kwargs):
self.log('Error: ' + fmt, *args, **kwargs)
self.error_count += 1
def warning(self, fmt, *args, **kwargs):
self.log('Warning: ' + fmt, *args, **kwargs)
self.warning_count += 1
class TestCaseOutcomes:
"""The outcomes of one test case across many configurations."""
# pylint: disable=too-few-public-methods
def __init__(self):
# Collect a list of witnesses of the test case succeeding or failing.
# Currently we don't do anything with witnesses except count them.
# The format of a witness is determined by the read_outcome_file
# function; it's the platform and configuration joined by ';'.
self.successes = []
self.failures = []
def hits(self):
"""Return the number of times a test case has been run.
This includes passes and failures, but not skips.
"""
return len(self.successes) + len(self.failures)
class TestDescriptions(check_test_cases.TestDescriptionExplorer):
"""Collect the available test cases."""
def __init__(self):
super().__init__()
self.descriptions = set()
def process_test_case(self, _per_file_state,
file_name, _line_number, description):
"""Record an available test case."""
base_name = re.sub(r'\.[^.]*$', '', re.sub(r'.*/', '', file_name))
key = ';'.join([base_name, description.decode('utf-8')])
self.descriptions.add(key)
def METHOD_NAME():
"""Collect the available test cases."""
explorer = TestDescriptions()
explorer.walk_all()
return sorted(explorer.descriptions)
def analyze_coverage(results, outcomes):
"""Check that all available test cases are executed at least once."""
available = METHOD_NAME()
for key in available:
hits = outcomes[key].hits() if key in outcomes else 0
if hits == 0:
# Make this a warning, not an error, as long as we haven't
# fixed this branch to have full coverage of test cases.
results.warning('Test case not executed: {}', key)
def analyze_outcomes(outcomes):
"""Run all analyses on the given outcome collection."""
results = Results()
analyze_coverage(results, outcomes)
return results
def read_outcome_file(outcome_file):
"""Parse an outcome file and return an outcome collection.
An outcome collection is a dictionary mapping keys to TestCaseOutcomes objects.
The keys are the test suite name and the test case description, separated
by a semicolon.
"""
outcomes = {}
with open(outcome_file, 'r', encoding='utf-8') as input_file:
for line in input_file:
(platform, config, suite, case, result, _cause) = line.split(';')
key = ';'.join([suite, case])
setup = ';'.join([platform, config])
if key not in outcomes:
outcomes[key] = TestCaseOutcomes()
if result == 'PASS':
outcomes[key].successes.append(setup)
elif result == 'FAIL':
outcomes[key].failures.append(setup)
return outcomes
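# Illustrative example (values are made up, not taken from a real outcome file):
# a line such as
#   Linux-x86_64;full-config;test_suite_aes;AES-128-ECB Encrypt;PASS;
# is stored under the key "test_suite_aes;AES-128-ECB Encrypt", with the setup
# witness "Linux-x86_64;full-config" appended to that entry's `successes` list.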
def analyze_outcome_file(outcome_file):
"""Analyze the given outcome file."""
outcomes = read_outcome_file(outcome_file)
return analyze_outcomes(outcomes)
def main():
try:
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('outcomes', metavar='OUTCOMES.CSV',
help='Outcome file to analyze')
options = parser.parse_args()
results = analyze_outcome_file(options.outcomes)
if results.error_count > 0:
sys.exit(1)
except Exception: # pylint: disable=broad-except
# Print the backtrace and exit explicitly with our chosen status.
traceback.print_exc()
sys.exit(120)
if __name__ == '__main__':
main() | null |
5,772 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QWidget
from mantid.plots.legend import LegendProperties
from mantidqt.utils.qt import load_ui
from mantidqt.widgets.plotconfigdialog.colorselector import ColorSelector
from mantidqt.widgets.plotconfigdialog.legendtabwidget.advancedlegendoptionsdialog.view import AdvancedLegendOptionsView
class LegendTabWidgetView(QWidget):
def __init__(self, parent=None):
super(LegendTabWidgetView, self).__init__(parent=parent)
self.ui = load_ui(__file__, "legend_tab.ui", baseinstance=self)
self.background_color_selector_widget = ColorSelector(parent=self)
self.edge_color_selector_widget = ColorSelector(parent=self)
self.title_color_selector_widget = ColorSelector(parent=self)
self.entries_color_selector_widget = ColorSelector(parent=self)
self.grid_layout.replaceWidget(self.background_color_selector_dummy_widget, self.background_color_selector_widget)
self.grid_layout.replaceWidget(self.edge_color_selector_dummy_widget, self.edge_color_selector_widget)
self.grid_layout.replaceWidget(self.entries_color_selector_dummy_widget, self.entries_color_selector_widget)
self.grid_layout.replaceWidget(self.title_color_selector_dummy_widget, self.title_color_selector_widget)
self.setAttribute(Qt.WA_DeleteOnClose, True)
self.advanced_options = AdvancedLegendOptionsView(self)
def set_transparency_slider(self, transparency):
self.transparency_slider.setValue(transparency)
def get_transparency_slider_value(self):
return self.transparency_slider.value()
def set_transparency_spin_box(self, transparency):
self.transparency_spin_box.setValue(transparency)
def get_transparency_spin_box_value(self):
return self.transparency_spin_box.value()
def hide_transparency(self):
self.transparency_label.setHidden(True)
self.transparency_slider.setHidden(True)
self.transparency_spin_box.setHidden(True)
def set_title(self, title):
self.title_line_edit.setText(title)
def get_title(self):
return self.title_line_edit.text()
def set_background_color(self, color):
self.background_color_selector_widget.set_color(color)
def get_background_color(self):
return self.background_color_selector_widget.get_color()
def set_edge_color(self, color):
self.edge_color_selector_widget.set_color(color)
def get_edge_color(self):
return self.edge_color_selector_widget.get_color()
def set_entries_font(self, font):
self.entries_font_combo_box.setCurrentText(font)
def get_entries_font(self):
return self.entries_font_combo_box.currentText()
def set_entries_size(self, size):
self.entries_size_spin_box.setValue(size)
def get_entries_size(self):
return self.entries_size_spin_box.value()
def set_entries_color(self, color):
self.entries_color_selector_widget.set_color(color)
def get_entries_color(self):
return self.entries_color_selector_widget.get_color()
def set_title_font(self, font):
self.title_font_combo_box.setCurrentText(font)
def get_title_font(self):
return self.title_font_combo_box.currentText()
def set_title_size(self, size):
self.title_size_spin_box.setValue(size)
def get_title_size(self):
return self.title_size_spin_box.value()
def METHOD_NAME(self, color):
self.title_color_selector_widget.set_color(color)
def get_title_color(self):
return self.title_color_selector_widget.get_color()
def set_marker_size(self, size):
self.marker_size_spin_box.setValue(size)
def get_marker_size(self):
return self.marker_size_spin_box.value()
def get_hide_box(self):
return self.hide_box_check_box.isChecked()
def set_hide_box(self, hide):
self.hide_box_check_box.setChecked(hide)
def get_hide_legend(self):
return self.hide_legend_check_box.isChecked()
def set_hide_legend(self, hide):
self.hide_legend_check_box.setChecked(hide)
def get_properties(self):
props = LegendProperties.from_view(self)
advanced_props = self.advanced_options.get_properties()
props.update(advanced_props)
return props
def hide_box_properties(self):
self.box_label.setHidden(True)
self.hide_box_check_box.setHidden(True)
self.background_color_label.setHidden(True)
self.background_color_selector_widget.setHidden(True)
self.edge_color_label.setHidden(True)
self.edge_color_selector_widget.setHidden(True)
self.hide_transparency() | null |
5,773 | # -*- coding: utf-8 -*-
"""activait simulation data operations
:copyright: Copyright (c) 2022 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from pykern import pkio
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdc, pkdlog, pkdp
import contextlib
import io
import sirepo.sim_data
import zipfile
import tarfile
class SimData(sirepo.sim_data.SimDataBase):
_OLD_NEURAL_NET_FIELDS = [
"activationActivation",
"alphaDropoutRate",
"denseActivation",
"denseDimensionality",
"gaussianDropoutRate",
"gaussianNoiseStddev",
]
@classmethod
def fixup_old_data(cls, data, qcall, **kwargs):
if data.simulationType == "ml":
data.simulationType = "activait"
dm = data.models
cls._init_models(
dm,
cls.schema().model.keys(),
)
if "colsWithNonUniqueValues" not in dm.columnInfo:
dm.columnInfo.colsWithNonUniqueValues = PKDict()
for m in dm:
if "fileColumnReport" in m:
cls.update_model_defaults(dm[m], "fileColumnReport")
cls._fixup_neural_net(dm)
dm.analysisReport.pksetdefault(history=[])
dm.hiddenReport.pksetdefault(subreports=[])
@classmethod
def _compute_model(cls, analysis_model, *args, **kwargs):
if "fileColumnReport" in analysis_model:
return "fileColumnReport"
if "partitionColumnReport" in analysis_model:
return "partitionColumnReport"
return super(SimData, cls)._compute_model(analysis_model, *args, **kwargs)
@classmethod
def _compute_job_fields(cls, data, r, compute_model):
res = [
"columnInfo.header",
"dataFile.file",
"dataFile.inputsScaler",
]
if "fileColumnReport" in r:
d = data.models.dataFile
if d.appMode == "classification":
# no outputsScaler for classification
return res
res.append("dataFile.outputsScaler")
if d.inputsScaler == d.outputsScaler:
                # If inputsScaler and outputsScaler are the same then
                # the columns will be unchanged when switching between input/output
return res
return res + ["columnInfo.inputOutput"]
if "partitionColumnReport" in r:
res.append("partition")
return res
@classmethod
def _fixup_neural_net(cls, dm):
def _layer_fields(layer):
f = []
n = layer.layer.lower()
for field in layer:
if n in field.lower():
f.append((field, field.lower().replace(n, "")))
return f
def _update(layer, old, new):
if old in layer:
layer[new] = layer[old]
layer.pop(old)
for l in dm.neuralNet.layers:
for old, new in _layer_fields(l):
_update(l, old, new)
for f in cls._OLD_NEURAL_NET_FIELDS:
if f in l:
del l[f]
if "rate" in l:
# special fixup for dropoutRate
l.dropoutRate = l["rate"]
del l["rate"]
@classmethod
def _lib_file_basenames(cls, data):
name = data.models.dataFile.get("file")
if name:
return [cls.lib_file_name_with_model_field("dataFile", "file", name)]
return []
class DataReader:
def __init__(self, file_path, data_path=None):
self.file_ctx = open
self.path = pkio.py_path(file_path)
self.data_path = data_path
def is_archive(self):
return False
def is_dir(self, item):
return False
@contextlib.contextmanager
def data_context_manager(self):
yield self.file_ctx(self.path, mode="r")
def get_data_list(self, item_filter):
return None
def read(self):
with self.data_context_manager() as f:
f.read()
def METHOD_NAME(self):
import csv
import re
with self.data_context_manager() as f:
for r in csv.reader(f):
yield ",".join(map(lambda x: re.sub(r'["\n\r,]', "", x), r))
class ArchiveDataReader(DataReader):
def __init__(self, file_path, data_path):
super().__init__(file_path, data_path=data_path)
@contextlib.contextmanager
def file_context_manager(self):
yield self.file_ctx(self.path, mode="r")
def is_archive(self):
return True
class HDF5DataReader(ArchiveDataReader):
import h5py
h5py = staticmethod(h5py)
def __init__(self, file_path, data_path):
super().__init__(file_path, data_path=data_path)
self.file_ctx = HDF5DataReader.h5py.File
def is_dir(self, item):
return isinstance(item, HDF5DataReader.h5py.Dataset)
@contextlib.contextmanager
def data_context_manager(self):
with self.file_context_manager() as f:
yield f[self.data_path]
def get_data_list(self, item_filter):
keys = []
with self.file_context_manager() as f:
f.visit(lambda x: keys.append(x) if self.is_dir(f[x]) else None)
return keys
class TarDataReader(ArchiveDataReader):
def __init__(self, file_path, data_path):
super().__init__(file_path, data_path=data_path)
self.file_ctx = tarfile.open
@contextlib.contextmanager
def data_context_manager(self):
with self.file_context_manager() as f:
yield io.TextIOWrapper(f.extractfile(self.data_path))
def get_data_list(self, item_filter):
with self.file_context_manager() as f:
return [x.name for x in f.getmembers() if item_filter(x)]
def is_dir(self, item):
return item.isdir()
class ZipDataReader(ArchiveDataReader):
def __init__(self, file_path, data_path):
super().__init__(file_path, data_path=data_path)
self.file_ctx = zipfile.ZipFile
@contextlib.contextmanager
def data_context_manager(self):
with self.file_context_manager() as f:
yield io.TextIOWrapper(f.open(self.data_path))
def get_data_list(self, item_filter):
with self.file_context_manager() as f:
return [x.filename for x in f.infolist() if item_filter(x)]
def is_dir(self, item):
return item.is_dir()
class DataReaderFactory:
_SUPPORTED_ARCHIVES = PKDict(
{
".h5": HDF5DataReader,
".tar": TarDataReader,
".tar.gz": TarDataReader,
".zip": ZipDataReader,
}
)
_SUPPORTED_ARCHIVE_EXTENSIONS = _SUPPORTED_ARCHIVES.keys()
@classmethod
def get_archive_extension(cls, file_path):
x = list(
filter(
lambda s: str(file_path).endswith(s),
cls._SUPPORTED_ARCHIVE_EXTENSIONS,
)
)
return x[0] if x else None
@classmethod
def build(cls, file_path, data_path=None):
return cls._SUPPORTED_ARCHIVES.get(
cls.get_archive_extension(file_path), DataReader
)(file_path, data_path) | null |
5,774 | from math import sqrt, acos, degrees
import pyqtgraph as pg
from pyqtgraph.Qt.QtWidgets import QGraphicsPathItem, QGraphicsEllipseItem
R_vals_default = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 10, 20, 50]
X_vals_default = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 3, 4, 5, 10, 20, 50]
ZGrid_pen = pg.mkPen("#999999", width=0.2, antialias=True)
YGrid_pen = pg.mkPen("g", width=0.2, antialias=True)
def reactance_arc_sweep(X):
"""
    Return the sweep angle for a constant-reactance arc, as passed to .arcTo().
    :param X: constant reactance
    :return: angle of the arc sweep, in degrees
"""
S11re = (X**2 - 1) / (X**2 + 1) # real part of S11, when |S11| = 1
S11im = sqrt(1 - S11re**2) # imag part of S11, when |S11| = 1
r = 1 / X
c = sqrt((1 - S11re) ** 2 + S11im ** 2)
theta = acos((2 * r ** 2 - c ** 2) / (2 * r ** 2))
return degrees(theta)
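# Worked example (illustrative): for X = 1, S11re = 0, S11im = 1, r = 1 and
# c = sqrt(2), so theta = acos((2 - 2) / 2) = acos(0), i.e. a 90 degree sweep.
# For X = 2 the same formula gives acos(-0.6), roughly 126.87 degrees.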
def resistance_grid_lines(R_vals=R_vals_default, path_item=None):
if path_item is None:
path_item = QGraphicsPathItem()
path = path_item.path() # type: pg.QtGui.QPainterPath
path.moveTo(1, 0)
for R in R_vals:
radius = 1 / (1 + R)
path.addEllipse(1, -radius, -radius * 2, radius * 2)
path_item.setPath(path)
path_item.setPen(ZGrid_pen)
return path_item
def METHOD_NAME(C_vals=R_vals_default, path_item=None):
if path_item is None:
path_item = QGraphicsPathItem()
path = path_item.path() # type: pg.QtGui.QPainterPath
path.moveTo(-1, 0)
for C in C_vals:
radius = 1 / (1 + C)
path.addEllipse(-1, -radius, radius * 2, radius * 2)
path_item.setPath(path)
path_item.setPen(YGrid_pen)
return path_item
def reactance_grid_lines(X_vals=X_vals_default, path_item=None):
if path_item is None:
path_item = QGraphicsPathItem()
path = path_item.path() # type: pg.QtGui.QPainterPath
for X in X_vals:
r = 1 / X
d = 2 * r
alpha = reactance_arc_sweep(X)
x = 1 - r
y = -d
path.arcMoveTo(x, 0, d, d, 90)
path.arcTo(x, 0, d, d, 90, alpha)
path.arcMoveTo(x, y, d, d, -alpha - 90)
path.arcTo(x, y, d, d, -alpha - 90, alpha)
path.moveTo(1, 0)
path.lineTo(-1, 0)
path_item.setPath(path)
path_item.setPen(ZGrid_pen)
return path_item
def susceptance_grid_lines(Y_vals=X_vals_default, path_item=None):
if path_item is None:
path_item = QGraphicsPathItem()
path = path_item.path() # type: pg.QtGui.QPainterPath
for Y in Y_vals:
r = 1 / Y
d = 2 * r
alpha = reactance_arc_sweep(Y)
x = -1 - r
y = -d
path.arcMoveTo(x, 0, d, d, -270)
path.arcTo(x, 0, d, d, -270, -alpha)
path.arcMoveTo(x, y, d, d, -90)
path.arcTo(x, y, d, d, -90, alpha)
path.moveTo(1, 0)
path.lineTo(-1, 0)
path_item.setPath(path)
path_item.setPen(YGrid_pen)
return path_item
def gen_s_unity_circle():
s_unity_circle = QGraphicsEllipseItem(1, -1, -2, 2)
s_unity_circle.setPen(pg.mkPen('w', antialias=True))
return s_unity_circle
def gen_z_grid(major_accented=True):
grid_lines = reactance_grid_lines(path_item=resistance_grid_lines())
if major_accented:
ZGridBold = reactance_grid_lines([1], path_item=resistance_grid_lines([1]))
path = ZGridBold.path()
path.moveTo(1, 0)
path.lineTo(-1, 0)
ZGridBold.setPen(pg.mkPen('#999999', width=0.6, antialias=True))
ZGridBold.setParentItem(grid_lines)
return grid_lines
def gen_y_grid(major_accented=True):
grid_lines = susceptance_grid_lines(path_item=METHOD_NAME())
if major_accented:
YGridBold = susceptance_grid_lines([1], path_item=METHOD_NAME([1]))
path = YGridBold.path()
path.moveTo(1, 0)
path.lineTo(-1, 0)
YGridBold.setPen(pg.mkPen('g', width=0.6, antialias=True))
YGridBold.setParentItem(grid_lines)
return grid_lines
if __name__ == "__main__":
plot = pg.plot()
plot.setAspectLocked()
plot.setXRange(-1, 1)
plot.setYRange(-1, 1)
plot.addItem(gen_s_unity_circle())
plot.addItem(gen_z_grid())
plot.addItem(gen_y_grid())
import sys
if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_() | null |
5,775 | """New Project Initialization Service."""
from __future__ import annotations
import contextlib
import os
import uuid
from pathlib import Path
import click
from meltano.core.cli_messages import GREETING
from meltano.core.db import project_engine
from meltano.core.plugin.meltano_file import MeltanoFilePlugin
from meltano.core.project import Project
from meltano.core.project_settings_service import SettingValueStore
PROJECT_FILENAME = "meltano.yml"
class ProjectInitServiceError(Exception):
"""Project Initialization Service Exception."""
class ProjectInitService:
"""New Project Initialization Service."""
def __init__(self, project_directory: os.PathLike):
"""Create a new ProjectInitService instance.
Args:
project_directory: The directory path to create the project at
"""
self.project_directory = Path(project_directory)
with contextlib.suppress(ValueError):
self.project_directory = self.project_directory.relative_to(Path.cwd())
def init( # noqa: C901
self,
*,
activate: bool = True,
force: bool = False,
) -> Project:
"""Initialise Meltano Project.
Args:
activate: Activate newly created project
force: Whether to overwrite `meltano.yml` in the existing directory.
Returns:
A new Project instance
Raises:
ProjectInitServiceError: Directory already exists
"""
try:
self.project_directory.mkdir()
except FileExistsError as ex:
if (
os.path.exists(os.path.join(self.project_directory, PROJECT_FILENAME))
and not force
):
msg = (
"A `meltano.yml` file already exists in the target directory. "
"Use `--force` to overwrite it."
)
raise ProjectInitServiceError(msg) from ex
except PermissionError as ex:
raise ProjectInitServiceError(
f"Permission denied to create '{self.project_directory}'.",
) from ex
except Exception as ex:
raise ProjectInitServiceError(
f"Could not create directory '{self.project_directory}'. {ex}",
) from ex
project = Project(self.project_directory)
self.create_dot_meltano_dir(project)
self.create_files(project)
project.settings.set(
"project_id",
str(uuid.uuid4()),
store=SettingValueStore.MELTANO_YML,
)
self.set_send_anonymous_usage_stats(project)
if activate:
Project.activate(project)
self.create_system_database(project)
return project
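    # Minimal usage sketch (the directory name is hypothetical, not from the
    # original module):
    #
    #   service = ProjectInitService("my_meltano_project")
    #   project = service.init(activate=True, force=False)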
def create_dot_meltano_dir(self, project: Project):
"""Create .meltano directory.
Args:
project: Meltano project context
"""
# explicitly create the .meltano directory if it doesn't exist
click.secho("Creating .meltano folder", fg="blue")
os.makedirs(project.meltano_dir(), exist_ok=True)
click.secho("created", fg="blue", nl=False)
click.echo(f" .meltano in {project.sys_dir_root}")
def create_files(self, project: Project):
"""Create project files.
Args:
project: Meltano project context
"""
click.secho("Creating project files...", fg="blue")
if project.root != Path.cwd():
click.echo(f" {self.project_directory}/")
plugin = MeltanoFilePlugin()
expected_files = plugin.files_to_create(project, [])
created_files = plugin.create_files(project)
for path in expected_files:
if path in created_files:
click.secho(" |--", fg="blue", nl=False)
click.echo(f" {path}")
else:
click.secho(" |--", fg="yellow", nl=False)
click.echo(f" {path} (skipped)")
def set_send_anonymous_usage_stats(self, project: Project):
"""Set Anonymous Usage Stats flag.
Args:
project: The Meltano project the setting is being set on.
"""
# If set to false store explicitly in `meltano.yml`
if not project.settings.get("send_anonymous_usage_stats"):
project.settings.set(
"send_anonymous_usage_stats",
project.settings.get("send_anonymous_usage_stats"),
store=SettingValueStore.MELTANO_YML,
)
def create_system_database(self, project: Project):
"""Create Meltano System DB.
Args:
project: Meltano project context
Raises:
ProjectInitServiceError: Database initialization failed
"""
click.secho("Creating system database...", fg="blue", nl=False)
# register the system database connection
engine, _ = project_engine(project, default=True)
from meltano.core.migration_service import MigrationError, MigrationService
try:
migration_service = MigrationService(engine)
migration_service.upgrade(silent=True)
click.secho(" Done!", fg="blue")
except MigrationError as err:
raise ProjectInitServiceError(str(err)) from err
def METHOD_NAME(self, project: Project):
"""Echo Next Steps to Click CLI.
Args:
project: Meltano project context
"""
click.secho(GREETING, nl=False)
click.echo("Your project has been created!\n")
click.echo("Meltano Environments initialized with ", nl=False)
click.secho("dev", fg="bright_green", nl=False)
click.echo(", ", nl=False)
click.secho("staging", fg="bright_yellow", nl=False)
click.echo(", and ", nl=False)
click.secho("prod", fg="bright_red", nl=False)
click.echo(".")
click.echo("To learn more about Environments visit: ", nl=False)
click.secho(
"https://docs.meltano.com/concepts/environments",
fg="cyan",
)
click.echo("\nNext steps:")
if project.root != Path.cwd():
click.secho(" cd ", nl=False)
click.secho(self.project_directory, fg="magenta")
click.echo(" Visit ", nl=False)
click.secho(
"https://docs.meltano.com/getting-started/part1",
fg="cyan",
nl=False,
)
click.echo(" to learn where to go from here") | null |
5,776 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpeOneView.resources.resource import ResourceClient
class LogicalDownlinks(object):
"""
Logical Downlinks API client.
"""
URI = '/rest/logical-downlinks'
def __init__(self, con):
self._connection = con
self._client = ResourceClient(con, self.URI)
def get_all(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of logical downlinks. The collection is based on
optional sorting and filtering and is constrained by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
list: A list of logical downlinks.
"""
return self._client.get_all(start, count, filter=filter, sort=sort)
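    # Usage sketch (assumes an already-authenticated `connection` object and
    # illustrative sort/count values; not taken from the original docs):
    #
    #   downlinks = LogicalDownlinks(connection)
    #   first_page = downlinks.get_all(start=0, count=50, sort='name:ascending')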
def get(self, id_or_uri):
"""
Gets a logical downlink by ID or by URI.
Args:
id_or_uri: Can be either the logical downlink id or the logical downlink uri.
Returns:
dict: The logical downlink.
"""
return self._client.get(id_or_uri)
def METHOD_NAME(self, field, value):
"""
Gets all logical downlinks that match the filter.
The search is case-insensitive.
Args:
field: Field name to filter.
value: Value to filter.
Returns:
list: A list of logical downlinks.
"""
return self._client.METHOD_NAME(field, value)
def get_all_without_ethernet(self, start=0, count=-1, filter='', sort=''):
"""
Gets a paginated collection of logical downlinks without ethernet. The collection is
based on optional sorting and filtering and is constrained by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
Returns:
dict
"""
without_ethernet_client = ResourceClient(
self._connection, "/rest/logical-downlinks/withoutEthernet")
return without_ethernet_client.get_all(start, count, filter=filter, sort=sort)
def get_without_ethernet(self, id_or_uri):
"""
Gets the logical downlink with the specified ID without ethernet.
Args:
id_or_uri: Can be either the logical downlink id or the logical downlink uri.
Returns:
dict
"""
uri = self._client.build_uri(id_or_uri) + "/withoutEthernet"
return self._client.get(uri) | null |
5,777 | """Game representation for views"""
import time
from lutris.database import games
from lutris.database.games import get_service_games
from lutris.runners import get_runner_human_name
from lutris.services import SERVICES
from lutris.util.log import logger
from lutris.util.strings import get_formatted_playtime, gtk_safe
class StoreItem:
"""Representation of a game for views
TODO: Fix overlap with Game class
"""
def __init__(self, game_data, service_media):
if not game_data:
raise RuntimeError("No game data provided")
self._game_data = game_data
self._cached_installed_game_data = None
self.service_media = service_media
def __str__(self):
return self.name
def __repr__(self):
return "<Store id=%s slug=%s>" % (self.METHOD_NAME, self.slug)
@property
def _installed_game_data(self):
"""Provides- and caches- the DB data for the installed game corresponding to this one,
if it's a service game. We can get away with caching this because StoreItem instances are
very short-lived, so the game won't be changed underneath us."""
appid = self._game_data.get("appid")
if appid:
if self._cached_installed_game_data is None:
self._cached_installed_game_data = games.get_game_for_service(self.service,
self._game_data["appid"]) or {}
return self._cached_installed_game_data
return None
def _get_game_attribute(self, key):
value = self._game_data.get(key)
if not value:
game_data = self._installed_game_data
if game_data:
value = game_data.get(key)
return value
@property
def METHOD_NAME(self): # pylint: disable=invalid-name
"""Game internal ID"""
# Return a unique identifier for the game.
# Since service games are not related to lutris, use the appid
if "service_id" not in self._game_data:
if "appid" in self._game_data:
return self._game_data["appid"]
return self._game_data["slug"]
return self._game_data["id"]
@property
def service(self):
return gtk_safe(self._game_data.get("service"))
@property
def slug(self):
"""Slug identifier"""
return gtk_safe(self._game_data["slug"])
@property
def name(self):
"""Name"""
return gtk_safe(self._game_data["name"])
@property
def sortname(self):
"""Name used for sorting"""
return gtk_safe(self._get_game_attribute("sortname") or "")
@property
def year(self):
"""Year"""
return str(self._get_game_attribute("year") or "")
@property
def runner(self):
"""Runner slug"""
_runner = self._get_game_attribute("runner")
return gtk_safe(_runner) or ""
@property
def runner_text(self):
"""Runner name"""
return gtk_safe(get_runner_human_name(self.runner))
@property
def platform(self):
"""Platform"""
_platform = self._get_game_attribute("platform")
if not _platform and self.service in SERVICES:
service = SERVICES[self.service]()
_platforms = service.get_game_platforms(self._game_data)
if _platforms:
_platform = ", ".join(_platforms)
return gtk_safe(_platform)
@property
def installed(self):
"""Game is installed"""
if "service_id" not in self._game_data:
return self.METHOD_NAME in get_service_games(self.service)
if not self._game_data.get("runner"):
return False
return self._game_data.get("installed")
def get_media_path(self):
"""Returns the path to the image file for this item"""
if self._game_data.get("icon"):
return self._game_data["icon"]
return self.service_media.get_media_path(self.slug)
@property
def installed_at(self):
"""Date of install"""
return self._get_game_attribute("installed_at")
@property
def installed_at_text(self):
"""Date of install (textual representation)"""
return gtk_safe(
time.strftime("%X %x", time.localtime(self.installed_at)) if
self.installed_at else ""
)
@property
def lastplayed(self):
"""Date of last play"""
return self._get_game_attribute("lastplayed")
@property
def lastplayed_text(self):
"""Date of last play (textual representation)"""
return gtk_safe(
time.strftime(
"%X %x",
time.localtime(self.lastplayed)
) if self.lastplayed else ""
)
@property
def playtime(self):
"""Playtime duration in hours"""
try:
return float(self._get_game_attribute("playtime") or 0)
except (TypeError, ValueError):
return 0.0
@property
def playtime_text(self):
"""Playtime duration in hours (textual representation)"""
try:
_playtime_text = get_formatted_playtime(self.playtime)
except ValueError:
logger.warning("Invalid playtime value %s for %s", self.playtime, self)
_playtime_text = "" # Do not show erroneous values
return gtk_safe(_playtime_text) | null |
5,778 | import sys
from six import StringIO
from contextlib import contextmanager
import unittest
from nose.plugins.attrib import attr
from jnpr.junos import Device
from jnpr.junos.utils.scp import SCP
from mock import patch
__author__ = "Rick Sherman, Nitin Kumar"
__credits__ = "Jeremy Schulman"
if sys.version < "3":
builtin_string = "__builtin__"
else:
builtin_string = "builtins"
@attr("unit")
class TestScp(unittest.TestCase):
def setUp(self):
self.dev = Device(host="1.1.1.1")
@patch("paramiko.SSHClient")
def test_scp_open(self, mock_connect):
from scp import SCPClient
self.dev.bind(scp=SCP)
assert isinstance(self.dev.scp.open(), SCPClient)
@patch("paramiko.SSHClient")
def test_scp_close(self, mock_connect):
self.dev.bind(scp=SCP)
self.dev.scp.open()
self.assertEqual(self.dev.scp.close(), None)
@patch("paramiko.SSHClient")
def test_scp_context(self, mock_connect):
with SCP(self.dev) as scp:
scp.get("addrbook.conf")
def test_scp_console(self):
dev = Device(host="1.1.1.1", mode="telnet")
self.assertRaises(RuntimeError, SCP, dev)
@patch("jnpr.junos.device.os")
@patch(builtin_string + ".open")
@patch("paramiko.config.SSHConfig.lookup")
@patch("paramiko.SSHClient")
@patch("paramiko.proxy.ProxyCommand")
def test_scp_proxycommand(
self, mock_proxy, mock_paramiko, mock_connect, open_mock, os_mock
):
os_mock.path.exists.return_value = True
# self.dev._sshconf_path = '/home/rsherman/.ssh/config'
with SCP(self.dev) as scp:
scp.get("addrbook.conf")
mock_proxy.assert_called_once()
def test_scp_progress(self):
scp = SCP(self.dev)
print(scp._scp_progress("test", 100, 50))
@patch("paramiko.SSHClient")
@patch("scp.SCPClient.put")
@patch("scp.SCPClient.__init__")
def test_scp_user_def_progress(self, mock_scpclient, mock_put, mock_ssh):
mock_scpclient.return_value = None
def fn(file, total, tfd):
pass
package = "test.tgz"
with SCP(self.dev, progress=fn) as scp:
scp.put(package)
self.assertEqual(mock_scpclient.mock_calls[0][2]["progress"].__name__, "fn")
@patch("paramiko.SSHClient")
@patch("scp.SCPClient.put")
@patch("scp.SCPClient.__init__")
def test_scp_user_def_progress_args_2(self, mock_scpclient, mock_put, mock_ssh):
mock_scpclient.return_value = None
def myprogress(dev, report):
print("host: %s, report: %s" % (dev.hostname, report))
package = "test.tgz"
with SCP(self.dev, progress=myprogress) as scp:
scp.put(package)
self.assertEqual(
mock_scpclient.mock_calls[0][2]["progress"].__name__, "_scp_progress"
)
@patch("paramiko.SSHClient")
@patch("scp.SCPClient.put")
@patch("scp.SCPClient.__init__")
def test_scp_progress_true(self, mock_scpclient, mock_put, mock_sshclient):
mock_scpclient.return_value = None
package = "test.tgz"
with SCP(self.dev, progress=True) as scp:
scp.put(package)
self.assertEqual(
mock_scpclient.mock_calls[0][2]["progress"].__name__, "_scp_progress"
)
@patch("ncclient.manager.connect")
@patch("paramiko.SSHClient.connect")
@patch("scp.SCPClient.put")
@patch("scp.SCPClient.__init__")
def test_ssh_private_key_file(
self, mock_scpclient, mock_put, mock_sshclient, mock_ncclient
):
mock_scpclient.return_value = None
package = "test.tgz"
dev = Device(
host="1.1.1.1", user="user", ssh_private_key_file="/Users/test/testkey"
)
dev.open(gather_facts=False)
with SCP(dev) as scp:
scp.put(package)
self.assertEqual(
mock_sshclient.mock_calls[0][2]["key_filename"], "/Users/test/testkey"
)
@contextmanager
def METHOD_NAME(self, command, *args, **kwargs):
out, sys.stdout = sys.stdout, StringIO()
command(*args, **kwargs)
sys.stdout.seek(0)
yield sys.stdout.read()
sys.stdout = out | null |
5,779 | import pytest
SMART_FRAME_SURROUNDINGS = {
'hide_all': 'hide_all',
'on': 'hide_all',
'true': 'hide_all',
'1': 'hide_all',
'hide_gaps': 'hide_gaps',
'off': 'off',
'false': 'off',
'0': 'off',
}
can_toggle = [
'update_dragged_clients',
]
cannot_toggle = [
'window_border_width',
'frame_border_active_color',
'default_frame_layout',
'wmname'
]
@pytest.mark.parametrize('name', can_toggle)
def test_toggle_boolean_settings(hlwm, name):
hlwm.call("toggle " + name)
@pytest.mark.parametrize('name', cannot_toggle)
def test_cannot_toggle_non_boolean(hlwm, name):
p = hlwm.call_xfail("toggle " + name)
assert p.stderr.endswith("not of type bool\n")
@pytest.mark.parametrize('name', can_toggle + cannot_toggle)
def test_get(hlwm, name):
hlwm.call("get " + name)
@pytest.mark.parametrize('name', can_toggle)
def test_toggle_numeric_settings(hlwm, name):
hlwm.call("toggle " + name)
@pytest.mark.parametrize('name', cannot_toggle)
def test_cannot_toggle_non_numeric(hlwm, name):
hlwm.call_xfail("toggle " + name)
def test_toggle_completion(hlwm):
res = hlwm.complete("toggle")
for n in can_toggle:
assert n in res
for n in cannot_toggle:
assert n not in res
def test_default_frame_layout_value_too_high(hlwm):
hlwm.call_xfail('set default_frame_layout 99') \
.expect_stderr('set: Invalid value "99" for setting "default_frame_layout": .*out of range')
def test_default_frame_layout_value_invalid_value(hlwm):
hlwm.call_xfail('set default_frame_layout -23') \
.expect_stderr('set: Invalid value "-23" for setting "default_frame_layout": .*Expecting.*vertical')
hlwm.call_xfail('set default_frame_layout foobar') \
.expect_stderr('set: Invalid value "foobar" for setting "default_frame_layout": .*Expecting.*vertical')
def test_default_frame_layout_after_split(hlwm):
"""When splitting a FrameLeaf, then the new frame
inherits the layout algorithm. However, when a FrameSplit is
split, then default_frame_layout is used.
"""
old_default = hlwm.attr.settings.default_frame_layout()
new_default = 'grid'
assert old_default != new_default, \
"the test is vacuous if the default didn't change"
hlwm.attr.settings.default_frame_layout = new_default
hlwm.call('split right')
# split the root frame
hlwm.call(['split', 'bottom', '0.5', ''])
# this new frame has the new default frame layout, but the two frames
# on the top still have the original algorithm:
assert hlwm.attr.tags.focus.tiling.root[0][0].algorithm() == old_default
assert hlwm.attr.tags.focus.tiling.root[0][1].algorithm() == old_default
assert hlwm.attr.tags.focus.tiling.root[1].algorithm() == new_default
def test_default_frame_layout_on_new_tag(hlwm):
old_default = hlwm.attr.settings.default_frame_layout()
new_default = 'grid'
assert old_default != new_default, \
"the test is vacuous if the default didn't change"
hlwm.attr.settings.default_frame_layout = new_default
hlwm.call('add newtag')
assert hlwm.attr.tags[1].tiling.root.algorithm() == new_default
assert hlwm.attr.tags[0].tiling.root.algorithm() == old_default
def test_default_frame_layout_index_as_name(hlwm):
"""test backwards compatibility of default_frame_layout"""
layout_with_index_1 = 'horizontal'
assert hlwm.attr.settings.default_frame_layout() != layout_with_index_1
hlwm.attr.settings.default_frame_layout = '1'
assert hlwm.attr.settings.default_frame_layout() == layout_with_index_1
def test_default_frame_layout_completion(hlwm):
assert 'grid' in hlwm.complete(['set', 'default_frame_layout'])
def test_set_invalid_setting(hlwm):
hlwm.call_xfail('set foobar baz') \
.expect_stderr('Setting "foobar" not found\n')
def test_get_invalid_setting(hlwm):
hlwm.call_xfail('get foobar') \
.expect_stderr('Setting "foobar" not found\n')
def test_toggle_invalid_setting(hlwm):
hlwm.call_xfail('toggle foobar') \
.expect_stderr('Setting "foobar" not found\n')
def METHOD_NAME(hlwm):
hlwm.call_xfail('set monitors_locked -1') \
.expect_stderr('out of range')
def test_smart_frame_surroundings_parsing(hlwm):
assert sorted(SMART_FRAME_SURROUNDINGS) == sorted(hlwm.complete(['set', 'smart_frame_surroundings']))
for k in SMART_FRAME_SURROUNDINGS:
hlwm.attr.settings.smart_frame_surroundings = k
assert hlwm.attr.settings.smart_frame_surroundings() == SMART_FRAME_SURROUNDINGS[k]
hlwm.call_xfail('set smart_frame_surroundings foobar') \
.expect_stderr('Expecting one of: hide_all.*')
def test_smart_frame_surroundings(hlwm, x11):
hlwm.attr.settings.frame_border_width = 5
hlwm.attr.settings.frame_gap = 7
hlwm.attr.settings.smart_frame_surroundings = 'hide_all'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (800, 600)
hlwm.attr.settings.smart_frame_surroundings = 'hide_gaps'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (790, 590)
hlwm.attr.settings.smart_frame_surroundings = 'off'
frame_x11 = x11.get_hlwm_frames()[0]
frame_geom = frame_x11.get_geometry()
assert (frame_geom.width, frame_geom.height) == (776, 576)
def test_always_show_frame(hlwm):
# test old->new setting
settings = hlwm.attr.settings
settings.always_show_frame = True
assert settings.show_frame_decorations() == 'all'
settings.always_show_frame = False
assert settings.show_frame_decorations() == 'focused'
# test new->old setting
settings.always_show_frame = True
settings.show_frame_decorations = 'nonempty'
assert settings.always_show_frame() is False
settings.show_frame_decorations = 'all'
assert settings.always_show_frame() is True
settings.show_frame_decorations = 'focused'
assert settings.always_show_frame() is False | null |
5,780 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
from mindspore.common.tensor import Tensor
from mindspore.ops import Primitive
from mindspore.ops import functional as F
from tests.ut.python.model.resnet import resnet50
scala_add = F.scalar_add
def scalar_add(x, y):
"""Implement `scalar_add`."""
return x + y
def scalar_mul(x, y):
"""Implement `scalar_mul`."""
return x * y
# Test:common function
def test_null(x, y):
return scala_add(10.0, 28.0 / 43.0)
def test_grad_add(x, y):
return scala_add(x, y)
def test_grad_expr(x, y):
return x ** 3.0 * y ** 4.0
def test_constant(x):
return 18.0 * x
def test_dup_args_in_call(x):
"""The naive gradient update rule fails when a function's arguments
contain the same variable more than once."""
return x * x
def test_quadruple_args_in_call(x):
"""Test that duplicated arguments still cause no problem even if
there are four of them."""
def g(a, b, c, d):
return a * b * c * d
return g(x, x, x, x)
def test_tuples(x, y):
tup = scala_add(x, y), x * y
z = scala_add(tup[0], tup[1])
return z
def test_hof(a, b):
"""Test higher order functions."""
def f(g, x):
return g(x) * g(scala_add(x, 10.0))
def g(x):
return x * b
return scala_add(f(g, a), f(g, b))
def test_hof_tup(a, b):
"""Test higher order functions."""
def f(gh, x, y):
g, h = gh
return scalar_mul(g(x, y), h(x, y))
return f((scalar_add, scalar_mul), a, b)
def test_simple_closure(a, b):
"""Test some trivial closures."""
def f():
return a + 1.0
def g():
return b + 2.0
return f() * g()
def test_closure(a):
"""This is the closure test in the paper."""
def x1(b):
def x4(c):
return c * b
return x4
x2 = x1(a)
x3 = x2(1.0)
return x3
def test_if(a, b):
# This is max, but what this is really testing is the most basic
# if statement, so I prefer to name the test 'test_if'
if a > b:
return a
return b
def test_if2(a, b):
if a > b:
return a * a
return b + b
def test_fact(x):
def fact(n):
if n <= 1:
return 1
return n * fact(n - 1)
return fact(x)
def test_while(x):
rval = x
while rval < 100:
rval = rval * rval
return rval
def test_while_2(x, y, z):
rval = 0
# Cannot compare to 0 or finite diff is unstable
while x > -0.1:
rval = rval + y
x = x - z
return rval
def test_pow10(x):
v = x
j = 0
while j < 3:
i = 0
while i < 3:
v = v * x
i = i + 1
j = j + 1
return v
def test_nested_closure(x):
a = x * x
b = x + 5
def f():
def g():
return a + b
def h():
return a * b
return g if x < 0 else h
return f()()
def test_functions_in_tuples(x, y):
tup = scalar_add, scalar_mul
f, g = tup
return f(x, y) + g(x, y)
def test_closures_in_tuples(x, y):
def f():
return x * y
def g():
return scala_add(x, y)
tup = f, g
ff, gg = tup
return scala_add(ff(), gg())
# tensor test
def METHOD_NAME(x, y):
t1 = Tensor(np.ones(x))
t2 = Tensor(np.zeros(y), ms.float32)
return t1 + t2
def test_tensor_set_type(x):
t = Tensor(x)
t.set_dtype(ms.float32)
return t
def test_tensor_mul(x, y):
x = Tensor(x)
y = Tensor(y)
z = x * y
return z
def test_tensor_sub(x, y):
x = Tensor(x)
y = Tensor(y)
z = x - y
return z
relu = Primitive('relu')
# Extension test
def test_ops_fn(x):
foo = relu(x)
return foo
def test_clone_simple(x, y):
a = x * x
b = y * y
c = a + b
return c
def test_more_closure(a, b):
"""Test some trivial closures."""
z = 1
def f():
return a + z
def g():
return b + 2.0
return f() * g()
def test_more_hof(a, b):
"""Test higher order functions."""
def f(g, h, x):
return g(x) * h(x) * g(x + 10.0)
def g(x):
return x * b
def h(x):
return x * a
return scala_add(f(g, h, a), f(g, h, b))
def test_constant_output(x, y):
return 1
# test resnet
def test_resnet_construct(x):
# not right model to import
network = resnet50()
return network.construct(x) | null |
5,781 | # -*- coding: utf-8 -*-
"""
Provides information on the kinematics involved in the data.
Uses the PLOTTING file specification.
"""
from collections import namedtuple
import logging
import numpy as np
import pandas as pd
from reportengine import collect
from reportengine.checks import check_positive
from reportengine.table import table
from validphys import plotoptions
from validphys.core import CutsPolicy
log = logging.getLogger(__name__)
@check_positive('titlelevel')
def describe_kinematics(commondata, titlelevel: int = 1):
"""Output a markdown text describing the stored metadata for a given
commondata.
titlelevel can be used to control the header level of the title.
"""
import inspect
cd = commondata
info = plotoptions.get_info(cd)
proc = cd.load_commondata().commondataproc
src = inspect.getsource(info.kinematics_override.xq2map)
titlespec = '#' * titlelevel
return f"""
{titlespec} {cd}
{info.dataset_label}
Stored data:
- Process type: **{proc}** ({info.process_description})
- variables:
* k1: {info.kinlabels[0]}
* k2: {info.kinlabels[1]}
* k3: {info.kinlabels[2]}
Map:
```python
{src}
```
"""
describe_kinematics.highlight = 'markdown'
nfittedlabel = '$N_{fitted}$'
ndatalabel = '$N_{data}$'
def kinlimits(commondata, cuts, use_cuts, use_kinoverride: bool = True):
"""Return a mapping containing the number of fitted and used datapoints, as
well as the label, minimum and maximum value for each of the three
kinematics. If ``use_kinoverride`` is set to False, the PLOTTING files will
    be ignored and the kinematics will be interpreted based on the process type
only. If use_cuts is 'CutsPolicy.NOCUTS', the information on the total
number of points will be displayed, instead of the fitted ones."""
info = plotoptions.get_info(commondata, cuts=None, use_plotfiles=use_kinoverride)
kintable = plotoptions.kitable(commondata, info)
ndata = len(kintable)
if cuts:
kintable = kintable.loc[cuts.load()]
nfitted = len(kintable)
elif use_cuts is not CutsPolicy.NOCUTS:
nfitted = len(kintable)
else:
nfitted = '-'
d = {'dataset': commondata, ndatalabel: ndata, nfittedlabel: nfitted}
for i, key in enumerate(['k1', 'k2', 'k3']):
kmin = kintable[key].min()
kmax = kintable[key].max()
label = info.kinlabels[i]
d[key] = label
d[key + ' min'] = kmin
d[key + ' max'] = kmax
return d
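# Illustrative shape of the mapping returned by kinlimits (values are made up):
# {'dataset': <commondata>, '$N_{data}$': 130, '$N_{fitted}$': 121,
#  'k1': 'x', 'k1 min': 1e-4, 'k1 max': 0.9,
#  'k2': 'Q2', 'k2 min': 2.5, 'k2 max': 1e4,
#  'k3': 'y', 'k3 min': 0.0, 'k3 max': 1.0}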
all_kinlimits = collect(kinlimits, ('dataset_inputs',))
@table
def all_kinlimits_table(all_kinlimits, use_kinoverride: bool = True):
"""Return a table with the kinematic limits for the datasets given as input
in dataset_inputs. If the PLOTTING overrides are not used, the information on
sqrt(k2) will be displayed."""
table = pd.DataFrame(
all_kinlimits,
columns=[
'dataset',
'$N_{data}$',
'$N_{fitted}$',
'k1',
'k1 min',
'k1 max',
'k2',
'k2 min',
'k2 max',
'k3',
'k3 min',
'k3 max',
],
)
# We really want to see the square root of the scale
if not use_kinoverride:
table['k2'] = 'sqrt(' + table['k2'] + ')'
table['k2 min'] = np.sqrt(table['k2 min'])
table['k2 max'] = np.sqrt(table['k2 max'])
# renaming the columns is overly complicated
cols = list(table.columns)
cols[6:9] = ['sqrt(k2)', 'sqrt(k2) min', 'sqrt(k2) max']
table.columns = cols
return table
@table
def all_commondata_grouping(all_commondata, metadata_group):
"""Return a table with the grouping specified
by `metadata_group` key for each dataset for all available commondata.
"""
records = []
for cd in all_commondata:
records.append(
{'dataset': str(cd), metadata_group: getattr(plotoptions.get_info(cd), metadata_group)}
)
df = pd.DataFrame.from_records(records, index='dataset')
# sort first by grouping alphabetically and then dataset name
return df.sort_values([metadata_group, 'dataset'])
def total_fitted_points(all_kinlimits_table) -> int:
"""Print the total number of fitted points in a given set of data"""
tb = all_kinlimits_table
return int(tb[nfittedlabel].sum())
XQ2Map = namedtuple('XQ2Map', ('experiment', 'commondata', 'fitted', 'masked', "group"))
def xq2map_with_cuts(commondata, cuts, group_name=None):
"""Return two (x,Q²) tuples: one for the fitted data and one for the
cut data. If `display_cuts` is false or all data passes the cuts, the second
tuple will be empty."""
info = plotoptions.get_info(commondata)
kintable = plotoptions.kitable(commondata, info)
if cuts:
mask = cuts.load()
boolmask = np.zeros(len(kintable), dtype=bool)
boolmask[mask] = True
fitted_kintable = kintable.loc[boolmask]
masked_kitable = kintable.loc[~boolmask]
xq2fitted = plotoptions.get_xq2map(fitted_kintable, info)
xq2masked = plotoptions.get_xq2map(masked_kitable, info)
return XQ2Map(info.experiment, commondata, xq2fitted, xq2masked, group_name)
fitted_kintable = plotoptions.get_xq2map(kintable, info)
empty = (np.array([]), np.array([]))
return XQ2Map(info.experiment, commondata, fitted_kintable, empty, group_name)
dataset_inputs_by_groups_xq2map = collect(
xq2map_with_cuts,
(
'group_dataset_inputs_by_metadata',
'data_input',
),
)
def kinematics_table_notable(commondata, cuts, show_extra_labels: bool = False):
"""
Table containing the kinematics of a commondata object,
    indexed by their datapoint id. The kinematics will be transformed as per the
PLOTTING file of the dataset or process type, and the column headers will
be the labels of the variables defined in the metadata.
    If ``show_extra_labels`` is ``True`` then extra labels defined in the
PLOTTING files will be displayed. Otherwise only the original three
kinematics will be shown.
"""
info = plotoptions.get_info(commondata, cuts=cuts)
res = plotoptions.kitable(commondata, info, cuts=cuts)
res.columns = [*info.kinlabels, *res.columns[3:]]
if not show_extra_labels:
res = res.iloc[:, :3]
return res
@table
def METHOD_NAME(kinematics_table_notable):
"""Same as kinematics_table_notable but writing the table to file"""
return kinematics_table_notable | null |
5,782 | """
Testing the handling of events on the top level.
As input:
* Mocked cause detection, with the cause artificially simulated for each test.
The proper cause detection is tested elsewhere (see ``test_detection.py``).
* Registered handlers in a global registry. Each handler is a normal function,
which calls a mock -- to ease the assertions.
As output, we check mocked calls on the following:
* ``asyncio.sleep()`` -- for delays.
* ``kopf.clients.patching.patch_obj()`` -- for patch content.
* ``kopf.clients.events.post_event()`` -- for events posted.
* Handler mocks -- whether they were or were not called with specific arguments.
* Captured logs.
The above inputs & outputs represent the expected user scenario
rather than the specific implementation of it.
Therefore, we do not mock/spy/intercept anything within the handling routines
(except for cause detection), leaving it as the implementation details.
Specifically, this internal chain of calls happens on every event:
* ``causation.detect_*_cause()`` -- tested separately in ``/tests/causation/``.
* ``handle_cause()``
* ``execute()``
* ``_execute()``
* ``_call_handler()``
* ``invocation.invoke()`` -- tested separately in ``/tests/invocations/``.
Some of these aspects are tested separately to be sure they indeed execute
all possible cases properly. In the top-level event handling, we assume they do,
and only check for the upper-level behaviour, not all of the input combinations.
"""
import dataclasses
from typing import Callable
import pytest
from mock import Mock
import kopf
from kopf._core.intents.causes import ChangingCause
@pytest.fixture(autouse=True)
def _auto_mocked(k8s_mocked):
pass
@dataclasses.dataclass(frozen=True, eq=False, order=False)
class HandlersContainer:
index_mock: Mock
event_mock: Mock
create_mock: Mock
update_mock: Mock
delete_mock: Mock
resume_mock: Mock
event_fn: Callable
create_fn: Callable
update_fn: Callable
delete_fn: Callable
resume_fn: Callable
@pytest.fixture()
def handlers(registry):
index_mock = Mock(return_value=None)
event_mock = Mock(return_value=None)
create_mock = Mock(return_value=None)
update_mock = Mock(return_value=None)
delete_mock = Mock(return_value=None)
resume_mock = Mock(return_value=None)
@kopf.index('kopfexamples', id='index_fn')
async def index_fn(**kwargs):
return index_mock(**kwargs)
@kopf.on.event('kopfexamples', id='event_fn')
async def event_fn(**kwargs):
return event_mock(**kwargs)
# Keep on-resume on top, to catch any issues with the test design (where it could be skipped).
@kopf.on.resume('kopfexamples', id='resume_fn', timeout=600, retries=100,
deleted=True) # only for resuming handles, to cover the resource being deleted.
async def resume_fn(**kwargs):
return resume_mock(**kwargs)
@kopf.on.create('kopfexamples', id='create_fn', timeout=600, retries=100)
async def create_fn(**kwargs):
return create_mock(**kwargs)
@kopf.on.update('kopfexamples', id='update_fn', timeout=600, retries=100)
async def update_fn(**kwargs):
return update_mock(**kwargs)
@kopf.on.delete('kopfexamples', id='delete_fn', timeout=600, retries=100)
async def delete_fn(**kwargs):
return delete_mock(**kwargs)
return HandlersContainer(
index_mock=index_mock,
event_mock=event_mock,
create_mock=create_mock,
update_mock=update_mock,
delete_mock=delete_mock,
resume_mock=resume_mock,
event_fn=event_fn,
create_fn=create_fn,
update_fn=update_fn,
delete_fn=delete_fn,
resume_fn=resume_fn,
)
@pytest.fixture()
def extrahandlers(registry, handlers):
index_mock = Mock(return_value=None)
event_mock = Mock(return_value=None)
create_mock = Mock(return_value=None)
update_mock = Mock(return_value=None)
delete_mock = Mock(return_value=None)
resume_mock = Mock(return_value=None)
@kopf.index('kopfexamples', id='index_fn2')
async def METHOD_NAME(**kwargs):
return index_mock(**kwargs)
@kopf.on.event('kopfexamples', id='event_fn2')
async def event_fn2(**kwargs):
return event_mock(**kwargs)
# Keep on-resume on top, to catch any issues with the test design (where it could be skipped).
# Note: deleted=True -- only for resuming handles, to cover the resource being deleted.
@kopf.on.resume('kopfexamples', id='resume_fn2', deleted=True)
async def resume_fn2(**kwargs):
return resume_mock(**kwargs)
@kopf.on.create('kopfexamples', id='create_fn2')
async def create_fn2(**kwargs):
return create_mock(**kwargs)
@kopf.on.update('kopfexamples', id='update_fn2')
async def update_fn2(**kwargs):
return update_mock(**kwargs)
@kopf.on.delete('kopfexamples', id='delete_fn2')
async def delete_fn2(**kwargs):
return delete_mock(**kwargs)
return HandlersContainer(
index_mock=index_mock,
event_mock=event_mock,
create_mock=create_mock,
update_mock=update_mock,
delete_mock=delete_mock,
resume_mock=resume_mock,
event_fn=event_fn2,
create_fn=create_fn2,
update_fn=update_fn2,
delete_fn=delete_fn2,
resume_fn=resume_fn2,
)
@pytest.fixture()
def cause_mock(mocker, settings):
"""
Mock the resulting _cause_ of the resource change detection logic.
The change detection is complex, depends on many fields and values, and it
is difficult to simulate by artificial event bodies, especially its reason.
Instead, we patch a method which detects the resource changing causes, and
return a cause with the mocked reason (also, diff, and some other fields).
    As the value of this fixture, a mock is provided with a few fields to mock.
    The default is not to mock anything unless defined in the test, and to use
    the original arguments to the detection method.
"""
# Use everything from a mock, but use the passed `patch` dict as is.
# The event handler passes its own accumulator, and checks/applies it later.
def new_detect_fn(*, finalizer, diff, new, old, **kwargs):
# For change detection, we ensure that there is no extra cycle of adding a finalizer.
raw_event = kwargs.pop('raw_event', None)
raw_body = raw_event['object']
raw_body.setdefault('metadata', {}).setdefault('finalizers', [finalizer])
# Pass through kwargs: resource, logger, patch, diff, old, new.
# I.e. everything except what we mock -- for them, use the mocked values (if not None).
return ChangingCause(
reason=mock.reason,
diff=mock.diff if mock.diff is not None else diff,
new=mock.new if mock.new is not None else new,
old=mock.old if mock.old is not None else old,
**kwargs)
# Substitute the real cause detector with out own mock-based one.
mocker.patch('kopf._core.intents.causes.detect_changing_cause', new=new_detect_fn)
# The mock object stores some values later used by the factory substitute.
# Note: ONLY those fields we mock in the tests. Other kwargs should be passed through.
mock = mocker.Mock(spec_set=['reason', 'diff', 'new', 'old'])
mock.reason = None
mock.diff = None
mock.new = None
mock.old = None
return mock | null |
5,783 | #!/usr/bin/env python
import os
import sys
import datetime
import shutil
sys.path.append(os.path.dirname(__file__))
from comp import *
class ParserError(Exception):
pass
class Parser(object):
def __init__(self, path, section_markers=None):
self.path = path
self.data = {}
self.changed = False
self.nocf = False
self.keys = []
self.sections = {}
self.section_names = []
self.lastkey = '__lastkey__'
self.comments = {self.lastkey: []}
if section_markers:
self.section_markers = section_markers
else:
self.section_markers = ["Match"]
self.load()
self.bkp = path + '.' + str(datetime.datetime.now())
def __str__(self):
s = ""
for k in self.keys:
if k in self.comments:
s += '\n'.join(self.comments[k]) + '\n'
s += '\n'.join([k + " " + str(v) for v in self.data[k]]) + '\n'
if len(self.comments[self.lastkey]) > 0:
s += '\n'.join(self.comments[self.lastkey])
for section, data in self.sections.items():
s += section + '\n'
for k in data["keys"]:
for v in data["data"][k]:
s += "\t" + k + " " + str(v) + '\n'
return s
def truncate(self, key, max):
if key not in self.data:
return
n = len(self.data[key])
if n <= max:
return
self.data[key] = self.data[key][:max]
self.changed = True
def set(self, key, value, instance=0):
if key not in self.data:
self.data[key] = [value]
self.keys.append(key)
elif instance >= len(self.data[key]):
            for _ in range(len(self.data[key]), instance):
                self.data[key].append(None)
            self.data[key].append(value)
else:
self.data[key].insert(instance, value)
self.changed = True
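    # Illustrative behaviour (keys/values made up): set("Subsystem", "sftp") on a
    # fresh key stores {"Subsystem": ["sftp"]}; a second
    # set("Subsystem", "internal-sftp", instance=0) inserts the new value in front
    # of the existing one, and both calls flag the parser as changed.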
def METHOD_NAME(self, key, value=None):
if key in self.data:
if value is not None and value.strip() != "":
self.data[key].remove(value)
else:
self.data[key] = []
if len(self.data[key]) == 0:
del(self.data[key])
if key in self.keys:
self.keys.remove(key)
self.changed = True
def get(self, key, instance=0):
if key not in self.data:
return
if instance is None:
return self.data[key]
if instance < len(self.data[key]):
return self.data[key][instance]
return
def load(self):
if not os.path.exists(self.path):
            perror("%s does not exist" % self.path)
            self.nocf = True
            return
with open(self.path, 'r') as f:
buff = f.read()
self.parse(buff)
def backup(self):
if self.nocf:
return
try:
shutil.copy(self.path, self.bkp)
except Exception as e:
perror(e)
raise ParserError("failed to backup %s"%self.path)
        pinfo("%s backed up as %s" % (self.path, self.bkp))
def restore(self):
if self.nocf:
return
try:
shutil.copy(self.bkp, self.path)
except:
raise ParserError("failed to restore %s"%self.path)
pinfo("%s restored from %s" % (self.path, self.bkp))
def write(self):
self.backup()
try:
with open(self.path, 'w') as f:
f.write(str(self))
pinfo("%s rewritten"%self.path)
except Exception as e:
perror(e)
self.restore()
raise ParserError()
def parse(self, buff):
section = None
for line in buff.split("\n"):
line = line.strip()
# store comment line and continue
if line.startswith('#') or len(line) == 0:
self.comments[self.lastkey].append(line)
continue
# strip end-of-line comment
try:
i = line.index('#')
line = line[:i]
line = line.strip()
except ValueError:
pass
# discard empty line
if len(line) == 0:
continue
l = line.split()
if len(l) < 2:
continue
key = l[0]
value = line[len(key):].strip()
if key not in self.comments:
self.comments[key] = self.comments[self.lastkey]
else:
self.comments[key] += self.comments[self.lastkey]
self.comments[self.lastkey] = []
try:
value = int(value)
except:
pass
if key in self.section_markers:
                section = key + " " + str(value)
if section not in self.sections:
self.sections[section] = {"keys": [], "data": {}}
self.section_names.append(section)
continue
if section:
if key not in self.sections[section]["keys"]:
self.sections[section]["keys"].append(key)
if key not in self.sections[section]["data"]:
self.sections[section]["data"][key] = []
self.sections[section]["data"][key].append(value)
else:
if key not in self.keys:
self.keys.append(key)
if key not in self.data:
self.data[key] = []
self.data[key].append(value)
if __name__ == "__main__":
if len(sys.argv) != 2:
perror("wrong number of arguments")
sys.exit(1)
o = Parser(sys.argv[1])
o.get("Subsystem")
o.set("Subsystem", "foo")
o.METHOD_NAME("PermitRootLogin")
o.backup()
pinfo(o)
| null |
5,784 | import pytest
from rest_framework.authtoken.models import Token
from thunderstore.account.forms import (
CreateServiceAccountForm,
DeleteServiceAccountForm,
EditServiceAccountForm,
)
from thunderstore.account.models import ServiceAccount
from thunderstore.core.factories import UserFactory
from thunderstore.repository.models import TeamMember, TeamMemberRole
@pytest.mark.django_db
def test_service_account_fixture(service_account):
username = ServiceAccount.create_username(service_account.uuid.hex)
assert username == service_account.user.username
@pytest.mark.django_db
def test_service_account_create(user, team):
TeamMember.objects.create(
user=user,
team=team,
role=TeamMemberRole.owner,
)
form = CreateServiceAccountForm(
user,
data={"team": team, "nickname": "Nickname"},
)
assert form.is_valid() is True
service_account = form.save()
username = ServiceAccount.create_username(service_account.uuid.hex)
assert username == service_account.user.username
assert service_account.user.first_name == "Nickname"
assert service_account.api_token is not None
assert service_account.api_token.startswith("pbkdf2_sha256$524288$w520TEzFVlsO$")
assert service_account.created_at is not None
assert service_account.last_used is None
assert (
team.members.filter(
user=service_account.user,
role=TeamMemberRole.member,
).exists()
is True
)
@pytest.mark.django_db
def test_service_account_create_nickname_too_long(user, team):
TeamMember.objects.create(
user=user,
team=team,
role=TeamMemberRole.owner,
)
form = CreateServiceAccountForm(
user,
data={"team": team, "nickname": "x" * 1000},
)
assert form.is_valid() is False
assert len(form.errors["nickname"]) == 1
assert (
form.errors["nickname"][0]
== "Ensure this value has at most 32 characters (it has 1000)."
)
@pytest.mark.django_db
def test_service_account_create_not_member(user, team):
assert team.members.filter(user=user).exists() is False
form = CreateServiceAccountForm(
user,
data={"team": team, "nickname": "Nickname"},
)
assert form.is_valid() is False
assert len(form.errors["team"]) == 1
assert (
form.errors["team"][0]
== "Select a valid choice. That choice is not one of the available choices."
)
@pytest.mark.django_db
def test_service_account_create_not_owner(user, team):
TeamMember.objects.create(
user=user,
team=team,
role=TeamMemberRole.member,
)
form = CreateServiceAccountForm(
user,
data={"team": team, "nickname": "Nickname"},
)
assert form.is_valid() is False
assert len(form.errors["team"]) == 1
assert form.errors["team"][0] == "Must be an owner to create a service account"
@pytest.mark.django_db
def test_service_account_delete(django_user_model, service_account):
member = service_account.owner.members.first()
assert member.role == TeamMemberRole.owner
assert django_user_model.objects.filter(pk=service_account.user.pk).exists() is True
form = DeleteServiceAccountForm(
member.user,
data={"service_account": service_account},
)
assert form.is_valid()
form.save()
assert ServiceAccount.objects.filter(pk=service_account.pk).exists() is False
assert (
django_user_model.objects.filter(pk=service_account.user.pk).exists() is False
)
@pytest.mark.django_db
def test_service_account_delete_not_member(service_account):
user = UserFactory.create()
assert service_account.owner.members.filter(user=user).exists() is False
form = DeleteServiceAccountForm(
user,
data={"service_account": service_account},
)
assert form.is_valid() is False
assert len(form.errors["service_account"]) == 1
assert (
form.errors["service_account"][0]
== "Select a valid choice. That choice is not one of the available choices."
)
@pytest.mark.django_db
def test_service_account_delete_not_owner(service_account):
user = UserFactory.create()
TeamMember.objects.create(
user=user,
team=service_account.owner,
role=TeamMemberRole.member,
)
form = DeleteServiceAccountForm(
user,
data={"service_account": service_account},
)
assert form.is_valid() is False
assert len(form.errors["service_account"]) == 1
assert (
form.errors["service_account"][0]
== "Must be an owner to delete a service account"
)
@pytest.mark.django_db
def METHOD_NAME(service_account):
member = service_account.owner.members.first()
assert member.role == TeamMemberRole.owner
form = EditServiceAccountForm(
member.user,
data={"service_account": service_account, "nickname": "New nickname"},
)
assert form.is_valid()
service_account = form.save()
assert service_account.user.first_name == "New nickname"
assert service_account.nickname == "New nickname"
service_account = ServiceAccount.objects.get(pk=service_account.pk)
assert service_account.user.first_name == "New nickname"
assert service_account.nickname == "New nickname"
@pytest.mark.django_db
def test_service_account_edit_nickname_too_long(service_account):
member = service_account.owner.members.first()
assert member.role == TeamMemberRole.owner
form = EditServiceAccountForm(
member.user,
data={"service_account": service_account, "nickname": "x" * 1000},
)
assert form.is_valid() is False
assert len(form.errors["nickname"]) == 1
assert (
form.errors["nickname"][0]
== "Ensure this value has at most 32 characters (it has 1000)."
)
@pytest.mark.django_db
def test_service_account_edit_not_member(service_account):
user = UserFactory.create()
assert service_account.owner.members.filter(user=user).exists() is False
form = EditServiceAccountForm(
user,
data={"service_account": service_account, "nickname": "New nickname"},
)
assert form.is_valid() is False
assert len(form.errors["service_account"]) == 1
assert (
form.errors["service_account"][0]
== "Select a valid choice. That choice is not one of the available choices."
)
@pytest.mark.django_db
def test_service_account_edit_not_owner(service_account):
user = UserFactory.create()
TeamMember.objects.create(
user=user,
team=service_account.owner,
role=TeamMemberRole.member,
)
form = EditServiceAccountForm(
user,
data={"service_account": service_account, "nickname": "New nickname"},
)
assert form.is_valid() is False
assert len(form.errors["service_account"]) == 1
assert (
form.errors["service_account"][0]
== "Must be an owner to edit a service account"
) | null |
5,785 | import os
import platform
import shutil
import pytest
import py7zr
import py7zr.helpers
testdata_path = os.path.join(os.path.dirname(__file__), "data")
targets = [
("zstd", [{"id": py7zr.FILTER_ZSTD, "level": 3}]),
("bzip2", [{"id": py7zr.FILTER_BZIP2}]),
("lzma+bcj", [{"id": py7zr.FILTER_X86}, {"id": py7zr.FILTER_LZMA, "preset": 7}]),
("lzma2+bcj", [{"id": py7zr.FILTER_X86}, {"id": py7zr.FILTER_LZMA2, "preset": 7}]),
(
"bzip2+aes",
[{"id": py7zr.FILTER_BZIP2}, {"id": py7zr.FILTER_CRYPTO_AES256_SHA256}],
),
(
"lzma2+aes",
[
{"id": py7zr.FILTER_LZMA2, "preset": 7},
{"id": py7zr.FILTER_CRYPTO_AES256_SHA256},
],
),
]
@pytest.mark.benchmark(group="compress")
@pytest.mark.parametrize("name, filters", targets)
def test_benchmark_filters_compress(tmp_path, benchmark, name, filters):
def compressor(filters, password):
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "w", filters=filters, password=password) as szf:
szf.writeall(tmp_path.joinpath("src"), "src")
def setup():
if tmp_path.joinpath("target.7z").exists():
tmp_path.joinpath("target.7z").unlink()
with py7zr.SevenZipFile(os.path.join(testdata_path, "mblock_1.7z"), "r") as szf:
szf.extractall(path=tmp_path.joinpath("src"))
with py7zr.SevenZipFile(os.path.join(testdata_path, "mblock_1.7z"), "r") as szf:
archive_info = szf.archiveinfo()
source_size = archive_info.uncompressed
if name.endswith("aes"):
password = "secret"
else:
password = None
benchmark.extra_info["data_size"] = source_size
benchmark.pedantic(compressor, setup=setup, args=[filters, password], iterations=1, rounds=3)
benchmark.extra_info["ratio"] = str(tmp_path.joinpath("target.7z").stat().st_size / source_size)
@pytest.mark.benchmark(group="decompress")
@pytest.mark.parametrize("name, filters", targets)
def test_benchmark_filters_decompress(tmp_path, benchmark, name, filters):
def decompressor(secret):
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "r", password=secret) as szf:
szf.extractall(tmp_path.joinpath("tgt"))
def setup():
shutil.rmtree(tmp_path.joinpath("tgt"), ignore_errors=True)
with py7zr.SevenZipFile(os.path.join(testdata_path, "mblock_1.7z"), "r") as szf:
szf.extractall(path=tmp_path.joinpath("src"))
with py7zr.SevenZipFile(os.path.join(testdata_path, "mblock_1.7z"), "r") as szf:
archive_info = szf.archiveinfo()
source_size = archive_info.uncompressed
if name.endswith("aes"):
password = "secret"
else:
password = None
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "w", filters=filters, password=password) as szf:
szf.writeall(tmp_path.joinpath("src"), "src")
benchmark.extra_info["data_size"] = source_size
benchmark.extra_info["ratio"] = str(tmp_path.joinpath("target.7z").stat().st_size / source_size)
benchmark.pedantic(decompressor, setup=setup, args=[password], iterations=1, rounds=3)
textfilters = [
("ppmd(text)", [{"id": py7zr.FILTER_PPMD, "order": 8, "mem": "4m"}]),
("deflate(text)", [{"id": py7zr.FILTER_DEFLATE}]),
("zstd(text)", [{"id": py7zr.FILTER_ZSTD, "level": 3}]),
("brotli(text)", [{"id": py7zr.FILTER_BROTLI, "level": 11}]),
]
@pytest.mark.benchmark(group="compress")
@pytest.mark.parametrize("name, filters", textfilters)
def test_benchmark_text_compress(tmp_path, benchmark, name, filters):
def compressor(filters):
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "w", filters=filters) as szf:
szf.writeall(tmp_path.joinpath("src"), "src")
def setup():
if tmp_path.joinpath("target.7z").exists():
tmp_path.joinpath("target.7z").unlink()
with py7zr.SevenZipFile(os.path.join(testdata_path, "bzip2_2.7z"), "r") as szf:
szf.extractall(path=tmp_path.joinpath("src"))
with py7zr.SevenZipFile(os.path.join(testdata_path, "bzip2_2.7z"), "r") as szf:
archive_info = szf.archiveinfo()
source_size = archive_info.uncompressed
benchmark.extra_info["data_size"] = source_size
benchmark.pedantic(compressor, setup=setup, args=[filters], iterations=1, rounds=3)
benchmark.extra_info["ratio"] = str(tmp_path.joinpath("target.7z").stat().st_size / source_size)
@pytest.mark.benchmark(group="decompress")
@pytest.mark.parametrize("name, filters", textfilters)
def test_benchmark_text_decompress(tmp_path, benchmark, name, filters):
def decompressor():
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "r") as szf:
szf.extractall(tmp_path.joinpath("tgt"))
def setup():
shutil.rmtree(tmp_path.joinpath("tgt"), ignore_errors=True)
with py7zr.SevenZipFile(os.path.join(testdata_path, "bzip2_2.7z"), "r") as szf:
szf.extractall(path=tmp_path.joinpath("src"))
with py7zr.SevenZipFile(os.path.join(testdata_path, "bzip2_2.7z"), "r") as szf:
archive_info = szf.archiveinfo()
source_size = archive_info.uncompressed
password = None
with py7zr.SevenZipFile(tmp_path.joinpath("target.7z"), "w", filters=filters, password=password) as szf:
szf.writeall(tmp_path.joinpath("src"), "src")
benchmark.extra_info["data_size"] = source_size
benchmark.extra_info["ratio"] = str(tmp_path.joinpath("target.7z").stat().st_size / source_size)
benchmark.pedantic(decompressor, setup=setup, args=[], iterations=1, rounds=3)
@pytest.mark.benchmark(group="calculate_key")
@pytest.mark.skip(reason="Don't test in ordinary development")
def METHOD_NAME(benchmark):
password = "secret".encode("utf-16LE")
cycles = 19
salt = b""
expected = b"e\x11\xf1Pz<*\x98*\xe6\xde\xf4\xf6X\x18\xedl\xf2Be\x1a\xca\x19\xd1\\\xeb\xc6\xa6z\xe2\x89\x1d"
key = benchmark(py7zr.helpers._calculate_key1, password, cycles, salt, "sha256")
assert key == expected
@pytest.mark.benchmark(group="calculate_key")
@pytest.mark.skip(reason="Don't test in ordinary development")
@pytest.mark.skipif(platform.python_implementation() == "PyPy", reason="Will crash on PyPy")
def test_benchmark_calculate_key2(benchmark):
password = "secret".encode("utf-16LE")
cycles = 19
salt = b""
expected = b"e\x11\xf1Pz<*\x98*\xe6\xde\xf4\xf6X\x18\xedl\xf2Be\x1a\xca\x19\xd1\\\xeb\xc6\xa6z\xe2\x89\x1d"
key = benchmark(py7zr.helpers._calculate_key2, password, cycles, salt, "sha256")
assert key == expected
@pytest.mark.benchmark(group="calculate_key")
@pytest.mark.skip(reason="Don't test in ordinary development")
def test_benchmark_calculate_key3(benchmark):
password = "secret".encode("utf-16LE")
cycles = 19
salt = b""
expected = b"e\x11\xf1Pz<*\x98*\xe6\xde\xf4\xf6X\x18\xedl\xf2Be\x1a\xca\x19\xd1\\\xeb\xc6\xa6z\xe2\x89\x1d"
key = benchmark(py7zr.helpers._calculate_key3, password, cycles, salt, "sha256")
assert key == expected | null |
5,786 | import importlib
import os
import shutil
import sys
import textwrap
from contextlib import contextmanager
from pathlib import Path
import pytest
from _pytest.monkeypatch import MonkeyPatch
import lektor.project
from lektor.builder import Builder
from lektor.db import Database
from lektor.db import Tree
from lektor.environment import Environment
from lektor.environment.expressions import Expression
from lektor.project import Project
from lektor.reporter import BufferReporter
from lektor.utils import locate_executable
@pytest.fixture(scope="session")
def data_path():
"""Path to directory which contains test data.
    Currently this data lives in the ``tests`` directory.
"""
return Path(__file__).parent
@pytest.fixture(scope="session", autouse=True)
def temporary_lektor_cache(tmp_path_factory):
"""Get Lektor to use a temporary cache directory.
This prevents the tests from leaving scats behind in the
user’s real cache directory.
"""
cache_dir = tmp_path_factory.mktemp("lektor_cache")
# The stock monkeypatch fixture is function-scoped and so can not
# be used in a session-scoped fixture.
# Workaround from:
# https://github.com/pytest-dev/pytest/issues/363#issuecomment-406536200
def get_cache_dir():
return str(cache_dir)
mp = MonkeyPatch()
mp.setattr(lektor.project, "get_cache_dir", get_cache_dir)
yield cache_dir
mp.undo()
@contextmanager
def restore_import_state():
"""Save `sys.path`, and `sys.modules` state on test
entry, restore after test completion.
Any test which constructs a `lektor.environment.Environment` instance
or which runs any of the Lektor CLI commands should use this fixture
to ensure that alterations made to `sys.path` do not interfere with
other tests.
Lektor's private package cache is added to `sys.path` by
`lektor.packages.load_packages`. This happens, for example,
whenever a Lektor `Environment` is constructed (unless
`load_plugins=False` is specified.) Since all tests are run
within an single invocation of the python interpreter, this can
cause problems when different tests are using different private
package caches.
"""
path = sys.path.copy()
meta_path = sys.meta_path.copy()
path_hooks = sys.path_hooks.copy()
modules = sys.modules.copy()
# Importlib_metadata, when it is imported, cripples the stdlib distribution finder
# by deleting its find_distributions method.
#
# https://github.com/python/importlib_metadata/blob/705a7571ec7c5abec4d4b008da3a58df7e5c94e7/importlib_metadata/_compat.py#L31
#
def clone_class(cls):
return type(cls)(cls.__name__, cls.__bases__, cls.__dict__.copy())
sys.meta_path[:] = [
clone_class(finder) if isinstance(finder, type) else finder
for finder in meta_path
]
try:
yield
finally:
importlib.invalidate_caches()
        # NB: Restore sys.modules, sys.path, et al. in place. (Some modules may hold
# references to these — e.g. pickle appears to hold a reference to sys.modules.)
for module in set(sys.modules).difference(modules):
del sys.modules[module]
sys.modules.update(modules)
sys.path[:] = path
sys.meta_path[:] = meta_path
sys.path_hooks[:] = path_hooks
sys.path_importer_cache.clear()
_initial_path_key = object()
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
item.stash[_initial_path_key] = sys.path.copy()
@pytest.hookimpl(trylast=True)
def METHOD_NAME(item):
# Check that tests don't alter sys.path
initial_path = item.stash[_initial_path_key]
assert sys.path == initial_path
@pytest.fixture
def save_sys_path():
with restore_import_state():
yield
@pytest.fixture(scope="session")
def project(data_path):
return Project.from_path(data_path / "demo-project")
@pytest.fixture(scope="function")
def scratch_project_data(tmp_path):
base = tmp_path / "scratch-proj"
def write_text(path, text):
filename = base / path
filename.parent.mkdir(parents=True, exist_ok=True)
filename.write_text(textwrap.dedent(text), "utf-8")
write_text(
"Scratch.lektorproject",
"""
[project]
name = Scratch
[alternatives.en]
primary = yes
[alternatives.de]
url_prefix = /de/
""",
)
write_text(
"content/contents.lr",
"""
_model: page
---
title: Index
---
body: *Hello World!*
""",
)
write_text(
"templates/page.html",
"""
<h1>{{ this.title }}</h1>
{{ this.body }}
""",
)
write_text(
"models/page.ini",
"""
[model]
label = {{ this.title }}
[fields.title]
type = string
[fields.body]
type = markdown
""",
)
return base
@pytest.fixture(scope="function")
def scratch_project(scratch_project_data):
return Project.from_path(scratch_project_data)
@pytest.fixture(scope="function")
def env(project, save_sys_path):
return Environment(project)
@pytest.fixture(scope="function")
def scratch_env(scratch_project, save_sys_path):
return Environment(scratch_project)
@pytest.fixture(scope="function")
def pad(env):
return Database(env).new_pad()
@pytest.fixture(scope="function")
def scratch_pad(scratch_env):
return Database(scratch_env).new_pad()
@pytest.fixture(scope="function")
def scratch_tree(scratch_pad):
return Tree(scratch_pad)
@pytest.fixture(scope="function")
def builder(tmp_path, pad):
output_path = tmp_path / "output"
output_path.mkdir()
return Builder(pad, str(output_path))
@pytest.fixture(scope="session")
def built_demo(tmp_path_factory, project):
output_path = tmp_path_factory.mktemp("demo-output")
with restore_import_state():
env = Environment(project)
builder = Builder(env.new_pad(), os.fspath(output_path))
builder.build_all()
return output_path
@pytest.fixture(scope="function")
def scratch_builder(tmp_path, scratch_pad):
output_path = tmp_path / "output"
output_path.mkdir()
return Builder(scratch_pad, str(output_path))
# Builder for child-sources-test-project, a project to test that child sources
# are built even if they're filtered out by a pagination query.
@pytest.fixture(scope="function")
def child_sources_test_project_builder(tmp_path, data_path, save_sys_path):
output_path = tmp_path / "output"
output_path.mkdir()
project = Project.from_path(data_path / "child-sources-test-project")
pad = project.make_env().new_pad()
return Builder(pad, str(output_path))
@pytest.fixture(scope="function")
def eval_expr(env):
def eval_expr(expr, **kwargs):
expr = Expression(env, expr)
return expr.evaluate(**kwargs)
return eval_expr
@pytest.fixture(scope="function")
def reporter(request, env):
reporter = BufferReporter(env)
reporter.push()
request.addfinalizer(reporter.pop)
return reporter
@pytest.fixture(scope="function")
def project_cli_runner(isolated_cli_runner, project, save_sys_path):
"""
Copy the project files into the isolated file system used by the
Click test runner.
"""
for entry in os.listdir(project.tree):
entry_path = os.path.join(project.tree, entry)
if os.path.isdir(entry_path):
shutil.copytree(entry_path, entry)
else:
shutil.copy2(entry_path, entry)
return isolated_cli_runner
@pytest.fixture
def no_utils(monkeypatch):
"""Monkeypatch $PATH to hide any installed external utilities
(e.g. git, ffmpeg)."""
monkeypatch.setitem(os.environ, "PATH", "/dev/null")
locate_executable.cache_clear()
try:
yield
finally:
locate_executable.cache_clear() | null |
5,787 | import concurrent.futures
import logging
import os
import ssl
import sys
from urllib.request import urlopen
import pytest
import py7zr
# Hack for the tests only; it is highly discouraged in production.
ssl._create_default_https_context = ssl._create_unverified_context
testdata_path = os.path.join(os.path.dirname(__file__), "data")
archives = [
(
"qt3d.7z",
"https://ftp.jaist.ac.jp/pub/qtproject/online/qtsdkrepository/"
"windows_x86/desktop/qt5_5126/qt.qt5.5126.win64_mingw73/"
"5.12.6-0-201911111120qt3d-Windows-Windows_10-Mingw73-Windows-Windows_10-X86_64.7z",
),
(
"qtxmlpatterns.7z",
"https://ftp1.nluug.nl/languages/qt/online/qtsdkrepository/"
"windows_x86/desktop/qt5_5132/qt.qt5.5132.win64_mingw73/"
"5.13.2-0-201910281254qtxmlpatterns-Windows-Windows_10-Mingw73-Windows-Windows_10-X86_64.7z",
),
(
"qtactiveqt.7z",
"http://mirrors.dotsrc.org/qtproject/online/qtsdkrepository/"
"windows_x86/desktop/qt5_5132/qt.qt5.5132.win64_mingw73/"
"5.13.2-0-201910281254qtactiveqt-Windows-Windows_10-Mingw73-Windows-Windows_10-X86_64.7z",
),
(
"qtbase.7z",
"http://qt.mirrors.tds.net/qt/online/qtsdkrepository/"
"windows_x86/desktop/qt5_5132/qt.qt5.5132.win32_mingw73/"
"5.13.2-0-201910281254qtbase-Windows-Windows_7-Mingw73-Windows-Windows_7-X86.7z",
),
(
"opengl32sw.7z",
"http://mirrors.ocf.berkeley.edu/qt/online/qtsdkrepository/windows_x86/desktop/"
"qt5_5132/qt.qt5.5132.win64_mingw73/"
"5.13.2-0-201910281254opengl32sw-64-mesa_12_0_rc2.7z",
),
(
"EnvVarUpdate.7z",
"https://nsis.sourceforge.io/" "mediawiki/images/a/ad/EnvVarUpdate.7z",
),
(
"GTKVICE-3.3.7z",
"https://downloads.sourceforge.net/project/" "vice-emu/releases/binaries/windows/GTK3VICE-3.4-win64.7z",
),
(
"lpng1634.7z",
"https://github.com/glennrp/libpng-releases/raw/master/lpng1634.7z",
),
]
@pytest.mark.timeout(360)
@pytest.mark.remote_data
def test_concurrent_futures(tmp_path):
def download_and_extract(ar, path):
archive = path.joinpath(ar[0])
try:
resp = urlopen(ar[1])
with open(archive, "wb") as fd:
while True:
chunk = resp.read(8196)
if not chunk:
break
fd.write(chunk)
szf = py7zr.SevenZipFile(archive)
szf.extractall(path=path)
szf.close()
except Exception:
exc = sys.exc_info()
logging.error("Caught error: %s" % exc[1])
return False
return True
with concurrent.futures.ThreadPoolExecutor() as texec:
tasks = [texec.submit(download_and_extract, ar, tmp_path) for ar in archives]
for task in concurrent.futures.as_completed(tasks):
if not task.result():
raise Exception("Failed to extract.")
@pytest.mark.timeout(180)
def test_concurrent_extraction(tmp_path, caplog):
def METHOD_NAME(archive, path):
szf = py7zr.SevenZipFile(archive, "r")
szf.extractall(path=path)
szf.close()
archives = [
"bugzilla_4.7z",
"bzip2.7z",
"bzip2_2.7z",
"copy.7z",
"empty.7z",
"github_14.7z",
"lzma2bcj.7z",
"mblock_1.7z",
"mblock_3.7z",
"solid.7z",
"symlink.7z",
"test_1.7z",
"test_2.7z",
"test_3.7z",
"test_5.7z",
"test_6.7z",
"test_folder.7z",
"umlaut-non_solid.7z",
"umlaut-solid.7z",
"zerosize.7z",
]
with concurrent.futures.ThreadPoolExecutor() as executor:
tasks = [executor.submit(METHOD_NAME, os.path.join(testdata_path, ar), tmp_path.joinpath(ar)) for ar in archives]
done, not_done = concurrent.futures.wait(tasks, return_when=concurrent.futures.ALL_COMPLETED)
if len(not_done) > 0:
raise Exception("Extraction error.") | null |
5,788 | """
Dungeon (DXC): selection of the main stage and the sub-stage.
"""
import time
from typing import Optional, Union
from automator_mixins._base import OCRRecognizeError
from core.constant import DXC_ELEMENT, DXC_ENTRANCE_DRAG, DXC_ENTRANCE, JUQING_BTN, MAIN_BTN
from core.pcr_checker import LockError, LockTimeoutError
from scenes.dxc.dxc_fight import FightInfoDXC
from scenes.scene_base import PossibleSceneList, PCRMsgBoxBase, PCRSceneBase
from scenes.zhuxian.zhuxian_base import SevenBTNMixin
class PossibleDXCMenu(PossibleSceneList):
def __init__(self, a):
self.DXCSelectA = DXCSelectA
self.DXCSelectB = DXCSelectB
self.DXCKKR = DXCKKR
self.DXCJuQing = DXCJuQing
self.ShouQuBaoChou = ShouQuBaoChou
self.QuYuXuanZeQueRen = QuYuXuanZeQueRen
scene_list = [
DXCKKR(a),
ShouQuBaoChou(a),
QuYuXuanZeQueRen(a),
DXCJuQing(a),
DXCSelectA(a),
DXCSelectB(a),
]
        # Scene double-checking is used, so the MsgBox scenes (DXCKKR, ShouQuBaoChou) must come first; DXCJuQing, DXCSelectA and DXCSelectB are plain Scenes and go last.
super().__init__(a, scene_list, double_check=1., timeout=90, check_double_scene=True)
class DXCSelectA(SevenBTNMixin):
def __init__(self, a):
super().__init__(a)
self.scene_name = "DXCSelectA"
self.feature = self.fun_feature_exist(DXC_ELEMENT["sytzcs"])
def get_cishu(self):
cishu = self.check_dict_id({
0: DXC_ELEMENT["0/1"],
1: DXC_ELEMENT["1/1"]
})
return cishu
def enter_dxc(self, dxc_id, skip=False, skip_only=False) -> Union["DXCSelectB", str]:
'''
        skip_only: used by mode 5; if the stage cannot be skipped, do not enter it.
'''
drag = DXC_ENTRANCE_DRAG[dxc_id]
def do_fun():
if drag == "left":
self.click(10, 242)
# self.Drag_Left(origin=True)
time.sleep(1.5)
elif drag == "right":
# self.Drag_Right(origin=True)
self.click(950, 242)
time.sleep(1.5)
self.click(DXC_ENTRANCE[dxc_id])
PS = self.goto(PossibleDXCMenu, do_fun)
while True:
if isinstance(PS, (DXCKKR, DXCJuQing)):
PS.skip()
PS = self._a.get_zhuye().goto_maoxian().goto(PossibleDXCMenu, self.fun_click(MAIN_BTN["dxc"]))
continue
elif isinstance(PS, DXCSelectB):
return PS
elif isinstance(PS, QuYuXuanZeQueRen):
if PS.can_skip():
if skip:
PS.skip()
return "skip"
else:
PS = PS.METHOD_NAME()
continue
elif skip_only:
self.fclick(1,1)
return "cannot_skip"
else:
PS = PS.ok()
continue
else:
                raise LockTimeoutError("Failed to enter the dungeon!")
class DXCSelectB(SevenBTNMixin):
def __init__(self, a):
super().__init__(a)
self.scene_name = "DXCSelectB"
self.feature = self.fun_feature_exist(DXC_ELEMENT["in_sytzcs"])
def goto_chetui(self) -> "DXCCheTui":
return self.goto(DXCCheTui, self.fun_click(DXC_ELEMENT["chetui"]))
def click_xy_and_open_fightinfo_xy(self, x, y) -> Optional[FightInfoDXC]:
def gotofun():
self.click(x, y)
try:
return self.goto(FightInfoDXC, gotofun, retry=3, interval=3)
except LockError:
return None
def get_cishu(self, screen=None):
        # Use OCR to get the number of remaining challenge attempts
at = (721,421,753,438)
A, B = self.ocr_A_B(*at, screen_shot=screen,allow_AB="01")
return A
def get_jieshu(self, screen=None):
        # Use OCR to get the current floor number
at = (516,421,548,438)
try:
A, B = self.ocr_A_B(*at, screen_shot=screen)
except OCRRecognizeError as e:
            self.log.write_log("warning", f"OCR failed to read the floor number! Starting from floor 1. {e}")
return 1
return A
def get_next_id(self, screen=None):
        # Look for the "层" (floor) character and try to get the (x, y) of the next floor.
        # Kept as a spare implementation; not intended to be used.
LST = self.img_where_all(DXC_ELEMENT["ceng"], threshold=0.8, screen=screen)
if len(LST) == 0:
            return -1  # the floor marker was not found
else:
NLST = [] # Reshape
now = []
for L in LST:
now.append(L)
if len(now) == 3:
NLST.append(now)
now = []
XY = []
for x, y, _ in NLST:
x0 = x
y0 = y - 100
if y0 < 1:
y0 = 1
                XY.append((x0, y0))
            return XY
class ShouQuBaoChou(PCRMsgBoxBase):
def __init__(self, a):
super().__init__(a)
self.feature = self.fun_feature_exist(DXC_ELEMENT["shouqubaochou_ok"])
def ok(self):
self.exit(self.fun_click(475, 481)) # ok
class QuYuXuanZeQueRen(PCRMsgBoxBase):
def __init__(self, a):
super().__init__(a)
self.feature = self.fun_feature_exist(DXC_ELEMENT["qyxzqr"])
def can_skip(self, screen=None):
if screen is None:
screen = self.getscreen()
if self.is_exists(DXC_ELEMENT["quyuxuanzequeren_skip"], screen=screen):
return True
else:
return False
def ok(self):
return self.goto(PossibleDXCMenu, self.fun_click(DXC_ELEMENT["quyuxuanzequeren_ok"]))
def METHOD_NAME(self):
return self.goto(PossibleDXCMenu, self.fun_click(DXC_ELEMENT["quyuxuanzequeren_tz"]))
def skip(self):
from scenes.zhuxian.zhuxian_msg import SaoDangJieGuo
SD: SaoDangJieGuo = self.goto(SaoDangJieGuo, self.fun_click(DXC_ELEMENT["quyuxuanzequeren_skip"]))
MsgList = SD.OK() # 扫荡后的一系列MsgBox
MsgList.exit_all(True)
self.fclick(1, 1)
self.fclick(1, 1)
class DXCKKR(PCRMsgBoxBase):
def __init__(self, a):
super().__init__(a)
self.feature = self.fun_feature_exist(DXC_ELEMENT["dxc_kkr"])
def skip(self):
self.chulijiaocheng(None)
class DXCJuQing(PCRSceneBase):
def __init__(self, a):
super().__init__(a)
self.feature = self.fun_feature_exist(JUQING_BTN["caidanyuan"])
def skip(self):
self.chulijiaocheng(None)
class DXCCheTui(PCRMsgBoxBase):
def __init__(self, a):
super().__init__(a)
self.feature = self.fun_feature_exist(DXC_ELEMENT["chetuiqueren"])
def ok(self) -> "DXCSelectA":
return self.goto(DXCSelectA, self.fun_click(DXC_ELEMENT["chetui_ok"])) | null |
5,789 | import binascii
from construct import Container
from paradox.hardware.evo.parsers import (DefinitionsParserMap,
get_user_definition, RAMDataParserMap)
def test_zone_definition_test():
parser = DefinitionsParserMap["zone"]
assert parser.sizeof() == 2
data = parser.parse(b"\x11\x02")
assert data.definition == "entry_delay1"
assert data.partition == 1
assert data.options.alarm_type == "steady_alarm"
assert data.options.bypass_enabled is True
data = parser.parse(b"\x31\x02")
assert data.definition == "follow"
assert data.partition == 1
assert data.options.alarm_type == "steady_alarm"
assert data.options.bypass_enabled is True
data = parser.parse(b"\x41\x0a")
assert data.definition == "instant"
assert data.partition == 1
assert data.options.alarm_type == "steady_alarm"
assert data.options.force_zone is True
assert data.options.bypass_enabled is True
data = parser.parse(b"\x01\x0a")
assert data.definition == "disabled"
assert data.partition == 1
assert data.options.alarm_type == "steady_alarm"
assert data.options.force_zone is True
assert data.options.bypass_enabled is True
data = parser.parse(b"\x42\x0e")
assert data.definition == "instant"
assert data.partition == 2
assert data.options.alarm_type == "steady_alarm"
assert data.options.force_zone is True
assert data.options.bypass_enabled is True
data = parser.parse(b"\x42\x0e")
assert data.definition == "instant"
assert data.partition == 2
assert data.options.alarm_type == "steady_alarm"
assert data.options.force_zone is True
assert data.options.bypass_enabled is True
data = parser.parse(b"\xd4\x2a")
assert data.definition == "standard_fire_24h"
assert data.partition == 4
assert data.options.alarm_type == "pulsed_alarm"
assert data.options.force_zone is True
assert data.options.bypass_enabled is True
# def test_partition_definition_test():
# parser = DefinitionsParserMap["partition"]
#
# assert parser.sizeof() == 1
#
# data = parser.parse(b"\xcb")
#
# assert data[1]["definition"] == "enabled"
# assert data[3]["definition"] == "disabled"
# assert len(data) == 8
def METHOD_NAME():
settings = Container(
system_options=Container(
user_code_length_6=False, user_code_length_flexible=False
)
)
parser = get_user_definition(settings)
assert parser.sizeof() == 10
# empty
data = parser.parse(binascii.unhexlify("00000048000000000000"))
assert data.code is None
# master
data = parser.parse(binascii.unhexlify("123412ebff00af000000"))
assert data.code == "1234"
assert data.options == dict(
type="FullMaster",
duress=False,
bypass=True,
arm_only=False,
stay_instant_arming=True,
force_arming=True,
all_subsystems=True,
)
assert data.partitions == {
1: True,
2: True,
3: True,
4: True,
5: True,
6: True,
7: True,
8: True,
}
# regular
data = parser.parse(binascii.unhexlify("a123a140cb0000000000"))
assert data.code == "0123"
assert data.options == dict(
type="Regular",
duress=False,
bypass=False,
arm_only=False,
stay_instant_arming=False,
force_arming=True,
all_subsystems=False,
)
assert data.partitions == {
1: True,
2: True,
3: False,
4: True,
5: False,
6: False,
7: True,
8: True,
}
def test_parse_ram_1_troubles():
raw_data = bytearray(64)
raw_data[18] = 20 # century
raw_data[20] = 1 # month
raw_data[21] = 1 # day
raw_data[13] |= 0x10 # zone tamper
raw_data[13] |= 0x20 # zone low battery
raw_data[14] |= 0x2 # battery failure trouble
raw_data[16] |= 0x20 # module AC lost
raw_data[17] |= 0x1 # missing keypad trouble
parser = RAMDataParserMap[1]
data = parser.parse(raw_data)
print(data)
assert data["system"]["troubles"]["zone_tamper_trouble"]
assert data["system"]["troubles"]["zone_low_battery_trouble"]
assert data["system"]["troubles"]["battery_failure_trouble"]
assert data["system"]["troubles"]["module_ac_trouble"]
assert data["system"]["troubles"]["missing_keypad_trouble"] | null |
5,790 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqtinterfaces.drill.model.DrillExportModel import DrillExportModel
class DrillExportModelTest(unittest.TestCase):
EXPORT_ALGORITHMS = {"a1": {("ea1", ".txt"): True, ("ea2", ".xml"): False}, "a2": {("ea2", ".xml"): True}}
EXPORT_ALGO_CRITERIA = {"ea1": "%param% == 'test'", "ea2": "%param% != 'test'"}
def METHOD_NAME(self):
patch = mock.patch("mantidqtinterfaces.drill.model.DrillExportModel.mtd")
self.mMtd = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch("mantidqtinterfaces.drill.model.DrillExportModel.logger")
self.mLogger = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch("mantidqtinterfaces.drill.model.DrillExportModel.config")
self.mConfig = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch.dict(
"mantidqtinterfaces.drill.model.DrillExportModel" ".RundexSettings.EXPORT_ALGORITHMS", self.EXPORT_ALGORITHMS, clear=True
)
self.mAlgo = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch.dict(
"mantidqtinterfaces.drill.model.DrillExportModel" ".RundexSettings.EXPORT_ALGO_CRITERIA", self.EXPORT_ALGO_CRITERIA, clear=True
)
self.mAlgoCriteria = patch.start()
self.addCleanup(patch.stop)
patch = mock.patch("mantidqtinterfaces.drill.model.DrillExportModel" ".DrillAlgorithmPool")
self.mTasksPool = patch.start()
self.mTasksPool = self.mTasksPool.return_value
self.addCleanup(patch.stop)
patch = mock.patch("mantidqtinterfaces.drill.model.DrillExportModel" ".DrillTask")
self.mTask = patch.start()
self.addCleanup(patch.stop)
self.exportModel = DrillExportModel("a1")
def test_init(self):
self.assertDictEqual(self.exportModel._exportAlgorithms, self.EXPORT_ALGORITHMS["a1"])
self.assertEqual(self.exportModel._exports, {})
self.assertEqual(self.exportModel._successExports, {})
def test_getAlgorithms(self):
algs = self.exportModel.getAlgorithms()
self.assertEqual(algs, [a for a in self.EXPORT_ALGORITHMS["a1"]])
def test_isAlgorithmActivated(self):
self.assertEqual(self.exportModel._exportAlgorithms[("ea1", ".txt")], True)
self.assertEqual(self.exportModel.isAlgorithmActivated(("ea1", ".txt")), True)
self.exportModel._exportAlgorithms[("ea1", ".txt")] = False
self.assertEqual(self.exportModel.isAlgorithmActivated(("ea1", ".txt")), False)
self.assertEqual(self.exportModel._exportAlgorithms[("ea2", ".xml")], False)
self.assertEqual(self.exportModel.isAlgorithmActivated(("ea2", ".xml")), False)
def test_activateAlgorithm(self):
self.assertEqual(self.exportModel._exportAlgorithms[("ea2", ".xml")], False)
self.exportModel.activateAlgorithm(("ea2", ".xml"))
        self.assertEqual(self.exportModel._exportAlgorithms[("ea2", ".xml")], True)
self.exportModel.activateAlgorithm(("ea2", ".xml"))
self.assertEqual(self.exportModel._exportAlgorithms[("ea2", ".xml")], True)
self.exportModel.activateAlgorithm(("ea3", ".data"))
def test_inactivateAlgorithm(self):
self.assertEqual(self.exportModel._exportAlgorithms[("ea1", ".txt")], True)
self.exportModel.inactivateAlgorithm(("ea1", ".txt"))
self.assertEqual(self.exportModel._exportAlgorithms[("ea1", ".txt")], False)
self.exportModel.inactivateAlgorithm(("ea1", ".txt"))
self.assertEqual(self.exportModel._exportAlgorithms[("ea1", ".txt")], False)
self.exportModel.inactivateAlgorithm(("ea3", ".data"))
def test_valid_Criteria(self):
mHist = self.mMtd.__getitem__.return_value.getHistory.return_value
mAlgo = mHist.lastAlgorithm.return_value
mAlgo.getPropertyValue.return_value = "test"
self.assertTrue(self.exportModel._validCriteria(mock.Mock(), "ea3"))
self.assertTrue(self.exportModel._validCriteria(mock.Mock(), "ea1"))
self.assertFalse(self.exportModel._validCriteria(mock.Mock(), "ea2"))
def test_onTaskSuccess(self):
self.exportModel._logSuccessExport = mock.Mock()
self.exportModel._exports = {"workspace1": {"filename1", "filename2"}, "workspace2": {"filename3"}}
self.exportModel._onTaskSuccess("workspace1", "filename1")
self.assertDictEqual(self.exportModel._successExports, {"workspace1": {"filename1"}})
self.assertDictEqual(self.exportModel._exports, {"workspace1": {"filename2"}, "workspace2": {"filename3"}})
self.exportModel._onTaskSuccess("workspace1", "filename2")
self.assertDictEqual(self.exportModel._successExports, {"workspace1": {"filename1", "filename2"}})
self.assertDictEqual(self.exportModel._exports, {"workspace2": {"filename3"}})
self.exportModel._logSuccessExport.assert_called_once()
def test_onTaskError(self):
self.exportModel._logSuccessExport = mock.Mock()
self.exportModel._exports = {"workspace1": {"filename1", "filename2"}, "workspace2": {"filename3"}}
self.exportModel._onTaskError("workspace2", "filename3", "error message")
self.mLogger.error.assert_called()
self.assertDictEqual(self.exportModel._exports, {"workspace1": {"filename1", "filename2"}})
self.exportModel._logSuccessExport.assert_called_once()
def test_logSuccessExport(self):
self.exportModel._successExports = {"workspace1": {"filename1", "filename2"}}
self.exportModel._logSuccessExport("workspace1")
self.mLogger.notice.assert_called()
self.assertDictEqual(self.exportModel._successExports, {})
def test_run(self):
self.mConfig.getString.return_value = "/default/save/directory/"
mSample = mock.Mock()
mSample.getOutputName.return_value = "workspace"
mGroup = mock.Mock()
mGroup.getNames.return_value = ["workspace"]
self.mMtd.__getitem__.return_value = mGroup
self.mMtd.getObjectNames.return_value = ["workspace_1", "workspace_2"]
self.exportModel._validCriteria = mock.Mock()
self.exportModel._validCriteria.return_value = True
self.exportModel.run(mSample)
self.assertDictEqual(
self.exportModel._exports,
{"workspace_1": {"/default/save/directory/workspace_1.txt"}, "workspace_2": {"/default/save/directory/workspace_2.txt"}},
)
self.mTask.assert_called()
if __name__ == "__main__":
unittest.main() | null |
5,791 | import random
import threading
import time
from abc import ABC, abstractmethod
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Dict, List, Tuple
import itertools
from pilot.model.base import ModelInstance
class ModelRegistry(ABC):
"""
Abstract base class for a model registry. It provides an interface
for registering, deregistering, fetching instances, and sending heartbeats
for instances.
"""
@abstractmethod
async def register_instance(self, instance: ModelInstance) -> bool:
"""
Register a given model instance.
Args:
- instance (ModelInstance): The instance of the model to register.
Returns:
- bool: True if registration is successful, False otherwise.
"""
pass
@abstractmethod
async def deregister_instance(self, instance: ModelInstance) -> bool:
"""
Deregister a given model instance.
Args:
- instance (ModelInstance): The instance of the model to deregister.
Returns:
- bool: True if deregistration is successful, False otherwise.
"""
@abstractmethod
async def get_all_instances(
self, model_name: str, healthy_only: bool = False
) -> List[ModelInstance]:
"""
Fetch all instances of a given model. Optionally, fetch only the healthy instances.
Args:
- model_name (str): Name of the model to fetch instances for.
- healthy_only (bool, optional): If set to True, fetches only the healthy instances.
Defaults to False.
Returns:
- List[ModelInstance]: A list of instances for the given model.
"""
@abstractmethod
async def get_all_model_instances(self) -> List[ModelInstance]:
"""
Fetch all instances of all models
Returns:
        - List[ModelInstance]: A list of instances for all models.
"""
async def select_one_health_instance(self, model_name: str) -> ModelInstance:
"""
Selects one healthy and enabled instance for a given model.
Args:
- model_name (str): Name of the model.
Returns:
- ModelInstance: One randomly selected healthy and enabled instance, or None if no such instance exists.
"""
instances = await self.get_all_instances(model_name, healthy_only=True)
instances = [i for i in instances if i.enabled]
if not instances:
return None
return random.choice(instances)
@abstractmethod
async def send_heartbeat(self, instance: ModelInstance) -> bool:
"""
Send a heartbeat for a given model instance. This can be used to
verify if the instance is still alive and functioning.
Args:
- instance (ModelInstance): The instance of the model to send a heartbeat for.
Returns:
- bool: True if heartbeat is successful, False otherwise.
"""
class EmbeddedModelRegistry(ModelRegistry):
def __init__(
self, heartbeat_interval_secs: int = 60, heartbeat_timeout_secs: int = 120
):
self.registry: Dict[str, List[ModelInstance]] = defaultdict(list)
self.heartbeat_interval_secs = heartbeat_interval_secs
self.heartbeat_timeout_secs = heartbeat_timeout_secs
self.heartbeat_thread = threading.Thread(target=self.METHOD_NAME)
self.heartbeat_thread.daemon = True
self.heartbeat_thread.start()
def _get_instances(
self, model_name: str, host: str, port: int, healthy_only: bool = False
) -> Tuple[List[ModelInstance], List[ModelInstance]]:
instances = self.registry[model_name]
if healthy_only:
instances = [ins for ins in instances if ins.healthy == True]
exist_ins = [ins for ins in instances if ins.host == host and ins.port == port]
return instances, exist_ins
def METHOD_NAME(self):
while True:
for instances in self.registry.values():
for instance in instances:
if (
instance.check_healthy
and datetime.now() - instance.last_heartbeat
> timedelta(seconds=self.heartbeat_timeout_secs)
):
instance.healthy = False
time.sleep(self.heartbeat_interval_secs)
async def register_instance(self, instance: ModelInstance) -> bool:
model_name = instance.model_name.strip()
host = instance.host.strip()
port = instance.port
instances, exist_ins = self._get_instances(
model_name, host, port, healthy_only=False
)
if exist_ins:
# One exist instance at most
ins = exist_ins[0]
# Update instance
ins.weight = instance.weight
ins.healthy = True
ins.prompt_template = instance.prompt_template
ins.last_heartbeat = datetime.now()
else:
instance.healthy = True
instance.last_heartbeat = datetime.now()
instances.append(instance)
return True
async def deregister_instance(self, instance: ModelInstance) -> bool:
model_name = instance.model_name.strip()
host = instance.host.strip()
port = instance.port
_, exist_ins = self._get_instances(model_name, host, port, healthy_only=False)
if exist_ins:
ins = exist_ins[0]
ins.healthy = False
return True
async def get_all_instances(
self, model_name: str, healthy_only: bool = False
) -> List[ModelInstance]:
instances = self.registry[model_name]
if healthy_only:
instances = [ins for ins in instances if ins.healthy == True]
return instances
async def get_all_model_instances(self) -> List[ModelInstance]:
print(self.registry)
return list(itertools.chain(*self.registry.values()))
async def send_heartbeat(self, instance: ModelInstance) -> bool:
_, exist_ins = self._get_instances(
instance.model_name, instance.host, instance.port, healthy_only=False
)
if not exist_ins:
            # register a new instance from the heartbeat
            await self.register_instance(instance)
return True
ins = exist_ins[0]
ins.last_heartbeat = datetime.now()
ins.healthy = True
return True | null |
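# --- Added illustration (not part of the original dataset row) ---
# A minimal usage sketch for the registry above. The ModelInstance keyword
# arguments (model_name, host, port) are an assumption about
# pilot.model.base.ModelInstance; adjust them (and whatever flag corresponds
# to `enabled`) to the real field names.
import asyncio


async def _registry_demo():
    registry = EmbeddedModelRegistry(heartbeat_interval_secs=5, heartbeat_timeout_secs=15)
    instance = ModelInstance(model_name="vicuna-13b", host="127.0.0.1", port=8000)  # assumed fields
    await registry.register_instance(instance)  # marks it healthy and stamps a heartbeat
    healthy = await registry.get_all_instances("vicuna-13b", healthy_only=True)
    chosen = await registry.select_one_health_instance("vicuna-13b")
    print(len(healthy), chosen)


if __name__ == "__main__":
    asyncio.run(_registry_demo())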
5,792 | import pytest
import env # noqa: F401
from pybind11_tests import ConstructorStats
from pybind11_tests import call_policies as m
@pytest.mark.xfail("env.PYPY", reason="sometimes comes out 1 off on PyPy", strict=False)
def test_keep_alive_argument(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.addChild(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert (
capture
== """
Allocating child.
Releasing child.
"""
)
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == "Allocating child."
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
p = m.Parent()
c = m.Child()
assert ConstructorStats.detail_reg_inst() == n_inst + 2
m.free_function(p, c)
del c
assert ConstructorStats.detail_reg_inst() == n_inst + 2
del p
assert ConstructorStats.detail_reg_inst() == n_inst
with pytest.raises(RuntimeError) as excinfo:
m.invalid_arg_index()
assert str(excinfo.value) == "Could not activate keep_alive!"
def METHOD_NAME(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnChild()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert (
capture
== """
Allocating child.
Releasing child.
"""
)
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnChildKeepAlive()
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == "Allocating child."
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
p = m.Parent()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
with capture:
m.Parent.staticFunction(p)
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert capture == "Allocating child."
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
# https://foss.heptapod.net/pypy/pypy/-/issues/2447
@pytest.mark.xfail("env.PYPY", reason="_PyObject_GetDictPtr is unimplemented")
def test_alive_gc(capture):
n_inst = ConstructorStats.detail_reg_inst()
p = m.ParentGC()
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
def test_alive_gc_derived(capture):
class Derived(m.Parent):
pass
n_inst = ConstructorStats.detail_reg_inst()
p = Derived()
p.addChildKeepAlive(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
def test_alive_gc_multi_derived(capture):
class Derived(m.Parent, m.Child):
def __init__(self):
m.Parent.__init__(self)
m.Child.__init__(self)
n_inst = ConstructorStats.detail_reg_inst()
p = Derived()
p.addChildKeepAlive(m.Child())
# +3 rather than +2 because Derived corresponds to two registered instances
assert ConstructorStats.detail_reg_inst() == n_inst + 3
lst = [p]
lst.append(lst) # creates a circular reference
with capture:
del p, lst
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
Releasing child.
"""
)
def test_return_none(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnNullChildKeepAliveChild()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == ""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
with capture:
p = m.Parent()
assert capture == "Allocating parent."
with capture:
p.returnNullChildKeepAliveParent()
assert ConstructorStats.detail_reg_inst() == n_inst + 1
assert capture == ""
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert capture == "Releasing parent."
def test_keep_alive_constructor(capture):
n_inst = ConstructorStats.detail_reg_inst()
with capture:
p = m.Parent(m.Child())
assert ConstructorStats.detail_reg_inst() == n_inst + 2
assert (
capture
== """
Allocating child.
Allocating parent.
"""
)
with capture:
del p
assert ConstructorStats.detail_reg_inst() == n_inst
assert (
capture
== """
Releasing parent.
Releasing child.
"""
)
def test_call_guard():
assert m.unguarded_call() == "unguarded"
assert m.guarded_call() == "guarded"
assert m.multiple_guards_correct_order() == "guarded & guarded"
assert m.multiple_guards_wrong_order() == "unguarded & guarded"
if hasattr(m, "with_gil"):
assert m.with_gil() == "GIL held"
assert m.without_gil() == "GIL released" | null |
5,793 | """Author Alexandre De Zotti
Draws Julia sets of quadratic polynomials and exponential maps.
More specifically, this iterates the function a fixed number of times
then plots whether the absolute value of the last iterate is greater than
a fixed threshold (named "escape radius"). For the exponential map this is not
really an escape radius but rather a convenient way to approximate the Julia
set with bounded orbits.
The examples presented here are:
- The Cauliflower Julia set, see e.g.
https://en.wikipedia.org/wiki/File:Julia_z2%2B0,25.png
- Other examples from https://en.wikipedia.org/wiki/Julia_set
- An exponential map Julia set, ambiently homeomorphic to the examples in
https://www.math.univ-toulouse.fr/~cheritat/GalII/galery.html
and
https://ddd.uab.cat/pub/pubmat/02141493v43n1/02141493v43n1p27.pdf
Remark: Some overflow runtime warnings are suppressed. This is because of the
way the iteration loop is implemented, using numpy's efficient computations.
Overflows and infinities are replaced after each step by a large number.
"""
import warnings
from collections.abc import Callable
from typing import Any
import numpy
from matplotlib import pyplot
c_cauliflower = 0.25 + 0.0j
c_polynomial_1 = -0.4 + 0.6j
c_polynomial_2 = -0.1 + 0.651j
c_exponential = -2.0
nb_iterations = 56
window_size = 2.0
nb_pixels = 666
def eval_exponential(c_parameter: complex, z_values: numpy.ndarray) -> numpy.ndarray:
"""
Evaluate $e^z + c$.
>>> eval_exponential(0, 0)
1.0
>>> abs(eval_exponential(1, numpy.pi*1.j)) < 1e-15
True
>>> abs(eval_exponential(1.j, 0)-1-1.j) < 1e-15
True
"""
return numpy.exp(z_values) + c_parameter
def eval_quadratic_polynomial(
c_parameter: complex, z_values: numpy.ndarray
) -> numpy.ndarray:
"""
>>> eval_quadratic_polynomial(0, 2)
4
>>> eval_quadratic_polynomial(-1, 1)
0
>>> round(eval_quadratic_polynomial(1.j, 0).imag)
1
>>> round(eval_quadratic_polynomial(1.j, 0).real)
0
"""
return z_values * z_values + c_parameter
def prepare_grid(window_size: float, nb_pixels: int) -> numpy.ndarray:
"""
Create a grid of complex values of size nb_pixels*nb_pixels with real and
imaginary parts ranging from -window_size to window_size (inclusive).
Returns a numpy array.
>>> prepare_grid(1,3)
array([[-1.-1.j, -1.+0.j, -1.+1.j],
[ 0.-1.j, 0.+0.j, 0.+1.j],
[ 1.-1.j, 1.+0.j, 1.+1.j]])
"""
x = numpy.linspace(-window_size, window_size, nb_pixels)
x = x.reshape((nb_pixels, 1))
y = numpy.linspace(-window_size, window_size, nb_pixels)
y = y.reshape((1, nb_pixels))
return x + 1.0j * y
def iterate_function(
eval_function: Callable[[Any, numpy.ndarray], numpy.ndarray],
function_params: Any,
nb_iterations: int,
z_0: numpy.ndarray,
infinity: float | None = None,
) -> numpy.ndarray:
"""
Iterate the function "eval_function" exactly nb_iterations times.
The first argument of the function is a parameter which is contained in
function_params. The variable z_0 is an array that contains the initial
values to iterate from.
This function returns the final iterates.
>>> iterate_function(eval_quadratic_polynomial, 0, 3, numpy.array([0,1,2])).shape
(3,)
>>> numpy.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... numpy.array([0,1,2]))[0])
0j
>>> numpy.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... numpy.array([0,1,2]))[1])
(1+0j)
>>> numpy.round(iterate_function(eval_quadratic_polynomial,
... 0,
... 3,
... numpy.array([0,1,2]))[2])
(256+0j)
"""
z_n = z_0.astype("complex64")
for _ in range(nb_iterations):
z_n = eval_function(function_params, z_n)
if infinity is not None:
numpy.nan_to_num(z_n, copy=False, nan=infinity)
z_n[abs(z_n) == numpy.inf] = infinity
return z_n
def show_results(
function_label: str,
function_params: Any,
escape_radius: float,
z_final: numpy.ndarray,
) -> None:
"""
Plots of whether the absolute value of z_final is greater than
the value of escape_radius. Adds the function_label and function_params to
the title.
>>> show_results('80', 0, 1, numpy.array([[0,1,.5],[.4,2,1.1],[.2,1,1.3]]))
"""
abs_z_final = (abs(z_final)).transpose()
abs_z_final[:, :] = abs_z_final[::-1, :]
pyplot.matshow(abs_z_final < escape_radius)
pyplot.title(f"Julia set of ${function_label}$, $c={function_params}$")
pyplot.show()
def METHOD_NAME() -> None:
"""
Ignore some overflow and invalid value warnings.
>>> ignore_overflow_warnings()
"""
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in multiply"
)
warnings.filterwarnings(
"ignore",
category=RuntimeWarning,
message="invalid value encountered in multiply",
)
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in absolute"
)
warnings.filterwarnings(
"ignore", category=RuntimeWarning, message="overflow encountered in exp"
)
if __name__ == "__main__":
z_0 = prepare_grid(window_size, nb_pixels)
METHOD_NAME() # See file header for explanations
nb_iterations = 24
escape_radius = 2 * abs(c_cauliflower) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_cauliflower,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_cauliflower, escape_radius, z_final)
nb_iterations = 64
escape_radius = 2 * abs(c_polynomial_1) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_polynomial_1,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_polynomial_1, escape_radius, z_final)
nb_iterations = 161
escape_radius = 2 * abs(c_polynomial_2) + 1
z_final = iterate_function(
eval_quadratic_polynomial,
c_polynomial_2,
nb_iterations,
z_0,
infinity=1.1 * escape_radius,
)
show_results("z^2+c", c_polynomial_2, escape_radius, z_final)
nb_iterations = 12
escape_radius = 10000.0
z_final = iterate_function(
eval_exponential,
c_exponential,
nb_iterations,
z_0 + 2,
infinity=1.0e10,
)
show_results("e^z+c", c_exponential, escape_radius, z_final) | null |
5,794 | from bbot.modules.base import BaseModule
class bucket_aws(BaseModule):
watched_events = ["DNS_NAME", "STORAGE_BUCKET"]
produced_events = ["STORAGE_BUCKET", "FINDING"]
flags = ["active", "safe", "cloud-enum", "web-basic", "web-thorough"]
meta = {"description": "Check for S3 buckets related to target"}
options = {"permutations": False}
options_desc = {
"permutations": "Whether to try permutations",
}
scope_distance_modifier = 3
cloud_helper_name = "aws"
delimiters = ("", ".", "-")
base_domains = ["s3.amazonaws.com"]
regions = [None]
supports_open_check = True
async def METHOD_NAME(self):
self.buckets_tried = set()
self.cloud_helper = getattr(self.helpers.cloud, self.cloud_helper_name)
self.permutations = self.config.get("permutations", False)
return True
async def filter_event(self, event):
if event.type == "DNS_NAME" and event.scope_distance > 0:
return False, "only accepts in-scope DNS_NAMEs"
if event.type == "STORAGE_BUCKET":
if f"cloud-{self.cloud_helper_name}" not in event.tags:
return False, "bucket belongs to a different cloud provider"
return True
async def handle_event(self, event):
if event.type == "DNS_NAME":
await self.handle_dns_name(event)
elif event.type == "STORAGE_BUCKET":
await self.handle_storage_bucket(event)
async def handle_dns_name(self, event):
buckets = set()
base = event.data
stem = self.helpers.domain_stem(base)
for b in [base, stem]:
split = b.split(".")
for d in self.delimiters:
buckets.add(d.join(split))
async for bucket_name, url, tags in self.brute_buckets(buckets, permutations=self.permutations):
self.emit_event({"name": bucket_name, "url": url}, "STORAGE_BUCKET", source=event, tags=tags)
async def handle_storage_bucket(self, event):
url = event.data["url"]
bucket_name = event.data["name"]
if self.supports_open_check:
description, tags = await self._check_bucket_open(bucket_name, url)
if description:
event_data = {"host": event.host, "url": url, "description": description}
self.emit_event(event_data, "FINDING", source=event, tags=tags)
async for bucket_name, url, tags in self.brute_buckets(
[bucket_name], permutations=self.permutations, omit_base=True
):
self.emit_event({"name": bucket_name, "url": url}, "STORAGE_BUCKET", source=event, tags=tags)
async def brute_buckets(self, buckets, permutations=False, omit_base=False):
buckets = set(buckets)
new_buckets = set(buckets)
if permutations:
for b in buckets:
for mutation in self.helpers.word_cloud.mutations(b, cloud=False):
for d in self.delimiters:
new_buckets.add(d.join(mutation))
if omit_base:
new_buckets = new_buckets - buckets
new_buckets = [b for b in new_buckets if self.valid_bucket_name(b)]
tasks = []
for base_domain in self.base_domains:
for region in self.regions:
for bucket_name in new_buckets:
url = self.build_url(bucket_name, base_domain, region)
tasks.append(self._check_bucket_exists(bucket_name, url))
async for task in self.helpers.as_completed(tasks):
existent_bucket, tags, bucket_name, url = await task
if existent_bucket:
yield bucket_name, url, tags
async def _check_bucket_exists(self, bucket_name, url):
self.debug(f'Checking if bucket exists: "{bucket_name}"')
return await self.check_bucket_exists(bucket_name, url)
async def check_bucket_exists(self, bucket_name, url):
response = await self.helpers.request(url)
tags = self.gen_tags_exists(response)
status_code = getattr(response, "status_code", 404)
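# S3 answers 404 for a bucket name that does not exist; any other status
# (200 open, 403 access denied, etc.) implies the bucket exists.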
existent_bucket = status_code != 404
return (existent_bucket, tags, bucket_name, url)
async def _check_bucket_open(self, bucket_name, url):
self.debug(f'Checking if bucket is misconfigured: "{bucket_name}"')
return await self.check_bucket_open(bucket_name, url)
async def check_bucket_open(self, bucket_name, url):
response = await self.helpers.request(url)
tags = self.gen_tags_exists(response)
status_code = getattr(response, "status_code", 404)
content = getattr(response, "text", "")
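# A publicly listable bucket answers the unauthenticated GET with HTTP 200 and
# an XML object listing whose entries appear as <Contents> elements.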
open_bucket = status_code == 200 and "Contents" in content
msg = ""
if open_bucket:
msg = "Open storage bucket"
return (msg, tags)
def valid_bucket_name(self, bucket_name):
valid = self.cloud_helper.is_valid_bucket(bucket_name)
if valid and not self.helpers.is_ip(bucket_name):
bucket_hash = hash(bucket_name)
if bucket_hash not in self.buckets_tried:
self.buckets_tried.add(bucket_hash)
return True
return False
def build_url(self, bucket_name, base_domain, region):
return f"https://{bucket_name}.{base_domain}"
def gen_tags_exists(self, response):
return set()
def gen_tags_open(self, response):
return set() | null |
5,795 | # Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
import mindspore as ms
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops.functional import vmap
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class Net(nn.Cell):
def __init__(self, sample, replacement, seed=0):
super(Net, self).__init__()
self.sample = sample
self.replacement = replacement
self.seed = seed
def construct(self, x):
return C.METHOD_NAME(x, self.sample, self.replacement, self.seed)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multinomial_net():
x0 = Tensor(np.array([0.9, 0.2]).astype(np.float32))
x1 = Tensor(np.array([[0.9, 0.2], [0.9, 0.2]]).astype(np.float32))
net0 = Net(1, True, 20)
net1 = Net(2, True, 20)
net2 = Net(6, True, 20)
out0 = net0(x0)
out1 = net1(x0)
out2 = net2(x1)
assert out0.asnumpy().shape == (1,)
assert out1.asnumpy().shape == (2,)
assert out2.asnumpy().shape == (2, 6)
class DynamicShapeNet(nn.Cell):
"""
Inputs:
- **x** (Tensor) - the input tensor containing the cumsum of probabilities, must be 1 or 2
dimensions. Must be one of the following types: float16, float32, float64. CPU and GPU
support x with 1 or 2 dimensions, while Ascend only supports 2 dimensions.
- **num_samples** (int) - number of samples to draw, must be a nonnegative number.
"""
def __init__(self):
super(DynamicShapeNet, self).__init__()
self.unique = P.Unique()
self.gather = P.Gather()
self.METHOD_NAME = P.Multinomial()
def construct(self, x, indices):
unique_indices, _ = self.unique(indices)
x = self.gather(x, unique_indices, 0)
return self.METHOD_NAME(x, 2)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multinomial_dynamic_shape():
"""
Feature: test Multinomial dynamic_shape feature.
Description: test Multinomial dynamic_shape feature. Only support GRAPH_MODE.
Expectation: success.
"""
# dynamic inputs
indices_np = np.random.randint(0, 3, size=6)
indices_ms = Tensor(indices_np)
# data preparation
x = Tensor(np.arange(20).reshape(4, 5).astype(np.float32) / 10)
# dynamic shape
x_dyn = Tensor(shape=[None for _ in x.shape], dtype=x.dtype)
dynamic_shape_net = DynamicShapeNet()
dynamic_shape_net.set_inputs(x_dyn, indices_ms)
# run in graph mode
outputs = dynamic_shape_net(x, indices_ms)
expect_shape = (len(np.unique(indices_np)), 2)
assert outputs.asnumpy().shape == expect_shape
class BatchedMultinomial(nn.Cell):
def __init__(self):
super().__init__()
self.METHOD_NAME = P.Multinomial(seed=5, seed2=6)
def construct(self, prob, num_sample):
return self.METHOD_NAME(prob, num_sample)
def METHOD_NAME(prob, num_sample):
return P.Multinomial(seed=5, seed2=6)(prob, num_sample)
@pytest.mark.level2
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_multinomial_vmap():
"""
Feature: test Multinomial vmap feature.
Description: test Multinomial vmap feature.
Expectation: success.
"""
prob = Tensor([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], ms.float32)
num_sample = 3
batched_multinomial = BatchedMultinomial()
batched_out = batched_multinomial(prob, num_sample)
vmap_out = vmap(METHOD_NAME, in_axes=(0, None), out_axes=0)(prob, num_sample)
assert (batched_out.asnumpy() == vmap_out.asnumpy()).all() | null |
5,796 | #!/usr/bin/env python3
from std_msgs.msg import String, Empty
from lg_msg_defs.srv import USCSMessage
from lg_msg_defs.msg import StringArray
from interactivespaces_msgs.msg import GenericMessage
import http.server
import socketserver
import threading
import tempfile
import rospy
import json
import copy
import os
import re
import binascii
DEFAULT_VIEWPORTS = ['left_three', 'left_two', 'left_one', 'center',
'right_one', 'right_two', 'right_three']
DEFAULT_EARTH_INSTANCE = {
'activity': 'earth',
'activity_config': {},
'assets': [],
'height': 1920,
'presentation_viewport': 'CHANGE_ME',
'slug': -1875729098,
'width': 1080,
'x_coord': 0,
'y_coord': 0
}
kml_id_pattern = re.compile('<kml.*? id="(.*?)".*?>', re.IGNORECASE)
def get_kml_id(kml):
"""
If the <kml> tag has an id attribute, returns its value;
otherwise returns the unsigned crc32 of the kml string.
"""
id_match = kml_id_pattern.search(kml)
if id_match:
return id_match.group(1)
try:
return hex(binascii.crc32(kml.encode("utf-8")) % (1 << 32))
except TypeError:
print("Can't calculate crc32 for")
print(kml)
raise
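# Illustrative usage with hypothetical KML snippets (not part of this module):
#   get_kml_id('<kml id="my_tour" xmlns="http://www.opengis.net/kml/2.2"></kml>')  -> 'my_tour'
#   get_kml_id('<kml xmlns="http://www.opengis.net/kml/2.2"></kml>')               -> crc32-based hex id, e.g. '0x...'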
def get_match_any_starts_with(prefixes):
def matcher(test_string):
for prefix in prefixes:
if test_string.startswith(prefix):
return True
return False
return matcher
class KMLAdder():
def __init__(self, uscs_service, director_pub, added_kml_pub, port, hostname='localhost', viewports=None):
self.serve_dir = tempfile.mktemp()
self.uscs_service = uscs_service
self.director_pub = director_pub
self.added_kml_pub = added_kml_pub
self.id_to_file = dict()
self.hostname = hostname
self.viewports = viewports
if self.viewports is None:
self.viewports = DEFAULT_VIEWPORTS
self.port = port
self.server = threading.Thread(target=self._serve)
os.mkdir(self.serve_dir)
self.server.start()
def handle_kml(self, msg):
kml = msg.data
filename = tempfile.mktemp(dir=self.serve_dir)
with open(filename, 'w') as f:
f.write(kml)
kml_id = get_kml_id(kml)
if kml_id not in self.id_to_file:
self.id_to_file[kml_id] = list()
# Keep track of files for easier removal by id
self.id_to_file[kml_id].append(os.path.basename(filename))
current_scene = self.uscs_service.call().message
current_scene = json.loads(current_scene)
self.add_earths(current_scene)
for window in current_scene['windows']:
if window['activity'] != 'earth':
continue
if 'assets' in window:
window['assets'].append(self.formatURL(filename))
else:
window['assets'] = [self.formatURL(filename)]
new_msg = GenericMessage()
new_msg.type = 'json'
new_msg.message = json.dumps(current_scene)
self.director_pub.publish(new_msg)
self.added_kml_pub.publish(StringArray(list(self.id_to_file.keys())))
def formatURL(self, filename):
return 'http://{}:{}/{}'.format(self.hostname, self.port, os.path.basename(filename))
def METHOD_NAME(self):
return 'http://{}:{}/tmp'.format(self.hostname, self.port)
def clear_kmls(self, msg):
current_scene = self.uscs_service.call().message
current_scene = json.loads(current_scene)
ids = msg.strings if msg.strings else None
if ids:
files = []
for kml_id in ids:
if kml_id in self.id_to_file:
for names in self.id_to_file.pop(kml_id):
if isinstance(names, list):
for name in names:
files.append(name)
else:
files.append(names)
urls_to_remove = [self.formatURL(filename) for filename in files]
matcher = get_match_any_starts_with(urls_to_remove)
else:
# Remove all additional kmls
self.id_to_file = dict()
matcher = get_match_any_starts_with([self.METHOD_NAME()])
for window in current_scene['windows']:
if window['activity'] == 'earth':
window['assets'] = [a for a in window['assets'] if not matcher(a)]
new_msg = GenericMessage()
new_msg.type = 'json'
new_msg.message = json.dumps(current_scene)
self.director_pub.publish(new_msg)
self.added_kml_pub.publish(StringArray(list(self.id_to_file.keys())))
def _serve(self):
os.chdir(self.serve_dir)
Handler = http.server.SimpleHTTPRequestHandler
self.httpd = socketserver.TCPServer(("", self.port), Handler)
self.httpd.serve_forever()
def add_earths(self, scene):
for viewport in self.viewports:
flag = False
for window in scene['windows']:
if window['activity'] == 'earth' and 'presentation_viewport' in window and window['presentation_viewport'] == viewport:
flag = True
# if no instance of earth w/ our current viewport is found
# we add one and give it our viewport
if flag is False:
scene['windows'].append(copy.deepcopy(DEFAULT_EARTH_INSTANCE))
scene['windows'][-1]['presentation_viewport'] = viewport
def shutdown(self):
self.httpd.shutdown()
self.server.join()
def main():
rospy.init_node('add_kml')
director_pub = rospy.Publisher('/director/scene', GenericMessage, queue_size=10)
added_kml_pub = rospy.Publisher('/lg_earth/added_kml', StringArray, latch=True, queue_size=1)
uscs_service = rospy.ServiceProxy('/uscs/message', USCSMessage, persistent=False)
hostname = rospy.get_param('~hostname', 'localhost')
port = rospy.get_param('~port', 18111)
k = KMLAdder(uscs_service, director_pub, added_kml_pub, port, hostname)
rospy.Subscriber('/lg_earth/add_kml', String, k.handle_kml)
rospy.Subscriber('/lg_earth/clear_kml', StringArray, k.clear_kmls)
rospy.on_shutdown(k.shutdown)
rospy.spin()
if __name__ == '__main__':
main() | null |
5,797 | from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.test.utils import override_settings
from cms.api import assign_user_to_page, create_page
from cms.cache.permissions import clear_user_permission_cache, get_permission_cache, set_permission_cache
from cms.models import Page
from cms.models.permissionmodels import ACCESS_PAGE_AND_DESCENDANTS, GlobalPagePermission
from cms.test_utils.testcases import URL_CMS_PAGE_ADD, CMSTestCase
from cms.utils.page_permissions import (
get_change_id_list,
user_can_add_subpage,
user_can_publish_page,
user_can_view_page,
)
@override_settings(
CMS_PERMISSION=True,
CMS_CACHE_DURATIONS={
'menus': 60,
'content': 60,
'permissions': 60,
},
)
class PermissionCacheTests(CMSTestCase):
def setUp(self):
self.user_super = self._create_user("super", is_staff=True,
is_superuser=True)
self.user_normal = self._create_user("randomuser", is_staff=True,
add_default_permissions=True)
self.home_page = create_page("home", "nav_playground.html", "en",
created_by=self.user_super)
def test_basic_permissions(self):
"""
Test basic permissions cache get / set / clear low-level api
"""
cached_permissions = get_permission_cache(self.user_normal, "change_page")
self.assertIsNone(cached_permissions)
set_permission_cache(self.user_normal, "change_page", [self.home_page.id])
cached_permissions = get_permission_cache(self.user_normal, "change_page")
self.assertEqual(cached_permissions, [self.home_page.id])
clear_user_permission_cache(self.user_normal)
cached_permissions = get_permission_cache(self.user_normal, "change_page")
self.assertIsNone(cached_permissions)
def test_permission_manager(self):
"""
Test page permission manager working on a subpage
"""
page_b = create_page("page_b", "nav_playground.html", "en",
created_by=self.user_super)
assign_user_to_page(page_b, self.user_normal, can_view=True,
can_change=True)
cached_permissions = get_permission_cache(self.user_normal, "change_page")
self.assertIsNone(cached_permissions)
live_permissions = get_change_id_list(self.user_normal, Site.objects.get_current())
cached_permissions_permissions = get_permission_cache(self.user_normal,
"change_page")
self.assertEqual(live_permissions, [page_b.id])
self.assertEqual(cached_permissions_permissions, live_permissions)
def test_cached_permission_precedence(self):
# refs - https://github.com/divio/django-cms/issues/6335
# cached page permissions should not override global permissions
page = create_page(
"test page",
"nav_playground.html",
"en",
created_by=self.user_super,
)
page_permission = GlobalPagePermission.objects.create(
can_change=True,
can_publish=True,
user=self.user_normal,
)
page_permission.sites.add(Site.objects.get_current())
set_permission_cache(self.user_normal, "publish_page", [])
can_publish = user_can_publish_page(
self.user_normal,
page,
Site.objects.get_current(),
)
self.assertTrue(can_publish)
def METHOD_NAME(self):
superuser = self.get_superuser()
user1 = self._create_user("user1", is_staff=True, is_superuser=False)
visible = create_page("visible", "nav_playground.html", "en", published=True)
visible_child = create_page("visible_child", "nav_playground.html", "en", parent=visible, published=True)
invisible_for_user1 = create_page("invisible", "nav_playground.html", "en", published=True)
assign_user_to_page(visible, user1, grant_on=ACCESS_PAGE_AND_DESCENDANTS, can_view=True)
assign_user_to_page(invisible_for_user1, superuser, grant_on=ACCESS_PAGE_AND_DESCENDANTS, can_view=True)
with self.login_user_context(user1):
response = self.client.get(visible_child.get_public_object().get_absolute_url())
self.assertEqual(response.status_code, 200)
response = self.client.get(invisible_for_user1.get_public_object().get_absolute_url())
self.assertEqual(response.status_code, 404)
with self.login_user_context(superuser):
move_url = self.get_admin_url(Page, 'move_page', visible_child.pk)
response = self.client.post(move_url, {
'id': visible_child.pk,
'position': 0,
'target': invisible_for_user1.pk,
})
self.assertEqual(response.status_code, 200)
visible_child = visible_child.reload()
self.assertEqual(visible_child.parent_page.pk, invisible_for_user1.pk)
# Ignore cached_func
User = get_user_model()
user1 = User.objects.get(pk=user1.pk)
self.assertFalse(user_can_view_page(user=user1, page=visible_child))
def test_add_page_twice(self):
user1 = self._create_user("user1", is_staff=True, is_superuser=False, add_default_permissions=True)
home = create_page("home", "nav_playground.html", "en", published=True)
home.set_as_homepage()
assign_user_to_page(home, user1, grant_on=ACCESS_PAGE_AND_DESCENDANTS, can_add=True, can_change=True, can_publish=True)
with self.login_user_context(user1):
response = self.client.post(f'{URL_CMS_PAGE_ADD}?parent_node={home.node.pk}', self.get_new_page_data(parent_id=home.node.pk))
self.assertEqual(response.status_code, 302)
child = home.reload().get_child_pages().first()
self.assertIsNotNone(child)
# Ignore cached_func
User = get_user_model()
user1 = User.objects.get(pk=user1.pk)
self.assertTrue(user_can_add_subpage(user1, child)) | null |
5,798 | import random
from collections import OrderedDict
from typing import Any, Dict, Type
import numpy as np
import torch
from avalanche.training.determinism.cuda_rng import (
cuda_rng_seed,
cuda_rng_save_state,
cuda_rng_load_state,
cuda_rng_step,
cpu_rng_seed,
)
class _Singleton(type):
_instances: Dict[Type, Any] = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class _RNGManager:
"""
A class used to manage a set of deterministic random number generators.
The main goal of this class is to provide a simplified mechanism to
improve determinism on the RNG side. This class includes a method to set
a manual seed of all number generators and a method to register new
generators.
By default, Python (`random` module), NumPy, and PyTorch global generators
are managed.
"""
__metaclass__ = _Singleton
RNG_DEF_REQUIRED_FIELDS = {"seed", "save_state", "load_state", "step"}
def __init__(self):
"""
Initializes this object, registering the default supported generators.
"""
self.random_generators = OrderedDict()
self._register_default_generators()
def register_random_generator(self, name: str, rng_def: dict):
"""
Register a new random number generator.
Please note that Python's `random`, NumPy, and PyTorch global generators
are already supported out-of-the-box and should not be re-registered.
:param name: The name of the random number generator.
:param rng_def: The definition of the random number generator.
This must be a dictionary including the following fields:
`seed`: a function that initializes the internal random number
generator; it should accept an int (the seed).
`save_state`: a function that returns an object that can be used to
restore the state of the random number generator. The returned
object should be pickleable.
`load_state`: a function that sets the internal state of the
generator based on the object passed as an argument. That object is
the one returned from a previous call to `save_state`.
`step`: a function that advances the state of the random number
generator. Its return value is ignored. To advance the state, it is
recommended to generate a small amount of random data, like a float
(to minimize the performance impact).
"""
rng_def_keys = set(rng_def.keys())
if rng_def_keys != _RNGManager.RNG_DEF_REQUIRED_FIELDS:
raise ValueError("Invalid random number generator definition")
self.random_generators[name] = rng_def
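# A minimal sketch of registering an additional generator (hypothetical name
# my_rng; any object exposing seed/getstate/setstate/random would do):
#   my_rng = random.Random()
#   RNGManager.register_random_generator(
#       "my_rng",
#       {
#           "seed": my_rng.seed,
#           "save_state": my_rng.getstate,
#           "load_state": my_rng.setstate,
#           "step": my_rng.random,
#       },
#   )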
def _register_default_generators(self):
self.register_random_generator(
"torch",
{
"seed": cpu_rng_seed,
"save_state": torch.random.get_rng_state,
"load_state": torch.random.set_rng_state,
"step": lambda: torch.rand(1),
},
)
self.register_random_generator(
"torch.cuda",
{
"seed": cuda_rng_seed,
"save_state": cuda_rng_save_state,
"load_state": cuda_rng_load_state,
"step": cuda_rng_step,
},
)
self.register_random_generator(
"numpy",
{
"seed": np.random.seed,
"save_state": np.random.get_state,
"load_state": np.random.set_state,
"step": lambda: np.random.rand(1),
},
)
self.register_random_generator(
"random",
{
"seed": random.seed,
"save_state": random.getstate,
"load_state": random.setstate,
"step": random.random,
},
)
def set_random_seeds(self, random_seed):
"""
Set the initial seed of all number generators.
:param random_seed: The initial seed. It should be a value compatible
with all registered number generators. A native `int` value in
range `[0, 2^32 - 1)` is usually acceptable.
"""
for gen_name, gen_dict in self.random_generators.items():
gen_dict["seed"](random_seed)
def align_seeds(self):
"""
Align random number generator seeds by using the next PyTorch generated
integer value.
"""
reference_seed = torch.randint(0, 2**32 - 1, (1,), dtype=torch.int64)
seed = int(reference_seed)
self.set_random_seeds(seed)
def __getstate__(self):
all_rngs_state = dict()
for rng_name, rng_def in self.random_generators.items():
rng_state = dict()
rng_state["current_state"] = rng_def["save_state"]()
all_rngs_state[rng_name] = rng_state
return all_rngs_state
def step_generators(self):
for rng_name, rng_def in self.random_generators.items():
rng_def["step"]()
def __setstate__(self, rngs):
# Note on the following:
# here we load self.random_generators from the global singleton.
# We have to do this because, while "rngs" could contain the un-pickled
# rng functions (load_state, save_state, step), those functions
# are incorrectly pickled and un-pickled.
# In other words, they cannot be used to actually re-load the state
# of the related number generator after closing and reopening
# the process! This has been tested thanks to a generous expenditure of
# man-hours.
# On the other hand, RNGManager.random_generators will contain the
# "correct" (of the current process) functions linked to the global
# generators. User will need to register custom generators before
# un-pickling _RNGManager objects (usually by loading a checkpoint).
# Also consider that this object will probably not be used...
# the goal of this __setstate__ is to load the state of the
# global number generators registered in the singleton.
self.random_generators = RNGManager.random_generators
for rng_name, rng_def in self.random_generators.items():
loaded_state = rngs[rng_name]["current_state"]
rng_def["load_state"](loaded_state)
def METHOD_NAME(self, generators):
"""
For internal use only.
"""
self.random_generators = generators
RNGManager = _RNGManager()
__all__ = ["RNGManager"] | null |
5,799 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
from mindspore import context, Tensor, Parameter
from mindspore.common.api import _cell_graph_executor
from mindspore.nn import Cell, TrainOneStepCell, Momentum
from mindspore.ops import operations as P
def METHOD_NAME():
context.set_auto_parallel_context(dataset_strategy="full_batch")
class Net(Cell):
def __init__(self, mul_weight, batch_matmul_weight, transpose_b=False, strategy1=None, strategy2=None):
super().__init__()
self.mul = P.Mul().shard(strategy1)
self.batch_matmul = P.BatchMatMul(transpose_b=transpose_b).shard(strategy2)
self.mul_weight = Parameter(mul_weight, "w1")
self.batch_matmul_weight = Parameter(batch_matmul_weight, "w2")
def construct(self, x, b):
out = self.mul(x, self.mul_weight)
out = self.batch_matmul(out, self.batch_matmul_weight)
return out
_x = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
_w1 = Tensor(np.ones([128, 64, 32]), dtype=ms.float32)
_w2 = Tensor(np.ones([128, 32, 32]), dtype=ms.float32)
_b = Tensor(np.ones([128, 64, 16]), dtype=ms.float32)
def compile_net(net):
optimizer = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
train_net = TrainOneStepCell(net, optimizer)
train_net.set_train()
_cell_graph_executor.compile(train_net, _x, _b)
context.reset_auto_parallel_context()
def test_batch_matmul_data_parallel():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net with data parallel strategy in semi auto parallel.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((16, 1, 1), (16, 1, 1))
strategy2 = ((16, 1, 1), (16, 1, 1))
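# Each inner tuple shards one input of the operator; the value at each position is the
# number of slices along that dimension. Splitting only the leading (batch) dimension
# across all 16 devices corresponds to plain data parallelism.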
net = Net(_w1, _w2, False, strategy1, strategy2)
compile_net(net)
def test_batch_matmul_model_parallel():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net with model parallel strategy in semi auto parallel.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((1, 1, 1), (1, 1, 1))
strategy2 = ((1, 1, 1), (1, 1, 16))
net = Net(_w1, _w2, False, strategy1, strategy2)
compile_net(net)
def test_batch_matmul_hybrid_parallel():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net with mixed strategy in semi auto parallel.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((2, 2, 2), (2, 2, 2))
strategy2 = ((2, 2, 2), (2, 2, 2))
net = Net(_w1, _w2, False, strategy1, strategy2)
compile_net(net)
def test_batch_matmul_auto_parallel():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net in auto parallel.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="auto_parallel", device_num=16, global_rank=0)
net = Net(_w1, _w2, False)
compile_net(net)
def test_batch_matmul_repeat_calc():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net with repeated strategy in semi auto parallel.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((2, 2, 4), (2, 2, 4))
strategy2 = ((1, 2, 2), (1, 2, 2))
net = Net(_w1, _w2, False, strategy1, strategy2)
compile_net(net)
def test_batch_matmul_transpose_b():
"""
Feature: distribute operator batch_matmul in auto parallel.
Description: mul-batch_matmul net with strategy in semi auto parallel, transpose_b.
Expectation: compile done without error.
"""
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel", device_num=16, global_rank=0)
strategy1 = ((2, 2, 4), (2, 2, 4))
strategy2 = ((1, 2, 2), (1, 2, 2))
net = Net(_w1, _w2, True, strategy1, strategy2)
compile_net(net) | null |