id (int64, 0–6k) | code (string, 4k–8k) | code_compressed (null)
---|---|---|
5,900 | # Copyright 2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import time
import random
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.common import dtype as mstype
from mindspore.common.initializer import TruncatedNormal
from mindspore.nn.optim import Momentum
from mindspore.nn.wrap.cell_wrapper import WithLossCell, TrainOneStepCell
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore.common import set_seed
from mindspore._extends import cell_attr_register
def seed_set():
set_seed(1)
np.random.seed(1)
random.seed(1)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
def conv(in_channels, out_channels, kernel_size, stride=1, padding=0):
"""weight initial for conv layer"""
weight = weight_variable()
return nn.Conv2d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
weight_init=weight, has_bias=False, pad_mode="valid")
def METHOD_NAME(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
class LeNetConvBlock(nn.Cell):
@cell_attr_register
def __init__(self, in_channels, out_channels, kernel_size):
super(LeNetConvBlock, self).__init__()
self.conv = conv(in_channels, out_channels, kernel_size)
self.relu = nn.ReLU()
self.max_pool2d = nn.MaxPool2d(kernel_size=2, stride=2)
def construct(self, x):
x = self.conv(x)
x = self.relu(x)
x = self.max_pool2d(x)
return x
class LeNetFcBlock(nn.Cell):
@cell_attr_register
def __init__(self, in_channels, out_channels):
super(LeNetFcBlock, self).__init__()
self.fc = METHOD_NAME(in_channels, out_channels)
self.relu = nn.ReLU()
def construct(self, x):
x = self.fc(x)
x = self.relu(x)
return x
class LeNet(nn.Cell):
"""
Lenet network
Args:
        num_class (int): Number of classes. Default: 10.
Returns:
Tensor, output tensor
Examples:
>>> LeNet(num_class=10)
"""
def __init__(self, num_class=10):
super(LeNet, self).__init__()
self.num_class = num_class
self.batch_size = 32
self.conv1 = LeNetConvBlock(1, 6, 5)
self.conv2 = LeNetConvBlock(6, 16, 5)
self.fc1 = LeNetFcBlock(16 * 5 * 5, 120)
self.fc2 = LeNetFcBlock(120, 84)
self.fc3 = METHOD_NAME(84, self.num_class)
self.reshape = P.Reshape()
def construct(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.reshape(x, (self.batch_size, -1))
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
class CrossEntropyLoss(nn.Cell):
"""
Define loss for network
"""
def __init__(self):
super(CrossEntropyLoss, self).__init__()
self.cross_entropy = P.SoftmaxCrossEntropyWithLogits()
self.mean = P.ReduceMean()
self.one_hot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0.0, mstype.float32)
self.num = Tensor(32.0, mstype.float32)
def construct(self, logits, label):
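        # Convert integer labels to one-hot vectors sized to the logits' class
        # dimension, compute per-sample softmax cross entropy, then average by
        # dividing the summed loss by the fixed batch size of 32.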
label = self.one_hot(label, F.shape(logits)[1], self.on_value, self.off_value)
loss = self.cross_entropy(logits, label)[0]
loss = P.RealDiv()(P.ReduceSum()(loss, -1), self.num)
return loss
def train_ascend_lenet():
epoch_size = 20
batch_size = 32
inputs = Tensor(np.ones([batch_size, 1, 32, 32]).astype(np.float32))
labels = Tensor(np.ones([batch_size]).astype(np.int32))
net = LeNet()
criterion = CrossEntropyLoss()
optimizer = Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), 0.1, 0.9)
net_with_criterion = WithLossCell(net, criterion)
train_network = TrainOneStepCell(net_with_criterion, optimizer)
train_network.set_train()
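    # Train repeatedly on the same synthetic batch; with fixed seeds and constant
    # inputs the final loss is deterministic, which the tests below assert
    # against a narrow expected range.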
total_time = 0
for epoch in range(0, epoch_size):
start_time = time.time()
loss = train_network(inputs, labels)
end_time = time.time()
cost_time = end_time - start_time
total_time = total_time + cost_time
print("======epoch: ", epoch, " loss: ", loss.asnumpy(), " cost time: ", cost_time)
return loss
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_lenet_cell():
"""
Feature: test ge ascend lenet with cell reuse.
Description: subgraph sink with ge.
    Expectation: the result matches the expected value.
"""
seed_set()
os.environ['MS_GE_TRAIN'] = str(1)
os.environ['MS_ENABLE_GE'] = str(1)
os.environ['MS_ENABLE_REF_MODE'] = str(1)
os.environ['MS_DEV_CELL_REUSE'] = str(1)
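    # Enable GE (Ascend Graph Engine) training, ref mode and cell reuse via
    # environment flags; they are removed again after training so they do not
    # leak into other tests.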
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
loss_output = train_ascend_lenet()
del os.environ['MS_GE_TRAIN']
del os.environ['MS_ENABLE_GE']
del os.environ['MS_ENABLE_REF_MODE']
del os.environ['MS_DEV_CELL_REUSE']
assert loss_output.asnumpy() < 0.004
assert loss_output.asnumpy() > 0.003
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_lenet_no_cell():
"""
Feature: test ge ascend lenet with no cell reuse.
Description: multi-graph sink with ge.
    Expectation: the result matches the expected value.
"""
seed_set()
os.environ['MS_GE_TRAIN'] = str(1)
os.environ['MS_ENABLE_GE'] = str(1)
os.environ['MS_ENABLE_REF_MODE'] = str(1)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
loss_output = train_ascend_lenet()
del os.environ['MS_GE_TRAIN']
del os.environ['MS_ENABLE_GE']
del os.environ['MS_ENABLE_REF_MODE']
assert loss_output.asnumpy() < 0.004
assert loss_output.asnumpy() > 0.003 | null |
5,901 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqt.utils.qt.testing import start_qapplication
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table import PeriodicTable as silxPT, PeriodicTableItem
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_model import PeriodicTableModel
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_presenter import PeriodicTablePresenter
from mantidqtinterfaces.Muon.GUI.ElementalAnalysis.PeriodicTable.periodic_table_view import PeriodicTableView
@start_qapplication
class PeriodicTablePresenterTest(unittest.TestCase):
def setUp(self):
self._model = mock.create_autospec(PeriodicTableModel)
self.view = PeriodicTableView()
self.presenter = PeriodicTablePresenter(self.view, self._model)
self.presenter.is_selected = mock.Mock()
self.mock_elem = mock.create_autospec(PeriodicTableItem)
self.mock_elem.symbol = mock.Mock()
self.view.ptable = mock.create_autospec(silxPT)
self.view.ptable.getSelection = mock.Mock(return_value=self.mock_elem)
self.view.ptable.isElementSelected = mock.Mock(return_value=True)
self.view.on_table_lclicked = mock.Mock()
self.view.on_table_rclicked = mock.Mock()
self.view.on_table_changed = mock.Mock()
self.view.unreg_on_table_lclicked = mock.Mock()
self.view.unreg_on_table_rclicked = mock.Mock()
self.view.unreg_on_table_changed = mock.Mock()
self.presenter.view = self.view
    # checks that registering a slot via register_func triggers a call to the corresponding view signal function
def check_second_func_called(self, register_func, signal_func):
test_slot = mock.Mock()
register_func(test_slot)
assert signal_func.call_count == 1
def test_register_table_lclicked(self):
self.check_second_func_called(self.presenter.register_table_lclicked, self.view.on_table_lclicked)
def METHOD_NAME(self):
self.check_second_func_called(self.presenter.unregister_table_lclicked, self.view.unreg_on_table_lclicked)
def test_register_table_rclicked(self):
self.check_second_func_called(self.presenter.register_table_rclicked, self.view.on_table_rclicked)
def test_unregister_table_rclicked(self):
self.check_second_func_called(self.presenter.unregister_table_rclicked, self.view.unreg_on_table_rclicked)
def test_register_table_changed(self):
self.check_second_func_called(self.presenter.register_table_changed, self.view.on_table_changed)
def test_unregister_table_changed(self):
self.check_second_func_called(self.presenter.unregister_table_changed, self.view.unreg_on_table_changed)
def test_selection(self):
assert self.presenter.selection == self.mock_elem
def test_is_selected(self):
assert self.presenter.is_selected(mock.Mock())
def test_select_element(self):
self.check_second_func_called(self.presenter.select_element, self.view.ptable.setElementSelected)
def test_add_elements(self):
self.check_second_func_called(self.presenter.add_elements, self.view.ptable.setSelection)
def test_set_buttons(self):
self.presenter.model.peak_data = [self.mock_elem.symbol]
self.view.ptable.elements = [self.mock_elem]
self.presenter.set_buttons()
assert self.view.ptable.silentSetElementSelected.call_count == 1
assert self.view.ptable.enableElementButton.call_count == 1
def test_set_peak_datafile(self):
self.presenter.set_buttons = mock.Mock()
test_filename = mock.Mock
self.presenter.set_peak_datafile(test_filename)
assert self.presenter.model.peak_data_file == test_filename
if __name__ == "__main__":
unittest.main() | null |
5,902 | # Copyright (c) 2022 Tulir Asokan
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from typing import Optional
import asyncio
import olm
from mautrix.errors import DecryptionError, MatchingSessionDecryptionError
from mautrix.types import (
DecryptedOlmEvent,
EncryptedOlmEventContent,
EncryptionAlgorithm,
IdentityKey,
OlmCiphertext,
OlmMsgType,
ToDeviceEvent,
UserID,
)
from mautrix.util import background_task
from .base import BaseOlmMachine
from .sessions import Session
class OlmDecryptionMachine(BaseOlmMachine):
async def _decrypt_olm_event(self, evt: ToDeviceEvent) -> DecryptedOlmEvent:
if not isinstance(evt.content, EncryptedOlmEventContent):
raise DecryptionError("unsupported event content class")
elif evt.content.algorithm != EncryptionAlgorithm.OLM_V1:
raise DecryptionError("unsupported event encryption algorithm")
try:
own_content = evt.content.ciphertext[self.account.identity_key]
except KeyError:
raise DecryptionError("olm event doesn't contain ciphertext for this device")
self.log.debug(
f"Decrypting to-device olm event from {evt.sender}/{evt.content.sender_key}"
)
plaintext = await self._decrypt_olm_ciphertext(
evt.sender, evt.content.sender_key, own_content
)
try:
decrypted_evt: DecryptedOlmEvent = DecryptedOlmEvent.parse_json(plaintext)
except Exception:
self.log.trace("Failed to parse olm event plaintext: %s", plaintext)
raise
if decrypted_evt.sender != evt.sender:
raise DecryptionError("mismatched sender in olm payload")
elif decrypted_evt.recipient != self.client.mxid:
raise DecryptionError("mismatched recipient in olm payload")
elif decrypted_evt.recipient_keys.ed25519 != self.account.signing_key:
raise DecryptionError("mismatched recipient key in olm payload")
decrypted_evt.sender_key = evt.content.sender_key
decrypted_evt.source = evt
self.log.debug(
f"Successfully decrypted olm event from {evt.sender}/{decrypted_evt.sender_device} "
f"(sender key: {decrypted_evt.sender_key} into a {decrypted_evt.type}"
)
return decrypted_evt
async def _decrypt_olm_ciphertext(
self, sender: UserID, sender_key: IdentityKey, message: OlmCiphertext
) -> str:
if message.type not in (OlmMsgType.PREKEY, OlmMsgType.MESSAGE):
raise DecryptionError("unsupported olm message type")
try:
plaintext = await self._try_decrypt_olm_ciphertext(sender_key, message)
except MatchingSessionDecryptionError:
self.log.warning(
f"Found matching session yet decryption failed for sender {sender}"
f" with key {sender_key}"
)
background_task.create(self.METHOD_NAME(sender, sender_key))
raise
if not plaintext:
if message.type != OlmMsgType.PREKEY:
background_task.create(self.METHOD_NAME(sender, sender_key))
raise DecryptionError("Decryption failed for normal message")
self.log.trace(f"Trying to create inbound session for {sender}/{sender_key}")
try:
session = await self._create_inbound_session(sender_key, message.body)
except olm.OlmSessionError as e:
background_task.create(self.METHOD_NAME(sender, sender_key))
raise DecryptionError("Failed to create new session from prekey message") from e
self.log.debug(
f"Created inbound session {session.id} for {sender} (sender key: {sender_key})"
)
try:
plaintext = session.decrypt(message)
except olm.OlmSessionError as e:
raise DecryptionError(
"Failed to decrypt olm event with session created from prekey message"
) from e
await self.crypto_store.update_session(sender_key, session)
return plaintext
async def _try_decrypt_olm_ciphertext(
self, sender_key: IdentityKey, message: OlmCiphertext
) -> Optional[str]:
sessions = await self.crypto_store.get_sessions(sender_key)
for session in sessions:
if message.type == OlmMsgType.PREKEY and not session.matches(message.body):
continue
try:
plaintext = session.decrypt(message)
except olm.OlmSessionError as e:
if message.type == OlmMsgType.PREKEY:
raise MatchingSessionDecryptionError(
"decryption failed with matching session"
) from e
else:
await self.crypto_store.update_session(sender_key, session)
return plaintext
return None
async def _create_inbound_session(self, sender_key: IdentityKey, ciphertext: str) -> Session:
session = self.account.new_inbound_session(sender_key, ciphertext)
await self.crypto_store.put_account(self.account)
await self.crypto_store.add_session(sender_key, session)
return session
async def METHOD_NAME(self, sender: UserID, sender_key: IdentityKey) -> None:
raise NotImplementedError() | null |
5,903 | # copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This code is refer from:
https://github.com/whai362/PSENet/blob/python3/models/neck/fpn.py
"""
import paddle.nn as nn
import paddle
import math
import paddle.nn.functional as F
class Conv_BN_ReLU(nn.Layer):
def __init__(self,
in_planes,
out_planes,
kernel_size=1,
stride=1,
padding=0):
super(Conv_BN_ReLU, self).__init__()
self.conv = nn.Conv2D(
in_planes,
out_planes,
kernel_size=kernel_size,
stride=stride,
padding=padding,
bias_attr=False)
self.bn = nn.BatchNorm2D(out_planes, momentum=0.1)
self.relu = nn.ReLU()
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Normal(
0, math.sqrt(2. / n)))
elif isinstance(m, nn.BatchNorm2D):
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Constant(1.0))
m.bias = paddle.create_parameter(
shape=m.bias.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Constant(0.0))
def forward(self, x):
return self.relu(self.bn(self.conv(x)))
class FPN(nn.Layer):
def __init__(self, in_channels, out_channels):
super(FPN, self).__init__()
# Top layer
self.toplayer_ = Conv_BN_ReLU(
in_channels[3], out_channels, kernel_size=1, stride=1, padding=0)
# Lateral layers
self.latlayer1_ = Conv_BN_ReLU(
in_channels[2], out_channels, kernel_size=1, stride=1, padding=0)
self.latlayer2_ = Conv_BN_ReLU(
in_channels[1], out_channels, kernel_size=1, stride=1, padding=0)
self.latlayer3_ = Conv_BN_ReLU(
in_channels[0], out_channels, kernel_size=1, stride=1, padding=0)
# Smooth layers
self.smooth1_ = Conv_BN_ReLU(
out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.smooth2_ = Conv_BN_ReLU(
out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.smooth3_ = Conv_BN_ReLU(
out_channels, out_channels, kernel_size=3, stride=1, padding=1)
self.out_channels = out_channels * 4
for m in self.sublayers():
if isinstance(m, nn.Conv2D):
n = m._kernel_size[0] * m._kernel_size[1] * m._out_channels
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Normal(
0, math.sqrt(2. / n)))
elif isinstance(m, nn.BatchNorm2D):
m.weight = paddle.create_parameter(
shape=m.weight.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Constant(1.0))
m.bias = paddle.create_parameter(
shape=m.bias.shape,
dtype='float32',
default_initializer=paddle.nn.initializer.Constant(0.0))
def METHOD_NAME(self, x, scale=1):
return F.upsample(x, scale_factor=scale, mode='bilinear')
def _upsample_add(self, x, y, scale=1):
return F.upsample(x, scale_factor=scale, mode='bilinear') + y
def forward(self, x):
f2, f3, f4, f5 = x
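        # Top-down pathway: start from the deepest feature map (f5), and at each
        # level upsample the coarser map, add the lateral 1x1-projected feature,
        # then smooth the result with a 3x3 conv.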
p5 = self.toplayer_(f5)
f4 = self.latlayer1_(f4)
p4 = self._upsample_add(p5, f4, 2)
p4 = self.smooth1_(p4)
f3 = self.latlayer2_(f3)
p3 = self._upsample_add(p4, f3, 2)
p3 = self.smooth2_(p3)
f2 = self.latlayer3_(f2)
p2 = self._upsample_add(p3, f2, 2)
p2 = self.smooth3_(p2)
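        # Bring every pyramid level back to p2's spatial resolution before
        # concatenating all four maps along the channel axis.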
p3 = self.METHOD_NAME(p3, 2)
p4 = self.METHOD_NAME(p4, 4)
p5 = self.METHOD_NAME(p5, 8)
fuse = paddle.concat([p2, p3, p4, p5], axis=1)
return fuse | null |
5,904 | # SPDX-FileCopyrightText: 2023 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
from queue import Queue
import time
import speech_recognition as sr
class Listener:
def __init__(
self, api_key, energy_threshold=300, phrase_timeout=3.0, record_timeout=30
):
self.listener_handle = None
self.microphone = sr.Microphone()
self.recognizer = sr.Recognizer()
self.recognizer.energy_threshold = energy_threshold
self.recognizer.dynamic_energy_threshold = False
self.recognizer.pause_threshold = 1
self.last_sample = bytes()
self.phrase_time = time.monotonic()
self.phrase_timeout = phrase_timeout
with self.microphone as source:
self.recognizer.adjust_for_ambient_noise(
source
) # we only need to calibrate once, before we start listening
self.record_timeout = record_timeout
self.phrase_complete = False
self.data_queue = Queue()
self.listener_handle = None
self.api_key = api_key
def listen(self, ready_callback=None):
print("Start listening...")
self.phrase_complete = False
start = time.monotonic()
self._start_listening()
if ready_callback:
ready_callback()
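        # Poll until the background recorder has queued audio and the phrase
        # timeout has elapsed, then mark the phrase complete and stop listening.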
while (
self.listener_handle and not self.speech_waiting()
) or not self.phrase_complete:
if self.phrase_time and time.monotonic() > start + self.phrase_timeout:
self.last_sample = bytes()
self.phrase_complete = True
self.phrase_time = time.monotonic() - start
self.stop_listening()
def _save_audio_callback(self, _, audio):
print("Saving audio")
data = audio.get_raw_data()
self.data_queue.put(data)
def _get_audio(self):
"""Concatenate and convert the queued raw data back to audio and return it"""
start = time.monotonic()
if self.speech_waiting():
self.phrase_complete = False
if self.phrase_time and time.monotonic() > start + self.phrase_timeout:
self.last_sample = bytes()
self.phrase_complete = True
self.phrase_time = time.monotonic() - start
# Concatenate our current audio data with the latest audio data.
while self.speech_waiting():
data = self.data_queue.get()
self.last_sample += data
# Use AudioData to convert the raw data to wav data.
return sr.AudioData(
self.last_sample,
self.microphone.SAMPLE_RATE,
self.microphone.SAMPLE_WIDTH,
)
return None
def _start_listening(self):
if not self.listener_handle:
self.listener_handle = self.recognizer.listen_in_background(
self.microphone,
self._save_audio_callback,
phrase_time_limit=self.record_timeout,
)
def stop_listening(self, wait_for_stop=False):
if self.listener_handle:
self.listener_handle(wait_for_stop=wait_for_stop)
self.listener_handle = None
print("Stop listening...")
def is_listening(self):
return self.listener_handle is not None
def speech_waiting(self):
return not self.data_queue.empty()
def METHOD_NAME(self):
audio = self._get_audio()
if audio:
# Transcribe the audio data to text using Whisper
print("Recognizing...")
attempts = 0
while attempts < 3:
try:
result = self.recognizer.recognize_whisper_api(
audio, api_key=self.api_key
)
return result.strip()
except sr.RequestError as e:
print(f"Error: {e}")
time.sleep(3)
attempts += 1
print("Retry attempt: ", attempts)
print("Failed to recognize")
return None
return None | null |
5,905 | import gzip
from datetime import timedelta
from typing import Any
import pytest
from django.utils import timezone
from freezegun.api import FrozenDateTimeFactory
from storages.backends.s3boto3 import S3Boto3Storage
from thunderstore.cache.storage import get_cache_storage
from thunderstore.community.factories import CommunityFactory
from thunderstore.community.models import Community
from thunderstore.repository.models.cache import APIV1PackageCache
from thunderstore.utils.makemigrations import StubStorage
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community_without_community(
community: Community,
) -> None:
# Make sure a community is in the DB to ensure a random one isn't returned
assert community.pk
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
@pytest.mark.django_db
def test_api_v1_package_cache_get_latest_for_community(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
community_a = CommunityFactory()
community_b = CommunityFactory()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
is None
)
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
APIV1PackageCache.update_for_community(community_a, b"")
APIV1PackageCache.update_for_community(community_b, b"")
assert APIV1PackageCache.get_latest_for_community(community_identifier=None) is None
cache_a = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
cache_b = APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
assert cache_a.pk != cache_b.pk
assert cache_a.community == community_a
assert cache_b.community == community_b
APIV1PackageCache.update_for_community(community_a, b"")
cache_a2 = APIV1PackageCache.get_latest_for_community(
community_identifier=community_a.identifier
)
assert cache_a2.pk != cache_a.pk
cache_b.delete()
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community_b.identifier
)
is None
)
@pytest.mark.django_db
def test_api_v1_package_cache_update_for_community(community: Community) -> None:
content = b"this is a test message"
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
)
is None
)
latest = APIV1PackageCache.update_for_community(community, content=content)
assert latest.content_type == "application/json"
assert latest.content_encoding == "gzip"
assert latest.community.pk == community.pk
assert (
APIV1PackageCache.get_latest_for_community(
community_identifier=community.identifier
).pk
== latest.pk
)
with gzip.GzipFile(fileobj=latest.data, mode="r") as f:
result = f.read()
assert result == content
@pytest.mark.django_db
def test_api_v1_package_cache_drop_stale_cache(
freezer: FrozenDateTimeFactory, settings: Any
) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
start = timezone.now()
community_a = CommunityFactory()
community_b = CommunityFactory()
cache_a1 = APIV1PackageCache.update_for_community(community_a, b"")
cache_b1 = APIV1PackageCache.update_for_community(community_b, b"")
communityless_cache = APIV1PackageCache.update_for_community(community_a, b"")
communityless_cache.community = None
communityless_cache.save()
    # B1 is within 1 hour of B2 so should not be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=30))
cache_b2 = APIV1PackageCache.update_for_community(community_b, b"")
cache_b2.last_modified = start + timedelta(minutes=30)
cache_b2.save()
# A1 is over 60 minutes older than A2 and should be dropped
# TODO: Use freezegun once https://github.com/spulec/freezegun/issues/331 is fixed
# freezer.move_to(start + timedelta(minutes=61))
cache_a2 = APIV1PackageCache.update_for_community(community_a, b"")
cache_a2.last_modified = start + timedelta(minutes=61)
cache_a2.save()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 1
APIV1PackageCache.drop_stale_cache()
assert APIV1PackageCache.objects.filter(pk=communityless_cache.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a1.pk).count() == 0
assert APIV1PackageCache.objects.filter(pk=cache_a2.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b1.pk).count() == 1
assert APIV1PackageCache.objects.filter(pk=cache_b2.pk).count() == 1
@pytest.mark.django_db
def METHOD_NAME(settings: Any) -> None:
settings.DISABLE_TRANSACTION_CHECKS = True
CommunityFactory() # Create a community without a community site
assert APIV1PackageCache.drop_stale_cache() is None # Ensure no crash
@pytest.mark.django_db
def test_api_v1_package_cache_delete_file_transactions_disabled(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
with pytest.raises(RuntimeError, match="Must not be called during a transaction"):
cache.delete_file()
@pytest.mark.django_db(transaction=True)
def test_api_v1_package_cache_delete_file_transactionless_allowed(community: Community):
cache = APIV1PackageCache.update_for_community(community, b"")
cache.delete_file()
@pytest.mark.django_db
def test_api_v1_package_cache_delete_file(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete_file()
assert not storage.exists(name)
cache.refresh_from_db()
assert cache.is_deleted is True
assert bool(cache.data) is False
@pytest.mark.django_db
def test_api_v1_package_cache_delete(community: Community, settings: Any):
settings.DISABLE_TRANSACTION_CHECKS = True
cache = APIV1PackageCache.update_for_community(community, b"")
storage: S3Boto3Storage = cache.data.storage
assert isinstance(storage, S3Boto3Storage)
name = cache.data.name
assert storage.exists(name)
cache.delete()
assert not storage.exists(name)
@pytest.mark.django_db
def test_api_v1_package_cache_queryset_delete_disallowed():
with pytest.raises(NotImplementedError, match="Delete is not supported for"):
APIV1PackageCache.objects.all().delete()
def test_api_v1_package_cache_storage_is_stub_during_makemigrations(mocker):
mocker.patch("sys.argv", ["manage.py", "makemigrations"])
storage = get_cache_storage()
assert isinstance(storage, StubStorage)
def test_api_v1_package_cache_storage_is_s3_during_run(mocker):
mocker.patch("sys.argv", ["manage.py", "runserver"])
storage = get_cache_storage()
assert isinstance(storage, S3Boto3Storage) | null |
5,906 | import numpy as np
import cantera as ct
from . import utilities
import math
import pytest
class TestFunc1(utilities.CanteraTest):
def test_function(self):
f = ct.Func1(np.sin)
self.assertNear(f(0), np.sin(0))
self.assertNear(f(0.1), np.sin(0.1))
self.assertNear(f(0.7), np.sin(0.7))
def test_lambda(self):
f = ct.Func1(lambda t: np.sin(t)*np.sqrt(t))
assert f.type == "functor"
for t in [0.1, 0.7, 4.5]:
self.assertNear(f(t), np.sin(t)*np.sqrt(t))
def test_callable(self):
class Multiplier:
def __init__(self, factor):
self.factor = factor
def __call__(self, t):
return self.factor * t
m = Multiplier(8.1)
f = ct.Func1(m)
assert f.type == "functor"
for t in [0.1, 0.7, 4.5]:
self.assertNear(f(t), 8.1*t)
def test_constant(self):
f = ct.Func1(5)
for t in [0.1, 0.7, 4.5]:
self.assertNear(f(t), 5)
assert f.type == "constant"
def test_sequence(self):
f = ct.Func1([5])
for t in [0.1, 0.7, 4.5]:
self.assertNear(f(t), 5)
with self.assertRaises(TypeError):
ct.Func1([3,4])
def test_numpy(self):
f = ct.Func1(np.array(5))
assert f.type == "constant"
g = ct.Func1(np.array([[5]]))
assert g.type == "constant"
for t in [0.1, 0.7, 4.5]:
self.assertNear(f(t), 5)
self.assertNear(g(t), 5)
with self.assertRaises(TypeError):
ct.Func1(np.array([3,4]))
def test_failure(self):
def METHOD_NAME(t):
raise ValueError('bad')
f = ct.Func1(METHOD_NAME)
with self.assertRaises(ValueError):
f(0.1)
def test_unpicklable(self):
import pickle
f = ct.Func1(np.sin)
with self.assertRaises(NotImplementedError):
pickle.dumps(f)
def test_uncopyable(self):
import copy
f = ct.Func1(np.sin)
with self.assertRaises(NotImplementedError):
copy.copy(f)
def test_simple(self):
functors = {
'sin': math.sin,
'cos': math.cos,
'exp': math.exp,
'log': math.log,
}
for name, fcn in functors.items():
coeff = 2.34
func = ct.Func1.cxx_functor(name, coeff)
assert func.type == name
for val in [.1, 1., 10.]:
assert name in func.write()
assert func(val) == pytest.approx(fcn(coeff * val))
def test_compound(self):
functors = {
'sum': lambda x, y: x + y,
'diff': lambda x, y: x - y,
'product': lambda x, y: x * y,
'ratio': lambda x, y: x / y,
}
f1 = ct.Func1.cxx_functor('pow', 2)
f2 = ct.Func1.cxx_functor('sin')
for name, fcn in functors.items():
func = ct.Func1.cxx_functor(name, f1, f2)
assert func.type == name
for val in [.1, 1., 10.]:
assert name not in func.write()
assert func(val) == pytest.approx(fcn(f1(val), f2(val)))
def test_modified(self):
functors = {
'plus-constant': lambda x, y: x + y,
'times-constant': lambda x, y: x * y,
}
f1 = ct.Func1.cxx_functor('sin')
constant = 2.34
for name, fcn in functors.items():
func = ct.Func1.cxx_functor(name, f1, constant)
assert func.type == name
for val in [.1, 1., 10.]:
assert name not in func.write()
assert func(val) == pytest.approx(fcn(f1(val), constant))
def test_tabulated1(self):
# this implicitly probes advanced functors
arr = np.array([[0, 2], [1, 1], [2, 0]])
time = arr[:, 0]
fval = arr[:, 1]
fcn = ct.Tabulated1(time, fval)
assert fcn.type == "tabulated-linear"
for t, f in zip(time, fval):
self.assertNear(f, fcn(t))
def test_tabulated2(self):
time = [0, 1, 2]
fval = [2, 1, 0]
fcn = ct.Tabulated1(time, fval)
assert fcn.type == "tabulated-linear"
for t, f in zip(time, fval):
self.assertNear(f, fcn(t))
def test_tabulated3(self):
time = 0, 1, 2,
fval = 2, 1, 0,
fcn = ct.Tabulated1(time, fval)
self.assertNear(fcn(-1), fval[0])
self.assertNear(fcn(3), fval[-1])
def test_tabulated4(self):
time = np.array([0, 1, 2])
fval = np.array([2, 1, 0])
fcn = ct.Tabulated1(time, fval)
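        # Midpoints between tabulated times should interpolate linearly to the
        # midpoints of the tabulated values.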
tt = .5*(time[1:] + time[:-1])
ff = .5*(fval[1:] + fval[:-1])
for t, f in zip(tt, ff):
self.assertNear(f, fcn(t))
def test_tabulated5(self):
time = [0, 1, 2]
fval = [2, 1, 0]
fcn = ct.Tabulated1(time, fval, method='previous')
assert fcn.type == "tabulated-previous"
val = np.array([fcn(v) for v in [-0.5, 0, 0.5, 1.5, 2, 2.5]])
self.assertArrayNear(val, np.array([2.0, 2.0, 2.0, 1.0, 0.0, 0.0]))
def test_tabulated_failures(self):
with pytest.raises(ct.CanteraError, match="even number of entries"):
ct.Tabulated1(range(2), range(3))
with pytest.raises(ct.CanteraError, match="at least 4 entries"):
ct.Tabulated1([], [])
with pytest.raises(ct.CanteraError, match="monotonically"):
ct.Tabulated1((0, 1, 0.5, 2), (2, 1, 1, 0))
with pytest.raises(ct.CanteraError, match="No such type"):
ct.Tabulated1((0, 1, 1, 2), (2, 1, 1, 0), method='spam') | null |
5,907 | import os
import unittest
import dcs
from dcs.drawing.drawing import LineStyle, Rgba
from dcs.drawing.drawings import StandardLayer
from dcs.drawing.icon import StandardIcon
from dcs.drawing.polygon import Circle
from dcs.mapping import Point
from dcs.mission import Mission
class DrawingTests(unittest.TestCase):
def setUp(self):
os.makedirs('missions', exist_ok=True)
def test_load_save_load(self) -> None:
m: Mission = dcs.mission.Mission()
self.assertEqual(0, len(m.load_file('tests/missions/Draw_tool_test.miz')))
self.assert_expected_stuff(m)
mission_path = 'missions/Draw_tool_test_saved.miz'
m.save(mission_path)
m2 = dcs.mission.Mission()
self.assertEqual(0, len(m2.load_file(mission_path)))
self.assert_expected_stuff(m2)
def assert_expected_stuff(self, m: Mission) -> None:
self.assertEqual(5, len(m.drawings.layers))
self.assertEqual(False, m.drawings.options.hiddenOnF10Map["Observer"]["Neutral"])
red_layer = m.drawings.get_layer(StandardLayer.Red)
self.assertEqual(True, red_layer.visible)
self.assertEqual("Red", red_layer.name)
self.assertEqual("Icon 2", red_layer.objects[0].name)
line = m.drawings.get_layer(StandardLayer.Blue).objects[0]
self.assertEqual("Line 2 segments closed", line.name)
self.assertEqual(Rgba(255, 255, 0, 131), line.color)
self.assertEqual(-260885.56415634, line.position.x)
self.assertEqual(671996.90379981, line.position.y)
self.assertEqual(0, line.points[0].x)
self.assertEqual(0, line.points[0].y)
self.assertEqual(-6076.521389334, line.points[2].x)
self.assertEqual(3038.260694667, line.points[2].y)
def test_add_drawings_to_loaded_mission(self) -> None:
m: Mission = dcs.mission.Mission()
self.assertEqual(0, len(m.load_file('tests/missions/Draw_tool_test.miz')))
circle = Circle(
True,
Point(10, 10, m.terrain),
"TEST CIRCLE",
Rgba(20, 30, 40, 200),
":S",
Rgba(50, 60, 70, 150),
10,
LineStyle.Solid,
100
)
m.drawings.layers[0].add_drawing(circle)
self.assertEqual("TEST CIRCLE", m.drawings.layers[0].objects[1].name)
mission_path = 'missions/Draw_tool_test_added_drawings.miz'
m.save(mission_path)
m2 = dcs.mission.Mission()
self.assertEqual(0, len(m2.load_file(mission_path)))
self.assert_expected_stuff(m2)
self.assertEqual("TEST CIRCLE", m2.drawings.layers[0].objects[1].name)
def test_add_drawings_to_new_mission(self) -> None:
m: Mission = dcs.mission.Mission()
circle = Circle(
True,
Point(10, 10, m.terrain),
"TEST CIRCLE",
Rgba(20, 30, 40, 200),
":S",
Rgba(50, 60, 70, 150),
10,
LineStyle.Solid,
100
)
red_layer = m.drawings.get_layer(StandardLayer.Red)
red_layer.add_drawing(circle)
red_layer.add_line_segments(
Point(1, 1, m.terrain),
[Point(6, 6, m.terrain), Point(7, 7, m.terrain)],
closed=True,
)
m.drawings.options.hiddenOnF10Map["Pilot"]["Red"] = True
m.drawings.options.hiddenOnF10Map["Instructor"]["Blue"] = True
m.drawings.options.hiddenOnF10Map["Observer"]["Neutral"] = True
mission_path = 'missions/New_mission_w_added_drawings.miz'
m.save(mission_path)
m2 = dcs.mission.Mission()
self.assertEqual(0, len(m2.load_file(mission_path)))
red_layer2 = m2.drawings.get_layer(StandardLayer.Red)
self.assertEqual("TEST CIRCLE", red_layer2.objects[0].name)
self.assertEqual("A line", red_layer2.objects[1].name)
self.assertEqual(True, red_layer2.objects[1].closed)
self.assertEqual("Red", red_layer2.objects[0].layer_name)
self.assertEqual("Red", red_layer2.objects[1].layer_name)
def test_set_options_hidden_f10(self) -> None:
m: Mission = dcs.mission.Mission()
m.drawings.options.hiddenOnF10Map["Pilot"]["Red"] = True
m.drawings.options.hiddenOnF10Map["Instructor"]["Blue"] = True
m.drawings.options.hiddenOnF10Map["Observer"]["Neutral"] = True
mission_path = 'missions/New_mission_w_added_drawings.miz'
m.save(mission_path)
m2 = dcs.mission.Mission()
self.assertEqual(0, len(m2.load_file(mission_path)))
self.assertEqual(False, m2.drawings.options.hiddenOnF10Map["Pilot"]["Blue"])
self.assertEqual(True, m2.drawings.options.hiddenOnF10Map["Pilot"]["Red"])
self.assertEqual(True, m2.drawings.options.hiddenOnF10Map["Instructor"]["Blue"])
self.assertEqual(True, m2.drawings.options.hiddenOnF10Map["Observer"]["Neutral"])
def test_add_std_icon(self) -> None:
m: Mission = dcs.mission.Mission()
red_layer = m.drawings.get_layer(StandardLayer.Red)
red_layer.add_icon(
Point(1000, 1000, m.terrain),
StandardIcon.MechanizedArtillery,
)
mission_path = 'missions/New_mission_w_added_std_icon.miz'
m.save(mission_path)
m2 = dcs.mission.Mission()
self.assertEqual(0, len(m2.load_file(mission_path)))
red_layer = m.drawings.get_layer(StandardLayer.Red)
self.assertEqual(StandardIcon.MechanizedArtillery.value, red_layer.objects[0].file)
def METHOD_NAME(self) -> None:
m: Mission = dcs.mission.Mission()
layer = m.drawings.get_layer(StandardLayer.Common)
self.assertEqual(0, len(layer.objects))
oblong = layer.add_oblong(
Point(1000, 1000, m.terrain),
Point(4000, 1000, m.terrain),
1000,
resolution=20,
)
self.assertEqual(1, len(layer.objects))
# Resolution 20 should give 43 points
        # (21 at each end and one extra to close the polygon)
self.assertEqual(43, len(oblong.points)) | null |
5,908 | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Imagenet convert tool for MindRecord.
"""
import os
import time
from mindspore import log as logger
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread
__all__ = ['ImageNetToMR']
class ImageNetToMR:
r"""
A class to transform from imagenet to MindRecord.
Args:
map_file (str): The map file that indicates label. This file can be generated by command
:code:`ls -l [image_dir] | grep -vE "total|\." | awk -F " " '{print $9, NR-1;}' > [file_path]` ,
            where `image_dir` is the image directory containing the n01440764, n01443537, n01484850 and n15075141
            directories, and `file_path` is the generated `map_file` . An example of `map_file` is as below:
.. code-block::
n01440764 0
n01443537 1
n01484850 2
n01491361 3
...
n15075141 999
        image_dir (str): Image directory containing the n01440764, n01443537, n01484850 and n15075141 directories.
destination (str): MindRecord file path to transform into, ensure that the directory is created in advance and
no file with the same name exists in the directory.
partition_number (int, optional): The partition size. Default: ``1`` .
Raises:
ValueError: If `map_file` , `image_dir` or `destination` is invalid.
Examples:
>>> from mindspore.mindrecord import ImageNetToMR
>>>
>>> map_file = "/path/to/imagenet/map_file"
>>> imagenet_dir = "/path/to/imagenet/train"
>>> mindrecord_file = "/path/to/mindrecord/file"
>>> imagenet_to_mr = ImageNetToMR(map_file, imagenet_dir, mindrecord_file, 8)
>>> status = imagenet_to_mr.transform()
"""
def __init__(self, map_file, image_dir, destination, partition_number=1):
check_filename(map_file)
self.map_file = map_file
check_filename(image_dir)
self.image_dir = image_dir
check_filename(destination)
self.destination = destination
if partition_number is not None:
if not isinstance(partition_number, int):
raise ValueError("The parameter partition_number must be int")
self.partition_number = partition_number
else:
raise ValueError("The parameter partition_number must be int")
self.writer = FileWriter(self.destination, self.partition_number)
# pylint: disable=missing-docstring
def METHOD_NAME(self):
t0_total = time.time()
imagenet_schema_json = {"label": {"type": "int32"},
"image": {"type": "bytes"},
"file_name": {"type": "string"}}
logger.info("transformed MindRecord schema is: {}".format(imagenet_schema_json))
# set the header size
self.writer.set_header_size(1 << 24)
# set the page size
self.writer.set_page_size(1 << 26)
# create the schema
self.writer.add_schema(imagenet_schema_json, "imagenet_schema")
# add the index
self.writer.add_index(["label", "file_name"])
imagenet_iter = self._get_imagenet_as_dict()
batch_size = 256
transform_count = 0
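        # Pull records from the imagenet iterator in batches of 256 and write
        # each batch with parallel raw-data writes; the final partial batch is
        # flushed when the iterator is exhausted.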
while True:
data_list = []
try:
for _ in range(batch_size):
data_list.append(imagenet_iter.__next__())
transform_count += 1
self.writer.write_raw_data(data_list, True)
logger.info("transformed {} record...".format(transform_count))
except StopIteration:
if data_list:
self.writer.write_raw_data(data_list, True)
logger.info("transformed {} record...".format(transform_count))
break
ret = self.writer.commit()
t1_total = time.time()
logger.info("--------------------------------------------")
logger.info("END. Total time: {}".format(t1_total - t0_total))
logger.info("--------------------------------------------")
return ret
def transform(self):
"""
Execute transformation from imagenet to MindRecord.
Note:
Please refer to the Examples of :class:`mindspore.mindrecord.ImageNetToMR` .
Returns:
MSRStatus, SUCCESS or FAILED.
Raises:
ParamTypeError: If index field is invalid.
MRMOpenError: If failed to open MindRecord file.
MRMValidateDataError: If data does not match blob fields.
MRMSetHeaderError: If failed to set header.
MRMWriteDatasetError: If failed to write dataset.
TypeError: If `parallel_writer` is not bool.
"""
t = ExceptionThread(target=self.METHOD_NAME)
t.daemon = True
t.start()
t.join()
if t.exitcode != 0:
raise t.exception
return t.res
def _get_imagenet_as_dict(self):
"""
Get data from imagenet as dict.
Yields:
data (dict of list): imagenet data list which contains dict.
"""
real_file_path = os.path.realpath(self.map_file)
if not os.path.exists(real_file_path):
raise IOError("map file {} not exists".format(self.map_file))
label_dict = {}
with open(real_file_path) as fp:
line = fp.readline()
while line:
labels = line.split(" ")
label_dict[labels[1]] = labels[0]
line = fp.readline()
        # collect all the label directories, e.g. n02087046, n02094114, n02109525
dir_paths = {}
for item in label_dict:
real_path = os.path.join(self.image_dir, label_dict[item])
if not os.path.isdir(real_path):
logger.warning("{} dir is not exist".format(real_path))
continue
dir_paths[item] = real_path
if not dir_paths:
raise PathNotExistsError("not valid image dir in {}".format(self.image_dir))
# get the filename, label and image binary as a dict
for label in dir_paths:
for item in os.listdir(dir_paths[label]):
file_name = os.path.join(dir_paths[label], item)
if not item.endswith("JPEG") and not item.endswith("jpg"):
logger.warning("{} file is not suffix with JPEG/jpg, skip it.".format(file_name))
continue
data = {}
data["file_name"] = str(file_name)
data["label"] = int(label)
# get the image data
real_file_path = os.path.realpath(file_name)
image_file = open(real_file_path, "rb")
image_bytes = image_file.read()
image_file.close()
if not image_bytes:
logger.warning("The image file: {} is invalid.".format(file_name))
continue
data["image"] = image_bytes
yield data | null |
5,909 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as domain_cons_utils
from unittest.mock import patch
class DomainConstraintAddTestCase(BaseTestGenerator):
""" This class will add new domain constraint under schema node. """
scenarios = utils.generate_scenarios('domain_constraint_create',
domain_cons_utils.test_cases)
def setUp(self):
super().setUp()
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
self.domain_name = "domain_%s" % (str(uuid.uuid4())[1:8])
self.domain_info = domain_cons_utils.create_domain(self.server,
self.db_name,
self.schema_name,
self.schema_id,
self.domain_name)
def create_domain_constraint(self):
"""
This function create a domain constraint and returns it
:return: created domain constraint response
"""
return self.tester.post(self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) +
'/' + str(self.schema_id) + '/' +
str(self.domain_id) + '/',
data=json.dumps(self.test_data),
content_type='html/json',
follow_redirects=True)
def METHOD_NAME(self):
""" This function will add domain constraint under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
self.test_data['name'] =\
"test_domain_con_add_%s" % (str(uuid.uuid4())[1:8])
self.domain_id = self.domain_info[0]
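        # Positive tests call the endpoint directly; negative tests patch the
        # mocked connection/driver functions so the view returns the error
        # status code asserted below.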
if self.is_positive_test:
response = self.create_domain_constraint()
else:
if hasattr(self, "internal_server_error"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_in_db"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.create_domain_constraint()
if hasattr(self, "error_getting_coid"):
with patch(self.mock_data["function_name"],
side_effect=eval(self.mock_data["return_value"])):
response = self.create_domain_constraint()
if hasattr(self, "error_domain_id"):
self.domain_id = 99999
response = self.create_domain_constraint()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,910 | # """
# This file demonstrates writing tests using the unittest module. These will pass
# when you run "manage.py test".
# Replace this with more appropriate tests for your application.
# """
from django.test import TestCase
from django.contrib.gis.measure import D
# from nose.tools import istest
from nose.tools import assert_equal, assert_false, assert_true, assert_raises
from .. import utils
class TestToDistance (TestCase):
def test_no_units_assumes_meters(self):
d = utils.to_distance('123.45')
assert_equal(d, D(m=123.45))
def test_units_are_respected(self):
d = utils.to_distance('123.45 km')
assert_equal(d, D(km=123.45))
d = utils.to_distance('123.45mi')
assert_equal(d, D(mi=123.45))
class TestBuildRelativeURL (TestCase):
def test_relative_path_with_leading_slash(self):
url = utils.build_relative_url('http://ex.co/pictures/silly/abc.png', '/home')
assert_equal(url, 'http://ex.co/home')
def test_relative_path_without_leading_slash(self):
url = utils.build_relative_url('http://ex.co/p/index.html', 'about.html')
assert_equal(url, 'http://ex.co/p/about.html')
def test_relative_path_empty(self):
url = utils.build_relative_url('http://ex.co/p/index.html', '')
assert_equal(url, 'http://ex.co/p/index.html')
def test_original_path_ends_with_slash(self):
url = utils.build_relative_url('http://ex.co/p/', 'about.html')
assert_equal(url, 'http://ex.co/p/about.html')
def test_leading_slash_beats_trailing_slash(self):
url = utils.build_relative_url('http://ex.co/pictures/silly/', '/home')
assert_equal(url, 'http://ex.co/home')
def test_original_path_empty(self):
url = utils.build_relative_url('', 'about.html')
assert_equal(url, '/about.html')
def METHOD_NAME(self):
url = utils.build_relative_url('http://ex.co/', 'https://google.com/')
assert_equal(url, 'https://google.com/')
# class TestToWkt (object):
# @istest
# def converts_from_dict_with_lat_and_lng_to_point(self):
# data = dict(lat=23, lng=140, extra='something that doesn\'t matter')
# wkt = utils.to_wkt(data)
# assert_equal(wkt, 'POINT (140 23)')
# @istest
# def valid_wkt_is_identical(self):
# data = 'POINT (150 70)'
# wkt = utils.to_wkt(data)
# assert_equal(wkt, 'POINT (150 70)')
# @istest
# def invalid_text_returned_unchanged(self):
# data = 'lat lng ... this is not wkt'
# assert_equal(utils.to_wkt(data), data)
# @istest
# def invalid_type_raises_error(self):
# assert_raises(TypeError, utils.to_wkt, None)
# assert_raises(TypeError, utils.to_wkt, ['lat', 'lng'])
# assert_raises(TypeError, utils.to_wkt, 99)
# class TestIsIterable(object):
# @istest
# def some_builtins_not_iterable(self):
# assert_false(utils.isiterable(None))
# assert_false(utils.isiterable(1))
# @istest
# def some_builtins_iterable(self):
# assert_true(utils.isiterable(''))
# assert_true(utils.isiterable({}))
# assert_true(utils.isiterable([]))
# assert_true(utils.isiterable(()))
# @istest
# def generator_is_iterable(self):
# def ints_forever():
# n = 0
# while True:
# yield n
# n += 1
# assert_true(utils.isiterable(ints_forever()))
# class TestUnpackDataBlob(object):
# @istest
# def removes_csrfmiddlewaretoken_and_data_and_not_others(self):
# data = {'csrfmiddlewaretoken': 'xyz', 'a key': 'a value', 'data': '{}'}
# result = utils.unpack_data_blob(data)
# assert_equal(None, result)
# assert_false('csrfmiddlewaretoken' in data)
# assert_false('data' in data)
# assert_true('a key' in data)
# @istest
# def invalid_json_data(self):
# data = {'data': 'this is not json'}
# from djangorestframework.response import ErrorResponse
# assert_raises(ErrorResponse, utils.unpack_data_blob, data)
# data = {'data': '["this is json but not a dict"]'}
# from djangorestframework.response import ErrorResponse
# assert_raises(ErrorResponse, utils.unpack_data_blob, data)
# @istest
# def json_merged_with_data(self):
# data = {'x': 'y', 'data': '{"inner": "peace", "outer": "turmoil"}'}
# utils.unpack_data_blob(data)
# assert_equal(data,
# {'x': 'y', 'inner': 'peace', 'outer': 'turmoil'})
# class TestCachedProperty (object):
# @istest
# def cached_property_is_cached(self):
# class Foo(object):
# call_count = 0
# def do_something(self):
# self.call_count += 1
# return 'hello %d' % self.call_count
# do_something_cached = utils.cached_property(do_something)
# # Uncached, the count is incremented
# foo = Foo()
# assert_equal(foo.do_something(), 'hello 1')
# assert_equal(foo.do_something(), 'hello 2')
# assert_equal(foo.do_something(), 'hello 3')
# # Cached, it's only incremented once.
# assert_equal(foo.do_something_cached, 'hello 4')
# for i in range(10):
# assert_equal(foo.do_something_cached, 'hello 4')
# @istest
# def cached_multiple_properties(self):
# class Foo(object):
# hellocount = 0
# goodbyecount = 100
# @utils.cached_property
# def greeting(self):
# self.hellocount += 1
# return 'hello %d' % self.hellocount
# @utils.cached_property
# def parting(self):
# self.goodbyecount += 1
# return 'goodbye %d' % self.goodbyecount
# foo = Foo()
# assert_equal(foo.greeting, 'hello 1')
# assert_equal(foo.parting, 'goodbye 101')
# assert_equal(foo.greeting, 'hello 1')
# assert_equal(foo.parting, 'goodbye 101') | null |
5,911 | #!/usr/bin/env python
from collections import OrderedDict
from decimal import Decimal
from agate import Table, TableSet
from agate.aggregations import Count, MaxLength, Mean, Min, Sum
from agate.data_types import Number, Text
from agate.exceptions import DataTypeError
from agate.testcase import AgateTestCase
class TestAggregate(AgateTestCase):
def METHOD_NAME(self):
self.table1 = (
('a', 1),
('a', 3),
('b', 2)
)
self.table2 = (
('b', 0),
('a', 2),
('c', 5)
)
self.table3 = (
('a', 1),
('a', 2),
('c', 3)
)
self.text_type = Text()
self.number_type = Number()
self.column_names = ['letter', 'number']
self.column_types = [self.text_type, self.number_type]
self.tables = OrderedDict([
('table1', Table(self.table1, self.column_names, self.column_types)),
('table2', Table(self.table2, self.column_names, self.column_types)),
('table3', Table(self.table3, self.column_names, self.column_types))
])
def test_aggregate_key_name(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
new_table = tableset.aggregate([
('count', Count())
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('test', 'count'))
self.assertColumnTypes(new_table, [Text, Number])
def test_aggregate_key_type(self):
tables = OrderedDict([
(1, Table(self.table1, self.column_names, self.column_types)),
(2, Table(self.table2, self.column_names, self.column_types)),
(3, Table(self.table3, self.column_names, self.column_types))
])
tableset = TableSet(tables.values(), tables.keys(), key_name='test', key_type=self.number_type)
new_table = tableset.aggregate([
('count', Count())
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('test', 'count'))
self.assertColumnTypes(new_table, [Number, Number])
def test_aggregate_row_names(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
new_table = tableset.aggregate([
('count', Count())
])
self.assertRowNames(new_table, ['table1', 'table2', 'table3'])
def test_aggregate_sum(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_sum'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 6),
('table2', 3, 7),
('table3', 3, 6)
])
def test_aggregate_min(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_min', Min('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_min'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 1),
('table2', 3, 0),
('table3', 3, 1)
])
def test_aggregate_two_ops(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('number_sum', Sum('number')),
('number_mean', Mean('number'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'number_sum', 'number_mean'))
self.assertColumnTypes(new_table, [Text, Number, Number, Number])
self.assertRows(new_table, [
('table1', 3, 6, 2),
('table2', 3, 7, Decimal(7) / 3),
('table3', 3, 6, 2)
])
def test_aggregate_max_length(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
new_table = tableset.aggregate([
('count', Count()),
('letter_max_length', MaxLength('letter'))
])
self.assertIsInstance(new_table, Table)
self.assertColumnNames(new_table, ('group', 'count', 'letter_max_length'))
self.assertColumnTypes(new_table, [Text, Number, Number])
self.assertRows(new_table, [
('table1', 3, 1),
('table2', 3, 1),
('table3', 3, 1)
])
def test_aggregate_sum_invalid(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(DataTypeError):
tableset.aggregate([('letter_sum', Sum('letter'))])
    def test_aggregate_bad_column(self):
tableset = TableSet(self.tables.values(), self.tables.keys())
with self.assertRaises(KeyError):
tableset.aggregate([('one_sum', Sum('one'))])
with self.assertRaises(KeyError):
tableset.aggregate([('bad_sum', Sum('bad'))])
def test_nested_aggregation(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
nested = tableset.group_by('letter')
results = nested.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertIsInstance(results, Table)
self.assertColumnNames(results, ('test', 'letter', 'count', 'number_sum'))
self.assertColumnTypes(results, (Text, Text, Number, Number))
self.assertRows(results, [
('table1', 'a', 2, 4),
('table1', 'b', 1, 2),
('table2', 'b', 1, 0),
('table2', 'a', 1, 2),
('table2', 'c', 1, 5),
('table3', 'a', 2, 3),
('table3', 'c', 1, 3)
])
def test_nested_aggregate_row_names(self):
tableset = TableSet(self.tables.values(), self.tables.keys(), key_name='test')
nested = tableset.group_by('letter')
results = nested.aggregate([
('count', Count()),
('number_sum', Sum('number'))
])
self.assertRowNames(results, [
('table1', 'a'),
('table1', 'b'),
('table2', 'b'),
('table2', 'a'),
('table2', 'c'),
('table3', 'a'),
('table3', 'c'),
])
self.assertSequenceEqual(results.rows[('table1', 'a')], ('table1', 'a', 2, 4))
self.assertSequenceEqual(results.rows[('table2', 'c')], ('table2', 'c', 1, 5)) | null |
5,912 | __author__ = "Nitin Kumar, Rick Sherman"
__credits__ = "Jeremy Schulman"
try:
import unittest2 as unittest
except ImportError:
import unittest
from nose.plugins.attrib import attr
from mock import patch, MagicMock
import os
from jnpr.junos import Device
from jnpr.junos.ofacts.swver import facts_software_version as software_version
from jnpr.junos.ofacts.swver import _get_swver
from ncclient.manager import Manager, make_device_handler
from ncclient.transport import SSHSession
from jnpr.junos.exception import RpcError
@attr("unit")
class TestSwver(unittest.TestCase):
@patch("ncclient.manager.connect")
@patch("jnpr.junos.device.warnings")
def setUp(self, mock_warnings, mock_connect):
mock_connect.side_effect = self.METHOD_NAME
self.dev = Device(
host="1.1.1.1",
user="rick",
password="password123",
gather_facts=False,
fact_style="old",
)
self.dev.open()
self.facts = {}
self.facts["vc_capable"] = False
def test_get_swver_vc(self):
self.dev.rpc.cli = MagicMock()
self.facts["vc_capable"] = True
_get_swver(self.dev, self.facts)
self.dev.rpc.cli.assert_called_with("show version all-members", format="xml")
def test_get_swver_vc_capable_standalone(self):
def raise_ex(*args):
if args[0] == "show version all-members":
raise RpcError()
self.dev.rpc.cli = MagicMock(
side_effect=lambda *args, **kwargs: raise_ex(*args)
)
self.facts["vc_capable"] = True
_get_swver(self.dev, self.facts)
self.dev.rpc.cli.assert_called_with(
"show version invoke-on all-routing-engines", format="xml"
)
@patch("jnpr.junos.Device.execute")
def test_swver(self, mock_execute):
mock_execute.side_effect = self.METHOD_NAME
self.facts["master"] = "RE0"
software_version(self.dev, self.facts)
self.assertEqual(self.facts["version"], "12.3R6.6")
@patch("jnpr.junos.Device.execute")
def test_swver_f_master_list(self, mock_execute):
mock_execute.side_effect = self.METHOD_NAME
self.facts["master"] = ["RE0", "RE1"]
software_version(self.dev, self.facts)
self.assertEqual(self.facts["version"], "12.3R6.6")
@patch("jnpr.junos.Device.execute")
def test_swver_hostname_none(self, mock_execute):
mock_execute.side_effect = self.METHOD_NAME
self.facts["master"] = "RE5"
self.facts["version_RE5"] = "15.3R6.6"
software_version(self.dev, self.facts)
self.assertEqual(self.facts["version"], "15.3R6.6")
@patch("jnpr.junos.Device.execute")
def test_swver_txp_master_list(self, mock_execute):
mock_execute.side_effect = self.METHOD_NAME
self.facts["master"] = ["RE0", "RE0", "RE1", "RE2", "RE3"]
self.facts["version_RE0-RE0"] = "14.2R4"
software_version(self.dev, self.facts)
self.assertEqual(self.facts["version"], "14.2R4")
# --> JLS, there should always be a facts['master'] assigned.
# @patch('jnpr.junos.Device.execute')
# def test_swver_master_none(self, mock_execute):
# mock_execute.side_effect = self._mock_manager
# self.facts['master'] = None
# software_version(self.dev, self.facts)
# self.assertEqual(self.facts['version'], '12.3R6.6')
@patch("jnpr.junos.Device.execute")
@patch("jnpr.junos.facts.get_software_information.re.findall")
def test_swver_exception_handling(self, mock_re_findall, mock_execute):
mock_execute.side_effect = self.METHOD_NAME
mock_re_findall.side_effect = IndexError
self.facts["master"] = "RE0"
software_version(self.dev, self.facts)
self.assertEqual(self.facts["version"], "0.0I0.0")
def _read_file(self, fname):
from ncclient.xml_ import NCElement
fpath = os.path.join(os.path.dirname(__file__), "rpc-reply", fname)
foo = open(fpath).read()
rpc_reply = NCElement(
foo, self.dev._conn._device_handler.transform_reply()
)._NCElement__doc[0]
return rpc_reply
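# METHOD_NAME below stands in for both ncclient.manager.connect and
# Device.execute in these tests: when called with device_params kwargs it
# builds a fake netconf Manager, otherwise it returns a canned RPC reply
# loaded from the rpc-reply fixture directory, keyed by the RPC's tag name.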
def METHOD_NAME(self, *args, **kwargs):
if kwargs:
device_params = kwargs["device_params"]
device_handler = make_device_handler(device_params)
session = SSHSession(device_handler)
return Manager(session, device_handler)
if args:
if "version_RE0-RE0" in self.facts:
return self._read_file(args[0].tag + "_RE0-RE0.xml")
return self._read_file(args[0].tag + ".xml") | null |
5,913 | # SPDX-FileCopyrightText: 2020 Jeff Epler for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import random
import time
import board
import displayio
import framebufferio
import rgbmatrix
displayio.release_displays()
matrix = rgbmatrix.RGBMatrix(
width=64, height=32, bit_depth=3,
rgb_pins=[board.D6, board.D5, board.D9, board.D11, board.D10, board.D12],
addr_pins=[board.A5, board.A4, board.A3, board.A2],
clock_pin=board.D13, latch_pin=board.D0, output_enable_pin=board.D1)
display = framebufferio.FramebufferDisplay(matrix, auto_refresh=False)
# This bitmap contains the emoji we're going to use. It is assumed
# to contain 20 icons, each 20x24 pixels. This fits nicely on the 64x32
# RGB matrix display.
filename = "emoji.bmp"
# CircuitPython 6 & 7 compatible
bitmap_file = open(filename, 'rb')
bitmap = displayio.OnDiskBitmap(bitmap_file)
pixel_shader = getattr(bitmap, 'pixel_shader', displayio.ColorConverter())
# # CircuitPython 7+ compatible
# bitmap = displayio.OnDiskBitmap(filename)
# pixel_shader = bitmap.pixel_shader
# Each wheel can be in one of three states:
STOPPED, RUNNING, BRAKING = range(3)
# Return a duplicate of the input list in a random (shuffled) order.
def METHOD_NAME(seq):
return sorted(seq, key=lambda _: random.random())
# The Wheel class manages the state of one wheel. "pos" is a position in
# scaled integer coordinates, with one revolution being 7680 positions
# and 1 pixel being 16 positions. The wheel also has a velocity (in positions
# per tick) and a state (one of the above constants)
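# (That 7680 figure follows from the geometry above: 20 tiles per revolution
# times 24 pixels per tile times 16 positions per pixel = 7680 positions.)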
class Wheel(displayio.TileGrid):
def __init__(self):
# Portions of up to 3 tiles are visible.
super().__init__(bitmap=bitmap, pixel_shader=pixel_shader,
width=1, height=3, tile_width=20, tile_height=24)
self.order = METHOD_NAME(range(20))
self.state = STOPPED
self.pos = 0
self.vel = 0
self.y = 0
self.x = 0
self.stop_time = time.monotonic_ns()
def step(self):
# Update each wheel for one time step
if self.state == RUNNING:
# Slowly lose speed when running, but go at least speed 64
self.vel = max(self.vel * 9 // 10, 64)
if time.monotonic_ns() > self.stop_time:
self.state = BRAKING
elif self.state == BRAKING:
# More quickly lose speed when braking, down to speed 7
self.vel = max(self.vel * 85 // 100, 7)
# Advance the wheel according to the velocity, and wrap it around
# after 7680 positions
self.pos = (self.pos + self.vel) % 7680
# Compute the rounded Y coordinate
yy = round(self.pos / 16)
# Compute the offset of the tile (tiles are 24 pixels tall)
yyy = yy % 24
# Find out which tile is the top tile
off = yy // 24
# If we're braking and a tile is close to midscreen,
# then stop and make sure that tile is exactly centered
if self.state == BRAKING and self.vel == 7 and yyy < 4:
self.pos = off * 24 * 16
self.vel = 0
self.state = STOPPED
# Move the displayed tiles to the correct height and make sure the
# correct tiles are displayed.
self.y = yyy - 20
for i in range(3):
self[i] = self.order[(19 - i + off) % 20]
# Set the wheel running again, using a slight bit of randomness.
# The 'i' value makes sure the first wheel brakes first, the second
# brakes second, and the third brakes third.
def kick(self, i):
self.state = RUNNING
self.vel = random.randint(256, 320)
self.stop_time = time.monotonic_ns() + 3_000_000_000 + i * 350_000_000
# Our fruit machine has 3 wheels, let's create them with a correct horizontal
# (x) offset and arbitrary vertical (y) offset.
g = displayio.Group()
wheels = []
for idx in range(3):
wheel = Wheel()
wheel.x = idx * 22
wheel.y = -20
g.append(wheel)
wheels.append(wheel)
display.show(g)
# Make a unique order of the emoji on each wheel
orders = [METHOD_NAME(range(20)), METHOD_NAME(range(20)), METHOD_NAME(range(20))]
# And put up some images to start with
for si, oi in zip(wheels, orders):
for idx in range(3):
si[idx] = oi[idx]
# We want a way to check if all the wheels are stopped
def all_stopped():
return all(si.state == STOPPED for si in wheels)
# To start with, though, they're all in motion
for idx, si in enumerate(wheels):
si.kick(idx)
# Here's the main loop
while True:
# Refresh the display (doing this manually ensures the wheels move
# together, not at different times)
display.refresh(minimum_frames_per_second=0)
if all_stopped():
# Once everything comes to a stop, wait a little bit and then
# start everything over again. Maybe you want to check if the
# combination is a "winner" and add a light show or something.
for idx in range(100):
display.refresh(minimum_frames_per_second=0)
for idx, si in enumerate(wheels):
si.kick(idx)
# Otherwise, let the wheels keep spinning...
for idx, si in enumerate(wheels):
si.step() | null |
5,914 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy import QtWidgets
from qtpy.QtCore import Signal
from mantidqt.utils.qt import load_ui
from sans.common.enums import BinningType
Ui_SummationSettingsWidget, _ = load_ui(__file__, "summation_settings_widget.ui")
def set_checked_without_signal(checkable, should_be_checked):
checkable.blockSignals(True)
checkable.setChecked(should_be_checked)
checkable.blockSignals(False)
class SummationSettingsWidget(QtWidgets.QWidget, Ui_SummationSettingsWidget):
binningTypeChanged = Signal(int)
preserveEventsChanged = Signal(bool)
binSettingsChanged = Signal()
additionalTimeShiftsChanged = Signal()
sum = Signal()
def __init__(self, parent=None):
super(SummationSettingsWidget, self).__init__(parent)
self.setupUi(self)
self.METHOD_NAME()
def setupUi(self, other):
Ui_SummationSettingsWidget.setupUi(self, other)
self._setupBinningTypes()
def _setupBinningTypes(self):
binningTypes = ["Use custom binning", "Use binning from monitors", "Save as event data"]
for binningType in binningTypes:
self.binningType.addItem(binningType)
def METHOD_NAME(self):
self.binningType.currentIndexChanged.connect(self._handle_binning_type_changed)
self.overlayEventWorkspacesCheckbox.stateChanged.connect(self._handle_overlay_ews_changed)
self.binningOptionsLineEdit.editingFinished.connect(self._handle_binning_options_line_edit_changed)
@staticmethod
def _binning_type_to_index(bin_type):
if bin_type == BinningType.CUSTOM:
return 0
elif bin_type == BinningType.FROM_MONITORS:
return 1
elif bin_type == BinningType.SAVE_AS_EVENT_DATA:
return 2
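# (Any other binning type falls through and returns None; _draw_binning_type
# checks for None and leaves the combo box unchanged in that case.)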
def _handle_binning_type_changed(self, index):
self.binningTypeChanged.emit(index)
def _handle_binning_options_line_edit_changed(self):
# Since the text box is shared we don't
# know which of these was actually changed.
# The presenter can work it out.
self.binSettingsChanged.emit()
self.additionalTimeShiftsChanged.emit()
def _handle_overlay_ews_changed(self, state):
self.preserveEventsChanged.emit(state != 0)
def draw_settings(self, settings):
self._draw_binning_type(settings)
self._draw_bin_settings(settings)
self._draw_additional_time_shifts(settings)
self._draw_overlay_event_workspaces(settings)
def bin_settings(self):
return self.binningOptionsLineEdit.text()
def additional_time_shifts(self):
return self.binningOptionsLineEdit.text()
def _draw_overlay_event_workspaces(self, settings):
if settings.has_overlay_event_workspaces():
self.overlayEventWorkspacesCheckbox.setVisible(True)
should_be_checked = settings.is_overlay_event_workspaces_enabled()
set_checked_without_signal(self.overlayEventWorkspacesCheckbox, should_be_checked)
else:
set_checked_without_signal(self.overlayEventWorkspacesCheckbox, False)
self.overlayEventWorkspacesCheckbox.setVisible(False)
def _draw_binning_type(self, settings):
index = self._binning_type_to_index(settings.type)
if index is not None:
self.binningType.setCurrentIndex(index)
def _draw_bin_settings(self, settings):
if settings.has_bin_settings():
self._activate_line_edit("Custom Bin Boundaries:", settings.bin_settings)
elif not settings.has_additional_time_shifts():
self._deactivate_line_edit()
def _draw_additional_time_shifts(self, settings):
if settings.has_additional_time_shifts():
self._activate_line_edit("Additional Time Shifts:", settings.additional_time_shifts)
elif not settings.has_bin_settings():
self._deactivate_line_edit()
def _activate_line_edit(self, label, content):
self.binningOptionsLineEdit.setText(content)
self.binningOptionsLineEdit.setVisible(True)
self.lineEditLabel.setText(label)
self.lineEditLabel.setVisible(True)
def _deactivate_line_edit(self):
self.binningOptionsLineEdit.setText("")
self.binningOptionsLineEdit.setVisible(False)
self.lineEditLabel.setVisible(False) | null |
5,915 | from math import isclose
import pytest
from ufo2ft.filters.transformations import TransformationsFilter
@pytest.fixture(
params=[
{
"capHeight": 700,
"xHeight": 500,
"glyphs": [
{"name": "space", "width": 500},
{
"name": "a",
"width": 350,
"outline": [
("moveTo", ((0, 0),)),
("lineTo", ((300, 0),)),
("lineTo", ((300, 300),)),
("lineTo", ((0, 300),)),
("closePath", ()),
],
"anchors": [(100, 200, "top"), (100, -200, "bottom")],
},
{
"name": "b",
"width": 450,
"outline": [
("addComponent", ("a", (1, 0, 0, 1, 0, 0))),
("addComponent", ("c", (1, 0, 0, 1, 0, 0))),
("addComponent", ("a", (1, 0, 0, 1, 10, -10))),
],
},
{
"name": "c",
"outline": [
("moveTo", ((0, 0),)),
("lineTo", ((300, 0),)),
("lineTo", ((150, 300),)),
("closePath", ()),
],
},
{
"name": "d",
"outline": [("addComponent", ("b", (1, 0, 0, -1, 0, 0)))],
},
],
}
]
)
def font(request, FontClass):
font = FontClass()
font.info.capHeight = request.param["capHeight"]
font.info.xHeight = request.param["xHeight"]
for param in request.param["glyphs"]:
glyph = font.newGlyph(param["name"])
glyph.width = param.get("width", 0)
pen = glyph.getPen()
for operator, operands in param.get("outline", []):
getattr(pen, operator)(*operands)
for x, y, name in param.get("anchors", []):
glyph.appendAnchor(dict(x=x, y=y, name=name))
return font
@pytest.fixture(
params=TransformationsFilter.Origin,
ids=[e.name for e in TransformationsFilter.Origin],
)
def origin(request):
return request.param
class TransformationsFilterTest:
def test_invalid_origin_value(self):
with pytest.raises(ValueError) as excinfo:
TransformationsFilter(Origin=5)
excinfo.match(r"is not a valid (TransformationsFilter\.)?Origin")
def test_empty_glyph(self, font):
filter_ = TransformationsFilter(OffsetY=51, include={"space"})
assert not filter_(font)
def test_Identity(self, font):
filter_ = TransformationsFilter()
assert not filter_(font)
def METHOD_NAME(self, font):
filter_ = TransformationsFilter(OffsetX=-10)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (-10, 0)
assert (a.anchors[1].x, a.anchors[1].y) == (90, -200)
# base glyph was already transformed, component didn't change
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_OffsetY(self, font):
filter_ = TransformationsFilter(OffsetY=51)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (0, 51)
assert (a.anchors[1].x, a.anchors[1].y) == (100, -149)
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_OffsetXY(self, font):
filter_ = TransformationsFilter(OffsetX=-10, OffsetY=51)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (-10, 51)
assert (a.anchors[1].x, a.anchors[1].y) == (90, -149)
assert font["b"].components[0].transformation[-2:] == (0, 0)
def test_ScaleX(self, font, origin):
# different Origin heights should not affect horizontal scale
filter_ = TransformationsFilter(ScaleX=50, Origin=origin)
assert filter_(font)
a = font["a"]
assert (a[0][0].x, a[0][0].y) == (0, 0)
assert (a[0][2].x, a[0][2].y) == (150, 300)
assert a.width == 350 * 0.50
def test_ScaleY(self, font, origin):
percent = 50
filter_ = TransformationsFilter(ScaleY=percent, Origin=origin)
assert filter_(font)
factor = percent / 100
origin_height = filter_.get_origin_height(font, origin)
bottom = origin_height * factor
top = bottom + 300 * factor
a = font["a"]
# only y coords change
assert (a[0][0].x, a[0][0].y) == (0, bottom)
assert (a[0][2].x, a[0][2].y) == (300, top)
def test_ScaleXY(self, font, origin):
percent = 50
filter_ = TransformationsFilter(ScaleX=percent, ScaleY=percent, Origin=origin)
assert filter_(font)
factor = percent / 100
origin_height = filter_.get_origin_height(font, origin)
bottom = origin_height * factor
top = bottom + 300 * factor
a = font["a"]
# both x and y change
assert (a[0][0].x, a[0][0].y) == (0, bottom)
assert (a[0][2].x, a[0][2].y) == (150, top)
assert a.width == 350 * factor
def test_Slant(self, font, origin):
filter_ = TransformationsFilter(Slant=45, Origin=origin)
assert filter_(font)
origin_height = filter_.get_origin_height(font, origin)
a = font["a"]
assert isclose(a[0][0].x, -origin_height)
assert a[0][0].y == 0
def test_composite_glyphs(self, font):
filter_ = TransformationsFilter(
OffsetX=-10, OffsetY=51, ScaleX=50, ScaleY=50, exclude={"c"}
)
assert filter_(font)
b = font["b"]
# component 'a' #1 was not transformed, because the base glyph was already
# transformed, and the component's own transformation is identity
assert b.components[0].transformation == (1, 0, 0, 1, 0, 0)
# component 'c' was transformed, because base glyph was not included
assert b.components[1].transformation == (0.5, 0, 0, 0.5, -10, 51)
# component 'a' #2 was partly transformed: the base glyph was transformed, but
# the component's original transformation was not identity; thus
# it was modified to compensate for the transformation already applied to
# the base glyph (scale stays same, offsets are scaled)
assert b.components[2].transformation == (1, 0, 0, 1, 5, -5)
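# (i.e. the component's original (10, -10) offset scaled by the 50% factor)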
d = font["d"]
# component 'b' was transformed as well as its base glyph, because
# its original transform had a scale, so it was necessary to
# compensate for the transformation applied on the base glyph
assert d.components[0].transformation == (1, 0, 0, -1, 0, 102)
def test_ScaleOffset_width(self, font, origin):
percent = 50
filter_ = TransformationsFilter(
OffsetX=-100, ScaleX=percent, ScaleY=percent, Origin=origin
)
assert filter_(font)
factor = percent / 100
a = font["a"]
# The offset value here should not change the fact that the glyph
# bounding box is scaled by 50%.
assert a.width == 350 * factor | null |
5,916 | """
Copyright (c) 2012-2020 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from base_console import BaseConsole
from share_detail_console import ShareDetailConsole
from rest_util import api_call, print_shares_info, print_share_info, api_error
class SharesConsole(BaseConsole):
def __init__(self, greeting):
BaseConsole.__init__(self)
self.greeting = greeting + " Shares"
self.prompt = self.greeting + "> "
self.url = "%sshares" % BaseConsole.url
@api_error
def do_list(self, args):
url = self.url
if args:
url = "%s/%s" % (self.url, args)
shares_info = api_call(url)
print_shares_info(shares_info)
def help_list(self):
snps = "Display details of shares on the appliance"
args = ("<share_name>",)
params = {
"<share_name>": "(Optional)Name of a share",
}
examples = {
"Display details of all shares": "",
"Display details of a share called myshare": "myshare",
}
self.print_help(snps, "list", args, params, examples)
@api_error
def METHOD_NAME(self, args):
arg_fields = args.split()
if len(arg_fields) < 3:
error = "3 arguments expected. %d given" % len(arg_fields)
return self.help_wrapper(error, "add")
multiplier = 1024
num = None
try:
num = int(arg_fields[2])
except:
if len(arg_fields[2]) > 2:
try:
num = int(arg_fields[2][:-2])
except:
error = "Invalid size parameter: %s" % arg_fields[2]
return self.help_wrapper(error, "add")
suffix = arg_fields[2][-2:].lower()
if suffix == "mb":
multiplier = multiplier ** 1
elif suffix == "gb":
multiplier = multiplier ** 2
elif suffix == "tb":
multiplier = multiplier ** 3
elif suffix == "pb":
multiplier = multiplier ** 4
else:
error = (
"Invalid size suffix: %s. must be one of "
"MB, GB, TB or PB" % suffix
)
return self.help_wrapper(error, "add")
else:
error = "Invalid size parameter: %s" % arg_fields[2]
return self.help_wrapper(error, "add")
size = num * multiplier
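# e.g. "20GB" parses to 20 * 1024**2 = 20971520; with the base multiplier of
# 1024 a bare integer is treated as MB, which suggests the API expects the
# size value in KB.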
input_data = {
"sname": arg_fields[0],
"pool": arg_fields[1],
"size": size,
}
share_info = api_call(self.url, data=input_data, calltype="post")
print_share_info(share_info)
def help_add(self):
args = (
"share_name",
"pool_name",
"size",
)
params = {
"share_name": "Intended name of the share",
"pool_name": ("Pool in which to create the share. It must already exist"),
"size": (
"Intended size of the share. An integer with "
"optional suffix(MB, GB, TB, PB). When no suffix "
"is provided, MB is presumed"
),
}
examples = {
"Create a 20GB share in a pool called pool": "share1234 pool0 20GB",
"Create a 100MB share in a pool called mypool": "share100 mypool 100",
}
self.print_help(
"Create a new share", "add", args=args, params=params, examples=examples
)
@api_error
def do_resize(self, args):
try:
fields = args.split()
sname = fields[0]
new_size = int(fields[1])
except:
return self.do_help(args)
input_data = {
"size": new_size,
}
url = "%s/%s" % (self.url, sname)
share_info = api_call(url, data=input_data, calltype="put")
print_share_info(share_info)
def help_resize(self):
args = (
"share_name",
"new_size",
)
params = {
"share_name": "Name of the share to resize",
"new_size": "Desired new size of the share",
}
examples = {
"Resize a share called myshare to 100GB": "myshare 100GB",
}
self.print_help(
"Resize a share", "resize", args=args, params=params, examples=examples
)
@api_error
def do_clone(self, args):
fields = args.split()
input_data = {
"name": fields[1],
}
url = "%s/%s/clone" % (self.url, fields[0])
print(api_call(url, data=input_data, calltype="post"))
def help_clone(self):
args = (
"share_name",
"clone_name",
)
self.print_help("Clone a share", "clone", args=args)
@api_error
def do_rollback(self, args):
"""
Roll back a share to the state of one of its snapshots.
rollback <share_name> <snap_name>
"""
fields = args.split()
input_data = {
"name": fields[1],
}
url = "%s/%s/rollback" % (self.url, fields[0])
print(api_call(url, data=input_data, calltype="post"))
@api_error
def do_change_op(self, args):
"""
To change ownership and permissions
change_op share_name owner group perms
"""
fields = args.split()
input_data = {
"owner": fields[1],
"group": fields[2],
"perms": fields[3],
}
url = "%s%s/acl" % (self.url, fields[0])
share_info = api_call(url, data=input_data, calltype="post")
print_share_info(share_info)
def do_delete(self, args):
"""
Delete a share
"""
pass
def do_disable(self, args):
"""
Disable a share. Mark of deletion, but don't quite delete.
"""
pass
def do_enable(self, args):
"""
Enable a previously disabled share
"""
pass
@api_error
def do_share(self, args):
"""
To go to a share console: share share_name
"""
input_share = args.split()
if len(input_share) > 0:
sd_console = ShareDetailConsole(self.greeting, input_share[0])
if len(input_share) > 1:
return sd_console.onecmd(" ".join(input_share[1:]))
return sd_console.cmdloop() | null |
5,917 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils import server_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression import trigger_funcs_utils
from regression.python_test_utils import test_utils as utils
from . import utils as event_trigger_utils
from unittest.mock import patch
class EventTriggerPutTestCase(BaseTestGenerator):
""" This class will fetch added event trigger under test database. """
scenarios = utils.generate_scenarios('update_event_trigger',
event_trigger_utils.test_cases)
def setUp(self):
super().setUp()
self.schema_data = parent_node_dict['schema'][-1]
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.extension_name = "postgres_fdw"
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.db_user = self.server["username"]
self.func_name = "trigger_func_%s" % str(uuid.uuid4())[1:8]
self.trigger_name = "event_trigger_put_%s" % (str(uuid.uuid4())[1:8])
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add resource "
"groups.")
server_version = 0
if "type" in server_con["data"]:
if server_con["data"]["version"] < 90300:
message = "Event triggers are not supported by PG9.2 " \
"and PPAS9.2 and below."
self.skipTest(message)
self.function_info = trigger_funcs_utils.create_trigger_function(
self.server, self.db_name, self.schema_name, self.func_name,
server_version)
self.event_trigger_id = event_trigger_utils.create_event_trigger(
self.server, self.db_name, self.schema_name, self.func_name,
self.trigger_name)
def update_event_trigger(self):
"""
This functions update event trigger details
:return: Event trigger update request details
"""
return self.tester.put(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) +
'/' + str(self.event_trigger_id),
data=json.dumps(self.test_data),
follow_redirects=True)
def runTest(self):
""" This function will update event trigger under test database. """
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
func_name = self.function_info[1]
func_response = trigger_funcs_utils.verify_trigger_function(
self.server,
self.db_name,
func_name)
if not func_response:
raise Exception("Could not find the trigger function.")
trigger_response = event_trigger_utils.verify_event_trigger(
self.server, self.db_name, self.trigger_name)
if not trigger_response:
raise Exception("Could not find event trigger.")
self.test_data['id'] = self.event_trigger_id
actual_response_code = True
expected_response_code = False
if self.is_positive_test:
response = self.update_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
else:
if hasattr(self, "error_updating_event_trigger"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.update_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
if hasattr(self, "wrong_event_trigger_id"):
self.event_trigger_id = 99999
response = self.update_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
if hasattr(self, "error_in_db"):
with patch(self.mock_data["function_name"],
return_value=eval(self.mock_data["return_value"])):
response = self.update_event_trigger()
actual_response_code = response.status_code
expected_response_code = self.expected_data['status_code']
self.assertEqual(actual_response_code, expected_response_code)
def METHOD_NAME(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,918 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import re
import flask
from flask import current_app, request, session, testing
from werkzeug.datastructures import Headers
from werkzeug.test import EnvironBuilder
from flask_wtf.csrf import generate_csrf
import config
class RequestShim():
"""
A fake request that proxies cookie-related methods to a Flask test client.
"""
def __init__(self, client):
self.client = client
def set_cookie(self, key, value='', *args, **kwargs):
"Set the cookie on the Flask test client."
server_name = current_app.config["SERVER_NAME"] or "localhost"
return self.client.set_cookie(
server_name, key=key, value=value, *args, **kwargs
)
def delete_cookie(self, key, *args, **kwargs):
"Delete the cookie on the Flask test client."
server_name = current_app.config["SERVER_NAME"] or "localhost"
return self.client.delete_cookie(
server_name, key=key, *args, **kwargs
)
class TestClient(testing.FlaskClient):
def __init__(self, *args, **kwargs):
self.csrf_token = None
self.app = None
super().__init__(*args, **kwargs)
def setApp(self, _app):
self.app = _app
def METHOD_NAME(self, *args, **kwargs):
if len(args) > 0 and isinstance(args[0], (EnvironBuilder, dict)):
return super().METHOD_NAME(*args, **kwargs)
data = kwargs.get('data', {})
if self.csrf_token is not None and not (
'email' in data and
'password' in data and
'csrf_token' in data
):
api_key_headers = Headers({})
api_key_headers[
getattr(config, 'WTF_CSRF_HEADERS', ['X-CSRFToken'])[0]
] = self.csrf_token
headers = kwargs.pop('headers', Headers())
headers.extend(api_key_headers)
kwargs['headers'] = headers
return super().METHOD_NAME(*args, **kwargs)
def fetch_csrf(self, res):
m = re.search(
b'<input id="csrf_token" name="csrf_token" type="hidden"'
b' value="([^"]*)">', res.data
)
if m is None:
# When logging in through Kerberos, we won't find the CSRF token
return None
return m.group(1).decode("utf-8")
def generate_csrf_token(self, *args, **kwargs):
# First, we'll wrap our request shim around the test client, so
# that it will work correctly when Flask asks it to set a cookie.
request = RequestShim(self)
# Next, we need to look up any cookies that might already exist on
# this test client, such as the secure cookie that
# powers `flask.session`,
# and make a test request context that has those cookies in it.
environ_overrides = {}
self.cookie_jar.inject_wsgi(environ_overrides)
with self.app.test_request_context():
# Now, we call Flask-WTF's method of generating a CSRF token...
csrf_token = generate_csrf()
# ...which also sets a value in `flask.session`, so we need to
# ask Flask to save that value to the cookie jar in the test
# client. This is where we actually use that request shim we
# made!
self.app.session_interface.save_session(
self.app, flask.session, request)
return csrf_token
def login(self, email, password, _follow_redirects=False,
headers=None, extra_form_data=dict()):
csrf_token = None
if config.SERVER_MODE is True:
res = self.get('/login',
follow_redirects=_follow_redirects)
csrf_token = self.fetch_csrf(res)
if csrf_token is None:
csrf_token = self.generate_csrf_token()
form_data = dict(
email=email,
password=password,
csrf_token=csrf_token
)
if extra_form_data:
form_data.update(extra_form_data)
res = self.post(
'/authenticate/login', data=form_data,
follow_redirects=_follow_redirects,
headers=headers
)
self.csrf_token = csrf_token
return res
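# Illustrative login flow (a sketch; assumes the Flask app under test sets
# app.test_client_class = TestClient so that app.test_client() returns an
# instance of this class):
#
# client = app.test_client()
# client.setApp(app)
# res = client.login('user@example.com', 'secret')
# client.logout()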
def logout(self):
self.get('/logout?next=/browser/', follow_redirects=False)
self.csrf_token = None | null |
5,919 | import hail as hl
from .hgvs import hgvsp_from_consequence_amino_acids
from .vep import consequence_term_rank
OMIT_CONSEQUENCE_TERMS = hl.set(["upstream_gene_variant", "downstream_gene_variant"])
def METHOD_NAME(variants_path, transcripts_path, mane_transcripts_path=None):
ds = hl.read_table(variants_path)
most_severe_consequence = ds.vep.most_severe_consequence
transcript_consequences = ds.vep.transcript_consequences
# Drop irrelevant consequences
transcript_consequences = transcript_consequences.map(
lambda c: c.annotate(
consequence_terms=c.consequence_terms.filter(
lambda t: ~OMIT_CONSEQUENCE_TERMS.contains(t) # pylint: disable=invalid-unary-operand-type
)
)
).filter(lambda c: c.consequence_terms.size() > 0)
# Add/transmute derived fields
transcript_consequences = transcript_consequences.map(
lambda c: c.annotate(major_consequence=hl.sorted(c.consequence_terms, key=consequence_term_rank)[0])
).map(
lambda c: c.annotate(
domains=hl.set(c.domains.map(lambda domain: domain.db + ":" + domain.name).filter(hl.is_defined)),
hgvsc=c.hgvsc.split(":")[-1],
hgvsp=hgvsp_from_consequence_amino_acids(c),
is_canonical=hl.bool(c.canonical),
)
)
transcript_consequences = transcript_consequences.map(
lambda c: c.select(
"biotype",
"consequence_terms",
"domains",
"gene_id",
"gene_symbol",
"hgvsc",
"hgvsp",
"is_canonical",
"lof_filter",
"lof_flags",
"lof",
"major_consequence",
"polyphen_prediction",
"sift_prediction",
"transcript_id",
)
)
transcripts = hl.read_table(transcripts_path)
# TODO: This can potentially be improved by removing Table.collect
# See https://hail.zulipchat.com/#narrow/stream/123010-Hail-0.2E2.20support/topic/Optimize.20annotation.20with.20small.20dataset
# and https://github.com/Nealelab/ukb_common/blob/ad94d20f8c9f3b711e40a473425925775f0b1f30/utils/generic.py#L18
transcript_info = hl.dict(
[
(row.transcript_id, row.transcript_info)
for row in transcripts.select(
transcript_info=hl.struct(
transcript_version=transcripts.transcript_version,
gene_version=transcripts.gene.gene_version,
)
).collect()
]
)
transcript_consequences = transcript_consequences.map(
lambda csq: csq.annotate(**transcript_info.get(csq.transcript_id))
)
if mane_transcripts_path:
mane_transcripts = hl.read_table(mane_transcripts_path)
mane_transcripts_version = hl.eval(mane_transcripts.globals.version)
mane_transcripts = hl.dict([(row.gene_id, row.drop("gene_id")) for row in mane_transcripts.collect()])
transcript_consequences = transcript_consequences.map(
lambda csq: csq.annotate(
**hl.rbind(
mane_transcripts.get(csq.gene_id),
lambda mane_transcript: (
hl.case()
.when(
(mane_transcript.ensembl_id == csq.transcript_id)
& (mane_transcript.ensembl_version == csq.transcript_version),
hl.struct(
is_mane_select=True,
is_mane_select_version=True,
refseq_id=mane_transcript.refseq_id,
refseq_version=mane_transcript.refseq_version,
),
)
.when(
mane_transcript.ensembl_id == csq.transcript_id,
hl.struct(
is_mane_select=True,
is_mane_select_version=False,
refseq_id=hl.null(hl.tstr),
refseq_version=hl.null(hl.tstr),
),
)
.default(
hl.struct(
is_mane_select=False,
is_mane_select_version=False,
refseq_id=hl.null(hl.tstr),
refseq_version=hl.null(hl.tstr),
)
)
),
)
)
)
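# Order consequences so protein-coding transcripts sort first, then those whose
# major consequence matches the variant's most severe consequence, then MANE
# Select transcripts, then Ensembl canonical transcripts.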
transcript_consequences = hl.sorted(
transcript_consequences,
lambda c: (
hl.if_else(c.biotype == "protein_coding", 0, 1, missing_false=True),
hl.if_else(c.major_consequence == most_severe_consequence, 0, 1, missing_false=True),
hl.if_else(c.is_mane_select, 0, 1, missing_false=True),
hl.if_else(c.is_canonical, 0, 1, missing_false=True),
),
)
ds = ds.annotate(transcript_consequences=transcript_consequences).drop("vep")
ds = ds.annotate_globals(mane_transcripts_version=mane_transcripts_version)
else:
transcript_consequences = hl.sorted(
transcript_consequences,
lambda c: (
hl.if_else(c.biotype == "protein_coding", 0, 1, missing_false=True),
hl.if_else(c.major_consequence == most_severe_consequence, 0, 1, missing_false=True),
hl.if_else(c.is_canonical, 0, 1, missing_false=True),
),
)
ds = ds.annotate(transcript_consequences=transcript_consequences).drop("vep")
return ds | null |
5,920 | # -*- coding: utf-8 -*-
###
# (C) Copyright [2019] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from hpeOneView.resources.resource import Resource, ensure_resource_client, unavailable_method
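# Usage sketch (illustrative only; assumes an authenticated OneViewClient
# instance named `oneview_client` created elsewhere):
#
# sas_li = oneview_client.sas_logical_interconnects.get_by_name('SAS Logical Interconnect 1')
# print(sas_li.get_firmware())
# sas_li.update_compliance()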
class SasLogicalInterconnects(Resource):
"""
SAS Logical Interconnects API client.
"""
URI = '/rest/sas-logical-interconnects'
def __init__(self, connection, data=None):
super(SasLogicalInterconnects, self).__init__(connection, data)
def get_all(self, start=0, count=-1, fields='', filter='', query='', sort='', view=''):
"""
Gets a list of SAS Logical Interconnects based on optional sorting and filtering and constrained by start and
count parameters.
Args:
start:
The first item to return, using 0-based indexing. If not specified, the default is 0 - start with the
first available item.
count:
The number of resources to return. A count of -1 requests all items. The actual number of items in
the response may differ from the requested count if the sum of start and count exceeds the total number
of items.
fields:
Specifies which fields should be returned in the result set.
filter (list or str):
A general filter/query string to narrow the list of items returned. The default is no filter; all
resources are returned.
query:
A general query string to narrow the list of resources returned. The default is no query (all
resources are returned).
sort:
The sort order of the returned data set. By default, the sort order is based on create time, with the
oldest entry first.
view:
Returns a specific subset of the attributes of the resource or collection, by specifying the name of a
predefined view. The default view is expand (show all attributes of the resource and all elements of
collections of resources).
Returns:
list: A list of SAS logical interconnects.
"""
return self._helper.get_all(start=start, count=count, filter=filter, query=query, sort=sort, view=view,
fields=fields)
@ensure_resource_client
def update_firmware(self, firmware_information, force=False):
"""
Installs firmware to the member interconnects of a SAS Logical Interconnect.
Args:
firmware_information: Options to install firmware to a SAS Logical Interconnect.
force: If set to true, the operation completes despite any problems with network connectivity
or errors on the resource itself.
Returns:
dict: SAS Logical Interconnect Firmware.
"""
firmware_uri = "{}/firmware".format(self.data["uri"])
result = self._helper.update(firmware_information, firmware_uri, force=force)
self.refresh()
return result
@ensure_resource_client
def get_firmware(self):
"""
Gets baseline firmware information for a SAS Logical Interconnect.
Returns:
dict: SAS Logical Interconnect Firmware.
"""
firmware_uri = "{}/firmware".format(self.data["uri"])
return self._helper.do_get(firmware_uri)
def update_compliance_all(self, information, timeout=-1):
"""
Returns SAS Logical Interconnects to a consistent state. The current SAS Logical Interconnect state is
compared to the associated SAS Logical Interconnect group.
Args:
information: Can be either the resource ID or URI.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: SAS Logical Interconnect.
"""
uri = self.URI + "/compliance"
result = self._helper.update(information, uri, timeout=timeout)
return result
@ensure_resource_client
def update_compliance(self, timeout=-1):
"""
Returns a SAS Logical Interconnect to a consistent state. The current SAS Logical Interconnect state is
compared to the associated SAS Logical Interconnect group.
Args:
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
Returns:
dict: SAS Logical Interconnect.
"""
uri = "{}/compliance".format(self.data["uri"])
result = self._helper.update({}, uri, timeout=timeout)
self.refresh()
return result
@ensure_resource_client
def METHOD_NAME(self, information):
"""
When a drive enclosure has been physically replaced, initiate the replacement operation that enables the
new drive enclosure to take over as a replacement for the prior drive enclosure. The request requires
the serial numbers of both the original drive enclosure and its replacement to be provided.
Args:
information: Options to replace the drive enclosure.
Returns:
dict: SAS Logical Interconnect.
"""
uri = "{}/replaceDriveEnclosure".format(self.data["uri"])
result = self._helper.create(information, uri)
self.refresh()
return result
@ensure_resource_client
def update_configuration(self):
"""
Asynchronously applies or re-applies the SAS Logical Interconnect configuration to all managed interconnects
of a SAS Logical Interconnect.
Returns:
dict: SAS Logical Interconnect.
"""
uri = "{}/configuration".format(self.data["uri"])
result = self._helper.update({}, uri)
self.refresh()
return result
def create(self):
"""Create method is not available"""
unavailable_method()
def delete(self):
"""Delete method is not available"""
unavailable_method()
def update(self):
"""update method is not available"""
unavailable_method() | null |
5,921 | #!/usr/bin/env python
"""Random sequences and random evolution of sequences in a tree"""
import bisect
import numpy
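# argpicks yields random indices i with probability freqs[i] via inverse-CDF
# sampling: e.g. freqs == [0.2, 0.3, 0.5] gives partition == [0.2, 0.5, 1.0],
# so a uniform draw of 0.4 falls in the second bin and index 1 is yielded.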
def argpicks(freqs, random_series):
partition = numpy.add.accumulate(freqs)
assert abs(partition[-1] - 1.0) < 1e-6, (freqs, partition)
while True:
x = random_series.uniform(0.0, 1.0)
i = bisect.bisect_left(partition, x)
yield i
def argpick(freqs, random_series):
return next(argpicks(freqs, random_series))
def _randomMotifGenerator(random_series, motif_probs):
motifs = list(motif_probs.keys())
freqs = [motif_probs[m] for m in motifs]
for i in argpicks(freqs, random_series):
yield motifs[i]
def METHOD_NAME(
random_series, motifs, parent_seq, site_cats, psubs, preserved_sites=()
):
"""Evolve a new sequence derived from parent_seq. Uses psubs[site_cats[i]]
to pick a new motif derived from parent_seq[i]"""
seq = []
randomMotifSources = {}
for i, parent_motif in enumerate(parent_seq):
if i in preserved_sites:
edge_motif = preserved_sites[i]
else:
if parent_motif not in randomMotifSources:
mprobs = {}
parent_motif_index = motifs.index(parent_motif)
site_cat = site_cats[i]
psub = psubs[site_cat]
for dest_motif_index, dest_motif in enumerate(motifs):
prob = psub[parent_motif_index, dest_motif_index]
mprobs[dest_motif] = prob
randomMotifSources[site_cat, parent_motif] = _randomMotifGenerator(
random_series, mprobs
)
edge_motif = next(randomMotifSources[site_cat, parent_motif])
seq.append(edge_motif)
return seq
def random_sequence(random_series, motif_probs, sequence_length):
getRootRandomMotif = _randomMotifGenerator(random_series, motif_probs).__next__
return [getRootRandomMotif() for i in range(sequence_length)]
class AlignmentEvolver(object):
# Encapsulates settings that are constant throughout the recursive generation
# of a synthetic alignment.
def __init__(
self,
random_series,
orig_ambig,
exclude_internal,
bin_names,
site_bins,
psub_for,
motifs,
):
self.random_series = random_series
self.orig_ambig = orig_ambig
self.exclude_internal = exclude_internal
self.bin_names = bin_names
self.site_bins = site_bins
self.psub_for = psub_for
self.motifs = motifs
def __call__(self, tree, root_sequence):
# probsd = dict(enumerate(self.bin_probs))
# bprobs = _randomMotifGenerator(self.random_series, probsd)
# site_bins = [bprobs.next() for c in range(len(root_sequence))]
return self.generate_simulated_seqs(tree, root_sequence)
def generate_simulated_seqs(self, parent, parent_seq):
"""recursively generate the descendant sequences by descending the tree
from root.
Each child will be set by mutating the parent motif based on the probs
in the psub matrix of this edge.
random_series - get a random numer 0-1 by calling random_series.random()
length - the desired alignment length
parent - the edge structure.
parent_seq - the corresponding sequence. This will be mutated for each
of its children, based on their psub matricies.
"""
# This depends on parameter names 'mprobs', 'alignment2', 'bprobs' and
# 'psubs'. Might be better to integrate it into likelihood_calculation.
if self.exclude_internal and parent.children:
simulated_sequences = {}
else:
simulated_sequences = {parent.name: "".join(parent_seq)}
for edge in parent.children:
# The result for this edge - a list of motifs
# Keep original ambiguity codes
if edge.name in self.orig_ambig:
orig_seq_ambig = self.orig_ambig[edge.name]
else:
orig_seq_ambig = {}
# Matrix of substitution probabilities
psubs = [self.psub_for(edge.name, bin) for bin in self.bin_names]
# Make the semi-random sequence for this edge.
edge_seq = METHOD_NAME(
self.random_series,
self.motifs,
parent_seq,
self.site_bins,
psubs,
orig_seq_ambig,
)
# Pass this new edge sequence on down the tree
descendant_sequences = self.generate_simulated_seqs(edge, edge_seq)
simulated_sequences.update(descendant_sequences)
return simulated_sequences | null |
5,922 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Defined callback for DeepFM.
"""
import time
from mindspore.train import Callback
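# Usage sketch (illustrative only; `model`, `ds_train`, `ds_eval`, `auc_metric`
# and `epoch_size` are assumed to be built elsewhere in the training script):
#
# callbacks = [TimeMonitor(ds_train.get_dataset_size()),
#              LossCallBack('./loss.log'),
#              EvalCallBack(model, ds_eval, auc_metric, './eval.log')]
# model.train(epoch_size, ds_train, callbacks=callbacks)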
def METHOD_NAME(file_path, out_str):
with open(file_path, 'a+', encoding='utf-8') as file_out:
file_out.write(out_str + '\n')
class EvalCallBack(Callback):
"""
Monitor the loss in training.
If the loss is NAN or INF, training is terminated.
Note:
If per_print_times is 0, the loss is not printed.
"""
def __init__(self, model, eval_dataset, auc_metric, eval_file_path):
super(EvalCallBack, self).__init__()
self.model = model
self.eval_dataset = eval_dataset
self.aucMetric = auc_metric
self.aucMetric.clear()
self.eval_file_path = eval_file_path
def epoch_end(self, run_context):
start_time = time.time()
out = self.model.eval(self.eval_dataset)
eval_time = int(time.time() - start_time)
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
out_str = "{} EvalCallBack metric{}; eval_time{}s".format(
time_str, out.values(), eval_time)
print(out_str)
METHOD_NAME(self.eval_file_path, out_str)
class LossCallBack(Callback):
"""
Monitor the loss in training.
If the loss is NAN or INF, training is terminated.
Note:
If per_print_times is 0, the loss is not printed.
Args:
loss_file_path (str): Absolute path of the file the loss is written to.
per_print_times (int): Print the loss every `per_print_times` steps. Default: 1.
"""
def __init__(self, loss_file_path, per_print_times=1):
super(LossCallBack, self).__init__()
if not isinstance(per_print_times, int) or per_print_times < 0:
raise ValueError("print_step must be int and >= 0.")
self.loss_file_path = loss_file_path
self._per_print_times = per_print_times
self.loss = 0
def step_end(self, run_context):
cb_params = run_context.original_args()
loss = cb_params.net_outputs.asnumpy()
cur_step_in_epoch = (cb_params.cur_step_num - 1) % cb_params.batch_num + 1
cur_num = cb_params.cur_step_num
if self._per_print_times != 0 and cur_num % self._per_print_times == 0:
with open(self.loss_file_path, "a+") as loss_file:
time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
loss_file.write("{} epoch: {} step: {}, loss is {}\n".format(
time_str, cb_params.cur_epoch_num, cur_step_in_epoch, loss))
print("epoch: {} step: {}, loss is {}\n".format(
cb_params.cur_epoch_num, cur_step_in_epoch, loss))
self.loss = loss
class TimeMonitor(Callback):
"""
Time monitor for calculating cost of each epoch.
Args:
data_size (int): Number of steps in one epoch.
"""
def __init__(self, data_size):
super(TimeMonitor, self).__init__()
self.data_size = data_size
self.per_step_time = 0
def epoch_begin(self, run_context):
self.epoch_time = time.time()
def epoch_end(self, run_context):
epoch_mseconds = (time.time() - self.epoch_time) * 1000
per_step_mseconds = epoch_mseconds / self.data_size
print("epoch time: {0}, per step time: {1}".format(epoch_mseconds, per_step_mseconds), flush=True)
self.per_step_time = per_step_mseconds
def step_begin(self, run_context):
self.step_time = time.time()
def step_end(self, run_context):
step_mseconds = (time.time() - self.step_time) * 1000
print(f"step time {step_mseconds}", flush=True) | null |
5,923 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops.operations.comm_ops import _VirtualDataset
from tests.ut.python.ops.test_math_ops import VirtualLoss
def setup_function():
context.set_auto_parallel_context(dataset_strategy="full_batch")
context.set_context(mode=context.GRAPH_MODE)
grad_all = C.GradOperation(get_all=True)
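# Note: strategy1/strategy2 below are shard strategies. Each tuple entry gives
# the number of device slices along the corresponding dimension of the data
# input and of segment_ids respectively, e.g. (2, 1, 2) splits a 3-D tensor
# across 2 devices on axis 0 and 2 devices on axis 2.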
class Net(nn.Cell):
def __init__(self, strategy1, strategy2, num_segments):
super(Net, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.merge_op = P.UnsortedSegmentMax().shard((strategy1, strategy2))
self.num_segments = num_segments
def construct(self, vectors, segment_ids):
predict = self.merge_op(vectors, segment_ids, self.num_segments)
return predict
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x, y):
return grad_all(self.network)(x, y)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y):
predict = self.network(x, y)
return self.loss(predict)
def compile_graph(x, y, segments, strategy1, strategy2, auto=False):
net = GradWrap(NetWithLoss(Net(strategy1, strategy2, segments)))
net.set_train()
if auto:
context.set_auto_parallel_context(parallel_mode="auto_parallel")
else:
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
_cell_graph_executor.compile(net, x, y)
def test_UnsortedSegmentMax_model_parallel_slice_1d():
context.set_auto_parallel_context(device_num=8, global_rank=0)
x = Tensor(np.ones(8), ms.float32)
y = Tensor(np.ones(8), ms.int32)
num_segments = 16
strategy1 = (8,)
strategy2 = (8,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_no_slice_1d():
context.set_auto_parallel_context(device_num=8, global_rank=0)
x = Tensor(np.ones(8), ms.float32)
y = Tensor(np.ones(8), ms.int32)
num_segments = 16
strategy1 = (1,)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def METHOD_NAME():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.arange(4), ms.int32)
num_segments = 4
strategy1 = (4, 1)
strategy2 = (4,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_vector_slice_2d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (1, 4)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_vector_slice_3d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (1, 2, 2)
strategy2 = (1,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_index_vector_slice_2d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 8)), ms.float32)
y = Tensor(np.ones(4), ms.int32)
num_segments = 4
strategy1 = (2, 2)
strategy2 = (2,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_index_vector_slice_3d():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.float32)
y = Tensor(np.ones((4)), ms.int32)
num_segments = 16
strategy1 = (2, 1, 2)
strategy2 = (2,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_float16():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.float16)
y = Tensor(np.ones((4)), ms.int32)
num_segments = 16
strategy1 = (2, 1, 2)
strategy2 = (2,)
compile_graph(x, y, num_segments, strategy1, strategy2)
def test_UnsortedSegmentMax_model_parallel_int32():
context.set_auto_parallel_context(device_num=4, global_rank=0)
x = Tensor(np.ones((4, 4, 8)), ms.int32)
y = Tensor(np.ones((4)), ms.int32)
num_segments = 16
strategy1 = (2, 1, 2)
strategy2 = (2,)
compile_graph(x, y, num_segments, strategy1, strategy2) | null |
5,924 | import pytest
from pybind11_tests import ConstructorStats
def test_regressions():
from pybind11_tests.issues import print_cchar, print_char
# #137: const char* isn't handled properly
assert print_cchar("const char *") == "const char *"
# #150: char bindings broken
assert print_char("c") == "c"
def test_dispatch_issue(msg):
"""#159: virtual function dispatch has problems with similar-named functions"""
from pybind11_tests.issues import DispatchIssue, dispatch_issue_go
class PyClass1(DispatchIssue):
def dispatch(self):
return "Yay.."
class PyClass2(DispatchIssue):
def dispatch(self):
with pytest.raises(RuntimeError) as excinfo:
super(PyClass2, self).dispatch()
assert msg(excinfo.value) == 'Tried to call pure virtual function "Base::dispatch"'
p = PyClass1()
return dispatch_issue_go(p)
b = PyClass2()
assert dispatch_issue_go(b) == "Yay.."
def test_reference_wrapper():
"""#171: Can't return reference wrappers (or STL data structures containing them)"""
from pybind11_tests.issues import Placeholder, return_vec_of_reference_wrapper
assert str(return_vec_of_reference_wrapper(Placeholder(4))) == \
"[Placeholder[1], Placeholder[2], Placeholder[3], Placeholder[4]]"
def test_iterator_passthrough():
"""#181: iterator passthrough did not compile"""
from pybind11_tests.issues import iterator_passthrough
assert list(iterator_passthrough(iter([3, 5, 7, 9, 11, 13, 15]))) == [3, 5, 7, 9, 11, 13, 15]
def test_shared_ptr_gc():
"""// #187: issue involving std::shared_ptr<> return value policy & garbage collection"""
from pybind11_tests.issues import ElementList, ElementA
el = ElementList()
for i in range(10):
el.add(ElementA(i))
pytest.gc_collect()
for i, v in enumerate(el.get()):
assert i == v.value()
def test_no_id(msg):
from pybind11_tests.issues import get_element, expect_float, expect_int
with pytest.raises(TypeError) as excinfo:
get_element(None)
assert msg(excinfo.value) == """
get_element(): incompatible function arguments. The following argument types are supported:
1. (arg0: m.issues.ElementA) -> int
Invoked with: None
"""
with pytest.raises(TypeError) as excinfo:
expect_int(5.2)
assert msg(excinfo.value) == """
expect_int(): incompatible function arguments. The following argument types are supported:
1. (arg0: int) -> int
Invoked with: 5.2
"""
assert expect_float(12) == 12
def test_str_issue(msg):
"""Issue #283: __str__ called on uninitialized instance when constructor arguments invalid"""
from pybind11_tests.issues import StrIssue
assert str(StrIssue(3)) == "StrIssue[3]"
with pytest.raises(TypeError) as excinfo:
str(StrIssue("no", "such", "constructor"))
assert msg(excinfo.value) == """
__init__(): incompatible constructor arguments. The following argument types are supported:
1. m.issues.StrIssue(arg0: int)
2. m.issues.StrIssue()
Invoked with: 'no', 'such', 'constructor'
"""
def test_nested():
""" #328: first member in a class can't be used in operators"""
from pybind11_tests.issues import NestA, NestB, NestC, get_NestA, get_NestB, get_NestC
a = NestA()
b = NestB()
c = NestC()
a += 10
assert get_NestA(a) == 13
b.a += 100
assert get_NestA(b.a) == 103
c.b.a += 1000
assert get_NestA(c.b.a) == 1003
b -= 1
assert get_NestB(b) == 3
c.b -= 3
assert get_NestB(c.b) == 1
c *= 7
assert get_NestC(c) == 35
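    # as_base() returns a reference to the embedded base subobject, so writes made
    # through one handle are visible through the other, and holding `abase` keeps
    # the owning object alive (re-checked after `del a` further down).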
abase = a.as_base()
assert abase.value == -2
a.as_base().value += 44
assert abase.value == 42
assert c.b.a.as_base().value == -2
c.b.a.as_base().value += 44
assert c.b.a.as_base().value == 42
del c
pytest.gc_collect()
    del a  # Shouldn't delete while abase is still alive
pytest.gc_collect()
assert abase.value == 42
del abase, b
pytest.gc_collect()
def test_move_fallback():
from pybind11_tests.issues import get_moveissue1, get_moveissue2
m2 = get_moveissue2(2)
assert m2.value == 2
m1 = get_moveissue1(1)
assert m1.value == 1
def METHOD_NAME():
from pybind11_tests.issues import OverrideTest
o = OverrideTest("asdf")
# Not allowed (see associated .cpp comment)
# i = o.str_ref()
# assert o.str_ref() == "asdf"
assert o.str_value() == "asdf"
assert o.A_value().value == "hi"
a = o.A_ref()
assert a.value == "hi"
a.value = "bye"
assert a.value == "bye"
def test_operators_notimplemented(capture):
from pybind11_tests.issues import OpTest1, OpTest2
with capture:
c1, c2 = OpTest1(), OpTest2()
c1 + c1
c2 + c2
c2 + c1
c1 + c2
assert capture == """
Add OpTest1 with OpTest1
Add OpTest2 with OpTest2
Add OpTest2 with OpTest1
Add OpTest2 with OpTest1
"""
def test_iterator_rvpolicy():
""" Issue 388: Can't make iterators via make_iterator() with different r/v policies """
from pybind11_tests.issues import make_iterator_1
from pybind11_tests.issues import make_iterator_2
assert list(make_iterator_1()) == [1, 2, 3]
assert list(make_iterator_2()) == [1, 2, 3]
assert not isinstance(make_iterator_1(), type(make_iterator_2()))
def test_dupe_assignment():
""" Issue 461: overwriting a class with a function """
from pybind11_tests.issues import dupe_exception_failures
assert dupe_exception_failures() == []
def test_enable_shared_from_this_with_reference_rvp():
""" Issue #471: shared pointer instance not dellocated """
from pybind11_tests import SharedParent, SharedChild
parent = SharedParent()
child = parent.get_child()
cstats = ConstructorStats.get(SharedChild)
assert cstats.alive() == 1
del child, parent
assert cstats.alive() == 0
def test_non_destructed_holders():
""" Issue #478: unique ptrs constructed and freed without destruction """
from pybind11_tests import SpecialHolderObj
a = SpecialHolderObj(123)
b = a.child()
assert a.val == 123
assert b.val == 124
cstats = SpecialHolderObj.holder_cstats()
assert cstats.alive() == 1
del b
assert cstats.alive() == 1
del a
assert cstats.alive() == 0
def test_complex_cast(capture):
""" Issue #484: number conversion generates unhandled exceptions """
from pybind11_tests.issues import test_complex
with capture:
test_complex(1)
test_complex(2j)
assert capture == """
1.0
(0.0, 2.0)
"""
def test_inheritance_override_def_static():
from pybind11_tests.issues import MyBase, MyDerived
b = MyBase.make()
d1 = MyDerived.make2()
d2 = MyDerived.make()
assert isinstance(b, MyBase)
assert isinstance(d1, MyDerived)
assert isinstance(d2, MyDerived) | null |
5,925 | import struct
from ..base import TestBase
from ...messaging.serialization import DefaultStruct, PackError, Serializable, Serializer
class Short(Serializable):
format_list = ["H"]
def __init__(self, number):
self.number = number
def to_pack_list(self):
return [("H", self.number)]
@classmethod
def from_unpack_list(cls, *args):
return Short(*args)
class Byte(Serializable):
format_list = ["B"]
def __init__(self, byte):
self.byte = byte
def to_pack_list(self):
return [("B", self.byte)]
@classmethod
def from_unpack_list(cls, *args):
return Byte(*args)
class Nested(Serializable):
format_list = [[Byte]]
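    # A nested list in format_list marks a variable-length list of sub-payloads;
    # it pairs with the ('payload-list', ...) entry emitted by to_pack_list() below.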
def __init__(self, byte_list):
self.byte_list = byte_list
def to_pack_list(self):
return [('payload-list', self.byte_list)]
@classmethod
def from_unpack_list(cls, *args):
return Nested(*args)
class TestSerializer(TestBase):
def setUp(self):
super().setUp()
self.serializer = Serializer()
def check_pack_unpack(self, format_ser, format_unser, value):
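        """
        Pack ``value`` with the packer for ``format_ser``, unpack it again with the
        packer for ``format_unser`` and assert that the round trip preserves it.
        """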
packer = self.serializer.get_packer_for(format_ser)
values = (value,) if not isinstance(value, (list, tuple)) else value
serialized = packer.pack(*values)
unpack_list = []
packer = self.serializer.get_packer_for(format_unser)
packer.unpack(serialized, 0, unpack_list)
self.assertEqual(value, unpack_list[0])
def test_pack_bool_true(self):
"""
Check if 'true' booleans can be correctly packed and unpacked.
"""
self.check_pack_unpack('?', '?', True)
def test_pack_bool_false(self):
"""
Check if 'false' booleans can be correctly packed and unpacked.
"""
self.check_pack_unpack('?', '?', False)
def METHOD_NAME(self):
"""
Check if a 0 (unsigned byte) can be correctly packed and unpacked.
"""
self.check_pack_unpack('B', 'B', 0)
def test_pack_byte_1(self):
"""
Check if a 1 (unsigned byte) can be correctly packed and unpacked.
"""
self.check_pack_unpack('B', 'B', 1)
def test_pack_byte_255(self):
"""
Check if a 255 (unsigned byte) can be correctly packed and unpacked.
"""
self.check_pack_unpack('B', 'B', 255)
def test_pack_byte_256(self):
"""
Check if a 256 (unsigned byte) throws a struct.error.
"""
self.assertRaises(struct.error, self.check_pack_unpack, 'B', 'B', 256)
def test_unpack_short_truncated(self):
"""
        Check that a 1-byte string cannot be unpacked as a short.
"""
self.assertRaises(struct.error, self.check_pack_unpack, 'B', 'H', 255)
def test_pack_list(self):
"""
Check if a list of shorts is correctly packed and unpacked.
"""
self.check_pack_unpack('HH', 'HH', (0, 1337))
def test_get_formats(self):
"""
Check if all reported formats contain valid packers.
"""
formats = self.serializer.get_available_formats()
for format in formats:
packer = self.serializer.get_packer_for(format)
pack_name = f"{packer.__class__.__name__}({format})"
self.assertTrue(hasattr(packer, 'pack'), msg='%s has no pack() method' % pack_name)
self.assertTrue(callable(getattr(packer, 'pack')), msg='%s.pack is not a method' % pack_name)
self.assertTrue(hasattr(packer, 'unpack'), msg='%s has no unpack() method' % pack_name)
self.assertTrue(callable(getattr(packer, 'unpack')), msg='%s.unpack is not a method' % pack_name)
def test_add_packer(self):
"""
Check if we can add a packer on the fly.
"""
self.serializer.add_packer("H_LE", DefaultStruct("<H")) # little-endian
serialized = self.serializer.get_packer_for("H_LE").pack(1) # Packed as 01 00
unpacked = []
self.serializer.get_packer_for("H_LE").unpack(serialized, 0, unpacked) # little-endian, unpacked as 00 01 = 1
self.serializer.get_packer_for("H").unpack(serialized, 0, unpacked) # big-endian, unpacked as 01 00 = 256
self.assertEqual([1, 256], unpacked)
def test_nested_serializable(self):
"""
Check if we can unpack nested serializables.
"""
instance = Short(123)
data = self.serializer.pack_serializable(instance)
output, _ = self.serializer.unpack_serializable(Short, data)
self.assertEqual(instance.number, output.number)
def test_serializable_byte_256(self):
"""
Check if pack_serializable of a 256 (unsigned byte) raises a PackError.
"""
self.assertRaises(PackError, self.serializer.pack_serializable, Byte(256))
def test_serializable_short_from_byte(self):
"""
        Check that unpack_serializable of a short from a byte raises a PackError.
"""
serialized = self.serializer.pack_serializable(Byte(1))
self.assertRaises(PackError, self.serializer.unpack_serializable, Short, serialized)
def test_serializable_list(self):
"""
Check if we can (un)pack serializables easily.
"""
instance1 = Short(123)
instance2 = Short(456)
data = self.serializer.pack_serializable_list([instance1, instance2])
deserialized = self.serializer.unpack_serializable_list([Short, Short], data)
self.assertEqual(instance1.number, 123)
self.assertEqual(instance1.number, deserialized[0].number)
self.assertEqual(instance2.number, 456)
self.assertEqual(instance2.number, deserialized[1].number)
def test_serializable_list_extra_data(self):
"""
Check if we throw an error when we have too much data to unpack.
"""
instance1 = Short(123)
instance2 = Short(456)
data = self.serializer.pack_serializable_list([instance1, instance2])
self.assertRaises(PackError, self.serializer.unpack_serializable_list, [Short, Short], data + b"Nope.avi")
def test_nested_payload_list(self):
serializable = Nested([Byte(1), Byte(2)])
data = self.serializer.pack_serializable(serializable)
decoded, _ = self.serializer.unpack_serializable(Nested, data)
self.assertEqual(serializable.byte_list[0].byte, decoded.byte_list[0].byte)
self.assertEqual(serializable.byte_list[1].byte, decoded.byte_list[1].byte) | null |
5,926 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import os
import systemtesting
import shutil
import mantid.simpleapi as mantid
from mantid import config
from isis_powder import Gem, SampleDetails
DIRS = config["datasearch.directories"].split(";")
# Setup various path details
inst_name = "GEM"
user_name = "Test"
# Relative to system data folder
working_folder_name = "ISIS_Powder"
# Relative to working folder
input_folder_name = "input"
output_folder_name = "output"
# Relative to input folder
calibration_folder_name = os.path.join("calibration", inst_name.lower())
calibration_map_rel_path = os.path.join("yaml_files", "gem_system_test_mapping.yaml")
cycle = "17_1"
spline_rel_path = os.path.join(cycle, "VanSplined_83608_offsets_2011_cycle111b.cal.nxs")
# Generate paths for the tests
# This implies DIRS[0] is the system test data folder
working_dir = os.path.join(DIRS[0], working_folder_name)
input_dir = os.path.join(working_dir, input_folder_name)
output_dir = os.path.join(working_dir, output_folder_name)
calibration_map_path = os.path.join(input_dir, calibration_map_rel_path)
calibration_dir = os.path.join(input_dir, calibration_folder_name)
spline_path = os.path.join(calibration_dir, spline_rel_path)
generated_offset = os.path.join(calibration_dir, "19_1")
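# spline_path is where create_vanadium writes its splined vanadium workspace and
# where the focus tests expect to find (or copy in) the pre-splined file.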
class CreateVanadiumTest(systemtesting.MantidSystemTest):
calibration_results = None
existing_config = config["datasearch.directories"]
def requiredFiles(self):
return _gen_required_files()
def runTest(self):
setup_mantid_paths()
self.calibration_results = run_vanadium_calibration()
def validate(self):
return self.calibration_results.name(), "ISIS_Powder-GEM-VanSplined_83608_offsets_2011_cycle111b.cal.nxs"
def cleanup(self):
try:
METHOD_NAME(output_dir)
METHOD_NAME(spline_path)
finally:
mantid.mtd.clear()
config["datasearch.directories"] = self.existing_config
class FocusTestMixin(object):
focus_results = None
existing_config = config["datasearch.directories"]
def requiredFiles(self):
return _gen_required_files()
def doTest(self, absorb_corrections):
# Gen vanadium calibration first
setup_mantid_paths()
self.focus_results = run_focus(absorb_corrections)
def cleanup(self):
try:
METHOD_NAME(spline_path)
METHOD_NAME(output_dir)
finally:
config["datasearch.directories"] = self.existing_config
mantid.mtd.clear()
class FocusTestNoAbsCorr(FocusTestMixin, systemtesting.MantidSystemTest):
def runTest(self):
self.doTest(absorb_corrections=False)
def validate(self):
# check output files as expected
def generate_error_message(expected_file, output_dir):
return "Unable to find {} in {}.\nContents={}".format(expected_file, output_dir, os.listdir(output_dir))
def assert_output_file_exists(directory, filename):
self.assertTrue(os.path.isfile(os.path.join(directory, filename)), msg=generate_error_message(filename, directory))
user_output = os.path.join(output_dir, cycle, user_name)
assert_output_file_exists(user_output, "GEM83605.nxs")
assert_output_file_exists(user_output, "GEM83605.gsas")
output_dat_dir = os.path.join(user_output, "dat_files")
for bankno in range(1, 7):
assert_output_file_exists(output_dat_dir, "GEM83605-b_{}-TOF.dat".format(bankno))
assert_output_file_exists(output_dat_dir, "GEM83605-b_{}-d.dat".format(bankno))
return self.focus_results.name(), "ISIS_Powder-GEM83605_FocusSempty.nxs"
class FocusTestWithAbsCorr(FocusTestMixin, systemtesting.MantidSystemTest):
def runTest(self):
self.doTest(absorb_corrections=True)
def validate(self):
return self.focus_results.name(), "ISIS_Powder-GEM83605_FocusSempty_abscorr.nxs"
class CreateCalTest(systemtesting.MantidSystemTest):
focus_results = None
existing_config = config["datasearch.directories"]
def requiredFiles(self):
return _gen_required_files()
def runTest(self):
# Gen vanadium calibration first
setup_mantid_paths()
self.focus_results = run_calibration()
def validate(self):
self.tolerance = 1e-5
if systemtesting.using_gsl_v1():
return self.focus_results.name(), "ISIS_Powder-GEM87618_groupedGSAS1.nxs"
else:
return self.focus_results.name(), "ISIS_Powder-GEM87618_grouped.nxs"
def cleanup(self):
try:
METHOD_NAME(generated_offset)
finally:
config["datasearch.directories"] = self.existing_config
mantid.mtd.clear()
def _gen_required_files():
required_run_numbers = ["83607", "83608", "83605", "83608_splined"] # create_van : PDF mode # File to focus (Si)
# Generate file names of form "INSTxxxxx.nxs"
input_files = [os.path.join(input_dir, (inst_name + number + ".nxs")) for number in required_run_numbers]
input_files.append(calibration_map_path)
return input_files
def run_vanadium_calibration():
vanadium_run = 83605 # Choose arbitrary run in the cycle 17_1
pdf_inst_obj = setup_inst_object(mode="PDF")
    # Run create_vanadium to generate the splined vanadium file for this cycle
pdf_inst_obj.create_vanadium(first_cycle_run_no=vanadium_run, do_absorb_corrections=True, multiple_scattering=False)
# Check the spline looks good and was saved
if not os.path.exists(spline_path):
raise RuntimeError("Could not find output spline at the following path: " + spline_path)
splined_ws = mantid.Load(Filename=spline_path)
return splined_ws
def run_focus(absorb_corrections):
run_number = 83605
sample_empty = 83608 # Use the vanadium empty again to make it obvious
sample_empty_scale = 0.5 # Set it to 50% scale
# Copy the required splined file into place first (instead of relying on generated one)
splined_file_name = "GEM83608_splined.nxs"
original_splined_path = os.path.join(input_dir, splined_file_name)
shutil.copy(original_splined_path, spline_path)
inst_object = setup_inst_object(mode="PDF")
if absorb_corrections:
sample = SampleDetails(height=5.0, radius=0.3, center=[0, 0, 0], shape="cylinder")
sample.set_material(chemical_formula="(Li7)14 Mg1.05 Si2 S12.05", number_density=0.001641)
inst_object.set_sample_details(sample=sample, mode="Rietveld")
return inst_object.focus(
run_number=run_number,
input_mode="Individual",
vanadium_normalisation=True,
do_absorb_corrections=absorb_corrections,
multiple_scattering=False,
sample_empty=sample_empty,
sample_empty_scale=sample_empty_scale,
)
def run_calibration():
iron_run = 87618
inst_object = setup_inst_object(mode="PDF")
return inst_object.create_cal(run_number=iron_run)
def setup_mantid_paths():
config["datasearch.directories"] += ";" + input_dir
def setup_inst_object(mode):
inst_obj = Gem(
user_name=user_name,
calibration_mapping_file=calibration_map_path,
calibration_directory=calibration_dir,
output_directory=output_dir,
mode=mode,
)
return inst_obj
def METHOD_NAME(path):
try:
# Use this instead of os.remove as we could be passed a non-empty dir
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
except OSError:
print("Could not delete output file at: ", path) | null |
5,927 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
import mindspore.common.dtype as mstype
from mindspore.common.api import _cell_graph_executor
from mindspore.common.parameter import Parameter
from mindspore.nn.loss.loss import LossBase
from mindspore.nn.optim.momentum import Momentum
from mindspore.ops import operations as P
from mindspore.ops import functional as F
from mindspore.parallel._utils import _reset_op_id
from mindspore.train import Model
from mindspore.context import ParallelMode
from tests.dataset_mock import MindData
context.set_context(mode=context.GRAPH_MODE)
class Dataset(MindData):
def __init__(self, predict, label, length=3):
super(Dataset, self).__init__(size=length)
self.predict = predict
self.label = label
self.index = 0
self.length = length
def __iter__(self):
return self
def __next__(self):
if self.index >= self.length:
raise StopIteration
self.index += 1
return self.predict, self.label
def reset(self):
self.index = 0
class AllToAllNet(nn.Cell):
def __init__(self):
super(AllToAllNet, self).__init__()
self.matmul = P.MatMul()
self.matmul_weight = Parameter(Tensor(np.ones([128, 32]), dtype=ms.float32), name="weight")
self.transpose1 = P.Transpose()
def construct(self, x):
x = self.matmul(x, self.matmul_weight)
x = self.transpose1(x, (1, 0))
return x
class SoftmaxCrossEntropyWithLogits(LossBase):
def __init__(self,
sparse=False,
reduction='none'):
super(SoftmaxCrossEntropyWithLogits, self).__init__(reduction)
self.sparse = sparse
self.reduction = reduction
self.softmax_cross_entropy = P.SoftmaxCrossEntropyWithLogits()
self.one_hot = P.OneHot()
self.on_value = Tensor(1.0, mstype.float32)
self.off_value = Tensor(0., mstype.float32)
self.is_cpugpu = context.get_context('device_target') in ["CPU", "GPU"]
if self.is_cpugpu:
self.sparse_softmax_cross_entropy = P.SparseSoftmaxCrossEntropyWithLogits()
def construct(self, logits, labels):
if self.is_cpugpu and self.sparse and self.reduction == 'mean':
x = self.sparse_softmax_cross_entropy(logits, labels)
return x
if self.sparse:
labels = self.one_hot(labels, F.shape(logits)[-1], self.on_value, self.off_value)
x = self.softmax_cross_entropy(logits, labels)[0]
return self.get_loss(x)
def all_to_all_net():
return AllToAllNet()
def METHOD_NAME():
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=ParallelMode.AUTO_PARALLEL, device_num=1, global_rank=0)
predict = Tensor(np.ones([32, 128]), dtype=ms.float32)
label = Tensor(np.ones([32]), dtype=ms.int32)
dataset = Dataset(predict, label, 2)
net = all_to_all_net()
loss = SoftmaxCrossEntropyWithLogits(sparse=True, reduction='mean')
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss, opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
strategys = _cell_graph_executor._get_shard_strategy(model._train_network)
return strategys
def test_one_dev():
_reset_op_id()
strategies = METHOD_NAME()
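    # With a single device, auto-parallel should assign every operator the trivial
    # strategy, i.e. no dimension of any input is split.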
for (k, v) in strategies.items():
if re.search('SoftmaxCrossEntropyWithLogits-op', k) is not None:
assert v == [[1, 1], [1, 1]]
elif re.search('Transpose-op', k) is not None:
assert v == [[1, 1]]
elif re.search('MatMul-op', k) is not None:
assert v == [[1, 1], [1, 1]] | null |
5,928 | #!/usr/bin/env python3
#
#===- clang-format-diff.py - ClangFormat Diff Reformatter ----*- python -*--===#
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License.
#
# ============================================================
#
# University of Illinois/NCSA
# Open Source License
#
# Copyright (c) 2007-2015 University of Illinois at Urbana-Champaign.
# All rights reserved.
#
# Developed by:
#
# LLVM Team
#
# University of Illinois at Urbana-Champaign
#
# http://llvm.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal with
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the names of the LLVM Team, University of Illinois at
# Urbana-Champaign, nor the names of its contributors may be used to
# endorse or promote products derived from this Software without specific
# prior written permission.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
# SOFTWARE.
#
# ============================================================
#
#===------------------------------------------------------------------------===#
r"""
ClangFormat Diff Reformatter
============================
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage for git/svn users:
git diff -U0 HEAD^ | clang-format-diff.py -p1 -i
svn diff --diff-cmd=diff -x-U0 | clang-format-diff.py -i
"""
import argparse
import difflib
import io
import re
import subprocess
import sys
# Change this to the full path if clang-format is not on the path.
binary = 'clang-format'
def METHOD_NAME():
parser = argparse.ArgumentParser(description=
'Reformat changed lines in diff. Without -i '
'option just output the diff that would be '
'introduced.')
parser.add_argument('-i', action='store_true', default=False,
help='apply edits to files instead of displaying a diff')
parser.add_argument('-p', metavar='NUM', default=0,
help='strip the smallest prefix containing P slashes')
parser.add_argument('-regex', metavar='PATTERN', default=None,
help='custom pattern selecting file paths to reformat '
'(case sensitive, overrides -iregex)')
parser.add_argument('-iregex', metavar='PATTERN', default=
r'.*\.(cpp|cc|c\+\+|cxx|c|cl|h|hpp|m|mm|inc|js|ts|proto'
r'|protodevel|java)',
help='custom pattern selecting file paths to reformat '
'(case insensitive, overridden by -regex)')
parser.add_argument('-sort-includes', action='store_true', default=False,
help='let clang-format sort include blocks')
parser.add_argument('-v', '--verbose', action='store_true',
help='be more verbose, ineffective without -i')
args = parser.parse_args()
# Extract changed lines for each file.
filename = None
lines_by_file = {}
for line in sys.stdin:
    match = re.search(r'^\+\+\+\ (.*?/){%s}(\S*)' % args.p, line)
if match:
filename = match.group(2)
    if filename is None:
continue
if args.regex is not None:
if not re.match('^%s$' % args.regex, filename):
continue
else:
if not re.match('^%s$' % args.iregex, filename, re.IGNORECASE):
continue
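    # Hunk headers look like "@@ -12,3 +14,5 @@": group(1) is the start line in the
    # new file and group(3) the optional line count (defaulting to 1).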
    match = re.search(r'^@@.*\+(\d+)(,(\d+))?', line)
if match:
start_line = int(match.group(1))
line_count = 1
if match.group(3):
line_count = int(match.group(3))
if line_count == 0:
continue
end_line = start_line + line_count - 1
lines_by_file.setdefault(filename, []).extend(
['-lines', str(start_line) + ':' + str(end_line)])
# Reformat files containing changes in place.
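  # clang-format accepts repeated "-lines <from>:<to>" ranges (e.g. "-lines", "10:12",
  # "-lines", "40:40"), so each file gets one invocation covering all changed hunks.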
for filename, lines in lines_by_file.items():
if args.i and args.verbose:
print('Formatting {}'.format(filename))
command = [binary, filename]
if args.i:
command.append('-i')
if args.sort_includes:
command.append('-sort-includes')
command.extend(lines)
command.extend(['-style=file', '-fallback-style=none'])
p = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=None,
stdin=subprocess.PIPE,
universal_newlines=True)
stdout, stderr = p.communicate()
if p.returncode != 0:
sys.exit(p.returncode)
if not args.i:
with open(filename, encoding="utf8") as f:
code = f.readlines()
formatted_code = io.StringIO(stdout).readlines()
diff = difflib.unified_diff(code, formatted_code,
filename, filename,
'(before formatting)', '(after formatting)')
diff_string = ''.join(diff)
if len(diff_string) > 0:
sys.stdout.write(diff_string)
if __name__ == '__main__':
METHOD_NAME() | null |
5,929 | from collections import ChainMap
from typing import Any
from graphql.error import GraphQLError
from graphql.execution import ExecutionResult, execute_sync
from graphql.language import SourceLocation, parse
from graphql.type import (
GraphQLArgument,
GraphQLField,
GraphQLInputField,
GraphQLInputObjectType,
GraphQLInt,
GraphQLObjectType,
GraphQLSchema,
GraphQLString,
)
def describe_execute_resolve_function():
def _test_schema(test_field: GraphQLField) -> GraphQLSchema:
return GraphQLSchema(GraphQLObjectType("Query", {"test": test_field}))
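    # The tests below exercise graphql-core's default field resolver: it looks the
    # field name up as a mapping key or attribute on the root value and, if the
    # result is callable, calls it with (info, **args).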
def default_function_accesses_attributes():
class RootValue:
test = "testValue"
assert execute_sync(
schema=_test_schema(GraphQLField(GraphQLString)),
document=parse("{ test }"),
root_value=RootValue(),
) == (
{"test": "testValue"},
None,
)
def METHOD_NAME():
root_value = {"test": "testValue"}
assert execute_sync(
schema=_test_schema(GraphQLField(GraphQLString)),
document=parse("{ test }"),
root_value=root_value,
) == ({"test": "testValue"}, None)
def default_function_accesses_keys_of_chain_map():
# use a mapping that is not a subclass of dict
root_value = ChainMap({"test": "testValue"})
assert execute_sync(
schema=_test_schema(GraphQLField(GraphQLString)),
document=parse("{ test }"),
root_value=root_value,
) == ({"test": "testValue"}, None)
def default_function_calls_methods():
class RootValue:
_secret = "secretValue"
def test(self, _info):
return self._secret
assert execute_sync(
schema=_test_schema(GraphQLField(GraphQLString)),
document=parse("{ test }"),
root_value=RootValue(),
) == (
{"test": "secretValue"},
None,
)
def default_function_passes_args_and_context():
class Adder:
_num: int
def __init__(self, num):
self._num = num
def test(self, info, addend1: int):
return self._num + addend1 + info.context.addend2
root_value = Adder(700)
schema = _test_schema(
GraphQLField(GraphQLInt, args={"addend1": GraphQLArgument(GraphQLInt)})
)
class ContextValue:
addend2 = 9
context_value = ContextValue()
document = parse("{ test(addend1: 80) }")
assert execute_sync(
schema=schema,
document=document,
root_value=root_value,
context_value=context_value,
) == (
{"test": 789},
None,
)
def uses_provided_resolve_function():
schema = _test_schema(
GraphQLField(
GraphQLString,
args={
"aStr": GraphQLArgument(GraphQLString),
"aInt": GraphQLArgument(GraphQLInt),
},
resolve=lambda source, info, **args: repr([source, args]),
)
)
def execute_query(query: str, root_value: Any = None) -> ExecutionResult:
document = parse(query)
return execute_sync(
schema=schema,
document=document,
root_value=root_value,
)
assert execute_query("{ test }") == ({"test": "[None, {}]"}, None)
assert execute_query("{ test }", "Source!") == (
{"test": "['Source!', {}]"},
None,
)
assert execute_query('{ test(aStr: "String!") }', "Source!") == (
{"test": "['Source!', {'aStr': 'String!'}]"},
None,
)
assert execute_query('{ test(aInt: -123, aStr: "String!") }', "Source!") == (
{"test": "['Source!', {'aStr': 'String!', 'aInt': -123}]"},
None,
)
def transforms_arguments_using_out_names():
# This is an extension of GraphQL.js.
schema = _test_schema(
GraphQLField(
GraphQLString,
args={
"aStr": GraphQLArgument(GraphQLString, out_name="a_str"),
"aInt": GraphQLArgument(GraphQLInt, out_name="a_int"),
},
resolve=lambda source, info, **args: repr([source, args]),
)
)
def execute_query(query: str, root_value: Any = None) -> ExecutionResult:
document = parse(query)
return execute_sync(schema=schema, document=document, root_value=root_value)
assert execute_query("{ test }") == ({"test": "[None, {}]"}, None)
assert execute_query("{ test }", "Source!") == (
{"test": "['Source!', {}]"},
None,
)
assert execute_query('{ test(aStr: "String!") }', "Source!") == (
{"test": "['Source!', {'a_str': 'String!'}]"},
None,
)
assert execute_query('{ test(aInt: -123, aStr: "String!") }', "Source!") == (
{"test": "['Source!', {'a_str': 'String!', 'a_int': -123}]"},
None,
)
def transforms_arguments_with_inputs_using_out_names():
# This is an extension of GraphQL.js.
TestInputObject = GraphQLInputObjectType(
"TestInputObjectType",
lambda: {
"inputOne": GraphQLInputField(GraphQLString, out_name="input_one"),
"inputRecursive": GraphQLInputField(
TestInputObject, out_name="input_recursive"
),
},
)
schema = _test_schema(
GraphQLField(
GraphQLString,
args={"aInput": GraphQLArgument(TestInputObject, out_name="a_input")},
resolve=lambda source, info, **args: repr([source, args]),
)
)
def execute_query(query: str, root_value: Any = None) -> ExecutionResult:
document = parse(query)
return execute_sync(schema=schema, document=document, root_value=root_value)
assert execute_query("{ test }") == ({"test": "[None, {}]"}, None)
assert execute_query('{ test(aInput: {inputOne: "String!"}) }', "Source!") == (
{"test": "['Source!', {'a_input': {'input_one': 'String!'}}]"},
None,
)
assert execute_query(
'{ test(aInput: {inputRecursive: {inputOne: "SourceRecursive!"}}) }',
"Source!",
) == (
{
"test": "['Source!',"
" {'a_input': {'input_recursive': {'input_one': 'SourceRecursive!'}}}]"
},
None,
)
def pass_error_from_resolver_wrapped_as_located_graphql_error():
def resolve(_obj, _info):
raise ValueError("Some error")
schema = _test_schema(GraphQLField(GraphQLString, resolve=resolve))
result = execute_sync(schema, parse("{ test }"))
assert result == (
{"test": None},
[{"message": "Some error", "locations": [(1, 3)], "path": ["test"]}],
)
assert result.errors is not None
error = result.errors[0]
assert isinstance(error, GraphQLError)
assert str(error) == "Some error\n\nGraphQL request:1:3\n1 | { test }\n | ^"
assert error.positions == [2]
locations = error.locations
assert locations == [(1, 3)]
location = locations[0]
assert isinstance(location, SourceLocation)
assert location == SourceLocation(1, 3)
original_error = error.original_error
assert isinstance(original_error, ValueError)
assert str(original_error) == "Some error" | null |
5,930 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench.
#
import unittest
import matplotlib
matplotlib.use("AGG")
import matplotlib.pyplot as plt
from numpy import array_equal
# Pulling in the MantidAxes registers the 'mantid' projection
from mantid.simpleapi import CreateWorkspace
from mantidqt.utils.qt.testing import start_qapplication
from workbench.plotting.figureerrorsmanager import FigureErrorsManager
def plot_things(make_them_errors):
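    """
    Decorator factory for the tests below: before the wrapped test runs, plot two
    curves from ``self.ws2d_histo`` onto ``self.ax``: plain lines when
    ``make_them_errors`` is False, errorbar containers when it is True.
    """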
def function_reference(func):
def function_parameters(self):
if not make_them_errors:
# plot this line with specNum
self.ax.plot(self.ws2d_histo, specNum=1)
# and another one with wkspIndex
self.ax.plot(self.ws2d_histo, wkspIndex=2)
else:
self.ax.errorbar(self.ws2d_histo, specNum=1)
self.ax.errorbar(self.ws2d_histo, wkspIndex=2)
return func(self)
return function_parameters
return function_reference
@start_qapplication
class FigureErrorsManagerTest(unittest.TestCase):
"""
Test class that covers the interaction of the FigureErrorsManager with plots
that use the mantid projection and have MantidAxes
"""
@classmethod
def setUpClass(cls):
cls.ws2d_histo = CreateWorkspace(
DataX=[10, 20, 30, 10, 20, 30, 10, 20, 30],
DataY=[2, 3, 4, 5, 3, 5],
DataE=[1, 2, 3, 4, 1, 1],
NSpec=3,
Distribution=True,
UnitX="Wavelength",
VerticalAxisUnit="DeltaE",
VerticalAxisValues=[4, 6, 8],
OutputWorkspace="ws2d_histo",
)
# initialises the QApplication
        super(FigureErrorsManagerTest, cls).setUpClass()
def setUp(self):
self.fig, self.ax = plt.subplots(subplot_kw={"projection": "mantid"})
self.errors_manager = FigureErrorsManager(self.fig.canvas)
def tearDown(self):
plt.close("all")
del self.fig
del self.ax
del self.errors_manager
@plot_things(make_them_errors=False)
def test_show_all_errors(self):
# assert plot does not have errors
self.assertEqual(0, len(self.ax.containers))
self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
# check that the errors have been added
self.assertEqual(2, len(self.ax.containers))
@plot_things(make_them_errors=True)
def test_hide_all_errors(self):
self.assertEqual(2, len(self.ax.containers))
self.errors_manager.toggle_all_errors(self.ax, make_visible=False)
# errors still exist
self.assertEqual(2, len(self.ax.containers))
# they are just invisible
self.assertFalse(self.ax.containers[0][2][0].get_visible())
@plot_things(make_them_errors=True)
def test_hide_all_errors_retains_legend_properties(self):
# create a legend with a title
self.ax.legend(title="Test")
self.errors_manager.toggle_all_errors(self.ax, make_visible=False)
# check that the legend still has a title
self.assertEqual(self.ax.get_legend().get_title().get_text(), "Test")
@plot_things(make_them_errors=False)
def test_show_all_errors_retains_legend_properties(self):
# create a legend with a title
self.ax.legend(title="Test")
self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
# check that the legend still has a title
self.assertEqual(self.ax.get_legend().get_title().get_text(), "Test")
def test_curve_has_all_errorbars_on_replot_after_error_every_increase(self):
curve = self.ax.errorbar([0, 1, 2, 4], [0, 1, 2, 4], yerr=[0.1, 0.2, 0.3, 0.4])
new_curve = FigureErrorsManager._replot_mpl_curve(self.ax, curve, {"errorevery": 2})
self.assertEqual(2, len(new_curve[2][0].get_segments()))
new_curve = FigureErrorsManager._replot_mpl_curve(self.ax, new_curve, {"errorevery": 1})
self.assertTrue(hasattr(new_curve, "errorbar_data"))
self.assertEqual(4, len(new_curve[2][0].get_segments()))
def test_show_all_errors_on_waterfall_plot_retains_waterfall(self):
self.ax.plot([0, 1], [0, 1])
self.ax.plot([0, 1], [0, 1])
self.ax.set_waterfall(True)
self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
self.assertFalse(array_equal(self.ax.get_lines()[0].get_data(), self.ax.get_lines()[1].get_data()))
def test_hide_all_errors_on_waterfall_plot_retains_waterfall(self):
self.ax.plot([0, 1], [0, 1])
self.ax.plot([0, 1], [0, 1])
self.ax.set_waterfall(True)
self.errors_manager.toggle_all_errors(self.ax, make_visible=True)
self.errors_manager.toggle_all_errors(self.ax, make_visible=False)
self.assertFalse(array_equal(self.ax.get_lines()[0].get_data(), self.ax.get_lines()[1].get_data()))
def METHOD_NAME(self):
self.ax.plot([1, 2], [1, 2])
self.errors_manager.replot_curve(self.ax, self.ax.lines[0], {})
self.assertEqual(0, len(self.ax.creation_args))
if __name__ == "__main__":
unittest.main() | null |
5,931 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import IFunction
from mantidqtinterfaces.Muon.GUI.Common.fitting_widgets.basic_fitting.basic_fitting_view import BasicFittingView
from mantidqtinterfaces.Muon.GUI.Common.fitting_widgets.model_fitting.model_fitting_data_selector_view import ModelFittingDataSelectorView
from qtpy.QtWidgets import QWidget
class ModelFittingView(BasicFittingView):
"""
The ModelFittingView derives from the BasicFittingView. It adds the ModelFittingDataSelectorView to the widget.
"""
def __init__(self, parent: QWidget = None):
"""Initializes the ModelFittingView, and adds the ModelFittingDataSelectorView widget."""
super(ModelFittingView, self).__init__(parent)
self.model_fitting_data_selector = ModelFittingDataSelectorView(self)
self.general_fitting_options_layout.addWidget(self.model_fitting_data_selector)
# Hide the workspace selector which is used to store the generated matrix workspaces
self.workspace_selector.hide()
def set_slot_for_results_table_changed(self, slot) -> None:
"""Connect the slot for the result tables combo box being changed."""
self.model_fitting_data_selector.set_slot_for_results_table_changed(slot)
def set_slot_for_selected_x_changed(self, slot) -> None:
"""Connect the slot for when the selected X changes."""
self.model_fitting_data_selector.set_slot_for_selected_x_changed(slot)
def set_slot_for_selected_y_changed(self, slot) -> None:
"""Connect the slot for when the selected Y changes."""
self.model_fitting_data_selector.set_slot_for_selected_y_changed(slot)
def result_table_names(self) -> list:
"""Returns a list of result table names currently loaded into model fitting."""
return self.model_fitting_data_selector.result_table_names()
def add_results_table_name(self, results_table_name: str) -> None:
"""Add a results table to the results table combo box."""
self.model_fitting_data_selector.add_results_table_name(results_table_name)
def update_result_table_names(self, table_names: list) -> None:
"""Update the data in the results table combo box."""
self.model_fitting_data_selector.update_result_table_names(table_names)
def update_x_parameters(self, x_parameters: list, x_parameter_types: list, emit_signal: bool = False) -> None:
"""Update the available X parameters."""
self.model_fitting_data_selector.update_x_parameters(x_parameters, x_parameter_types, emit_signal)
def update_y_parameters(self, y_parameters: list, y_parameter_types: list, emit_signal: bool = False) -> None:
"""Update the available Y parameters."""
self.model_fitting_data_selector.update_y_parameters(y_parameters, y_parameter_types, emit_signal)
def update_fit_function(self, fit_function: IFunction) -> None:
"""Updates the fit function shown in the view."""
self.fit_function_options.set_fit_function(fit_function)
def set_selected_x_parameter(self, x_parameter: str) -> None:
"""Sets the selected X parameter."""
self.model_fitting_data_selector.set_selected_x_parameter(x_parameter)
def METHOD_NAME(self, y_parameter: str) -> None:
"""Sets the selected Y parameter."""
self.model_fitting_data_selector.METHOD_NAME(y_parameter)
@property
def current_result_table_index(self) -> str:
"""Returns the index of the currently displayed result table."""
return self.model_fitting_data_selector.current_result_table_index
def x_parameter(self) -> str:
"""Returns the selected X parameter name."""
return self.model_fitting_data_selector.x_parameter
def y_parameter(self) -> str:
"""Returns the selected Y parameter name."""
return self.model_fitting_data_selector.y_parameter
def enable_view(self) -> None:
"""Enable all widgets in this fitting widget."""
if self.model_fitting_data_selector.number_of_result_tables() > 0:
self.setEnabled(True) | null |
5,932 | import unittest
from util import *
import json
class BIP39Tests(unittest.TestCase):
cases = None
langs = { 'en': 'english',
'es': 'spanish',
'fr': 'french',
'it': 'italian',
'jp': 'japanese',
'zhs': 'chinese_simplified',
'zht': 'chinese_traditional' }
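    # Keys are the language codes accepted by bip39_get_wordlist(); values are the
    # wordlist file basenames loaded through load_words() in the lookup test.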
def get_wordlist(self, lang):
out = c_void_p()
bip39_get_wordlist(lang, byref(out))
return out
def setUp(self):
if self.cases is None:
with open(root_dir + 'src/data/wordlists/vectors.json', 'r') as f:
cases = json.load(f)['english']
conv = lambda case: [utf8(x) for x in case]
self.cases = [conv(case) for case in cases]
gwl = lambda lang: self.get_wordlist(utf8(lang))
self.wordlists = {l: gwl(l) for l in list(self.langs.keys())}
ret, all_langs = bip39_get_languages()
self.assertEqual(ret, 0)
self.all_langs = all_langs.split()
self.num_langs = len(self.all_langs)
def test_all_langs(self):
for lang in self.all_langs:
self.assertTrue(lang in self.langs)
self.assertTrue(self.num_langs == 1 or self.num_langs == len(self.langs.keys()))
def METHOD_NAME(self):
for lang, wl in self.wordlists.items():
self.assertIsNotNone(wl)
def_wl = self.get_wordlist(None)
en_wl = self.wordlists['en']
self.assertEqual(def_wl.value, en_wl.value)
def test_all_lookups(self):
for lang in self.all_langs:
wl = self.wordlists[lang]
words_list, _ = load_words(self.langs[lang])
for i in range(2048):
ret, word = bip39_get_word(wl, i)
word = word.encode('utf-8')
self.assertEqual(ret, 0)
self.assertEqual(word, utf8(words_list[i]))
if wordlist_lookup_word is not None:
idx = wordlist_lookup_word(wl, word)
self.assertEqual(i, idx - 1)
self.assertEqual(bip39_get_word(wl, 2048), (WALLY_EINVAL, None))
def test_bip39_vectors(self):
"""Test conversion to and from the BIP39 specification vectors"""
wl = self.get_wordlist(None)
for case in self.cases:
hex_input, mnemonic = case[0], case[1]
buf, buf_len = make_cbuffer(hex_input)
ret, result = bip39_mnemonic_from_bytes(wl, buf, buf_len)
self.assertEqual(ret, 0)
result = utf8(result)
self.assertEqual(result, mnemonic)
self.assertEqual(bip39_mnemonic_validate(wl, mnemonic), 0)
out_buf = create_string_buffer(buf_len)
ret, rlen = bip39_mnemonic_to_bytes(wl, result, out_buf, buf_len)
self.assertEqual(ret, 0)
self.assertEqual(rlen, buf_len)
self.assertEqual(buf, out_buf.raw)
def test_288(self):
""" Test a 288 bit (27 word) mnemonic phrase """
mnemonic = 'panel jaguar rib echo witness mean please festival ' \
'issue item notable divorce conduct page tourist ' \
'west off salmon ghost grit kitten pull marine toss ' \
'dirt oak gloom'
self.assertEqual(bip39_mnemonic_validate(None, utf8(mnemonic)), 0)
out_buf = create_string_buffer(36)
ret, rlen = bip39_mnemonic_to_bytes(None, utf8(mnemonic), out_buf, 36)
self.assertEqual(ret, 0)
self.assertEqual(rlen, 36)
expected = '9F8EE6E3A2FFCB13A99AA976AEDA5A2002ED' \
'3DF97FCB9957CD863357B55AA2072D3EB2F9'
self.assertEqual(h(out_buf).upper(), utf8(expected))
def test_mnemonic_to_seed(self):
for case in self.cases:
mnemonic, seed = case[1], case[2]
buf = create_string_buffer(64)
for fn in [bip39_mnemonic_to_seed, bip39_mnemonic_to_seed512]:
ret = fn(mnemonic, b'TREZOR', buf, 64)
self.assertEqual(ret, (WALLY_OK, 64) if fn == bip39_mnemonic_to_seed else WALLY_OK)
self.assertEqual(h(buf), seed)
if __name__ == '__main__':
unittest.main() | null |
5,933 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2023, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import utils as \
database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as types_utils
from unittest.mock import patch
from pgadmin.utils import server_utils
class TypesAddTestCase(BaseTestGenerator):
""" This class will add type under schema node. """
scenarios = utils.generate_scenarios('types_create',
types_utils.test_cases)
def setUp(self):
super().setUp()
self.data = self.test_data
self.db_name = parent_node_dict["database"][-1]["db_name"]
schema_info = parent_node_dict["schema"][-1]
self.server_id = schema_info["server_id"]
self.db_id = schema_info["db_id"]
# Check DB version
if "server_min_version" in self.data:
server_con = server_utils.connect_server(self, self.server_id)
if not server_con["info"] == "Server connected.":
raise Exception("Could not connect to server to add "
"a table.")
if (server_con["data"]["type"] == 'pg' or
server_con["data"]["version"] <=
self.data["server_min_version"]):
self.skipTest(self.data["skip_msg"])
db_con = database_utils.connect_database(self, utils.SERVER_GROUP,
self.server_id, self.db_id)
if not db_con['data']["connected"]:
raise Exception("Could not connect to database to add a type.")
self.schema_id = schema_info["schema_id"]
self.schema_name = schema_info["schema_name"]
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema to add a type.")
def METHOD_NAME(self):
"""
        This function creates a type and returns the created type response
:return: created types response
"""
return self.tester.post(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' + str(self.db_id) +
'/' + str(self.schema_id) + '/',
data=json.dumps(self.data),
content_type='html/json')
def runTest(self):
""" This function will add type under schema node. """
if "name" in self.data:
self.data["name"] = self.data["name"] + (str(uuid.uuid4())[1:8])
db_user = self.server["username"]
self.data["typeowner"] = db_user
self.data["schema"] = self.schema_name
if self.is_positive_test:
response = self.METHOD_NAME()
else:
if hasattr(self, "missing_parameter"):
response = self.METHOD_NAME()
if hasattr(self, "internal_server_error"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.METHOD_NAME()
if hasattr(self, "error_in_db"):
return_value_object = eval(self.mock_data["return_value"])
with patch(self.mock_data["function_name"],
side_effect=[return_value_object]):
response = self.METHOD_NAME()
self.assertEqual(response.status_code,
self.expected_data["status_code"])
def tearDown(self):
# Disconnect the database
database_utils.disconnect_database(self, self.server_id, self.db_id) | null |
5,934 | # Copyright 2021-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing RgbToBgr op in DE
"""
import numpy as np
from numpy.testing import assert_allclose
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.c_transforms as vision
import mindspore.dataset.vision.py_transforms as py_vision
DATA_DIR = ["../data/dataset/test_tf_file_3_images/train-0000-of-0001.data"]
SCHEMA_DIR = "../data/dataset/test_tf_file_3_images/datasetSchema.json"
def METHOD_NAME(shape):
# Only generate floating points that are fractions like n / 256, since they
# are RGB pixels. Some low-precision floating point types in this test can't
# handle arbitrary precision floating points well.
return np.random.randint(0, 256, shape) / 255.
def test_rgb_bgr_hwc_py():
"""
Feature: RgbToBgr Op
Description: Test Python op with HWC input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = METHOD_NAME((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
rgb2bgr_op = py_vision.RgbToBgr(is_hwc=True)
bgr_np_pred = rgb2bgr_op(rgb_np)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_hwc_c():
"""
Feature: RgbToBgr Op
Description: Test C++ op with HWC input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = METHOD_NAME((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((8, 8, 3))
rgb2bgr_op = vision.RgbToBgr()
bgr_np_pred = rgb2bgr_op(rgb_np)
r, g, b = rgb_np[:, :, 0], rgb_np[:, :, 1], rgb_np[:, :, 2]
bgr_np_gt = np.stack((b, g, r), axis=2)
assert bgr_np_pred.shape == rgb_np.shape
assert_allclose(bgr_np_pred.flatten(),
bgr_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_chw_py():
"""
Feature: RgbToBgr Op
    Description: Test Python op with CHW input shape in eager mode
Expectation: Output image shape from op is verified
"""
rgb_flat = METHOD_NAME((64, 3)).astype(np.float32)
rgb_np = rgb_flat.reshape((3, 8, 8))
rgb2bgr_op = py_vision.RgbToBgr(is_hwc=False)
rgb_np_pred = rgb2bgr_op(rgb_np)
rgb_np_gt = rgb_np[::-1, :, :]
assert rgb_np_pred.shape == rgb_np.shape
assert_allclose(rgb_np_pred.flatten(),
rgb_np_gt.flatten(),
rtol=1e-5,
atol=0)
def test_rgb_bgr_pipeline_py():
"""
Feature: RgbToBgr Op
Description: Test Python op in dataset pipeline
Expectation: Dataset pipeline runs successfully and results are verified
"""
# First dataset
transforms1_list = [py_vision.Decode(), py_vision.Resize([64, 64]), py_vision.ToTensor()]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1_list)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2 = [
py_vision.Decode(),
py_vision.Resize([64, 64]),
py_vision.ToTensor(),
py_vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(
transforms2)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
num_iter = 0
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
num_iter += 1
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[::-1, :, :]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
assert num_iter == 3
def test_rgb_bgr_pipeline_c():
"""
Feature: RgbToBgr Op
Description: Test C++ op in dataset pipeline
Expectation: Dataset pipeline runs successfully and results are verified
"""
# First dataset
transforms1 = [
        # Set Decode(rgb=True) for test coverage of deprecated rgb arg
vision.Decode(rgb=True),
vision.Resize([60, 60])
]
transforms1 = mindspore.dataset.transforms.py_transforms.Compose(
transforms1)
ds1 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds1 = ds1.map(operations=transforms1, input_columns=["image"])
# Second dataset
transforms2_list = [
# Set Decode(True) for test coverage of deprecated rgb arg
vision.Decode(True),
vision.Resize([60, 60]),
vision.RgbToBgr()
]
transforms2 = mindspore.dataset.transforms.py_transforms.Compose(transforms2_list)
ds2 = ds.TFRecordDataset(DATA_DIR,
SCHEMA_DIR,
columns_list=["image"],
shuffle=False)
ds2 = ds2.map(operations=transforms2, input_columns=["image"])
for data1, data2 in zip(ds1.create_dict_iterator(num_epochs=1),
ds2.create_dict_iterator(num_epochs=1)):
ori_img = data1["image"].asnumpy()
cvt_img = data2["image"].asnumpy()
cvt_img_gt = ori_img[:, :, ::-1]
assert_allclose(cvt_img_gt.flatten(),
cvt_img.flatten(),
rtol=1e-5,
atol=0)
assert ori_img.shape == cvt_img.shape
if __name__ == "__main__":
test_rgb_bgr_hwc_py()
test_rgb_bgr_hwc_c()
test_rgb_bgr_chw_py()
test_rgb_bgr_pipeline_py()
test_rgb_bgr_pipeline_c() | null |
5,935 | # Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End to end test for running a beam-powered custom component in local mode.
The component and pipeline pattern in this file are provided only for testing
purposes and are not a recommended way to structure TFX pipelines. We recommend
consulting the TFX Component Tutorial (https://www.tensorflow.org/tfx/tutorials)
for a recommended pipeline topology.
"""
import os
import tempfile
import absl.testing.absltest
import apache_beam as beam
from apache_beam.options import pipeline_options
from tfx.dsl.component.experimental import annotations
from tfx.dsl.component.experimental.decorators import component
from tfx.dsl.placeholder import placeholder as ph
from tfx.orchestration import pipeline as pipeline_py
from tfx.orchestration.local import local_dag_runner
from tfx.orchestration.metadata import sqlite_metadata_connection_config
@component(use_beam=True)
def SimpleBeamPoweredComponent(
beam_pipeline: annotations.BeamComponentParameter[beam.Pipeline] = None):
with beam_pipeline as p:
direct_num_workers = p.options.view_as(
pipeline_options.DirectOptions).direct_num_workers
direct_running_mode = p.options.view_as(
pipeline_options.DirectOptions).direct_running_mode
LocalDagRunnerTest.BEAM_ARG_VALUES[
'direct_num_workers'] = direct_num_workers
LocalDagRunnerTest.BEAM_ARG_VALUES[
'direct_running_mode'] = direct_running_mode
class LocalDagRunnerTest(absl.testing.absltest.TestCase):
  # Global list of component names that have run, used to confirm
# execution side-effects in local test.
RAN_COMPONENTS = []
# List of beam env vars from placeholders
BEAM_ARG_VALUES = {}
def setUp(self):
super().setUp()
self.__class__.RAN_COMPONENTS = []
self.__class__.BEAM_ARG_VALUES = {}
def METHOD_NAME(
self, num_workers_env_var_name,
direct_running_mode_env_var_name) -> pipeline_py.Pipeline:
# Construct component instances.
dummy_beam_component = SimpleBeamPoweredComponent().with_id('Beam')
# Construct and run pipeline
temp_path = tempfile.mkdtemp()
pipeline_root_path = os.path.join(temp_path, 'pipeline_root')
metadata_path = os.path.join(temp_path, 'metadata.db')
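    # The Beam args below mix a literal runner flag with placeholder expressions;
    # ph.environment_variable defers resolution to run time, which is what the
    # test relies on when it sets DIRECT_RUNNING_MODE / NUM_WORKERS beforehand.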
return pipeline_py.Pipeline(
pipeline_name='test_pipeline',
pipeline_root=pipeline_root_path,
metadata_connection_config=sqlite_metadata_connection_config(
metadata_path),
components=[dummy_beam_component],
beam_pipeline_args=[
'--runner=DirectRunner',
'--direct_running_mode=' +
ph.environment_variable(direct_running_mode_env_var_name),
ph.environment_variable(num_workers_env_var_name),
],
)
def testBeamComponentWithPlaceHolderArgs(self):
# Set env vars for the placeholder
direct_running_mode_env_var_name = 'DIRECT_RUNNING_MODE'
direct_running_mode = 'multi_processing'
direct_num_workers = 2
num_workers_env_var_name = 'NUM_WORKERS'
num_workers_env_var_value = f'--direct_num_workers={direct_num_workers}'
os.environ[direct_running_mode_env_var_name] = direct_running_mode
os.environ[num_workers_env_var_name] = num_workers_env_var_value
local_dag_runner.LocalDagRunner().run(
self.METHOD_NAME(
num_workers_env_var_name, direct_running_mode_env_var_name))
self.assertEqual(self.BEAM_ARG_VALUES['direct_num_workers'],
direct_num_workers)
self.assertEqual(self.BEAM_ARG_VALUES['direct_running_mode'],
direct_running_mode)
if __name__ == '__main__':
absl.testing.absltest.main() | null |
5,936 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore import Tensor, context
from mindspore.ops.functional import vmap
from mindspore.ops import functional as F
from mindspore.common.api import jit
context.set_context(mode=context.GRAPH_MODE, device_target='CPU')
class CeluTEST(nn.Cell):
def __init__(self, alpha):
super(CeluTEST, self).__init__()
self.celu = P.CeLU(alpha)
def construct(self, x):
return self.celu(x)
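# The expected values in the tests below follow the CeLU definition
#     celu(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)),
# so with alpha = 1: celu(-2) = exp(-2) - 1 ≈ -0.8647 and celu(-1) = exp(-1) - 1 ≈ -0.6321.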
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_celu_op(data_type):
"""
Feature: Celu cpu kernel
    Description: test celu with alpha = 1.0.
Expectation: match to np benchmark.
"""
error = 1e-3
celu = CeluTEST(1.)
x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]).astype(data_type))
expect = np.array([-0.86468184, -0.6321212, 1., 2.]).astype(data_type)
context.set_context(mode=context.GRAPH_MODE)
output = celu(x)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
context.set_context(mode=context.PYNATIVE_MODE)
output = celu(x)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
@pytest.mark.parametrize("data_type", [np.float32, np.float16])
def test_celu_func(data_type):
"""
Feature: Celu cpu kernel
    Description: test celu with alpha = 1.0.
Expectation: match to np benchmark.
"""
error = 1e-3
x = Tensor(np.array([-2.0, -1.0, 1.0, 2.0]).astype(data_type))
expect = np.array([-0.86468184, -0.6321212, 1., 2.]).astype(data_type)
context.set_context(mode=context.GRAPH_MODE)
output = F.celu(x, 1.0)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
context.set_context(mode=context.PYNATIVE_MODE)
output = F.celu(x, 1.0)
print(output)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_cpu
def METHOD_NAME():
"""
Feature: celu cpu kernel.
Description: test celu vmap feature.
Expectation: Success.
"""
error = 1e-3
def cal_celu(x):
return P.CeLU(1.0)(x)
x = Tensor(np.array([[-2.0, -1.0, 1.0, 2.0], [-2.0, -1.0, 1.0, 2.0], [-2.0, -1.0, 1.0, 2.0],
[-2.0, -1.0, 1.0, 2.0], [-2.0, -1.0, 1.0, 2.0], [-2.0, -1.0, 1.0, 2.0],
[-2.0, -1.0, 1.0, 2.0], [-2.0, -1.0, 1.0, 2.0]]).astype(np.float32))
expect = np.array([[-0.86468184, -0.6321212, 1., 2.], [-0.86468184, -0.6321212, 1., 2.],
[-0.86468184, -0.6321212, 1., 2.], [-0.86468184, -0.6321212, 1., 2.],
[-0.86468184, -0.6321212, 1., 2.], [-0.86468184, -0.6321212, 1., 2.],
[-0.86468184, -0.6321212, 1., 2.], [-0.86468184, -0.6321212, 1., 2.]]).astype(np.float32)
vmap_celu = vmap(cal_celu, in_axes=(0), out_axes=0)
output = vmap_celu(x)
np.testing.assert_allclose(output.asnumpy(), expect, rtol=error)
@jit
def manually_batched(xs):
output = []
for i in range(xs.shape[0]):
output.append(cal_celu(xs[i]))
return F.stack(output)
expect_m = manually_batched(x)
np.testing.assert_allclose(output.asnumpy(), expect_m.asnumpy(), rtol=error) | null |
5,937 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import json
import os
from typing import Optional
from qtpy.QtCore import QSettings
from mantid.kernel import ConfigService, ErrorReporter, Logger, UsageService
from mantidqt.dialogs.errorreports.report import MAX_STACK_TRACE_LENGTH
class ErrorReporterPresenter(object):
SENDING_ERROR_MESSAGE = "There was an error when sending the report.\nPlease contact [email protected] directly"
def __init__(self, view, exit_code: str, application: str, traceback: Optional[str] = None):
"""
:param view: A reference to the view managed by this presenter
:param exit_code: A string containing the exit_code of the failing application
:param application: A string containing the failing application name
:param traceback: An optional string containing a traceback dumped as JSON-encoded string
"""
self.error_log = Logger("errorreports")
self._view = view
self._exit_code = exit_code
self._application = application
self._traceback = traceback if traceback else ""
self._view.set_report_callback(self.error_handler)
self._view.moreDetailsButton.clicked.connect(self.show_more_details)
if not traceback:
traceback_file_path = os.path.join(ConfigService.getAppDataDirectory(), "{}_stacktrace.txt".format(application))
try:
if os.path.isfile(traceback_file_path):
with open(traceback_file_path, "r") as file:
self._traceback = file.readlines()
new_workspace_name = os.path.join(ConfigService.getAppDataDirectory(), "{}_stacktrace_sent.txt".format(application))
os.rename(traceback_file_path, new_workspace_name)
except OSError:
pass
def forget_contact_info(self):
settings = QSettings()
settings.beginGroup(self._view.CONTACT_INFO)
settings.setValue(self._view.NAME, "")
settings.setValue(self._view.EMAIL, "")
settings.endGroup()
def do_not_share(self, continue_working=True):
self.error_log.notice("No information shared")
self._handle_exit(continue_working)
if not self._view.rememberContactInfoCheckbox.checkState():
self.forget_contact_info()
return -1
def share_non_identifiable_information(self, continue_working, text_box):
uptime = UsageService.getUpTime()
status = self._send_report_to_server(share_identifiable=False, uptime=uptime, text_box=text_box)
self.error_log.notice("Sent non-identifiable information")
self._handle_exit(continue_working)
if not self._view.rememberContactInfoCheckbox.checkState():
self.forget_contact_info()
return status
def share_all_information(self, continue_working, new_name, new_email, text_box):
uptime = UsageService.getUpTime()
status = self._send_report_to_server(share_identifiable=True, uptime=uptime, name=new_name, email=new_email, text_box=text_box)
self.error_log.notice("Sent full information")
self._handle_exit(continue_working)
# Remember name and email in QSettings
if self._view.rememberContactInfoCheckbox.checkState():
settings = QSettings()
settings.beginGroup(self._view.CONTACT_INFO)
settings.setValue(self._view.NAME, new_name)
settings.setValue(self._view.EMAIL, new_email)
settings.endGroup()
else:
self.forget_contact_info()
return status
def error_handler(self, continue_working, share, name, email, text_box):
if share == 0:
status = self.share_all_information(continue_working, name, email, text_box)
elif share == 1:
status = self.share_non_identifiable_information(continue_working, text_box)
elif share == 2:
status = self.do_not_share(continue_working)
else:
self.error_log.error("Unrecognised signal in errorreporter exiting")
self._handle_exit(continue_working)
status = -2
self._view.close_reporter(status)
def _handle_exit(self, continue_working):
if not continue_working:
self.error_log.error("Terminated by user.")
self._view.quit()
else:
self.error_log.error("Continue working.")
def _send_report_to_server(self, share_identifiable=False, name="", email="", uptime="", text_box=""):
stacktrace = "".join(self._traceback)
if len(stacktrace) > MAX_STACK_TRACE_LENGTH:
difference = len(stacktrace) - MAX_STACK_TRACE_LENGTH
stacktrace = self._cut_down_stacktrace()
self.error_log.warning(
f"The middle {difference+5} characters of this stack trace has been removed"
r" and replaced with \n...\n in order to"
f" reduce it to {MAX_STACK_TRACE_LENGTH} characters"
)
errorReporter = ErrorReporter(
self._application, uptime, self._exit_code, share_identifiable, str(name), str(email), str(text_box), stacktrace
)
status = errorReporter.sendErrorReport()
if status != 201:
self._view.display_message_box(
"Error contacting server", self.SENDING_ERROR_MESSAGE, "http request returned with status {}".format(status)
)
self.error_log.error("Failed to send error report http request returned status {}".format(status))
return status
def _cut_down_stacktrace(self):
        # The server enforces a maximum stack trace size; exceeding it causes an error.
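        # Keep the leading and trailing halves of the trace and join them with
        # "\n...\n" (5 chars) so the result stays within MAX_STACK_TRACE_LENGTH.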
stacktrace = "".join(self._traceback)
return stacktrace[: (MAX_STACK_TRACE_LENGTH // 2 - 2)] + "\n...\n" + stacktrace[-(MAX_STACK_TRACE_LENGTH // 2 - 3) :]
def show_view(self):
self._view.show()
def METHOD_NAME(self):
self._view.exec_()
def show_more_details(self):
error_reporter = ErrorReporter(
self._application,
UsageService.getUpTime(),
self._exit_code,
True,
str(self._view.input_name_line_edit.text()),
str(self._view.input_email_line_edit.text()),
str(self._view.input_free_text.toPlainText()),
"".join(self._traceback),
)
error_message_json = json.loads(error_reporter.generateErrorMessage())
stacktrace_text = error_message_json["stacktrace"]
del error_message_json["stacktrace"] # remove this entry so it doesn't appear twice.
user_information = "".join("{}: {}\n".format(key, error_message_json[key]) for key in error_message_json)
self._view.display_more_details(user_information, stacktrace_text) | null |
5,938 | import os
from testtools import TestCase
from mock import (
patch,
MagicMock,
)
from charmhelpers.payload import archive
from tempfile import mkdtemp
from shutil import rmtree
import subprocess
class ArchiveTestCase(TestCase):
def create_archive(self, format):
workdir = mkdtemp()
if format == "tar":
workfile = "{}/foo.tar.gz".format(workdir)
cmd = "tar czf {} hosts".format(workfile)
elif format == "zip":
workfile = "{}/foo.zip".format(workdir)
cmd = "zip {} hosts".format(workfile)
curdir = os.getcwd()
os.chdir("/etc")
subprocess.check_output(cmd, shell=True)
os.chdir(curdir)
self.addCleanup(rmtree, workdir)
return (workfile, ["hosts"])
@patch('os.path.isfile')
def METHOD_NAME(self, _isfile):
tar_archive_handler = archive.extract_tarfile
zip_archive_handler = archive.extract_zipfile
_isfile.return_value = False
for ext in ('tar', 'tar.gz', 'tgz', 'tar.bz2', 'tbz2', 'tbz'):
handler = archive.get_archive_handler("somefile.{}".format(ext))
msg = "handler for extension: {}".format(ext)
self.assertEqual(handler, tar_archive_handler, msg)
for ext in ('zip', 'jar'):
handler = archive.get_archive_handler("somefile.{}".format(ext))
msg = "handler for extension {}".format(ext)
self.assertEqual(handler, zip_archive_handler, msg)
@patch('zipfile.is_zipfile')
@patch('tarfile.is_tarfile')
@patch('os.path.isfile')
def test_gets_archive_hander_by_filetype(self, _isfile, _istarfile,
_iszipfile):
tar_archive_handler = archive.extract_tarfile
zip_archive_handler = archive.extract_zipfile
_isfile.return_value = True
_istarfile.return_value = True
_iszipfile.return_value = False
handler = archive.get_archive_handler("foo")
self.assertEqual(handler, tar_archive_handler)
_istarfile.return_value = False
_iszipfile.return_value = True
handler = archive.get_archive_handler("foo")
self.assertEqual(handler, zip_archive_handler)
@patch('charmhelpers.core.hookenv.charm_dir')
def test_gets_archive_dest_default(self, _charmdir):
_charmdir.return_value = "foo"
thedir = archive.archive_dest_default("baz")
self.assertEqual(thedir, os.path.join("foo", "archives", "baz"))
thedir = archive.archive_dest_default("baz/qux")
self.assertEqual(thedir, os.path.join("foo", "archives", "qux"))
def test_extracts_tarfile(self):
destdir = mkdtemp()
self.addCleanup(rmtree, destdir)
tar_file, contents = self.create_archive("tar")
archive.extract_tarfile(tar_file, destdir)
for path in [os.path.join(destdir, item) for item in contents]:
self.assertTrue(os.path.exists(path))
def test_extracts_zipfile(self):
destdir = mkdtemp()
self.addCleanup(rmtree, destdir)
try:
zip_file, contents = self.create_archive("zip")
except subprocess.CalledProcessError as e:
if e.returncode == 127:
self.skip("Skipping - zip is not installed")
else:
raise
archive.extract_zipfile(zip_file, destdir)
for path in [os.path.join(destdir, item) for item in contents]:
self.assertTrue(os.path.exists(path))
@patch('charmhelpers.core.host.mkdir')
@patch('charmhelpers.payload.archive.get_archive_handler')
@patch('charmhelpers.payload.archive.archive_dest_default')
def test_extracts(self, _defdest, _gethandler, _mkdir):
archive_name = "foo"
archive_handler = MagicMock()
_gethandler.return_value = archive_handler
dest = archive.extract(archive_name, "bar")
_gethandler.assert_called_with(archive_name)
archive_handler.assert_called_with(archive_name, "bar")
_defdest.assert_not_called()
_mkdir.assert_called_with("bar")
self.assertEqual(dest, "bar")
@patch('charmhelpers.core.host.mkdir')
@patch('charmhelpers.payload.archive.get_archive_handler')
def test_unhandled_extract_raises_exc(self, _gethandler, _mkdir):
archive_name = "foo"
_gethandler.return_value = None
self.assertRaises(archive.ArchiveError, archive.extract,
archive_name)
_gethandler.assert_called_with(archive_name)
_mkdir.assert_not_called()
@patch('charmhelpers.core.host.mkdir')
@patch('charmhelpers.payload.archive.get_archive_handler')
@patch('charmhelpers.payload.archive.archive_dest_default')
def test_extracts_default_dest(self, _defdest, _gethandler, _mkdir):
expected_dest = "bar"
archive_name = "foo"
_defdest.return_value = expected_dest
handler = MagicMock()
handler.return_value = expected_dest
_gethandler.return_value = handler
dest = archive.extract(archive_name)
self.assertEqual(expected_dest, dest)
handler.assert_called_with(archive_name, expected_dest) | null |
5,939 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test lite server inference python API.
"""
import mindspore_lite as mslite
import numpy as np
import pytest
# ============================ Context.parallel ============================
def test_context_parallel_workers_num_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.workers_num = "4"
assert "workers_num must be int" in str(raise_info.value)
def test_context_parallel_workers_num_negative_error():
with pytest.raises(ValueError) as raise_info:
context = mslite.Context()
context.parallel.workers_num = -4
assert "workers_num must be a non-negative int" in str(raise_info.value)
def test_context_parallel_config_info_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = 1
assert "config_info must be dict" in str(raise_info.value)
def test_context_parallel_config_info_key_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {1: {"test": "test"}}
assert "config_info_key must be str" in str(raise_info.value)
def test_context_parallel_config_info_value_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": "test"}
assert "config_info_value must be dict" in str(raise_info.value)
def test_context_parallel_config_info_value_key_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": {1: "test"}}
assert "config_info_value_key must be str" in str(raise_info.value)
def METHOD_NAME():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_info = {"test": {"test": 1}}
assert "config_info_value_value must be str" in str(raise_info.value)
def test_context_parallel_config_path_type_error():
with pytest.raises(TypeError) as raise_info:
context = mslite.Context()
context.parallel.config_path = 1
assert "config_path must be str" in str(raise_info.value)
def test_context_parallel_config_path_not_exist_error():
with pytest.raises(ValueError) as raise_info:
context = mslite.Context()
context.parallel.config_path = "test.cfg"
assert "config_path does not exist" in str(raise_info.value)
def test_context_parallel():
config_info = {"weight": {"weight_path": "path of model weight"}}
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
assert "workers num:" in str(context.parallel)
context.parallel.config_info = config_info
assert "config info:" in str(context.parallel)
# ============================ ModelParallelRunner ============================
def test_model_parallel_runner():
model_parallel_runner = mslite.ModelParallelRunner()
assert "model_path:" in str(model_parallel_runner)
def test_model_parallel_runner_build_from_file_model_path_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path=["test.ms"])
assert "model_path must be str" in str(raise_info.value)
def test_model_parallel_runner_build_from_file_model_path_not_exist_error():
with pytest.raises(RuntimeError) as raise_info:
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="test.ms")
assert "model_path does not exist" in str(raise_info.value)
def test_model_parallel_runner_build_from_file_01():
model_parallel_runner = mslite.model.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms")
assert "model_path:" in str(model_parallel_runner)
def test_model_parallel_runner_build_from_file_02():
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
model_parallel_runner = mslite.model.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms", context=context)
assert "model_path:" in str(model_parallel_runner)
def get_model_parallel_runner():
context = mslite.Context()
context.target = ["cpu"]
context.parallel.workers_num = 4
model_parallel_runner = mslite.ModelParallelRunner()
model_parallel_runner.build_from_file(model_path="mobilenetv2.ms", context=context)
return model_parallel_runner
def test_model_parallel_runner_predict_inputs_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
outputs = model_parallel_runner.predict(inputs[0])
assert "inputs must be list" in str(raise_info.value)
def test_model_parallel_runner_predict_inputs_elements_type_error():
with pytest.raises(TypeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
outputs = model_parallel_runner.predict(["input"])
assert "inputs element must be Tensor" in str(raise_info.value)
def test_model_parallel_runner_predict_runtime_error():
with pytest.raises(RuntimeError) as raise_info:
model_parallel_runner = get_model_parallel_runner()
tensor1 = mslite.Tensor()
tensor2 = mslite.Tensor()
inputs = [tensor1, tensor2]
outputs = model_parallel_runner.predict(inputs)
assert "predict failed" in str(raise_info.value)
def test_model_parallel_runner_predict_01():
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
in_data = np.arange(1 * 224 * 224 * 3, dtype=np.float32).reshape((1, 224, 224, 3))
inputs[0].set_data_from_numpy(in_data)
outputs = model_parallel_runner.predict(inputs)
def test_model_parallel_runner_predict_02():
model_parallel_runner = get_model_parallel_runner()
inputs = model_parallel_runner.get_inputs()
input_tensor = mslite.Tensor()
input_tensor.dtype = inputs[0].dtype
input_tensor.shape = inputs[0].shape
input_tensor.format = inputs[0].format
input_tensor.name = inputs[0].name
in_data = np.arange(1 * 224 * 224 * 3, dtype=np.float32).reshape((1, 224, 224, 3))
input_tensor.set_data_from_numpy(in_data)
outputs = model_parallel_runner.predict([input_tensor]) | null |
5,940 | """Store object for a list of games"""
# pylint: disable=not-an-iterable
import time
from gi.repository import GLib, GObject, Gtk
from lutris import settings
from lutris.database import sql
from lutris.database.games import get_games
from lutris.gui.views.store_item import StoreItem
from lutris.util.strings import gtk_safe
from . import (
COL_ID, COL_INSTALLED, COL_INSTALLED_AT, COL_INSTALLED_AT_TEXT, COL_LASTPLAYED, COL_LASTPLAYED_TEXT, COL_MEDIA_PATH,
COL_NAME, COL_PLATFORM, COL_PLAYTIME, COL_PLAYTIME_TEXT, COL_RUNNER, COL_RUNNER_HUMAN_NAME, COL_SLUG, COL_SORTNAME,
COL_YEAR
)
def METHOD_NAME(value):
try:
out = value.lower()
except AttributeError:
out = value
return out
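# Comparison cascade: compare the requested column first, then break ties on the
# sort name, and finally on the runner's human-readable name so ordering is stable.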
def sort_func(model, row1, row2, sort_col):
"""Sorting function for the game store"""
value1 = model.get_value(row1, sort_col)
value2 = model.get_value(row2, sort_col)
if value1 is None and value2 is None:
value1 = value2 = 0
elif value1 is None:
value1 = type(value2)()
elif value2 is None:
value2 = type(value1)()
value1 = METHOD_NAME(value1)
value2 = METHOD_NAME(value2)
diff = -1 if value1 < value2 else 0 if value1 == value2 else 1
if diff == 0:
value1 = METHOD_NAME(model.get_value(row1, COL_SORTNAME))
value2 = METHOD_NAME(model.get_value(row2, COL_SORTNAME))
try:
diff = -1 if value1 < value2 else 0 if value1 == value2 else 1
except TypeError:
diff = 0
if diff == 0:
value1 = METHOD_NAME(model.get_value(row1, COL_RUNNER_HUMAN_NAME))
value2 = METHOD_NAME(model.get_value(row2, COL_RUNNER_HUMAN_NAME))
try:
return -1 if value1 < value2 else 0 if value1 == value2 else 1
except TypeError:
return 0
class GameStore(GObject.Object):
__gsignals__ = {
"icons-changed": (GObject.SIGNAL_RUN_FIRST, None, ()),
}
def __init__(self, service, service_media):
super().__init__()
self.service = service
self.service_media = service_media
self._installed_games = []
self._installed_games_accessed = False
self._icon_updates = {}
self.store = Gtk.ListStore(
str,
str,
str,
str,
str,
str,
str,
str,
str,
int,
str,
bool,
int,
str,
float,
str,
)
@property
def installed_game_slugs(self):
previous_access = self._installed_games_accessed or 0
self._installed_games_accessed = time.time()
if self._installed_games_accessed - previous_access > 1:
self._installed_games = [g["slug"] for g in get_games(filters={"installed": "1"})]
return self._installed_games
def add_games(self, games):
"""Add games to the store"""
for game in list(games):
GLib.idle_add(self.add_game, game)
def get_row_by_slug(self, slug):
for model_row in self.store:
if model_row[COL_SLUG] == slug:
return model_row
def get_row_by_id(self, _id):
if not _id:
return
for model_row in self.store:
try:
if model_row[COL_ID] == str(_id):
return model_row
except TypeError:
return
def remove_game(self, _id):
"""Remove a game from the view."""
row = self.get_row_by_id(_id)
if row:
self.store.remove(row.iter)
def update(self, db_game):
"""Update game informations
Return whether a row was updated; False if the game was not already
present.
"""
store_item = StoreItem(db_game, self.service_media)
row = self.get_row_by_id(store_item.id)
if not row:
row = self.get_row_by_id(db_game["service_id"])
if not row:
return False
row[COL_ID] = str(store_item.id)
row[COL_SLUG] = store_item.slug
row[COL_NAME] = store_item.name
row[COL_SORTNAME] = store_item.sortname if store_item.sortname else store_item.name
row[COL_MEDIA_PATH] = store_item.get_media_path() if settings.SHOW_MEDIA else None
row[COL_YEAR] = store_item.year
row[COL_RUNNER] = store_item.runner
row[COL_RUNNER_HUMAN_NAME] = store_item.runner_text
row[COL_PLATFORM] = store_item.platform
row[COL_LASTPLAYED] = store_item.lastplayed
row[COL_LASTPLAYED_TEXT] = store_item.lastplayed_text
row[COL_INSTALLED] = store_item.installed
row[COL_INSTALLED_AT] = store_item.installed_at
row[COL_INSTALLED_AT_TEXT] = store_item.installed_at_text
row[COL_PLAYTIME] = store_item.playtime
row[COL_PLAYTIME_TEXT] = store_item.playtime_text
return True
def add_game(self, db_game):
"""Add a PGA game to the store"""
game = StoreItem(db_game, self.service_media)
self.store.append(
(
str(game.id),
game.slug,
game.name,
game.sortname if game.sortname else game.name,
game.get_media_path() if settings.SHOW_MEDIA else None,
game.year,
game.runner,
game.runner_text,
gtk_safe(game.platform),
game.lastplayed,
game.lastplayed_text,
game.installed,
game.installed_at,
game.installed_at_text,
game.playtime,
game.playtime_text,
)
)
def on_game_updated(self, game):
if self.service:
db_games = sql.filtered_query(
settings.PGA_DB,
"service_games",
filters=({
"service": self.service_media.service,
"appid": game.appid
})
)
else:
db_games = sql.filtered_query(
settings.PGA_DB,
"games",
filters=({
"id": game.id
})
)
for db_game in db_games:
GLib.idle_add(self.update, db_game)
return True | null |
5,941 | """
Briefing generation logic
"""
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Dict, List, TYPE_CHECKING
from dcs.mission import Mission
from jinja2 import Environment, FileSystemLoader, select_autoescape
from game.ato.flightwaypoint import FlightWaypoint
from game.ground_forces.combat_stance import CombatStance
from game.radio.radios import RadioFrequency
from game.runways import RunwayData
from game.theater import ControlPoint, FrontLine
from .aircraft.flightdata import FlightData
from .airsupportgenerator import AwacsInfo, TankerInfo
from .flotgenerator import JtacInfo
if TYPE_CHECKING:
from game import Game
@dataclass
class CommInfo:
"""Communications information for the kneeboard."""
name: str
freq: RadioFrequency
class FrontLineInfo:
def __init__(self, front_line: FrontLine):
self.front_line: FrontLine = front_line
self.player_base: ControlPoint = front_line.blue_cp
self.enemy_base: ControlPoint = front_line.red_cp
self.player_zero: bool = self.player_base.base.total_armor == 0
self.enemy_zero: bool = self.enemy_base.base.total_armor == 0
self.advantage: bool = (
self.player_base.base.total_armor > self.enemy_base.base.total_armor
)
self.stance: CombatStance = self.player_base.stances[self.enemy_base.id]
self.combat_stances = CombatStance
class MissionInfoGenerator:
"""Base type for generators of mission information for the player.
Examples of subtypes include briefing generators, kneeboard generators, etc.
"""
def __init__(self, mission: Mission, game: Game) -> None:
self.mission = mission
self.game = game
self.awacs: List[AwacsInfo] = []
self.comms: List[CommInfo] = []
self.flights: List[FlightData] = []
self.jtacs: List[JtacInfo] = []
self.tankers: List[TankerInfo] = []
self.frontlines: List[FrontLineInfo] = []
self.dynamic_runways: List[RunwayData] = []
def add_awacs(self, awacs: AwacsInfo) -> None:
"""Adds an AWACS/GCI to the mission.
Args:
awacs: AWACS information.
"""
self.awacs.append(awacs)
def add_comm(self, name: str, freq: RadioFrequency) -> None:
"""Adds communications info to the mission.
Args:
name: Name of the radio channel.
freq: Frequency of the radio channel.
"""
self.comms.append(CommInfo(name, freq))
def add_flight(self, flight: FlightData) -> None:
"""Adds flight info to the mission.
Args:
flight: Flight information.
"""
self.flights.append(flight)
def add_jtac(self, jtac: JtacInfo) -> None:
"""Adds a JTAC to the mission.
Args:
jtac: JTAC information.
"""
self.jtacs.append(jtac)
def add_tanker(self, tanker: TankerInfo) -> None:
"""Adds a tanker to the mission.
Args:
tanker: Tanker information.
"""
self.tankers.append(tanker)
def add_frontline(self, frontline: FrontLineInfo) -> None:
"""Adds a frontline to the briefing
Arguments:
frontline: Frontline conflict information
"""
self.frontlines.append(frontline)
def add_dynamic_runway(self, runway: RunwayData) -> None:
"""Adds a dynamically generated runway to the briefing.
Dynamic runways are any valid landing point that is a unit rather than a
map feature. These include carriers, ships with a helipad, and FARPs.
"""
self.dynamic_runways.append(runway)
def generate(self) -> None:
"""Generates the mission information."""
raise NotImplementedError
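# The two helpers below are registered as the Jinja filters "waypoint_timing" and
# "intra_flight_channel" in BriefingGenerator and used by the briefing template.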
def METHOD_NAME(waypoint: FlightWaypoint, depart_prefix: str) -> str:
if waypoint.tot is not None:
return f"{waypoint.tot.time()} "
elif waypoint.departure_time is not None:
return f"{depart_prefix} {waypoint.departure_time.time()} "
return ""
def format_intra_flight_channel(flight: FlightData) -> str:
frequency = flight.intra_flight_channel
channel = flight.channel_for(frequency)
if channel is None:
return str(frequency)
channel_name = flight.aircraft_type.channel_name(channel.radio_id, channel.channel)
return f"{channel_name} ({frequency})"
class BriefingGenerator(MissionInfoGenerator):
def __init__(self, mission: Mission, game: Game):
super().__init__(mission, game)
self.allied_flights_by_departure: Dict[str, List[FlightData]] = {}
env = Environment(
loader=FileSystemLoader("resources/briefing/templates"),
autoescape=select_autoescape(
disabled_extensions=("",),
default_for_string=True,
default=True,
),
trim_blocks=True,
lstrip_blocks=True,
)
env.filters["waypoint_timing"] = METHOD_NAME
env.filters["intra_flight_channel"] = format_intra_flight_channel
self.template = env.get_template("briefingtemplate_EN.j2")
def generate(self) -> None:
"""Generate the mission briefing"""
self._generate_frontline_info()
self.generate_allied_flights_by_departure()
self.mission.set_description_text(self.template.render(vars(self)))
self.mission.add_picture_blue(
os.path.abspath("./resources/ui/splash_screen.png")
)
def _generate_frontline_info(self) -> None:
"""Build FrontLineInfo objects from FrontLine type and append to briefing."""
for front_line in self.game.theater.conflicts():
self.add_frontline(FrontLineInfo(front_line))
# TODO: This should determine if runway is friendly through a method more robust than the existing string match
def generate_allied_flights_by_departure(self) -> None:
"""Create iterable to display allied flights grouped by departure airfield."""
for flight in self.flights:
if not flight.client_units and flight.friendly:
name = flight.departure.airfield_name
if (
name in self.allied_flights_by_departure
): # where else can we get this?
self.allied_flights_by_departure[name].append(flight)
else:
self.allied_flights_by_departure[name] = [flight] | null |
5,942 | import os
import random
import pytest
from solana.rpc.api import Client
from solana.publickey import PublicKey
from solana.rpc.commitment import Confirmed
from .solana_utils import neon_cli, create_treasury_pool_address, get_neon_balance, get_transaction_count
from .solana_utils import solana_client, wait_confirm_transaction, get_solana_balance, send_transaction
from .utils.constants import SOLANA_URL
from .utils.contract import deploy_contract
from .utils.ethereum import make_eth_transaction
from eth_utils import abi, to_text
from .utils.instructions import TransactionWithComputeBudget, make_PartialCallOrContinueFromRawEthereumTX
from .utils.storage import create_holder
def gen_hash_of_block(size: int) -> str:
"""Generates a block hash of the given size"""
try:
block_hash = hex(int.from_bytes(os.urandom(size), "big"))
if bytes.fromhex(block_hash[2:]) or len(block_hash[2:]) != size * 2:
return block_hash
except ValueError:
return gen_hash_of_block(size)
def test_emulate_transfer(user_account, evm_loader, session_user):
result = neon_cli().emulate(
evm_loader.loader_id,
user_account.eth_address.hex(),
session_user.eth_address.hex(),
data=None
)
    assert result['exit_status'] == 'succeed', f"The 'exit_status' field is not 'succeed'. Result: {result}"
    assert result['steps_executed'] == 1, f"Steps executed amount is not 1. Result: {result}"
    assert result['used_gas'] > 0, f"Used gas is not greater than 0. Result: {result}"
def test_emulate_contract_deploy(user_account, evm_loader):
contract_path = pytest.CONTRACTS_PATH / "hello_world.binary"
with open(contract_path, 'rb') as f:
contract_code = f.read()
result = neon_cli().emulate(
evm_loader.loader_id,
user_account.eth_address.hex(),
'deploy',
contract_code.hex()
)
    assert result['exit_status'] == 'succeed', f"The 'exit_status' field is not 'succeed'. Result: {result}"
    assert result['steps_executed'] > 0, f"Steps executed amount is not greater than 0. Result: {result}"
    assert result['used_gas'] > 0, f"Used gas is not greater than 0. Result: {result}"
def test_emulate_call_contract_function(user_account, evm_loader, operator_keypair, treasury_pool):
contract = deploy_contract(operator_keypair, user_account, "hello_world.binary", evm_loader, treasury_pool)
assert contract.eth_address
assert get_solana_balance(contract.solana_address) > 0
data = abi.function_signature_to_4byte_selector('call_hello_world()')
result = neon_cli().emulate(
evm_loader.loader_id,
user_account.eth_address.hex(),
contract.eth_address.hex(),
data.hex()
)
    assert result['exit_status'] == 'succeed', f"The 'exit_status' field is not 'succeed'. Result: {result}"
    assert result['steps_executed'] > 0, f"Steps executed amount is 0. Result: {result}"
    assert result['used_gas'] > 0, f"Used gas is not greater than 0. Result: {result}"
assert "Hello World" in to_text(result["result"])
def test_neon_elf_params(evm_loader):
result = neon_cli().call(f"--evm_loader={evm_loader.loader_id} neon-elf-params")
some_fields = ['NEON_CHAIN_ID', 'NEON_TOKEN_MINT', 'NEON_REVISION']
for field in some_fields:
assert field in result, f"The field {field} is not in result {result}"
assert result[field] != "", f"The value for fiels {field} is empty"
def test_collect_treasury(evm_loader):
command_args = f"collect-treasury --evm_loader {evm_loader.loader_id}"
index = random.randint(0, 127)
treasury_pool_address = create_treasury_pool_address(index)
result = neon_cli().call(command_args)
main_pool_address = PublicKey(result["pool_address"])
balance_before = get_solana_balance(main_pool_address)
amount = random.randint(1, 1000)
trx = solana_client.request_airdrop(treasury_pool_address, amount)
wait_confirm_transaction(solana_client, trx.value)
result = neon_cli().call(command_args)
balance_after = get_solana_balance(PublicKey(main_pool_address))
assert balance_after >= balance_before + amount
def test_init_environment(evm_loader):
result = neon_cli().call(f"init-environment --evm_loader {evm_loader.loader_id}")
assert len(result["transactions"]) == 0
def METHOD_NAME(evm_loader, user_account):
result = neon_cli().call(
f"get-ether-account-data --evm_loader {evm_loader.loader_id} {user_account.eth_address.hex()}")
assert f"0x{user_account.eth_address.hex()}" == result["address"]
assert str(user_account.solana_account_address) == result["solana_address"]
assert solana_client.get_account_info(user_account.solana_account.public_key).value is not None
def test_create_ether_account(evm_loader):
acc = gen_hash_of_block(20)
result = neon_cli().call(
f"create-ether-account --evm_loader {evm_loader.loader_id} {acc}")
acc_info = solana_client.get_account_info(PublicKey(result['solana_address']), commitment=Confirmed)
assert acc_info.value is not None
def test_deposit(evm_loader, user_account):
amount = random.randint(1, 100000)
result = neon_cli().call(
f"deposit --evm_loader {evm_loader.loader_id} {amount} {user_account.eth_address.hex()}")
balance_after = get_neon_balance(solana_client, user_account.solana_account_address)
assert result["transaction"] is not None
assert balance_after == amount * 1000000000
def test_get_storage_at(evm_loader, operator_keypair, user_account, treasury_pool):
contract = deploy_contract(operator_keypair, user_account, "hello_world.binary", evm_loader, treasury_pool)
expected_storage = '0000000000000000000000000000000000000000000000000000000000000005'
result = neon_cli().call(
f"get-storage-at --evm_loader {evm_loader.loader_id} {contract.eth_address.hex()} 0x0")
assert result == expected_storage
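# test_cancel_trx below starts a partial (iterative) Neon transaction via
# make_PartialCallOrContinueFromRawEthereumTX, leaves it unfinished, then cancels it
# with `neon-cli cancel-trx` and checks that the user's transaction count advanced.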
def test_cancel_trx(evm_loader, user_account, rw_lock_contract, operator_keypair, treasury_pool):
func_name = abi.function_signature_to_4byte_selector('unchange_storage(uint8,uint8)')
data = (func_name + bytes.fromhex("%064x" % 0x01) + bytes.fromhex("%064x" % 0x01))
eth_transaction = make_eth_transaction(
rw_lock_contract.eth_address,
data,
user_account.solana_account,
user_account.solana_account_address,
)
storage_account = create_holder(operator_keypair)
instruction = eth_transaction.rawTransaction
trx = TransactionWithComputeBudget(operator_keypair)
trx.add(
make_PartialCallOrContinueFromRawEthereumTX(
instruction,
operator_keypair, evm_loader, storage_account, treasury_pool.account, treasury_pool.buffer, 1,
[
rw_lock_contract.solana_address,
user_account.solana_account_address,
]
)
)
solana_client = Client(SOLANA_URL)
receipt = send_transaction(solana_client, trx, operator_keypair)
assert receipt.value.transaction.meta.err is None
user_nonce = get_transaction_count(solana_client, user_account.solana_account_address)
result = neon_cli().call(f"cancel-trx --evm_loader={evm_loader.loader_id} {storage_account}")
assert result["transaction"] is not None
assert user_nonce < get_transaction_count(solana_client, user_account.solana_account_address) | null |
5,943 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests for Cylc scheduler server."""
import logging
import socket
from time import time
from types import SimpleNamespace
from typing import List
from unittest.mock import MagicMock, Mock
import pytest
from cylc.flow import CYLC_LOG
from cylc.flow.exceptions import InputError
from cylc.flow.scheduler import Scheduler
from cylc.flow.scheduler_cli import RunOptions
from cylc.flow.task_pool import TaskPool
from cylc.flow.task_proxy import TaskProxy
from cylc.flow.workflow_status import AutoRestartMode
@pytest.mark.parametrize(
'opts_to_test, is_restart, err_msg',
[
pytest.param(
['icp', 'startcp', 'starttask'],
True,
"option --{} is not valid for restart",
id="start opts on restart"
),
pytest.param(
['icp', 'startcp', 'starttask'],
False,
"option --{}=reload is not valid",
id="start opts =reload"
),
pytest.param(
['fcp', 'stopcp'],
False,
"option --{}=reload is only valid for restart",
id="end opts =reload when not restart"
),
]
)
def test_check_startup_opts(
opts_to_test: List[str],
is_restart: bool,
err_msg: str
) -> None:
"""Test Scheduler._check_startup_opts()"""
for opt in opts_to_test:
mocked_scheduler = Mock(is_restart=is_restart)
mocked_scheduler.options = SimpleNamespace(**{opt: 'reload'})
with pytest.raises(InputError) as excinfo:
Scheduler._check_startup_opts(mocked_scheduler)
assert(err_msg.format(opt) in str(excinfo))
@pytest.mark.parametrize(
'auto_restart_time, expected',
[
(-1, True),
(0, True),
(1, False),
(None, False),
]
)
def test_should_auto_restart_now(
auto_restart_time, expected, monkeypatch: pytest.MonkeyPatch
):
"""Test Scheduler.should_auto_restart_now()."""
time_now = time()
monkeypatch.setattr('cylc.flow.scheduler.time', lambda: time_now)
if auto_restart_time is not None:
auto_restart_time += time_now
mock_schd = Mock(spec=Scheduler, auto_restart_time=auto_restart_time)
assert Scheduler.should_auto_restart_now(mock_schd) == expected
def test_release_queued_tasks__auto_restart():
"""Test that Scheduler.release_queued_tasks() works as expected
during auto restart."""
mock_schd = Mock(
auto_restart_time=(time() - 100),
auto_restart_mode=AutoRestartMode.RESTART_NORMAL,
is_paused=False,
stop_mode=None,
pool=Mock(
spec=TaskPool,
get_tasks=lambda: [Mock(spec=TaskProxy)]
),
workflow='parachutes',
options=RunOptions(),
task_job_mgr=MagicMock()
)
Scheduler.release_queued_tasks(mock_schd)
# Should not actually release any more tasks, just submit the
# preparing ones
mock_schd.pool.release_queued_tasks.assert_not_called()
mock_schd.task_job_mgr.submit_task_jobs.assert_called()
def METHOD_NAME(monkeypatch, caplog, log_filter):
"""Ensure that DNS errors in host selection are caught."""
def _select_workflow_host(cached=False):
# fake a "get address info" error
# this error can occur due to an unknown host resulting from broken
# DNS or an invalid host name in the global config
raise socket.gaierror('elephant')
monkeypatch.setattr(
'cylc.flow.scheduler.select_workflow_host',
_select_workflow_host,
)
schd = Mock(
workflow='myworkflow',
options=RunOptions(abort_if_any_task_fails=False),
INTERVAL_AUTO_RESTART_ERROR=0,
)
caplog.set_level(logging.ERROR, CYLC_LOG)
assert not Scheduler.workflow_auto_restart(schd, max_retries=2)
assert log_filter(caplog, contains='elephant')
def test_auto_restart_popen_error(monkeypatch, caplog, log_filter):
"""Ensure that subprocess errors are handled."""
def _select_workflow_host(cached=False):
# mock a host-select return value
return ('foo', 'foo')
monkeypatch.setattr(
'cylc.flow.scheduler.select_workflow_host',
_select_workflow_host,
)
def _popen(*args, **kwargs):
# mock an auto-restart command failure
return Mock(
wait=lambda: 1,
communicate=lambda: ('mystdout', 'mystderr'),
)
monkeypatch.setattr(
'cylc.flow.scheduler.Popen',
_popen,
)
schd = Mock(
workflow='myworkflow',
options=RunOptions(abort_if_any_task_fails=False),
INTERVAL_AUTO_RESTART_ERROR=0,
)
caplog.set_level(logging.ERROR, CYLC_LOG)
assert not Scheduler.workflow_auto_restart(schd, max_retries=2)
assert log_filter(caplog, contains='mystderr') | null |
5,944 | import operator as op
from functools import reduce
import numpy as np
def combination_Cnr(n, r):
r = min(r, n - r)
numer = reduce(op.mul, range(n, n - r, -1), 1)
denom = reduce(op.mul, range(1, r + 1), 1)
return numer / denom
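# Exact tail probability of Fisher's g statistic (the largest periodogram ordinate
# relative to the total) with a = (N - 1) // 2 Fourier frequencies:
#     P(g > x) = sum_{k=1..floor(1/x)} (-1)**(k - 1) * C(a, k) * (1 - k * x)**(a - 1)
# fisher_test_pValue_by_formula below accumulates exactly this alternating sum.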
def fisher_test_pValue_by_formula(ts_len, g_f):
a = int((ts_len - 1) / 2)
x = g_f # observed value of the test statistic
b = int(1 / x)
pValue = 0.0
for k in range(1, b + 1):
try:
cak = combination_Cnr(a, k)
except OverflowError:
cak = 1e300
expo = (1 - k * x)**(a - 1)
curr = (-1)**(k - 1) * cak * expo
pValue += curr
return pValue
def fisher_test_critical_value(ts_len, alpha_th):
# return g_f, alpha
if ts_len < 10 or ts_len > 4097: # 41-4001
raise ValueError('only support 41<=ts_len<=4096')
N = ts_len
n = int((N - 1) / 2)
Set_n = [5, 20, 30, 50, 100, 500, 1000, 2000, 2100]
if alpha_th == 0.05:
alpha_precision_th = 1e-3
        # Set_g_f holds the critical values for the Fisher test, taken from papers
Set_g_f = [
0.684, 0.270, 0.198, 0.131, 0.0443 / 0.6, 0.011 / 0.6,
0.00592 / 0.6, 0.00317 / 0.6, 0.0050473
]
elif alpha_th == 0.01:
alpha_precision_th = 1e-4
Set_g_f = [
0.789, 0.330, 0.241, 0.160, 0.0533 / 0.6, 0.0129 / 0.6,
0.00687 / 0.6, 0.00365 / 0.6, 0.005819
]
else:
raise ValueError('only support alpha_th = 0.05 or 0.01')
for idx in range(len(Set_n)):
if Set_n[idx] <= n < Set_n[idx + 1]:
# binaray search
g_f_high = Set_g_f[idx]
g_f_low = Set_g_f[idx + 1]
while g_f_low <= g_f_high:
g_f = g_f_low + (g_f_high - g_f_low) / 2
alpha = fisher_test_pValue_by_formula(ts_len, g_f)
if np.abs(alpha - alpha_th) < alpha_precision_th:
return g_f, alpha
elif alpha > alpha_th:
g_f_low = g_f
else:
g_f_high = g_f
def METHOD_NAME(periodogram_input, ts_len, alph_th=1e-50):
# based on paper "Statistical power of Fisher test for the detection
# of short periodic gene expression profiles"
    # formulas 1-4. Tested: the following usage
# periodogram_values, ts_len = fft_periodogram(input_data)
# period_candi, pValue, observed_g =
# fisher_g_test(periodogram_values, ts_len)
# observed_g, pValue, 1/period_candi (when there is period)
# is the same as
# https://rdrr.io/cran/ptest/man/ptestg.html
# import rpy2.robjects as robjects
# from rpy2.robjects.packages import importr
# from rpy2.robjects import pandas2ri
# pandas2ri.activate()
# ptest=importr('ptest')
# ptestg=robjects.r('ptestg')
# Fisher = ptestg(input_data,method="Fisher")
# print(Fisher)
periodogram_values = periodogram_input.copy()
periodogram_values[:2] = 0 # do not consider 0,1-> DC and T != N
max_p_value = np.max(periodogram_values)
max_p_idx = np.argmax(periodogram_values)
sum_p_values = np.sum(periodogram_values)
# handle zero case
if sum_p_values == 0.0:
sum_p_values = 1.0
observed_g = max_p_value / sum_p_values
# handle zero case
if observed_g == 0.0:
observed_g = 1.0
x = observed_g
a = int((ts_len - 1) / 2)
b = int(1 / x)
pValue = 0.0
for k in range(1, b + 1):
try:
cak = combination_Cnr(a, k)
except OverflowError:
cak = 1e300
expo = (1 - k * x)**(a - 1)
curr = (-1)**(k - 1) * cak * expo
pValue += curr
# special processing for non-period signal
if max_p_idx == 0 or max_p_idx == 1:
period_candi = 0
period_range = None
per_T = 0
else:
per_T = np.round(ts_len / max_p_idx).astype(int)
if pValue <= alph_th:
period_candi = per_T
# calculate period_range
N = ts_len
k = max_p_idx
low = int((N / (k + 1) + N / k) / 2 - 1)
high = int((N / k + N / (k - 1)) / 2 + 1)
period_range = np.arange(low, high + 1)
else: # period_candi=0 indicate no period
period_candi = 0
period_range = None
# output
return period_candi, period_range, per_T, pValue, observed_g
def siegel_test_alpha_by_formula(ts_len, g_f, t, lamb=0.6):
# passed only for n<=50
N = ts_len
n = int((N - 1) / 2)
pValue = 0.0
for ell in range(1, n + 1): # 1 ~ n
for k in range(0, ell): # 0 ~ ell-1
c_nl = combination_Cnr(n, ell)
c_lk = combination_Cnr(ell - 1, k)
c_nk = combination_Cnr(n - 1, k)
comb = c_nl * c_lk * c_nk
pos_part = 1 - ell * lamb * g_f - t
pos_part = max(pos_part, 0)**(n - k - 1)
curr = (-1)**(k + ell + 1) * comb * (t**k) * pos_part
pValue += curr
return pValue
def siegel_test_critical_value(ts_len, alpha_th, lamb=0.6):
# return g_f, alpha
if ts_len < 13 or ts_len > 4097: # 41-4001
raise ValueError('only support 13<=ts_len<=4096')
if lamb != 0.6:
raise ValueError('only support lamb=0.6')
N = ts_len
n = int((N - 1) / 2)
alpha = 100
if ts_len > 100:
t_6 = siegel_test_critical_value_by_interpolation(ts_len, alpha_th)
return t_6, alpha
else:
Set_n = [5, 10, 20, 30, 50]
if alpha_th == 0.05:
alpha_precision_th = 1e-3
# Set_g_f is critical value for fisher_test based on papers
Set_t_6 = [0.274, 0.181, 0.116, 0.088, 0.0616]
elif alpha_th == 0.01:
alpha_precision_th = 1e-4
Set_t_6 = [0.315, 0.214, 0.134, 0.0993, 0.0673]
else:
raise ValueError('only support alpha_th = 0.05 or 0.01')
g_f, alpha_tmp = fisher_test_critical_value(ts_len, alpha_th)
for idx in range(len(Set_n)):
if Set_n[idx] <= n < Set_n[idx + 1]:
# binaray search
t_6_high = Set_t_6[idx]
t_6_low = Set_t_6[idx + 1]
while t_6_low <= t_6_high:
t_6 = t_6_low + (t_6_high - t_6_low) / 2
alpha = siegel_test_alpha_by_formula(ts_len, g_f, t_6)
if np.abs(alpha - alpha_th) < alpha_precision_th:
return t_6, alpha
elif alpha > alpha_th:
t_6_low = t_6
else:
t_6_high = t_6
def siegel_test_critical_value_by_interpolation(ts_len, alpha_th):
# this one fixed lambda=0.6
if ts_len < 41 or ts_len > 4097: # 41-4001
raise ValueError('only support 41<=ts_len<=4096')
N = ts_len
m = int((N - 1) / 2)
if alpha_th == 0.05:
critical_value = 1.033 * (m**(-0.72356))
elif alpha_th == 0.01:
critical_value = 1.4987 * (m**(-0.79695))
else:
raise ValueError('only support alpha_th = 0.05 or 0.01')
return critical_value | null |
5,945 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
from mindspore import nn, context, Tensor
from mindspore.ops.operations import Lerp
from .test_grad_of_dynamic import TestDynamicGrad
class NetLerp(nn.Cell):
def __init__(self):
super(NetLerp, self).__init__()
self.lerp = Lerp()
def construct(self, x, y, z):
return self.lerp(x, y, z)
def grad_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetLerp())
x = np.array([[1.0, -1.0, 2.0], [3.1, 2, 1.0]], dtype=np.float32)
y = np.array([[1.2, -1.0, 2.1], [3.0, 2.0, 1.1]], dtype=np.float32)
z = np.array([[1.0, -1.2, 0.9], [0.1, 2.0, 1.0]], dtype=np.float32)
test_dynamic.test_dynamic_grad_net([Tensor(x), Tensor(y), Tensor(z)], is_dynamic_rank)
def grad_partial_dyn_case(is_dynamic_rank):
test_dynamic = TestDynamicGrad(NetLerp())
test_dynamic.skip_convert_in_ids = [2]
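    # skip_convert_in_ids = [2] presumably keeps the third input static so that only
    # the first two inputs are made dynamic, hence the "partial" dynamic case.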
x = np.array([[1.0, -1.0, 2.0], [3.1, 2, 1.0]], dtype=np.float32)
y = np.array([[1.2, -1.0, 2.1], [3.0, 2.0, 1.1]], dtype=np.float32)
z = np.array([[1.0, -1.2, 0.9], [0.1, 2.0, 1.0]], dtype=np.float32)
test_dynamic.test_dynamic_grad_net([Tensor(x), Tensor(y), Tensor(z)], is_dynamic_rank)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad_dynamic_shape():
"""
Feature: test Lerp grad dynamic shape.
Description: input is dynamic shape.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(False)
grad_partial_dyn_case(False)
context.set_context(mode=context.GRAPH_MODE)
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.level1
@pytest.mark.platform_x86_cpu
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_grad_dynamic_rank():
"""
Feature: test Lerp grad dynamic rank.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE)
grad_dyn_case(True)
grad_partial_dyn_case(True)
context.set_context(mode=context.GRAPH_MODE)
grad_dyn_case(True)
grad_partial_dyn_case(True)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: test Lerp grad dynamic rank on Ascend.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(False)
grad_partial_dyn_case(False)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad_dyn_case(False)
grad_partial_dyn_case(False)
@pytest.mark.skip(reason="Ascend does not support dynamic shape")
@pytest.mark.level2
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_ascend_grad_dynamic_rank():
"""
Feature: test Lerp grad dynamic rank on Ascend.
Description: input is dynamic rank.
Expectation: the result match with static shape
"""
context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend")
grad_dyn_case(True)
grad_partial_dyn_case(True)
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
grad_dyn_case(True)
grad_partial_dyn_case(True) | null |
5,946 | # Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation for internal polymorphism `not in` operations."""
from __future__ import absolute_import
from mindspore.ops.operations import _inner_ops as inner
from mindspore.ops.composite.multitype_ops import _constexpr_utils as const_utils
from mindspore.ops.composite.multitype_ops import _compile_utils as compile_utils
from mindspore.ops import functional as F
from mindspore.ops.composite import base
from mindspore.ops.operations._sequence_ops import InSequence
not_in_ = base.MultitypeFuncGraph("not_in", True, True)
"""
"not_in_" is a multi type func graph object which will determine if a not in b.
using ".register" decorator
"""
@not_in_.register("Number", "Tuple")
def METHOD_NAME(x, y):
"""
Determine if a number not in tuple.
Args:
x (Number): x
y (tuple): y
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y) or not F.isconstant(x):
return not InSequence()(x, y)
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("Number", "List")
def _number_not_in_list(x, y):
"""
Determine if a number not in list.
Args:
x (Number): x
y (list): y
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y) or not F.isconstant(x):
return not InSequence()(x, y)
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("String", "String")
def _string_not_in_string(x, y):
"""
Determine if a str not in another str.
Args:
x (str): x
y (str): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not inner.string_in(x, y)
@not_in_.register("String", "Tuple")
def _string_not_in_tuple(x, y):
"""
Determine if a str not in a tuple.
Args:
x (str): x
y (tuple): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("String", "List")
def _string_not_in_list(x, y):
"""
Determine if a str not in a list.
Args:
x (str): x
y (list): y
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.scalar_in_sequence(x, y)
@not_in_.register("Tensor", "Dictionary")
@not_in_.register("Tuple", "Dictionary")
@not_in_.register("Number", "Dictionary")
@not_in_.register("String", "Dictionary")
def _str_not_in_dict(x, y):
"""
Determine if an element is not in dict.
Args:
x: Tensor, Tuple, Number, String
y: dict
Returns:
bool, if x not in y return true, x in y return false.
"""
return F.not_in_dict(x, y)
@not_in_.register("Tensor", "List")
def _tensor_not_in_list(x, y):
"""
Determine if a tensor not in a list.
Args:
x: Tensor
y: List
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y):
return not InSequence()(x, y)
return not compile_utils.tensor_in_sequence(x, y)
@not_in_.register("Tensor", "Tuple")
def _tensor_not_in_tuple(x, y):
"""
Determine if a tensor not in a tuple.
Args:
x: Tensor
y: Tuple
Returns:
bool, if x not in y return true, x in y return false.
"""
if F.is_sequence_shape_unknown(y):
return not InSequence()(x, y)
return not compile_utils.tensor_in_sequence(x, y)
@not_in_.register("mstype", "List")
def _mstype_not_in_list(x, y):
"""
Determine if a mindspore type is not in a list.
Args:
x: mstype
y: List
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.check_in_sequence(x, y)
@not_in_.register("mstype", "Tuple")
def _mstype_not_in_tuple(x, y):
"""
Determine if a mindspore type is not in a tuple.
Args:
x: mstype
y: Tuple
Returns:
bool, if x not in y return true, x in y return false.
"""
return not const_utils.check_in_sequence(x, y) | null |
5,947 | import os
from pkg_resources import parse_version
import shutil
import subprocess
import tempfile
from testtools import TestCase
from mock import (
MagicMock,
patch,
)
from charmhelpers.core.host import chdir
from urllib.parse import urlparse
try:
from charmhelpers.fetch import (
giturl,
UnhandledSource,
)
except ImportError:
giturl = None
UnhandledSource = None
class GitUrlFetchHandlerTest(TestCase):
def setUp(self):
super(GitUrlFetchHandlerTest, self).setUp()
self.valid_urls = (
"http://example.com/git-branch",
"https://example.com/git-branch",
"git://example.com/git-branch",
)
self.invalid_urls = (
"file://example.com/foo.tar.bz2",
"abc:example",
"garbage",
)
self.fh = giturl.GitUrlFetchHandler()
self._git_version_2_28_plus = None
@property
def is_git_version_2_28_plus(self):
if self._git_version_2_28_plus is None:
try:
cmd = "git --version"
version = subprocess.check_output(cmd.split()).decode()
if version:
version = version.split()[-1]
if parse_version(version) >= parse_version("2.28"):
self._git_version_2_28_plus = True
except Exception:
# any error, assume it wasn't 2.28+
self._git_version_2_28_plus = False
return self._git_version_2_28_plus
def METHOD_NAME(self):
for url in self.valid_urls:
result = self.fh.can_handle(url)
self.assertEqual(result, True, url)
for url in self.invalid_urls:
result = self.fh.can_handle(url)
self.assertNotEqual(result, True, url)
@patch.object(giturl, 'check_output')
def test_clone(self, check_output):
dest_path = "/destination/path"
branch = "main" if self.is_git_version_2_28_plus else "master"
for url in self.valid_urls:
self.fh.remote_branch = MagicMock()
self.fh.load_plugins = MagicMock()
self.fh.clone(url, dest_path, branch, None)
check_output.assert_called_with(
['git', 'clone', url, dest_path, '--branch', branch], stderr=-2)
for url in self.invalid_urls:
with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
self.assertRaises(UnhandledSource, self.fh.clone, url,
dest_path, None,
branch)
def test_clone_functional(self):
src = None
dst = None
try:
src = tempfile.mkdtemp()
with chdir(src):
if self.is_git_version_2_28_plus:
subprocess.check_output(['git', 'init',
'--initial-branch', 'main'])
else:
subprocess.check_output(['git', 'init'])
subprocess.check_output(['git', 'config', 'user.name', 'Joe'])
subprocess.check_output(
['git', 'config', 'user.email', '[email protected]'])
subprocess.check_output(['touch', 'foo'])
subprocess.check_output(['git', 'add', 'foo'])
subprocess.check_output(['git', 'commit', '-m', 'test'])
dst = tempfile.mkdtemp()
os.rmdir(dst)
if self.is_git_version_2_28_plus:
self.fh.clone(src, dst, "main")
assert os.path.exists(os.path.join(dst, '.git'))
self.fh.clone(src, dst, "main") # idempotent
assert os.path.exists(os.path.join(dst, '.git'))
else:
self.fh.clone(src, dst)
assert os.path.exists(os.path.join(dst, '.git'))
self.fh.clone(src, dst) # idempotent
assert os.path.exists(os.path.join(dst, '.git'))
finally:
if src:
shutil.rmtree(src, ignore_errors=True)
if dst:
shutil.rmtree(dst, ignore_errors=True)
def test_installs(self):
self.fh.clone = MagicMock()
for url in self.valid_urls:
branch_name = urlparse(url).path.strip("/").split("/")[-1]
dest = os.path.join('foo', 'fetched',
os.path.basename(branch_name))
with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
where = self.fh.install(url)
self.assertEqual(where, dest)
def test_installs_specified_dest(self):
self.fh.clone = MagicMock()
for url in self.valid_urls:
branch_name = urlparse(url).path.strip("/").split("/")[-1]
dest_repo = os.path.join('/tmp/git/',
os.path.basename(branch_name))
with patch.dict('os.environ', {'CHARM_DIR': 'foo'}):
where = self.fh.install(url, dest="/tmp/git")
self.assertEqual(where, dest_repo) | null |
5,948 | from __future__ import annotations
import heapq
import math
from collections import defaultdict
from dataclasses import dataclass, field
from enum import Enum, auto
from typing import Dict, Iterator, List, Optional, Set, Tuple
from .conflicttheater import ConflictTheater
from .controlpoint import ControlPoint
class NoPathError(RuntimeError):
def __init__(self, origin: ControlPoint, destination: ControlPoint) -> None:
super().__init__(f"Could not reconstruct path to {destination} from {origin}")
@dataclass(frozen=True, order=True)
class FrontierNode:
cost: float
point: ControlPoint = field(compare=False)
class Frontier:
def __init__(self) -> None:
self.nodes: List[FrontierNode] = []
def push(self, poly: ControlPoint, cost: float) -> None:
heapq.heappush(self.nodes, FrontierNode(cost, poly))
def pop(self) -> Optional[FrontierNode]:
try:
return heapq.heappop(self.nodes)
except IndexError:
return None
def __bool__(self) -> bool:
return bool(self.nodes)
class TransitConnection(Enum):
Road = auto()
Shipping = auto()
Airlift = auto()
class TransitNetwork:
def __init__(self) -> None:
self.nodes: Dict[
ControlPoint, Dict[ControlPoint, TransitConnection]
] = defaultdict(dict)
def has_destinations(self, control_point: ControlPoint) -> bool:
return bool(self.nodes[control_point])
def has_link(self, a: ControlPoint, b: ControlPoint) -> bool:
return b in self.nodes[a]
def METHOD_NAME(self, a: ControlPoint, b: ControlPoint) -> TransitConnection:
return self.nodes[a][b]
def link_with(
self, a: ControlPoint, b: ControlPoint, METHOD_NAME: TransitConnection
) -> None:
self.nodes[a][b] = METHOD_NAME
self.nodes[b][a] = METHOD_NAME
def link_road(self, a: ControlPoint, b: ControlPoint) -> None:
self.link_with(a, b, TransitConnection.Road)
def link_shipping(self, a: ControlPoint, b: ControlPoint) -> None:
self.link_with(a, b, TransitConnection.Shipping)
def link_airport(self, a: ControlPoint, b: ControlPoint) -> None:
self.link_with(a, b, TransitConnection.Airlift)
def connections_from(self, control_point: ControlPoint) -> Iterator[ControlPoint]:
yield from self.nodes[control_point]
def cost(self, a: ControlPoint, b: ControlPoint) -> float:
return {
TransitConnection.Road: 1,
TransitConnection.Shipping: 3,
# Set arbitrarily high so that other methods are preferred, but still scaled
# by distance so that when we do need it we still pick the closest airfield.
            # The units of distance are meters so there's no risk of these ever
            # being cheaper than the road or shipping connections.
TransitConnection.Airlift: a.position.distance_to_point(b.position),
}[self.METHOD_NAME(a, b)]
def has_path_between(
self,
origin: ControlPoint,
destination: ControlPoint,
seen: Optional[set[ControlPoint]] = None,
) -> bool:
if seen is None:
seen = set()
seen.add(origin)
for connection in self.connections_from(origin):
if connection in seen:
continue
if connection == destination:
return True
if self.has_path_between(connection, destination, seen):
return True
return False
def shortest_path_between(
self, origin: ControlPoint, destination: ControlPoint
) -> list[ControlPoint]:
return self.shortest_path_with_cost(origin, destination)[0]
def shortest_path_with_cost(
self, origin: ControlPoint, destination: ControlPoint
) -> Tuple[List[ControlPoint], float]:
if origin not in self.nodes:
raise ValueError(f"{origin} is not in the transit network.")
if destination not in self.nodes:
raise ValueError(f"{destination} is not in the transit network.")
frontier = Frontier()
frontier.push(origin, 0)
came_from: Dict[ControlPoint, Optional[ControlPoint]] = {origin: None}
best_known: Dict[ControlPoint, float] = defaultdict(lambda: math.inf)
best_known[origin] = 0.0
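        # Dijkstra's algorithm: keep expanding the cheapest frontier node and relax its neighbours.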
while (node := frontier.pop()) is not None:
cost = node.cost
current = node.point
if cost > best_known[current]:
continue
for neighbor in self.connections_from(current):
new_cost = cost + self.cost(node.point, neighbor)
if new_cost < best_known[neighbor]:
best_known[neighbor] = new_cost
frontier.push(neighbor, new_cost)
came_from[neighbor] = current
# Reconstruct and reverse the path.
current = destination
path: List[ControlPoint] = []
while current != origin:
path.append(current)
previous = came_from.get(current)
if previous is None:
raise NoPathError(origin, destination)
current = previous
path.reverse()
return path, best_known[destination]
class TransitNetworkBuilder:
def __init__(self, theater: ConflictTheater, for_player: bool) -> None:
self.control_points = list(theater.control_points_for(for_player))
self.network = TransitNetwork()
self.airports: Set[ControlPoint] = {
cp
for cp in self.control_points
if cp.is_friendly(for_player) and cp.runway_is_operational()
}
def build(self) -> TransitNetwork:
seen = set()
for control_point in self.control_points:
if control_point not in seen:
seen.add(control_point)
self.add_transit_links(control_point)
return self.network
def add_transit_links(self, control_point: ControlPoint) -> None:
# Prefer road connections.
for road_connection in control_point.connected_points:
if road_connection.is_friendly_to(control_point):
self.network.link_road(control_point, road_connection)
# Use sea connections if there's no road or rail connection.
for sea_connection in control_point.shipping_lanes:
if self.network.has_link(control_point, sea_connection):
continue
if sea_connection.is_friendly_to(control_point):
self.network.link_shipping(control_point, sea_connection)
# And use airports as a last resort.
if control_point in self.airports:
for airport in self.airports:
if control_point == airport:
continue
if self.network.has_link(control_point, airport):
continue
if not airport.is_friendly_to(control_point):
continue
self.network.link_airport(control_point, airport) | null |
5,949 | #!/usr/bin/env python3
#Copyright (C) 2011 by Glenn Hickey
#
#Released under the MIT license, see LICENSE.txt
"""
"""
import unittest
import os
import sys
from sonLib.bioio import TestStatus
from sonLib.bioio import getTempDirectory
from sonLib.bioio import logger
from sonLib.bioio import system
from cactus.progressive.multiCactusTree import MultiCactusTree
from sonLib.nxnewick import NXNewick
class TestCase(unittest.TestCase):
def setUp(self):
unittest.TestCase.setUp(self)
self.mcTree1 = None
self.mcTree1a = None
self.mcTree2 = None
self.__generateTrees()
@TestStatus.shortLength
def testSanity(self):
parser = NXNewick()
mcTree1 = MultiCactusTree(parser.parseString(self.tree1, addImpliedRoots = False))
tree1String = NXNewick().writeString(mcTree1)
self.assertEqual(tree1String, self.tree1)
mcTree2 = MultiCactusTree(parser.parseString(self.tree2, addImpliedRoots = False))
tree2String = NXNewick().writeString(mcTree2)
self.assertEqual(tree2String, self.tree2)
@TestStatus.shortLength
def testSubtrees(self):
roots1 = ["Anc0", "Anc1", "Anc2", "Anc3", "Anc4", "Anc5", "Anc6", "Anc7"]
roots2 = ["Anc0", "Anc1", "Anc2", "Anc3", "Anc4", "Anc5"]
subTree1_a3 = '(Anc7:0.025291,BABOON:0.044568)Anc3;'
subTree2_a5 = '(monkey:100.8593,cat:47.14069)Anc5;'
trueRoots = [roots1, roots2]
trueSubtrees = [subTree1_a3, subTree2_a5]
trees = [self.mcTree1, self.mcTree2]
ancs = ["Anc3", "Anc5"]
for tree, trueRoot, anc, trueSubtree in zip(trees, trueRoots, ancs, trueSubtrees):
roots = tree.getSubtreeRootNames()
self.assertEqual(sorted(roots), sorted(trueRoot))
subtree = tree.extractSubTree(anc)
subtree = NXNewick().writeString(subtree)
self.assertEqual(subtree, trueSubtree)
@TestStatus.shortLength
def testAddSelf(self):
trueSelf = '((((((((HUMAN:0.006969)HUMAN_self:0.006969,(CHIMP:0.009727)CHIMP_self:0.009727)Anc7:0.025291)Anc7_self:0.025291,(BABOON:0.044568)BABOON_self:0.044568)Anc3:0.11)Anc3_self:0.11,(((MOUSE:0.072818)MOUSE_self:0.072818,(RAT:0.081244)RAT_self:0.081244)Anc4:0.260342)Anc4_self:0.260342)Anc1:0.02326)Anc1_self:0.02326,(((((DOG:0.07)DOG_self:0.07,(CAT:0.07)CAT_self:0.07)Anc5:0.087381)Anc5_self:0.087381,(((PIG:0.06)PIG_self:0.06,(COW:0.06)COW_self:0.06)Anc6:0.104728)Anc6_self:0.104728)Anc2:0.04)Anc2_self:0.04)Anc0;'
tree = MultiCactusTree(self.mcTree1)
tree.nameUnlabeledInternalNodes()
tree.computeSubtreeRoots()
tree.addSelfEdges()
treeString = NXNewick().writeString(tree)
self.assertEqual(treeString, trueSelf)
@TestStatus.shortLength
def testAddOutgroup(self):
trueOg = '((((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.025291,BABOON:0.044568)Anc3:0.11,(MOUSE:0.072818,RAT:0.081244)Anc4:0.260342)Anc1:0.02326,((DOG:0.07,CAT:0.07)Anc5:0.087381,(PIG:0.06,COW:0.06)Anc6:0.104728)Anc2:0.04,outgroup:1.7)Anc0;'
tree = MultiCactusTree(self.mcTree1)
tree.nameUnlabeledInternalNodes()
tree.computeSubtreeRoots()
tree.addOutgroup("outgroup", 1.7)
treeString = NXNewick().writeString(tree)
self.assertEqual(treeString, trueOg)
trueLeafOg = "(A:1.1,outgroup:1.1);"
leafTreeString = "A;"
parser = NXNewick()
leafTree = MultiCactusTree(parser.parseString(leafTreeString, addImpliedRoots = False))
leafTree.nameUnlabeledInternalNodes()
leafTree.computeSubtreeRoots()
leafTree.addOutgroup("outgroup", 2.2)
leafTreeOutString = NXNewick().writeString(leafTree)
self.assertEqual(leafTreeOutString, trueLeafOg)
@TestStatus.shortLength
def testExtractSpanningTree(self):
"""Tests whether extracting a binary spanning tree works correctly."""
prevNewick1 = NXNewick().writeString(self.mcTree1)
# Check a dead-simple spanning tree with 3 closely related leaves.
spanHCB = self.mcTree1.extractSpanningTree(["HUMAN", "CHIMP", "BABOON"])
# Check that the existing tree hasn't been modified (OK, a bit
# silly, but just in case).
self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)
# Check the actual spanning tree.
self.assertEqual(NXNewick().writeString(spanHCB), "((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.025291,BABOON:0.044568)Anc3;")
# Now test a more complicated tree, where we should remove as
# many of the ancestors as possible (they will add extra
# losses for no reason!).
spanHCC = self.mcTree1.extractSpanningTree(["HUMAN", "CHIMP", "CAT"])
self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)
self.assertEqual(NXNewick().writeString(spanHCC), "((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.158551,CAT:0.197381)Anc0;")
@TestStatus.shortLength
def testGetChildren(self):
self.assertEqual(self.mcTree1.getChildNames('Anc6'), ['PIG', 'COW'])
def __generateTrees(self):
self.tree1 = '((((HUMAN:0.006969,CHIMP:0.009727):0.025291,BABOON:0.044568):0.11,(MOUSE:0.072818,RAT:0.081244):0.260342):0.02326,((DOG:0.07,CAT:0.07):0.087381,(PIG:0.06,COW:0.06):0.104728):0.04);'
self.tree2 = '((raccoon:19.19959,bear:6.80041):0.846,((sea_lion:11.997,seal:12.003):7.52973,((monkey:100.8593,cat:47.14069):20.59201,weasel:18.87953):2.0946):3.87382,dog:25.46154);'
parser = NXNewick()
self.mcTree1 = MultiCactusTree(parser.parseString(self.tree1, addImpliedRoots = False))
self.mcTree2 = MultiCactusTree(parser.parseString(self.tree2, addImpliedRoots = False))
self.mcTree1.nameUnlabeledInternalNodes()
self.mcTree2.nameUnlabeledInternalNodes()
self.mcTree1.computeSubtreeRoots()
self.mcTree2.computeSubtreeRoots()
def METHOD_NAME():
unittest.METHOD_NAME()
if __name__ == '__main__':
METHOD_NAME() | null |
5,950 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.api import AnalysisDataService, WorkspaceGroup
from mantid.simpleapi import CompareWorkspaces, IndirectQuickRun, LoadNexus
import systemtesting
import unittest
def exists_in_ads(workspace_name):
return AnalysisDataService.doesExist(workspace_name)
def get_ads_workspace(workspace_name):
return AnalysisDataService.retrieve(workspace_name) if exists_in_ads(workspace_name) else None
class IndirectQuickRunTest(unittest.TestCase):
def setUp(self):
self._run_numbers = "92762-92766"
self._instrument = "OSIRIS"
self._analyser = "graphite"
self._reflection = "002"
self._spectra_range = "963,980"
self._elastic_range = "-0.02,0.02"
self._inelastic_range = "0.4,0.5"
self._total_range = "-0.5,0.5"
def tearDown(self):
AnalysisDataService.clear()
def test_that_IndirectQuickRun_produces_output_workspaces_with_the_correct_names(self):
self._execute_IndirectQuickRun()
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_eisf"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_q"))
def test_that_IndirectQuickRun_performs_an_energy_window_scan_and_produces_a_workspace_with_the_correct_size(self):
self._execute_IndirectQuickRun()
scan_group = get_ads_workspace("osiris92762_to_osiris92766_scan_q")
self.assertTrue(isinstance(scan_group, WorkspaceGroup))
self.assertEqual(scan_group.getNumberOfEntries(), 12)
def test_that_IndirectQuickRun_produces_the_correct_eisf_workspace(self):
self._execute_IndirectQuickRun()
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_eisf")
def test_that_IndirectQuickRun_produces_the_correct_workspaces_when_doing_an_MSDFit(self):
self._execute_IndirectQuickRun(msd_fit=True)
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd_Parameters"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_msd_fit"))
msd_fit_group = get_ads_workspace("osiris92762_to_osiris92766_scan_msd_fit")
self.assertTrue(isinstance(msd_fit_group, WorkspaceGroup))
self.assertEqual(msd_fit_group.getNumberOfEntries(), 5)
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_msd")
def METHOD_NAME(self):
self._execute_IndirectQuickRun(width_fit=True)
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Width1"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Diffusion"))
self.assertTrue(exists_in_ads("osiris92762_to_osiris92766_scan_red_Width_Fit"))
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_red_Width1", "osiris92762_to_osiris92766_scan_red_Width")
self._assert_equal_to_reference_file("osiris92762_to_osiris92766_scan_red_Diffusion")
width_fit_group = get_ads_workspace("osiris92762_to_osiris92766_scan_red_Width_Fit")
self.assertTrue(isinstance(width_fit_group, WorkspaceGroup))
self.assertEqual(width_fit_group.getNumberOfEntries(), 12)
def _execute_IndirectQuickRun(self, msd_fit=False, width_fit=False):
IndirectQuickRun(
InputFiles=self._run_numbers,
Instrument=self._instrument,
Analyser=self._analyser,
Reflection=self._reflection,
SpectraRange=self._spectra_range,
ElasticRange=self._elastic_range,
InelasticRange=self._inelastic_range,
TotalRange=self._total_range,
MSDFit=msd_fit,
WidthFit=width_fit,
)
def _assert_equal_to_reference_file(self, output_name, reference_name=None):
reference_name = output_name if reference_name is None else reference_name
expected_workspace = LoadNexus(Filename="IndirectQuickRun_" + reference_name + ".nxs")
self.assertTrue(
CompareWorkspaces(
Workspace1=get_ads_workspace(output_name), Workspace2=expected_workspace, Tolerance=5.0, ToleranceRelErr=True
)[0]
)
class IndirectQuickRunTestRunner(systemtesting.MantidSystemTest):
def __init__(self):
systemtesting.MantidSystemTest.__init__(self)
self._success = False
def runTest(self):
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(IndirectQuickRunTest, "test"))
runner = unittest.TextTestRunner()
res = runner.run(suite)
if res.wasSuccessful():
self._success = True
def requiredMemoryMB(self):
return 2000
def validate(self):
return self._success | null |
5,951 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
import numpy as np
import numpy.testing as npt
import testhelpers
from mantid.geometry import UnitCell
from mantid.simpleapi import CreatePeaksWorkspace, CreateSimulationWorkspace
import fractional_indexing as indexing
class FractionIndexingTests(unittest.TestCase):
def setUp(self):
# Need to set the random seed because the scipy kmeans algorithm
        # randomly initializes the starting centroids. This can lead to a
        # different but equivalent indexing.
np.random.seed(10)
def test_find_bases_with_1d_modulation(self):
qs = np.array(
[
[0, 0, 0.13],
]
)
ndim, bases = indexing.find_bases(qs, tolerance=0.02)
self.assertEqual(ndim, 1)
npt.assert_array_equal(bases[0], qs[0])
def test_find_bases_with_2d_modulation(self):
qs = np.array(
[
[0, 0, 0.1],
[0, 0.1, 0],
]
)
ndim, bases = indexing.find_bases(qs, tolerance=0.02)
self.assertEqual(ndim, 2)
npt.assert_array_equal(bases[0], qs[0])
npt.assert_array_equal(bases[1], qs[1])
def test_find_bases_with_3d_modulation(self):
qs = np.array(
[
[0, 0, 0.1],
[0, 0.1, 0],
[0.1, 0, 0],
]
)
ndim, bases = indexing.find_bases(qs, tolerance=0.02)
self.assertEqual(ndim, 3)
npt.assert_array_equal(bases[0], qs[0])
npt.assert_array_equal(bases[1], qs[1])
npt.assert_array_equal(bases[2], qs[2])
def test_find_bases_with_4d_modulation(self):
qs = np.array(
[
[0, 0, 0.1],
[0, 0.1, 0],
[0.1, 0, 0],
[0.15, 0, 0],
]
)
ndim, bases = indexing.find_bases(qs, tolerance=0.02)
self.assertEqual(ndim, 4)
npt.assert_array_equal(bases[0], qs[0])
npt.assert_array_equal(bases[1], qs[1])
npt.assert_array_equal(bases[2], qs[2])
def test_find_bases_with_2d_modulation_with_linear_combination(self):
qs = np.array(
[
[0, 0.1, 0.1],
[0, 0, 0.1],
[0, 0.1, 0],
[0, 0, 0.15],
[0, 0.1, 0.2],
]
)
ndim, bases = indexing.find_bases(qs, tolerance=0.02)
self.assertEqual(ndim, 3)
npt.assert_array_equal(bases[0], np.array([0, 0, 0.1]))
npt.assert_array_equal(bases[1], np.array([0, 0.1, 0]))
npt.assert_array_equal(bases[2], np.array([0, 0, 0.15]))
def test_find_bases_with_non_orthogonal_cell(self):
cell = UnitCell(1, 1, 1, 90, 95, 103)
cart_cell = cell.getB()
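        # getB() gives the reciprocal-lattice B matrix, used here to express the fractional q vectors in Cartesian coordinates.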
qs = np.array(
[
[0.5, 0, 0],
[0, 0.5, 0],
[0, 0, 0.5],
[0, 0, 0.25],
]
)
qs = np.dot(qs, cart_cell)
ndim, bases = indexing.find_bases(qs, 1e-5)
self.assertEqual(ndim, 3, "Number of dimensions must be 3")
expected_bases = np.array([[0.0, 0.0, 0.25], [0.0, 0.5, 0.0], [0.51521732, 0.11589868, 0.04490415]])
npt.assert_almost_equal(bases, expected_bases, err_msg="Basis vectors do not match")
def test_find_indexing_with_non_orthogonal_cell(self):
cell = UnitCell(1, 1, 1, 90, 95, 103)
cart_cell = cell.getB()
qs = np.array(
[
[0.5, 0, 0],
[0, 0.5, 0],
[0, 0, 0.5],
[0, 0, 0.25],
]
)
qs = np.dot(qs, cart_cell)
indices = indexing.index_q_vectors(qs)
expected_indexing = np.array([[0, 0, 1], [0, 1, 0], [2, 0, 0], [1, 0, 0]])
npt.assert_equal(indices, expected_indexing, err_msg="Indexing does not match expected.")
def test_index_with_modulation(self):
qs = np.array(
[
[0, 0.1, 0.1],
[0, -0.1, 0],
[0, 0, 0.1],
[0, 0, -0.1],
[0, 0.1, 0.2],
[0, 0, 0.45],
]
)
expected_indexing = np.array([[-1, 1, 0], [1, 0, 0], [0, 1, 0], [0, -1, 0], [-1, 2, 0], [0, 0, 1]])
actual_indexing = indexing.index_q_vectors(qs, tolerance=0.03)
npt.assert_array_equal(actual_indexing, expected_indexing)
def METHOD_NAME(self):
vecs = np.array(
[
[0, 0, 3],
[0, 2, 0],
[1, 0, 0],
[0, 5, 5],
[0, 4, 4],
]
)
expected_output = np.array([3, 2, 1, 7.071068, 5.656854])
norms = indexing.norm_along_axis(vecs)
npt.assert_allclose(norms, expected_output)
def test_sort_vectors_by_norm(self):
vecs = np.array(
[
[0, 0, 3],
[0, 2, 0],
[1, 0, 0],
[0, 5, 5],
[0, 4, 4],
]
)
expected_output = np.array(
[
[1, 0, 0],
[0, 2, 0],
[0, 0, 3],
[0, 4, 4],
[0, 5, 5],
]
)
vecs_sorted = indexing.sort_vectors_by_norm(vecs)
npt.assert_allclose(vecs_sorted, expected_output)
def test_trunc_decimals(self):
reference = np.array([0, 0, 0.1])
test_input = reference + np.random.random(3) * 0.1
result = indexing.trunc_decimals(test_input, 1)
npt.assert_array_equal(result, reference)
def test_remove_noninteger(self):
test_input = np.array([1, 2, 3, 1.1, 3.4, -10, -1.8])
reference = np.array([1, 2, 3, 0, 0, -10, 0])
result = indexing.remove_noninteger(test_input)
npt.assert_array_equal(result, reference)
def test_get_hkls(self):
ws = CreateSimulationWorkspace("IRIS", BinParams="1,5,10")
peaks = CreatePeaksWorkspace(ws, 2)
reference = np.array(
[
[1, 1, 2],
[2, 1, 4],
]
)
peak = peaks.getPeak(0)
peak.setHKL(1, 1, 2)
peak = peaks.getPeak(1)
peak.setHKL(2, 1, 4)
hkl = indexing.get_hkls(peaks)
npt.assert_array_equal(hkl, reference)
def test_cluster_qs_with_fixed_k(self):
qs = np.array(
[
[0, 0.1, 0.1],
[0, 0.1, 0.1],
[0, 0.0, 0.1],
[0, 0.0, 0.1],
[0, 0.1, 0.1],
]
)
qs += np.random.random(qs.shape) * 0.01
k = 2
clusters, k = indexing.cluster_qs(qs, k)
self.assertEqual(k, 2)
npt.assert_array_equal(clusters, np.array([0, 0, 1, 1, 0]))
def test_cluster_qs_with_auto_k(self):
qs = np.array(
[
[0, 0.1, 0.1],
[0, 0.1, 0.1],
[0, 0.0, 0.1],
[0, 0.0, 0.1],
[0, 0.1, 0.1],
]
)
qs += np.random.random(qs.shape) * 0.01
clusters, k = indexing.cluster_qs(qs, threshold=0.01)
self.assertEqual(k, 2)
npt.assert_array_equal(clusters, np.array([2, 2, 1, 1, 2]))
if __name__ == "__main__":
unittest.main() | null |
5,952 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A subclass of unittest.TestCase which checks for reference leaks.
To use:
- Use testing_refleak.BaseTestCase instead of unittest.TestCase
- Configure and compile Python with --with-pydebug
If sys.gettotalrefcount() is not available (because Python was built without
the Py_DEBUG option), then this module is a no-op and tests will run normally.
"""
import gc
import sys
try:
import copy_reg as copyreg #PY26
except ImportError:
import copyreg
try:
import unittest2 as unittest #PY26
except ImportError:
import unittest
class LocalTestResult(unittest.TestResult):
"""A TestResult which forwards events to a parent object, except for Skips."""
def __init__(self, parent_result):
unittest.TestResult.__init__(self)
self.parent_result = parent_result
def addError(self, test, error):
self.parent_result.addError(test, error)
def addFailure(self, test, error):
self.parent_result.addFailure(test, error)
def addSkip(self, test, reason):
pass
class ReferenceLeakCheckerTestCase(unittest.TestCase):
"""A TestCase which runs tests multiple times, collecting reference counts."""
NB_RUNS = 3
def run(self, result=None):
# python_message.py registers all Message classes to some pickle global
# registry, which makes the classes immortal.
    # We save a copy of this registry, and reset it before we count references.
self._saved_pickle_registry = copyreg.dispatch_table.copy()
# Run the test twice, to warm up the instance attributes.
super(ReferenceLeakCheckerTestCase, self).run(result=result)
super(ReferenceLeakCheckerTestCase, self).run(result=result)
oldrefcount = 0
local_result = LocalTestResult(result)
refcount_deltas = []
for _ in range(self.NB_RUNS):
oldrefcount = self._getRefcounts()
super(ReferenceLeakCheckerTestCase, self).run(result=local_result)
newrefcount = self._getRefcounts()
refcount_deltas.append(newrefcount - oldrefcount)
print(refcount_deltas, self)
try:
self.assertEqual(refcount_deltas, [0] * self.NB_RUNS)
except Exception: # pylint: disable=broad-except
result.addError(self, sys.exc_info())
def _getRefcounts(self):
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(self._saved_pickle_registry)
# It is sometimes necessary to gc.collect() multiple times, to ensure
# that all objects can be collected.
gc.collect()
gc.collect()
gc.collect()
return sys.gettotalrefcount()
if hasattr(sys, 'gettotalrefcount'):
BaseTestCase = ReferenceLeakCheckerTestCase
SkipReferenceLeakChecker = unittest.skip
else:
# When PyDEBUG is not enabled, run the tests normally.
BaseTestCase = unittest.TestCase
def SkipReferenceLeakChecker(reason):
del reason # Don't skip, so don't need a reason.
def METHOD_NAME(func):
return func
return METHOD_NAME | null |
5,953 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The parser for hwts log file."""
import os
import struct
from mindspore.profiler.common.util import fwrite_format, get_file_join_name
from mindspore import log as logger
from mindspore.profiler.common.validator.validate_path import \
validate_and_normalize_path
class HWTSLogParser:
"""
The Parser for hwts log files.
Args:
input_path (str): The profiling job path. Such as: '/var/log/npu/profiling/JOBAIFGJEJFEDCBAEADIFJAAAAAAAAAA".
output_filename (str): The output data path and name. Such as: './output_format_data_hwts_0.txt'.
"""
GRAPH_MODE_MAX_TASKID = 65000
_source_file_target_old = 'hwts.log.data.45.dev.profiler_default_tag'
_source_file_target = 'hwts.data.'
_dst_file_title = 'title:45 HWTS data'
_dst_file_column_title = 'Type cnt Core_ID Block_ID Task_ID Cycle_counter Stream_ID'
def __init__(self, input_path, output_filename, dynamic_status):
self._input_path = input_path
self._output_filename = output_filename
        self._source_file_name = self._get_source_file()
self._dynamic_status = dynamic_status
def METHOD_NAME(self):
"""
Execute the parser, get result data, and write it to the output file.
Returns:
bool, whether succeed to analyse hwts log.
"""
content_format = ['QIIIIIIIIIIII', 'QIIQIIIIIIII', 'IIIIQIIIIIIII']
log_type = ['Start of task', 'End of task', 'Start of block', 'End of block', 'Block PMU']
result_data = ""
flip_times = 0
last_task_stream_map = {}
task_id_threshold = 65536
        self._source_file_name = validate_and_normalize_path(self._source_file_name)
        with open(self._source_file_name, 'rb') as hwts_data:
while True:
line = hwts_data.read(64)
if not line:
break
if not line.strip():
continue
if len(line) < 64:
logger.error("Length of hwts data is less than 64, it is %s", len(line))
continue
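                # The first 8 bytes of each 64-byte record hold the log type, counter, core id, block id and task id.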
byte_first_four = struct.unpack('BBHHH', line[0:8])
byte_first = bin(byte_first_four[0]).replace('0b', '').zfill(8)
ms_type, is_warn_res0_ov = byte_first[-3:], byte_first[4]
cnt, core_id = int(byte_first[0:4], 2), byte_first_four[1]
blk_id, task_id = byte_first_four[3], int(byte_first_four[4])
if ms_type in ['000', '001', '010']: # log type 0,1,2
result = struct.unpack(content_format[0], line[8:])
syscnt = result[0]
stream_id = result[1]
elif ms_type == '011': # log type 3
result = struct.unpack(content_format[1], line[8:])
syscnt = result[0]
stream_id = result[1]
elif ms_type == '100': # log type 4
result = struct.unpack(content_format[2], line[8:])
stream_id = result[2]
syscnt = None
if is_warn_res0_ov == '0':
syscnt = result[4]
else:
logger.info("Profiling: invalid hwts log record type %s", ms_type)
continue
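                # Task ids wrap around at 65536 in graph mode; detect the wrap per stream and offset later ids accordingly.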
if HWTSLogParser.GRAPH_MODE_MAX_TASKID < last_task_stream_map.get(stream_id, task_id) \
and task_id < last_task_stream_map.get(stream_id, task_id):
flip_times += 1
task_id_str = ("%s_%s" % (str(stream_id), str(task_id + flip_times * task_id_threshold)))
result_data += ("%-14s %-4s %-8s %-9s %-8s %-15s %s\n" % (log_type[int(ms_type, 2)], cnt, core_id,
blk_id, task_id_str, syscnt, stream_id))
last_task_stream_map[stream_id] = task_id
fwrite_format(self._output_filename, data_source=self._dst_file_title, is_start=True)
fwrite_format(self._output_filename, data_source=self._dst_file_column_title)
fwrite_format(self._output_filename, data_source=result_data)
return True
def _get_source_file(self):
"""Get hwts log file name, which was created by ada service."""
file_name = get_file_join_name(self._input_path, self._source_file_target)
if not file_name:
file_name = get_file_join_name(self._input_path, self._source_file_target_old)
if not file_name:
data_path = os.path.join(self._input_path, "data")
file_name = get_file_join_name(data_path, self._source_file_target)
if not file_name:
file_name = get_file_join_name(data_path, self._source_file_target_old)
if not file_name:
msg = "Fail to find hwts log file, under profiling directory"
raise RuntimeError(msg)
return file_name | null |
5,954 | """Specialized text support classes for segment displays."""
import abc
from collections import namedtuple
from typing import Optional, List, Union
from mpf.core.rgb_color import RGBColor
DisplayCharacter = namedtuple("DisplayCharacter", ["char_code", "dot", "comma", "color"])
DOT_CODE = ord(".")
COMMA_CODE = ord(",")
SPACE_CODE = ord(" ")
class SegmentDisplayText(metaclass=abc.ABCMeta):
"""A list of characters with specialized functions for segment displays. Use for display text effects."""
__slots__ = ["_text", "embed_dots", "embed_commas", "use_dots_for_commas"]
def __init__(self, char_list, embed_dots, embed_commas, use_dots_for_commas):
"""Initialize segment display text."""
self.embed_dots = embed_dots
self.embed_commas = embed_commas
self.use_dots_for_commas = use_dots_for_commas
self._text = char_list
# pylint: disable=too-many-arguments
@classmethod
def METHOD_NAME(cls, text: str, display_size: int, collapse_dots: bool, collapse_commas: bool,
use_dots_for_commas: bool, colors: List[RGBColor]) -> "ColoredSegmentDisplayText":
"""Create colored text."""
return ColoredSegmentDisplayText(
cls._create_characters(text, display_size, collapse_dots, collapse_commas, use_dots_for_commas, colors[:]),
collapse_dots, collapse_commas, use_dots_for_commas)
# pylint: disable=too-many-arguments
@classmethod
def from_str(cls, text: str, display_size: int, collapse_dots: bool, collapse_commas: bool,
use_dots_for_commas: bool, colors: Optional[List[RGBColor]] = None) -> \
Union["ColoredSegmentDisplayText", "UncoloredSegmentDisplayText"]:
"""Create from string."""
if colors:
return cls.METHOD_NAME(text, display_size, collapse_dots, collapse_commas,
use_dots_for_commas, colors[:])
char_colors = [None] * len(text)
return UncoloredSegmentDisplayText(
cls._create_characters(text, display_size, collapse_dots, collapse_commas, use_dots_for_commas,
char_colors), collapse_dots, collapse_commas, use_dots_for_commas)
@classmethod
def _embed_dots_and_commas(cls, text: str, collapse_dots: bool, collapse_commas: bool, use_dots_for_commas: bool):
"""Return text with embedded dots and commas."""
char_has_dot = False
char_has_comma = False
char_list = []
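        # Walk the text right to left so a trailing dot/comma gets attached to the character that precedes it.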
for char in reversed(text):
char_code = ord(char)
if collapse_dots and char_code == DOT_CODE or use_dots_for_commas and char_code == COMMA_CODE:
char_has_dot = True
continue
if collapse_commas and char_code == COMMA_CODE:
char_has_comma = True
continue
char_list.insert(0, (char_code, char_has_dot, char_has_comma))
char_has_dot = False
char_has_comma = False
return char_list
# pylint: disable-msg=too-many-locals
@classmethod
def _create_characters(cls, text: str, display_size: int, collapse_dots: bool, collapse_commas: bool,
use_dots_for_commas: bool, colors: List[Optional[RGBColor]]) -> List[DisplayCharacter]:
"""Create characters from text and color them.
- Colors are used from the left to the right (starting with the first character).
- If colors are shorter than text the last color is repeated for text.
- The first color is used to pad the text to the left if text is shorter than the display - thus text is right
aligned.
- Dots and commas are embedded on the fly.
"""
char_list = []
left_pad_color = colors[0] if colors else None
default_right_color = colors[len(colors) - 1] if colors else None
uncolored_chars = cls._embed_dots_and_commas(text, collapse_dots, collapse_commas, use_dots_for_commas)
colors = colors[-len(uncolored_chars):]
for char_code, char_has_dot, char_has_comma in uncolored_chars:
color = colors.pop(0) if colors else default_right_color
char_list.append(DisplayCharacter(char_code, char_has_dot, char_has_comma, color))
# ensure list is the same size as the segment display (cut off on left or right justify characters)
current_length = len(char_list)
if current_length > display_size:
for _ in range(current_length - display_size):
char_list.pop(0)
elif current_length < display_size:
for _ in range(display_size - current_length):
char_list.insert(0, DisplayCharacter(SPACE_CODE, False, False, left_pad_color))
return char_list
def blank_segments(self, flash_mask) -> "SegmentDisplayText":
"""Return new SegmentDisplayText with chars blanked."""
return ColoredSegmentDisplayText(
[char if mask != "F" else DisplayCharacter(SPACE_CODE, False, False, char.color)
for char, mask in zip(self._text, flash_mask)],
self.embed_dots, self.embed_commas, self.use_dots_for_commas
)
def convert_to_str(self):
"""Convert back to normal text string."""
text = ""
for display_character in self:
text += chr(display_character.char_code)
if display_character.dot:
text += "."
if display_character.comma:
text += ","
return text
def __len__(self):
"""Return length."""
return self._text.__len__()
def __getitem__(self, item):
"""Return item or slice."""
if isinstance(item, slice):
return self.__class__(self._text.__getitem__(item), self.embed_dots, self.embed_commas,
self.use_dots_for_commas)
return self._text.__getitem__(item)
def __eq__(self, other):
"""Return true if two texts and colors are the same."""
# pylint: disable-msg=protected-access
return isinstance(other, SegmentDisplayText) and self._text == other._text
def extend(self, other_list):
"""Extend list."""
# pylint: disable-msg=protected-access
self._text.extend(other_list._text)
@abc.abstractmethod
def get_colors(self) -> Optional[List[RGBColor]]:
"""Get the list of colors for each character (if set)."""
raise NotImplementedError()
def __repr__(self):
"""Return string representation."""
return "<{} {}>".format(self.__class__, self._text)
class UncoloredSegmentDisplayText(SegmentDisplayText):
"""Segment text without colors."""
def get_colors(self) -> None:
"""Return None as we are transparent."""
return None
class ColoredSegmentDisplayText(SegmentDisplayText):
"""Segment text with colors."""
def get_colors(self) -> List[RGBColor]:
"""Get the list of colors for each character (if set)."""
return [display_character.color for display_character in self] | null |
5,955 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetPtrRecordResult',
'AwaitableGetPtrRecordResult',
'get_ptr_record',
'get_ptr_record_output',
]
@pulumi.output_type
class GetPtrRecordResult:
"""
A collection of values returned by getPtrRecord.
"""
def __init__(__self__, fqdn=None, id=None, name=None, records=None, resource_group_name=None, tags=None, ttl=None, zone_name=None):
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if records and not isinstance(records, list):
raise TypeError("Expected argument 'records' to be a list")
pulumi.set(__self__, "records", records)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if ttl and not isinstance(ttl, int):
raise TypeError("Expected argument 'ttl' to be a int")
pulumi.set(__self__, "ttl", ttl)
if zone_name and not isinstance(zone_name, str):
raise TypeError("Expected argument 'zone_name' to be a str")
pulumi.set(__self__, "zone_name", zone_name)
@property
@pulumi.getter
def fqdn(self) -> str:
"""
The FQDN of the DNS PTR Record.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def records(self) -> Sequence[str]:
"""
List of Fully Qualified Domain Names.
"""
return pulumi.get(self, "records")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
A mapping of tags assigned to the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def ttl(self) -> int:
"""
The Time To Live (TTL) of the DNS record in seconds.
"""
return pulumi.get(self, "ttl")
@property
@pulumi.getter(name="zoneName")
def zone_name(self) -> str:
return pulumi.get(self, "zone_name")
class AwaitableGetPtrRecordResult(GetPtrRecordResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPtrRecordResult(
fqdn=self.fqdn,
id=self.id,
name=self.name,
records=self.records,
resource_group_name=self.resource_group_name,
tags=self.tags,
ttl=self.ttl,
zone_name=self.zone_name)
def METHOD_NAME(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
zone_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPtrRecordResult:
"""
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.dns.PtrRecord("example",
zone_name="test-zone",
resource_group_name="test-rg")
pulumi.export("dnsPtrRecordId", example.id)
```
:param str name: The name of the DNS PTR Record.
:param str resource_group_name: Specifies the resource group where the DNS Zone (parent resource) exists.
:param str zone_name: Specifies the DNS Zone where the resource exists.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
__args__['zoneName'] = zone_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:dns/getPtrRecord:getPtrRecord', __args__, opts=opts, typ=GetPtrRecordResult).value
return AwaitableGetPtrRecordResult(
fqdn=pulumi.get(__ret__, 'fqdn'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
records=pulumi.get(__ret__, 'records'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
tags=pulumi.get(__ret__, 'tags'),
ttl=pulumi.get(__ret__, 'ttl'),
zone_name=pulumi.get(__ret__, 'zone_name'))
@_utilities.lift_output_func(METHOD_NAME)
def get_ptr_record_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
zone_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPtrRecordResult]:
"""
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.dns.PtrRecord("example",
zone_name="test-zone",
resource_group_name="test-rg")
pulumi.export("dnsPtrRecordId", example.id)
```
:param str name: The name of the DNS PTR Record.
:param str resource_group_name: Specifies the resource group where the DNS Zone (parent resource) exists.
:param str zone_name: Specifies the DNS Zone where the resource exists.
"""
... | null |
5,956 | #!/usr/bin/env python3
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
headers = {"UserAgent": UserAgent().random}
def extract_user_profile(script) -> dict:
"""
May raise json.decoder.JSONDecodeError
"""
data = script.contents[0]
info = json.loads(data[data.find('{"config"') : -1])
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
"""
Class Instagram crawl instagram user information
Usage: (doctest failing on GitHub Actions)
# >>> instagram_user = InstagramUser("github")
# >>> instagram_user.is_verified
True
# >>> instagram_user.biography
'Built for developers.'
"""
def __init__(self, username):
self.url = f"https://www.instagram.com/{username}/"
self.user_data = self.get_json()
def get_json(self) -> dict:
"""
Return a dict of user information
"""
html = requests.get(self.url, headers=headers).text
scripts = BeautifulSoup(html, "html.parser").find_all("script")
try:
return extract_user_profile(scripts[4])
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3])
def __repr__(self) -> str:
return f"{self.__class__.__name__}('{self.username}')"
def __str__(self) -> str:
return f"{self.fullname} ({self.username}) is {self.biography}"
@property
def username(self) -> str:
return self.user_data["username"]
@property
def fullname(self) -> str:
return self.user_data["full_name"]
@property
def biography(self) -> str:
return self.user_data["biography"]
@property
def email(self) -> str:
return self.user_data["business_email"]
@property
def website(self) -> str:
return self.user_data["external_url"]
@property
def number_of_followers(self) -> int:
return self.user_data["edge_followed_by"]["count"]
@property
def METHOD_NAME(self) -> int:
return self.user_data["edge_follow"]["count"]
@property
def number_of_posts(self) -> int:
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def profile_picture_url(self) -> str:
return self.user_data["profile_pic_url_hd"]
@property
def is_verified(self) -> bool:
return self.user_data["is_verified"]
@property
def is_private(self) -> bool:
return self.user_data["is_private"]
def test_instagram_user(username: str = "github") -> None:
"""
A self running doctest
>>> test_instagram_user()
"""
import os
if os.environ.get("CI"):
return # test failing on GitHub Actions
instagram_user = InstagramUser(username)
assert instagram_user.user_data
assert isinstance(instagram_user.user_data, dict)
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.METHOD_NAME > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("https://instagram.")
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
instagram_user = InstagramUser("github")
print(instagram_user)
print(f"{instagram_user.number_of_posts = }")
print(f"{instagram_user.number_of_followers = }")
print(f"{instagram_user.METHOD_NAME = }")
print(f"{instagram_user.email = }")
print(f"{instagram_user.website = }")
print(f"{instagram_user.profile_picture_url = }")
print(f"{instagram_user.is_verified = }")
print(f"{instagram_user.is_private = }") | null |
5,957 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2021 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from mantidqt.utils.qt.testing import start_qapplication
from mantidqtinterfaces.Muon.GUI.Common.fitting_widgets.general_fitting.general_fitting_view import (
GeneralFittingView,
SIMULTANEOUS_FIT_LABEL,
SINGLE_FIT_LABEL,
)
@start_qapplication
class GeneralFittingViewTest(unittest.TestCase):
def setUp(self):
self.view = GeneralFittingView()
def tearDown(self):
self.assertTrue(self.view.close())
def test_that_the_view_can_be_initialized_without_an_error(self):
self.view = GeneralFittingView()
def test_that_update_dataset_name_combo_box_will_set_the_names_in_the_dataset_name_combobox(self):
dataset_names = ["Name1", "Name2", "Name3"]
self.view.update_dataset_name_combo_box(dataset_names)
data = [
self.view.workspace_selector.dataset_name_combo_box.itemText(i)
for i in range(self.view.workspace_selector.dataset_name_combo_box.count())
]
self.assertTrue(data, dataset_names)
def test_that_update_dataset_name_combo_box_will_select_the_previously_selected_item_if_it_still_exists(self):
selected_dataset = "Name3"
dataset_names = ["Name1", "Name2", selected_dataset]
self.view.update_dataset_name_combo_box(dataset_names)
self.view.workspace_selector.dataset_name_combo_box.setCurrentIndex(2)
new_dataset_names = ["Name4", selected_dataset, "Name5"]
self.view.update_dataset_name_combo_box(new_dataset_names)
self.assertTrue(self.view.current_dataset_name, selected_dataset)
def test_that_the_current_dataset_name_can_be_set_as_expected(self):
selected_dataset = "Name2"
dataset_names = ["Name1", selected_dataset, "Name3"]
self.view.update_dataset_name_combo_box(dataset_names)
self.assertEqual(self.view.current_dataset_name, "Name1")
self.view.current_dataset_name = selected_dataset
self.assertEqual(self.view.current_dataset_name, selected_dataset)
def test_that_the_current_dataset_name_will_not_change_the_selected_dataset_if_the_provided_dataset_does_not_exist(self):
selected_dataset = "Name3"
dataset_names = ["Name1", "Name2", selected_dataset]
self.view.update_dataset_name_combo_box(dataset_names)
self.view.current_dataset_name = selected_dataset
self.assertEqual(self.view.current_dataset_name, selected_dataset)
self.view.current_dataset_name = "Does not exist"
self.assertEqual(self.view.current_dataset_name, selected_dataset)
def test_that_number_of_datasets_will_return_the_expected_number_of_datasets(self):
dataset_names = ["Name1", "Name2", "Name3"]
self.view.update_dataset_name_combo_box(dataset_names)
self.assertEqual(self.view.number_of_datasets(), len(dataset_names))
def test_that_the_simultaneous_fit_by_can_be_set_as_expected(self):
self.assertEqual(self.view.simultaneous_fit_by, "Run")
self.view.simultaneous_fit_by = "Group/Pair"
self.assertEqual(self.view.simultaneous_fit_by, "Group/Pair")
def test_that_current_dataset_index_will_return_the_expected_dataset_index(self):
dataset_names = ["Name1", "Name2", "Name3"]
self.view.update_dataset_name_combo_box(dataset_names)
self.view.current_dataset_name = "Name2"
self.assertEqual(self.view.current_dataset_index, 1)
def test_that_current_dataset_index_will_return_none_when_there_is_nothing_selected_in_the_combobox(self):
self.assertEqual(self.view.current_dataset_index, None)
def test_that_switch_to_simultaneous_will_change_the_relevant_label(self):
self.view.switch_to_simultaneous()
self.assertEqual(self.view.workspace_selector.data_combo_box_label.text(), SIMULTANEOUS_FIT_LABEL)
def METHOD_NAME(self):
self.view.switch_to_simultaneous()
self.view.switch_to_single()
self.assertEqual(self.view.workspace_selector.data_combo_box_label.text(), SINGLE_FIT_LABEL)
def test_that_setup_fit_by_specifier_will_add_fit_specifiers_to_the_relevant_checkbox(self):
fit_specifiers = ["long", "fwd", "bwd"]
self.view.setup_fit_by_specifier(fit_specifiers)
data = [
self.view.general_fitting_options.simul_fit_by_specifier.itemText(i)
for i in range(self.view.general_fitting_options.simul_fit_by_specifier.count())
]
self.assertTrue(data, fit_specifiers)
if __name__ == "__main__":
unittest.main() | null |
5,958 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import unittest
from unittest import mock
from mantidqt.utils.qt.testing import start_qapplication
from qtpy.QtWidgets import QWidget
from mantidqtinterfaces.Muon.GUI.Common.grouping_tab_widget.grouping_tab_widget_model import GroupingTabModel
from mantidqtinterfaces.Muon.GUI.Common.muon_group import MuonGroup
from mantidqtinterfaces.Muon.GUI.Common.muon_pair import MuonPair
from mantidqtinterfaces.Muon.GUI.Common.pairing_table_widget.pairing_table_widget_presenter import PairingTablePresenter
from mantidqtinterfaces.Muon.GUI.Common.pairing_table_widget.pairing_table_widget_view import PairingTableView
from mantidqtinterfaces.Muon.GUI.Common.test_helpers.context_setup import setup_context_for_tests
def pair_name():
name = []
for i in range(21):
name.append("pair_" + str(i + 1))
return name
@start_qapplication
class AlphaTest(unittest.TestCase):
def setUp(self):
# Store an empty widget to parent all the views, and ensure they are deleted correctly
self.obj = QWidget()
setup_context_for_tests(self)
self.model = GroupingTabModel(context=self.context)
self.view = PairingTableView(parent=self.obj)
self.presenter = PairingTablePresenter(self.view, self.model)
self.add_three_groups_to_model()
self.view.warning_popup = mock.Mock()
self.view.enter_pair_name = mock.Mock(side_effect=pair_name())
def tearDown(self):
self.obj = None
def assert_model_empty(self):
self.assertEqual(len(self.model.pair_names), 0)
self.assertEqual(len(self.model.pairs), 0)
def assert_view_empty(self):
self.assertEqual(self.view.num_rows(), 0)
def add_three_groups_to_model(self):
group1 = MuonGroup(group_name="my_group_0", detector_ids=[1])
group2 = MuonGroup(group_name="my_group_1", detector_ids=[2])
group3 = MuonGroup(group_name="my_group_2", detector_ids=[3])
self.group_context.add_group(group1)
self.group_context.add_group(group2)
self.group_context.add_group(group3)
def add_two_pairs_to_table(self):
pair1 = MuonPair(pair_name="my_pair_0", forward_group_name="my_group_0", backward_group_name="my_group_1", alpha=1.0)
pair2 = MuonPair(pair_name="my_pair_1", forward_group_name="my_group_1", backward_group_name="my_group_2", alpha=1.0)
self.presenter.add_pair(pair1)
self.presenter.add_pair(pair2)
def get_group_1_selector(self, row):
return self.view.pairing_table.cellWidget(row, 1)
def METHOD_NAME(self, row):
return self.view.pairing_table.cellWidget(row, 2)
# ------------------------------------------------------------------------------------------------------------------
# TESTS : test the functionality around alpha.
# ------------------------------------------------------------------------------------------------------------------
def test_that_alpha_defaults_to_1(self):
self.presenter.handle_add_pair_button_clicked()
self.assertEqual(self.view.get_table_item_text(0, 4), "1.0")
def test_that_table_reverts_to_previous_value_when_adding_values_which_arent_numbers_to_alpha_column(self):
self.presenter.handle_add_pair_button_clicked()
non_numeric_alphas = ["", "a", "long", "!", "_", "1+2"]
default_value = self.view.get_table_item_text(0, 4)
for invalid_alpha in non_numeric_alphas:
self.view.pairing_table.setCurrentCell(0, 4)
self.view.pairing_table.item(0, 4).setText(invalid_alpha)
self.assertEqual(self.view.get_table_item_text(0, 4), default_value)
def test_that_warning_displayed_when_adding_invalid_alpha_values(self):
self.presenter.handle_add_pair_button_clicked()
non_numeric_alphas = ["", "a", "long", "!", "_", "1+2"]
call_count = 0
for invalid_alpha in non_numeric_alphas:
call_count += 1
self.view.pairing_table.setCurrentCell(0, 4)
self.view.pairing_table.item(0, 4).setText(invalid_alpha)
self.assertEqual(self.view.warning_popup.call_count, call_count)
def test_that_alpha_values_stored_to_correct_decimal_places(self):
self.presenter.handle_add_pair_button_clicked()
self.view.pairing_table.setCurrentCell(0, 4)
# test that rounds correctly
self.view.pairing_table.item(0, 4).setText("1.1234567890")
self.assertEqual(self.view.get_table_item_text(0, 4), "1.123457")
def test_that_alpha_values_stored_to_correct_decimal_places_when_rounding_down(self):
self.presenter.handle_add_pair_button_clicked()
self.view.pairing_table.setCurrentCell(0, 4)
# test that rounds correctly
self.view.pairing_table.item(0, 4).setText("1.12345617890")
self.assertEqual(self.view.get_table_item_text(0, 4), "1.123456")
def test_that_valid_alpha_values_are_added_correctly(self):
self.presenter.handle_add_pair_button_clicked()
valid_inputs = ["1.0", "12", ".123", "0.0000011", "0.05e-6"]
expected_output = [1.0, 12.0, 0.123, 1e-6, 1e-6]
for valid_alpha, expected_alpha in iter(zip(valid_inputs, expected_output)):
self.view.pairing_table.setCurrentCell(0, 4)
self.view.pairing_table.item(0, 4).setText(valid_alpha)
# make presenter update
self.presenter.handle_data_change(0, 4)
self.assertEqual(float(self.view.get_table_item_text(0, 4)), expected_alpha)
def test_that_negative_alpha_is_not_allowed(self):
self.presenter.handle_add_pair_button_clicked()
self.view.pairing_table.setCurrentCell(0, 4)
default_value = self.view.get_table_item_text(0, 4)
self.view.pairing_table.item(0, 4).setText("-1.0")
self.assertEqual(self.view.get_table_item_text(0, 4), default_value)
self.assertEqual(self.view.warning_popup.call_count, 1)
def test_that_clicking_guess_alpha_triggers_correct_slot_with_correct_row_supplied(self):
# Guess alpha functionality must be implemented by parent widgets. So we just check that the
# design for implementing this works (via an Observable in the presenter)
self.presenter.handle_add_pair_button_clicked()
self.presenter.handle_add_pair_button_clicked()
self.presenter.guessAlphaNotifier.notify_subscribers = mock.Mock()
self.view.pairing_table.cellWidget(1, 5).clicked.emit(True)
self.assertEqual(self.presenter.guessAlphaNotifier.notify_subscribers.call_count, 1)
self.assertEqual(
self.presenter.guessAlphaNotifier.notify_subscribers.call_args_list[0][0][0], ["pair_2", "my_group_0", "my_group_1"]
)
if __name__ == "__main__":
unittest.main(buffer=False, verbosity=2) | null |
5,959 | import os
import sys
# Add parent path to use local src as package for tests
root_dir = os.path.abspath(
os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir, os.path.pardir
)
)
sys.path.append(root_dir)
import asyncio
from multiprocessing import Process
import pytest
import uvicorn
from fastapi import Depends, FastAPI, Header, HTTPException
from flaky import flaky
from opal_common.fetcher import FetchingEngine
from opal_common.fetcher.providers.http_fetch_provider import HttpFetcherConfig
# Configurable
PORT = int(os.environ.get("PORT") or "9110")
BASE_URL = f"http://localhost:{PORT}"
DATA_ROUTE = f"/data"
AUTHORIZED_DATA_ROUTE = f"/data_authz"
SECRET_TOKEN = "fake-super-secret-token"
DATA_KEY = "Hello"
DATA_VALUE = "World"
DATA_SECRET_VALUE = "SecretWorld"
async def check_token_header(x_token: str = Header(None)):
if x_token != SECRET_TOKEN:
raise HTTPException(status_code=400, detail="X-Token header invalid")
return None
def setup_server():
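    # Minimal FastAPI app exposing an open data route and a token-protected
    # route; it is launched in a separate process by the `server` fixture below.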
app = FastAPI()
@app.get(DATA_ROUTE)
def get_data():
return {DATA_KEY: DATA_VALUE}
@app.get(AUTHORIZED_DATA_ROUTE)
def METHOD_NAME(token=Depends(check_token_header)):
return {DATA_KEY: DATA_SECRET_VALUE}
uvicorn.run(app, port=PORT)
@pytest.fixture(scope="module")
def server():
# Run the server as a separate process
proc = Process(target=setup_server, args=(), daemon=True)
proc.start()
yield proc
proc.kill() # Cleanup after test
@pytest.mark.asyncio
async def test_simple_http_get(server):
"""Simple http get."""
got_data_event = asyncio.Event()
async with FetchingEngine() as engine:
async def callback(data):
assert data[DATA_KEY] == DATA_VALUE
got_data_event.set()
await engine.queue_url(f"{BASE_URL}{DATA_ROUTE}", callback)
await asyncio.wait_for(got_data_event.wait(), 5)
assert got_data_event.is_set()
@pytest.mark.asyncio
async def test_simple_http_get_with_wait(server):
"""
    Simple http get - fetching synchronously with 'handle_url'
"""
async with FetchingEngine() as engine:
data = await engine.handle_url(f"{BASE_URL}{DATA_ROUTE}")
assert data[DATA_KEY] == DATA_VALUE
@pytest.mark.asyncio
async def test_authorized_http_get(server):
"""Test getting data from a server route with an auth token."""
got_data_event = asyncio.Event()
async with FetchingEngine() as engine:
async def callback(data):
assert data[DATA_KEY] == DATA_SECRET_VALUE
got_data_event.set()
        # fetch with the secret token passed in the X-Token header
await engine.queue_url(
f"{BASE_URL}{AUTHORIZED_DATA_ROUTE}",
callback,
HttpFetcherConfig(headers={"X-TOKEN": SECRET_TOKEN}),
)
await asyncio.wait_for(got_data_event.wait(), 5)
assert got_data_event.is_set()
@pytest.mark.asyncio
async def test_authorized_http_get_from_dict(server):
"""Just like test_authorized_http_get, but we also check that the
FetcherConfig is adapted from "the wire" (as a dict instead of the explicit
HttpFetcherConfig)"""
got_data_event = asyncio.Event()
async with FetchingEngine() as engine:
async def callback(data):
assert data[DATA_KEY] == DATA_SECRET_VALUE
got_data_event.set()
# raw config to be parsed
config = {"headers": {"X-TOKEN": SECRET_TOKEN}}
        # fetch with the secret token passed in the X-Token header
await engine.queue_url(f"{BASE_URL}{AUTHORIZED_DATA_ROUTE}", callback, config)
await asyncio.wait_for(got_data_event.wait(), 5)
assert got_data_event.is_set()
@flaky
@pytest.mark.asyncio
async def test_external_http_get():
    """Test a simple http get on an external site (https://httpbin.org/anything),
    checking we get back the JSON we expect (echoing the URL we queried)."""
got_data_event = asyncio.Event()
async with FetchingEngine() as engine:
url = "https://httpbin.org/anything"
async def callback(data):
assert data["url"] == url
got_data_event.set()
await engine.queue_url(url, callback)
await asyncio.wait_for(got_data_event.wait(), 5)
assert got_data_event.is_set() | null |
5,960 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=invalid-name
from PyQt4 import QtGui, QtCore
from functools import partial
from mantidqtinterfaces.reduction_gui.widgets.base_widget import BaseWidget
from reduction_gui.reduction.inelastic.dgs_pd_sc_conversion_script import PdAndScConversionScript
import ui.inelastic.ui_dgs_pd_sc_conversion
import mantidqtinterfaces.reduction_gui.widgets.util as util
class PdAndScConversionWidget(BaseWidget):
"""
Widget that presents powder and single crystal data conversion options
to the user.
"""
## Widget name
name = "Powder and SC"
def __init__(self, parent=None, state=None, settings=None, data_type=None):
super(PdAndScConversionWidget, self).__init__(parent, state, settings, data_type=data_type)
class PdAndScConversionFrame(QtGui.QFrame, ui.inelastic.ui_dgs_pd_sc_conversion.Ui_PdScConversionFrame):
def __init__(self, parent=None):
QtGui.QFrame.__init__(self, parent)
self.setupUi(self)
self._content = PdAndScConversionFrame(self)
self._layout.addWidget(self._content)
self._instrument_name = settings.instrument_name
self.initialize_content()
if state is not None:
self.set_state(state)
else:
self.set_state(PdAndScConversionScript(self._instrument_name))
def initialize_content(self):
# Constraints
self._content.q_low_edit.setValidator(QtGui.QDoubleValidator(self._content.q_low_edit))
self._content.q_width_edit.setValidator(QtGui.QDoubleValidator(self._content.q_width_edit))
self._content.q_high_edit.setValidator(QtGui.QDoubleValidator(self._content.q_high_edit))
# Default states
self._save_powder_nxs_state(self._content.save_procnexus_cb.isChecked())
# Connections
self.connect(self._content.save_procnexus_save, QtCore.SIGNAL("clicked()"), self._save_powder_nxs_save)
self.connect(self._content.save_procnexus_cb, QtCore.SIGNAL("toggled(bool)"), self._save_powder_nxs_state)
# Validate widgets
self._connect_validated_lineedit(self._content.q_low_edit)
self._connect_validated_lineedit(self._content.q_width_edit)
self._connect_validated_lineedit(self._content.q_high_edit)
def METHOD_NAME(self, lineedit, content):
lineedit.setText(content)
util.set_valid(lineedit, not lineedit.text() == "")
def _connect_validated_lineedit(self, ui_ctrl):
call_back = partial(self._validate_edit, ctrl=ui_ctrl)
self.connect(ui_ctrl, QtCore.SIGNAL("editingFinished()"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textEdited(QString)"), call_back)
self.connect(ui_ctrl, QtCore.SIGNAL("textChanged(QString)"), call_back)
def _validate_edit(self, ctrl=None):
is_valid = True
if not ctrl.text():
is_valid = False
util.set_valid(ctrl, is_valid)
def _save_powder_nxs_state(self, state):
self._content.save_procnexus_edit.setEnabled(state)
self._content.save_procnexus_save.setEnabled(state)
def _save_powder_nxs_save(self):
fname = self.data_save_dialog("*.nxs")
if fname:
self._content.save_procnexus_edit.setText(fname)
def set_state(self, state):
"""
Populate the UI elements with the data from the given state.
@param state: PdAndScConversionScript object
"""
self._content.powder_gb.setChecked(state.do_pd_convert)
self.METHOD_NAME(self._content.q_low_edit, state.pd_q_range_low)
self.METHOD_NAME(self._content.q_width_edit, state.pd_q_range_width)
self.METHOD_NAME(self._content.q_high_edit, state.pd_q_range_high)
self._content.save_procnexus_cb.setChecked(state.save_powder_nxs)
self._content.save_procnexus_edit.setText(state.save_powder_nxs_file)
def get_state(self):
"""
Returns an object with the state of the interface
"""
p = PdAndScConversionScript(self._instrument_name)
p.do_pd_convert = self._content.powder_gb.isChecked()
p.pd_q_range_low = self._content.q_low_edit.text()
p.pd_q_range_width = self._content.q_width_edit.text()
p.pd_q_range_high = self._content.q_high_edit.text()
p.save_powder_nxs = self._content.save_procnexus_cb.isChecked()
p.save_powder_nxs_file = self._content.save_procnexus_edit.text()
return p | null |
5,961 | import os
import mock
import pytest
import libensemble.tests.unit_tests.setup as setup
from libensemble.alloc_funcs.give_sim_work_first import give_sim_work_first
from libensemble.comms.logs import LogConfig
from libensemble.libE import libE
from libensemble.manager import LoggedException
from libensemble.resources.resources import Resources
from libensemble.tests.regression_tests.common import mpi_comm_excl
class MPIAbortException(Exception):
"""Raised when mock mpi abort is called"""
class MPISendException(Exception):
    """Raised when mock mpi isend is called"""
class Fake_MPI:
"""Explicit mocking of MPI communicator"""
def Get_size(self):
return 2
def Get_rank(self):
return 0
def Barrier(self):
return 0
def Dup(self):
return self
def Free(self):
return
def isend(self, msg, dest, tag):
raise MPISendException()
def Abort(self, flag):
assert flag == 1, "Aborting without exit code of 1"
raise MPIAbortException()
class Fake_MPI_1P(Fake_MPI):
def Get_size(self):
return 1
fake_mpi = Fake_MPI()
fake_mpi_1p = Fake_MPI_1P()
alloc_specs = {"alloc_f": give_sim_work_first}
hfile_abort = "libE_history_at_abort_0.npy"
pfile_abort = "libE_persis_info_at_abort_0.pickle"
# Run by pytest at end of module
def METHOD_NAME(module):
try:
print(f"teardown_module module:{module.__name__}")
except AttributeError:
print(f"teardown_module (direct run) module:{module}")
if Resources.resources is not None:
del Resources.resources
Resources.resources = None
# Run by pytest before each function
def setup_function(function):
print(f"setup_function function:{function.__name__}")
if Resources.resources is not None:
del Resources.resources
Resources.resources = None
def remove_file_if_exists(filename):
try:
os.remove(filename)
except OSError:
pass
def test_manager_exception():
"""Checking dump of history and pickle file on abort"""
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
remove_file_if_exists(hfile_abort)
remove_file_if_exists(pfile_abort)
with mock.patch("libensemble.manager.manager_main") as managerMock:
managerMock.side_effect = Exception
# Collision between libE.py and libE() (after mods to __init__.py) means
# libensemble.libE.comms_abort tries to refer to the function, not file
with mock.patch("libensemble.libE.comms_abort") as abortMock:
abortMock.side_effect = Exception
# Need fake MPI to get past the Manager only check and dump history
with pytest.raises(Exception):
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert os.path.isfile(hfile_abort), "History file not dumped"
assert os.path.isfile(pfile_abort), "Pickle file not dumped"
os.remove(hfile_abort)
os.remove(pfile_abort)
# Test that History and Pickle files NOT created when disabled
with pytest.raises(Exception):
libE_specs = {"mpi_comm": fake_mpi, "save_H_and_persis_on_abort": False}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert not os.path.isfile(hfile_abort), "History file dumped"
assert not os.path.isfile(pfile_abort), "Pickle file dumped"
# Note - this could be combined now with above tests as fake_MPI prevents need for use of mock module
# Only way that is better is that this will simply hit first code exception - (when fake_MPI tries to isend)
# While first test triggers on call to manager
def test_exception_raising_manager_with_abort():
    """Run until fake_MPI tries to send a message, to test that (mocked) comm.Abort is called.
    The manager raises MPISendException when fake_MPI tries to send a message; this is
    caught by libE, which then raises MPIAbortException from fake_MPI.Abort."""
with pytest.raises(MPIAbortException):
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected MPIAbortException exception")
def test_exception_raising_manager_no_abort():
    """Run until fake_MPI tries to send a message, with abort_on_exception disabled.
    The manager raises MPISendException when fake_MPI tries to send a message; with
    aborting disabled, libE re-raises it as a LoggedException instead of calling Abort."""
libE_specs = {"abort_on_exception": False, "mpi_comm": fake_mpi, "disable_resource_manager": True}
with pytest.raises(LoggedException):
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected MPISendException exception")
# Note: this raises a KeyError rather than an AssertionError, as the check does not
# test whether "in" is missing, only that it is a list - needs updating in future.
def test_exception_raising_check_inputs():
"""Intentionally running without sim_specs["in"] to test exception raising (Fails)"""
libE_specs = {"mpi_comm": fake_mpi, "disable_resource_manager": True}
with pytest.raises(Exception):
H, _, _ = libE({"out": [("f", float)]}, {"out": [("x", float)]}, {"sim_max": 1}, libE_specs=libE_specs)
pytest.fail("Expected ValidationError exception")
def test_proc_not_in_communicator():
"""Checking proc not in communicator returns exit status of 3"""
libE_specs = {}
libE_specs["mpi_comm"], mpi_comm_null = mpi_comm_excl()
H, _, flag = libE(
{"in": ["x"], "out": [("f", float)]}, {"out": [("x", float)]}, {"sim_max": 1}, libE_specs=libE_specs
)
assert flag == 3, "libE return flag should be 3. Returned: " + str(flag)
# def test_exception_raising_worker():
# # Intentionally running without sim_specs["in"] to test exception raising (Fails)
# H, _, _ = libE({"out": [("f", float)]}, {"out": [("x", float)]},
# {"sim_max": 1}, libE_specs={"mpi_comm": MPI.COMM_WORLD})
# assert H==[]
def rmfield(a, *fieldnames_to_remove):
return a[[name for name in a.dtype.names if name not in fieldnames_to_remove]]
@pytest.mark.extra
def test_logging_disabling():
remove_file_if_exists("ensemble.log")
remove_file_if_exists("libE_stats.txt")
sim_specs, gen_specs, exit_criteria = setup.make_criteria_and_specs_0()
libE_specs = {"mpi_comm": fake_mpi, "comms": "mpi", "disable_log_files": True}
logconfig = LogConfig.config
logconfig.logger_set = False
with mock.patch("libensemble.manager.manager_main") as managerMock:
managerMock.side_effect = Exception
with mock.patch("libensemble.libE.comms_abort") as abortMock:
abortMock.side_effect = Exception
with pytest.raises(Exception):
libE(sim_specs, gen_specs, exit_criteria, libE_specs=libE_specs)
pytest.fail("Expected exception")
assert not os.path.isfile("ensemble.log"), "ensemble.log file dumped"
assert not os.path.isfile("libE_stats.txt"), "libE_stats.txt file dumped"
if __name__ == "__main__":
test_manager_exception()
test_exception_raising_manager_with_abort()
test_exception_raising_manager_no_abort()
test_exception_raising_check_inputs()
test_proc_not_in_communicator()
test_logging_disabling() | null |
5,962 | #!/usr/bin/env python
"""
[{"service": "foo", "level": "2345", "state": "on"},
{"service": "foo", "level": "016", "state": "off"},
{"service": "bar", "state": "on"},
...]
"""
import os
import sys
import json
import pwd
import re
from subprocess import *
sys.path.append(os.path.dirname(__file__))
from comp import *
class CompRc(object):
def __init__(self, prefix='OSVC_COMP_RC_'):
self.prefix = prefix.upper()
self.sysname, self.nodename, x, x, self.machine = os.uname()
self.services = []
for k in [key for key in os.environ if key.startswith(self.prefix)]:
try:
l = json.loads(os.environ[k])
for i, d in enumerate(l):
for key, val in d.items():
d[key] = self.METHOD_NAME(val)
l[i] = d
self.services += l
except ValueError:
perror('failed to concatenate', os.environ[k], 'to service list')
self.validate_svcs()
if len(self.services) == 0:
raise NotApplicable()
if self.sysname not in ['Linux', 'HP-UX']:
perror(__file__, 'module not supported on', self.sysname)
raise NotApplicable()
vendor = os.environ.get('OSVC_COMP_NODES_OS_VENDOR', 'unknown')
release = os.environ.get('OSVC_COMP_NODES_OS_RELEASE', 'unknown')
if vendor in ['CentOS', 'Redhat', 'Red Hat', 'SuSE'] or \
(vendor == 'Oracle' and self.sysname == 'Linux'):
import chkconfig
self.o = chkconfig.Chkconfig()
elif vendor in ['Ubuntu', 'Debian', 'HP']:
import sysvinit
self.o = sysvinit.SysVInit()
else:
perror(vendor, "not supported")
raise NotApplicable()
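    # Expand %%ENV:NAME%% tokens in rule values (recursively for lists) using
    # environment variables, falling back to the OSVC_COMP_ prefixed variant.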
def METHOD_NAME(self, v):
if type(v) == list:
l = []
for _v in v:
l.append(self.METHOD_NAME(_v))
return l
if type(v) != str and type(v) != unicode:
return v
        p = re.compile(r'%%ENV:\w+%%')
for m in p.findall(v):
s = m.strip("%").replace('ENV:', '')
if s in os.environ:
_v = os.environ[s]
elif 'OSVC_COMP_'+s in os.environ:
_v = os.environ['OSVC_COMP_'+s]
else:
perror(s, 'is not an env variable')
raise NotApplicable()
v = v.replace(m, _v)
return v
def validate_svcs(self):
l = []
for i, svc in enumerate(self.services):
if self.validate_svc(svc) == RET_OK:
l.append(svc)
        self.services = l
def validate_svc(self, svc):
if 'service' not in svc:
perror(svc, ' rule is malformed ... service key not present')
return RET_ERR
if 'state' not in svc:
perror(svc, ' rule is malformed ... state key not present')
return RET_ERR
return RET_OK
def check_svc(self, svc, verbose=True):
if 'seq' in svc:
seq = svc['seq']
else:
seq = None
return self.o.check_state(svc['service'], svc['level'], svc['state'], seq=seq, verbose=verbose)
def fix_svc(self, svc, verbose=True):
if 'seq' in svc:
seq = svc['seq']
else:
seq = None
if self.check_svc(svc, verbose=False) == RET_OK:
return RET_OK
return self.o.fix_state(svc['service'], svc['level'], svc['state'], seq=seq)
def check(self):
r = 0
for svc in self.services:
r |= self.check_svc(svc)
return r
def fix(self):
r = 0
for svc in self.services:
r |= self.fix_svc(svc)
return r
if __name__ == "__main__":
syntax = """syntax:
%s PREFIX check|fixable|fix"""%sys.argv[0]
if len(sys.argv) != 3:
perror("wrong number of arguments")
perror(syntax)
sys.exit(RET_ERR)
try:
o = CompRc(sys.argv[1])
if sys.argv[2] == 'check':
RET = o.check()
elif sys.argv[2] == 'fix':
RET = o.fix()
elif sys.argv[2] == 'fixable':
RET = o.fixable()
else:
perror("unsupported argument '%s'"%sys.argv[2])
perror(syntax)
RET = RET_ERR
except NotApplicable:
sys.exit(RET_NA)
except:
import traceback
traceback.print_exc()
sys.exit(RET_ERR)
sys.exit(RET)
| null |
5,963 | import unittest
import os
import numpy as np
import skrf as rf
class CitiTestCase(unittest.TestCase):
"""
Test the IO of CITI files.
"""
def METHOD_NAME(self):
"""
Sets up the test directory
"""
self.test_dir = os.path.dirname(os.path.abspath(__file__))+'/MDIF_CITI_MDL/'
# constructor from filename
self.oneport_example1 = rf.Citi(self.test_dir + 'test_1p_citi.cti')
self.oneport_example2 = rf.Citi(self.test_dir + 'test_1p_citi_2_ri.cti')
self.twoport_example1 = rf.Citi(self.test_dir + 'test_2p_citi.cti')
self.twoport_example2 = rf.Citi(self.test_dir + 'test_2p_citi_2.cti')
self.twoport_example3 = rf.Citi(self.test_dir + 'test_2p_citi_2params.cti')
self.twoport_example4 = rf.Citi(self.test_dir + 'test_2p_citi_2params_db.cti')
self.twoport_example5 = rf.Citi(self.test_dir + 'test_2p_citi_3_ri.cti')
self.fourport_example1 = rf.Citi(self.test_dir + 'test_4p_citi.cti')
self.examples = [self.oneport_example1, self.oneport_example2,
self.twoport_example1, self.twoport_example2,
self.twoport_example3, self.twoport_example4,
self.twoport_example5, self.fourport_example1]
# constructor from file-object
file = open(self.test_dir + 'test_1p_citi.cti')
self.oneport_example1_from_fo = rf.Citi(file)
def test_to_networks(self):
""" Test if CITI data are correctly converted into Networks """
for ex in self.examples:
for ntwk in ex.networks:
self.assertIsInstance(ntwk, rf.Network)
def test_to_networkset(self):
""" Test if CITI data are correctly converted into NetworkSet """
for example in self.examples:
self.assertIsInstance(example.to_networkset(), rf.NetworkSet)
def test_params(self):
""" Test if the params are correctly parsed from the CITI files """
self.assertEqual(self.oneport_example1.params, ['Cm'])
self.assertEqual(self.twoport_example1.params, ['Cm'])
self.assertEqual(self.fourport_example1.params, ['Cm'])
def test_only_freq_in_var(self):
""" File without any VAR except for freq should return non empty NetworkSet. """
file = self.test_dir + 'test_2p_only_freq_VAR.cti'
cti = rf.Citi(file)
ns = cti.to_networkset()
self.assertTrue(ns) # not empty
self.assertEqual(len(ns), 1)
def test_values_1p_1(self):
""" Test if the values are correctly parsed from the CITI files """
deg = np.array([
-0.0178919999, -0.0180179999, -0.0181439998, -0.0182699998,
-0.0183959998, -0.0185219998, -0.0186479998, -0.0187739998,
-0.0188999998, -0.0204479998, -0.0205919998, -0.0207359998,
-0.0208799998, -0.0210239998, -0.0211679998, -0.0213119998,
-0.0214559997, -0.0215999997, -0.0230039997, -0.0231659997,
-0.0233279997, -0.0234899997, -0.0236519997, -0.0238139997,
-0.0239759997, -0.0241379996, -0.0242999996, -0.0255599996,
-0.0257399996, -0.0259199996, -0.0260999995, -0.0262799995,
-0.0264599995, -0.0266399995, -0.0268199995, -0.0269999995])
f = np.array([
710000000, 715000000, 720000000, 725000000,
730000000, 735000000, 740000000, 745000000,
750000000,
])
mag = np.ones(len(deg))
s1p_1 = rf.magdeg_2_reim(mag, deg)
ns_1p_1 = self.oneport_example1.to_networkset()
self.assertEqual(len(ns_1p_1), 4)
self.assertEqual(len(ns_1p_1[0]), 9)
np.testing.assert_array_equal(ns_1p_1[0].f, f)
np.testing.assert_array_almost_equal(ns_1p_1[0].s.squeeze(), s1p_1[:9])
def test_values_1p_2(self):
""" Test if the values are correctly parsed from the CITI files """
s1p_2 = np.array([
-1.31189E-3 - 1.47980E-3 * 1j,
-3.67867E-3 - 0.67782E-3 * 1j,
-3.43990E-3 + 0.58746E-3 * 1j,
-2.70664E-4 - 9.76175E-4 * 1j,
+0.65892E-4 - 9.61571E-4 * 1j])
ns_1p_2 = self.oneport_example2.to_networkset()
self.assertEqual(len(ns_1p_2), 1)
self.assertEqual(len(ns_1p_2[0]), len(s1p_2))
np.testing.assert_array_equal(ns_1p_2[0].f, np.array([1., 2., 3., 4., 5.]))
np.testing.assert_array_almost_equal(ns_1p_2[0].s.squeeze(), s1p_2)
def test_values_2p_1(self):
""" Test if the values are correctly parsed from the CITI files """
mag = np.array([[[0.999999951, 0.000312274295], [0.000312274295, 0.999999951]]])
deg = np.array([[[-0.0178919994, 89.982108], [89.982108, -0.0178919994]]])
s_2p_1 = rf.magdeg_2_reim(mag, deg)
f = np.array([
710000000, 715000000, 720000000, 725000000,
730000000, 735000000, 740000000, 745000000,
750000000,
])
ns_2p_1 = self.twoport_example1.to_networkset()
self.assertEqual(len(ns_2p_1), 4)
self.assertEqual(len(ns_2p_1[0]), 9)
np.testing.assert_array_equal(ns_2p_1[0].f, f)
np.testing.assert_array_almost_equal(ns_2p_1[0].s_mag[0], mag.squeeze())
np.testing.assert_array_almost_equal(ns_2p_1[0].s_deg[0], deg.squeeze())
np.testing.assert_array_almost_equal(ns_2p_1[0].s[0], s_2p_1[0])
def test_values_2p_2(self):
""" Test if the values are correctly parsed from the CITI files """
mag = np.array([
[[0.1, 0.3],
[0.5, 0.7]],
[[0.2, 0.4],
[0.6, 0.8]]
])
deg = np.array([
[[2, 4],
[6, 8]],
[[3, 5],
[7, 9]]
])
s2p_2 = rf.magdeg_2_reim(mag, deg)
ns_2p_2 = self.twoport_example2.to_networkset()
self.assertEqual(len(ns_2p_2), 1)
self.assertEqual(len(ns_2p_2[0]), 2)
np.testing.assert_array_equal(ns_2p_2[0].f, np.array([1e9, 2e9]))
np.testing.assert_array_almost_equal(ns_2p_2[0].s.squeeze(), s2p_2)
def test_values_4p(self):
""" Test if the values are correctly parsed from the CITI files """
ns_4p = self.fourport_example1.to_networkset()
self.assertEqual(len(ns_4p), 3)
self.assertEqual(len(ns_4p[0]), 51)
self.assertEqual(ns_4p.coords['Cm'], [9e-16, 8e-16, 7e-16])
suite = unittest.TestLoader().loadTestsFromTestCase(CitiTestCase)
unittest.TextTestRunner(verbosity=2).run(suite) | null |
5,964 | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore.ops import functional as F
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class Net(nn.Cell):
def __init__(self, reduction="none"):
super(Net, self).__init__()
self.bce = P.BinaryCrossEntropy(reduction)
def construct(self, x, y, weight=None):
return self.bce(x, y, weight)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_binary_cross_entropy_loss():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float32)
target = np.random.rand(20).astype(np.float32)
weight = np.random.rand(20).astype(np.float32)
reduction = "none"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [0.09555826, 1.2861121, 0.03518666, 0.6969416, 0.24313456, 0.99062896,
0.19205657, 0.5465214, 0.36964455, 0.21999404, 2.2953863, 2.2566645,
1.5803775, 1.3266402, 0.9883408, 1.2997618, 0.05439841, 0.14389999,
0.03405444, 0.23934692]
assert np.allclose(loss.asnumpy(), expect)
def test_binary_cross_entropy_loss_mean():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float32)
target = np.random.rand(20).astype(np.float32)
weight = np.random.rand(20).astype(np.float32)
reduction = "mean"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [0.7447324991226196]
assert loss.asnumpy() == expect
def test_binary_cross_entropy_loss_sum():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float32)
target = np.random.rand(20).astype(np.float32)
weight = np.random.rand(20).astype(np.float32)
reduction = "sum"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [14.894649505615234]
assert loss.asnumpy() == expect
def test_binary_cross_entropy_loss_sum_without_weight():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float32)
target = np.random.rand(20).astype(np.float32)
reduction = "sum"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target))
expect = [25.48195216753522]
assert np.allclose(loss.asnumpy(), expect)
def test_binary_cross_entropy_loss_16():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float16)
target = np.random.rand(20).astype(np.float16)
weight = np.random.rand(20).astype(np.float16)
reduction = "none"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [0.09552, 1.28613, 0.0351868, 0.696777, 0.243164, 0.990234,
0.192139, 0.546875, 0.370117, 0.219971, 2.29492, 2.25391,
1.58105, 1.32812, 0.987305, 1.30078, 0.0544434, 0.143921,
0.0340576, 0.239258]
assert np.allclose(loss.asnumpy(), expect)
def test_binary_cross_entropy_loss_mean_16():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float16)
target = np.random.rand(20).astype(np.float16)
weight = np.random.rand(20).astype(np.float16)
reduction = "mean"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [0.74462890625]
assert loss.asnumpy() == expect
def test_binary_cross_entropy_loss_sum_16():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float16)
target = np.random.rand(20).astype(np.float16)
weight = np.random.rand(20).astype(np.float16)
reduction = "sum"
net = Net(reduction)
loss = net(Tensor(prediction), Tensor(target), Tensor(weight))
expect = [14.890625]
assert loss.asnumpy() == expect
class Grad(nn.Cell):
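    """Wrap a network with GradOperation to compute gradients of its inputs,
    given an explicit sensitivity (sens) tensor."""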
def __init__(self, network):
super(Grad, self).__init__()
self.grad = C.GradOperation(get_all=True, sens_param=True)
self.network = network
def construct(self, x1, x2, sens, weight=None):
gout = self.grad(self.network)(x1, x2, sens, weight)
return gout
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def METHOD_NAME():
np.random.seed(42)
prediction = np.random.rand(20).astype(np.float32)
target = np.random.rand(20).astype(np.float32)
sens = np.random.rand(20).astype(np.float32)
weight = np.random.rand(20).astype(np.float32)
reduction = "none"
grad = Grad(Net(reduction))
dx = grad(Tensor(prediction), Tensor(target), Tensor(sens), Tensor(weight))
dx1_expect = [-4.80516590e-02, 2.32625079e+00, 6.38972521e-02, 3.13642323e-01,
-1.65661633e-01, -1.71821892e+00, -1.13685496e-01, 1.26669514e+00,
1.47891801e-03, 5.83921909e-01, -2.17992840e+01, 4.21899414e+00,
2.85430793e-02, -3.21346498e+00, -2.22674108e+00, -2.80453944e+00,
-1.19787852e-04, 2.48514321e-02, -1.66696273e-02, -2.71965731e-02]
assert np.allclose(dx[0].asnumpy(), dx1_expect)
def test_binary_cross_entropy_forward_functional(nptype):
"""
Feature: test binary_cross_entropy forward for given input dtype.
Description: test inputs for given input dtype.
Expectation: the result match with expected result.
"""
logits = Tensor(np.array([0.2, 0.7, 0.1]).astype(nptype))
labels = Tensor(np.array([0., 1., 0.]).astype(nptype))
weight = Tensor(np.array([1, 2, 2]).astype(nptype))
output = F.binary_cross_entropy(logits, labels, weight)
expected = Tensor(np.array([0.38240486]).astype(nptype))
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
def test_binary_cross_entropy_forward_float32_functional():
"""
Feature: test binary_cross_entropy forward.
Description: test float32 inputs.
Expectation: the result match with expected result.
"""
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
test_binary_cross_entropy_forward_functional(np.float32)
context.set_context(mode=context.PYNATIVE_MODE, device_target="CPU")
test_binary_cross_entropy_forward_functional(np.float32) | null |
5,965 | import pytest
from kopf._cogs.structs.references import EVERYTHING, Resource, Selector
@pytest.fixture()
def resource():
return Resource(
group='group1', version='version1', preferred=True,
plural='plural1', singular='singular1', kind='kind1',
shortcuts=['shortcut1', 'shortcut2'],
categories=['category1', 'category2'],
)
@pytest.fixture()
def v1_resource():
return Resource(
group='', version='v1', preferred=True,
plural='plural1', singular='singular1', kind='kind1',
shortcuts=['shortcut1', 'shortcut2'],
categories=['category1', 'category2'],
)
@pytest.mark.parametrize('group, version', [
(None, None),
('group1', None),
(None, 'version1'),
('group1', 'version1'),
])
def test_when_matches_everything(resource, group, version):
selector = Selector(EVERYTHING, group=group, version=version)
matches = selector.check(resource)
assert matches
@pytest.mark.parametrize('kwarg, kwval', [
('kind', 'kind1'),
('plural', 'plural1'),
('singular', 'singular1'),
('shortcut', 'shortcut1'),
('shortcut', 'shortcut2'),
('category', 'category1'),
('category', 'category2'),
('any_name', 'kind1'),
('any_name', 'plural1'),
('any_name', 'singular1'),
('any_name', 'shortcut1'),
('any_name', 'shortcut2'),
])
@pytest.mark.parametrize('group, version', [
(None, None),
('group1', None),
(None, 'version1'),
('group1', 'version1'),
])
def METHOD_NAME(resource, kwarg, kwval, group, version):
selector = Selector(group=group, version=version, **{kwarg: kwval})
matches = selector.check(resource)
assert matches
@pytest.mark.parametrize('kwarg, kwval', [
('kind', 'kind1'),
('plural', 'plural1'),
('singular', 'singular1'),
('shortcut', 'shortcut1'),
('shortcut', 'shortcut2'),
('category', 'category1'),
('category', 'category2'),
('any_name', 'kind1'),
('any_name', 'plural1'),
('any_name', 'singular1'),
('any_name', 'shortcut1'),
('any_name', 'shortcut2'),
])
@pytest.mark.parametrize('group, version', [
('group9', None),
(None, 'version9'),
('group1', 'version9'),
('group9', 'version1'),
('group9', 'version9'),
])
def test_when_groupversion_mismatch_but_names_do_match(resource, kwarg, kwval, group, version):
selector = Selector(group=group, version=version, **{kwarg: kwval})
matches = selector.check(resource)
assert not matches
@pytest.mark.parametrize('kwarg, kwval', [
('kind', 'kind9'),
('plural', 'plural9'),
('singular', 'singular9'),
('shortcut', 'shortcut9'),
('category', 'category9'),
('any_name', 'category9'),
('any_name', 'category1'), # categories are not used with any_name, must be explicit.
('any_name', 'category2'), # categories are not used with any_name, must be explicit.
])
@pytest.mark.parametrize('group, version', [
(None, None),
('group1', None),
(None, 'version1'),
('group1', 'version1'),
])
def test_when_groupversion_do_match_but_names_mismatch(resource, kwarg, kwval, group, version):
selector = Selector(group=group, version=version, **{kwarg: kwval})
matches = selector.check(resource)
assert not matches
def test_catchall_versions_are_ignored_for_nonpreferred_resources():
resource = Resource(
group='group1', version='version1', preferred=False,
plural='plural1', singular='singular1', kind='kind1',
shortcuts=['shortcut1', 'shortcut2'],
categories=['category1', 'category2'],
)
selector = Selector(EVERYTHING)
matches = selector.check(resource)
assert not matches
@pytest.mark.parametrize('selector_args', [
pytest.param(['events'], id='only-name'),
pytest.param(['v1', 'events'], id='with-version'),
pytest.param(['', 'v1', 'events'], id='with-groupversion'),
])
def test_events_are_matched_when_explicitly_named(selector_args):
resource = Resource('', 'v1', 'events')
selector = Selector(*selector_args)
matches = selector.check(resource)
assert matches
@pytest.mark.parametrize('selector_args', [
pytest.param([EVERYTHING], id='only-marker'),
pytest.param(['v1', EVERYTHING], id='with-core-version'),
pytest.param(['', 'v1', EVERYTHING], id='with-core-groupversion'),
pytest.param(['events.k8s.io', EVERYTHING], id='with-k8sio-group'),
pytest.param(['events.k8s.io', 'v1beta1', EVERYTHING], id='with-k8sio-groupversion'),
])
@pytest.mark.parametrize('resource_kwargs', [
pytest.param(dict(group='', version='v1'), id='core-v1'),
pytest.param(dict(group='events.k8s.io', version='v1'), id='k8sio-v1'),
pytest.param(dict(group='events.k8s.io', version='v1beta1'), id='k8sio-v1beta1'),
])
def test_events_are_excluded_from_everything(resource_kwargs, selector_args):
resource = Resource(**resource_kwargs, plural='events')
selector = Selector(*selector_args)
matches = selector.check(resource)
assert not matches
@pytest.mark.parametrize('kwarg, kwval', [
('kind', 'kind1'),
('plural', 'plural1'),
('singular', 'singular1'),
('shortcut', 'shortcut1'),
('shortcut', 'shortcut2'),
('any_name', 'kind1'),
('any_name', 'plural1'),
('any_name', 'singular1'),
('any_name', 'shortcut1'),
('any_name', 'shortcut2'),
])
def test_selection_of_specific_resources(resource, kwarg, kwval):
selector = Selector(**{kwarg: kwval})
selected = selector.select([resource])
assert selector.is_specific # prerequisite
assert selected == {resource}
@pytest.mark.parametrize('kwarg, kwval', [
('category', 'category1'),
('category', 'category2'),
('any_name', EVERYTHING),
])
def test_selection_of_nonspecific_resources(resource, kwarg, kwval):
selector = Selector(**{kwarg: kwval})
selected = selector.select([resource])
assert not selector.is_specific # prerequisite
assert selected == {resource}
@pytest.mark.parametrize('kwarg, kwval', [
('kind', 'kind1'),
('plural', 'plural1'),
('singular', 'singular1'),
('shortcut', 'shortcut1'),
('shortcut', 'shortcut2'),
('any_name', 'kind1'),
('any_name', 'plural1'),
('any_name', 'singular1'),
('any_name', 'shortcut1'),
('any_name', 'shortcut2'),
])
def test_precedence_of_corev1_over_others_when_specific(resource, v1_resource, kwarg, kwval):
selector = Selector(**{kwarg: kwval})
selected = selector.select([resource, v1_resource])
assert selector.is_specific # prerequisite
assert selected == {v1_resource}
@pytest.mark.parametrize('kwarg, kwval', [
('category', 'category1'),
('category', 'category2'),
('any_name', EVERYTHING),
])
def test_precedence_of_corev1_same_as_others_when_nonspecific(resource, v1_resource, kwarg, kwval):
selector = Selector(**{kwarg: kwval})
selected = selector.select([resource, v1_resource])
assert not selector.is_specific # prerequisite
assert selected == {resource, v1_resource} | null |
5,966 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2022 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from mantid.simpleapi import (
logger,
AverageLogData,
CreateEmptyTableWorkspace,
GroupWorkspaces,
DeleteWorkspace,
DeleteTableRows,
RenameWorkspace,
)
from mantidqtinterfaces.Engineering.gui.engineering_diffraction.settings.settings_helper import get_setting
from mantidqtinterfaces.Engineering.gui.engineering_diffraction.tabs.common import output_settings
from mantid.api import AnalysisDataService as ADS
from os import path
from numpy import full, nan, max
def write_table_row(ws_table, row, irow):
if irow > ws_table.rowCount() - 1:
ws_table.setRowCount(irow + 1)
[ws_table.setCell(irow, icol, row[icol]) for icol in range(0, len(row))]
def _generate_workspace_name(filepath: str, suffix: str) -> str:
wsname = path.splitext(path.split(filepath)[1])[0] + suffix
return wsname
class SampleLogsGroupWorkspace(object):
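    """Maintain a GroupWorkspace of sample-log tables (one table of averaged
    values per log, plus a run_info table) for the loaded data workspaces."""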
def __init__(self, suffix: str):
self._log_names = []
self._log_workspaces = None # GroupWorkspace
self._log_values = dict() # {ws_name: {log_name: [avg, er]} }
self._suffix = suffix
self._run_info_name = "run_info" + self._suffix
def create_log_workspace_group(self):
# run information table
run_info = self.make_runinfo_table()
self._log_workspaces = GroupWorkspaces([run_info], OutputWorkspace="logs" + self._suffix)
# a table per logs
logs = get_setting(output_settings.INTERFACES_SETTINGS_GROUP, output_settings.ENGINEERING_PREFIX, "logs")
if logs:
self._log_names = logs.split(",")
for log in self._log_names:
log_table_ws = self.make_log_table(log)
self._log_workspaces.add(log_table_ws.name())
def make_log_table(self, log):
ws_log = CreateEmptyTableWorkspace(OutputWorkspace=log + self._suffix)
ws_log.addColumn(type="float", name="avg")
ws_log.addColumn(type="float", name="stdev")
return ws_log
def make_runinfo_table(self):
run_info = CreateEmptyTableWorkspace(OutputWorkspace=self._run_info_name)
run_info.addColumn(type="str", name="Instrument")
run_info.addColumn(type="int", name="Run")
run_info.addColumn(type="str", name="Bank")
run_info.addColumn(type="float", name="uAmps")
run_info.addColumn(type="str", name="Title")
return run_info
def update_log_workspace_group(self, data_workspaces=None):
# both ws and name needed in event a ws is renamed and ws.name() is no longer correct
if not data_workspaces:
self.delete_logs()
return
if not self._log_workspaces:
self.create_log_workspace_group()
else:
for log in self._log_names:
if not ADS.doesExist(log + self._suffix):
log_table_ws = self.make_log_table(log)
self._log_workspaces.add(log_table_ws.name())
if not ADS.doesExist(self._run_info_name):
self.make_runinfo_table()
self._log_workspaces.add(self._run_info_name)
# update log tables
self.remove_all_log_rows()
for irow, (ws_name, ws) in enumerate(data_workspaces.get_loaded_ws_dict().items()):
try:
self.add_log_to_table(ws_name, ws, irow)
except Exception as e:
logger.warning(f"Unable to output log workspaces for workspace {ws_name}: " + str(e))
def add_log_to_table(self, ws_name, ws, irow):
# both ws and name needed in event a ws is renamed and ws.name() is no longer correct
# make dict for run if doesn't exist
if ws_name not in self._log_values:
self._log_values[ws_name] = dict()
# add run info
run = ws.getRun()
row = [
ws.getInstrument().getFullName(),
ws.getRunNumber(),
str(run.getProperty("bankid").value),
run.getProtonCharge(),
ws.getTitle(),
]
write_table_row(ADS.retrieve(self._run_info_name), row, irow)
# add log data - loop over existing log workspaces not logs in settings as these might have changed
currentRunLogs = [l.name for l in run.getLogData()]
nullLogValue = full(2, nan) # default nan if can't read/average log data
if run.getProtonCharge() > 0 and "proton_charge" in currentRunLogs:
for log in self._log_names:
if log in self._log_values[ws_name]:
avg, stdev = self._log_values[ws_name][log] # already averaged
elif log in currentRunLogs:
avg, stdev = AverageLogData(ws_name, LogName=log, FixZero=False)
else:
avg, stdev = nullLogValue
self._log_values[ws_name][log] = [avg, stdev] # update model dict (even if nan)
else:
self._log_values[ws_name] = {log: nullLogValue for log in self._log_names}
logger.warning(f"{ws.name()} does not contain a proton charge log - log values cannot be averaged.")
# write log values to table (nan if log could not be averaged)
for log, avg_and_stdev in self._log_values[ws_name].items():
write_table_row(ADS.retrieve(log + self._suffix), avg_and_stdev, irow)
self.update_log_group_name()
def remove_log_rows(self, row_numbers):
DeleteTableRows(TableWorkspace=self._log_workspaces, Rows=list(row_numbers))
self.update_log_group_name()
def remove_all_log_rows(self):
for ws in self._log_workspaces:
ws.setRowCount(0)
def delete_logs(self):
if self._log_workspaces:
ws_name = self._log_workspaces.name()
self._log_workspaces = None
DeleteWorkspace(ws_name)
def update_log_group_name(self):
name = self._generate_log_group_name()
if not name:
self.delete_logs()
return
RenameWorkspace(InputWorkspace=self._log_workspaces.name(), OutputWorkspace=name)
def _generate_log_group_name(self) -> str:
run_info = ADS.retrieve(self._run_info_name)
if run_info.rowCount() > 0:
runs = run_info.column("Run")
name = f"{run_info.row(0)['Instrument']}_{min(runs)}-{max(runs)}_logs"
return name + self._suffix
return ""
def METHOD_NAME(self):
return self._log_values
def get_log_workspaces(self):
return self._log_workspaces
def update_log_value(self, new_key, old_key):
self._log_values[new_key] = self._log_values.pop(old_key)
def clear_log_workspaces(self):
self._log_workspaces = None | null |
5,967 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Uwe Hermann <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
from .lists import *
class SamplerateError(Exception):
pass
class Decoder(srd.Decoder):
api_version = 3
id = 'ir_rc5'
name = 'IR RC-5'
longname = 'IR RC-5'
desc = 'RC-5 infrared remote control protocol.'
license = 'gplv2+'
inputs = ['logic']
outputs = []
tags = ['IR']
channels = (
{'id': 'ir', 'name': 'IR', 'desc': 'IR data line', 'idn':'dec_ir_rc5_chan_ir'},
)
options = (
{'id': 'polarity', 'desc': 'Polarity', 'default': 'active-low',
'values': ('active-low', 'active-high'), 'idn':'dec_ir_rc5_opt_polarity'},
{'id': 'protocol', 'desc': 'Protocol type', 'default': 'standard',
'values': ('standard', 'extended'), 'idn':'dec_ir_rc5_opt_protocol'},
)
annotations = (
('bit', 'Bit'),
('startbit1', 'Startbit 1'),
('startbit2', 'Startbit 2'),
('togglebit-0', 'Toggle bit 0'),
('togglebit-1', 'Toggle bit 1'),
('address', 'Address'),
('command', 'Command'),
)
annotation_rows = (
('bits', 'Bits', (0,)),
('fields', 'Fields', (1, 2, 3, 4, 5, 6)),
)
def __init__(self):
self.reset()
def reset(self):
self.samplerate = None
self.samplenum = None
self.edges, self.bits, self.ss_es_bits = [], [], []
self.state = 'IDLE'
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
self.old_ir = 1 if self.options['polarity'] == 'active-low' else 0
def METHOD_NAME(self, key, value):
if key == srd.SRD_CONF_SAMPLERATE:
self.samplerate = value
# One bit: 1.78ms (one half low, one half high).
self.halfbit = int((self.samplerate * 0.00178) / 2.0)
def putb(self, bit1, bit2, data):
ss, es = self.ss_es_bits[bit1][0], self.ss_es_bits[bit2][1]
self.put(ss, es, self.out_ann, data)
def handle_bits(self):
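        # Annotate a complete 14-bit RC-5 frame: two start bits, a toggle bit,
        # a 5-bit address and a 6-bit command (both MSB-first). In extended
        # mode the inverted second start bit supplies command bit 6.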
a, c, b = 0, 0, self.bits
# Individual raw bits.
for i in range(14):
if i == 0:
ss = max(0, self.bits[0][0] - self.halfbit)
else:
ss = self.ss_es_bits[i - 1][1]
es = self.bits[i][0] + self.halfbit
self.ss_es_bits.append([ss, es])
self.putb(i, i, [0, ['%d' % self.bits[i][1]]])
# Bits[0:0]: Startbit 1
s = ['Startbit1: %d' % b[0][1], 'SB1: %d' % b[0][1], 'SB1', 'S1', 'S']
self.putb(0, 0, [1, s])
# Bits[1:1]: Startbit 2
ann_idx = 2
s = ['Startbit2: %d' % b[1][1], 'SB2: %d' % b[1][1], 'SB2', 'S2', 'S']
if self.options['protocol'] == 'extended':
s = ['CMD[6]#: %d' % b[1][1], 'C6#: %d' % b[1][1], 'C6#', 'C#', 'C']
ann_idx = 6
self.putb(1, 1, [ann_idx, s])
# Bits[2:2]: Toggle bit
s = ['Togglebit: %d' % b[2][1], 'Toggle: %d' % b[2][1],
'TB: %d' % b[2][1], 'TB', 'T']
self.putb(2, 2, [3 if b[2][1] == 0 else 4, s])
# Bits[3:7]: Address (MSB-first)
for i in range(5):
a |= (b[3 + i][1] << (4 - i))
x = system.get(a, ['Unknown', 'Unk'])
s = ['Address: %d (%s)' % (a, x[0]), 'Addr: %d (%s)' % (a, x[1]),
'Addr: %d' % a, 'A: %d' % a, 'A']
self.putb(3, 7, [5, s])
# Bits[8:13]: Command (MSB-first)
for i in range(6):
c |= (b[8 + i][1] << (5 - i))
if self.options['protocol'] == 'extended':
inverted_bit6 = 1 if b[1][1] == 0 else 0
c |= (inverted_bit6 << 6)
cmd_type = 'VCR' if x[1] in ('VCR1', 'VCR2') else 'TV'
x = command[cmd_type].get(c, ['Unknown', 'Unk'])
s = ['Command: %d (%s)' % (c, x[0]), 'Cmd: %d (%s)' % (c, x[1]),
'Cmd: %d' % c, 'C: %d' % c, 'C']
self.putb(8, 13, [6, s])
def edge_type(self):
# Categorize according to distance from last edge (short/long).
distance = self.samplenum - self.edges[-1]
s, l, margin = self.halfbit, self.halfbit * 2, int(self.halfbit / 2)
if distance in range(l - margin, l + margin + 1):
return 'l'
elif distance in range(s - margin, s + margin + 1):
return 's'
else:
return 'e' # Error, invalid edge distance.
def reset_decoder_state(self):
self.edges, self.bits, self.ss_es_bits = [], [], []
self.state = 'IDLE'
def decode(self):
if not self.samplerate:
raise SamplerateError('Cannot decode without samplerate.')
while True:
(self.ir,) = self.wait()
# Wait for any edge (rising or falling).
if self.old_ir == self.ir:
continue
# State machine.
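            # RC-5 is bi-phase (Manchester) coded: every bit has a transition in
            # the middle of its period, so consecutive edges arrive either half a
            # bit ('s') or a full bit ('l') apart. The MID/START states track
            # whether the last edge fell in the middle or at the start of a bit.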
if self.state == 'IDLE':
bit = 1
self.edges.append(self.samplenum)
self.bits.append([self.samplenum, bit])
self.state = 'MID1'
self.old_ir = self.ir
continue
edge = self.edge_type()
if edge == 'e':
self.reset_decoder_state() # Reset state machine upon errors.
continue
if self.state == 'MID1':
self.state = 'START1' if edge == 's' else 'MID0'
bit = None if edge == 's' else 0
elif self.state == 'MID0':
self.state = 'START0' if edge == 's' else 'MID1'
bit = None if edge == 's' else 1
elif self.state == 'START1':
if edge == 's':
self.state = 'MID1'
bit = 1 if edge == 's' else None
elif self.state == 'START0':
if edge == 's':
self.state = 'MID0'
bit = 0 if edge == 's' else None
self.edges.append(self.samplenum)
if bit is not None:
self.bits.append([self.samplenum, bit])
if len(self.bits) == 14:
self.handle_bits()
self.reset_decoder_state()
self.old_ir = self.ir | null |
5,968 | #!/usr/bin/env python
## Copyright (c) 2019, Alliance for Open Media. All rights reserved
##
## This source code is subject to the terms of the BSD 2 Clause License and
## the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
## was not distributed with this source code in the LICENSE file, you can
## obtain it at www.aomedia.org/license/software. If the Alliance for Open
## Media Patent License 1.0 was not distributed with this source code in the
## PATENTS file, you can obtain it at www.aomedia.org/license/patent.
##
__author__ = "[email protected], [email protected]"
import os
import re
import logging
import math
from Config import BinPath, LoggerName, VMAF
from Utils import GetShortContentName, ExecuteCmd
subloggername = "CalcQtyMetrics_VMAFTool"
loggername = LoggerName + '.' + '%s' % subloggername
logger = logging.getLogger(loggername)
Model_Pkg_File = os.path.join(BinPath, 'vmaf_v0.6.1.pkl')
VMAFMetricsFullList = ['VMAF_Y','VMAF_Y-NEG','PSNR_Y','PSNR_U','PSNR_V','SSIM_Y(dB)',
'MS-SSIM_Y(dB)','PSNR-HVS','CIEDE2000','APSNR_Y','APSNR_U','APSNR_V']
def ParseVMAFLogFile(vmaf_log):
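    # Parse the libvmaf XML log: collect a per-frame metric string for each frame
    # and the pooled means/aggregates in the order given by VMAFMetricsFullList.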
floats = len(VMAFMetricsFullList) * [0.0]
per_frame_log = []
flog = open(vmaf_log, 'r')
for line in flog:
m = re.search(r"<frame\s+frameNum=\"(\d+)\"",line)
if m:
frameNum = m.group(1)
m = re.search(r"<frame\s+(.*)\s+psnr_y=\"(\d+\.?\d*)\"",line)
if m:
psnr_y = m.group(2)
m = re.search(r"<frame\s+(.*)\s+psnr_cb=\"(\d+\.?\d*)\"", line)
if m:
psnr_cb = m.group(2)
m = re.search(r"<frame\s+(.*)\s+psnr_cr=\"(\d+\.?\d*)\"", line)
if m:
psnr_cr = m.group(2)
m = re.search(r"<frame\s+(.*)\s+float_ssim=\"(\d+\.?\d*)\"", line)
if m:
ssim = m.group(2)
m = re.search(r"<frame\s+(.*)\s+psnr_hvs=\"(\d+\.?\d*)\"", line)
if m:
psnr_hvs = m.group(2)
m = re.search(r"<frame\s+(.*)\s+float_ms_ssim=\"(\d+\.?\d*)\"", line)
if m:
ms_ssim = m.group(2)
m = re.search(r"<frame\s+(.*)\s+ciede2000=\"(\d+\.?\d*)\"", line)
if m:
ciede2000 = m.group(2)
m = re.search(r"<frame\s+(.*)\s+vmaf=\"(\d+\.?\d*)\"", line)
if m:
vmaf = m.group(2)
m = re.search(r"<frame\s+(.*)\s+vmaf_neg=\"(\d+\.?\d*)\"", line)
if m:
vmaf_neg = m.group(2)
per_frame_log.append("%s,%s,%s,%s,%s,%s,%s,%s,%s"%
(psnr_y,psnr_cb,psnr_cr,ssim,ms_ssim,vmaf,vmaf_neg,psnr_hvs,
ciede2000))
m = re.search(r"\"vmaf\".*\s+mean=\"(\d+\.?\d*)\"\s+",line)
if m:
floats[0] = m.group(1)
m = re.search(r"\"vmaf_neg\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[1] = m.group(1)
m = re.search(r"\"psnr_y\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[2] = m.group(1)
m = re.search(r"\"psnr_cb\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[3] = m.group(1)
m = re.search(r"\"psnr_cr\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[4] = m.group(1)
m = re.search(r"\"float_ssim\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[5] = m.group(1)
m = re.search(r"\"float_ms_ssim\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[6] = m.group(1)
m = re.search(r"\"psnr_hvs\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[7] = m.group(1)
m = re.search(r"\"ciede2000\".*\s+mean=\"(\d+\.?\d*)\"\s+", line)
if m:
floats[8] = m.group(1)
#<aggregate_metrics apsnr_y="46.817276" apsnr_cb="49.092538" apsnr_cr="50.014785" />
m = re.search(r"aggregate_metrics\s+apsnr_y=\"(\d+\.?\d*)\"\s+apsnr_cb=\"(\d+\.?\d*)\"\s+apsnr_cr=\"(\d+\.?\d*)\"", line)
if m:
floats[9] = m.group(1)
floats[10] = m.group(2)
floats[11] = m.group(3)
flog.close()
floats = [float(i) for i in floats]
print_str = "VMAF quality metrics: "
for metrics, idx in zip(VMAFMetricsFullList, range(len(VMAFMetricsFullList))):
print_str += "%s = %2.5f, " % (metrics, floats[idx])
logger.info(print_str)
return floats[0:len(VMAFMetricsFullList)], per_frame_log
def METHOD_NAME(recfile, path):
filename = recfile + '-libvmaf.xml'
file = os.path.join(path, filename)
return file
################################################################################
##################### Exposed Functions ########################################
def VMAF_CalQualityMetrics(origfile, recfile, fmt, num, w, h, bit_depth,
logfilePath, LogCmdOnly=False):
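    # Run the vmaf tool with the AOM CTC v1.0 metric set; results are written to
    # an XML log named after the reconstructed file in logfilePath.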
vmaf_log = METHOD_NAME(recfile, logfilePath)
args = " -r %s -d %s --aom_ctc v1.0 -q --threads 4 -o %s" \
% (origfile, recfile, vmaf_log)
cmd = VMAF + args
ExecuteCmd(cmd, LogCmdOnly)
def VMAF_GatherQualityMetrics(recfile, logfilePath):
vmaf_log = METHOD_NAME(recfile, logfilePath)
results, per_frame_log = ParseVMAFLogFile(vmaf_log)
return results, per_frame_log | null |
5,969 | # Copyright 2014-2021 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Functions for managing volumes in juju units. One volume is supported per unit.
Subordinates may have their own storage, provided it is on its own partition.
Configuration stanzas::
volume-ephemeral:
type: boolean
default: true
description: >
If false, a volume is mounted as specified in "volume-map"
If true, ephemeral storage will be used, meaning that log data
will only exist as long as the machine. YOU HAVE BEEN WARNED.
volume-map:
type: string
default: {}
description: >
YAML map of units to device names, e.g:
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
Service units will raise a configure-error if volume-ephemeral
is 'true' and no volume-map value is set. Use 'juju set' to set a
value and 'juju resolved' to complete configuration.
Usage::
from charmsupport.volumes import configure_volume, VolumeConfigurationError
from charmsupport.hookenv import log, ERROR
        def pre_mount_hook():
stop_service('myservice')
def post_mount_hook():
start_service('myservice')
if __name__ == '__main__':
try:
configure_volume(before_change=pre_mount_hook,
after_change=post_mount_hook)
except VolumeConfigurationError:
log('Storage could not be configured', ERROR)
'''
# XXX: Known limitations
# - fstab is neither consulted nor updated
import os
from charmhelpers.core import hookenv
from charmhelpers.core import host
import yaml
MOUNT_BASE = '/srv/juju/volumes'
class VolumeConfigurationError(Exception):
'''Volume configuration data is missing or invalid'''
pass
def get_config():
'''Gather and sanity-check volume configuration data'''
volume_config = {}
config = hookenv.config()
errors = False
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
volume_config['ephemeral'] = True
else:
volume_config['ephemeral'] = False
try:
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
except yaml.YAMLError as e:
hookenv.log("Error parsing YAML volume-map: {}".format(e),
hookenv.ERROR)
errors = True
if volume_map is None:
# probably an empty string
volume_map = {}
elif not isinstance(volume_map, dict):
hookenv.log("Volume-map should be a dictionary, not {}".format(
type(volume_map)))
errors = True
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
if volume_config['device'] and volume_config['ephemeral']:
# asked for ephemeral storage but also defined a volume ID
hookenv.log('A volume is defined for this unit, but ephemeral '
'storage was requested', hookenv.ERROR)
errors = True
elif not volume_config['device'] and not volume_config['ephemeral']:
# asked for permanent storage but did not define volume ID
hookenv.log('Ephemeral storage was requested, but there is no volume '
'defined for this unit.', hookenv.ERROR)
errors = True
unit_mount_name = hookenv.local_unit().replace('/', '-')
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
if errors:
return None
return volume_config
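# Illustrative result (not taken from a real deployment): with volume-ephemeral
# set to false and volume-map set to "{ rsyslog/0: /dev/vdb }", calling
# get_config() on unit rsyslog/0 would return roughly:
#     {'ephemeral': False,
#      'device': '/dev/vdb',
#      'mountpoint': '/srv/juju/volumes/rsyslog-0'}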
def mount_volume(config):
if os.path.exists(config['mountpoint']):
if not os.path.isdir(config['mountpoint']):
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
raise VolumeConfigurationError()
else:
host.mkdir(config['mountpoint'])
if os.path.ismount(config['mountpoint']):
unmount_volume(config)
if not host.mount(config['device'], config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def unmount_volume(config):
if os.path.ismount(config['mountpoint']):
if not host.umount(config['mountpoint'], persist=True):
raise VolumeConfigurationError()
def METHOD_NAME():
'''List of all mounted managed volumes'''
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
def configure_volume(before_change=lambda: None, after_change=lambda: None):
'''Set up storage (or don't) according to the charm's volume configuration.
Returns the mount point or "ephemeral". before_change and after_change
are optional functions to be called if the volume configuration changes.
'''
config = get_config()
if not config:
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
raise VolumeConfigurationError()
if config['ephemeral']:
if os.path.ismount(config['mountpoint']):
before_change()
unmount_volume(config)
after_change()
return 'ephemeral'
else:
# persistent storage
if os.path.ismount(config['mountpoint']):
mounts = dict(METHOD_NAME())
if mounts.get(config['mountpoint']) != config['device']:
before_change()
unmount_volume(config)
mount_volume(config)
after_change()
else:
before_change()
mount_volume(config)
after_change()
return config['mountpoint'] | null |
5,970 | """
Module to run tests on SpecObj
"""
import numpy as np
import sys
import os
from copy import deepcopy
import pytest
from IPython import embed
from astropy.table import Table
from astropy.io import fits
from pypeit import spec2dobj
from pypeit.spectrographs.util import load_spectrograph
from pypeit.tests import tstutils
from pypeit import slittrace
from pypeit import pypmsgs
from pypeit.images import imagebitmask
@pytest.fixture
def init_dict():
sciimg = np.ones((1000,1000)).astype(float)
# Slits
left = np.full((1000, 3), 2, dtype=float)
right = np.full((1000, 3), 8, dtype=float)
left[:,1] = 15.
right[:,1] = 21.
left[:,2] = 25.
right[:,2] = 31.
slits = slittrace.SlitTraceSet(left, right, 'MultiSlit',
nspat=1000, PYP_SPEC='dummy')
# Construct table of spectral flexure
spec_flex_table = Table()
spec_flex_table['spat_id'] = slits.spat_id
spec_flex_table['sci_spec_flexure'] = np.zeros(left.shape[1])
#
return dict(sciimg = sciimg,
ivarraw = 0.1 * np.ones_like(sciimg),
skymodel = 0.95 * np.ones_like(sciimg),
objmodel = np.ones_like(sciimg),
ivarmodel = 0.05 * np.ones_like(sciimg),
scaleimg = np.ones_like(sciimg),
waveimg = 1000 * np.ones_like(sciimg),
bpmmask=imagebitmask.ImageBitMaskArray(sciimg.shape),
slits=slits,
wavesol=None,
maskdef_designtab=None,
tilts=np.ones_like(sciimg).astype(float),
#tilts=wavetilts.WaveTilts(**test_wavetilts.instant_dict),
sci_spat_flexure=3.5,
sci_spec_flexure=spec_flex_table,
vel_type='HELIOCENTRIC',
vel_corr=1.0+1.0e-5)
'''
from IPython import embed
dpath = '/home/xavier/Projects/PypeIt-development-suite/REDUX_OUT/keck_lris_blue/multi_300_5000_d680'
new_spec2dfile = os.path.join(dpath, 'Science', 'spec2d_b170816_0076-E570_LRISb_2017Aug16T071652.378.fits')
orig_spec2dfile = os.path.join(dpath, 'Science', 'Orig', 'spec2d_b170816_0076-E570_LRISb_2017Aug16T071652.378.fits')
new_spec2DObj = spec2dobj.Spec2DObj.from_file(new_spec2dfile, 1)
orig_spec2DObj = spec2dobj.Spec2DObj.from_file(orig_spec2dfile, 1)
orig_spec2DObj.update_slits(new_spec2DObj)
'''
####################################################
# Testing of Spec2DObj
def test_init(init_dict):
init_dict['detector'] = tstutils.get_kastb_detector()
spec2DObj = spec2dobj.Spec2DObj(**init_dict)
# Check
assert spec2DObj.hdu_prefix == 'DET01-'
def test_spec2dobj_io(init_dict):
init_dict['detector'] = tstutils.get_kastb_detector()
spec2DObj = spec2dobj.Spec2DObj(**init_dict)
# Write
ofile = tstutils.data_path('tst_spec2d.fits')
if os.path.isfile(ofile):
os.remove(ofile)
spec2DObj.to_file(ofile)
# Read
_spec2DObj = spec2dobj.Spec2DObj.from_file(ofile, spec2DObj.detname)
os.remove(ofile)
def test_spec2dobj_update_slit(init_dict):
# Build two
spec2DObj1 = spec2dobj.Spec2DObj(**init_dict,
detector=load_spectrograph('keck_deimos').get_detector_par(1))
spec2DObj2 = spec2dobj.Spec2DObj(**init_dict,
detector=load_spectrograph('keck_deimos').get_detector_par(2))
# WARNING: The instantiation of the two objects above using the same
# dictionary means their components point to *the same objects*. I.e.,
# ``spec2DObj1.sciimg is spec2DObj2.sciimg`` is True! That means any
# alterations to complex attributes of spec2DObj1 are also made to
# spec2DObj2. I made some changes below to ensure this isn't true for this
# test, but we need to beware of any instantiations like the above in the
# code itself!
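    # Minimal sketch of that aliasing pitfall (hypothetical Dummy class, not PypeIt API):
    #     shared = dict(sciimg=np.zeros((2, 2)))
    #     obj1, obj2 = Dummy(**shared), Dummy(**shared)
    #     obj1.sciimg[0, 0] = 1.0   # obj2.sciimg changes too; both hold the same array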
# Checks
with pytest.raises(pypmsgs.PypeItError):
spec2DObj1.update_slits(spec2DObj2)
# Update
spec2DObj2.detector = load_spectrograph('keck_deimos').get_detector_par(1)
spec2DObj2.sciimg = spec2DObj1.sciimg.copy()*2.
spec2DObj2.slits = deepcopy(init_dict['slits'])
spec2DObj2.slits.mask[1:] = 1
# WARNING: This barfs!!
# spec2DObj2.bpmmask = deepcopy(init_dict['bpmmask'])
spec2DObj2.bpmmask = imagebitmask.ImageBitMaskArray(init_dict['sciimg'].shape)
spec2DObj2.bpmmask[...] = 1
spec2DObj1.update_slits(spec2DObj2)
####################################################
# Testing of AllSpec2DObj
def test_all2dobj_hdr(init_dict):
# Build one
init_dict['detector'] = tstutils.get_kastb_detector()
spec2DObj = spec2dobj.Spec2DObj(**init_dict)
allspec2D = spec2dobj.AllSpec2DObj()
allspec2D['meta']['bkg_redux'] = False
allspec2D['meta']['find_negative'] = False
allspec2D[spec2DObj.detname] = spec2DObj
#
kast_file = tstutils.data_path('b1.fits.gz')
header = fits.getheader(kast_file)
spectrograph = load_spectrograph('shane_kast_blue')
# Do it
hdr = allspec2D.build_primary_hdr(header, spectrograph, calib_dir=tstutils.data_path(''))
# Test it
assert hdr['SKYSUB'] == 'MODEL'
def test_all2dobj_write(init_dict):
# Build one
init_dict['detector'] = tstutils.get_kastb_detector()
spec2DObj = spec2dobj.Spec2DObj(**init_dict)
allspec2D = spec2dobj.AllSpec2DObj()
allspec2D['meta']['bkg_redux'] = False
allspec2D['meta']['find_negative'] = False
detname = spec2DObj.detname
allspec2D[detname] = spec2DObj
# Write
ofile = tstutils.data_path('tst_allspec2d.fits')
if os.path.isfile(ofile):
os.remove(ofile)
allspec2D.write_to_fits(ofile)
# Read
_allspec2D = spec2dobj.AllSpec2DObj.from_fits(ofile)
# Check
assert allspec2D.detectors == _allspec2D.detectors, 'Bad read: detector mismatch'
assert allspec2D['meta'] == _allspec2D['meta'], 'Bad read: meta mismatch'
# Try to update it
_allspec2D['meta']['bkg_redux'] = True
_allspec2D[detname].vel_corr = 2.
_allspec2D.write_to_fits(ofile, update_det='DET01')
__allspec2D = spec2dobj.AllSpec2DObj.from_fits(ofile)
assert __allspec2D['meta'] == _allspec2D['meta'], 'Bad read: meta mismatch'
assert __allspec2D['meta'] != allspec2D['meta'], 'Bad read: meta mismatch'
assert __allspec2D[detname].vel_corr == 2., 'Bad update'
os.remove(ofile)
def METHOD_NAME(init_dict):
allspec2D = spec2dobj.AllSpec2DObj()
allspec2D['meta']['bkg_redux'] = False
allspec2D['meta']['find_negative'] = False
for i in range(2):
d = load_spectrograph('keck_deimos').get_detector_par(i+1)
allspec2D[d.name] = spec2dobj.Spec2DObj(detector=d, **init_dict)
# Write
ofile = tstutils.data_path('tst_allspec2d.fits')
if os.path.isfile(ofile):
os.remove(ofile)
allspec2D.write_to_fits(ofile)
_allspec2D = spec2dobj.AllSpec2DObj()
_allspec2D['meta']['bkg_redux'] = False
_allspec2D['meta']['find_negative'] = False
d = load_spectrograph('keck_deimos').get_detector_par(2)
detname = d.name
init_dict['sciimg'] = allspec2D[detname].sciimg.copy() * 2
_allspec2D[detname] = spec2dobj.Spec2DObj(detector=d, **init_dict)
_allspec2D.write_to_fits(ofile, update_det=_allspec2D.detectors, overwrite=True)
# Check
allspec2D_2 = spec2dobj.AllSpec2DObj.from_fits(ofile)
assert np.array_equal(allspec2D_2[detname].sciimg, _allspec2D[detname].sciimg), 'Bad update'
assert np.array_equal(allspec2D_2[detname].sciimg, allspec2D[detname].sciimg*2), 'Bad update'
os.remove(ofile)
| null |
5,971 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# pylint: disable=no-init
from mantid.api import *
from mantid.kernel import *
from vesuvio.base import VesuvioBase
class VesuvioResolution(VesuvioBase):
_workspace_index = None
_mass = None
def category(self):
return "Inelastic\\Indirect\\Vesuvio"
def summary(self):
return "Calculates the resolution function for VESUVIO"
def PyInit(self):
self.declareProperty(
MatrixWorkspaceProperty(name="Workspace", defaultValue="", direction=Direction.Input), doc="Sample matrix workspace"
)
self.declareProperty(name="WorkspaceIndex", defaultValue=0, doc="Workspace index to use for resolution")
self.declareProperty(name="Mass", defaultValue=100.0, doc="The mass defining the recoil peak in AMU")
self.declareProperty(
WorkspaceProperty(name="OutputWorkspaceTOF", defaultValue="", direction=Direction.Output, optional=PropertyMode.Optional),
doc="Output resolution workspace in TOF",
)
self.declareProperty(
WorkspaceProperty(name="OutputWorkspaceYSpace", defaultValue="", direction=Direction.Output, optional=PropertyMode.Optional),
doc="Output resolution workspace in ySpace",
)
def validateInputs(self):
"""
Does basic validation for inputs.
"""
issues = dict()
sample_ws = self.getProperty("Workspace").value
workspace_index = self.getProperty("WorkspaceIndex").value
if not isinstance(sample_ws, MatrixWorkspace):
issues["Workspace"] = "The Workspace must be a MatrixWorkspace"
elif workspace_index > sample_ws.getNumberHistograms() - 1:
issues["WorkspaceIndex"] = "Workspace index is out of range"
out_ws_tof = self.getPropertyValue("OutputWorkspaceTOF")
out_ws_ysp = self.getPropertyValue("OutputWorkspaceYSpace")
output_tof = out_ws_tof != ""
output_ysp = out_ws_ysp != ""
if not (output_tof or output_ysp):
warning_message = "Must output in either time of flight or ySpace"
issues["OutputWorkspaceTOF"] = warning_message
issues["OutputWorkspaceYSpace"] = warning_message
return issues
def METHOD_NAME(self):
sample_ws = self.getProperty("Workspace").value
out_ws_tof = self.getPropertyValue("OutputWorkspaceTOF")
out_ws_ysp = self.getPropertyValue("OutputWorkspaceYSpace")
self._workspace_index = self.getProperty("WorkspaceIndex").value
self._mass = self.getProperty("Mass").value
output_tof = out_ws_tof != ""
output_ysp = out_ws_ysp != ""
if output_tof:
res_tof = self._calculate_resolution(sample_ws)
self.setProperty("OutputWorkspaceTOF", res_tof)
if output_ysp:
y_space_conv = self._execute_child_alg(
"ConvertToYSpace", return_values="OutputWorkspace", InputWorkspace=sample_ws, Mass=self._mass
)
res_ysp = self._calculate_resolution(y_space_conv)
self.setProperty("OutputWorkspaceYSpace", res_ysp)
def _calculate_resolution(self, workspace):
"""
Calculates the resolution function using the VesuvioResolution fit function.
@param workspace The sample workspace
"""
function = "name=VesuvioResolution, Mass=%f" % self._mass
fit_naming_stem = "__vesuvio_res_fit"
# Execute the resolution function using fit.
# Functions can't currently be executed as stand alone objects,
# so for now we will run fit with zero iterations to achieve the same result.
fit_ws = self._execute_child_alg(
"Fit",
return_values="OutputWorkspace",
Function=function,
InputWorkspace=workspace,
MaxIterations=0,
CreateOutput=True,
Output=fit_naming_stem,
WorkspaceIndex=self._workspace_index,
OutputCompositeMembers=False,
)
# Extract just the function values from the fit spectrum
res_ws = self._execute_child_alg("ExtractSingleSpectrum", InputWorkspace=fit_ws, WorkspaceIndex=1)
return res_ws
AlgorithmFactory.subscribe(VesuvioResolution) | null |
5,972 | from collections import namedtuple
_ColumnMetadata = namedtuple(
"ColumnMetadata", ["name", "datatype", "foreignkeys", "default",
"has_default"]
)
def ColumnMetadata(name, datatype, foreignkeys=None, default=None,
has_default=False):
return _ColumnMetadata(name, datatype, foreignkeys or [], default,
has_default)
ForeignKey = namedtuple(
"ForeignKey",
[
"parentschema",
"parenttable",
"parentcolumn",
"childschema",
"childtable",
"childcolumn",
],
)
TableMetadata = namedtuple("TableMetadata", "name columns")
def parse_defaults(defaults_string):
"""Yields default values for a function, given the string provided by
pg_get_expr(pg_catalog.pg_proc.proargdefaults, 0)"""
if not defaults_string:
return
current = ""
in_quote = None
for char in defaults_string:
if current == "" and char == " ":
# Skip space after comma separating default expressions
continue
if char == '"' or char == "'":
if in_quote and char == in_quote:
# End quote
in_quote = None
elif not in_quote:
# Begin quote
in_quote = char
elif char == "," and not in_quote:
# End of expression
yield current
current = ""
continue
current += char
yield current
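# Quick illustration of the splitting rules above (input string is made up):
#     list(parse_defaults("1, 'a, b', NULL"))  ->  ['1', "'a, b'", 'NULL']
# The comma inside the quoted default is preserved instead of starting a new item.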
class FunctionMetadata:
def __init__(
self,
schema_name,
func_name,
arg_names,
arg_types,
arg_modes,
return_type,
is_aggregate,
is_window,
is_set_returning,
is_extension,
arg_defaults,
):
"""Class for describing a postgresql function"""
self.schema_name = schema_name
self.func_name = func_name
self.arg_modes = tuple(arg_modes) if arg_modes else None
self.arg_names = tuple(arg_names) if arg_names else None
# Be flexible in not requiring arg_types -- use None as a placeholder
# for each arg. (Used for compatibility with old versions of postgresql
        # where such info is hard to get.)
if arg_types:
self.arg_types = tuple(arg_types)
elif arg_modes:
self.arg_types = tuple([None] * len(arg_modes))
elif arg_names:
self.arg_types = tuple([None] * len(arg_names))
else:
self.arg_types = None
self.arg_defaults = tuple(parse_defaults(arg_defaults))
self.return_type = return_type.strip()
self.is_aggregate = is_aggregate
self.is_window = is_window
self.is_set_returning = is_set_returning
self.is_extension = bool(is_extension)
self.is_public = self.schema_name and self.schema_name == "public"
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def _signature(self):
return (
self.schema_name,
self.func_name,
self.arg_names,
self.arg_types,
self.arg_modes,
self.return_type,
self.is_aggregate,
self.is_window,
self.is_set_returning,
self.is_extension,
self.arg_defaults,
)
def __hash__(self):
return hash(self._signature())
def __repr__(self):
return (
"%s(schema_name=%r, func_name=%r, arg_names=%r, "
"arg_types=%r, arg_modes=%r, return_type=%r, is_aggregate=%r, "
"is_window=%r, is_set_returning=%r, is_extension=%r, "
"arg_defaults=%r)"
) % ((self.__class__.__name__,) + self._signature())
def has_variadic(self):
return self.arg_modes and \
any(arg_mode == "v" for arg_mode in self.arg_modes)
def METHOD_NAME(self):
"""Returns a list of input-parameter ColumnMetadata namedtuples."""
if not self.arg_names:
return []
modes = self.arg_modes or ["i"] * len(self.arg_names)
METHOD_NAME = [
(name, typ)
for name, typ, mode in zip(self.arg_names, self.arg_types, modes)
if mode in ("i", "b", "v") # IN, INOUT, VARIADIC
]
def arg(name, typ, num):
num_args = len(METHOD_NAME)
num_defaults = len(self.arg_defaults)
has_default = num + num_defaults >= num_args
default = (
self.arg_defaults[num - num_args + num_defaults]
if has_default
else None
)
return ColumnMetadata(name, typ, [], default, has_default)
return [arg(name, typ, num) for num, (name, typ) in enumerate(METHOD_NAME)]
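    # Worked example for the default alignment above (numbers are illustrative):
    # with three input arguments and one parsed default, num_args == 3 and
    # num_defaults == 1, so only the last argument (num == 2) satisfies
    # num + num_defaults >= num_args and is flagged as having a default.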
def fields(self):
"""Returns a list of output-field ColumnMetadata namedtuples"""
if self.return_type.lower() == "void":
return []
elif not self.arg_modes:
# For functions without output parameters, the function name
# is used as the name of the output column.
# E.g. 'SELECT unnest FROM unnest(...);'
return [ColumnMetadata(self.func_name, self.return_type, [])]
return [
ColumnMetadata(name, typ, [])
for name, typ, mode in zip(self.arg_names, self.arg_types,
self.arg_modes)
if mode in ("o", "b", "t")
] # OUT, INOUT, TABLE | null |
5,973 | # Min heap data structure
# with decrease key functionality - in O(log(n)) time
class Node:
def __init__(self, name, val):
self.name = name
self.val = val
def __str__(self):
return f"{self.__class__.__name__}({self.name}, {self.val})"
def __lt__(self, other):
return self.val < other.val
class MinHeap:
"""
>>> r = Node("R", -1)
>>> b = Node("B", 6)
>>> a = Node("A", 3)
>>> x = Node("X", 1)
>>> e = Node("E", 4)
>>> print(b)
Node(B, 6)
>>> myMinHeap = MinHeap([r, b, a, x, e])
>>> myMinHeap.decrease_key(b, -17)
>>> print(b)
Node(B, -17)
>>> myMinHeap["B"]
-17
"""
def __init__(self, array):
self.idx_of_element = {}
self.heap_dict = {}
self.heap = self.build_heap(array)
def __getitem__(self, key):
return self.get_value(key)
def get_parent_idx(self, idx):
return (idx - 1) // 2
def get_left_child_idx(self, idx):
return idx * 2 + 1
def get_right_child_idx(self, idx):
return idx * 2 + 2
def get_value(self, key):
return self.heap_dict[key]
def build_heap(self, array):
last_idx = len(array) - 1
start_from = self.get_parent_idx(last_idx)
for idx, i in enumerate(array):
self.idx_of_element[i] = idx
self.heap_dict[i.name] = i.val
for i in range(start_from, -1, -1):
self.sift_down(i, array)
return array
# this is min-heapify method
def sift_down(self, idx, array):
while True:
l = self.get_left_child_idx(idx) # noqa: E741
r = self.get_right_child_idx(idx)
smallest = idx
if l < len(array) and array[l] < array[idx]:
smallest = l
if r < len(array) and array[r] < array[smallest]:
smallest = r
if smallest != idx:
array[idx], array[smallest] = array[smallest], array[idx]
(
self.idx_of_element[array[idx]],
self.idx_of_element[array[smallest]],
) = (
self.idx_of_element[array[smallest]],
self.idx_of_element[array[idx]],
)
idx = smallest
else:
break
def sift_up(self, idx):
p = self.get_parent_idx(idx)
while p >= 0 and self.heap[p] > self.heap[idx]:
self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
self.idx_of_element[self.heap[idx]],
self.idx_of_element[self.heap[p]],
)
idx = p
p = self.get_parent_idx(idx)
def peek(self):
return self.heap[0]
def remove(self):
self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
self.idx_of_element[self.heap[-1]],
self.idx_of_element[self.heap[0]],
)
x = self.heap.pop()
del self.idx_of_element[x]
self.sift_down(0, self.heap)
return x
def METHOD_NAME(self, node):
self.heap.append(node)
self.idx_of_element[node] = len(self.heap) - 1
self.heap_dict[node.name] = node.val
self.sift_up(len(self.heap) - 1)
def is_empty(self):
return len(self.heap) == 0
def decrease_key(self, node, new_value):
assert (
self.heap[self.idx_of_element[node]].val > new_value
), "newValue must be less that current value"
node.val = new_value
self.heap_dict[node.name] = new_value
self.sift_up(self.idx_of_element[node])
# USAGE
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)
# Use one of these two ways to generate Min-Heap
# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
print(i)
print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod() | null |
5,974 | """
guiscrcpy
Licensed under GNU Public License
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import json
import os
import shutil
from guiscrcpy.platform import platform
class InvalidConfigurationError(RuntimeError):
pass
class InterfaceConfig:
def __init__(self, load=True):
"""
Manages guiscrcpy's configuration files
"""
self.os = platform.System()
self.cfgpath = self.os.cfgpath()
self.paths = self.os.paths()
self.config = {
"version": 1,
"paths": self.paths,
"scrcpy": None,
"adb": None,
"panels": {"swipe": True, "bottom": True, "toolkit": True},
"mapper": "",
"sha_shift": 5,
"scrcpy-server": None,
"dimension": None,
"swtouches": False,
"bitrate": 8000,
"fullscreen": False,
"dispRO": False,
"extra": "",
"cmx": "",
"device": {},
"theme": "Breeze",
}
self.json_file = "guiscrcpy.json"
if load:
self.load_config()
def load_config(self):
self.METHOD_NAME()
self.validate()
def validate(self):
# check scrcpy and adb are not None, else replace it with original
# values
if os.getenv("APPIMAGE") is not None:
# no need further configuration for adb, scrcpy and scrcpy_server
self.config["adb"] = os.getenv("GUISCRCPY_ADB")
self.config["scrcpy"] = os.getenv("GUISCRCPY_SCRCPY")
return True
if self.config["adb"] is None:
adb_path = shutil.which("adb")
self.config["adb"] = adb_path
else:
_adb_path = self.config["adb"]
if not os.path.exists(_adb_path):
raise InvalidConfigurationError(
"The configuration key 'adb' is "
"invalid. {} does not exist. "
"If you did not set it on purpose, "
"run `guiscrcpy config -r` to reset "
"the configuration".format(self.config["adb"])
)
if self.config["scrcpy"] is None:
scrcpy_path = shutil.which("scrcpy")
self.config["scrcpy"] = scrcpy_path
else:
_scrcpy_path = self.config["scrcpy"]
if not os.path.exists(_scrcpy_path):
raise InvalidConfigurationError(
"The configuration key 'scrcpy' is "
"invalid. {} does not exist. "
"If you did not set it on purpose, "
"run `guiscrcpy config -r` to reset "
"the configuration".format(self.config["scrcpy"])
)
if (self.config["scrcpy-server"] is not None) and (
platform.System() == "Windows"
):
os.environ["SCRCPY_SERVER_PATH"] = self.config["scrcpy-server"]
return True
def __setitem__(self, key, value):
self.config[key] = value
self.write_file()
def __getitem__(self, item):
return self.config.get(item)
def get_config(self):
return self.config
def get_scrcpy(self):
if self.config["scrcpy"] is not None:
return self.config["scrcpy"]
else:
return None
def get_adb(self):
if self.config["adb"] is not None:
return self.config["adb"]
else:
return None
def get_cfgpath(self):
return self.cfgpath
def read_file(self):
with open(os.path.join(self.cfgpath, self.json_file), "r") as f:
config = json.load(f)
self.update_config(config)
def write_file(self):
with open(os.path.join(self.cfgpath, self.json_file), "w") as f:
json.dump(self.config, f, indent=4, sort_keys=True)
def METHOD_NAME(self):
if not os.path.exists(self.cfgpath):
os.makedirs(self.cfgpath)
if not os.path.exists(os.path.join(self.cfgpath, self.json_file)):
self.write_file()
self.read_file()
def update_config(self, new_conf):
for i in new_conf:
for j in self.config:
if i == j:
self.config[i] = new_conf[i]
def reset_config(self):
os.remove(os.path.join(self.get_cfgpath(), self.json_file))
return True
def __repr__(self):
return 'GuiscrcpyConfig({}, "{}")'.format(
json.dumps(self.config, indent=4), self.cfgpath
) | null |
5,975 | import ctypes
import io
import struct
import pytest
import env
from pybind11_tests import ConstructorStats
from pybind11_tests import buffers as m
np = pytest.importorskip("numpy")
def test_from_python():
with pytest.raises(RuntimeError) as excinfo:
m.Matrix(np.array([1, 2, 3])) # trying to assign a 1D array
assert str(excinfo.value) == "Incompatible buffer format!"
m3 = np.array([[1, 2, 3], [4, 5, 6]]).astype(np.float32)
m4 = m.Matrix(m3)
for i in range(m4.rows()):
for j in range(m4.cols()):
assert m3[i, j] == m4[i, j]
cstats = ConstructorStats.get(m.Matrix)
assert cstats.alive() == 1
del m3, m4
assert cstats.alive() == 0
assert cstats.values() == ["2x3 matrix"]
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Don't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
# https://foss.heptapod.net/pypy/pypy/-/issues/2444
# TODO: fix on recent PyPy
@pytest.mark.xfail(
env.PYPY, reason="PyPy 7.3.7 doesn't clear this anymore", strict=False
)
def test_to_python():
mat = m.Matrix(5, 4)
assert memoryview(mat).shape == (5, 4)
assert mat[2, 3] == 0
mat[2, 3] = 4.0
mat[3, 2] = 7.0
assert mat[2, 3] == 4
assert mat[3, 2] == 7
assert struct.unpack_from("f", mat, (3 * 4 + 2) * 4) == (7,)
assert struct.unpack_from("f", mat, (2 * 4 + 3) * 4) == (4,)
mat2 = np.array(mat, copy=False)
assert mat2.shape == (5, 4)
assert abs(mat2).sum() == 11
assert mat2[2, 3] == 4 and mat2[3, 2] == 7
mat2[2, 3] = 5
assert mat2[2, 3] == 5
cstats = ConstructorStats.get(m.Matrix)
assert cstats.alive() == 1
del mat
pytest.gc_collect()
assert cstats.alive() == 1
del mat2 # holds a mat reference
pytest.gc_collect()
assert cstats.alive() == 0
assert cstats.values() == ["5x4 matrix"]
assert cstats.copy_constructions == 0
# assert cstats.move_constructions >= 0 # Don't invoke any
assert cstats.copy_assignments == 0
assert cstats.move_assignments == 0
def test_inherited_protocol():
"""SquareMatrix is derived from Matrix and inherits the buffer protocol"""
matrix = m.SquareMatrix(5)
assert memoryview(matrix).shape == (5, 5)
assert np.asarray(matrix).shape == (5, 5)
def test_pointer_to_member_fn():
for cls in [m.Buffer, m.ConstBuffer, m.DerivedBuffer]:
buf = cls()
buf.value = 0x12345678
value = struct.unpack("i", bytearray(buf))[0]
assert value == 0x12345678
def METHOD_NAME():
buf = m.BufferReadOnly(0x64)
view = memoryview(buf)
assert view[0] == 0x64
assert view.readonly
with pytest.raises(TypeError):
view[0] = 0
def test_selective_readonly_buffer():
buf = m.BufferReadOnlySelect()
memoryview(buf)[0] = 0x64
assert buf.value == 0x64
io.BytesIO(b"A").readinto(buf)
assert buf.value == ord(b"A")
buf.readonly = True
with pytest.raises(TypeError):
memoryview(buf)[0] = 0
with pytest.raises(TypeError):
io.BytesIO(b"1").readinto(buf)
def test_ctypes_array_1d():
char1d = (ctypes.c_char * 10)()
int1d = (ctypes.c_int * 15)()
long1d = (ctypes.c_long * 7)()
for carray in (char1d, int1d, long1d):
info = m.get_buffer_info(carray)
assert info.itemsize == ctypes.sizeof(carray._type_)
assert info.size == len(carray)
assert info.ndim == 1
assert info.shape == [info.size]
assert info.strides == [info.itemsize]
assert not info.readonly
def test_ctypes_array_2d():
char2d = ((ctypes.c_char * 10) * 4)()
int2d = ((ctypes.c_int * 15) * 3)()
long2d = ((ctypes.c_long * 7) * 2)()
for carray in (char2d, int2d, long2d):
info = m.get_buffer_info(carray)
assert info.itemsize == ctypes.sizeof(carray[0]._type_)
assert info.size == len(carray) * len(carray[0])
assert info.ndim == 2
assert info.shape == [len(carray), len(carray[0])]
assert info.strides == [info.itemsize * len(carray[0]), info.itemsize]
assert not info.readonly
def test_ctypes_from_buffer():
test_pystr = b"0123456789"
for pyarray in (test_pystr, bytearray(test_pystr)):
pyinfo = m.get_buffer_info(pyarray)
if pyinfo.readonly:
cbytes = (ctypes.c_char * len(pyarray)).from_buffer_copy(pyarray)
cinfo = m.get_buffer_info(cbytes)
else:
cbytes = (ctypes.c_char * len(pyarray)).from_buffer(pyarray)
cinfo = m.get_buffer_info(cbytes)
assert cinfo.size == pyinfo.size
assert cinfo.ndim == pyinfo.ndim
assert cinfo.shape == pyinfo.shape
assert cinfo.strides == pyinfo.strides
assert not cinfo.readonly | null |
5,976 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""_BatchNormFold2Grad op"""
import te.lang.cce
from te import tvm
from te.platform.fusion_manager import fusion_manager
from tbe.tvm.topi import generic
from tbe.tvm.topi.cce import util
from mindspore.ops.op_info_register import op_info_register, TBERegOp, DataType
SHAPE_SIZE_LIMIT = 2147483648
batchnorm_fold2_grad_op_info = TBERegOp("BatchNormFold2GradD") \
.fusion_type("OPAQUE") \
.async_flag(False) \
.binfile_name("batchnorm_fold2_grad.so") \
.compute_cost(10) \
.kernel_name("batchnorm_fold2_grad") \
.partial_flag(True) \
.input(0, "dout", None, "required", None) \
.input(1, "dout_reduce", None, "required", None) \
.input(2, "dout_x_reduce", None, "required", None) \
.input(3, "gamma", None, "required", None) \
.input(4, "batch_std", None, "required", None) \
.input(5, "batch_mean", None, "required", None) \
.input(6, "running_std", None, "required", None) \
.output(0, "d_batch_std", True, "required", "all") \
.output(1, "d_batch_mean", True, "required", "all") \
.output(2, "d_gamma", True, "required", "all") \
.output(3, "dx", True, "required", "all") \
.dtype_format(DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD,
DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD, DataType.F32_5HD,
DataType.F32_5HD) \
.get_op_info()
@op_info_register(batchnorm_fold2_grad_op_info)
def METHOD_NAME():
"""_BatchNormFold2Grad TBE register"""
return
@fusion_manager.register("batchnorm_fold2_grad")
def batchnorm_fold2_grad_compute(dout, dout_reduce, dout_x_reduce, gamma, batch_std, batch_mean, running_std,
kernel_name="batchnorm_fold2_grad"):
"""_BatchNormFold2Grad"""
shape_x = te.lang.cce.util.shape_to_list(dout.shape)
d_batch_std_1 = te.lang.cce.vmul(dout_reduce, batch_mean)
d_batch_std_1 = te.lang.cce.vmul(d_batch_std_1, gamma)
d_batch_std_2 = te.lang.cce.vmul(dout_x_reduce, running_std)
d_batch_std = te.lang.cce.vsub(d_batch_std_1, d_batch_std_2)
d_batch_std = te.lang.cce.vdiv(d_batch_std, batch_std)
d_batch_std = te.lang.cce.vdiv(d_batch_std, batch_std)
d_batch_mean = te.lang.cce.vmul(dout_reduce, gamma)
d_batch_mean = te.lang.cce.vdiv(d_batch_mean, batch_std)
d_batch_mean = te.lang.cce.vmuls(d_batch_mean, -1.)
d_gamma = te.lang.cce.vmul(dout_reduce, batch_mean)
d_gamma = te.lang.cce.vdiv(d_gamma, batch_std)
d_gamma = te.lang.cce.vmuls(d_gamma, -1.)
dx = te.lang.cce.vdiv(running_std, batch_std)
dx = te.lang.cce.broadcast(dx, shape_x)
dx = te.lang.cce.vmul(dx, dout)
return [d_batch_std, d_batch_mean, d_gamma, dx]
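# Reading the te.lang.cce calls above as per-channel formulas (d_r = dout_reduce,
# d_xr = dout_x_reduce):
#     d_batch_std  = (d_r * batch_mean * gamma - d_xr * running_std) / batch_std**2
#     d_batch_mean = -d_r * gamma / batch_std
#     d_gamma      = -d_r * batch_mean / batch_std
#     dx           = dout * running_std / batch_std   (broadcast to the input shape)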
@util.check_input_type(dict, dict, dict, dict, dict, dict, dict, dict, dict, dict, dict, str)
def batchnorm_fold2_grad(dout, dout_reduce, dout_x_reduce, gamma, batch_std, batch_mean, running_std, d_batch_std,
d_batch_mean, d_gamma, dx, kernel_name="batchnorm_fold2_grad"):
"""_BatchNormFold2Grad op """
shape = dout.get("shape")
util.check_kernel_name(kernel_name)
util.check_shape_rule(shape)
util.check_shape_size(shape, SHAPE_SIZE_LIMIT)
check_list = ["float16", "float32"]
inp_dtype = dout.get("dtype").lower()
if inp_dtype not in check_list:
raise RuntimeError("Dtype of input only support float16, float32")
data_format = dout.get("format")
ori_format = dout.get("ori_format")
if data_format.upper() not in ("NC1HWC0", "NCHW"):
raise RuntimeError("Un supported data format {}".format(data_format))
if data_format.upper() == "NCHW" and ori_format != "NCHW":
raise RuntimeError("data_format(NCHW) must same as ori_format")
shape_c = gamma.get("shape")
if gamma.get("format").upper() == "NCHW":
shape_c = 1, gamma.get("shape")[0], 1, 1
dout_t = tvm.placeholder(shape, name="dout", dtype=inp_dtype)
dout_reduce_t = tvm.placeholder(shape_c, name="dout_reduce", dtype=inp_dtype)
dout_x_reduce_t = tvm.placeholder(shape_c, name="dout_x_reduce", dtype=inp_dtype)
gamma_t = tvm.placeholder(shape_c, name="gamma", dtype=inp_dtype)
batch_std_t = tvm.placeholder(shape_c, name="batch_std", dtype=inp_dtype)
batch_mean_t = tvm.placeholder(shape_c, name="batch_mean", dtype=inp_dtype)
running_std_t = tvm.placeholder(shape_c, name="running_std", dtype=inp_dtype)
res_list = batchnorm_fold2_grad_compute(dout_t, dout_reduce_t, dout_x_reduce_t, gamma_t, batch_std_t, batch_mean_t,
running_std_t, kernel_name)
with tvm.target.cce():
sch = generic.auto_schedule(res_list)
tensor_list = [dout_t, dout_reduce_t, dout_x_reduce_t, gamma_t, batch_std_t, batch_mean_t, running_std_t] + list(
res_list)
config = {"print_ir": False,
"name": kernel_name,
"tensor_list": tensor_list}
te.lang.cce.cce_build_code(sch, config) | null |
5,977 | from __future__ import annotations
from collections import Counter
import click
import mock
import pytest
from requests import HTTPError, Response
from requests.adapters import BaseAdapter
from meltano.cli import cli
from meltano.cli.hub import hub
from meltano.core.hub.client import HubConnectionError, HubPluginVariantNotFoundError
from meltano.core.plugin.base import PluginType, Variant
from meltano.core.plugin.error import PluginNotFoundError
from meltano.core.project import Project
class TestMeltanoHubService:
def test_find_definition_specified_variant(
self,
project: Project,
hub_request_counter: Counter,
):
definition = project.hub_service.find_definition(
PluginType.EXTRACTORS,
"tap-mock",
variant_name="meltano",
)
assert definition.name == "tap-mock"
assert definition.variants[0].name == "meltano"
assert hub_request_counter["/extractors/index"] == 1
assert hub_request_counter["/extractors/tap-mock--meltano"] == 1
def test_find_definition_default_variant(
self,
project: Project,
hub_request_counter: Counter,
):
definition = project.hub_service.find_definition(
PluginType.EXTRACTORS,
"tap-mock",
)
assert definition.name == "tap-mock"
assert definition.variants[0].name == "meltano"
assert hub_request_counter["/extractors/index"] == 1
assert hub_request_counter["/extractors/tap-mock--meltano"] == 1
def test_find_definition_original_variant(
self,
project: Project,
hub_request_counter: Counter,
):
definition = project.hub_service.find_definition(
PluginType.EXTRACTORS,
"tap-mock",
variant_name=Variant.ORIGINAL_NAME,
)
assert definition.name == "tap-mock"
assert definition.variants[0].name == "meltano"
assert hub_request_counter["/extractors/index"] == 1
assert hub_request_counter["/extractors/tap-mock--meltano"] == 1
def test_definition_not_found(
self,
project: Project,
hub_request_counter: Counter,
):
with pytest.raises(PluginNotFoundError):
project.hub_service.find_definition(PluginType.EXTRACTORS, "tap-not-found")
assert hub_request_counter["/extractors/index"] == 1
assert len(hub_request_counter) == 1
def test_variant_not_found(
self,
project: Project,
hub_request_counter: Counter,
):
with pytest.raises(HubPluginVariantNotFoundError):
project.hub_service.find_definition(
PluginType.EXTRACTORS,
"tap-mock",
"not-found",
)
assert hub_request_counter["/extractors/index"] == 1
assert len(hub_request_counter) == 1
def test_get_plugins_of_type(
self,
project: Project,
hub_request_counter: Counter,
):
extractors = project.hub_service.get_plugins_of_type(PluginType.EXTRACTORS)
assert len(extractors) == 9
assert len(extractors["tap-mock"].variants) == 2
assert extractors["tap-mock"].variant_labels == [
"meltano (default)",
"singer-io",
]
assert hub_request_counter["/extractors/index"] == 1
def test_hub_auth(self, project):
project.settings.set("hub_url_auth", "Bearer s3cr3t")
assert project.hub_service.session.headers["Authorization"] == "Bearer s3cr3t"
def test_server_error(self, project: Project):
with pytest.raises(
HubConnectionError,
match="Could not connect to Meltano Hub. 500 Server Error",
) as exc_info:
project.hub_service.find_definition(
PluginType.EXTRACTORS,
"this-returns-500",
)
assert isinstance(exc_info.value.__cause__, HTTPError)
assert isinstance(exc_info.value.__cause__.response, Response)
assert exc_info.value.__cause__.response.status_code == 500
assert exc_info.value.__cause__.response.json() == {"error": "Server error"}
assert exc_info.value.__cause__.response.url == (
"https://hub.meltano.com/meltano/api/v1/plugins/extractors"
"/this-returns-500--original"
)
def test_request_headers(self, project: Project):
with mock.patch("click.get_current_context") as get_context:
get_context.return_value = click.Context(
hub,
info_name="hub",
parent=click.Context(cli, info_name="meltano"),
)
request = project.hub_service._build_request("GET", "https://example.com")
assert request.headers["X-Meltano-Command"] == "meltano hub"
with mock.patch("click.get_current_context") as get_context:
get_context.return_value = None
request = project.hub_service._build_request("GET", "https://example.com")
assert "X-Meltano-Command" not in request.headers
def test_custom_ca(self, project, monkeypatch):
send_kwargs = {}
class _Adapter(BaseAdapter):
def send(
self,
request, # noqa: ARG002
**kwargs,
):
nonlocal send_kwargs
send_kwargs = kwargs
response = Response()
response._content = b'{"name": "tap-mock", "namespace": "tap_mock"}'
response.status_code = 200
return response
mock_url = "hub://meltano"
hub = project.hub_service
hub.session.mount(mock_url, _Adapter())
monkeypatch.setenv("REQUESTS_CA_BUNDLE", "/path/to/ca.pem")
hub._get(mock_url)
assert send_kwargs["verify"] == "/path/to/ca.pem"
def METHOD_NAME(self, project, monkeypatch):
send_kwargs = {}
class _Adapter(BaseAdapter):
def send(
self,
request, # noqa: ARG002
**kwargs,
):
nonlocal send_kwargs
send_kwargs = kwargs
response = Response()
response._content = b'{"name": "tap-mock", "namespace": "tap_mock"}'
response.status_code = 200
return response
mock_url = "hub://meltano"
hub = project.hub_service
hub.session.mount(mock_url, _Adapter())
monkeypatch.setenv("HTTPS_PROXY", "https://www.example.com:3128/")
hub._get(mock_url)
assert send_kwargs["proxies"] == {"https": "https://www.example.com:3128/"} | null |
5,978 | # Copyright 2019-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops.operations import _inner_ops as inner
class NetZerosLike(nn.Cell):
def __init__(self):
super(NetZerosLike, self).__init__()
self.zeros_like = P.ZerosLike()
def construct(self, x):
return self.zeros_like(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_ZerosLike():
x0_np = np.random.uniform(-2, 2, (2, 3, 4, 4)).astype(np.float32)
x1_np = np.random.uniform(-2, 2, 1).astype(np.float32)
x0 = Tensor(x0_np)
x1 = Tensor(x1_np)
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
zeros_like = NetZerosLike()
output0 = zeros_like(x0)
expect0 = np.zeros_like(x0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = zeros_like(x1)
expect1 = np.zeros_like(x1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
zeros_like = NetZerosLike()
output0 = zeros_like(x0)
expect0 = np.zeros_like(x0_np)
diff0 = output0.asnumpy() - expect0
error0 = np.ones(shape=expect0.shape) * 1.0e-5
assert np.all(diff0 < error0)
assert output0.shape == expect0.shape
output1 = zeros_like(x1)
expect1 = np.zeros_like(x1_np)
diff1 = output1.asnumpy() - expect1
error1 = np.ones(shape=expect1.shape) * 1.0e-5
assert np.all(diff1 < error1)
assert output1.shape == expect1.shape
class ZerosLikeDynamicNet(nn.Cell):
def __init__(self):
super(ZerosLikeDynamicNet, self).__init__()
self.gpu_convert_to_dynamic_shape = inner.GpuConvertToDynamicShape()
self.zeros_like = P.ZerosLike()
def construct(self, x):
converted_to_dynamic = self.gpu_convert_to_dynamic_shape(x)
return self.zeros_like(converted_to_dynamic)
def zeros_like_dynamic(x):
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = ZerosLikeDynamicNet()
return net(x)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_bool():
x = Tensor(np.arange(120).reshape(3, 4, 1, 2, 5).astype(np.bool))
output = zeros_like_dynamic(x)
expected = np.zeros([3, 4, 1, 2, 5])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_int8():
x = Tensor(np.arange(24).reshape(1, 4, 1, 6).astype(np.int8))
output = zeros_like_dynamic(x)
expected = np.zeros([1, 4, 1, 6])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def METHOD_NAME():
x = Tensor(np.arange(30).reshape(3, 2, 5).astype(np.uint8))
output = zeros_like_dynamic(x)
expected = np.zeros([3, 2, 5])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_int32():
x = Tensor(np.arange(16).reshape(2, 2, 2, 2).astype(np.int32))
output = zeros_like_dynamic(x)
expected = np.zeros([2, 2, 2, 2])
np.testing.assert_array_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_float16():
x = Tensor(np.arange(120).reshape(3, 4, 1, 2, 5).astype(np.float16))
output = zeros_like_dynamic(x)
expected = np.zeros([3, 4, 1, 2, 5])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_float32():
x = Tensor(np.arange(63).reshape(3, 7, 3).astype(np.float32))
output = zeros_like_dynamic(x)
expected = np.zeros([3, 7, 3])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_float64():
x = Tensor(np.arange(2).reshape(2, 1, 1).astype(np.float64))
output = zeros_like_dynamic(x)
expected = np.zeros([2, 1, 1])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level1
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_zeros_like_dynamic_multiple_inputs():
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
net = ZerosLikeDynamicNet()
x = Tensor(np.arange(4).reshape(4).astype(np.float32))
output = net(x)
expected = np.zeros([4])
np.testing.assert_array_almost_equal(output.asnumpy(), expected)
x = Tensor(np.arange(8).reshape(2, 1, 2, 2).astype(np.uint8))
output = net(x)
expected = np.zeros([2, 1, 2, 2])
np.testing.assert_array_equal(output.asnumpy(), expected)
x = Tensor(np.arange(1).reshape(1).astype(np.float16))
output = net(x)
expected = np.zeros([1])
np.testing.assert_array_almost_equal(output.asnumpy(), expected) | null |
5,979 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
'get_certificate_output',
]
@pulumi.output_type
class GetCertificateResult:
"""
A collection of values returned by getCertificate.
"""
def __init__(__self__, account_name=None, format=None, id=None, name=None, public_data=None, resource_group_name=None, thumbprint=None, thumbprint_algorithm=None):
if account_name and not isinstance(account_name, str):
raise TypeError("Expected argument 'account_name' to be a str")
pulumi.set(__self__, "account_name", account_name)
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if public_data and not isinstance(public_data, str):
raise TypeError("Expected argument 'public_data' to be a str")
pulumi.set(__self__, "public_data", public_data)
if resource_group_name and not isinstance(resource_group_name, str):
raise TypeError("Expected argument 'resource_group_name' to be a str")
pulumi.set(__self__, "resource_group_name", resource_group_name)
if thumbprint and not isinstance(thumbprint, str):
raise TypeError("Expected argument 'thumbprint' to be a str")
pulumi.set(__self__, "thumbprint", thumbprint)
if thumbprint_algorithm and not isinstance(thumbprint_algorithm, str):
raise TypeError("Expected argument 'thumbprint_algorithm' to be a str")
pulumi.set(__self__, "thumbprint_algorithm", thumbprint_algorithm)
@property
@pulumi.getter(name="accountName")
def account_name(self) -> str:
return pulumi.get(self, "account_name")
@property
@pulumi.getter
def format(self) -> str:
"""
The format of the certificate, such as `Cer` or `Pfx`.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="publicData")
def public_data(self) -> str:
"""
The public key of the certificate.
"""
return pulumi.get(self, "public_data")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> str:
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def thumbprint(self) -> str:
"""
The thumbprint of the certificate.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter(name="thumbprintAlgorithm")
def thumbprint_algorithm(self) -> str:
"""
The algorithm of the certificate thumbprint.
"""
return pulumi.get(self, "thumbprint_algorithm")
class AwaitableGetCertificateResult(GetCertificateResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetCertificateResult(
account_name=self.account_name,
format=self.format,
id=self.id,
name=self.name,
public_data=self.public_data,
resource_group_name=self.resource_group_name,
thumbprint=self.thumbprint,
thumbprint_algorithm=self.thumbprint_algorithm)
def get_certificate(account_name: Optional[str] = None,
name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Use this data source to access information about an existing certificate in a Batch Account.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.batch.get_certificate(name="SHA1-42C107874FD0E4A9583292A2F1098E8FE4B2EDDA",
account_name="examplebatchaccount",
resource_group_name="example")
pulumi.export("thumbprint", example.thumbprint)
```
:param str account_name: The name of the Batch account.
:param str name: The name of the Batch certificate.
:param str resource_group_name: The Name of the Resource Group where this Batch account exists.
"""
__args__ = dict()
__args__['accountName'] = account_name
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure:batch/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
account_name=pulumi.get(__ret__, 'account_name'),
format=pulumi.get(__ret__, 'format'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
public_data=pulumi.get(__ret__, 'public_data'),
resource_group_name=pulumi.get(__ret__, 'resource_group_name'),
thumbprint=pulumi.get(__ret__, 'thumbprint'),
thumbprint_algorithm=pulumi.get(__ret__, 'thumbprint_algorithm'))
@_utilities.lift_output_func(get_certificate)
def METHOD_NAME(account_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetCertificateResult]:
"""
Use this data source to access information about an existing certificate in a Batch Account.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example = azure.batch.get_certificate(name="SHA1-42C107874FD0E4A9583292A2F1098E8FE4B2EDDA",
account_name="examplebatchaccount",
resource_group_name="example")
pulumi.export("thumbprint", example.thumbprint)
```
:param str account_name: The name of the Batch account.
:param str name: The name of the Batch certificate.
:param str resource_group_name: The Name of the Resource Group where this Batch account exists.
"""
... | null |
5,980 | from sha3 import keccak_256
import json
from web3.auto import w3
from eth_keys import keys
import struct
def unpack(data):
ch = data[0]
if ch <= 0x7F:
return ch, data[1:]
elif ch == 0x80:
return None, data[1:]
elif ch <= 0xB7:
l = ch - 0x80
return data[1:1 + l].tobytes(), data[1 + l:]
elif ch <= 0xBF:
lLen = ch - 0xB7
l = int.from_bytes(data[1:1 + lLen], byteorder='big')
return data[1 + lLen:1 + lLen + l].tobytes(), data[1 + lLen + l:]
elif ch == 0xC0:
return (), data[1:]
elif ch <= 0xF7:
l = ch - 0xC0
lst = list()
sub = data[1:1 + l]
while len(sub):
(item, sub) = unpack(sub)
lst.append(item)
return lst, data[1 + l:]
else:
lLen = ch - 0xF7
l = int.from_bytes(data[1:1 + lLen], byteorder='big')
lst = list()
sub = data[1 + lLen:1 + lLen + l]
while len(sub):
(item, sub) = unpack(sub)
lst.append(item)
return lst, data[1 + lLen + l:]
def pack(data):
if data is None:
return (0x80).to_bytes(1, 'big')
if isinstance(data, str):
return pack(data.encode('utf8'))
elif isinstance(data, bytes):
if len(data) <= 55:
return (len(data) + 0x80).to_bytes(1, 'big') + data
else:
l = len(data)
lLen = (l.bit_length() + 7) // 8
return (0xB7 + lLen).to_bytes(1, 'big') + l.to_bytes(lLen, 'big') + data
elif isinstance(data, int):
if data < 0x80:
return data.to_bytes(1, 'big')
else:
l = (data.bit_length() + 7) // 8
return (l + 0x80).to_bytes(1, 'big') + data.to_bytes(l, 'big')
elif isinstance(data, list) or isinstance(data, tuple):
if len(data) == 0:
return (0xC0).to_bytes(1, 'big')
else:
res = bytearray()
for d in data:
res += pack(d)
l = len(res)
if l <= 55:
return (l + 0xC0).to_bytes(1, 'big') + res
else:
lLen = (l.bit_length() + 7) // 8
return (lLen + 0xF7).to_bytes(1, 'big') + l.to_bytes(lLen, 'big') + res
else:
raise Exception("Unknown type {} of data".format(str(type(data))))
def get_int(a):
if isinstance(a, int):
return a
if isinstance(a, bytes):
return int.from_bytes(a, 'big')
if a is None:
return a
raise Exception("Invalid convertion from {} to int".format(a))
class Trx:
def __init__(self):
self.nonce = None
self.gasPrice = None
self.gasLimit = None
self.toAddress = None
self.value = None
self.callData = None
self.v = None
self.r = None
self.s = None
@classmethod
def from_string(cls, s):
t = Trx()
(unpacked, data) = unpack(memoryview(s))
(nonce, gasPrice, gasLimit, toAddress, value, callData, v, r, s) = unpacked
t.nonce = get_int(nonce)
t.gasPrice = get_int(gasPrice)
t.gasLimit = get_int(gasLimit)
t.toAddress = toAddress
t.value = get_int(value)
t.callData = callData
t.v = get_int(v)
t.r = get_int(r)
t.s = get_int(s)
return t
def chain_id(self):
# chainid*2 + 35 xxxxx0 + 100011 xxxx0 + 100010 +1
# chainid*2 + 36 xxxxx0 + 100100 xxxx0 + 100011 +1
return (self.v - 1) // 2 - 17
def __str__(self):
return pack((
self.nonce,
self.gasPrice,
self.gasLimit,
self.toAddress,
self.value,
self.callData,
self.v,
self.r.to_bytes(32, 'big') if self.r else None,
self.s.to_bytes(32, 'big') if self.s else None)
).hex()
def get_msg(self, chain_id=None):
return pack((
self.nonce,
self.gasPrice,
self.gasLimit,
self.toAddress,
self.value,
self.callData,
chain_id or self.chain_id(), None, None))
def hash(self, chain_id=None):
trx = pack((
self.nonce,
self.gasPrice,
self.gasLimit,
self.toAddress,
self.value,
self.callData,
chain_id or self.chain_id(), None, None))
return keccak_256(trx).digest()
def METHOD_NAME(self):
msg_hash = self.hash()
sig = keys.Signature(vrs=[1 if self.v % 2 == 0 else 0, self.r, self.s])
pub = sig.recover_public_key_from_msg_hash(msg_hash)
return pub.to_canonical_address().hex()
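# EIP-155 sanity check for the v handling above (mainnet chain id 1 assumed for
# illustration): a signed transaction carries v = chain_id * 2 + 35 or 36, so
# v is 37 or 38 and chain_id() returns (37 - 1) // 2 - 17 == 1. The recovery id
# handed to keys.Signature is 0 for odd v and 1 for even v, matching the code above.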
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, bytes):
return obj.hex()
return json.JSONEncoder.default(obj)
def make_instruction_data_from_tx(instruction, private_key=None):
if isinstance(instruction, dict):
if instruction['chainId'] is None:
raise Exception("chainId value is needed in input dict")
if private_key is None:
raise Exception("Needed private key for transaction creation from fields")
signed_tx = w3.eth.account.sign_transaction(instruction, private_key)
# print(signed_tx.rawTransaction.hex())
_trx = Trx.from_string(signed_tx.rawTransaction)
# print(json.dumps(_trx.__dict__, cls=JsonEncoder, indent=3))
raw_msg = _trx.get_msg(instruction['chainId'])
sig = keys.Signature(vrs=[1 if _trx.v % 2 == 0 else 0, _trx.r, _trx.s])
pub = sig.recover_public_key_from_msg_hash(_trx.hash())
# print(pub.to_hex())
return pub.to_canonical_address(), sig.to_bytes(), raw_msg
elif isinstance(instruction, str):
if instruction[:2] == "0x":
instruction = instruction[2:]
_trx = Trx.from_string(bytearray.fromhex(instruction))
# print(json.dumps(_trx.__dict__, cls=JsonEncoder, indent=3))
raw_msg = _trx.get_msg()
sig = keys.Signature(vrs=[1 if _trx.v % 2 == 0 else 0, _trx.r, _trx.s])
pub = sig.recover_public_key_from_msg_hash(_trx.hash())
data = pub.to_canonical_address()
data += sig.to_bytes()
data += raw_msg
return pub.to_canonical_address(), sig.to_bytes(), raw_msg
else:
raise Exception("function gets ")
def make_keccak_instruction_data(check_instruction_index, msg_len, data_start):
    if check_instruction_index < 0 or check_instruction_index > 255:
raise Exception("Invalid index for instruction - {}".format(check_instruction_index))
check_count = 1
eth_address_size = 20
signature_size = 65
eth_address_offset = data_start
signature_offset = eth_address_offset + eth_address_size
message_data_offset = signature_offset + signature_size
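    # Layout sketch, assuming data_start == 1: ethereum address at offset 1
    # (20 bytes), signature at offset 21 (65 bytes), message bytes at offset 86.
    # These offsets are packed below for the on-chain signature-check instruction
    # referenced by check_instruction_index.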
data = struct.pack("B", check_count)
data += struct.pack("<H", signature_offset)
data += struct.pack("B", check_instruction_index)
data += struct.pack("<H", eth_address_offset)
data += struct.pack("B", check_instruction_index)
data += struct.pack("<H", message_data_offset)
data += struct.pack("<H", msg_len)
data += struct.pack("B", check_instruction_index)
return data | null |
5,981 | """
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
lib.py - Common functions (eg multipart e-mail)
"""
from datetime import date, datetime, time
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.encoding import smart_str
from helpdesk.settings import CUSTOMFIELD_DATE_FORMAT, CUSTOMFIELD_DATETIME_FORMAT, CUSTOMFIELD_TIME_FORMAT
import logging
import mimetypes
logger = logging.getLogger('helpdesk')
def ticket_template_context(ticket):
context = {}
for field in ('title', 'created', 'modified', 'submitter_email',
'status', 'get_status_display', 'on_hold', 'description',
'resolution', 'priority', 'get_priority_display',
'last_escalation', 'ticket', 'ticket_for_url', 'merged_to',
'get_status', 'ticket_url', 'staff_url', '_get_assigned_to'
):
attr = getattr(ticket, field, None)
if callable(attr):
context[field] = '%s' % attr()
else:
context[field] = attr
context['assigned_to'] = context['_get_assigned_to']
return context
def queue_template_context(queue):
context = {}
for field in ('title', 'slug', 'email_address', 'from_address', 'locale'):
attr = getattr(queue, field, None)
if callable(attr):
context[field] = attr()
else:
context[field] = attr
return context
def METHOD_NAME(ticket):
"""
Return a dictionary that can be used as a template context to render
comments and other details with ticket or queue parameters. Note that
we don't just provide the Ticket & Queue objects to the template as
they could reveal confidential information. Just imagine these two options:
* {{ ticket.queue.email_box_password }}
* {{ ticket.assigned_to.password }}
Ouch!
The downside to this is that if we make changes to the model, we will also
have to update this code. Perhaps we can find a better way in the future.
"""
context = {
'queue': queue_template_context(ticket.queue),
'ticket': ticket_template_context(ticket),
}
context['ticket']['queue'] = context['queue']
return context
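# Illustrative sketch (assumed usage, not part of this module): the dict built
# above is intended to be rendered with Django templates, e.g.
#   Template("{{ ticket.title }} ({{ queue.title }})").render(Context(context_dict))
# where context_dict is the return value of the function above for some ticket.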
def text_is_spam(text, request):
# Based on a blog post by 'sciyoshi':
# http://sciyoshi.com/blog/2008/aug/27/using-akismet-djangos-new-comments-framework/
# This will return 'True' if the given text is deemed to be spam, or
# False if it is not spam. If it cannot be checked for some reason, we
# assume it isn't spam.
try:
from akismet import Akismet
except ImportError:
return False
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
try:
site = Site.objects.get_current()
except ImproperlyConfigured:
site = Site(domain='configure-django-sites.com')
# see https://akismet.readthedocs.io/en/latest/overview.html#using-akismet
apikey = None
if hasattr(settings, 'TYPEPAD_ANTISPAM_API_KEY'):
apikey = settings.TYPEPAD_ANTISPAM_API_KEY
elif hasattr(settings, 'PYTHON_AKISMET_API_KEY'):
# new env var expected by python-akismet package
apikey = settings.PYTHON_AKISMET_API_KEY
elif hasattr(settings, 'AKISMET_API_KEY'):
# deprecated, but kept for backward compatibility
apikey = settings.AKISMET_API_KEY
else:
return False
ak = Akismet(
blog_url='http://%s/' % site.domain,
key=apikey,
)
if hasattr(settings, 'TYPEPAD_ANTISPAM_API_KEY'):
ak.baseurl = 'api.antispam.typepad.com/1.1/'
if ak.verify_key():
ak_data = {
'user_ip': request.META.get('REMOTE_ADDR', '127.0.0.1'),
'user_agent': request.headers.get('User-Agent', ''),
'referrer': request.headers.get('Referer', ''),
'comment_type': 'comment',
'comment_author': '',
}
return ak.comment_check(smart_str(text), data=ak_data)
return False
def process_attachments(followup, attached_files):
max_email_attachment_size = getattr(
settings, 'HELPDESK_MAX_EMAIL_ATTACHMENT_SIZE', 512000)
attachments = []
errors = set()
for attached in attached_files:
if attached.size:
from helpdesk.models import FollowUpAttachment
filename = smart_str(attached.name)
att = FollowUpAttachment(
followup=followup,
file=attached,
filename=filename,
mime_type=attached.content_type or
mimetypes.guess_type(filename, strict=False)[0] or
'application/octet-stream',
size=attached.size,
)
try:
att.full_clean()
except ValidationError as e:
errors.add(e)
else:
att.save()
if attached.size < max_email_attachment_size:
# Only files smaller than 512kb (or as defined in
# settings.HELPDESK_MAX_EMAIL_ATTACHMENT_SIZE) are sent via
# email.
attachments.append([filename, att.file])
if errors:
raise ValidationError(list(errors))
return attachments
def format_time_spent(time_spent):
"""Format time_spent attribute to "[H]HHh:MMm" text string to be allign in
all graphical outputs
"""
if time_spent:
time_spent = "{0:02d}h:{1:02d}m".format(
int(time_spent.total_seconds()) // 3600,
(int(time_spent.total_seconds()) % 3600) // 60
)
else:
time_spent = ""
return time_spent
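# Example (assuming a datetime.timedelta input): a value of 2h 5m 30s formats
# as "02h:05m", and a falsy/None value yields an empty string.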
def convert_value(value):
""" Convert date/time data type to known fixed format string """
if type(value) == datetime:
return value.strftime(CUSTOMFIELD_DATETIME_FORMAT)
elif type(value) == date:
return value.strftime(CUSTOMFIELD_DATE_FORMAT)
elif type(value) == time:
return value.strftime(CUSTOMFIELD_TIME_FORMAT)
else:
return value | null |
5,982 | import json
import uuid
from pilot.common.schema import DBType
from pilot.configs.config import Config
from pilot.configs.model_config import (
KNOWLEDGE_UPLOAD_ROOT_PATH,
EMBEDDING_MODEL_CONFIG,
LOGDIR,
)
from pilot.scene.base import ChatScene
from pilot.scene.base_chat import BaseChat
from pilot.scene.chat_factory import ChatFactory
from pilot.summary.rdbms_db_summary import RdbmsSummary
from pilot.utils import build_logger
logger = build_logger("db_summary", LOGDIR + "db_summary.log")
CFG = Config()
chat_factory = ChatFactory()
class DBSummaryClient:
"""db summary client, provide db_summary_embedding(put db profile and table profile summary into vector store)
, get_similar_tables method(get user query related tables info)
"""
def __init__(self):
pass
def db_summary_embedding(self, dbname, db_type):
"""put db profile and table profile summary into vector store"""
from langchain.embeddings import HuggingFaceEmbeddings
from pilot.embedding_engine.string_embedding import StringEmbedding
db_summary_client = RdbmsSummary(dbname, db_type)
embeddings = HuggingFaceEmbeddings(
model_name=EMBEDDING_MODEL_CONFIG[CFG.EMBEDDING_MODEL]
)
vector_store_config = {
"vector_store_name": dbname + "_summary",
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
"embeddings": embeddings,
}
embedding = StringEmbedding(
file_path=db_summary_client.get_summery(),
vector_store_config=vector_store_config,
)
self.init_db_profile(db_summary_client, dbname, embeddings)
if not embedding.vector_name_exist():
if CFG.SUMMARY_CONFIG == "FAST":
for vector_table_info in db_summary_client.get_summery():
embedding = StringEmbedding(
vector_table_info,
vector_store_config,
)
embedding.source_embedding()
else:
embedding = StringEmbedding(
file_path=db_summary_client.get_summery(),
vector_store_config=vector_store_config,
)
embedding.source_embedding()
for (
table_name,
table_summary,
) in db_summary_client.get_table_summary().items():
table_vector_store_config = {
"vector_store_name": dbname + "_" + table_name + "_ts",
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
"embeddings": embeddings,
}
embedding = StringEmbedding(
table_summary,
table_vector_store_config,
)
embedding.source_embedding()
logger.info("db summary embedding success")
def get_db_summary(self, dbname, query, topk):
from pilot.embedding_engine.embedding_engine import EmbeddingEngine
vector_store_config = {
"vector_store_name": dbname + "_profile",
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
}
knowledge_embedding_client = EmbeddingEngine(
model_name=EMBEDDING_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
table_docs = knowledge_embedding_client.similar_search(query, topk)
ans = [d.page_content for d in table_docs]
return ans
def get_similar_tables(self, dbname, query, topk):
"""get user query related tables info"""
from pilot.embedding_engine.embedding_engine import EmbeddingEngine
vector_store_config = {
"vector_store_name": dbname + "_summary",
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
}
knowledge_embedding_client = EmbeddingEngine(
model_name=EMBEDDING_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
if CFG.SUMMARY_CONFIG == "FAST":
table_docs = knowledge_embedding_client.similar_search(query, topk)
related_tables = [
json.loads(table_doc.page_content)["table_name"]
for table_doc in table_docs
]
else:
table_docs = knowledge_embedding_client.similar_search(query, 1)
# prompt = KnownLedgeBaseQA.build_db_summary_prompt(
# query, table_docs[0].page_content
# )
related_tables = _get_llm_response(
query, dbname, table_docs[0].page_content
)
related_table_summaries = []
for table in related_tables:
vector_store_config = {
"vector_store_name": dbname + "_" + table + "_ts",
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
}
knowledge_embedding_client = EmbeddingEngine(
model_name=EMBEDDING_MODEL_CONFIG[CFG.EMBEDDING_MODEL],
vector_store_config=vector_store_config,
)
table_summary = knowledge_embedding_client.similar_search(query, 1)
related_table_summaries.append(table_summary[0].page_content)
return related_table_summaries
def METHOD_NAME(self):
db_manage = CFG.LOCAL_DB_MANAGE
dbs = db_manage.get_db_list()
for item in dbs:
try:
self.db_summary_embedding(item["db_name"], item["db_type"])
except Exception as e:
logger.warning(
f'{item["db_name"]}, {item["db_type"]} summary error!{str(e)}'
)
def init_db_profile(self, db_summary_client, dbname, embeddings):
from pilot.embedding_engine.string_embedding import StringEmbedding
profile_store_config = {
"vector_store_name": dbname + "_profile",
"chroma_persist_path": KNOWLEDGE_UPLOAD_ROOT_PATH,
"vector_store_type": CFG.VECTOR_STORE_TYPE,
"embeddings": embeddings,
}
embedding = StringEmbedding(
file_path=db_summary_client.get_db_summery(),
vector_store_config=profile_store_config,
)
if not embedding.vector_name_exist():
docs = []
docs.extend(embedding.read_batch())
for table_summary in db_summary_client.table_info_json():
embedding = StringEmbedding(
table_summary,
profile_store_config,
)
docs.extend(embedding.read_batch())
embedding.index_to_store(docs)
logger.info("init db profile success...")
def _get_llm_response(query, db_input, dbsummary):
chat_param = {
"temperature": 0.7,
"max_new_tokens": 512,
"chat_session_id": uuid.uuid1(),
"user_input": query,
"db_select": db_input,
"db_summary": dbsummary,
}
chat: BaseChat = chat_factory.get_implementation(
ChatScene.InnerChatDBSummary.value, **chat_param
)
res = chat._blocking_nostream_call()
return json.loads(res)["table"] | null |
5,983 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
# std imports
import numpy as np
import unittest
from unittest.mock import MagicMock, create_autospec, patch
# thirdparty imports
from mantid.api import MatrixWorkspace
from mantid.dataobjects import PeaksWorkspace
from mantid.kernel import SpecialCoordinateSystem
from numpy.testing import assert_allclose
# local imports
from mantidqt.widgets.sliceviewer.peaksviewer.model import PeaksViewerModel, create_peaksviewermodel
from mantidqt.widgets.sliceviewer.peaksviewer.test.modeltesthelpers import create_peaks_viewer_model, draw_peaks
class PeaksViewerModelTest(unittest.TestCase):
# -------------------------- Success Tests --------------------------------
def test_peaks_workspace_returns_same_workspace_given_to_model(self):
peaks_workspace = create_autospec(PeaksWorkspace)
model = PeaksViewerModel(peaks_workspace, "b", "1.0")
self.assertEqual(peaks_workspace, model.peaks_workspace)
def test_color_returns_string_identifier_given_to_model(self):
fg_color, bg_color = "b", "0.5"
model = PeaksViewerModel(create_autospec(PeaksWorkspace), fg_color, bg_color)
self.assertEqual(fg_color, model.fg_color)
self.assertEqual(bg_color, model.bg_color)
@patch("mantidqt.widgets.sliceviewer.peaksviewer.model._get_peaksworkspace")
def test_create_peaksviewermodel_uses_given_colors(self, mock_get_peaks_workspace):
mock_get_peaks_workspace.return_value = MagicMock(spec=PeaksWorkspace)
first_model = create_peaksviewermodel("test", "red", "gray")
second_model = create_peaksviewermodel("test", "blue", "white")
self.assertEqual("red", first_model.fg_color)
self.assertEqual("gray", first_model.bg_color)
self.assertEqual("blue", second_model.fg_color)
self.assertEqual("white", second_model.bg_color)
def METHOD_NAME(self):
fg_color = "r"
# create 2 peaks: 1 visible, 1 not (far outside Z range)
visible_peak_center, invisible_center = (0.5, 0.2, 0.25), (0.4, 0.3, 25)
_, mock_painter = draw_peaks((visible_peak_center, invisible_center), fg_color, slice_value=0.5, slice_width=30)
self.assertEqual(1, mock_painter.cross.call_count)
call_args, call_kwargs = mock_painter.cross.call_args
self.assertEqual(visible_peak_center[0], call_args[0])
self.assertEqual(visible_peak_center[1], call_args[1])
self.assertAlmostEqual(0.03, call_args[2], places=3)
self.assertAlmostEqual(0.356, call_kwargs["alpha"], places=3)
self.assertEqual(fg_color, call_kwargs["color"])
def test_clear_peaks_removes_all_drawn(self):
# create 2 peaks: 1 visible, 1 not (far outside Z range)
visible_peak_center, invisible_center = (0.5, 0.2, 0.25), (0.4, 0.3, 25)
model, mock_painter = draw_peaks((visible_peak_center, invisible_center), fg_color="r", slice_value=0.5, slice_width=30)
model.clear_peak_representations()
mock_painter.remove.assert_called_once()
def test_slicepoint_transforms_center_to_correct_frame_and_order(self):
peak_center = (1, 2, 3)
model = create_peaks_viewer_model(centers=[peak_center], fg_color="red")
slice_info = MagicMock()
slice_info.slicepoint = [0.5, None, None]
slice_info.z_index = 0
slice_info.adjust_index_for_preceding_nonq_dims.side_effect = lambda index: index
slicepoint = model.slicepoint(0, slice_info, SpecialCoordinateSystem.QSample)
peak0 = model.ws.getPeak(0)
peak0.getQSampleFrame.assert_called_once()
peak0.getQLabFrame.assert_not_called()
peak0.getHKL.assert_not_called()
self.assertEqual([1, None, None], slicepoint)
def test_delete_peak(self):
peak_centers = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0]]
model = create_peaks_viewer_model(centers=peak_centers, fg_color="red")
assert model.delete_peak(np.array([1.0, 0.9, 0.1]), SpecialCoordinateSystem.QLab) == 1
assert model.delete_peak(np.array([1.1, 0.0, 0.1]), SpecialCoordinateSystem.QSample) == 0
assert model.delete_peak(np.array([0.0, 0.0, 0.0]), SpecialCoordinateSystem.QSample) == 0
def test_viewlimits(self):
visible_peak_center, invisible_center = (0.5, 0.2, 0.25), (0.4, 0.3, 25)
model, mock_painter = draw_peaks((visible_peak_center, invisible_center), fg_color="r", slice_value=0.5, slice_width=30)
xlim, ylim = model.viewlimits(0)
assert_allclose((-0.13, 1.13), xlim)
assert_allclose((-0.43, 0.83), ylim)
# Force case where no representation are able to be draw
# This can happen when the peak integration volume doesn't intersect any planes of data
model._representations = [None]
xlim, ylim = model.viewlimits(0)
self.assertEqual((None, None), xlim)
self.assertEqual((None, None), ylim)
def test_peaks_workspace_add_peak(self):
peaks_workspace = create_autospec(PeaksWorkspace)
model = PeaksViewerModel(peaks_workspace, "b", "1.0")
model.add_peak([1, 1, 1], SpecialCoordinateSystem.QLab)
peaks_workspace.addPeak.assert_called_with([1, 1, 1], SpecialCoordinateSystem.QLab)
model.add_peak([2, 2, 2], SpecialCoordinateSystem.QSample)
peaks_workspace.addPeak.assert_called_with([2, 2, 2], SpecialCoordinateSystem.QSample)
model.add_peak([3, 3, 3], SpecialCoordinateSystem.HKL)
peaks_workspace.addPeak.assert_called_with([3, 3, 3], SpecialCoordinateSystem.HKL)
# -------------------------- Failure Tests --------------------------------
def test_model_accepts_only_peaks_workspaces(self):
self.assertRaises(ValueError, PeaksViewerModel, create_autospec(MatrixWorkspace), "w", "1.0")
if __name__ == "__main__":
unittest.main() | null |
5,984 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple generator for local Makefile rules."""
import os
import sys
from common import yaml_utils
from common import benchmark_utils
from common import fuzzer_utils
from experiment.build import docker_images
BASE_TAG = 'gcr.io/fuzzbench'
BENCHMARK_DIR = benchmark_utils.BENCHMARKS_DIR
def _get_benchmark_fuzz_target(benchmarks):
"""Returns benchmark variables from benchmark.yaml files."""
variables = ''
for benchmark in benchmarks:
benchmark_vars = yaml_utils.read(
os.path.join(BENCHMARK_DIR, benchmark, 'benchmark.yaml'))
variables += (benchmark + '-fuzz-target=' +
benchmark_vars['fuzz_target'] + '\n')
variables += '\n'
return variables
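# Illustrative output (benchmark and target names are assumptions): for a
# benchmark "libpng-1.2.56" whose benchmark.yaml declares fuzz_target
# "libpng_read_fuzzer", this emits the line
# "libpng-1.2.56-fuzz-target=libpng_read_fuzzer".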
def _get_makefile_run_template(image):
fuzzer = image['fuzzer']
benchmark = image['benchmark']
section = ''
run_types = ['run', 'debug', 'test-run', 'debug-builder']
testcases_dir = os.path.join(BENCHMARK_DIR, benchmark, 'testcases')
if os.path.exists(testcases_dir):
run_types.append('repro-bugs')
for run_type in run_types:
if run_type == 'debug-builder':
section += f'{run_type}-{fuzzer}-{benchmark}: '
section += f'.{fuzzer}-{benchmark}-builder-debug\n'
else:
section += f'{run_type}-{fuzzer}-{benchmark}: '
section += f'.{fuzzer}-{benchmark}-runner\n'
section += f'\
\tdocker run \\\n\
\t--cpus=1 \\\n\
\t--shm-size=2g \\\n\
\t--cap-add SYS_NICE \\\n\
\t--cap-add SYS_PTRACE \\\n\
\t-e FUZZ_OUTSIDE_EXPERIMENT=1 \\\n\
\t-e FORCE_LOCAL=1 \\\n\
\t-e TRIAL_ID=1 \\\n\
\t-e FUZZER={fuzzer} \\\n\
\t-e BENCHMARK={benchmark} \\\n\
\t-e FUZZ_TARGET=$({benchmark}-fuzz-target) \\\
\n'
if run_type == 'test-run':
section += '\t-e MAX_TOTAL_TIME=20 \\\n\t-e SNAPSHOT_PERIOD=10 \\\n'
if run_type == 'debug-builder':
section += '\t-e DEBUG_BUILDER=1 \\\n'
section += '\t--entrypoint "/bin/bash" \\\n\t-it '
elif run_type == 'debug':
section += '\t--entrypoint "/bin/bash" \\\n\t-it '
elif run_type == 'repro-bugs':
section += f'\t-v {testcases_dir}:/testcases \\\n\t'
section += '--entrypoint /bin/bash '
section += os.path.join(BASE_TAG, image['tag'])
section += ' -c "for f in /testcases/*; do '
section += 'echo _________________________________________; '
section += 'echo \\$$f:; '
section += '\\$$OUT/\\$$FUZZ_TARGET -timeout=25 -rss_limit_mb=2560 '
section += '\\$$f; done;" '
section += '\n\n'
continue
elif run_type == 'run':
section += '\t-it '
else:
section += '\t'
if run_type != 'debug-builder':
section += os.path.join(BASE_TAG, image['tag'])
else:
section += os.path.join(
BASE_TAG, image['tag'].replace('runners/', 'builders/', 1))
section += '\n\n'
return section
def METHOD_NAME(name, image):
"""Returns makefile section for |image|."""
if not ('base-' in name or 'dispatcher-' in name or name == 'worker'):
section = '.'
else:
section = ''
section += name + ':'
if 'depends_on' in image:
for dep in image['depends_on']:
if 'base' in dep:
section += ' ' + dep
else:
section += ' .' + dep
section += '\n'
if 'base-' in name:
section += '\tdocker pull ubuntu:focal\n'
section += '\tdocker build \\\n'
section += '\t--tag ' + os.path.join(BASE_TAG, image['tag']) + ' \\\n'
section += '\t--build-arg BUILDKIT_INLINE_CACHE=1 \\\n'
section += ('\t--cache-from ' + os.path.join(BASE_TAG, image['tag']) +
' \\\n')
if 'build_arg' in image:
for arg in image['build_arg']:
section += '\t--build-arg ' + arg + ' \\\n'
if 'dockerfile' in image:
section += '\t--file ' + image['dockerfile'] + ' \\\n'
section += '\t' + image['context'] + '\n'
section += '\n'
# Print run, debug, test-run and debug-builder rules if image is a runner.
if 'runner' in name and not ('intermediate' in name or 'base' in name):
section += _get_makefile_run_template(image)
return section
def main():
"""Writes Makefile with docker image build rules to sys.argv[1]."""
if len(sys.argv) != 2:
print(f'Usage: {sys.argv[0]} <makefile>')
return 1
makefile_path = sys.argv[1]
makefile_contents = generate_makefile()
with open(makefile_path, 'w', encoding='utf-8') as file_handle:
file_handle.write(makefile_contents)
return 0
def generate_makefile():
"""Generates the contents of the makefile and returns it."""
fuzzers = fuzzer_utils.get_fuzzer_names()
benchmarks = benchmark_utils.get_all_benchmarks()
buildable_images = docker_images.get_images_to_build(fuzzers, benchmarks)
makefile = 'export DOCKER_BUILDKIT := 1\n\n'
# Print oss-fuzz benchmarks property variables.
makefile += _get_benchmark_fuzz_target(benchmarks)
for name, image in buildable_images.items():
makefile += METHOD_NAME(name, image)
# Print build targets for all fuzzer-benchmark pairs (including coverage).
fuzzers.append('coverage')
for fuzzer in fuzzers:
image_type = 'runner'
if 'coverage' in fuzzer:
image_type = 'builder'
for benchmark in benchmarks:
makefile += (f'build-{fuzzer}-{benchmark}: ' +
f'.{fuzzer}-{benchmark}-{image_type}\n')
makefile += '\n'
# Print fuzzer-all benchmarks build targets.
for fuzzer in fuzzers:
all_build_targets = ' '.join(
[f'build-{fuzzer}-{benchmark}' for benchmark in benchmarks])
makefile += f'build-{fuzzer}-all: {all_build_targets}\n'
all_test_run_targets = ' '.join(
[f'test-run-{fuzzer}-{benchmark}' for benchmark in benchmarks])
makefile += f'test-run-{fuzzer}-all: {all_test_run_targets}\n'
# Print all targets build target.
all_build_targets = ' '.join([f'build-{fuzzer}-all' for fuzzer in fuzzers])
makefile += f'build-all: {all_build_targets}'
return makefile
if __name__ == '__main__':
sys.exit(main()) | null |
5,985 | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.ops.operations import _grad_ops as G
from mindspore.ops.functional import vmap
class NLLLossGradNet(nn.Cell):
def __init__(self, reduction):
super(NLLLossGradNet, self).__init__()
self.grad = G.NLLLossGrad(reduction=reduction)
def construct(self, x, dout_x, target, weight, total_weight):
gout = self.grad(x, dout_x, target, weight, total_weight)
return gout
def get_grad_inputs_and_output(nptype_input, nptype_weight, reduction, input_type="Tensor"):
"""Get inputs and outputs for nll loss grad."""
x = np.array([[0.53, 0.74, -2.12], [1.29, -0.34, -1.13]]).astype(nptype_input)
if reduction == "none":
dloss = np.array([3.24, -2.13]).astype(nptype_input)
else:
dloss = np.array(1.23).astype(nptype_input)
target = np.array([0, 1]).astype(np.int32)
weight = np.array([0.45, -0.32, 1.21]).astype(nptype_weight)
total_weight = np.array(0.13).astype(nptype_weight)
inputs = (x, dloss, target, weight, total_weight)
if input_type == "Tensor":
inputs = (Tensor(input_element) for input_element in inputs)
if reduction == "none":
dx_expected = np.array([[-1.45799994, 0, 0], [0, -0.681600034, 0]])
elif reduction == "mean":
dx_expected = np.array([[-4.25769234, 0, 0], [0, 3.02769232, 0]])
else:
dx_expected = np.array([[-0.553499997, 0, 0], [0, 0.393599987, 0]])
outputs = (dx_expected,)
return inputs, outputs
def nll_loss_grad_template(nptype_input, nptype_weight, reduction, dynamic=False):
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
nll_loss_grad_net = NLLLossGradNet(reduction)
inputs, expected_outputs = get_grad_inputs_and_output(nptype_input, nptype_weight, reduction)
x, dloss, target, weight, total_weight = inputs
if dynamic:
x_dyn = Tensor(shape=[x.shape[0], None], dtype=x.dtype)
nll_loss_grad_net.set_inputs(x_dyn, dloss, target, weight, total_weight)
dx = nll_loss_grad_net(x, dloss, target, weight, total_weight)
dx_np = dx.asnumpy()
print(dx)
dx_expected = expected_outputs[0]
if nptype_input == np.float32 and nptype_weight == np.float32:
ertol_loss = 1e-06
else:
ertol_loss = 1e-02
np.testing.assert_allclose(dx_np, dx_expected, ertol_loss)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def METHOD_NAME():
"""
Feature: test NLLLossGrad vmap interface.
Description: test the rightness of NLLLossGrad kernel.
Expectation: the result match with numpy result
"""
context.set_context(mode=context.PYNATIVE_MODE)
reduction = "none"
def cal_nll_loss_grad(x, dout_x, target, weight, total_weight):
return G.NLLLossGrad(reduction)(x, dout_x, target, weight, total_weight)
inputs, expected_outputs = get_grad_inputs_and_output(np.float32, np.float32, "none", "numpy")
x, dloss, target, weight, total_weight = inputs
dim_size = 3
stack_x = np.stack([x] * dim_size)
stack_dloss = np.stack([dloss] * dim_size)
stack_target = np.stack([target] * dim_size)
outputs = vmap(cal_nll_loss_grad, in_axes=(0, 0, 0, None, None), out_axes=0)(
Tensor(stack_x), Tensor(stack_dloss), Tensor(stack_target), Tensor(weight), Tensor(total_weight))
expect = np.stack([expected_outputs[0]] * dim_size)
ertol_loss = 1e-06
np.testing.assert_allclose(outputs.asnumpy(), expect, ertol_loss)
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_nll_loss_grad_no_reduction():
"""
Feature: test NLLLossGrad kernel.
Description: test the rightness of NLLLossGrad kernel.
Expectation: the result match with numpy result
"""
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "mean")
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_nll_loss_grad_no_reduction_dynamic():
"""
Feature: test NLLLossGrad kernel with dynamic case.
Description: test the rightness of NLLLossGrad kernel.
Expectation: the result match with numpy result
"""
# Four combinations of fp32 and fp16 inputs and weights
nll_loss_grad_template(np.float32, np.float32, "mean", True) | null |
5,986 | # THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Submits job scripts to the rudimentary Unix ``at`` scheduler.
.. cylc-scope:: flow.cylc[runtime][<namespace>]
.. note::
The ``atd`` daemon must be running.
.. note::
The ``atq`` command does not report if the job is running or not.
If an :cylc:conf:`execution time limit` is specified for a task, its job will
be wrapped by the ``timeout`` command.
.. cylc-scope::
"""
import errno
import os
import re
from subprocess import PIPE
class AtCommandHandler():
"""Logic to submit jobs to the "at" job runner."""
# List of known error strings when atd is not running
ERR_NO_ATD_STRS = [
"Can't open /var/run/atd.pid to signal atd. No atd running?",
"Warning: at daemon not running",
]
SHOULD_KILL_PROC_GROUP = True
SHOULD_POLL_PROC_GROUP = True
KILL_CMD_TMPL = "atrm '%(job_id)s'"
POLL_CMD = "atq"
REC_ERR_FILTERS = [
re.compile("warning: commands will be executed using /bin/sh")]
REC_ID_FROM_SUBMIT_ERR = re.compile(r"\Ajob\s(?P<id>\S+)\sat")
# Note: The SUBMIT_CMD_STDIN_TMPL below requires "sh" compatible shell. The
# safest way, therefore, is to force the command to run under "/bin/sh" by
# exporting "SHELL=/bin/sh" for the "at" command.
SUBMIT_CMD_ENV = {"SHELL": "/bin/sh"}
SUBMIT_CMD_TMPL = "at now"
# Note: The perl command ensures that the job script is executed in its own
# process group, which allows the job script and its child processes to be
# killed correctly.
SUBMIT_CMD_STDIN_TMPL = (
r"exec perl -e 'setpgrp(0,0);exec(@ARGV)'" +
r" '%(job)s' 1>'%(job)s.out' 2>'%(job)s.err'")
SUBMIT_CMD_STDIN_TMPL_2 = (
r"exec perl -e 'setpgrp(0,0);exec(@ARGV)'" +
r" timeout --signal=XCPU %(execution_time_limit)d" +
r" '%(job)s' 1>'%(job)s.out' 2>'%(job)s.err'")
# atq properties:
# * stdout is "job-num date hour queue username", e.g.:
# 1762 Wed May 15 00:20:00 2013 = hilary
# * queue is '=' if running
#
def filter_submit_output(self, out, err):
"""Suppress at's routine output to stderr.
Otherwise we get warning messages that suggest something is wrong.
1) move the standard job ID message from stderr to stdout
2) suppress the message warning that commands will be executed with
/bin/sh (this refers to the command line that runs the job script).
Call get_id() first, to extract the job ID.
"""
new_err = ""
if err:
for line in err.splitlines(True):
if self.REC_ID_FROM_SUBMIT_ERR.match(line):
out += line
elif any(rec.match(line) for rec in self.REC_ERR_FILTERS):
continue
elif line.strip() in self.ERR_NO_ATD_STRS:
raise OSError(
errno.ESRCH, os.strerror(errno.ESRCH), line)
else:
new_err += line
return out, new_err
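# Example of the filtering above (assumed atd output): a stderr line such as
# "job 1762 at Wed May 15 00:20:00 2013" is moved to stdout so get_id() can
# parse the job ID, while the "/bin/sh" warning line is dropped.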
@classmethod
def METHOD_NAME(cls, job_file_path, submit_opts):
"""Return proc_stdin_arg, proc_stdin_value."""
try:
return (PIPE, cls.SUBMIT_CMD_STDIN_TMPL_2 % {
"job": job_file_path,
"execution_time_limit": submit_opts["execution_time_limit"]})
except KeyError:
return (PIPE, cls.SUBMIT_CMD_STDIN_TMPL % {"job": job_file_path})
JOB_RUNNER_HANDLER = AtCommandHandler() | null |
5,987 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2019 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantidqt package
#
import os
import shutil
import time
import unittest
from mantid.api import AnalysisDataService as ADS
from mantid.kernel import ConfigService
from mantid.simpleapi import CreateSampleWorkspace
from unittest import mock
from workbench.projectrecovery.projectrecovery import ProjectRecovery, NO_OF_CHECKPOINTS_KEY
from workbench.projectrecovery.recoverygui.projectrecoverymodel import ProjectRecoveryModel
class ProjectRecoveryModelTest(unittest.TestCase):
def METHOD_NAME(self):
self.pr = ProjectRecovery(multifileinterpreter=None)
# Make absolutely sure that the workbench-recovery directory is cleared.
if os.path.exists(self.pr.recovery_directory):
shutil.rmtree(self.pr.recovery_directory)
# Set up some checkpoints
self.setup_some_checkpoints()
self.pr._make_process_from_pid = mock.MagicMock()
self.pr._is_mantid_workbench_process = mock.MagicMock(return_value=True)
self.prm = ProjectRecoveryModel(self.pr, mock.MagicMock())
def tearDown(self):
# Make sure to clear the hostname layer between tests
ADS.clear()
if os.path.exists(self.pid):
shutil.rmtree(self.pid)
def setup_some_checkpoints(self):
self.pr._spin_off_another_time_thread = mock.MagicMock()
directory = self.pr.recovery_directory_hostname
# Add a numbered folder for the pid
self.pid = os.path.join(directory, "3000000")
if not os.path.exists(self.pid):
os.makedirs(self.pid)
self.pr._recovery_directory_pid = self.pid
# Add 5 workspaces
for ii in range(0, 5):
CreateSampleWorkspace(OutputWorkspace=str(ii))
self.pr.saver._spin_off_another_time_thread = mock.MagicMock()
self.pr.recovery_save()
def test_find_number_of_workspaces_in_directory(self):
# Expect 0 as just checkpoints
self.assertEqual(self.prm.find_number_of_workspaces_in_directory(self.pid), 0)
self.assertTrue(os.path.exists(self.pid))
list_dir = os.listdir(self.pid)
list_dir.sort()
self.assertEqual(self.prm.find_number_of_workspaces_in_directory(os.path.join(self.pid, list_dir[0])), 5)
def test_get_row_as_string(self):
row = self.prm.rows[0]
self.assertEqual(self.prm.get_row(row[0]), row)
def test_get_row_as_int(self):
row = self.prm.rows[0]
self.assertEqual(self.prm.get_row(0), row)
def test_get_row_as_string_not_found(self):
row = ["", "", ""]
self.assertEqual(self.prm.get_row("asdadasdasd"), row)
def test_start_mantid_normally(self):
self.prm.start_mantid_normally()
self.assertEqual(self.prm.presenter.close_view.call_count, 1)
def test_recover_selected_checkpoint(self):
checkpoint = os.listdir(self.pid)[0]
self.prm._start_recovery_of_checkpoint = mock.MagicMock()
self.prm.recover_selected_checkpoint(checkpoint)
self.assertEqual(1, self.prm.presenter.change_start_mantid_to_cancel_label.call_count)
self.assertEqual(1, self.prm._start_recovery_of_checkpoint.call_count)
def test_open_selected_in_editor(self):
checkpoint = os.listdir(self.pid)[0]
self.prm.project_recovery.open_checkpoint_in_script_editor = mock.MagicMock()
self.prm.open_selected_in_editor(checkpoint)
self.assertEqual(1, self.prm.project_recovery.open_checkpoint_in_script_editor.call_count)
self.assertEqual(
self.prm.project_recovery.open_checkpoint_in_script_editor.call_args, mock.call(os.path.join(self.pid, checkpoint))
)
def test_decide_last_checkpoint(self):
CreateSampleWorkspace(OutputWorkspace="6")
self.pr.recovery_save()
checkpoints = os.listdir(self.pid)
checkpoints.sort()
last_checkpoint = self.prm.decide_last_checkpoint()
self.assertEqual(checkpoints[-1], os.path.basename(last_checkpoint))
def test_fill_rows(self):
# wait a second so that we can add a second checkpoint with a different name, because the checkpoints differ at
# most by a second.
time.sleep(1)
CreateSampleWorkspace(OutputWorkspace="6")
self.pr.recovery_save()
self.prm.fill_rows()
checkpoints = os.listdir(self.pid)
checkpoints.sort()
self.assertEqual(["", "", ""], self.prm.rows[2])
self.assertEqual([checkpoints[0].replace("T", " "), "5", "No"], self.prm.rows[1])
self.assertEqual([checkpoints[1].replace("T", " "), "6", "No"], self.prm.rows[0])
def test_get_number_of_checkpoints(self):
self.assertEqual(int(ConfigService.getString(NO_OF_CHECKPOINTS_KEY)), self.prm.get_number_of_checkpoints())
def test_update_checkpoint_tried(self):
checkpoints = os.listdir(self.pid)
self.assertEqual(self.prm.rows[0][2], "No")
self.prm._update_checkpoint_tried(os.path.join(self.pid, checkpoints[0]))
self.assertEqual(self.prm.rows[0][2], "Yes") | null |
5,988 | import queue
import string
from functools import partial, reduce
from echo.callback_container import CallbackContainer
__all__ = ['DeferredMethod', 'nonpartial', 'lookup_class', 'as_variable_name',
'as_list', 'file_format', 'CallbackMixin', 'PropertySetMixin',
'Pointer', 'common_prefix', 'queue_to_list', 'format_choices']
class DeferredMethod(object):
"""
This class stubs out a method, and provides a callable interface that logs
its calls. These can later be actually executed on the original
(non-stubbed) method by calling executed_deferred_calls
"""
def __init__(self, method):
self.method = method
self.calls = [] # avoid hashability issues with dict/set
@property
def original_method(self):
return self.method
def __call__(self, instance, *a, **k):
if instance not in (c[0] for c in self.calls):
self.calls.append((instance, a, k))
def __get__(self, instance, owner):
if instance is None:
return self
return partial(self.__call__, instance)
def execute_deferred_calls(self):
for instance, args, kwargs in self.calls:
self.method(instance, *args, **kwargs)
def nonpartial(func, *args, **kwargs):
"""
Like functools.partial, this returns a function which, when called, calls
``func(*args, **kwargs)``.
Unlike functools.partial, extra arguments passed to the returned function
are *not* passed to the input function.
This is used when connecting slots to ``QAction.triggered`` signals, which
appear to have different signatures and seem to add an extra argument
in PyQt but not PySide.
"""
def result(*a, **k):
return func(*args, **kwargs)
return result
def lookup_class(ref):
"""
Look up an object via its module string (e.g., 'glue.core.data.Data')
Parameters
----------
ref : str
The module string
"""
if ref.startswith('__builtin__'):
ref = '.'.join(['builtins'] + ref.split('.')[1:])
mod = ref.rsplit('.', 1)[0]
try:
result = __import__(mod)
except ImportError:
raise ValueError("Module '{0}' not found".format(mod))
try:
for attr in ref.split('.')[1:]:
result = getattr(result, attr)
return result
except AttributeError:
raise ValueError("Object '{0}' not found".format(ref))
def as_variable_name(x):
"""
Convert a string to a legal python variable name
Parameters
----------
x : str
A string to (possibly) rename
Returns
-------
variable_name : str
A legal Python variable name
"""
allowed = string.ascii_letters + string.digits + '_'
result = [letter if letter in allowed else '_' for letter in x or 'x']
if result[0] in string.digits:
result.insert(0, '_')
return ''.join(result)
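# Example: as_variable_name('3 + 4') == '_3___4' and as_variable_name('') == 'x'.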
def as_list(x):
if isinstance(x, list):
return x
return [x]
def METHOD_NAME(filename):
if filename.find('.') == -1:
return ''
if filename.lower().endswith('.gz'):
result = filename.lower().rsplit('.', 2)[1]
else:
result = filename.lower().rsplit('.', 1)[1]
return result
class CallbackMixin(object):
"""
A mixin that provides a utility for attaching callback
functions to methods
"""
def __init__(self):
self._callbacks = CallbackContainer()
def add_callback(self, function):
self._callbacks.append(function)
def remove_callback(self, function):
self._callbacks.remove(function)
def notify(self, *args, **kwargs):
for func in self._callbacks:
func(*args, **kwargs)
class PropertySetMixin(object):
"""An object that provides a set of properties that
are meant to encapsulate state information
This class exposes a properties attribute, which is a dict
of all properties. Similarly, assigning to the properties dict
will update the individual properties
"""
_property_set = []
@property
def properties(self):
""" A dict mapping property names to values """
return dict((p, getattr(self, p)) for p in self._property_set)
@properties.setter
def properties(self, value):
""" Update the properties with a new dict.
Keys in the new dict must be valid property names defined in
the _property_set class level attribute"""
invalid = set(value.keys()) - set(self._property_set)
if invalid:
raise ValueError("Invalid property values: %s" % invalid)
for k in self._property_set:
if k not in value:
continue
setattr(self, k, value[k])
def common_prefix(strings, exclude_punctuation=True):
"""
Given a list of strings, find the longest prefix common to all of them
"""
if len(strings) > 0:
for i in range(len(strings[0]), 0, -1):
if exclude_punctuation and strings[0][i - 1] in string.punctuation:
continue
for st in strings[1:]:
if st[:i] != strings[0][:i]:
break
else:
return strings[0][:i]
return ''
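# Example: common_prefix(['data_x', 'data_y']) == 'data' because the trailing
# '_' is skipped when exclude_punctuation is True.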
class Pointer(object):
def __init__(self, key):
self.key = key
def __get__(self, instance, type=None):
val = instance
for k in self.key.split('.'):
val = getattr(val, k, None)
return val
def __set__(self, instance, value):
v = self.key.split('.')
attr = reduce(getattr, [instance] + v[:-1])
setattr(attr, v[-1], value)
def queue_to_list(q):
"""
Get all the values in a :class:`queue.Queue` object and return a list.
"""
l = []
while True:
try:
l.append(q.get_nowait())
except queue.Empty:
return l
def format_choices(options, index=False):
"""
Return a string with an error message formatted as:
* option 1
* option 2
This can be prepended to existing error messages.
"""
updated_options = []
for option in options:
if isinstance(option, str):
updated_options.append("'{0}'".format(option))
elif isinstance(option, type):
updated_options.append(str(option.__module__) + '.' + option.__name__)
else:
updated_options.append(option)
if index:
return "\n\n" + '\n'.join(['* {0} or {1}'.format(idx, option) for idx, option in enumerate(updated_options)])
else:
return "\n\n" + '\n'.join(['* {0}'.format(option) for option in updated_options]) | null |
5,989 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run `yarn test-python` in the package root directory.
# This test only supports running in Linux.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import tempfile
import shutil
import tensorflow as tf
import inference
class InferenceTest(tf.test.TestCase):
def testInference(self):
backends = ['cpu', 'wasm']
for backend in backends:
binary_path = os.path.join('../binaries', 'tfjs-inference-linux')
model_path = os.path.join('../test_data', 'model.json')
test_data_dir = os.path.join('../test_data')
tmp_dir = tempfile.mkdtemp()
inference.predict(binary_path, model_path, test_data_dir, tmp_dir, backend=backend)
with open(os.path.join(tmp_dir, 'data.json'), 'rt') as f:
ys_values = json.load(f)
# The output is a list of tensor data in the form of dict.
# Example output:
# [{"0":0.7567615509033203,"1":-0.18349379301071167,"2":0.7567615509033203,"3":-0.18349379301071167}]
ys_values = [list(y.values()) for y in ys_values]
with open(os.path.join(tmp_dir, 'shape.json'), 'rt') as f:
ys_shapes = json.load(f)
with open(os.path.join(tmp_dir, 'dtype.json'), 'rt') as f:
ys_dtypes = json.load(f)
self.assertAllClose(ys_values[0], [
0.7567615509033203, -0.18349379301071167, 0.7567615509033203,
-0.18349379301071167
])
self.assertAllEqual(ys_shapes[0], [2, 2])
self.assertEqual(ys_dtypes[0], 'float32')
self.assertFalse(os.path.exists(os.path.join(tmp_dir, 'name.json')))
# Cleanup tmp dir.
shutil.rmtree(tmp_dir)
# Todo(linazhao): Add a test model that outputs multiple tensors.
def testInferenceWithOutputNameFile(self):
binary_path = os.path.join('../binaries', 'tfjs-inference-linux')
model_path = os.path.join('../test_data', 'model.json')
test_data_dir = os.path.join('../test_data')
tmp_dir = tempfile.mkdtemp()
inference.predict(binary_path, model_path, test_data_dir, tmp_dir, tf_output_name_file='tf_output_name.json')
with open(os.path.join(tmp_dir, 'data.json'), 'rt') as f:
ys_values = json.load(f)
# The output is a list of tensor data in the form of dict.
# Example output:
# [{"0":0.7567615509033203,"1":-0.18349379301071167,"2":0.7567615509033203,"3":-0.18349379301071167}]
ys_values = [list(y.values()) for y in ys_values]
with open(os.path.join(tmp_dir, 'shape.json'), 'rt') as f:
ys_shapes = json.load(f)
with open(os.path.join(tmp_dir, 'dtype.json'), 'rt') as f:
ys_dtypes = json.load(f)
self.assertAllClose(ys_values[0], [
0.7567615509033203, -0.18349379301071167, 0.7567615509033203,
-0.18349379301071167
])
self.assertAllEqual(ys_shapes[0], [2, 2])
self.assertEqual(ys_dtypes[0], 'float32')
self.assertFalse(os.path.exists(os.path.join(tmp_dir, 'name.json')))
# Cleanup tmp dir.
shutil.rmtree(tmp_dir)
def testInferenceWithNonExistingOutputNameFile(self):
binary_path = os.path.join('../binaries', 'tfjs-inference-linux')
model_path = os.path.join('../test_data', 'model.json')
test_data_dir = os.path.join('../test_data')
tmp_dir = tempfile.mkdtemp()
# Throws an error
with self.assertRaises(ValueError):
inference.predict(binary_path, model_path, test_data_dir, tmp_dir, tf_output_name_file='non_exist.json')
# ...and does not create an output file.
with self.assertRaises(FileNotFoundError):
with open(os.path.join(tmp_dir, 'data.json'), 'rt') as f:
json.load(f)
# Cleanup tmp dir.
shutil.rmtree(tmp_dir)
def METHOD_NAME(self):
backends = ['cpu', 'wasm']
for backend in backends:
binary_path = os.path.join('../binaries', 'tfjs-inference-linux')
model_path = os.path.join('../test_data', 'model_structured_outputs.json')
test_data_dir = os.path.join('../test_data')
tmp_dir = tempfile.mkdtemp()
inference.predict(binary_path, model_path, test_data_dir, tmp_dir, backend=backend)
with open(os.path.join(tmp_dir, 'data.json'), 'rt') as f:
ys_values = json.load(f)
# The output is a list of tensor data in the form of dict.
# Example output:
# [{"0":0.7567615509033203,"1":-0.18349379301071167,"2":0.7567615509033203,"3":-0.18349379301071167}]
ys_values = [list(y.values()) for y in ys_values]
with open(os.path.join(tmp_dir, 'shape.json'), 'rt') as f:
ys_shapes = json.load(f)
with open(os.path.join(tmp_dir, 'dtype.json'), 'rt') as f:
ys_dtypes = json.load(f)
with open(os.path.join(tmp_dir, 'name.json'), 'rt') as f:
ys_names = json.load(f)
self.assertAllClose(ys_values[0], [
0.7567615509033203, -0.18349379301071167, 0.7567615509033203,
-0.18349379301071167
])
self.assertAllEqual(ys_shapes[0], [2, 2])
self.assertEqual(ys_dtypes[0], 'float32')
self.assertEqual(ys_names[0], 'testName')
# Cleanup tmp dir.
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
tf.test.main() | null |
5,990 | import logging
import click
from tqdm import tqdm
from pymobiledevice3.cli.cli_common import Command
from pymobiledevice3.lockdown import LockdownClient
from pymobiledevice3.services.mobilebackup2 import Mobilebackup2Service
source_option = click.option('--source', default='', help='The UDID of the source device.')
password_option = click.option('-p', '--password', default='', help='Backup password.')
backup_directory_arg = click.argument('backup-directory', type=click.Path(exists=True, file_okay=False))
backup_directory_option = click.option('-b', '--backup-directory', type=click.Path(exists=True, file_okay=False),
default='.')
logger = logging.getLogger(__name__)
@click.group()
def cli():
""" backup cli """
pass
@cli.group()
def backup2():
""" backup utils """
pass
@backup2.command(cls=Command)
@click.argument('backup-directory', type=click.Path(file_okay=False))
@click.option('--full', is_flag=True, help=('Whether to do a full backup.'
' If full is True, any previous backup attempts will be discarded.'))
def backup(service_provider: LockdownClient, backup_directory, full):
"""
Backup device.
All backup data will be written to BACKUP_DIRECTORY, under a directory named with the device's udid.
"""
backup_client = Mobilebackup2Service(service_provider)
with tqdm(total=100, dynamic_ncols=True) as pbar:
def update_bar(percentage):
pbar.n = percentage
pbar.refresh()
backup_client.backup(full=full, backup_directory=backup_directory, progress_callback=update_bar)
@backup2.command(cls=Command)
@backup_directory_arg
@click.option('--system/--no-system', default=False, help='Restore system files.')
@click.option('--reboot/--no-reboot', default=True, help='Reboot the device when done.')
@click.option('--copy/--no-copy', default=True, help='Create a copy of backup folder before restoring.')
@click.option('--settings/--no-settings', default=True, help='Restore device settings.')
@click.option('--remove/--no-remove', default=False, help='Remove items which aren\'t being restored.')
@password_option
@source_option
def restore(service_provider: LockdownClient, backup_directory, system, reboot, copy, settings, remove, password, source):
"""
Restore a backup to a device.
The backup will be restored from a directory with the device udid under BACKUP_DIRECTORY.
"""
backup_client = Mobilebackup2Service(service_provider)
with tqdm(total=100, dynamic_ncols=True) as pbar:
def update_bar(percentage):
pbar.n = percentage
pbar.refresh()
backup_client.restore(backup_directory=backup_directory, progress_callback=update_bar, system=system,
reboot=reboot, copy=copy, settings=settings, remove=remove, password=password,
source=source)
@backup2.command(cls=Command)
@backup_directory_arg
@source_option
def info(service_provider: LockdownClient, backup_directory, source):
"""
Print information about a backup.
"""
backup_client = Mobilebackup2Service(service_provider)
print(backup_client.info(backup_directory=backup_directory, source=source))
@backup2.command('list', cls=Command)
@backup_directory_arg
@source_option
def list_(service_provider: LockdownClient, backup_directory, source):
"""
List all file in the backup in a CSV format.
"""
backup_client = Mobilebackup2Service(service_provider)
print(backup_client.list(backup_directory=backup_directory, source=source))
@backup2.command(cls=Command)
@backup_directory_arg
@password_option
@source_option
def unback(service_provider: LockdownClient, backup_directory, password, source):
"""
Convert all files in the backup to the correct directory hierarchy.
"""
backup_client = Mobilebackup2Service(service_provider)
backup_client.unback(backup_directory=backup_directory, password=password, source=source)
@backup2.command(cls=Command)
@click.argument('domain-name')
@click.argument('relative-path')
@backup_directory_arg
@password_option
@source_option
def METHOD_NAME(service_provider: LockdownClient, domain_name, relative_path, backup_directory, password, source):
"""
Extract a file from the backup.
The file that belongs to the domain DOMAIN_NAME and located on the device in the path RELATIVE_PATH,
will be extracted to the BACKUP_DIRECTORY.
"""
backup_client = Mobilebackup2Service(service_provider)
backup_client.METHOD_NAME(domain_name, relative_path, backup_directory=backup_directory, password=password,
source=source)
@backup2.command(cls=Command)
@click.argument('mode', type=click.Choice(['on', 'off'], case_sensitive=False))
@click.argument('password')
@backup_directory_option
def encryption(service_provider: LockdownClient, backup_directory, mode, password):
"""
Set backup encryption on / off.
When on, PASSWORD will be the new backup password.
When off, PASSWORD is the current backup password.
"""
backup_client = Mobilebackup2Service(service_provider)
should_encrypt = mode.lower() == 'on'
if should_encrypt == backup_client.will_encrypt:
logger.error('Encryption already ' + ('on!' if should_encrypt else 'off!'))
return
if should_encrypt:
backup_client.change_password(backup_directory, new=password)
else:
backup_client.change_password(backup_directory, old=password)
@backup2.command(cls=Command)
@click.argument('old-password')
@click.argument('new-password')
@backup_directory_option
def change_password(service_provider: LockdownClient, old_password, new_password, backup_directory):
"""
Change the backup password.
"""
backup_client = Mobilebackup2Service(service_provider)
if not backup_client.will_encrypt:
logger.error('Encryption is not turned on!')
return
backup_client.change_password(backup_directory, old=old_password, new=new_password)
@backup2.command(cls=Command)
@backup_directory_arg
def erase_device(service_provider: LockdownClient, backup_directory):
"""
Erase all data on the device.
"""
backup_client = Mobilebackup2Service(service_provider)
backup_client.erase_device(backup_directory) | null |
5,991 | from typing import Dict, Iterator, List, Optional, Tuple, Union, cast
import torch
from torch.distributions import Categorical
from kornia.augmentation.auto.base import SUBPLOLICY_CONFIG, PolicyAugmentBase
from kornia.augmentation.auto.operations import OperationBase
from kornia.augmentation.auto.operations.policy import PolicySequential
from kornia.augmentation.container.params import ParamItem
from kornia.core import Module, Tensor
from . import ops
default_policy: List[SUBPLOLICY_CONFIG] = [
[("auto_contrast", 0, 1)],
[("equalize", 0, 1)],
[("invert", 0, 1)],
[("rotate", -30.0, 30.0)],
[("posterize", 0.0, 4)],
[("solarize", 0.0, 1.0)],
[("solarize_add", 0.0, 0.43)],
[("color", 0.1, 1.9)],
[("contrast", 0.1, 1.9)],
[("brightness", 0.1, 1.9)],
[("sharpness", 0.1, 1.9)],
[("shear_x", -0.3, 0.3)],
[("shear_y", -0.3, 0.3)],
# (CutoutAbs, 0, 40),
[("translate_x", -0.1, 0.1)],
[("translate_x", -0.1, 0.1)],
]
class RandAugment(PolicyAugmentBase):
"""Apply RandAugment :cite:`cubuk2020randaugment` augmentation strategies.
Args:
n: the number of augmentations to apply sequentially.
m: magnitude for all the augmentations, ranged from [0, 30].
policy: candidate transformations. If None, a default candidate list will be used.
transformation_matrix_mode: computation mode for the chained transformation matrix, via `.transform_matrix`
attribute.
If `silent`, transformation matrix will be computed silently and the non-rigid
modules will be ignored as identity transformations.
If `rigid`, transformation matrix will be computed silently and the non-rigid
modules will trigger errors.
If `skip`, transformation matrix will be totally ignored.
Examples:
>>> import kornia.augmentation as K
>>> in_tensor = torch.rand(5, 3, 30, 30)
>>> aug = K.AugmentationSequential(RandAugment(n=2, m=10))
>>> aug(in_tensor).shape
torch.Size([5, 3, 30, 30])
"""
def __init__(
self,
n: int,
m: int,
policy: Optional[List[SUBPLOLICY_CONFIG]] = None,
transformation_matrix_mode: str = "silent",
) -> None:
if m <= 0 or m >= 30:
raise ValueError(f"Expect `m` in [0, 30]. Got {m}.")
if policy is None:
_policy = default_policy
else:
_policy = policy
super().__init__(_policy, transformation_matrix_mode=transformation_matrix_mode)
selection_weights = torch.tensor([1.0 / len(self)] * len(self))
self.rand_selector = Categorical(selection_weights)
self.n = n
self.m = m
def METHOD_NAME(self, subpolicy: SUBPLOLICY_CONFIG) -> PolicySequential:
if len(subpolicy) != 1:
raise RuntimeError(f"Each policy must have only one operation for RandAugment. Got {len(subpolicy)}.")
name, low, high = subpolicy[0]
return PolicySequential(*[getattr(ops, name)(low, high)])
def get_forward_sequence(self, params: Optional[List[ParamItem]] = None) -> Iterator[Tuple[str, Module]]:
if params is None:
idx = self.rand_selector.sample((self.n,))
return self.get_children_by_indices(idx)
return self.get_children_by_params(params)
def forward_parameters(self, batch_shape: torch.Size) -> List[ParamItem]:
named_modules: Iterator[Tuple[str, Module]] = self.get_forward_sequence()
params: List[ParamItem] = []
mod_param: Union[Dict[str, Tensor], List[ParamItem]]
m = torch.tensor([self.m / 30] * batch_shape[0])
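# self.m is a global magnitude in [0, 30]; dividing by 30 normalises it to
# [0, 1] so each op can rescale it into its own (minval, maxval) range below.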
for name, module in named_modules:
# The Input PolicySequential only got one child.
op = cast(PolicySequential, module)[0]
op = cast(OperationBase, op)
mag = None
if op.magnitude_range is not None:
minval, maxval = op.magnitude_range
mag = m * float(maxval - minval) + minval
mod_param = op.forward_parameters(batch_shape, mag=mag)
# Compose it
param = ParamItem(name, [ParamItem(next(iter(module.named_children()))[0], mod_param)])
params.append(param)
return params | null |
5,992 | from lxml import etree
from bbot.modules.base import BaseModule
class nmap(BaseModule):
watched_events = ["IP_ADDRESS", "DNS_NAME"]
produced_events = ["OPEN_TCP_PORT"]
flags = ["active", "portscan", "aggressive", "web-thorough"]
meta = {"description": "Execute port scans with nmap"}
options = {
"ports": "",
"top_ports": 100,
"timing": "T4",
"skip_host_discovery": True,
}
options_desc = {
"ports": "ports to scan",
"top_ports": "top ports to scan",
"timing": "-T<0-5>: Set timing template (higher is faster)",
"skip_host_discovery": "skip host discovery (-Pn)",
}
max_event_handlers = 2
batch_size = 256
_priority = 2
deps_apt = ["nmap"]
deps_pip = ["lxml~=4.9.2"]
async def setup(self):
self.helpers.depsinstaller.ensure_root(message="Nmap requires root privileges")
self.ports = self.config.get("ports", "")
self.timing = self.config.get("timing", "T4")
self.top_ports = self.config.get("top_ports", 100)
self.skip_host_discovery = self.config.get("skip_host_discovery", True)
return True
async def handle_batch(self, *events):
target = self.helpers.make_target(events)
targets = list(set(str(e.data) for e in events))
command, output_file = self.construct_command(targets)
try:
await self.helpers.run(command, sudo=True)
for host in self.parse_nmap_xml(output_file):
source_event = None
for h in [host.address] + host.hostnames:
source_event = target.get(h)
if source_event is not None:
break
if source_event is None:
self.warning(f"Failed to correlate source event from {host}")
source_event = self.scan.root_event
for port in host.open_ports:
port_number = int(port.split("/")[0])
netloc = self.helpers.make_netloc(host.address, port_number)
self.emit_event(netloc, "OPEN_TCP_PORT", source=source_event)
for hostname in host.hostnames:
netloc = self.helpers.make_netloc(hostname, port_number)
self.emit_event(netloc, "OPEN_TCP_PORT", source=source_event)
finally:
output_file.unlink(missing_ok=True)
def construct_command(self, targets):
ports = self.config.get("ports", "")
top_ports = self.config.get("top_ports", "")
temp_filename = self.helpers.temp_filename(extension="xml")
command = [
"nmap",
"-n",
f"-{self.timing}",
"-oX",
temp_filename,
]
if self.skip_host_discovery:
command += ["-Pn"]
if ports:
command += ["-p", ports]
else:
command += ["--top-ports", top_ports]
command += targets
return command, temp_filename
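    # Illustrative example (added; values depend on the module config): with the
    # defaults above (top_ports=100, timing="T4", skip_host_discovery=True) and
    # targets=["8.8.8.8"], the assembled command resembles:
    #   ["nmap", "-n", "-T4", "-oX", "<temp>.xml", "-Pn", "--top-ports", 100, "8.8.8.8"]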
def parse_nmap_xml(self, xml_file):
try:
with open(xml_file, "rb") as f:
et = etree.parse(f)
for host in et.iter("host"):
yield NmapHost(host)
except Exception as e:
self.warning(f"Error parsing Nmap XML at {xml_file}: {e}")
async def METHOD_NAME(self):
resume_file = self.helpers.current_dir / "resume.cfg"
resume_file.unlink(missing_ok=True)
class NmapHost(str):
def __init__(self, xml):
self.etree = xml
# convenient host information
self.status = self.etree.find("status").attrib.get("state", "down")
self.address = self.etree.find("address").attrib.get("addr", "")
self.hostnames = []
for hostname in self.etree.findall("hostnames/hostname"):
hostname = hostname.attrib.get("name")
            if hostname and hostname not in self.hostnames:
self.hostnames.append(hostname)
# convenient port information
self.scripts = dict()
self.open_ports = []
self.closed_ports = []
self.filtered_ports = []
for port in self.etree.findall("ports/port"):
port_name = port.attrib.get("portid", "0") + "/" + port.attrib.get("protocol", "tcp").lower()
port_status = port.find("state").attrib.get("state", "closed")
if port_status in ("open", "closed", "filtered"):
getattr(self, f"{port_status}_ports").append(port_name)
for script in port.iter("script"):
script_name = script.attrib.get("id", "")
script_output = script.attrib.get("output", "")
if script_name:
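                    # Nested-dict insertion: create the per-port dict on the first
                    # script seen for this port, then add subsequent scripts to it.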
try:
self.scripts[port_name][script_name] = script_output
except KeyError:
self.scripts[port_name] = {script_name: script_output}
def __str__(self):
address = self.address + (" " if self.address else "")
hostnames = "(" + ", ".join(self.hostnames) + ")" if self.hostnames else ""
return f"{address}{hostnames}"
def __repr__(self):
return str(self) | null |
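# Minimal illustrative sketch (added, not part of the original module): how
# NmapHost extracts host details from a single <host> element. The XML fragment
# and the helper name below are hypothetical.
def _example_parse_host():
    xml = etree.fromstring(
        b'<host><status state="up"/><address addr="10.0.0.1" addrtype="ipv4"/>'
        b'<hostnames><hostname name="example.local"/></hostnames>'
        b'<ports><port protocol="tcp" portid="22"><state state="open"/></port></ports>'
        b'</host>'
    )
    host = NmapHost(xml)
    # host.address == "10.0.0.1", host.hostnames == ["example.local"],
    # host.open_ports == ["22/tcp"]
    return host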
5,993 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for retrieving paths for various types of artifacts."""
import os
import absl
from tfx.dsl.io import fileio
from tfx.types import artifact
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
from tfx.utils import io_utils
from tfx.utils import path_constants
_OLD_EVAL_MODEL_DIR = 'eval_model_dir'
_OLD_SERVING_MODEL_DIR = 'serving_model_dir'
"""Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir, eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
For generic trainer with Keras, there won't be eval model:
|-- <ModelExportPath>
|-- SERVING_MODEL_DIR <- serving_model_dir, serving_model_path
|-- saved_model.pb
|-- ...
TODO(b/160795287): Deprecate estimator based executor.
Support for estimator-based executor and model export will be
deprecated soon. The following estimator working directory
structure is still supported for backwards compatibility:
Directory structure of exported model for estimator based trainer:
|-- <ModelExportPath>
|-- EVAL_MODEL_DIR <- eval_model_dir
|-- <timestamped model> <- eval_model_path
|-- saved_model.pb
|-- ...
|-- SERVING_MODEL_DIR <- serving_model_dir
|-- export
|-- <exporter name>
|-- <timestamped model> <- serving_model_path
|-- saved_model.pb
|-- ...
|-- ...
"""
def is_old_model_artifact(model_artifact: artifact.Artifact) -> bool:
"""Check whether the model artifact is generated by old TFX version."""
if model_artifact.type != standard_artifacts.Model:
absl.logging.warning(f'Artifact type is not Model: {model_artifact.type}.')
return artifact_utils.is_artifact_version_older_than(
model_artifact, artifact_utils._ARTIFACT_VERSION_FOR_MODEL_UPDATE) # pylint: disable=protected-access
def eval_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns directory for exported model for evaluation purpose."""
if is_old_artifact:
return os.path.join(output_uri, _OLD_EVAL_MODEL_DIR)
return os.path.join(output_uri, path_constants.EVAL_MODEL_DIR)
def METHOD_NAME(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns final path to exported model for evaluation purpose."""
model_dir = eval_model_dir(output_uri, is_old_artifact)
model_file = os.path.join(model_dir, 'saved_model.pb')
if fileio.exists(model_file):
return model_dir
elif fileio.exists(model_dir):
# TODO(b/160795287): Deprecate estimator based executor.
    absl.logging.warning('Support for estimator-based executor and model'
                         ' export will be deprecated soon. Please use the'
                         ' export structure '
                         '<ModelExportPath>/eval_model_dir/saved_model.pb')
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If eval model doesn't exist, use serving model for eval.
return serving_model_path(output_uri, is_old_artifact)
def serving_model_dir(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns directory for exported model for serving purpose."""
if is_old_artifact:
return os.path.join(output_uri, _OLD_SERVING_MODEL_DIR)
return os.path.join(output_uri, path_constants.SERVING_MODEL_DIR)
def get_model_dir_by_type(output_uri: str,
model_type: str,
is_old_artifact: bool = False) -> str:
"""Returns directly for exported model depending on model_type."""
if model_type == path_constants.TFMA_EVAL:
return eval_model_dir(output_uri, is_old_artifact)
else:
return serving_model_dir(output_uri, is_old_artifact)
def serving_model_path(output_uri: str, is_old_artifact: bool = False) -> str:
"""Returns path for exported serving model."""
model_dir = serving_model_dir(output_uri, is_old_artifact)
export_dir = os.path.join(model_dir, 'export')
if fileio.exists(export_dir):
# TODO(b/160795287): Deprecate estimator based executor.
    absl.logging.warning(
        'Support for estimator-based executor and model export'
        ' will be deprecated soon. Please use the export structure '
        '<ModelExportPath>/serving_model_dir/saved_model.pb')
model_dir = io_utils.get_only_uri_in_dir(export_dir)
return io_utils.get_only_uri_in_dir(model_dir)
else:
# If dir doesn't match estimator structure, use serving model root directly.
return model_dir
def stamped_model_path(output_uri: str) -> str:
"""Returns path for the stamped model."""
return os.path.join(output_uri, path_constants.STAMPED_MODEL_DIR)
def warmup_file_path(saved_model_path: str) -> str:
"""Returns SavedModel Warmup file path.
See https://www.tensorflow.org/tfx/serving/saved_model_warmup.
This is a lexical operation, and does not guarantee the path is valid.
Args:
saved_model_path: A POSIX path to the TensorFlow SavedModel.
Returns:
A POSIX path to the SavedModel Warmup file.
"""
return os.path.join(
saved_model_path,
'assets.extra',
'tf_serving_warmup_requests') | null |
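# Illustrative examples (added, not part of the original module), derived from the
# path helpers above:
#   warmup_file_path('/tmp/model/serving')
#     -> '/tmp/model/serving/assets.extra/tf_serving_warmup_requests'
#   eval_model_dir('/tmp/model', is_old_artifact=True)
#     -> '/tmp/model/eval_model_dir'
# For new-style artifacts the subdirectory name comes from
# path_constants.EVAL_MODEL_DIR instead.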
5,994 | import binascii
import pytest
from paradox import event
from paradox.hardware.evo.event import event_map
from paradox.hardware.evo.parsers import LiveEvent, RequestedEvent
def label_provider(type, id):
if type == "user":
assert id == 1
return "Test"
elif type == "partition":
assert id == 5
return "First floor"
elif type == "door":
assert id == 5
return "Door 1"
else:
raise AssertionError()
def test_zone_ok(mocker):
payload = b"\xe2\xff\xad\x06\x14\x13\x01\x04\x0e\x10\x00\x01\x05\x00\x00\x00\x00\x00\x02Living room \x00\xcc"
raw = LiveEvent.parse(payload)
def my_label_provider(type, id):
assert type == "partition"
assert id == 1
return "First floor"
mocker.patch.dict(
event_map[0], {"message": "Zone {label} OK in partition {@partition}"}
)
event_ = event.LiveEvent(raw, event_map, label_provider=my_label_provider)
assert event_.change == {"open": False}
assert "Zone Living room OK in partition First floor" == event_.message
print(event_)
def test_door_user(mocker):
payload = b"\xe2\xff\xad\x06\x14\x13\x01\x04\x0e\x10\x06\x01\x05\x01\x00\x00\x00\x00\x02Living room \x00\xd3"
raw = LiveEvent.parse(payload)
mocker.patch.dict(event_map[6], {"message": "User {@user} access on door {@door}"})
event_ = event.LiveEvent(raw, event_map, label_provider=label_provider)
assert "User Test access on door Door 1" == event_.message
print(event_)
def test_door_user2(mocker):
payload = b"\xe2\xff\xad\x06\x14\x13\x01\x04\x0e\x10\x06\x01\x05\x01\x00\x00\x00\x00\x02Living room \x00\xd3"
raw = LiveEvent.parse(payload)
def label_provider(type, id):
if type == "user":
assert id == 5
return "Test"
elif type == "partition":
assert id == 5
return "First floor"
elif type == "door":
assert id == 5
return "Door 1"
else:
raise AssertionError()
mocker.patch.dict(
event_map[6], {"message": "User {@user#minor} access on door {@door}"}
)
event_ = event.LiveEvent(raw, event_map, label_provider=label_provider)
assert "User Test access on door Door 1" == event_.message
print(event_)
def test_zone_open():
payload = b"\xe2\xff\xad\x06\x14\x13\x01\x04\x0e\x10\x01\x01\x05\x00\x00\x00\x00\x00\x02Living room \x00\xcd"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map, label_provider=label_provider)
assert event_.change == {"open": True}
assert "Zone Living room open" == event_.message
print(event_)
def test_event_winload_connected():
payload = b"\xe2\xff\xaa\xb0\x14\x13\x01\x04\x0b$-\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc7"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Special events: WinLoad in (connected)" == event_.message
print(event_)
def test_event_clock_restore():
payload = b"\xe2\xff\xaa\xb1\x14\x13\x01\x04\x0b$%\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xc2"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
print(event_)
assert "Trouble restore: Clock loss restore" == event_.message
assert all(t in event_.tags for t in ["trouble", "clock"])
def test_disconnect_event():
payload = b"\xe0\xff\xe1\xe8\x14\x13\x02\x11\x0f%-\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00H"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Special events: WinLoad out (disconnected)" == event_.message
print(event_)
def METHOD_NAME():
payload = binascii.unhexlify(
"e243000009fa79942713a500060000000000819426000400090000000000819426ab8500010000000000819426ab8920010000000000819426ab8910010000000000de"
)
raw = RequestedEvent.parse(payload)
values = raw.fields.value
assert values.po.command == 0xE and (
not hasattr(values, "event_source") or values.event_source == 0xFF
)
print(raw)
def test_zone_generated_alarm():
payload = binascii.unhexlify(
"e2ff1cc414130b010f2c1801030000000000024f66666963652020202020202020202000d9"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Zone Office in alarm" == event_.message
print(event_)
def test_zone_alarm_restored():
payload = binascii.unhexlify(
"e2ff1cd914130b010f2f1a0102000000000002456e7472616e636520202020202020200096"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Zone Entrance alarm restore" == event_.message
print(event_)
def test_partition_armed_by_pai():
payload = binascii.unhexlify(
"e2ff187914130b0e0e3b0c0101000000000000000000000000000000000000000000000009"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert (
"Special arming [partition:1]: arming with Winload by [user:0]"
== event_.message
)
print(event_)
def test_partition_armed_by_user():
payload = binascii.unhexlify(
"e2ffe8601414030d152d0a0802000000000001546573740000000000000000000000000058"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Arming [partition:8] with [user:2] code" == event_.message
print(event_)
def test_partition_disarmed_by_user():
payload = binascii.unhexlify(
"e2ffe85e1414030d152c0e0802000000000001546573740000000000000000000000000059"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "[partition:8] disarmed with [user:2] code" == event_.message
print(event_)
def test_partition_armed_by_user_master():
payload = binascii.unhexlify(
"e2ffe8701414030d1539090801000000000001546573740000000000000000000000000072"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "Arming [partition:8] with [user:1] master code" == event_.message
print(event_)
def test_partition_disarmed_by_user_master():
payload = binascii.unhexlify(
"e2ffe86e1414030d15390d0801000000000001546573740000000000000000000000000074"
)
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "[partition:8] disarmed with [user:1] master code" == event_.message
print(event_)
def test_door_access():
payload = b"\xe0\xff\xc1d\x14\x13\x0c\x05\x11\x19\x06\x00\x01\n\x00\x00\x00\x00\x03V\x99LIKS DOOR\x00\x00\x00\x00\x00+\x1b"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "[user:10] access on door VÄLIKS DOOR" == event_.message
print(event_)
def test_door_access_granted():
payload = b"\xe0\xff\xc1c\x14\x13\x0c\x05\x11\x19>\x00\n\x01\x00\x00\x00\x00\x01First Lastname \x001\x5d"
raw = LiveEvent.parse(payload)
event_ = event.LiveEvent(raw, event_map)
assert "[door:1] access granted to user First Lastname" == event_.message
print(event_)
@pytest.mark.parametrize(
"payload",
[
("e2ff004316010002011e04000b00000000000000000000000000000000000000000000006b"),
("e2ff004416010002011e2d000400000000000000000000000000000000000000000000008e"),
],
)
def test_event_without_date(payload):
payload = bytes.fromhex(payload)
message = LiveEvent.parse(payload)
event_ = event.LiveEvent(message, event_map)
assert event_.timestamp is None
print(event_)
def test_c2():
binascii.unhexlify("c2001903000b00000001a96c7106152c00200132010000000b")
def test_8207000005fa88():
binascii.unhexlify("8207000005fa88") | null |
5,995 | import importlib
from importlib import abc
from importlib import util
import sys
import types
import unittest
from test.test_importlib import util as test_util
class CollectInit:
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def exec_module(self, module):
return self
class LazyLoaderFactoryTests(unittest.TestCase):
def test_init(self):
factory = util.LazyLoader.factory(CollectInit)
# E.g. what importlib.machinery.FileFinder instantiates loaders with
# plus keyword arguments.
lazy_loader = factory('module name', 'module path', kw='kw')
loader = lazy_loader.loader
self.assertEqual(('module name', 'module path'), loader.args)
self.assertEqual({'kw': 'kw'}, loader.kwargs)
def test_validation(self):
# No exec_module(), no lazy loading.
with self.assertRaises(TypeError):
util.LazyLoader.factory(object)
class TestingImporter(abc.MetaPathFinder, abc.Loader):
module_name = 'lazy_loader_test'
mutated_name = 'changed'
loaded = None
source_code = 'attr = 42; __name__ = {!r}'.format(mutated_name)
def find_spec(self, name, path, target=None):
if name != self.module_name:
return None
return util.spec_from_loader(name, util.LazyLoader(self))
def exec_module(self, module):
exec(self.source_code, module.__dict__)
self.loaded = module
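# Note (added): because find_spec() above wraps the loader in util.LazyLoader,
# exec_module() is deferred until the first attribute access on the imported
# module, so `loaded` stays None until the module is actually touched.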
class LazyLoaderTests(unittest.TestCase):
def test_init(self):
with self.assertRaises(TypeError):
# Classes that don't define exec_module() trigger TypeError.
util.LazyLoader(object)
def new_module(self, source_code=None):
loader = TestingImporter()
if source_code is not None:
loader.source_code = source_code
spec = util.spec_from_loader(TestingImporter.module_name,
util.LazyLoader(loader))
module = spec.loader.create_module(spec)
if module is None:
module = types.ModuleType(TestingImporter.module_name)
module.__spec__ = spec
module.__loader__ = spec.loader
spec.loader.exec_module(module)
# Module is now lazy.
self.assertIsNone(loader.loaded)
return module
# TODO: RUSTPYTHON
@unittest.expectedFailure
def METHOD_NAME(self):
# End-to-end test to verify the load is in fact lazy.
importer = TestingImporter()
assert importer.loaded is None
with test_util.uncache(importer.module_name):
with test_util.import_state(meta_path=[importer]):
module = importlib.import_module(importer.module_name)
self.assertIsNone(importer.loaded)
# Trigger load.
self.assertEqual(module.__loader__, importer)
self.assertIsNotNone(importer.loaded)
self.assertEqual(module, importer.loaded)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_attr_unchanged(self):
# An attribute only mutated as a side-effect of import should not be
# changed needlessly.
module = self.new_module()
self.assertEqual(TestingImporter.mutated_name, module.__name__)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_new_attr(self):
# A new attribute should persist.
module = self.new_module()
module.new_attr = 42
self.assertEqual(42, module.new_attr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mutated_preexisting_attr(self):
# Changing an attribute that already existed on the module --
# e.g. __name__ -- should persist.
module = self.new_module()
module.__name__ = 'bogus'
self.assertEqual('bogus', module.__name__)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_mutated_attr(self):
# Changing an attribute that comes into existence after an import
# should persist.
module = self.new_module()
module.attr = 6
self.assertEqual(6, module.attr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_delete_eventual_attr(self):
# Deleting an attribute should stay deleted.
module = self.new_module()
del module.attr
self.assertFalse(hasattr(module, 'attr'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_delete_preexisting_attr(self):
module = self.new_module()
del module.__name__
self.assertFalse(hasattr(module, '__name__'))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_module_substitution_error(self):
with test_util.uncache(TestingImporter.module_name):
fresh_module = types.ModuleType(TestingImporter.module_name)
sys.modules[TestingImporter.module_name] = fresh_module
module = self.new_module()
with self.assertRaisesRegex(ValueError, "substituted"):
module.__name__
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_module_already_in_sys(self):
with test_util.uncache(TestingImporter.module_name):
module = self.new_module()
sys.modules[TestingImporter.module_name] = module
# Force the load; just care that no exception is raised.
module.__name__
if __name__ == '__main__':
unittest.main() | null |
5,996 | import siliconcompiler
import re
from siliconcompiler import SiliconCompilerError
from siliconcompiler.flows._common import setup_frontend
from siliconcompiler.tools.yosys import syn_fpga as yosys_syn
from siliconcompiler.tools.vpr import place as vpr_place
from siliconcompiler.tools.vpr import route as vpr_route
from siliconcompiler.tools.genfasm import bitstream as genfasm_bitstream
from siliconcompiler.tools.vivado import syn_fpga as vivado_syn
from siliconcompiler.tools.vivado import place as vivado_place
from siliconcompiler.tools.vivado import route as vivado_route
from siliconcompiler.tools.vivado import bitstream as vivado_bitstream
from siliconcompiler.tools.nextpnr import apr as nextpnr_apr
############################################################################
# DOCS
############################################################################
def make_docs(chip):
chip.set('fpga', 'partname', 'ice40up5k-sg48')
return METHOD_NAME(chip)
############################################################################
# Flowgraph Setup
############################################################################
def METHOD_NAME(chip, flowname='fpgaflow'):
'''
A configurable FPGA compilation flow.
The 'fpgaflow' module is a configurable FPGA flow with support for
open source and commercial tool flows.
The following step convention is recommended for VPR.
* **import**: Sources are collected and packaged for compilation
    * **syn**: Synthesize RTL into a device-specific netlist
* **place**: FPGA specific placement step
* **route**: FPGA specific routing step
* **bitstream**: Bitstream generation
Note that nextpnr does not appear to support breaking placement, routing,
and bitstream generation into individual steps, leading to the following
recommended step convention
* **import**: Sources are collected and packaged for compilation
    * **syn**: Synthesize RTL into a device-specific netlist
* **apr**: One-step execution of place, route, bitstream with nextpnr
Schema keypaths:
'''
flow = siliconcompiler.Flow(chip, flowname)
flow_pipe = flow_lookup(chip.get('fpga', 'partname'))
flowtools = setup_frontend(chip)
flowtools.extend(flow_pipe)
# Minimal setup
index = '0'
prevstep = None
for step, tool_module in flowtools:
# Flow
flow.node(flowname, step, tool_module)
if prevstep:
flow.edge(flowname, prevstep, step)
# Hard goals
for metric in ('errors', 'warnings', 'drvs', 'unconstrained',
'holdwns', 'holdtns', 'holdpaths',
'setupwns', 'setuptns', 'setuppaths'):
flow.set('flowgraph', flowname, step, index, 'goal', metric, 0)
# Metrics
for metric in ('luts', 'dsps', 'brams', 'registers',
'pins', 'peakpower', 'leakagepower'):
flow.set('flowgraph', flowname, step, index, 'weight', metric, 1.0)
prevstep = step
return flow
##################################################
def flow_lookup(partname):
'''
    Returns the list of (step, tool) pairs for the flow selected by matching
    the part number against known device patterns.
'''
if not partname:
raise SiliconCompilerError('A part number must be specified to setup the fpga flow.')
partname = partname.lower()
###########
# xilinx
###########
spartan6 = bool(re.match('^xc6', partname))
spartan7 = bool(re.match('^xc7s', partname))
artix = bool(re.match('^xc7a', partname))
artixultra = bool(re.match('^au', partname))
kintex7 = bool(re.match('^xc7k', partname))
kintexultra = bool(re.match('^xcku', partname))
zynq = bool(re.match(r'^z\-7', partname))
zynqultra = bool(re.match('^zu', partname))
virtex7 = bool(re.match('^xc7v', partname))
virtexultra = bool(re.match('^xcvu', partname))
xilinx = spartan6 or spartan7 or \
artix or artixultra or \
kintex7 or kintexultra or \
zynq or zynqultra or \
virtex7 or virtexultra
xilinx_flow = [
('syn_fpga', vivado_syn),
('place', vivado_place),
('route', vivado_route),
('bitstream', vivado_bitstream)]
#############
# intel
#############
cyclone4 = bool(re.match('^ep4', partname))
cyclone5 = bool(re.match('^5cs', partname))
cyclone10 = bool(re.match('^10cl', partname))
stratix5 = bool(re.match('^5sg', partname))
intel = cyclone10 or cyclone4 or cyclone5 or stratix5
intel_flow = None
###########
# lattice
###########
ice40 = re.match('^ice40', partname)
ice40_flow = [('syn', yosys_syn),
('apr', nextpnr_apr)]
###########
# example
###########
example = re.match('^example_arch', partname)
example_flow = [('syn', yosys_syn),
('place', vpr_place),
('route', vpr_route),
('bitstream', genfasm_bitstream)]
flow = None
if xilinx:
flow = xilinx_flow
elif intel:
flow = intel_flow
elif ice40:
flow = ice40_flow
elif example:
flow = example_flow
if not flow:
raise SiliconCompilerError(
f'fpgaflow: unsupported partname {partname}'
)
return flow
##################################################
if __name__ == "__main__":
flow = make_docs(siliconcompiler.Chip('<flow>'))
flow.write_flowgraph(f"{flow.top()}.png", flow=flow.top()) | null |
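# Illustrative usage (added; the Chip calls below mirror make_docs above and are
# assumptions about the surrounding siliconcompiler API rather than part of this
# module):
#   chip = siliconcompiler.Chip('top')
#   chip.set('fpga', 'partname', 'ice40up5k-sg48')
#   flow = METHOD_NAME(chip)  # builds import -> syn -> apr for the iCE40 part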
5,997 | # Copyright 2020-2023 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test MindSpore grammar constraints:
1. a function must have a return statement
2. the raise statement cannot be used
"""
# pylint: disable=R1705, R1710, W0223
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore import dtype as mstype
context.set_context(mode=context.GRAPH_MODE)
def METHOD_NAME():
class NetMissReturn(nn.Cell):
def construct(self, x, y, z):
if x == 1:
return 10
elif x == 20:
if y == 1:
return 3
elif y == 2:
for i in range(z):
return i + z
i = 0
while i < z:
return i + z
def g(u):
return x + u
                    # here method 'construct' is missing a return statement, meaning it returns None
g(y)
else:
return 7
else:
return 5
net = NetMissReturn()
x = Tensor(0, mstype.int32)
y = Tensor(5, mstype.int32)
z = Tensor(2, mstype.int32)
with pytest.raises(TypeError) as er:
net(x, y, z)
assert "For 'make_range', the 0th input should be a int scalar" in str(er.value)
def test_nest_function_missing_return():
class NetNestFuncMissReturn(nn.Cell):
def construct(self, x, y, z):
if x == 1:
return 10
elif x == 20:
if y == 1:
return 3
elif y == 2:
for i in range(z):
return i + z
i = 0
while i < z:
return i + z
def g(u):
x += u
                    # nested function 'g' is missing a return statement, meaning it returns None
return g(y)
else:
return 7
else:
return 5
net = NetNestFuncMissReturn()
x = Tensor(0, mstype.int32)
y = Tensor(5, mstype.int32)
z = Tensor(2, mstype.int32)
with pytest.raises(TypeError) as er:
net(x, y, z)
assert "For 'make_range', the 0th input should be a int scalar" in str(er.value)
@pytest.mark.skip(reason='Case will not appear for now, but may appear in the future')
def test_raise_in_method():
class NetRaiseInMethod(nn.Cell):
def construct(self, x, y, z):
if x == 1:
return Tensor(10, mstype.int32)
elif x == 20:
# add not support grammar 'raise' here
raise ValueError('Illegal case')
else:
return y + z
net = NetRaiseInMethod()
x = Tensor(0, mstype.int32)
y = Tensor(5, mstype.int32)
z = Tensor(2, mstype.int32)
with pytest.raises(RuntimeError) as er:
net(x, y, z)
assert "Currently only supports raise in constant scenarios." in str(er.value)
def test_nest_branch_with_return():
class NetBranchWithReturn(nn.Cell):
def construct(self, x, y, z):
if x == 1:
return 10
else:
return 5
net = NetBranchWithReturn()
x = Tensor(0, mstype.int32)
y = Tensor(5, mstype.int32)
z = Tensor(2, mstype.int32)
net(x, y, z)
def test_any_with_no_return():
class NetAnyNoReturn(nn.Cell):
def construct(self, inp):
result = inp.any()
if result:
return 6
np_input = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.bool_)
tensor = Tensor(np_input)
net = NetAnyNoReturn()
net(tensor)
def test_missing_construct():
class NetMissConstruct(nn.Cell):
def construct1(self, inp):
return 5
np_input = np.arange(2 * 3 * 4).reshape((2, 3, 4)).astype(np.bool_)
tensor = Tensor(np_input)
net = NetMissConstruct()
with pytest.raises(AttributeError) as info:
net(tensor)
assert "construct" in str(info.value)
assert "not defined" in str(info.value) | null |
5,998 | from collections import ChainMap
from typing import (
TYPE_CHECKING,
Any,
Dict,
ItemsView,
Iterable,
MutableMapping,
NamedTuple,
Optional,
Type,
TypeVar,
Union,
ValuesView,
)
from docarray.array.list_advance_indexing import ListAdvancedIndexing
from docarray.typing import NdArray
from docarray.typing.tensor.abstract_tensor import AbstractTensor
if TYPE_CHECKING:
from docarray.array.doc_vec.doc_vec import DocVec
IndexIterType = Union[slice, Iterable[int], Iterable[bool], None]
T = TypeVar('T', bound='ColumnStorage')
class ColumnsJsonCompatible(NamedTuple):
tensor_columns: Dict[str, Any]
doc_columns: Dict[str, Any]
docs_vec_columns: Dict[str, Any]
any_columns: Dict[str, Any]
class ColumnStorage:
"""
ColumnStorage is a container to store the columns of the
:class:`~docarray.array.doc_vec.DocVec`.
:param tensor_columns: a Dict of AbstractTensor
:param doc_columns: a Dict of :class:`~docarray.array.doc_vec.DocVec`
:param docs_vec_columns: a Dict of List of :class:`~docarray.array.doc_vec.DocVec`
:param any_columns: a Dict of List
:param tensor_type: Class used to wrap the doc_vec tensors
"""
def __init__(
self,
tensor_columns: Dict[str, Optional[AbstractTensor]],
doc_columns: Dict[str, Optional['DocVec']],
docs_vec_columns: Dict[str, Optional[ListAdvancedIndexing['DocVec']]],
any_columns: Dict[str, ListAdvancedIndexing],
tensor_type: Type[AbstractTensor] = NdArray,
):
self.tensor_columns = tensor_columns
self.doc_columns = doc_columns
self.docs_vec_columns = docs_vec_columns
self.any_columns = any_columns
self.tensor_type = tensor_type
self.columns = ChainMap( # type: ignore
self.tensor_columns, # type: ignore
self.doc_columns, # type: ignore
self.docs_vec_columns, # type: ignore
self.any_columns, # type: ignore
) # type: ignore
def __len__(self) -> int:
        return len(self.any_columns['id'])  # TODO: what if IDs are None?
def __getitem__(self: T, item: IndexIterType) -> T:
if isinstance(item, tuple):
item = list(item)
tensor_columns = {
key: col[item] if col is not None else None
for key, col in self.tensor_columns.items()
}
doc_columns = {
key: col[item] if col is not None else None
for key, col in self.doc_columns.items()
}
docs_vec_columns = {
key: col[item] if col is not None else None
for key, col in self.docs_vec_columns.items()
}
any_columns = {
key: col[item] if col is not None else None
for key, col in self.any_columns.items()
}
return self.__class__(
tensor_columns,
doc_columns,
docs_vec_columns,
any_columns,
self.tensor_type,
)
def columns_json_compatible(self) -> ColumnsJsonCompatible:
tens_cols = {
key: value._docarray_to_json_compatible() if value is not None else value
for key, value in self.tensor_columns.items()
}
doc_cols = {
key: value._docarray_to_json_compatible() if value is not None else value
for key, value in self.doc_columns.items()
}
doc_vec_cols = {
key: [vec._docarray_to_json_compatible() for vec in value]
if value is not None
else value
for key, value in self.docs_vec_columns.items()
}
return ColumnsJsonCompatible(
tens_cols, doc_cols, doc_vec_cols, self.any_columns
)
def __eq__(self, other: Any) -> bool:
if not isinstance(other, ColumnStorage):
return False
if self.tensor_type != other.tensor_type:
return False
for col_map_self, col_map_other in zip(self.columns.maps, other.columns.maps):
if col_map_self.keys() != col_map_other.keys():
return False
for key_self in col_map_self.keys():
if key_self == 'id':
continue
val1, val2 = col_map_self[key_self], col_map_other[key_self]
if isinstance(val1, AbstractTensor):
values_are_equal = val1.get_comp_backend().equal(val1, val2)
elif isinstance(val2, AbstractTensor):
values_are_equal = val2.get_comp_backend().equal(val1, val2)
else:
values_are_equal = val1 == val2
if not values_are_equal:
return False
return True
class ColumnStorageView(dict, MutableMapping[str, Any]):
index: int
storage: ColumnStorage
def __init__(self, index: int, storage: ColumnStorage):
super().__init__()
self.index = index
self.storage = storage
def __getitem__(self, name: str) -> Any:
if name in self.storage.tensor_columns.keys():
tensor = self.storage.tensor_columns[name]
if tensor is None:
return None
if tensor.get_comp_backend().n_dim(tensor) == 1:
                # To ensure consistency between numpy and pytorch we wrap the
                # scalar in a tensor of ndim = 1; otherwise numpy would pass by
                # value whereas torch passes by reference.
col = self.storage.tensor_columns[name]
if col is not None:
return col[self.index : self.index + 1]
else:
return None
col = self.storage.columns[name]
if col is None:
return None
return col[self.index]
def __reduce__(self):
# implementing __reduce__ to solve a pickle issue when subclassing dict
# see here: https://stackoverflow.com/questions/21144845/how-can-i-unpickle-a-subclass-of-dict-that-validates-with-setitem-in-pytho
return (ColumnStorageView, (self.index, self.storage))
def __setitem__(self, name, value) -> None:
if self.storage.columns[name] is None:
raise ValueError(
                f'Cannot set an item on a None column. This means that '
                f'the DocVec that encapsulates this doc has the field '
                f'{name} set to None. If you want to modify that, you need to do it at the '
                f'DocVec level, e.g. `docs.field = np.zeros(10)`.'
)
self.storage.columns[name][self.index] = value
def __delitem__(self, key):
raise RuntimeError('Cannot delete an item from a StorageView')
def __iter__(self):
return self.storage.columns.keys()
def __len__(self):
return len(self.storage.columns)
def _local_dict(self):
"""The storage.columns dictionary with every value at position self.index"""
return {key: self[key] for key in self.storage.columns.keys()}
def keys(self):
return self.storage.columns.keys()
# type ignore because return type dict_values is private and we cannot use it.
# context: https://github.com/python/typing/discussions/1033
def METHOD_NAME(self) -> ValuesView: # type: ignore
return ValuesView(self._local_dict())
# type ignore because return type dict_items is private and we cannot use it.
# context: https://github.com/python/typing/discussions/1033
def items(self) -> ItemsView: # type: ignore
return ItemsView(self._local_dict())
def to_dict(self) -> Dict[str, Any]:
"""
Return a dictionary with the same keys as the storage.columns
and the values at position self.index.
Warning: modification on the dict will not be reflected on the storage.
"""
return {key: self[key] for key in self.storage.columns.keys()} | null |
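# Minimal illustrative sketch (added, not part of the original module): build a
# ColumnStorage with one tensor column plus the 'id' column, then read a single
# row through a ColumnStorageView. The field names, shapes, and the use of
# NdArray._docarray_from_native are assumptions made for the example.
def _example_column_storage() -> Dict[str, Any]:
    import numpy as np

    storage = ColumnStorage(
        tensor_columns={'embedding': NdArray._docarray_from_native(np.zeros((3, 4)))},
        doc_columns={},
        docs_vec_columns={},
        any_columns={'id': ListAdvancedIndexing(['a', 'b', 'c'])},
    )
    view = ColumnStorageView(index=1, storage=storage)
    # view['id'] == 'b'; view['embedding'] is row 1 of the (3, 4) tensor
    return view.to_dict()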
5,999 | from sympy.concrete.summations import Sum
from sympy.core.expr import Expr
from sympy.core.function import (Derivative, Function, diff, Subs)
from sympy.core.numbers import (I, Rational, pi)
from sympy.core.relational import Eq
from sympy.core.singleton import S
from sympy.core.symbol import Symbol
from sympy.functions.combinatorial.factorials import factorial
from sympy.functions.elementary.complexes import (im, re)
from sympy.functions.elementary.exponential import (exp, log)
from sympy.functions.elementary.miscellaneous import Max
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import (cos, cot, sin, tan)
from sympy.tensor.array.ndim_array import NDimArray
from sympy.testing.pytest import raises
from sympy.abc import a, b, c, x, y, z
def METHOD_NAME():
assert Rational(1, 3).diff(x) is S.Zero
assert I.diff(x) is S.Zero
assert pi.diff(x) is S.Zero
assert x.diff(x, 0) == x
assert (x**2).diff(x, 2, x) == 0
assert (x**2).diff((x, 2), x) == 0
assert (x**2).diff((x, 1), x) == 2
assert (x**2).diff((x, 1), (x, 1)) == 2
assert (x**2).diff((x, 2)) == 2
assert (x**2).diff(x, y, 0) == 2*x
assert (x**2).diff(x, (y, 0)) == 2*x
assert (x**2).diff(x, y) == 0
raises(ValueError, lambda: x.diff(1, x))
p = Rational(5)
e = a*b + b**p
assert e.diff(a) == b
assert e.diff(b) == a + 5*b**4
assert e.diff(b).diff(a) == Rational(1)
e = a*(b + c)
assert e.diff(a) == b + c
assert e.diff(b) == a
assert e.diff(b).diff(a) == Rational(1)
e = c**p
assert e.diff(c, 6) == Rational(0)
assert e.diff(c, 5) == Rational(120)
e = c**Rational(2)
assert e.diff(c) == 2*c
e = a*b*c
assert e.diff(c) == a*b
def test_diff2():
n3 = Rational(3)
n2 = Rational(2)
n6 = Rational(6)
e = n3*(-n2 + x**n2)*cos(x) + x*(-n6 + x**n2)*sin(x)
assert e == 3*(-2 + x**2)*cos(x) + x*(-6 + x**2)*sin(x)
assert e.diff(x).expand() == x**3*cos(x)
e = (x + 1)**3
assert e.diff(x) == 3*(x + 1)**2
e = x*(x + 1)**3
assert e.diff(x) == (x + 1)**3 + 3*x*(x + 1)**2
e = 2*exp(x*x)*x
assert e.diff(x) == 2*exp(x**2) + 4*x**2*exp(x**2)
def test_diff3():
p = Rational(5)
e = a*b + sin(b**p)
assert e == a*b + sin(b**5)
assert e.diff(a) == b
assert e.diff(b) == a + 5*b**4*cos(b**5)
e = tan(c)
assert e == tan(c)
assert e.diff(c) in [cos(c)**(-2), 1 + sin(c)**2/cos(c)**2, 1 + tan(c)**2]
e = c*log(c) - c
assert e == -c + c*log(c)
assert e.diff(c) == log(c)
e = log(sin(c))
assert e == log(sin(c))
assert e.diff(c) in [sin(c)**(-1)*cos(c), cot(c)]
e = (Rational(2)**a/log(Rational(2)))
assert e == 2**a*log(Rational(2))**(-1)
assert e.diff(a) == 2**a
def test_diff_no_eval_derivative():
class My(Expr):
def __new__(cls, x):
return Expr.__new__(cls, x)
# My doesn't have its own _eval_derivative method
assert My(x).diff(x).func is Derivative
assert My(x).diff(x, 3).func is Derivative
assert re(x).diff(x, 2) == Derivative(re(x), (x, 2)) # issue 15518
assert diff(NDimArray([re(x), im(x)]), (x, 2)) == NDimArray(
[Derivative(re(x), (x, 2)), Derivative(im(x), (x, 2))])
# it doesn't have y so it shouldn't need a method for this case
assert My(x).diff(y) == 0
def test_speed():
# this should return in 0.0s. If it takes forever, it's wrong.
assert x.diff(x, 10**8) == 0
def test_deriv_noncommutative():
A = Symbol("A", commutative=False)
f = Function("f")
assert A*f(x)*A == f(x)*A**2
assert A*f(x).diff(x)*A == f(x).diff(x) * A**2
def test_diff_nth_derivative():
f = Function("f")
n = Symbol("n", integer=True)
expr = diff(sin(x), (x, n))
expr2 = diff(f(x), (x, 2))
expr3 = diff(f(x), (x, n))
assert expr.subs(sin(x), cos(-x)) == Derivative(cos(-x), (x, n))
assert expr.subs(n, 1).doit() == cos(x)
assert expr.subs(n, 2).doit() == -sin(x)
assert expr2.subs(Derivative(f(x), x), y) == Derivative(y, x)
# Currently not supported (cannot determine if `n > 1`):
#assert expr3.subs(Derivative(f(x), x), y) == Derivative(y, (x, n-1))
assert expr3 == Derivative(f(x), (x, n))
assert diff(x, (x, n)) == Piecewise((x, Eq(n, 0)), (1, Eq(n, 1)), (0, True))
assert diff(2*x, (x, n)).dummy_eq(
Sum(Piecewise((2*x*factorial(n)/(factorial(y)*factorial(-y + n)),
Eq(y, 0) & Eq(Max(0, -y + n), 0)),
(2*factorial(n)/(factorial(y)*factorial(-y + n)), Eq(y, 0) & Eq(Max(0,
-y + n), 1)), (0, True)), (y, 0, n)))
# TODO: assert diff(x**2, (x, n)) == x**(2-n)*ff(2, n)
exprm = x*sin(x)
mul_diff = diff(exprm, (x, n))
assert isinstance(mul_diff, Sum)
for i in range(5):
assert mul_diff.subs(n, i).doit() == exprm.diff((x, i)).expand()
exprm2 = 2*y*x*sin(x)*cos(x)*log(x)*exp(x)
dex = exprm2.diff((x, n))
assert isinstance(dex, Sum)
for i in range(7):
assert dex.subs(n, i).doit().expand() == \
exprm2.diff((x, i)).expand()
assert (cos(x)*sin(y)).diff([[x, y, z]]) == NDimArray([
-sin(x)*sin(y), cos(x)*cos(y), 0])
def test_issue_16160():
assert Derivative(x**3, (x, x)).subs(x, 2) == Subs(
Derivative(x**3, (x, 2)), x, 2)
assert Derivative(1 + x**3, (x, x)).subs(x, 0
) == Derivative(1 + y**3, (y, 0)).subs(y, 0) | null |