Dataset columns:

label: string, lengths 1 to 61. For each row this appears to be the original name of the method that is masked as METHOD_NAME in the code field.
code: string, lengths 4k to 8k. Each value is a Python source file with one method name replaced by the placeholder METHOD_NAME.
code_compressed: null for every row.

The rows below repeat in that order: a label value, a code value, and a code_compressed value (null).
communicate
""" :codeauthor: Pedro Algarvio ([email protected]) salt.utils.nb_popen ~~~~~~~~~~~~~~~~~~~ Non blocking subprocess Popen. This functionality has been adapted to work on windows following the recipe found on: http://code.activestate.com/recipes/440554/ """ import errno import logging import os import select import subprocess import sys import tempfile import time mswindows = sys.platform == "win32" try: import msvcrt import pywintypes from win32file import ReadFile, WriteFile from win32pipe import PeekNamedPipe except ImportError: import fcntl log = logging.getLogger(__name__) class NonBlockingPopen(subprocess.Popen): # _stdin_logger_name_ = 'salt.utils.nb_popen.STDIN.PID-{pid}' _stdout_logger_name_ = "salt.utils.nb_popen.STDOUT.PID-{pid}" _stderr_logger_name_ = "salt.utils.nb_popen.STDERR.PID-{pid}" def __init__(self, *args, **kwargs): self.stream_stds = kwargs.pop("stream_stds", False) # Half a megabyte in memory is more than enough to start writing to # a temporary file. self.max_size_in_mem = kwargs.pop("max_size_in_mem", 512000) # Let's configure the std{in, out,err} logging handler names # self._stdin_logger_name_ = kwargs.pop( # 'stdin_logger_name', self._stdin_logger_name_ # ) self._stdout_logger_name_ = kwargs.pop( "stdout_logger_name", self._stdout_logger_name_ ) self._stderr_logger_name_ = kwargs.pop( "stderr_logger_name", self._stderr_logger_name_ ) logging_command = kwargs.pop("logging_command", None) stderr = kwargs.get("stderr", None) super().__init__(*args, **kwargs) # self._stdin_logger = logging.getLogger( # self._stdin_logger_name_.format(pid=self.pid) # ) self.stdout_buff = tempfile.SpooledTemporaryFile(self.max_size_in_mem) self._stdout_logger = logging.getLogger( self._stdout_logger_name_.format(pid=self.pid) ) if stderr is subprocess.STDOUT: self.stderr_buff = self.stdout_buff self._stderr_logger = self._stdout_logger else: self.stderr_buff = tempfile.SpooledTemporaryFile(self.max_size_in_mem) self._stderr_logger = logging.getLogger( self._stderr_logger_name_.format(pid=self.pid) ) log.info( "Running command under pid %s: '%s'", self.pid, args if logging_command is None else logging_command, ) def recv(self, maxsize=None): return self._recv("stdout", maxsize) def recv_err(self, maxsize=None): return self._recv("stderr", maxsize) def send_recv(self, input="", maxsize=None): return self.send(input), self.recv(maxsize), self.recv_err(maxsize) def get_conn_maxsize(self, which, maxsize): if maxsize is None: maxsize = 1024 elif maxsize < 1: maxsize = 1 return getattr(self, which), maxsize def _close(self, which): getattr(self, which).close() setattr(self, which, None) if mswindows: def send(self, input): if not self.stdin: return None try: x = msvcrt.get_osfhandle(self.stdin.fileno()) (errCode, written) = WriteFile(x, input) # self._stdin_logger.debug(input.rstrip()) except ValueError: return self._close("stdin") except (pywintypes.error, Exception) as why: if why.args[0] in (109, errno.ESHUTDOWN): return self._close("stdin") raise return written def _recv(self, which, maxsize): conn, maxsize = self.get_conn_maxsize(which, maxsize) if conn is None: return None try: x = msvcrt.get_osfhandle(conn.fileno()) (read, nAvail, nMessage) = PeekNamedPipe(x, 0) if maxsize < nAvail: nAvail = maxsize if nAvail > 0: (errCode, read) = ReadFile(x, nAvail, None) except ValueError: return self._close(which) except (pywintypes.error, Exception) as why: if why.args[0] in (109, errno.ESHUTDOWN): return self._close(which) raise getattr(self, "{}_buff".format(which)).write(read) getattr(self, 
"_{}_logger".format(which)).debug(read.rstrip()) if self.stream_stds: getattr(sys, which).write(read) if self.universal_newlines: read = self._translate_newlines(read) return read else: def send(self, input): if not self.stdin: return None if not select.select([], [self.stdin], [], 0)[1]: return 0 try: written = os.write(self.stdin.fileno(), input) # self._stdin_logger.debug(input.rstrip()) except OSError as why: if why.args[0] == errno.EPIPE: # broken pipe return self._close("stdin") raise return written def _recv(self, which, maxsize): conn, maxsize = self.get_conn_maxsize(which, maxsize) if conn is None: return None flags = fcntl.fcntl(conn, fcntl.F_GETFL) if not conn.closed: fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK) try: if not select.select([conn], [], [], 0)[0]: return "" buff = conn.read(maxsize) if not buff: return self._close(which) if self.universal_newlines: buff = self._translate_newlines(buff) getattr(self, "{}_buff".format(which)).write(buff) getattr(self, "_{}_logger".format(which)).debug(buff.rstrip()) if self.stream_stds: getattr(sys, which).write(buff) return buff finally: if not conn.closed: fcntl.fcntl(conn, fcntl.F_SETFL, flags) def poll_and_read_until_finish(self, interval=0.01): silent_iterations = 0 while self.poll() is None: if self.stdout is not None: silent_iterations = 0 self.recv() if self.stderr is not None: silent_iterations = 0 self.recv_err() silent_iterations += 1 if silent_iterations > 100: silent_iterations = 0 (stdoutdata, stderrdata) = self.METHOD_NAME() if stdoutdata: log.debug(stdoutdata) if stderrdata: log.error(stderrdata) time.sleep(interval) def METHOD_NAME(self, input=None): # pylint: disable=arguments-differ super().METHOD_NAME(input) self.stdout_buff.flush() self.stdout_buff.seek(0) self.stderr_buff.flush() self.stderr_buff.seek(0) return self.stdout_buff.read(), self.stderr_buff.read()
null
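As a reading aid (not part of the dataset row above): a minimal sketch of how a NonBlockingPopen like the one in that sample is typically driven, assuming the masked METHOD_NAME is communicate, which is what the row label says. The command is made up for illustration.

import subprocess

# Spawn the child with piped std streams; the class buffers output into
# SpooledTemporaryFile objects as it polls.
proc = NonBlockingPopen(["echo", "hello"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# Drain stdout/stderr without blocking until the child exits.
proc.poll_and_read_until_finish(interval=0.05)

# Per the row label, METHOD_NAME is communicate(); it flushes and rewinds the
# spooled buffers and returns their full contents.
out, err = proc.communicate()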
connect
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import abc import base64 import logging import shlex from urllib.parse import urlencode import arrow import tornado.gen from django.utils.encoding import smart_bytes, smart_str from django.utils.translation import ugettext_lazy as _ from tornado.httpclient import HTTPRequest from tornado.ioloop import IOLoop from tornado.websocket import websocket_connect from backend.web_console import constants from backend.web_console.utils import clean_bash_escape, hello_message logger = logging.getLogger(__name__) class BCSClientBase(abc.ABC): def __init__(self, url, rows, cols, msg_handler): self.init_rows = rows self.init_cols = cols self.url = HTTPRequest(url, validate_cert=False) self.msg_handler = msg_handler self.ws = None self.output_record = [] self.output_buffer = "" self.last_output_ts = IOLoop.current().time() @tornado.gen.coroutine def METHOD_NAME(self): logger.info("trying to connect %s, %s", self.url.url, self.url.headers) try: self.ws = yield websocket_connect(self.url, ping_interval=constants.WEBSOCKET_PING_INTERVAL) except Exception as e: logger.exception("connection error, %s" % e) self.msg_handler.close() else: self.post_connected() self.run() def encode_console_msg(self, msg): """前后端统一使用base64编码""" encode_msg = base64.b64encode(smart_bytes(msg)) return encode_msg def post_connected(self): logger.info("bcs client connected, %s", self.msg_handler.user_pod_name) msg = hello_message(self.msg_handler.source) self.msg_handler.write_message(self.encode_console_msg(msg)) self.msg_handler.start_record() self.msg_handler.tick_timeout() self.set_pty_size(self.init_rows, self.init_cols) def flush_output_record(self): """获取输出记录""" record = self.output_record[:] self.output_record = [] return record def close_transmission(self): """结束通讯, 发送CTRL-D ASCII编码 EOT 04 """ try: self.write_message(chr(4)) except Exception as error: logger.warning("close_transmission %s error: %s", self.msg_handler.user_pod_name, error) def handle_message(self, message): """消息格式转换""" return message def write_message(self, message): """写入消息""" self.ws.write_message(message) @classmethod def get_command_params(cls, context): """获取k8s标准的命令参数""" if context.get("command") and context["command"] != "sh": command_list = shlex.split(context["command"]) else: command_list = constants.DEFAULT_COMMAND command_list = [("command", i) for i in command_list] command = urlencode(command_list) return command @abc.abstractmethod def set_pty_size(self, rows: int, cols: int): """自动宽度适应""" @tornado.gen.coroutine def run(self): while True: msg = yield self.ws.read_message() if msg is None: logger.info("bcs client connection closed, %s", self.msg_handler.user_pod_name) message = str(_("BCS Console 服务端连接断开,请重新登录")) self.msg_handler.close(reason=message) break if self.msg_handler.stream.closed(): logger.info("msg_handler 
connection closed, %s", self.msg_handler.user_pod_name) self.ws.close() break try: self.last_output_ts = IOLoop.current().time() # 不同类型, 子类继承处理 message raw_msg = self.handle_message(msg) if not raw_msg: continue try: msg = smart_str(raw_msg) except Exception: msg = smart_str(raw_msg, "latin1") self.output_buffer += msg if constants.OUTPUT_LINE_BREAKER in self.output_buffer: line_msg = self.output_buffer.split(constants.OUTPUT_LINE_BREAKER) for i in line_msg[:-1]: record = "%s: %s" % (arrow.now().strftime("%Y-%m-%d %H:%M:%S.%f"), clean_bash_escape(i)) self.output_record.append(record) # 前面多行已经赋值到record, 最后一行可能剩余未换行的数据 self.output_buffer = line_msg[-1] # 前端对\r不会换行处理,在后台替换,规则是前后没有\n的\r字符,都会添加\n # msg = re.sub(r'(?<!\n)\r(?!\n)', '\r\n', msg) # 删除异常回车键 # msg = re.sub(r'[\b]+$', '\b', msg) self.msg_handler.write_message(self.encode_console_msg(raw_msg)) except Exception as e: logger.exception(e) self.ws.close()
null
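As a reading aid (not part of the row): the sample above is an abstract base class, so a concrete client has to supply set_pty_size. A hypothetical minimal subclass, with the websocket URL and message handler assumed to exist elsewhere:

class EchoBCSClient(BCSClientBase):
    def set_pty_size(self, rows: int, cols: int):
        # A real client would send a terminal-resize control frame here;
        # this stub only logs the requested size.
        logger.info("resize pty to %s rows x %s cols", rows, cols)

# client = EchoBCSClient(ws_url, rows=24, cols=80, msg_handler=handler)
# IOLoop.current().spawn_callback(client.connect)  # row label: METHOD_NAME is connect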
copy tensor
# Copyright (c) Alibaba, Inc. and its affiliates. from typing import List import numpy as np import torch def collate_fn(samples, pad_idx, eos_idx): r""" convert the sample to batch tensor. """ if len(samples) == 0: return {} def merge(key): return collate_tokens([s[key] for s in samples], pad_idx, eos_idx=eos_idx) batch = { 'nsentences': len(samples), 'net_input': {}, } if samples[0].get('source', None) is not None: batch['net_input']['input_ids'] = merge('source') if samples[0].get('id', None) is not None: batch['id'] = np.array([s.get('id') for s in samples]) if samples[0].get('target', None) is not None: batch['target'] = merge('target') tgt_lengths = torch.LongTensor( [s['target'].ne(pad_idx).long().sum() for s in samples]) ntokens = tgt_lengths.sum().item() batch['ntokens'] = ntokens if samples[0].get('prev_output_tokens', None) is not None: batch['net_input']['decoder_input_ids'] = merge('prev_output_tokens') if samples[0].get('patch_image', None) is not None: batch['net_input']['patch_images'] = torch.stack( [sample['patch_image'] for sample in samples], dim=0) if samples[0].get('patch_mask', None) is not None: batch['net_input']['patch_masks'] = torch.cat( [sample['patch_mask'] for sample in samples]) # image generation if samples[0].get('code_mask', None) is not None: batch['net_input']['code_masks'] = torch.cat( [sample['code_mask'] for sample in samples]) if samples[0].get('code_image', None) is not None: batch['code_images'] = torch.cat( [sample['code_image'] for sample in samples]) # For classification tasks (i.e., VQA, SNLI-VE, GLUE) if samples[0].get('conf', None) is not None: batch['conf'] = torch.cat([s['conf'] for s in samples], dim=0) if samples[0].get('ref_dict', None) is not None: batch['ref_dict'] = np.array([s['ref_dict'] for s in samples]) if samples[0].get('label', None) is not None: batch['labels'] = np.array([s['label'] for s in samples]).tolist() if samples[0].get('constraint_mask', None) is not None: batch['constraint_masks'] = merge('constraint_mask') if samples[0].get('decoder_prompt', None) is not None: batch['decoder_prompts'] = np.array( [s['decoder_prompt'].tolist() for s in samples]) if samples[0].get('prefix_token', None) is not None: batch['prefix_tokens'] = merge('prefix_token') # For detection and visual grounding if samples[0].get('w_resize_ratio', None) is not None: batch['w_resize_ratios'] = torch.stack( [s['w_resize_ratio'] for s in samples], dim=0) if samples[0].get('h_resize_ratio', None) is not None: batch['h_resize_ratios'] = torch.stack( [s['h_resize_ratio'] for s in samples], dim=0) if samples[0].get('region_coord', None) is not None: batch['region_coords'] = torch.stack( [s['region_coord'] for s in samples], dim=0) if samples[0].get('sample', None) is not None: batch['samples'] = [s['sample'] for s in samples] # For asr if samples[0].get('fbank', None) is not None: batch['net_input']['fbank'] = _collate_frames( [s['fbank'] for s in samples]) batch['net_input']['fbank_length'] = torch.tensor( [s['fbank'].size(0) for s in samples], dtype=torch.long) if samples[0].get('fbank_mask', None) is not None: batch['net_input']['fbank_masks'] = torch.cat( [s['fbank_mask'] for s in samples]) if samples[0].get('phone_item', None) is not None: batch['net_input']['phone_items'] = merge('phone_item') batch['net_input']['phone_masks'] = torch.cat( [s['phone_mask'] for s in samples]) if samples[0].get('phone_target', None) is not None: batch['phone_target'] = merge('phone_target') batch['phone_length'] = torch.tensor( [s['phone_target'].size(0) for s in 
samples], dtype=torch.long) # for sudoku if samples[0].get('db_struct', None) is not None: db_struct = [sample['db_struct'] for sample in samples] batch['db_struct'] = db_struct if samples[0].get('mask_ratio', None) is not None: mask_ratio = [sample['mask_ratio'] for sample in samples] batch['mask_ratio'] = mask_ratio if samples[0].get('seg_col_tokens', None) is not None: seg_col_tokens = merge('seg_col_tokens') batch['net_input']['seg_col_tokens'] = seg_col_tokens if samples[0].get('seg_row_tokens', None) is not None: seg_row_tokens = merge('seg_row_tokens') batch['net_input']['seg_row_tokens'] = seg_row_tokens return batch def collate_tokens( values, pad_idx, eos_idx=None, left_pad=False, move_eos_to_beginning=False, pad_to_length=None, pad_to_multiple=1, pad_to_bsz=None, ): """Convert a list of 1d tensors into a padded 2d tensor.""" size = max(v.size(0) for v in values) size = size if pad_to_length is None else max(size, pad_to_length) if pad_to_multiple != 1 and size % pad_to_multiple != 0: size = int(((size - 0.1) // pad_to_multiple + 1) * pad_to_multiple) def METHOD_NAME(src, dst): assert dst.numel() == src.numel() if move_eos_to_beginning: if eos_idx is None: # if no eos_idx is specified, then use the last token in src dst[0] = src[-1] else: dst[0] = eos_idx dst[1:] = src[:-1] else: dst.copy_(src) if values[0].dim() == 1: res = values[0].new(len(values), size).fill_(pad_idx) elif values[0].dim() == 2: assert move_eos_to_beginning is False res = values[0].new(len(values), size, values[0].size(1)).fill_(pad_idx) else: raise NotImplementedError for i, v in enumerate(values): METHOD_NAME(v, res[i][size - len(v):] if left_pad else res[i][:len(v)]) return res def _collate_frames(frames: List[torch.Tensor]): """ Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ max_len = max(frame.size(0) for frame in frames) out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1))) for i, v in enumerate(frames): out[i, :v.size(0)] = v return out
null
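As a reading aid (not part of the row): collate_tokens in the sample above right-pads variable-length sequences with pad_idx, using the masked helper (METHOD_NAME, labelled "copy tensor") to copy each source tensor into its slot. A tiny worked example:

import torch

seqs = [torch.tensor([5, 6, 7]), torch.tensor([8, 9]), torch.tensor([10])]
batch = collate_tokens(seqs, pad_idx=0)
# batch is:
# tensor([[ 5,  6,  7],
#         [ 8,  9,  0],
#         [10,  0,  0]])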
find equivalent identifier of type
import re class DeviceIdentifierType: FIT = "fit" TCX = "tcx" GC = "gc" class DeviceIdentifier: def Match(self, query): compareDict = dict(self.__dict__) compareDict.update(query) return compareDict == self.__dict__ # At the time it felt like a better idea than iterating through keys? class FITDeviceIdentifier(DeviceIdentifier): def __init__(self, manufacturer, product=None): self.Type = DeviceIdentifierType.FIT self.Manufacturer = manufacturer self.Product = product class TCXDeviceIdentifier(DeviceIdentifier): def __init__(self, name, productId=None): self.Type = DeviceIdentifierType.TCX self.Name = name self.ProductID = productId class GCDeviceIdentifier(DeviceIdentifier): def __init__(self, name): # Edge 810 -> edge810 # They're quite stubborn with giving the whole list of these device keys. # So this is really a guess. self.Key = re.sub("[^a-z0-9]", "", name.lower()) self.Type = DeviceIdentifierType.GC def Match(self, query): # Add some fuzziness becaise I can't be bothered figuring out what the pattern is return query["Key"] == self.Key or query["Key"] == ("garmin%s" % self.Key) class DeviceIdentifier: _identifierGroups = [] def AddIdentifierGroup(*identifiers): DeviceIdentifier._identifierGroups.append(identifiers) def FindMatchingIdentifierOfType(type, query): for group in DeviceIdentifier._identifierGroups: for identifier in group: if identifier.Type != type: continue if identifier.Match(query): return identifier def METHOD_NAME(type, identifier): if not identifier: return if identifier.Type == type: return identifier # We preemptively do this, so international variants have a chance of being preserved for group in DeviceIdentifier._identifierGroups: if identifier not in group: continue for altIdentifier in group: if altIdentifier.Type == type: return altIdentifier class Device: def __init__(self, identifier, serial=None, verMaj=None, verMin=None): self.Identifier = identifier self.Serial = serial self.VersionMajor = verMaj self.VersionMinor = verMin # I think Garmin devices' TCX ProductID match their FIT garmin_product id # And, since the FIT SDK is lagging behind: # - Forerunner 620 is 1623 def _garminIdentifier(name, *fitIds): return [TCXDeviceIdentifier("Garmin %s" % name, fitIds[0]), GCDeviceIdentifier(name)] + [FITDeviceIdentifier(1, fitId) for fitId in fitIds] # This list is REGEXed from the FIT SDK - I have no clue what some of the entries are... # Some products have international variants with different FIT IDs - the first ID given is used for TCX # DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM1", 1)) - Garmin Connect reports itself as ID 1 too. 
DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXH01", 2)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXB01", 3)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AXB02", 4)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM2SS", 5)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("DSI_ALF02", 6)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 301", 473, 474, 475, 494)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 405", 717, 987)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 50", 782)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 60", 988)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("DSI_ALF01", 1011)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 310XT", 1018, 1446)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 500", 1036, 1199, 1213, 1387, 1422)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 110", 1124, 1274)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 800", 1169, 1333, 1334, 1497, 1386)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Chirp", 1253)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 200", 1325, 1555)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 910XT", 1328, 1537, 1600, 1664)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 920XT", 1765)) # The SDK isn't updated yet, don't have international variants DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("ALF04", 1341)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 610", 1345, 1410)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 210", 1360)) # In the SDK this is marked as "JAPAN" :S DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 70", 1436)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("AMX", 1461)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 10", 1482, 1688)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Swim", 1499)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Fenix", 1551)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Fenix 2", 1967)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Fenix 3", 2050)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 510", 1561, 1742, 1821)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 810", 1567, 1721, 1822, 1823)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge 1000", 1836)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Tempe", 1570)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("VIRB Elite", 1735)) # Where's the VIRB Proletariat? DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Edge Touring", 1736)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("HRM Run", 1752)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("SDM4", 10007)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Training Center", 20119)) DeviceIdentifier.AddIdentifierGroup(*_garminIdentifier("Forerunner 620", 1623)) # TomTom MySports Connect appears to produce these IDs for all of their # models of GPS watches (Runner, MultiSport, and Cardio versions of the same). DeviceIdentifier.AddIdentifierGroup(TCXDeviceIdentifier("TomTom GPS Sport Watch", 0), FITDeviceIdentifier(71, 0))
null
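As a reading aid (not part of the row): per the label, the masked METHOD_NAME is the lookup that maps an identifier of one type to its equivalent of another type within the same registered group. A sketch of the intended flow, with FindEquivalentIdentifierOfType used as the label-derived name of METHOD_NAME:

# Find the FIT identifier for a Forerunner 910XT (FIT product id 1328) ...
fit_id = DeviceIdentifier.FindMatchingIdentifierOfType(
    DeviceIdentifierType.FIT, {"Manufacturer": 1, "Product": 1328}
)
# ... then ask for the TCX identifier registered in the same group.
tcx_id = DeviceIdentifier.FindEquivalentIdentifierOfType(DeviceIdentifierType.TCX, fit_id)
# tcx_id.Name == "Garmin Forerunner 910XT"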
test mutator sequence
import json from pathlib import Path import pytest from nni.common.framework import get_default_framework, set_default_framework from nni.nas.space import StationaryMutator, Mutator, MutationSampler, GraphModelSpace, ModelStatus, MutatorSequence from nni.nas.space.mutator import _RandomSampler from nni.nas.space.graph_op import Operation @pytest.fixture(autouse=True, scope='module') def default_framework(): original_framework = get_default_framework() set_default_framework('tensorflow') yield set_default_framework(original_framework) @pytest.fixture(autouse=True) def max_pool(): yield Operation.new('MaxPool2D', {'pool_size': 2}) @pytest.fixture(autouse=True) def avg_pool(): yield Operation.new('AveragePooling2D', {'pool_size': 2}) @pytest.fixture(autouse=True) def global_pool(): yield Operation.new('GlobalAveragePooling2D') class DebugSampler(MutationSampler): def __init__(self): self.iteration = 0 def choice(self, candidates, mutator, model, index): idx = (self.iteration + index) % len(candidates) return candidates[idx] def mutation_start(self, mutator, model): self.iteration += 1 class DebugMutator(Mutator): def __init__(self, ops, label): super().__init__(label=label) self.ops = ops def mutate(self, model): pool1 = model.graphs['stem'].get_node_by_name('pool1') op = self.choice(self.ops) pool1.update_operation(op) pool2 = model.graphs['stem'].get_node_by_name('pool2') if op == self.ops[0]: pool2.update_operation(self.ops[0]) else: pool2.update_operation(self.choice(self.ops)) class StationaryDebugMutator(StationaryMutator): def __init__(self, ops, label): super().__init__(label=label) self.ops = ops def mutate(self, model): pool1 = model.graphs['stem'].get_node_by_name('pool1') pool1.update_operation(self.choice(self.ops)) pool2 = model.graphs['stem'].get_node_by_name('pool2') pool2.update_operation(self.choice(self.ops)) @pytest.fixture def mutator(max_pool, avg_pool, global_pool): sampler = DebugSampler() mutator = StationaryDebugMutator(ops=[max_pool, avg_pool, global_pool], label='debug') mutator.bind_sampler(sampler) sampler.iteration = 0 return mutator @pytest.fixture def mutator1(max_pool, avg_pool, global_pool): sampler = DebugSampler() mutator = DebugMutator(ops=[max_pool, avg_pool, global_pool], label='debug') mutator.bind_sampler(sampler) sampler.iteration = 0 return mutator @pytest.fixture def model0(): json_path = Path(__file__).parent / 'mnist_tensorflow.json' ir = json.load(json_path.open()) return GraphModelSpace._load(_internal=True, **ir) def test_dry_run(model0, mutator, max_pool, avg_pool, global_pool): assert model0.status == ModelStatus.Initialized candidates, model1 = mutator.dry_run(model0) assert model0.status == ModelStatus.Initialized assert model1.status == ModelStatus.Mutating assert len(candidates) == 2 assert candidates['debug/0'].values == [max_pool, avg_pool, global_pool] assert candidates['debug/1'].values == [max_pool, avg_pool, global_pool] def test_mutation(model0, mutator, max_pool, avg_pool, global_pool): model1 = mutator.apply(model0) assert _get_pools(model1) == (avg_pool, global_pool) model2 = mutator.apply(model1) assert _get_pools(model2) == (global_pool, max_pool) assert len(model2.history) == 2 assert model2.history[0].from_ == model0 assert model2.history[0].to == model1 assert model2.history[1].from_ == model1 assert model2.history[1].to == model2 assert model2.history[0].mutator == mutator assert model2.history[1].mutator == mutator assert _get_pools(model0) == (max_pool, max_pool) assert _get_pools(model1) == (avg_pool, global_pool) def 
METHOD_NAME(model0, mutator, max_pool, avg_pool): mutators = MutatorSequence([mutator]) with pytest.raises(AssertionError, match='bound to a model'): mutators.simplify() with mutators.bind_model(model0): assert list(mutators.simplify().keys()) == ['debug/0', 'debug/1'] with mutators.bind_model(model0): model1 = mutators.freeze({'debug/0': avg_pool, 'debug/1': max_pool}) assert model1.status == ModelStatus.Mutating assert len(model1.history) == 1 assert _get_pools(model1) == (avg_pool, max_pool) def test_simplify_and_random(model0, mutator, max_pool, avg_pool, global_pool): model0.mutators = MutatorSequence([mutator]) assert list(model0.simplify().keys()) == ['debug/0', 'debug/1'] mutator.sampler = None model1 = model0.random() assert model1.status == ModelStatus.Frozen assert list(model1.sample.keys()) == ['debug/0', 'debug/1'] assert model1.sample['debug/0'] in [max_pool, avg_pool, global_pool] assert model1.sample['debug/1'] in [max_pool, avg_pool, global_pool] def test_nonstationary_mutator(model0, mutator1, max_pool, avg_pool, global_pool): model = model0 for _ in range(10): model = mutator1.apply(model) pools = _get_pools(model) if pools[0] == max_pool: assert pools[1] == max_pool else: assert pools[0] in [avg_pool, global_pool] assert pools[1] in [max_pool, avg_pool, global_pool] def test_nonstationary_mutator_simplify(model0, mutator1, max_pool, avg_pool, global_pool): model0.mutators = MutatorSequence([mutator1]) assert model0.simplify() == {'debug': mutator1} mutator1.sampler = None model1 = model0.random() assert model1.status == ModelStatus.Frozen assert isinstance(model1.sample['debug'], _RandomSampler) pools = _get_pools(model1) assert pools[0] in [max_pool, avg_pool, global_pool] assert pools[1] in [max_pool, avg_pool, global_pool] def _get_pools(model): pool1 = model.graphs['stem'].get_node_by_name('pool1').operation pool2 = model.graphs['stem'].get_node_by_name('pool2').operation return pool1, pool2
null
run
#!/usr/bin/env python3 """A standalone tool for executing compiled Xilinx XRT bitstreams. This tool can be invoked as a subprocess to run a compiled `.xclbin`, which may be compiled either for RTL simulation or for actual on-FPGA execution. It consumes and produces fud-style JSON input/output data files but is otherwise isolated from the rest of fud and can be invoked as a standalone program. This separate-process model is important so the user (or parent process) can set the *required* environment variables that the Xilinx toolchain needs to control its execution mode and to find its support files. This tool currently uses the `PYNQ`_ Python library, which is meant for high-level application interaction but is also a fairly stable wrapper around the underlying XRT libraries. In the future, we can consider replcaing PYNQ with directly using the `pyxrt`_ library, or abandoning Python altogether and using the native XRT library directly for simplicity. A bunch of environment variables have to be set to use xclrun. A minimal invocation of xclrun looks something like this:: $ source /scratch/opt/Xilinx/Vitis/2020.2/settings64.sh $ source /scratch/opt/xilinx/xrt/setup.sh $ export EMCONFIG_PATH=`pwd` $ XCL_EMULATION_MODE=hw_emu $ XRT_INI_PATH=`pwd`/xrt.ini $ python -m fud.xclrun something.xclbin data.json .. _PYNQ: https://github.com/xilinx/pynq .. _pyxrt: https://github.com/Xilinx/XRT/blob/master/src/python/pybind11/src/pyxrt.cpp """ import argparse import pynq import numpy as np import simplejson as sjson import sys from typing import Mapping, Any, Dict from pathlib import Path from fud.stages.verilator.json_to_dat import parse_fp_widths, float_to_fixed from fud.errors import InvalidNumericType def mem_to_buf(mem): """Convert a fud-style JSON memory object to a PYNQ buffer.""" ndarray = np.array(mem["data"], dtype=_dtype(mem["format"])) buffer = pynq.allocate(ndarray.shape, dtype=ndarray.dtype) buffer[:] = ndarray[:] return buffer def buf_to_mem(fmt, buf): """Convert a PYNQ buffer to a fud-style JSON memory value.""" # converts int representation into fixed point if fmt["numeric_type"] == "fixed_point": width, int_width = parse_fp_widths(fmt) frac_width = width - int_width def convert_to_fp(value: float): float_to_fixed(float(value), frac_width) convert_to_fp(buf) return list(buf) elif fmt["numeric_type"] == "bitnum": return list([int(e) for e in buf]) else: raise InvalidNumericType('Fud only supports "fixed_point" and "bitnum".') def METHOD_NAME(xclbin: Path, data: Mapping[str, Any]) -> Dict[str, Any]: """Takes in a json data output and runs pynq using the data provided returns a dictionary that can be converted into json `xclbin` is path to relevant xclbin file. Assumes that data is a properly formatted calyx data file. Data file order must match the expected call signature in terms of order Also assume that the data Mapping values type are valid json-type equivalents """ # Load the PYNQ overlay from the .xclbin file, raising a FileNotFoundError # if the file does not exist. ol = pynq.Overlay(str(xclbin.resolve(strict=True))) # Send all the input data. buffers = [mem_to_buf(mem) for mem in data.values()] for buffer in buffers: buffer.sync_to_device() # Run the kernel. kernel = getattr(ol, list(ol.ip_dict)[0]) # Like ol.Toplevel_1 # XXX(nathanielnrn) 2022-07-19: timeout is not currently used anywhere in # generated verilog code, passed in because kernel.xml is generated to # expect it as an argument timeout = 1000 kernel.call(timeout, *buffers) # Collect the output data. 
for buf in buffers: buf.sync_from_device() mems = {name: buf_to_mem(data[name]["format"], buf) for name, buf in zip(data, buffers)} # PYNQ recommends explicitly freeing its resources. del buffers ol.free() return {"memories": mems} def _dtype(fmt) -> np.dtype: # See https://numpy.org/doc/stable/reference/arrays.dtypes.html for typing # details type_string = "i" if fmt["is_signed"] else "u" byte_size = int(fmt["width"] / 8) type_string = type_string + str(byte_size) return np.dtype(type_string) def xclrun(): # Parse command-line arguments. parser = argparse.ArgumentParser( description='run a compiled XRT program', ) parser.add_argument('bin', metavar='XCLBIN', help='the .xclbin binary file to run') parser.add_argument('data', metavar='DATA', help='the JSON input data file') parser.add_argument('--out', '-o', metavar='FILE', help='write JSON results to a file instead of stdout') args = parser.parse_args() # Load the input JSON data file. with open(args.data) as f: in_data = sjson.load(f, use_decimal=True) # Run the program. out_data = METHOD_NAME(Path(args.bin), in_data) # Dump the output JSON data. outfile = open(args.out, 'w') if args.out else sys.stdout sjson.dump(out_data, outfile, indent=2, use_decimal=True) if __name__ == "__main__": xclrun()
null
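As a reading aid (not part of the row): a hypothetical fud-style data mapping consistent with the fields the script reads (each memory needs "data" plus a "format" carrying numeric_type, is_signed and width):

example_data = {
    "mem0": {
        "data": [1, 2, 3, 4],
        "format": {"numeric_type": "bitnum", "is_signed": False, "width": 32},
    }
}
# run(Path("kernel.xclbin"), example_data) would allocate one PYNQ buffer per
# memory, invoke the kernel, and return {"memories": {"mem0": [...]}}.
# (run is the label-derived name of METHOD_NAME; the .xclbin path is made up.)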
test output raw halfs
# SPDX-License-Identifier: BSD-3-Clause # Copyright Contributors to the OpenColorIO Project. import logging import unittest logger = logging.getLogger(__name__) try: import numpy as np except ImportError: logger.warning( "NumPy could not be imported. " "Test case will lack significant coverage!" ) np = None import PyOpenColorIO as OCIO from TransformsBaseTest import TransformsBaseTest class Lut1DTransformTest(unittest.TestCase, TransformsBaseTest): def setUp(self): self.tr = OCIO.Lut1DTransform() def test_default_constructor(self): """ Test the default constructor. """ self.assertEqual(self.tr.getLength(), 2) self.assertEqual(self.tr.getDirection(), OCIO.TRANSFORM_DIR_FORWARD) self.assertEqual(self.tr.getHueAdjust(), OCIO.HUE_NONE) self.assertFalse(self.tr.getInputHalfDomain()) self.assertFalse(self.tr.getOutputRawHalfs()) self.assertEqual(self.tr.getInterpolation(), OCIO.INTERP_DEFAULT) self.assertEqual(self.tr.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN) r, g, b = self.tr.getValue(0) self.assertEqual([r, g, b], [0, 0, 0]) r, g, b = self.tr.getValue(1) self.assertEqual([r, g, b], [1, 1, 1]) def test_format_metadata(self): """ Test the getFormatMetadata() method. """ format_metadata = self.tr.getFormatMetadata() self.assertIsInstance(format_metadata, OCIO.FormatMetadata) self.assertEqual(format_metadata.getElementName(), 'ROOT') self.assertEqual(format_metadata.getName(), '') self.assertEqual(format_metadata.getID(), '') format_metadata.setName('name') format_metadata.setID('id') self.assertEqual(format_metadata.getName(), 'name') self.assertEqual(format_metadata.getID(), 'id') def test_file_output_bit_depth(self): """ Test the setFileOutputBitDepth() and getFileOutputBitDepth() methods. """ self.assertEqual(self.tr.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN) self.tr.setFileOutputBitDepth(OCIO.BIT_DEPTH_UINT10) self.assertEqual(self.tr.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10) def test_hue_adjust(self): """ Test the setHueAdjust() and getHueAdjust() methods. """ self.assertEqual(self.tr.getHueAdjust(), OCIO.HUE_NONE) self.tr.setHueAdjust(OCIO.HUE_DW3) self.assertEqual(self.tr.getHueAdjust(), OCIO.HUE_DW3) with self.assertRaises(OCIO.Exception): self.tr.setHueAdjust(OCIO.HUE_WYPN) def test_input_half_domain(self): """ Test the setInputHalfDomain() and getInputHalfDomain() methods. """ self.assertFalse(self.tr.getInputHalfDomain()) self.tr.setInputHalfDomain(True) self.assertTrue(self.tr.getInputHalfDomain()) def METHOD_NAME(self): """ Test the setOutputRawHalfs() and getOutputRawHalfs() methods. """ self.assertFalse(self.tr.getOutputRawHalfs()) self.tr.setOutputRawHalfs(True) self.assertTrue(self.tr.getOutputRawHalfs()) def test_length(self): """ Test the setLength() and getLength() methods. """ self.assertEqual(self.tr.getLength(), 2) self.tr.setValue(0, 0.1, 0.2, 0.3) self.tr.setLength(3) self.assertEqual(self.tr.getLength(), 3) # Changing the length reset LUT values to identity. r, g, b = self.tr.getValue(0) self.assertEqual([r, g, b], [0, 0, 0]) def test_constructor_with_keywords(self): """ Test Lut1DTransform constructor with keywords and validate its values. 
""" lut = OCIO.Lut1DTransform( length=65536, inputHalfDomain=True, outputRawHalfs=True, fileOutputBitDepth=OCIO.BIT_DEPTH_UINT10, hueAdjust=OCIO.HUE_DW3, interpolation=OCIO.INTERP_BEST, direction=OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getLength(), 65536) self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3) self.assertTrue(lut.getInputHalfDomain()) self.assertTrue(lut.getOutputRawHalfs()) self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST) self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10) lut = OCIO.Lut1DTransform( length=4, direction=OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getLength(), 4) self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getHueAdjust(), OCIO.HUE_NONE) self.assertFalse(lut.getInputHalfDomain()) self.assertFalse(lut.getOutputRawHalfs()) self.assertEqual(lut.getInterpolation(), OCIO.INTERP_DEFAULT) self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UNKNOWN) def test_constructor_with_positional(self): """ Test Lut1DTransform constructor without keywords and validate its values. """ lut = OCIO.Lut1DTransform(65536, True, True, OCIO.BIT_DEPTH_UINT10, OCIO.HUE_DW3, OCIO.INTERP_BEST, OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getLength(), 65536) self.assertEqual(lut.getDirection(), OCIO.TRANSFORM_DIR_INVERSE) self.assertEqual(lut.getHueAdjust(), OCIO.HUE_DW3) self.assertTrue(lut.getInputHalfDomain()) self.assertTrue(lut.getOutputRawHalfs()) self.assertEqual(lut.getInterpolation(), OCIO.INTERP_BEST) self.assertEqual(lut.getFileOutputBitDepth(), OCIO.BIT_DEPTH_UINT10) def test_array(self): """ Test the setValue() and getValue() methods. """ self.tr = OCIO.Lut1DTransform(length=3) r, g, b = self.tr.getValue(0) self.assertEqual([r, g, b], [0, 0, 0]) r, g, b = self.tr.getValue(1) self.assertEqual([r, g, b], [0.5, 0.5, 0.5]) r, g, b = self.tr.getValue(2) self.assertEqual([r, g, b], [1, 1, 1]) self.tr.setValue(0, 0.1, 0.2, 0.3) r, g, b = self.tr.getValue(0) # Values are stored as float. self.assertAlmostEqual(r, 0.1, delta=1e-6) self.assertAlmostEqual(g, 0.2, delta=1e-6) self.assertAlmostEqual(b, 0.3, delta=1e-6) if not np: logger.warning("NumPy not found. Skipping part of test!") return data = self.tr.getData() expected = np.array([0.1, 0.2, 0.3, 0.5, 0.5, 0.5, 1., 1., 1.]).astype(np.float32) self.assertEqual(data.all(), expected.all()) data[6] = 0.9 data[7] = 1.1 data[8] = 1.2 self.tr.setData(data) r, g, b = self.tr.getValue(2) self.assertAlmostEqual(r, 0.9, delta=1e-6) self.assertAlmostEqual(g, 1.1, delta=1e-6) self.assertAlmostEqual(b, 1.2, delta=1e-6) def test_equals(self): """ Test equals. """ lut = OCIO.Lut1DTransform() lut2 = OCIO.Lut1DTransform() self.assertTrue(lut.equals(lut2)) lut.setValue(0, 0.1, 0.2, 0.3) self.assertFalse(lut.equals(lut2))
null
test context job metadata with unicode
# -*- coding: utf-8 -*- from copy import deepcopy import datetime import pytest # from awx.main.models import NotificationTemplates, Notifications, JobNotificationMixin from awx.main.models import AdHocCommand, InventoryUpdate, Job, JobNotificationMixin, ProjectUpdate, Schedule, SystemJob, WorkflowJob from awx.api.serializers import UnifiedJobSerializer class TestJobNotificationMixin(object): CONTEXT_STRUCTURE = { 'job': { 'allow_simultaneous': bool, 'artifacts': {}, 'custom_virtualenv': str, 'controller_node': str, 'created': datetime.datetime, 'description': str, 'diff_mode': bool, 'elapsed': float, 'execution_node': str, 'failed': bool, 'finished': bool, 'force_handlers': bool, 'forks': int, 'host_status_counts': {'skipped': int, 'ok': int, 'changed': int, 'failures': int, 'dark': int, 'processed': int, 'rescued': int, 'failed': bool}, 'id': int, 'job_explanation': str, 'job_slice_count': int, 'job_slice_number': int, 'job_tags': str, 'job_type': str, 'launch_type': str, 'limit': str, 'modified': datetime.datetime, 'name': str, 'playbook': str, 'scm_branch': str, 'scm_revision': str, 'skip_tags': str, 'start_at_task': str, 'started': str, 'status': str, 'summary_fields': { 'created_by': {'first_name': str, 'id': int, 'last_name': str, 'username': str}, 'instance_group': {'id': int, 'name': str}, 'inventory': { 'description': str, 'has_active_failures': bool, 'has_inventory_sources': bool, 'hosts_with_active_failures': int, 'id': int, 'inventory_sources_with_failures': int, 'kind': str, 'name': str, 'organization_id': int, 'total_groups': int, 'total_hosts': int, 'total_inventory_sources': int, }, 'job_template': {'description': str, 'id': int, 'name': str}, 'labels': {'count': int, 'results': list}, 'project': {'description': str, 'id': int, 'name': str, 'scm_type': str, 'status': str}, 'schedule': {'description': str, 'id': int, 'name': str, 'next_run': datetime.datetime}, 'unified_job_template': {'description': str, 'id': int, 'name': str, 'unified_job_type': str}, }, 'timeout': int, 'type': str, 'url': str, 'use_fact_cache': bool, 'verbosity': int, }, 'job_friendly_name': str, 'job_metadata': str, 'approval_status': str, 'approval_node_name': str, 'workflow_url': str, 'url': str, } def check_structure(self, expected_structure, obj): if isinstance(expected_structure, dict): assert isinstance(obj, dict) for key in obj: assert key in expected_structure if obj[key] is None: continue if isinstance(expected_structure[key], dict): assert isinstance(obj[key], dict) self.check_structure(expected_structure[key], obj[key]) else: if key == 'job_explanation': assert isinstance(str(obj[key]), expected_structure[key]) else: assert isinstance(obj[key], expected_structure[key]) @pytest.mark.django_db @pytest.mark.parametrize('JobClass', [AdHocCommand, InventoryUpdate, Job, ProjectUpdate, SystemJob, WorkflowJob]) def test_context(self, JobClass, sqlite_copy, project, inventory_source): """The Jinja context defines all of the fields that can be used by a template. 
Ensure that the context generated for each job type has the expected structure.""" kwargs = {} if JobClass is InventoryUpdate: kwargs['inventory_source'] = inventory_source kwargs['source'] = inventory_source.source elif JobClass is ProjectUpdate: kwargs['project'] = project job = JobClass.objects.create(name='foo', **kwargs) job_serialization = UnifiedJobSerializer(job).to_representation(job) context = job.context(job_serialization) self.check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context) @pytest.mark.django_db def test_schedule_context(self, job_template, admin_user): schedule = Schedule.objects.create(name='job-schedule', rrule='DTSTART:20171129T155939z\nFREQ=MONTHLY', unified_job_template=job_template) job = Job.objects.create(name='fake-job', launch_type='workflow', schedule=schedule, job_template=job_template) job_serialization = UnifiedJobSerializer(job).to_representation(job) context = job.context(job_serialization) self.check_structure(TestJobNotificationMixin.CONTEXT_STRUCTURE, context) @pytest.mark.django_db def METHOD_NAME(self): job = Job.objects.create(name='批量安装项目') job_serialization = UnifiedJobSerializer(job).to_representation(job) context = job.context(job_serialization) assert '批量安装项目' in context['job_metadata'] def test_context_stub(self): """The context stub is a fake context used to validate custom notification messages. Ensure that this also has the expected structure. Furthermore, ensure that the stub context contains *all* fields that could possibly be included in a context.""" def check_structure_and_completeness(expected_structure, obj): expected_structure = deepcopy(expected_structure) if isinstance(expected_structure, dict): assert isinstance(obj, dict) for key in obj: assert key in expected_structure # Context stub should not have any undefined fields assert obj[key] is not None if isinstance(expected_structure[key], dict): assert isinstance(obj[key], dict) check_structure_and_completeness(expected_structure[key], obj[key]) expected_structure.pop(key) else: assert isinstance(obj[key], expected_structure[key]) expected_structure.pop(key) # Ensure all items in expected structure were present assert not len(expected_structure) context_stub = JobNotificationMixin.context_stub() check_structure_and_completeness(TestJobNotificationMixin.CONTEXT_STRUCTURE, context_stub)
null
get copied from assignment
import logging from typing import List, Optional from sqlalchemy.orm import Session from lms.models import ( Assignment, AssignmentGrouping, AssignmentMembership, Grouping, LTIRole, User, ) from lms.services.upsert import bulk_upsert LOG = logging.getLogger(__name__) class AssignmentService: """A service for getting and setting assignments.""" def __init__(self, db: Session, misc_plugin, grouping_plugin): self._db = db self._misc_plugin = misc_plugin self._grouping_plugin = grouping_plugin def get_assignment(self, tool_consumer_instance_guid, resource_link_id): """Get an assignment by resource_link_id.""" return ( self._db.query(Assignment) .filter_by( tool_consumer_instance_guid=tool_consumer_instance_guid, resource_link_id=resource_link_id, ) .one_or_none() ) def create_assignment(self, tool_consumer_instance_guid, resource_link_id): """Create a new assignment.""" assignment = Assignment( tool_consumer_instance_guid=tool_consumer_instance_guid, resource_link_id=resource_link_id, extra={}, ) self._db.add(assignment) return assignment def update_assignment(self, request, assignment, document_url, group_set_id): """Update an existing assignment.""" assignment.document_url = document_url assignment.extra["group_set_id"] = group_set_id # Metadata based on the launch assignment.title = request.lti_params.get("resource_link_title") assignment.description = request.lti_params.get("resource_link_description") assignment.is_gradable = self._misc_plugin.is_assignment_gradable( request.lti_params ) return assignment def METHOD_NAME(self, lti_params) -> Optional[Assignment]: """Return the assignment that the current assignment was copied from.""" resource_link_history_params = [ "resource_link_id_history", # Blackboard, LTI 1.1 "ext_d2l_resource_link_id_history", # D2L, LTI 1.1 "custom_ResourceLink.id.history", # Blackboard and D2L, LTI 1.3 ] for param in resource_link_history_params: if historical_resource_link_id := lti_params.get(param): # History might have a long chain of comma separated # copies of copies, take the most recent one. historical_resource_link_id = historical_resource_link_id.split(",")[0] if historical_assignment := self.get_assignment( tool_consumer_instance_guid=lti_params.get( "tool_consumer_instance_guid" ), resource_link_id=historical_resource_link_id, ): return historical_assignment return None def get_assignment_for_launch(self, request) -> Optional[Assignment]: """ Get or create an assignment for the current launch. The returned assignment will have the relevant configuration for this launch. :returns: An assignment or None if one cannot be found or created. """ lti_params = request.lti_params tool_consumer_instance_guid = lti_params["tool_consumer_instance_guid"] resource_link_id = lti_params.get("resource_link_id") # Get the potentially relevant assignments from the DB assignment = self.get_assignment(tool_consumer_instance_guid, resource_link_id) historical_assignment = None if not assignment: historical_assignment = self.METHOD_NAME(lti_params) # Get the configuration for the assignment # it might be based on the assignments we just queried or the request document_url = self._misc_plugin.get_document_url( request, assignment, historical_assignment ) group_set_id = self._grouping_plugin.get_group_set_id( request, assignment, historical_assignment ) if not document_url: # We can't find a document_url, we shouldn't try to create an # assignment yet. return None if not assignment: # We don't have an assignment in the DB, but we know which document # url it should point to. 
This might happen for example on: # # * The first launch of a deep linked assignment # * The first launch copied assignment assignment = self.create_assignment( tool_consumer_instance_guid, resource_link_id ) # While creating a new assignment we found the assignment we # copied this one from. Reference this in the DB. assignment.copied_from = historical_assignment # Always update the assignment configuration # It often will be the same one while launching the assignment again but # it might for example be an updated deep linked URL or similar. return self.update_assignment(request, assignment, document_url, group_set_id) def upsert_assignment_membership( self, assignment: Assignment, user: User, lti_roles: List[LTIRole] ) -> List[AssignmentMembership]: """Store details of the roles a user plays in an assignment.""" # Commit any changes to ensure that our user and role objects have ids self._db.flush() values = [ { "user_id": user.id, "assignment_id": assignment.id, "lti_role_id": lti_role.id, } for lti_role in lti_roles ] return list( bulk_upsert( self._db, model_class=AssignmentMembership, values=values, index_elements=["user_id", "assignment_id", "lti_role_id"], update_columns=["updated"], ) ) def upsert_assignment_groupings( self, assignment: Assignment, groupings: List[Grouping] ) -> List[AssignmentGrouping]: """Store details of any groups and courses an assignment is in.""" # Commit any changes to ensure that our user and role objects have ids self._db.flush() values = [ {"assignment_id": assignment.id, "grouping_id": grouping.id} for grouping in groupings ] return list( bulk_upsert( self._db, model_class=AssignmentGrouping, values=values, index_elements=["assignment_id", "grouping_id"], update_columns=["updated"], ) ) def factory(_context, request): return AssignmentService( db=request.db, misc_plugin=request.product.plugin.misc, grouping_plugin=request.product.plugin.grouping, )
null
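As a reading aid (not part of the row): the masked METHOD_NAME (labelled "get copied from assignment") walks the LMS resource-link history parameters. Hypothetical launch parameters that would exercise it, with made-up values:

lti_params = {
    "tool_consumer_instance_guid": "guid-123",
    "resource_link_id": "new-link-id",
    # Blackboard / LTI 1.1 style history; the most recent copy comes first.
    "resource_link_id_history": "old-link-2,old-link-1",
}
# The lookup would query the assignment whose resource_link_id is "old-link-2"
# under guid-123 and, if found, record it as assignment.copied_from.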
coco keypoints
# Copyright The PyTorch Lightning team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import json import os from typing import Any import numpy as np import pytest import torch from flash import Trainer from flash.core.data.io.input import DataKeys from flash.core.utilities.imports import _ICEDATA_AVAILABLE, _ICEVISION_AVAILABLE, _TOPIC_IMAGE_AVAILABLE from flash.image import KeypointDetectionData, KeypointDetector from tests.helpers.task_tester import TaskTester if _TOPIC_IMAGE_AVAILABLE: from PIL import Image COCODataConfig = collections.namedtuple("COCODataConfig", "train_folder train_ann_file predict_folder") @pytest.fixture() def METHOD_NAME(tmpdir): rand_image = Image.fromarray(np.random.randint(0, 255, (64, 64, 3), dtype="uint8")) os.makedirs(tmpdir / "train_folder", exist_ok=True) os.makedirs(tmpdir / "predict_folder", exist_ok=True) train_folder = tmpdir / "train_folder" train_ann_file = tmpdir / "train_annotations.json" predict_folder = tmpdir / "predict_folder" _ = [rand_image.save(str(train_folder / f"image_{i}.png")) for i in range(1, 4)] _ = [rand_image.save(str(predict_folder / f"predict_image_{i}.png")) for i in range(1, 4)] annotations = { "annotations": [ { "area": 50, "bbox": [10, 20, 5, 10], "num_keypoints": 2, "keypoints": [10, 15, 2, 20, 30, 2], "category_id": 1, "id": 1, "image_id": 1, "iscrowd": 0, }, { "area": 100, "bbox": [20, 30, 10, 10], "num_keypoints": 2, "keypoints": [20, 30, 2, 30, 40, 2], "category_id": 2, "id": 2, "image_id": 2, "iscrowd": 0, }, { "area": 125, "bbox": [10, 20, 5, 25], "num_keypoints": 2, "keypoints": [10, 15, 2, 20, 45, 2], "category_id": 1, "id": 3, "image_id": 3, "iscrowd": 0, }, ], "categories": [ {"id": 1, "name": "cat", "supercategory": "cat", "keypoints": ["left ear", "right ear"]}, {"id": 2, "name": "dog", "supercategory": "dog", "keypoints": ["left ear", "right ear"]}, ], "images": [ {"file_name": "image_1.png", "height": 64, "width": 64, "id": 1}, {"file_name": "image_2.png", "height": 64, "width": 64, "id": 2}, {"file_name": "image_3.png", "height": 64, "width": 64, "id": 3}, ], } with open(train_ann_file, "w") as annotation_file: json.dump(annotations, annotation_file) return COCODataConfig(train_folder, train_ann_file, predict_folder) @pytest.mark.skipif(not _ICEDATA_AVAILABLE, reason="icedata is not installed for testing") @pytest.mark.skipif(not _ICEVISION_AVAILABLE, reason="icevision is not installed for testing") class TestKeypointDetector(TaskTester): task = KeypointDetector task_args = (2,) task_kwargs = {"num_classes": 2} cli_command = "keypoint_detection" is_testing = _TOPIC_IMAGE_AVAILABLE is_available = _TOPIC_IMAGE_AVAILABLE and _ICEVISION_AVAILABLE # TODO: Resolve JIT support traceable = False scriptable = False @property def example_forward_input(self): return torch.rand(1, 3, 32, 32) def check_forward_output(self, output: Any): assert {"keypoints", "labels", "scores"} <= output[0].keys() @property def example_train_sample(self): return { DataKeys.INPUT: torch.rand(3, 224, 224), DataKeys.TARGET: { "bboxes": [ 
{"xmin": 10, "ymin": 10, "width": 20, "height": 20}, {"xmin": 30, "ymin": 30, "width": 40, "height": 40}, ], "labels": [0, 1], "keypoints": [ [{"x": 10, "y": 10, "visible": 1}], [{"x": 10, "y": 10, "visible": 1}], ], }, } @property def example_val_sample(self): return self.example_train_sample @property def example_test_sample(self): return self.example_train_sample @pytest.mark.skipif(not _TOPIC_IMAGE_AVAILABLE, reason="image libraries aren't installed.") @pytest.mark.parametrize(("backbone", "head"), [("resnet18_fpn", "keypoint_rcnn")]) def test_model(METHOD_NAME, backbone, head): datamodule = KeypointDetectionData.from_coco( train_folder=METHOD_NAME.train_folder, train_ann_file=METHOD_NAME.train_ann_file, predict_folder=METHOD_NAME.predict_folder, transform_kwargs={"image_size": (128, 128)}, batch_size=2, ) assert datamodule.num_classes == 3 assert datamodule.labels == ["background", "cat", "dog"] model = KeypointDetector(2, num_classes=datamodule.num_classes, backbone=backbone, head=head) trainer = Trainer(fast_dev_run=True) trainer.fit(model, datamodule=datamodule) trainer.predict(model, datamodule=datamodule)
null
locale
from .base import ObjectBase class Order(ObjectBase): def __init__(self, data, client): super().__init__(data, client) @classmethod def get_resource_class(cls, client): from ..resources import Orders return Orders(client) STATUS_CREATED = "created" STATUS_PAID = "paid" STATUS_AUTHORIZED = "authorized" STATUS_CANCELED = "canceled" STATUS_SHIPPING = "shipping" STATUS_COMPLETED = "completed" STATUS_EXPIRED = "expired" @property def id(self): return self._get_property("id") @property def resource(self): return self._get_property("resource") @property def profile_id(self): return self._get_property("profileId") @property def method(self): return self._get_property("method") @property def mode(self): return self._get_property("mode") @property def amount(self): return self._get_property("amount") @property def amount_captured(self): return self._get_property("amountCaptured") @property def amount_refunded(self): return self._get_property("amountRefunded") @property def status(self): return self._get_property("status") @property def is_cancelable(self): return self._get_property("isCancelable") @property def billing_address(self): return self._get_property("billingAddress") @property def consumer_date_of_birth(self): return self._get_property("consumerDateOfBirth") @property def order_number(self): return self._get_property("orderNumber") @property def shipping_address(self): return self._get_property("shippingAddress") @property def METHOD_NAME(self): return self._get_property("locale") @property def metadata(self): return self._get_property("metadata") @property def redirect_url(self): return self._get_property("redirectUrl") @property def webhook_url(self): return self._get_property("webhookUrl") @property def created_at(self): return self._get_property("createdAt") @property def expires_at(self): return self._get_property("expiresAt") @property def expired_at(self): return self._get_property("expiredAt") @property def paid_at(self): return self._get_property("paidAt") @property def authorized_at(self): return self._get_property("authorizedAt") @property def canceled_at(self): return self._get_property("canceledAt") @property def completed_at(self): return self._get_property("completedAt") @property def cancel_url(self): return self._get_property("cancelUrl") # documented _links @property def checkout_url(self): return self._get_link("checkout") # additional methods def is_created(self): return self.status == self.STATUS_CREATED def is_paid(self): return self.status == self.STATUS_PAID def is_authorized(self): return self.status == self.STATUS_AUTHORIZED def is_canceled(self): return self.status == self.STATUS_CANCELED def is_shipping(self): return self.status == self.STATUS_SHIPPING def is_completed(self): return self.status == self.STATUS_COMPLETED def is_expired(self): return self.status == self.STATUS_EXPIRED def has_refunds(self): return self.amount_refunded is not None @property def refunds(self): from ..resources import OrderRefunds return OrderRefunds(self.client, self) @property def lines(self): from ..resources import OrderLines return OrderLines(self.client, self) @property def shipments(self): from ..resources import OrderShipments return OrderShipments(self.client, self) @property def payments(self): from ..resources import OrderPayments return OrderPayments(self.client, self)
null
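A minimal usage sketch for the Order resource in the row above, assuming the standard mollie-api-python Client; the API key and order id are placeholders, and only the properties and status helpers defined in the row are taken as given.

from mollie.api.client import Client

client = Client()
client.set_api_key("test_placeholder_key")   # placeholder key

order = client.orders.get("ord_example")     # hypothetical order id
print(order.locale)                          # the masked property, per this row's label
if order.is_paid() or order.is_authorized():
    print("paid at:", order.paid_at)
elif order.is_cancelable:
    print("order can still be canceled")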
bind global
import io import os import sys import copy import json import time import uuid import socket import logging import traceback from threading import local from collections import OrderedDict from contextlib import contextmanager LOG_TIMESTAMPS = "LOG_TIMESTAMPS" in os.environ def json_handler(obj): # if isinstance(obj, (datetime.date, datetime.time)): # return obj.isoformat() return repr(obj) def json_robust_dumps(obj): return json.dumps(obj, default=json_handler) class NiceOrderedDict(OrderedDict): def __str__(self): return json_robust_dumps(self) class SwagFormatter(logging.Formatter): def __init__(self, swaglogger): logging.Formatter.__init__(self, None, '%a %b %d %H:%M:%S %Z %Y') self.swaglogger = swaglogger self.host = socket.gethostname() def format_dict(self, record): record_dict = NiceOrderedDict() if isinstance(record.msg, dict): record_dict['msg'] = record.msg else: try: record_dict['msg'] = record.getMessage() except (ValueError, TypeError): record_dict['msg'] = [record.msg]+record.args record_dict['ctx'] = self.swaglogger.get_ctx() if record.exc_info: record_dict['exc_info'] = self.formatException(record.exc_info) record_dict['level'] = record.levelname record_dict['levelnum'] = record.levelno record_dict['name'] = record.name record_dict['filename'] = record.filename record_dict['lineno'] = record.lineno record_dict['pathname'] = record.pathname record_dict['module'] = record.module record_dict['funcName'] = record.funcName record_dict['host'] = self.host record_dict['process'] = record.process record_dict['thread'] = record.thread record_dict['threadName'] = record.threadName record_dict['created'] = record.created return record_dict def format(self, record): if self.swaglogger is None: raise Exception("must set swaglogger before calling format()") return json_robust_dumps(self.format_dict(record)) class SwagLogFileFormatter(SwagFormatter): def fix_kv(self, k, v): # append type to names to preserve legacy naming in logs # avoids overlapping key namespaces with different types # e.g. 
log.info() creates 'msg' -> 'msg$s' # log.event() creates 'msg.health.logMonoTime' -> 'msg.health.logMonoTime$i' # because overlapping namespace 'msg' caused problems if isinstance(v, (str, bytes)): k += "$s" elif isinstance(v, float): k += "$f" elif isinstance(v, bool): k += "$b" elif isinstance(v, int): k += "$i" elif isinstance(v, dict): nv = {} for ik, iv in v.items(): ik, iv = self.fix_kv(ik, iv) nv[ik] = iv v = nv elif isinstance(v, list): k += "$a" return k, v def format(self, record): if isinstance(record, str): v = json.loads(record) else: v = self.format_dict(record) mk, mv = self.fix_kv('msg', v['msg']) del v['msg'] v[mk] = mv v['id'] = uuid.uuid4().hex return json_robust_dumps(v) class SwagErrorFilter(logging.Filter): def filter(self, record): return record.levelno < logging.ERROR def _tmpfunc(): return 0 def _srcfile(): return os.path.normcase(_tmpfunc.__code__.co_filename) class SwagLogger(logging.Logger): def __init__(self): logging.Logger.__init__(self, "swaglog") self.global_ctx = {} self.log_local = local() self.log_local.ctx = {} def local_ctx(self): try: return self.log_local.ctx except AttributeError: self.log_local.ctx = {} return self.log_local.ctx def get_ctx(self): return dict(self.local_ctx(), **self.global_ctx) @contextmanager def ctx(self, **kwargs): old_ctx = self.local_ctx() self.log_local.ctx = copy.copy(old_ctx) or {} self.log_local.ctx.update(kwargs) try: yield finally: self.log_local.ctx = old_ctx def bind(self, **kwargs): self.local_ctx().update(kwargs) def METHOD_NAME(self, **kwargs): self.global_ctx.update(kwargs) def event(self, event, *args, **kwargs): evt = NiceOrderedDict() evt['event'] = event if args: evt['args'] = args evt.update(kwargs) if 'error' in kwargs: self.error(evt) elif 'debug' in kwargs: self.debug(evt) else: self.info(evt) def timestamp(self, event_name): if LOG_TIMESTAMPS: t = time.monotonic() tstp = NiceOrderedDict() tstp['timestamp'] = NiceOrderedDict() tstp['timestamp']["event"] = event_name tstp['timestamp']["time"] = t*1e9 self.debug(tstp) def findCaller(self, stack_info=False, stacklevel=1): """ Find the stack frame of the caller so that we can note the source file name, line number and function name. """ f = sys._getframe(3) #On some versions of IronPython, currentframe() returns None if #IronPython isn't run with -X:Frames. if f is not None: f = f.f_back orig_f = f while f and stacklevel > 1: f = f.f_back stacklevel -= 1 if not f: f = orig_f rv = "(unknown file)", 0, "(unknown function)", None while hasattr(f, "f_code"): co = f.f_code filename = os.path.normcase(co.co_filename) # TODO: is this pylint exception correct? 
if filename == _srcfile: # pylint: disable=comparison-with-callable f = f.f_back continue sinfo = None if stack_info: sio = io.StringIO() sio.write('Stack (most recent call last):\n') traceback.print_stack(f, file=sio) sinfo = sio.getvalue() if sinfo[-1] == '\n': sinfo = sinfo[:-1] sio.close() rv = (co.co_filename, f.f_lineno, co.co_name, sinfo) break return rv if __name__ == "__main__": log = SwagLogger() stdout_handler = logging.StreamHandler(sys.stdout) stdout_handler.setLevel(logging.INFO) stdout_handler.addFilter(SwagErrorFilter()) log.addHandler(stdout_handler) stderr_handler = logging.StreamHandler(sys.stderr) stderr_handler.setLevel(logging.ERROR) log.addHandler(stderr_handler) log.info("asdasd %s", "a") log.info({'wut': 1}) log.warning("warning") log.error("error") log.critical("critical") log.event("test", x="y") with log.ctx(): stdout_handler.setFormatter(SwagFormatter(log)) stderr_handler.setFormatter(SwagFormatter(log)) log.bind(user="some user") log.info("in req") print("") log.warning("warning") print("") log.error("error") print("") log.critical("critical") print("") log.event("do_req", a=1, b="c")
null
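A small sketch of the two binding levels in the logger above; the field names are placeholders, and bind_global is the name this row's label suggests for the masked method.

log = SwagLogger()
log.bind_global(device_id="abc123")     # process-wide: merged into every record's ctx
with log.ctx(route="route-0"):          # thread-local: restored when the block exits
    log.bind(segment=3)
    log.event("upload", ok=True)        # logged at info; ctx is attached by SwagFormatter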
build actuator
# Module 'panel' # # Support for the Panel library. # Uses built-in module 'pnl'. # Applications should use 'panel.function' instead of 'pnl.function'; # most 'pnl' functions are transparently exported by 'panel', # but dopanel() is overridden and you have to use this version # if you want to use callbacks. from warnings import warnpy3k warnpy3k("the panel module has been removed in Python 3.0", stacklevel=2) del warnpy3k import pnl debug = 0 # Test if an object is a list. # def is_list(x): return type(x) == type([]) # Reverse a list. # def reverse(list): res = [] for item in list: res.insert(0, item) return res # Get an attribute of a list, which may itself be another list. # Don't use 'prop' for name. # def getattrlist(list, name): for item in list: if item and is_list(item) and item[0] == name: return item[1:] return [] # Get a property of a list, which may itself be another list. # def getproplist(list, name): for item in list: if item and is_list(item) and item[0] == 'prop': if len(item) > 1 and item[1] == name: return item[2:] return [] # Test if an actuator description contains the property 'end-of-group' # def is_endgroup(list): x = getproplist(list, 'end-of-group') return (x and x[0] == '#t') # Neatly display an actuator definition given as S-expression # the prefix string is printed before each line. # def show_actuator(prefix, a): for item in a: if not is_list(item): print prefix, item elif item and item[0] == 'al': print prefix, 'Subactuator list:' for a in item[1:]: show_actuator(prefix + ' ', a) elif len(item) == 2: print prefix, item[0], '=>', item[1] elif len(item) == 3 and item[0] == 'prop': print prefix, 'Prop', item[1], '=>', print item[2] else: print prefix, '?', item # Neatly display a panel. # def show_panel(prefix, p): for item in p: if not is_list(item): print prefix, item elif item and item[0] == 'al': print prefix, 'Actuator list:' for a in item[1:]: show_actuator(prefix + ' ', a) elif len(item) == 2: print prefix, item[0], '=>', item[1] elif len(item) == 3 and item[0] == 'prop': print prefix, 'Prop', item[1], '=>', print item[2] else: print prefix, '?', item # Exception raised by build_actuator or build_panel. # panel_error = 'panel error' # Dummy callback used to initialize the callbacks. # def dummy_callback(arg): pass # Assign attributes to members of the target. # Attribute names in exclist are ignored. # The member name is the attribute name prefixed with the prefix. # def assign_members(target, attrlist, exclist, prefix): for item in attrlist: if is_list(item) and len(item) == 2 and item[0] not in exclist: name, value = item[0], item[1] ok = 1 if value[0] in '-0123456789': value = eval(value) elif value[0] == '"': value = value[1:-1] elif value == 'move-then-resize': # Strange default set by Panel Editor... ok = 0 else: print 'unknown value', value, 'for', name ok = 0 if ok: lhs = 'target.' + prefix + name stmt = lhs + '=' + repr(value) if debug: print 'exec', stmt try: exec stmt + '\n' except KeyboardInterrupt: # Don't catch this! raise KeyboardInterrupt except: print 'assign failed:', stmt # Build a real actuator from an actuator description. # Return a pair (actuator, name). 
# def METHOD_NAME(descr): namelist = getattrlist(descr, 'name') if namelist: # Assume it is a string actuatorname = namelist[0][1:-1] else: actuatorname = '' type = descr[0] if type[:4] == 'pnl_': type = type[4:] act = pnl.mkact(type) act.downfunc = act.activefunc = act.upfunc = dummy_callback # assign_members(act, descr[1:], ['al', 'data', 'name'], '') # # Treat actuator-specific data # datalist = getattrlist(descr, 'data') prefix = '' if type[-4:] == 'puck': prefix = 'puck_' elif type == 'mouse': prefix = 'mouse_' assign_members(act, datalist, [], prefix) # return act, actuatorname # Build all sub-actuators and add them to the super-actuator. # The super-actuator must already have been added to the panel. # Sub-actuators with defined names are added as members to the panel # so they can be referenced as p.name. # # Note: I have no idea how panel.endgroup() works when applied # to a sub-actuator. # def build_subactuators(panel, super_act, al): # # This is nearly the same loop as below in build_panel(), # except a call is made to addsubact() instead of addact(). # for a in al: act, name = METHOD_NAME(a) act.addsubact(super_act) if name: stmt = 'panel.' + name + ' = act' if debug: print 'exec', stmt exec stmt + '\n' if is_endgroup(a): panel.endgroup() sub_al = getattrlist(a, 'al') if sub_al: build_subactuators(panel, act, sub_al) # # Fix the actuator to which whe just added subactuators. # This can't hurt (I hope) and is needed for the scroll actuator. # super_act.fixact() # Build a real panel from a panel definition. # Return a panel object p, where for each named actuator a, p.name is a # reference to a. # def build_panel(descr): # # Sanity check # if (not descr) or descr[0] != 'panel': raise panel_error, 'panel description must start with "panel"' # if debug: show_panel('', descr) # # Create an empty panel # panel = pnl.mkpanel() # # Assign panel attributes # assign_members(panel, descr[1:], ['al'], '') # # Look for actuator list # al = getattrlist(descr, 'al') # # The order in which actuators are created is important # because of the endgroup() operator. # Unfortunately the Panel Editor outputs the actuator list # in reverse order, so we reverse it here. # al = reverse(al) # for a in al: act, name = METHOD_NAME(a) act.addact(panel) if name: stmt = 'panel.' + name + ' = act' exec stmt + '\n' if is_endgroup(a): panel.endgroup() sub_al = getattrlist(a, 'al') if sub_al: build_subactuators(panel, act, sub_al) # return panel # Wrapper around pnl.dopanel() which calls call-back functions. # def my_dopanel(): # Extract only the first 4 elements to allow for future expansion a, down, active, up = pnl.dopanel()[:4] if down: down.downfunc(down) if active: active.activefunc(active) if up: up.upfunc(up) return a # Create one or more panels from a description file (S-expressions) # generated by the Panel Editor. # def defpanellist(file): import panelparser descrlist = panelparser.parse_file(open(file, 'r')) panellist = [] for descr in descrlist: panellist.append(build_panel(descr)) return panellist # Import everything from built-in method pnl, so the user can always # use panel.foo() instead of pnl.foo(). # This gives *no* performance penalty once this module is imported. # from pnl import * # for export dopanel = my_dopanel # override pnl.dopanel
null
init reader
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function from utils.static_ps.reader_helper import get_reader, get_infer_reader, get_example_num, get_file_list, get_word_num from utils.static_ps.program_helper import get_model, get_strategy, set_dump_config from utils.static_ps.common_ps import YamlHelper, is_distributed_env import argparse import time import sys import paddle.distributed.fleet as fleet import paddle.distributed.fleet.base.role_maker as role_maker from paddle.distributed.ps.coordinator import FLClient import paddle import os import warnings import logging import ast import numpy as np import struct __dir__ = os.path.dirname(os.path.abspath(__file__)) sys.path.append(os.path.abspath(os.path.join(__dir__, '..'))) logging.basicConfig( format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def parse_args(): parser = argparse.ArgumentParser("PaddleRec train script") parser.add_argument( '-m', '--config_yaml', type=str, required=True, help='config file path') parser.add_argument( '-bf16', '--pure_bf16', type=ast.literal_eval, default=False, help="whether use bf16") args = parser.parse_args() args.abs_dir = os.path.dirname(os.path.abspath(args.config_yaml)) yaml_helper = YamlHelper() config = yaml_helper.load_yaml(args.config_yaml) config["yaml_path"] = args.config_yaml config["config_abs_dir"] = args.abs_dir config["pure_bf16"] = args.pure_bf16 yaml_helper.print_yaml(config) return config def bf16_to_fp32(val): return np.float32(struct.unpack('<f', struct.pack('<I', val << 16))[0]) class MyFLClient(FLClient): def __init__(self): pass class Trainer(object): def __init__(self, config): self.metrics = {} self.config = config self.input_data = None self.train_dataset = None self.test_dataset = None self.model = None self.pure_bf16 = self.config['pure_bf16'] self.use_cuda = int(self.config.get("runner.use_gpu")) self.place = paddle.CUDAPlace(0) if self.use_cuda else paddle.CPUPlace( ) self.role = None def run(self): self.init_fleet() self.init_network() if fleet.is_server(): self.run_server() elif fleet.is_worker(): self.METHOD_NAME() self.run_worker() elif fleet.is_coordinator(): self.run_coordinator() logger.info("Run Success, Exit.") def init_fleet(self, use_gloo=True): if use_gloo: os.environ["PADDLE_WITH_GLOO"] = "1" self.role = role_maker.PaddleCloudRoleMaker() fleet.init(self.role) else: fleet.init() def init_network(self): self.model = get_model(self.config) self.input_data = self.model.create_feeds() self.metrics = self.model.net(self.input_data) self.model.create_optimizer(get_strategy(self.config)) ## get_strategy if self.pure_bf16: self.model.optimizer.amp_init(self.place) def METHOD_NAME(self): self.train_dataset, self.train_file_list = get_reader(self.input_data, config) self.test_dataset, self.test_file_list = get_infer_reader( self.input_data, config) if self.role is not None: self.fl_client = MyFLClient() 
self.fl_client.set_basic_config(self.role, self.config, self.metrics) else: raise ValueError("self.role is none") self.fl_client.set_train_dataset_info(self.train_dataset, self.train_file_list) self.fl_client.set_test_dataset_info(self.test_dataset, self.test_file_list) example_nums = 0 self.count_method = self.config.get("runner.example_count_method", "example") if self.count_method == "example": example_nums = get_example_num(self.train_file_list) elif self.count_method == "word": example_nums = get_word_num(self.train_file_list) else: raise ValueError( "Set static_benchmark.example_count_method for example / word for example count." ) self.fl_client.set_train_example_num(example_nums) def run_coordinator(self): logger.info("Run Coordinator Begin") fleet.init_coordinator() fleet.make_fl_strategy() def run_server(self): logger.info("Run Server Begin") fleet.init_server(config.get("runner.warmup_model_path")) fleet.run_server() def run_worker(self): logger.info("Run Worker Begin") self.fl_client.run() if __name__ == "__main__": paddle.enable_static() config = parse_args() os.environ["CPU_NUM"] = str(config.get("runner.thread_num")) trainer = Trainer(config) trainer.run()
null
run script
import json import logging import sys import traceback import uuid from django.contrib.auth import get_user_model from django.core.management.base import BaseCommand, CommandError from django.db import transaction from core.choices import JobStatusChoices from core.models import Job from extras.api.serializers import ScriptOutputSerializer from extras.context_managers import change_logging from extras.scripts import get_module_and_script from extras.signals import clear_webhooks from utilities.exceptions import AbortTransaction from utilities.utils import NetBoxFakeRequest class Command(BaseCommand): help = "Run a script in NetBox" def add_arguments(self, parser): parser.add_argument( '--loglevel', help="Logging Level (default: info)", dest='loglevel', default='info', choices=['debug', 'info', 'warning', 'error', 'critical']) parser.add_argument('--commit', help="Commit this script to database", action='store_true') parser.add_argument('--user', help="User script is running as") parser.add_argument('--data', help="Data as a string encapsulated JSON blob") parser.add_argument('script', help="Script to run") def handle(self, *args, **options): def METHOD_NAME(): """ Core script execution task. We capture this within a subfunction to allow for conditionally wrapping it with the change_logging context manager (which is bypassed if commit == False). """ try: try: with transaction.atomic(): script.output = script.run(data=data, commit=commit) if not commit: raise AbortTransaction() except AbortTransaction: script.log_info("Database changes have been reverted automatically.") clear_webhooks.send(request) job.data = ScriptOutputSerializer(script).data job.terminate() except Exception as e: stacktrace = traceback.format_exc() script.log_failure( f"An exception occurred: `{type(e).__name__}: {e}`\n```\n{stacktrace}\n```" ) script.log_info("Database changes have been reverted due to error.") logger.error(f"Exception raised during script execution: {e}") clear_webhooks.send(request) job.data = ScriptOutputSerializer(script).data job.terminate(status=JobStatusChoices.STATUS_ERRORED) logger.info(f"Script completed in {job.duration}") User = get_user_model() # Params script = options['script'] loglevel = options['loglevel'] commit = options['commit'] try: data = json.loads(options['data']) except TypeError: data = {} module_name, script_name = script.split('.', 1) module, script = get_module_and_script(module_name, script_name) # Take user from command line if provided and exists, other if options['user']: try: user = User.objects.get(username=options['user']) except User.DoesNotExist: user = User.objects.filter(is_superuser=True).order_by('pk')[0] else: user = User.objects.filter(is_superuser=True).order_by('pk')[0] # Setup logging to Stdout formatter = logging.Formatter(f'[%(asctime)s][%(levelname)s] - %(message)s') stdouthandler = logging.StreamHandler(sys.stdout) stdouthandler.setLevel(logging.DEBUG) stdouthandler.setFormatter(formatter) logger = logging.getLogger(f"netbox.scripts.{script.full_name}") logger.addHandler(stdouthandler) try: logger.setLevel({ 'critical': logging.CRITICAL, 'debug': logging.DEBUG, 'error': logging.ERROR, 'fatal': logging.FATAL, 'info': logging.INFO, 'warning': logging.WARNING, }[loglevel]) except KeyError: raise CommandError(f"Invalid log level: {loglevel}") # Initialize the script form script = script() form = script.as_form(data, None) # Create the job job = Job.objects.create( object=module, name=script.name, 
user=User.objects.filter(is_superuser=True).order_by('pk')[0], job_id=uuid.uuid4() ) request = NetBoxFakeRequest({ 'META': {}, 'POST': data, 'GET': {}, 'FILES': {}, 'user': user, 'path': '', 'id': job.job_id }) if form.is_valid(): job.status = JobStatusChoices.STATUS_RUNNING job.save() logger.info(f"Running script (commit={commit})") script.request = request # Execute the script. If commit is True, wrap it with the change_logging context manager to ensure we process # change logging, webhooks, etc. with change_logging(request): METHOD_NAME() else: logger.error('Data is not valid:') for field, errors in form.errors.get_json_data().items(): for error in errors: logger.error(f'\t{field}: {error.get("message")}') job.status = JobStatusChoices.STATUS_ERRORED job.save()
null
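A hedged invocation sketch for the management command above, driven through Django's call_command; the registered command name and the script path are assumptions, while the option names come from add_arguments.

from django.core.management import call_command

call_command(
    "runscript",                      # assumed registered name for this command
    "my_module.MyScript",             # hypothetical module.ScriptName
    loglevel="debug",
    commit=True,                      # without this, changes are rolled back via AbortTransaction
    user="admin",                     # falls back to the first superuser if missing or unknown
    data='{"some_field": "value"}',   # JSON blob fed to the script form
)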
private link service connection state
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetPrivateEndpointConnectionResult', 'AwaitableGetPrivateEndpointConnectionResult', 'get_private_endpoint_connection', 'get_private_endpoint_connection_output', ] @pulumi.output_type class GetPrivateEndpointConnectionResult: """ A private endpoint connection """ def __init__(__self__, id=None, name=None, private_endpoint=None, METHOD_NAME=None, provisioning_state=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if private_endpoint and not isinstance(private_endpoint, dict): raise TypeError("Expected argument 'private_endpoint' to be a dict") pulumi.set(__self__, "private_endpoint", private_endpoint) if METHOD_NAME and not isinstance(METHOD_NAME, dict): raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict") pulumi.set(__self__, "private_link_service_connection_state", METHOD_NAME) if provisioning_state and not isinstance(provisioning_state, str): raise TypeError("Expected argument 'provisioning_state' to be a str") pulumi.set(__self__, "provisioning_state", provisioning_state) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="privateEndpoint") def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']: """ The private endpoint which the connection belongs to. """ return pulumi.get(self, "private_endpoint") @property @pulumi.getter(name="privateLinkServiceConnectionState") def METHOD_NAME(self) -> Optional['outputs.PrivateLinkServiceConnectionStateResponse']: """ Connection state of the private endpoint connection. """ return pulumi.get(self, "private_link_service_connection_state") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> str: """ Provisioning state of the private endpoint connection. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. 
"Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetPrivateEndpointConnectionResult( id=self.id, name=self.name, private_endpoint=self.private_endpoint, METHOD_NAME=self.METHOD_NAME, provisioning_state=self.provisioning_state, type=self.type) def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None, resource_group_name: Optional[str] = None, workspace_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult: """ Gets a private endpoint connection. :param str private_endpoint_connection_name: The name of the private endpoint connection. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str workspace_name: The name of the workspace. """ __args__ = dict() __args__['privateEndpointConnectionName'] = private_endpoint_connection_name __args__['resourceGroupName'] = resource_group_name __args__['workspaceName'] = workspace_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:synapse/v20210601preview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value return AwaitableGetPrivateEndpointConnectionResult( id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), private_endpoint=pulumi.get(__ret__, 'private_endpoint'), METHOD_NAME=pulumi.get(__ret__, 'private_link_service_connection_state'), provisioning_state=pulumi.get(__ret__, 'provisioning_state'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_private_endpoint_connection) def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, workspace_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]: """ Gets a private endpoint connection. :param str private_endpoint_connection_name: The name of the private endpoint connection. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str workspace_name: The name of the workspace. """ ...
null
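A minimal usage sketch for the getter above; the resource names are placeholders and the import path assumes the generated pulumi_azure_native package layout.

import pulumi
from pulumi_azure_native.synapse.v20210601preview import get_private_endpoint_connection

conn = get_private_endpoint_connection(
    private_endpoint_connection_name="example-connection",
    resource_group_name="example-rg",
    workspace_name="example-workspace",
)
pulumi.export("provisioning_state", conn.provisioning_state)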
allocate lock
"""Drop-in replacement for the thread module. Meant to be used as a brain-dead substitute so that threaded code does not need to be rewritten for when the thread module is not present. Suggested usage is:: try: import thread except ImportError: import dummy_thread as thread """ # Exports only things specified by thread documentation; # skipping obsolete synonyms allocate(), start_new(), exit_thread(). __all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock', 'interrupt_main', 'LockType'] import traceback as _traceback class error(Exception): """Dummy implementation of thread.error.""" def __init__(self, *args): self.args = args def start_new_thread(function, args, kwargs={}): """Dummy implementation of thread.start_new_thread(). Compatibility is maintained by making sure that ``args`` is a tuple and ``kwargs`` is a dictionary. If an exception is raised and it is SystemExit (which can be done by thread.exit()) it is caught and nothing is done; all other exceptions are printed out by using traceback.print_exc(). If the executed function calls interrupt_main the KeyboardInterrupt will be raised when the function returns. """ if type(args) != type(tuple()): raise TypeError("2nd arg must be a tuple") if type(kwargs) != type(dict()): raise TypeError("3rd arg must be a dict") global _main _main = False try: function(*args, **kwargs) except SystemExit: pass except: _traceback.print_exc() _main = True global _interrupt if _interrupt: _interrupt = False raise KeyboardInterrupt def exit(): """Dummy implementation of thread.exit().""" raise SystemExit def get_ident(): """Dummy implementation of thread.get_ident(). Since this module should only be used when threadmodule is not available, it is safe to assume that the current process is the only thread. Thus a constant can be safely returned. """ return -1 def METHOD_NAME(): """Dummy implementation of thread.allocate_lock().""" return LockType() def stack_size(size=None): """Dummy implementation of thread.stack_size().""" if size is not None: raise error("setting thread stack size not supported") return 0 class LockType(object): """Class implementing dummy implementation of thread.LockType. Compatibility is maintained by maintaining self.locked_status which is a boolean that stores the state of the lock. Pickling of the lock, though, should not be done since if the thread module is then used with an unpickled ``lock()`` from here problems could occur from this class not having atomic methods. """ def __init__(self): self.locked_status = False def acquire(self, waitflag=None): """Dummy implementation of acquire(). For blocking calls, self.locked_status is automatically set to True and returned appropriately based on value of ``waitflag``. If it is non-blocking, then the value is actually checked and not set if it is already acquired. This is all done so that threading.Condition's assert statements aren't triggered and throw a little fit. """ if waitflag is None or waitflag: self.locked_status = True return True else: if not self.locked_status: self.locked_status = True return True else: return False __enter__ = acquire def __exit__(self, typ, val, tb): self.release() def release(self): """Release the dummy lock.""" # XXX Perhaps shouldn't actually bother to test? Could lead # to problems for complex, threaded code. 
if not self.locked_status: raise error self.locked_status = False return True def locked(self): return self.locked_status # Used to signal that interrupt_main was called in a "thread" _interrupt = False # True when not executing in a "thread" _main = True def interrupt_main(): """Set _interrupt flag to True to have start_new_thread raise KeyboardInterrupt upon exiting.""" if _main: raise KeyboardInterrupt else: global _interrupt _interrupt = True
null
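A short sketch of the fallback pattern suggested in the module docstring above; allocate_lock is the name this row's label suggests for the masked method, and the worker function is a placeholder.

try:
    import thread
except ImportError:
    import dummy_thread as thread   # this module: calls run synchronously

def work(message):
    pass  # placeholder task; exceptions other than SystemExit are printed, not raised

lock = thread.allocate_lock()
lock.acquire()
try:
    thread.start_new_thread(work, ("hello",))   # runs immediately in this "thread"
finally:
    lock.release()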
get dummy pillow
import json from django.test import SimpleTestCase, TestCase, override_settings from pillowtop import get_all_pillow_instances from corehq.apps.callcenter.tests.test_utils import CallCenterDomainMockTest from corehq.apps.hqadmin.utils import check_for_rewind from corehq.util.test_utils import generate_cases from testapps.test_pillowtop.utils import real_pillow_settings from ..models import HistoricalPillowCheckpoint from ..utils import EPSILON, parse_celery_pings, parse_celery_workers def METHOD_NAME(): from pillowtop.tests.utils import make_fake_constructed_pillow return make_fake_constructed_pillow('dummy pillow', 'test_checkpoint_seq_store') DummyPillow = METHOD_NAME @override_settings(PILLOWTOPS={'test': ['corehq.apps.hqadmin.tests.test_utils.DummyPillow']}) class TestPillowCheckpointSeqStore(TestCase): def setUp(self): super(TestPillowCheckpointSeqStore, self).setUp() self.pillow = DummyPillow() def tearDown(self): super(TestPillowCheckpointSeqStore, self).tearDown() def test_basic_cloudant_seq(self): seq = '1-blahblah' self.pillow.set_checkpoint({'seq': seq}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) store = HistoricalPillowCheckpoint.get_latest(self.pillow.checkpoint.checkpoint_id) self.assertEqual(store.seq, seq) def test_basic_couchdb_seq(self): seq = 100 self.pillow.set_checkpoint({'seq': seq}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) store = HistoricalPillowCheckpoint.get_latest(self.pillow.checkpoint.checkpoint_id) self.assertEqual(store.seq, str(seq)) def test_small_rewind(self): """ We should not notify if the seq is not significantly less than the previous """ seq = '10-blahblah' self.pillow.set_checkpoint({'seq': seq}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) seq_rewind = '9-blahblah' self.pillow.set_checkpoint({'seq': seq_rewind}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) has_rewound, historical_seq = check_for_rewind(self.pillow.checkpoint) self.assertFalse(has_rewound) self.assertEqual(historical_seq, seq) def test_large_rewind(self): """ We should notify if the seq is significantly less than the previous and not update the seq """ seq = '{}-blahblah'.format(EPSILON + 10) self.pillow.set_checkpoint({'seq': seq}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) seq_rewind = '9-blahblah' self.pillow.set_checkpoint({'seq': seq_rewind}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) has_rewound, historical_seq = check_for_rewind(self.pillow.checkpoint) self.assertTrue(has_rewound) self.assertEqual(historical_seq, seq) def test_get_latest_for_pillow(self): seq = '10-blahblah' self.pillow.set_checkpoint({'seq': seq}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) store = HistoricalPillowCheckpoint.get_latest_for_pillow('DummyPillow') self.assertIsNotNone(store) store = HistoricalPillowCheckpoint.get_latest_for_pillow('DummyPillowThatDoesNotExist') self.assertIsNone(store) def test_get_historical_max(self): seq0 = '12-blahblah' self.pillow.set_checkpoint({'seq': seq0}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) seq1 = '10-blahblah' self.pillow.set_checkpoint({'seq': seq1}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) seq2 = '2-blahblah' self.pillow.set_checkpoint({'seq': seq2}) HistoricalPillowCheckpoint.create_checkpoint_snapshot(self.pillow.checkpoint) store = 
HistoricalPillowCheckpoint.get_historical_max(self.pillow.checkpoint.checkpoint_id) self.assertIsNotNone(store) self.assertEqual(store.seq, seq0) store = HistoricalPillowCheckpoint.get_historical_max('CheckpointThatDoesNotExist') self.assertIsNone(store) class TestHistoricalPillowCheckpoint(CallCenterDomainMockTest): @real_pillow_settings() def test_all_pillows(self): for pillow in get_all_pillow_instances(): checkpoint = pillow.checkpoint current_seq = checkpoint.get_current_sequence_id() HistoricalPillowCheckpoint.create_checkpoint_snapshot(checkpoint) latest = HistoricalPillowCheckpoint.get_latest(checkpoint.checkpoint_id) checkpoint.reset() checkpoint.update_to(latest.seq) if checkpoint.get_current_sequence_id() != current_seq: self.assertDictEqual(json.loads(checkpoint.get_current_sequence_id()), json.loads(current_seq)) class TestParseCeleryWorkerPings(SimpleTestCase): """ Ensures that we correctly response the celery ping responses """ def test_celery_worker_pings(self): response = parse_celery_pings([ {'celery@myhost': {'ok': 'pong'}}, {'celery@otherhost': {'ok': 'pong'}}, {'celery@yikes': {'ok': 'notpong'}}, ]) self.assertEqual(response, { 'celery@myhost': True, 'celery@otherhost': True, 'celery@yikes': False, }) def test_celery_worker_pings_empty(self): response = parse_celery_pings([]) self.assertEqual(response, {}) class TestParseCeleryWorkers(SimpleTestCase): """ Ensures that we parse the hosts returned from flower into workers we expect to be running and workers we don't. """ @generate_cases([ # Ensures we correctly parse a single regular worker ({'regular_host': True}, (['regular_host'], [])), # Ensures we correctly parse a single timestamped worker ({'main_.20_timestamp': True}, (['main_.20_timestamp'], [])), # Ensures we parse timestamped and regular ({ 'main_.40_timestamp': True, 'regular_host': True, }, (['regular_host', 'main_.40_timestamp'], [])), # Ensures we correctly parse multiple timestamped workers ({ 'main_.40_timestamp': True, 'main_.20_timestamp': True, 'main_.30_timestamp': True, }, (['main_.40_timestamp'], ['main_.30_timestamp', 'main_.20_timestamp'])), # Ensures we correctly parse multiple timestamped workers ({ 'main_.40_timestamp': True, 'main_.20_timestamp': True, 'main_.30_timestamp': True, 'secondary_.30_timestamp': True, 'secondary_.20_timestamp': True, }, ( ['main_.40_timestamp', 'secondary_.30_timestamp'], ['main_.30_timestamp', 'main_.20_timestamp', 'secondary_.20_timestamp'], )), ], TestParseCeleryWorkers) def test_parse_celery_workers(self, workers, expected): self.assertEqual(parse_celery_workers(workers), expected)
null
test general for multi qubit gate
# Copyright (C) Unitary Fund # # This source code is licensed under the GPL license (v3) found in the # LICENSE file in the root directory of this source tree. """Unit tests for DDD rules.""" import pytest from cirq import CNOT, Circuit, I, LineQubit, X, Y, Z, bit_flip from mitiq.ddd.rules.rules import general_rule, repeated_rule, xx, xyxy, yy from mitiq.utils import _equal @pytest.mark.parametrize( "slack_length", [4, 5, 8, 13, 21, 34], ) @pytest.mark.parametrize( "gates", [ [X, X], [X, Y, X, Y], [Y, Y], [X, Y, Z], ], ) def test_general_sequences(slack_length, gates): sequence = general_rule( slack_length=slack_length, gates=gates, ) gate_set = {X, Y, Z} seq_gates = [op.gate for op in sequence.all_operations()] assert len(sequence) == slack_length assert gates == [gate for gate in seq_gates if gate in gate_set] @pytest.mark.parametrize( "slack_length", [5, 8, 13, 21, 34], ) @pytest.mark.parametrize( "rule", [ xx, xyxy, yy, ], ) def test_built_in_sequences(rule, slack_length): name = rule.__name__ sequence = rule(slack_length) gates = [X if i == "x" else Y for i in name] gate_set = {X, Y} seq_gates = [op.gate for op in sequence.all_operations()] assert len(sequence) == slack_length assert gates == [gate for gate in seq_gates if gate in gate_set] @pytest.mark.parametrize( ("slack_length", "rule", "sequence"), [ ( 5, xx, Circuit( [ X(LineQubit(0)) if i % 2 else I(LineQubit(0)) for i in range(5) ] ), ), ( 5, yy, Circuit( [ Y(LineQubit(0)) if i % 2 else I(LineQubit(0)) for i in range(5) ] ), ), ( 4, xyxy, Circuit( [ Y(LineQubit(0)) if i % 2 else X(LineQubit(0)) for i in range(4) ] ), ), ], ) def test_exact_sequences(slack_length, rule, sequence): sequence_to_test = rule(slack_length) assert _equal(sequence_to_test, sequence) @pytest.mark.parametrize( "slack_length", [1, 2, 3, 5, 8, 13, 21, 34], ) @pytest.mark.parametrize("spacing", [i for i in range(5, 7)]) def test_rule_failures(slack_length, spacing): num_decoupling_gates = 3 if slack_length < num_decoupling_gates: sequence = general_rule( slack_length=slack_length, spacing=spacing, gates=[X, Y, Z], ) assert len(sequence) == 0 elif slack_length < ( (num_decoupling_gates + 1) * spacing + num_decoupling_gates ): sequence = general_rule( slack_length=slack_length, spacing=spacing, gates=[X, Y, Z], ) assert len(sequence) == 0 else: sequence = general_rule( slack_length=slack_length, spacing=spacing, gates=[X, Y, Z], ) assert len(sequence) == slack_length @pytest.mark.parametrize( "slack_length", [1, 2, 3, 5], ) @pytest.mark.parametrize( "gates", [ [X], [Y], [Z], ], ) def test_general_for_incomplete_rule(slack_length, gates): with pytest.raises(ValueError, match="too short to make a ddd sequence"): general_rule( slack_length=slack_length, gates=gates, ) @pytest.mark.parametrize( "slack_length", [3, 5], ) @pytest.mark.parametrize( "gates", [ [CNOT, X, Y], ], ) def METHOD_NAME(slack_length, gates): with pytest.raises(ValueError, match="Wrong number of qubits"): general_rule( slack_length=slack_length, gates=gates, ) @pytest.mark.parametrize( "slack_length", [4, 5, 8, 13, 21, 34], ) @pytest.mark.parametrize( "gates", [ [X, X], [X, Y, X, Y], [Y, Y], [X, Y, Z], ], ) def test_repeated_sequences(slack_length, gates): sequence = repeated_rule( slack_length=slack_length, gates=gates, ) num_reps = slack_length // len(gates) gate_set = {X, Y, Z} seq_gates = [op.gate for op in sequence.all_operations()] assert len(sequence) == slack_length assert gates * num_reps == [gate for gate in seq_gates if gate in gate_set] @pytest.mark.parametrize( "slack_length", 
[2, 3], ) @pytest.mark.parametrize( "gates", [ [X, Y, X, Y], [Y, Y, Y, Y], ], ) def test_short_repeated_sequences(slack_length, gates): sequence = repeated_rule( slack_length=slack_length, gates=gates, ) assert len(sequence) == 0 @pytest.mark.parametrize( "gates", [ [bit_flip(p=0.1), bit_flip(p=0.1)], [X, X, X], ], ) def test_not_unitary(gates): if bit_flip(p=0.1) in gates: with pytest.raises(TypeError, match="cirq.unitary failed"): general_rule(slack_length=17, gates=gates) else: with pytest.raises( ValueError, match="is not equivalent to the identity" ): general_rule(slack_length=17, gates=gates)
null
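A small sketch of the rule under test, using the same imports as the test module above; the slack length and gate pattern are arbitrary choices.

from cirq import X, Y
from mitiq.ddd.rules.rules import general_rule, xyxy

sequence = general_rule(slack_length=8, gates=[X, Y, X, Y])
assert len(sequence) == 8     # the XYXY gates padded with identities to fill the slack
assert len(xyxy(8)) == 8      # built-in shorthand for the same pattern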
crate unpack
# ex:ts=4:sw=4:sts=4:et # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- """ BitBake 'Fetch' implementation for crates.io """ # Copyright (C) 2016 Doug Goldstein # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Based on functions from the base bb module, Copyright 2003 Holger Schurig import hashlib import json import os import shutil import subprocess import bb from bb.fetch2 import logger, subprocess_setup, UnpackError from bb.fetch2.wget import Wget class Crate(Wget): """Class to fetch crates via wget""" def _cargo_bitbake_path(self, rootdir): return os.path.join(rootdir, "cargo_home", "bitbake") def supports(self, ud, d): """ Check to see if a given url is for this fetcher """ return ud.type in ['crate'] def recommends_checksum(self, urldata): return False def urldata_init(self, ud, d): """ Sets up to download the respective crate from crates.io """ if ud.type == 'crate': self._crate_urldata_init(ud, d) super(Crate, self).urldata_init(ud, d) def _crate_urldata_init(self, ud, d): """ Sets up the download for a crate """ # URL syntax is: crate://NAME/VERSION # break the URL apart by / parts = ud.url.split('/') if len(parts) < 5: raise bb.fetch2.ParameterError("Invalid URL: Must be crate://HOST/NAME/VERSION", ud.url) # last field is version version = parts[len(parts) - 1] # second to last field is name name = parts[len(parts) - 2] # host (this is to allow custom crate registries to be specified host = '/'.join(parts[2:len(parts) - 2]) # if using upstream just fix it up nicely if host == 'crates.io': host = 'crates.io/api/v1/crates' ud.url = "https://%s/%s/%s/download" % (host, name, version) ud.parm['downloadfilename'] = "%s-%s.crate" % (name, version) ud.parm['name'] = name logger.debug(2, "Fetching %s to %s" % (ud.url, ud.parm['downloadfilename'])) def unpack(self, ud, rootdir, d): """ Uses the crate to build the necessary paths for cargo to utilize it """ if ud.type == 'crate': return self.METHOD_NAME(ud, rootdir, d) else: super(Crate, self).unpack(ud, rootdir, d) def METHOD_NAME(self, ud, rootdir, d): """ Unpacks a crate """ thefile = ud.localpath # possible metadata we need to write out metadata = {} # change to the rootdir to unpack but save the old working dir save_cwd = os.getcwd() os.chdir(rootdir) pn = d.getVar('BPN') if pn == ud.parm.get('name'): cmd = "tar -xz --no-same-owner -f %s" % thefile else: cargo_bitbake = self._cargo_bitbake_path(rootdir) cmd = "tar -xz --no-same-owner -f %s -C %s" % (thefile, cargo_bitbake) # ensure we've got these paths made bb.utils.mkdirhier(cargo_bitbake) # generate metadata necessary with open(thefile, 'rb') as f: # get the SHA256 of the original tarball tarhash = hashlib.sha256(f.read()).hexdigest() metadata['files'] = {} metadata['package'] = tarhash # path it path = d.getVar('PATH') if path: cmd = "PATH=\"%s\" %s" % (path, cmd) bb.note("Unpacking %s to %s/" % (thefile, os.getcwd())) ret = subprocess.call(cmd, preexec_fn=subprocess_setup, 
shell=True) os.chdir(save_cwd) if ret != 0: raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), ud.url) # if we have metadata to write out.. if len(metadata) > 0: cratepath = os.path.splitext(os.path.basename(thefile))[0] bbpath = self._cargo_bitbake_path(rootdir) mdfile = '.cargo-checksum.json' mdpath = os.path.join(bbpath, cratepath, mdfile) with open(mdpath, "w") as f: json.dump(metadata, f)
null
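A worked example of the URL rewrite performed in _crate_urldata_init above, using an arbitrary crate name and version.

url = "crate://crates.io/glob/0.2.11"     # example crate URL
parts = url.split("/")                    # ['crate:', '', 'crates.io', 'glob', '0.2.11']
version, name = parts[-1], parts[-2]
host = "/".join(parts[2:-2])
if host == "crates.io":                   # the upstream registry gets the API path
    host = "crates.io/api/v1/crates"
print("https://%s/%s/%s/download" % (host, name, version))
# -> https://crates.io/api/v1/crates/glob/0.2.11/download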
get m t obj
""" Description: todo to be written Usage: todo tobe written Author: YingzhiGou Date: 20/06/2017 """ import os import mtpy.core.mt as mt from mtpy.utils.mtpylog import MtPyLog DEFAULT_GROUP_PREFIX = 'Group' DEFAULT_GROUP = "Default Group" class FileHandler(): """ Description: container that holds all file references and MT object created from the files """ def __init__(self): self._station_dict = dict() self._logger = MtPyLog.get_mtpy_logger(self.__class__.__name__) self._file_dict = dict() self._group_dict = dict() self._file_to_groups = dict() def add_file(self, file_name, group_id=None): """ :param file_name: :param group_id: :type file_name: str :type group_id: str :return: """ file_ref = mt_obj = None if isinstance(file_name, str): if os.path.isfile(file_name): if file_name in self._file_dict and self._file_dict[file_name] is not None: self._logger.warning("File %s already loaded." % file_name) file_ref = file_name mt_obj = self.METHOD_NAME(file_name) else: file_ref = file_name self._logger.info("loading %s" % file_name) mt_obj = mt.MT(file_name) elif isinstance(file_name, mt.MT): mt_obj = file_name file_ref = mt_obj.fn else: raise FileHandlingException("Unsupported input type %s" % type(file_name)) # add file in to container self._logger.info("referencing %s to %s" % (file_ref, mt_obj.station)) if file_ref not in self._file_dict: self._file_dict[file_ref] = mt_obj if mt_obj.station in self._station_dict: raise FileHandlingException("Station %s already loaded from %s, you could try to unload this first" % (mt_obj.station, self.station2ref(mt_obj.station))) else: self._station_dict[mt_obj.station] = (file_ref) self._file_to_groups[file_ref] = set() # add file to group return self.add_to_group(group_id, file_ref) def add_files(self, file_list, group_id=None): for file_name in file_list: self.add_file(file_name, group_id) return True def station2ref(self, station): if station in self._station_dict: return self._station_dict[station] else: return None def add_to_group(self, group_ids, file_ref): """ add a file ref to a group :param group_id: :type group_id str :param file_ref: :type file_ref str :return: True """ if not group_ids: group_ids = [DEFAULT_GROUP] elif isinstance(group_ids, str): group_ids = [group_ids] for group_id in group_ids: if isinstance(group_id, str): if group_id not in self._group_dict: self._group_dict[group_id] = set() if file_ref in self._file_dict: self._logger.info("adding %s to group \"%s\"" % (self._file_dict[file_ref].station, group_id)) self._group_dict[group_id].add(file_ref) self._file_to_groups[file_ref].add(group_id) self._logger.info("%s now in group %s" % (self._file_dict[file_ref].station, ", ".join(self._file_to_groups[file_ref]))) else: self._logger.error("File %s has not yet been loaded." 
% file_ref) return False else: self._logger.warning("Unsupported group ID \"%s\", add file %s to \"%s\"" % ( type(group_id), file_ref, DEFAULT_GROUP)) return self.add_to_group(DEFAULT_GROUP, file_ref) return True def remove_file_from_group(self, group_id, file_ref): """ :param group_id: :type group_id str :param file_ref: :type file_ref str :return: """ self._logger.info("Remove %s from group %s" % (self.METHOD_NAME(file_ref).station, group_id)) if group_id in self._group_dict: try: self._group_dict[group_id].remove(file_ref) self._file_to_groups[file_ref].remove(group_id) if not self._file_to_groups[file_ref]: self.add_to_group(DEFAULT_GROUP, file_ref) return True except KeyError: return False return False def unload(self, file_ref): self._logger.info("unload %s" % file_ref) for group in self._file_to_groups[file_ref]: self._group_dict[group].remove(file_ref) station = self.METHOD_NAME(file_ref).station del self._station_dict[station] del self._file_to_groups[file_ref] del self._file_dict[file_ref] def remove_group(self, group_id): self._logger.info("Remove group %s" % group_id) members = self.get_group_members(group_id) if members is not None: for ref in members: self._file_to_groups[ref].remove(group_id) if not self._file_to_groups[ref]: self.add_to_group(DEFAULT_GROUP, ref) del self._group_dict[group_id] def get_groups(self): return list(self._group_dict.keys()) # properties def get_group_members(self, group): if group in self._group_dict: return self._group_dict[group] else: self._logger.error("Group \"%s\" does not exist." % group) return None def METHOD_NAME(self, ref): if ref in self._file_dict: return self._file_dict[ref] else: self._logger.warning("File \"%s\" is not loaded" % ref) return None def get_file_refs(self): return list(self._file_dict.keys()) def create_group(self, group_id): if group_id not in self._group_dict: self._group_dict[group_id] = set() return True else: self._logger.warning("Group %s exists!" % group_id) return False class FileHandlingException(Exception): def __init__(self, *args, **kwargs): Exception.__init__(self, *args, **kwargs)
null
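A hypothetical usage sketch for the container above; the EDI paths and group name are placeholders, and the accessor is referred to by its masked name since only the label hints at its real spelling.

handler = FileHandler()
handler.add_file("data/station_01.edi", group_id="Survey A")
handler.add_files(["data/station_02.edi", "data/station_03.edi"], "Survey A")

for ref in handler.get_group_members("Survey A"):
    mt_obj = handler.METHOD_NAME(ref)   # masked accessor returning the MT object for a file ref
    print(mt_obj.station)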
add cli sdist
#!/usr/bin/env python """Script to create self contained install. The goal of this script is simple: * Create a self contained install of the CLI that has requires no external resources during installation. It does this by using all the normal python tooling (virtualenv, pip) but provides a simple, easy to use interface for those not familiar with the python ecosystem. """ import os import shutil import subprocess import sys import tempfile import zipfile from contextlib import contextmanager EXTRA_RUNTIME_DEPS = [ # Use an up to date virtualenv/pip/setuptools on > 2.6. ('virtualenv', '16.7.8'), ] BUILDTIME_DEPS = [ ('setuptools-scm', '3.3.3'), ('wheel', '0.33.6'), ] PIP_DOWNLOAD_ARGS = '--no-binary :all:' # The constraints file is used to lock the version of dateutils needed # to be 2.8.0 until we can drop py26/py33 support. This lets us put # botocore's version range on dateutils to be <3.0. CONSTRAINTS_FILE = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'assets', 'constraints-bundled.txt' ) class BadRCError(Exception): pass @contextmanager def cd(dirname): original = os.getcwd() os.chdir(dirname) try: yield finally: os.chdir(original) def run(cmd): sys.stdout.write("Running cmd: %s\n" % cmd) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() rc = p.wait() if p.returncode != 0: raise BadRCError("Bad rc (%s) for cmd '%s': %s" % ( rc, cmd, stderr + stdout)) return stdout def create_scratch_dir(): # This creates the dir where all the bundling occurs. # First we need a top level dir. dirname = tempfile.mkdtemp(prefix='bundle') # Then we need to create a dir where all the packages # will come from. os.mkdir(os.path.join(dirname, 'packages')) os.mkdir(os.path.join(dirname, 'packages', 'setup')) return dirname def download_package_tarballs(dirname, packages): with cd(dirname): for package, package_version in packages: run('%s -m pip download %s==%s %s' % ( sys.executable, package, package_version, PIP_DOWNLOAD_ARGS )) def download_cli_deps(scratch_dir): cfnlint_dir = os.path.dirname( os.path.dirname(os.path.abspath(__file__))) with cd(scratch_dir): run('pip download -c %s %s %s' % ( CONSTRAINTS_FILE, PIP_DOWNLOAD_ARGS, cfnlint_dir)) def _remove_cli_zip(scratch_dir): clidir = [f for f in os.listdir(scratch_dir) if f.startswith('cfn-lint')] assert len(clidir) == 1 os.remove(os.path.join(scratch_dir, clidir[0])) def METHOD_NAME(scratch_dir): cfnlint_dir = os.path.dirname( os.path.dirname(os.path.abspath(__file__))) if os.path.exists(os.path.join(cfnlint_dir, 'dist')): shutil.rmtree(os.path.join(cfnlint_dir, 'dist')) with cd(cfnlint_dir): run('%s setup.py sdist' % sys.executable) filename = os.listdir('dist')[0] shutil.move(os.path.join('dist', filename), os.path.join(scratch_dir, filename)) def create_bootstrap_script(scratch_dir): install_script = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'install') shutil.copy(install_script, os.path.join(scratch_dir, 'install')) def zip_dir(scratch_dir): basename = 'cfn-lint-bundle.zip' dirname, tmpdir = os.path.split(scratch_dir) final_dir_name = os.path.join(dirname, 'cfn-lint-bundle') if os.path.isdir(final_dir_name): shutil.rmtree(final_dir_name) shutil.move(scratch_dir, final_dir_name) with cd(dirname): with zipfile.ZipFile(basename, 'w', zipfile.ZIP_DEFLATED) as zipped: for root, dirnames, filenames in os.walk('cfn-lint-bundle'): for filename in filenames: zipped.write(os.path.join(root, filename)) return os.path.join(dirname, basename) def 
verify_preconditions(): # The pip version looks like: # 'pip 1.4.1 from ....' pip_version = run( '%s -m pip --version' % sys.executable).strip().split()[1] # Virtualenv version just has the version string: '1.14.5\n' virtualenv_version = run( '%s -m virtualenv --version' % sys.executable).strip() _min_version_required('9.0.1', pip_version, 'pip') _min_version_required('15.1.0', virtualenv_version, 'virtualenv') def _min_version_required(min_version, actual_version, name): # precondition: min_version is major.minor.patch # actual_version is major.minor.patch min_split = min_version.split('.') actual_split = actual_version.decode('utf-8').split('.') for min_version_part, actual_version_part in zip(min_split, actual_split): if int(actual_version_part) >= int(min_version_part): return raise ValueError("%s requires at least version %s, but version %s was " "found." % (name, min_version, actual_version)) def main(): verify_preconditions() scratch_dir = create_scratch_dir() package_dir = os.path.join(scratch_dir, 'packages') print("Bundle dir at: %s" % scratch_dir) download_package_tarballs( package_dir, packages=EXTRA_RUNTIME_DEPS, ) # Some packages require setup time dependencies, and so we will need to # manually install them. We isolate them to a particular directory so we # can run the install before the things they're dependent on. We have to do # this because pip won't actually find them since it doesn't handle build # dependencies. setup_dir = os.path.join(package_dir, 'setup') download_package_tarballs( setup_dir, packages=BUILDTIME_DEPS, ) download_cli_deps(package_dir) METHOD_NAME(package_dir) create_bootstrap_script(scratch_dir) zip_filename = zip_dir(scratch_dir) print("Zipped bundle installer is at: %s" % zip_filename) if __name__ == '__main__': main()
null
test scan delete unseen key while scanning
from time import sleep import pytest import redis from test.testtools import key_val_dict def test_sscan_delete_key_while_scanning_should_not_returns_it_in_scan(r: redis.Redis): size = 600 name = 'sscan-test' all_keys_set = {f'{i}'.encode() for i in range(size)} r.sadd(name, *[k for k in all_keys_set]) assert r.scard(name) == size cursor, keys = r.sscan(name, 0) assert len(keys) < len(all_keys_set) key_to_remove = next(x for x in all_keys_set if x not in keys) assert r.srem(name, key_to_remove) == 1 assert not r.sismember(name, key_to_remove) while cursor != 0: cursor, data = r.sscan(name, cursor=cursor) keys.extend(data) assert len(set(keys)) == len(keys) assert len(keys) == size - 1 assert key_to_remove not in keys def test_hscan_delete_key_while_scanning_should_not_returns_it_in_scan(r: redis.Redis): size = 600 name = 'hscan-test' all_keys_dict = key_val_dict(size=size) r.hset(name, mapping=all_keys_dict) assert len(r.hgetall(name)) == size cursor, keys = r.hscan(name, 0) assert len(keys) < len(all_keys_dict) key_to_remove = next(x for x in all_keys_dict if x not in keys) assert r.hdel(name, key_to_remove) == 1 assert r.hget(name, key_to_remove) is None while cursor != 0: cursor, data = r.hscan(name, cursor=cursor) keys.update(data) assert len(set(keys)) == len(keys) assert len(keys) == size - 1 assert key_to_remove not in keys def METHOD_NAME(r: redis.Redis): size = 30 all_keys_dict = key_val_dict(size=size) assert all(r.set(k, v) for k, v in all_keys_dict.items()) assert len(r.keys()) == size cursor, keys = r.scan() key_to_remove = next(x for x in all_keys_dict if x not in keys) assert r.delete(key_to_remove) == 1 assert r.get(key_to_remove) is None while cursor != 0: cursor, data = r.scan(cursor=cursor) keys.extend(data) assert len(set(keys)) == len(keys) assert len(keys) == size - 1 assert key_to_remove not in keys @pytest.mark.xfail def test_scan_delete_seen_key_while_scanning_should_return_all_keys(r: redis.Redis): size = 30 all_keys_dict = key_val_dict(size=size) assert all(r.set(k, v) for k, v in all_keys_dict.items()) assert len(r.keys()) == size cursor, keys = r.scan() key_to_remove = keys[0] assert r.delete(keys[0]) == 1 assert r.get(key_to_remove) is None while cursor != 0: cursor, data = r.scan(cursor=cursor) keys.extend(data) assert len(set(keys)) == len(keys) keys = set(keys) assert len(keys) == size, f"{set(all_keys_dict).difference(keys)} is not empty but should be" assert key_to_remove in keys def test_scan_add_key_while_scanning_should_return_all_keys(r: redis.Redis): size = 30 all_keys_dict = key_val_dict(size=size) assert all(r.set(k, v) for k, v in all_keys_dict.items()) assert len(r.keys()) == size cursor, keys = r.scan() r.set('new_key', 'new val') while cursor != 0: cursor, data = r.scan(cursor=cursor) keys.extend(data) keys = set(keys) assert len(keys) >= size, f"{set(all_keys_dict).difference(keys)} is not empty but should be" def test_scan(r: redis.Redis): # Set up the data for ix in range(20): k = 'scan-test:%s' % ix v = 'result:%s' % ix r.set(k, v) expected = r.keys() assert len(expected) == 20 # Ensure we know what we're testing # Test that we page through the results and get everything out results = [] cursor = '0' while cursor != 0: cursor, data = r.scan(cursor, count=6) results.extend(data) assert set(expected) == set(results) # Now test that the MATCH functionality works results = [] cursor = '0' while cursor != 0: cursor, data = r.scan(cursor, match='*7', count=100) results.extend(data) assert b'scan-test:7' in results assert b'scan-test:17' in 
results assert len(set(results)) == 2 # Test the match on iterator results = [r for r in r.scan_iter(match='*7')] assert b'scan-test:7' in results assert b'scan-test:17' in results assert len(set(results)) == 2 def test_scan_single(r: redis.Redis): r.set('foo1', 'bar1') assert r.scan(match="foo*") == (0, [b'foo1']) def test_scan_iter_single_page(r: redis.Redis): r.set('foo1', 'bar1') r.set('foo2', 'bar2') assert set(r.scan_iter(match="foo*")) == {b'foo1', b'foo2'} assert set(r.scan_iter()) == {b'foo1', b'foo2'} assert set(r.scan_iter(match="")) == set() assert set(r.scan_iter(match="foo1", _type="string")) == {b'foo1', } def test_scan_iter_multiple_pages(r: redis.Redis): all_keys = key_val_dict(size=100) assert all(r.set(k, v) for k, v in all_keys.items()) assert set(r.scan_iter()) == set(all_keys) def test_scan_iter_multiple_pages_with_match(r: redis.Redis): all_keys = key_val_dict(size=100) assert all(r.set(k, v) for k, v in all_keys.items()) # Now add a few keys that don't match the key:<number> pattern. r.set('otherkey', 'foo') r.set('andanother', 'bar') actual = set(r.scan_iter(match='key:*')) assert actual == set(all_keys) def test_scan_multiple_pages_with_count_arg(r: redis.Redis): all_keys = key_val_dict(size=100) assert all(r.set(k, v) for k, v in all_keys.items()) assert set(r.scan_iter(count=1000)) == set(all_keys) def test_scan_all_in_single_call(r: redis.Redis): all_keys = key_val_dict(size=100) assert all(r.set(k, v) for k, v in all_keys.items()) # Specify way more than the 100 keys we've added. actual = r.scan(count=1000) assert set(actual[1]) == set(all_keys) assert actual[0] == 0 @pytest.mark.slow def test_scan_expired_key(r: redis.Redis): r.set('expiringkey', 'value') r.pexpire('expiringkey', 1) sleep(1) assert r.scan()[1] == [] def test_scan_stream(r: redis.Redis): r.xadd("mystream", {"test": "value"}) assert r.type("mystream") == b"stream" for s in r.scan_iter(_type="STRING"): print(s)
null
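The tests above all lean on the same cursor-walk idiom. As a point of reference, here is a minimal sketch of that loop with redis-py; the client construction and the helper name are mine, and a fakeredis client can be dropped in the same way.

import redis

r = redis.Redis()  # assumed local server; fakeredis works identically here

def scan_all(match="*", count=100):
    """Collect every key by walking SCAN until the cursor wraps back to 0."""
    cursor, found = 0, set()
    while True:
        cursor, keys = r.scan(cursor=cursor, match=match, count=count)
        found.update(keys)
        if cursor == 0:
            return found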
test writer crop
# # DeepLabCut Toolbox (deeplabcut.org) # © A. & M.W. Mathis Labs # https://github.com/DeepLabCut/DeepLabCut # # Please see AUTHORS for contributors. # https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS # # Licensed under GNU Lesser General Public License v3.0 # import os import pytest from conftest import TEST_DATA_DIR from deeplabcut.utils.auxfun_videos import VideoWriter POS_FRAMES = 1 # Equivalent to cv2.CAP_PROP_POS_FRAMES @pytest.fixture() def video_clip(): return VideoWriter(os.path.join(TEST_DATA_DIR, "vid.avi")) def test_reader_wrong_inputs(tmp_path): with pytest.raises(ValueError): VideoWriter(str(tmp_path)) fake_vid = tmp_path / "fake.avi" fake_vid.write_bytes(b"42") with pytest.raises(IOError): VideoWriter(str(fake_vid)) def test_reader_check_integrity(video_clip): video_clip.check_integrity() log_file = os.path.join(video_clip.directory, f"{video_clip.name}.log") assert os.path.getsize(log_file) == 0 def test_reader_video_path(video_clip): assert video_clip.name == "vid" assert video_clip.format == ".avi" assert video_clip.directory == TEST_DATA_DIR def test_reader_metadata(video_clip): metadata = video_clip.metadata assert metadata["n_frames"] == video_clip.get_n_frames(True) == 256 assert metadata["fps"] == 30 assert metadata["width"] == 416 assert metadata["height"] == 374 def test_reader_wrong_fps(video_clip): with pytest.raises(ValueError): video_clip.fps = 0 def test_reader_duration(video_clip): assert video_clip.calc_duration() == pytest.approx( video_clip.calc_duration(robust=False), abs=0.01 ) def test_reader_set_frame(video_clip): with pytest.raises(ValueError): video_clip.set_to_frame(-1) video_clip.set_to_frame(2) assert int(video_clip.video.get(POS_FRAMES)) == 2 video_clip.set_to_frame(len(video_clip) + 10) assert int(video_clip.video.get(POS_FRAMES)) == len(video_clip) - 1 video_clip.reset() assert int(video_clip.video.get(POS_FRAMES)) == 0 @pytest.mark.parametrize("shrink, crop", [(1, False), (1, True), (2, False), (2, True)]) def test_reader_read_frame(video_clip, shrink, crop): if crop: video_clip.set_bbox(0, 0.5, 0, 0.5, relative=True) frame = video_clip.read_frame(shrink, crop) height, width, _ = frame.shape assert height == video_clip.height // shrink assert width == video_clip.width // shrink def test_writer_bbox(video_clip): bbox = 0, 100, 0, 100 video_clip.set_bbox(*bbox) assert video_clip.get_bbox() == bbox with pytest.raises(ValueError): video_clip.set_bbox(200, 100, 0, 100, relative=False) video_clip.set_bbox(0, 1, 0, 1.01, relative=True) assert video_clip.get_bbox(relative=True) == (0, 1, 0, 1) @pytest.mark.parametrize( "start, end", [(0, 10), ("0:0", "0:10"), ("00:00:00", "00:00:10")] ) def test_writer_shorten_invalid_timestamps(video_clip, start, end): with pytest.raises(ValueError): video_clip.shorten(start, end) def test_writer_shorten(tmp_path, video_clip): file = video_clip.shorten("00:00:00", "00:00:02", dest_folder=str(tmp_path)) vid = VideoWriter(file) assert pytest.approx(vid.calc_duration(), abs=0.1) == 2 def test_writer_split(tmp_path, video_clip): with pytest.raises(ValueError): video_clip.split(1) n_splits = 3 clips = video_clip.split(n_splits, dest_folder=str(tmp_path)) assert len(clips) == n_splits vid = VideoWriter(clips[0]) assert pytest.approx(len(vid), abs=1) == len(video_clip) // n_splits def METHOD_NAME(tmp_path, video_clip): x1, x2, y1, y2 = 0, 50, 0, 100 video_clip.set_bbox(x1, x2, y1, y2) file = video_clip.crop(dest_folder=str(tmp_path)) vid = VideoWriter(file) assert vid.dimensions == (x2 - x1, y2 - y1) 
@pytest.mark.parametrize("target_height", [200, 177]) def test_writer_rescale(tmp_path, video_clip, target_height): file = video_clip.rescale(width=-1, height=target_height, dest_folder=str(tmp_path)) vid = VideoWriter(file) assert vid.height == target_height # Verify the aspect ratio is preserved ar = video_clip.height / target_height assert vid.width == pytest.approx(video_clip.width // ar, abs=1)
null
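Outside the test fixture, the crop path exercised above boils down to a few calls. A sketch, with an assumed input video and output folder:

from deeplabcut.utils.auxfun_videos import VideoWriter

clip = VideoWriter("/data/vid.avi")            # hypothetical source video
clip.set_bbox(0, 0.5, 0, 0.5, relative=True)   # keep the upper-left quadrant, as in the read test
out_file = clip.crop(dest_folder="/tmp")       # writes the cropped copy and returns its path
print(VideoWriter(out_file).dimensions)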
test vector length setter getter
# ----------------------------------------------------------------------------- # BSD 3-Clause License # # Copyright (c) 2022-2023, Science and Technology Facilities Council # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ----------------------------------------------------------------------------- # Author R. W. Ford, STFC Daresbury Lab '''Module containing tests for the FieldVectorArgMetadata class. ''' import pytest from fparser.two import Fortran2003 from psyclone.domain.lfric.kernel import FieldVectorArgMetadata @pytest.mark.parametrize("datatype, access, function_space", [ ("GH_REAL", "GH_READ", "W0"), ("gh_real", "gh_read", "w0")]) def test_create(datatype, access, function_space): '''Test that an instance of FieldVectorArgMetadata can be created successfully. Also test that the arguments are case insensitive. ''' field_vector_arg = FieldVectorArgMetadata( datatype, access, function_space, "2") assert isinstance(field_vector_arg, FieldVectorArgMetadata) assert field_vector_arg.form == "gh_field" assert field_vector_arg._datatype == "gh_real" assert field_vector_arg._access == "gh_read" assert field_vector_arg._function_space == "w0" assert field_vector_arg._vector_length == "2" assert field_vector_arg._stencil is None def test_create_stencil(): '''Test that an instance of FieldVectorArgMetadata can be created successfully when stencil metadata is supplied. ''' field_vector_arg = FieldVectorArgMetadata( "gh_real", "gh_read", "w0", "2", stencil="xory1d") assert isinstance(field_vector_arg, FieldVectorArgMetadata) assert field_vector_arg.form == "gh_field" assert field_vector_arg._datatype == "gh_real" assert field_vector_arg._access == "gh_read" assert field_vector_arg._function_space == "w0" assert field_vector_arg._vector_length == "2" assert field_vector_arg._stencil == "xory1d" def test_init_invalid_vector_length(): '''Test that an invalid vector length supplied to the constructor raises the expected exception. 
''' with pytest.raises(TypeError) as info: _ = FieldVectorArgMetadata("GH_REAL", "GH_READ", "W0", 1) assert ("The vector size should be a string but found int." in str(info.value)) def test_init_invalid_stencil(): '''Test that an invalid stencil supplied to the constructor raises the expected exception. ''' with pytest.raises(ValueError) as info: _ = FieldVectorArgMetadata( "GH_REAL", "GH_READ", "W0", "2", stencil="invalid") assert ("The 'stencil' metadata should be a recognised value (one of " "['x1d', 'y1d', 'xory1d', 'cross', 'region', 'cross2d']) but " "found 'invalid'." in str(info.value)) def test_get_metadata(): '''Test that the get_metadata class method works as expected.''' fparser2_tree = FieldVectorArgMetadata.create_fparser2( "arg_type(GH_FIELD*3, GH_REAL, GH_READ, W0)", Fortran2003.Part_Ref) datatype, access, function_space, vector_length, stencil = \ FieldVectorArgMetadata._get_metadata(fparser2_tree) assert datatype == "GH_REAL" assert access == "GH_READ" assert function_space == "W0" assert vector_length == "3" assert stencil is None @pytest.mark.parametrize("fortran_string", [ "arg_type(GH_FIELD*3, GH_REAL, GH_READ, W0)", "arg_type(GH_FIELD*3, GH_REAL, GH_READ, W0, STENCIL(Y1D))"]) def test_fortran_string(fortran_string): '''Test that the fortran_string method works as expected.''' field_vector_arg = FieldVectorArgMetadata.create_from_fortran_string( fortran_string) result = field_vector_arg.fortran_string() assert result == fortran_string.lower() def METHOD_NAME(): '''Test that the vector length setter and getter work as expected, including raising an exception if the value is invalid. ''' field_vector_arg = FieldVectorArgMetadata("GH_REAL", "GH_READ", "W0", "2") with pytest.raises(ValueError) as info: field_vector_arg.vector_length = "invalid" assert ("The vector size should be a string containing an integer, " "but found 'invalid'." in str(info.value)) with pytest.raises(ValueError) as info: field_vector_arg.vector_length = "1" assert ("The vector size should be an integer greater than 1 but found 1." in str(info.value)) field_vector_arg.vector_length = "3" assert field_vector_arg.vector_length == "3"
null
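For context, a short sketch of how the setter/getter under test is used; the Fortran string is taken from the metadata tests above.

from psyclone.domain.lfric.kernel import FieldVectorArgMetadata

meta = FieldVectorArgMetadata.create_from_fortran_string(
    "arg_type(GH_FIELD*3, GH_REAL, GH_READ, W0)")
print(meta.vector_length)     # "3"
meta.vector_length = "4"      # must be a string holding an integer greater than 1
print(meta.fortran_string())  # roughly: arg_type(gh_field*4, gh_real, gh_read, w0)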
setup
from dataclasses import dataclass, field from typing import ClassVar, Dict, Optional from qm import generate_qua_script, qua from qm.qua import declare, for_ from qm.QuantumMachinesManager import QuantumMachinesManager from qibolab import AveragingMode from qibolab.instruments.abstract import Controller from .config import IQPortId, QMConfig, QMPort from .sequence import Sequence from .sweepers import sweep @dataclass class QMOPX(Controller): """Instrument object for controlling Quantum Machines (QM) OPX controllers. Playing pulses on QM controllers requires a ``config`` dictionary and a program written in QUA language. The ``config`` file is generated in parts in the following places in the ``register_*`` methods. The controllers, elements and pulses are all registered after a pulse sequence is given, so that the config contains only elements related to the participating qubits. The QUA program for executing an arbitrary qibolab ``PulseSequence`` is written in ``play`` and ``play_pulses`` and executed in ``execute_program``. Args: name (str): Name of the instrument instance. address (str): IP address and port for connecting to the OPX instruments. """ PortType: ClassVar = QMPort name: str address: str manager: Optional[QuantumMachinesManager] = None """Manager object used for controlling the QM OPXs.""" config: QMConfig = field(default_factory=QMConfig) """Configuration dictionary required for pulse execution on the OPXs.""" is_connected: bool = False """Boolean that shows whether we are connected to the QM manager.""" time_of_flight: int = 0 """Time of flight used for hardware signal integration.""" smearing: int = 0 """Smearing used for hardware signal integration.""" _ports: Dict[IQPortId, QMPort] = field(default_factory=dict) """Dictionary holding the ports of controllers that are connected.""" script_file_name: Optional[str] = "qua_script.txt" """Name of the file that the QUA program will dumped in that after every execution. If ``None`` the program will not be dumped. """ def __post_init__(self): super().__init__(self.name, self.address) def connect(self): """Connect to the QM manager.""" host, port = self.address.split(":") self.manager = QuantumMachinesManager(host, int(port)) def METHOD_NAME(self): """Deprecated method.""" # controllers are defined when registering pulses pass def start(self): # TODO: Start the OPX flux offsets? pass def stop(self): """Close all running Quantum Machines.""" # TODO: Use logging # log.warn("Closing all Quantum Machines.") print("Closing all Quantum Machines.") self.manager.close_all_quantum_machines() def disconnect(self): """Disconnect from QM manager.""" if self.is_connected: self.manager.close() self.is_connected = False def execute_program(self, program): """Executes an arbitrary program written in QUA language. Args: program: QUA program. Returns: TODO """ machine = self.manager.open_qm(self.config.__dict__) return machine.execute(program) @staticmethod def fetch_results(result, ro_pulses): """Fetches results from an executed experiment. Defined as ``@staticmethod`` because it is overwritten in :class:`qibolab.instruments.qm.simulator.QMSim`. 
""" # TODO: Update result asynchronously instead of waiting # for all values, in order to allow live plotting # using ``handles.is_processing()`` handles = result.result_handles handles.wait_for_all_values() results = {} for qmpulse in ro_pulses: pulse = qmpulse.pulse results[pulse.qubit] = results[pulse.serial] = qmpulse.acquisition.fetch(handles) return results def play(self, qubits, sequence, options): return self.sweep(qubits, sequence, options) def sweep(self, qubits, sequence, options, *sweepers): if not sequence: return {} buffer_dims = [len(sweeper.values) for sweeper in reversed(sweepers)] if options.averaging_mode is AveragingMode.SINGLESHOT: buffer_dims.append(options.nshots) # register flux elements for all qubits so that they are # always at sweetspot even when they are not used for qubit in qubits.values(): if qubit.flux: self.config.register_flux_element(qubit) qmsequence = Sequence.create(qubits, sequence, sweepers, self.config, self.time_of_flight, self.smearing) # play pulses using QUA with qua.program() as experiment: n = declare(int) for qmpulse in qmsequence.ro_pulses: threshold = qubits[qmpulse.pulse.qubit].threshold iq_angle = qubits[qmpulse.pulse.qubit].iq_angle qmpulse.declare_output(options, threshold, iq_angle) with for_(n, 0, n < options.nshots, n + 1): sweep(list(sweepers), qubits, qmsequence, options.relaxation_time, self.config) with qua.stream_processing(): for qmpulse in qmsequence.ro_pulses: qmpulse.acquisition.download(*buffer_dims) if self.script_file_name is not None: with open(self.script_file_name, "w") as file: file.write(generate_qua_script(experiment, self.config.__dict__)) result = self.execute_program(experiment) return self.fetch_results(result, qmsequence.ro_pulses) def play_sequences(self, qubits, sequence, options): raise NotImplementedError
null
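A bare connection sketch for the controller above. The import path and the address are assumptions, and in practice the driver is normally instantiated from a qibolab platform definition rather than by hand.

from qibolab.instruments.qm import QMOPX   # assumed import location

opx = QMOPX(name="opx", address="192.168.0.1:80")  # hypothetical host:port of the QM manager
opx.connect()   # opens the QuantumMachinesManager session
# ... build a pulse sequence and drive it via opx.play(...) or opx.sweep(...)
opx.disconnect()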
add noise
# Authors: Alexandre Gramfort <[email protected]> # Daniel Strohmeier <[email protected]> # Martin Luessi <[email protected]> # # License: BSD-3-Clause import math import numpy as np from scipy.signal import lfilter from ..cov import Covariance, compute_whitener from ..epochs import BaseEpochs from ..evoked import Evoked from .._fiff.pick import pick_info from ..forward import apply_forward from ..io import BaseRaw from ..utils import logger, verbose, check_random_state, _check_preload, _validate_type @verbose def simulate_evoked( fwd, stc, info, cov=None, nave=30, iir_filter=None, random_state=None, use_cps=True, verbose=None, ): """Generate noisy evoked data. .. note:: No projections from ``info`` will be present in the output ``evoked``. You can use e.g. :func:`evoked.add_proj <mne.Evoked.add_proj>` or :func:`evoked.set_eeg_reference <mne.Evoked.set_eeg_reference>` to add them afterward as necessary. Parameters ---------- fwd : instance of Forward A forward solution. stc : SourceEstimate object The source time courses. %(info_not_none)s Used to generate the evoked. cov : Covariance object | None The noise covariance. If None, no noise is added. nave : int Number of averaged epochs (defaults to 30). .. versionadded:: 0.15.0 iir_filter : None | array IIR filter coefficients (denominator) e.g. [1, -1, 0.2]. %(random_state)s %(use_cps)s .. versionadded:: 0.15 %(verbose)s Returns ------- evoked : Evoked object The simulated evoked data. See Also -------- simulate_raw simulate_stc simulate_sparse_stc Notes ----- To make the equivalence between snr and nave, when the snr is given instead of nave:: nave = (1 / 10 ** ((actual_snr - snr)) / 20) ** 2 where actual_snr is the snr to the generated noise before scaling. .. versionadded:: 0.10.0 """ evoked = apply_forward(fwd, stc, info, use_cps=use_cps) if cov is None: return evoked if nave < np.inf: noise = _simulate_noise_evoked(evoked, cov, iir_filter, random_state) evoked.data += noise.data / math.sqrt(nave) evoked.nave = np.int64(nave) if cov.get("projs", None): evoked.add_proj(cov["projs"]).apply_proj() return evoked def _simulate_noise_evoked(evoked, cov, iir_filter, random_state): noise = evoked.copy() noise.data[:] = 0 return _add_noise(noise, cov, iir_filter, random_state, allow_subselection=False) @verbose def METHOD_NAME(inst, cov, iir_filter=None, random_state=None, verbose=None): """Create noise as a multivariate Gaussian. The spatial covariance of the noise is given from the cov matrix. Parameters ---------- inst : instance of Evoked, Epochs, or Raw Instance to which to add noise. cov : instance of Covariance The noise covariance. iir_filter : None | array-like IIR filter coefficients (denominator). %(random_state)s %(verbose)s Returns ------- inst : instance of Evoked, Epochs, or Raw The instance, modified to have additional noise. Notes ----- Only channels in both ``inst.info['ch_names']`` and ``cov['names']`` will have noise added to them. This function operates inplace on ``inst``. .. 
versionadded:: 0.18.0 """ # We always allow subselection here return _add_noise(inst, cov, iir_filter, random_state) def _add_noise(inst, cov, iir_filter, random_state, allow_subselection=True): """Add noise, possibly with channel subselection.""" _validate_type(cov, Covariance, "cov") _validate_type( inst, (BaseRaw, BaseEpochs, Evoked), "inst", "Raw, Epochs, or Evoked" ) _check_preload(inst, "Adding noise") data = inst._data assert data.ndim in (2, 3) if data.ndim == 2: data = data[np.newaxis] # Subselect if necessary info = inst.info info._check_consistency() picks = gen_picks = slice(None) if allow_subselection: use_chs = list(set(info["ch_names"]) & set(cov["names"])) picks = np.where(np.in1d(info["ch_names"], use_chs))[0] logger.info( "Adding noise to %d/%d channels (%d channels in cov)" % (len(picks), len(info["chs"]), len(cov["names"])) ) info = pick_info(inst.info, picks) info._check_consistency() gen_picks = np.arange(info["nchan"]) for epoch in data: epoch[picks] += _generate_noise( info, cov, iir_filter, random_state, epoch.shape[1], picks=gen_picks )[0] return inst def _generate_noise( info, cov, iir_filter, random_state, n_samples, zi=None, picks=None ): """Create spatially colored and temporally IIR-filtered noise.""" rng = check_random_state(random_state) _, _, colorer = compute_whitener( cov, info, pca=True, return_colorer=True, picks=picks, verbose=False ) noise = np.dot(colorer, rng.standard_normal((colorer.shape[1], n_samples))) if iir_filter is not None: if zi is None: zi = np.zeros((len(colorer), len(iir_filter) - 1)) noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi) else: zf = None return noise, zf
null
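A usage sketch for the public helper defined above (add_noise in the upstream API). The file names are placeholders and the IIR coefficients are the ones quoted in the docstring.

import mne
from mne.simulation import add_noise

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)  # hypothetical recording; preload is required
cov = mne.read_cov("sample-cov.fif")                       # hypothetical noise covariance
add_noise(raw, cov, iir_filter=[1, -1, 0.2], random_state=42)  # adds spatially colored noise in place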
test auto all
import datetime import os import shutil import tempfile from unittest import TestCase from unittest.mock import MagicMock, call, patch from piccolo.apps.migrations.commands.new import ( BaseMigrationManager, _create_new_migration, _generate_migration_meta, new, ) from piccolo.conf.apps import AppConfig from piccolo.utils.sync import run_sync from tests.base import engines_only from tests.example_apps.music.tables import Manager class TestNewMigrationCommand(TestCase): def test_manual(self): """ Create a manual migration (i.e. non-auto). """ migration_folder = os.path.join( tempfile.gettempdir(), "piccolo_migrations" ) if os.path.exists(migration_folder): shutil.rmtree(migration_folder) os.mkdir(migration_folder) app_config = AppConfig( app_name="music", migrations_folder_path=migration_folder, table_classes=[Manager], ) run_sync(_create_new_migration(app_config, auto=False)) migration_modules = BaseMigrationManager().get_migration_modules( migration_folder ) self.assertTrue(len(migration_modules.keys()) == 1) @engines_only("postgres") @patch("piccolo.apps.migrations.commands.new.print") def test_auto(self, print_: MagicMock): """ Call the command, when no migration changes are needed. """ run_sync(new(app_name="music", auto=True)) self.assertListEqual( print_.call_args_list, [ call("🚀 Creating new migration ..."), call("🏁 No changes detected."), call("\n✅ Finished\n"), ], ) @engines_only("postgres") @patch("piccolo.apps.migrations.commands.new.print") def METHOD_NAME(self, print_: MagicMock): """ Try auto migrating all apps. """ run_sync(new(app_name="all", auto=True)) self.assertListEqual( print_.call_args_list, [ call("🚀 Creating new migration ..."), call("🏁 No changes detected."), call("🚀 Creating new migration ..."), call("🏁 No changes detected."), call("\n✅ Finished\n"), ], ) @engines_only("postgres") def test_auto_all_error(self): """ Call the command, when no migration changes are needed. """ with self.assertRaises(ValueError) as manager: run_sync(new(app_name="all", auto=False)) self.assertEqual( manager.exception.__str__(), "Only use `--app_name=all` in conjunction with `--auto`.", ) class TestGenerateMigrationMeta(TestCase): @patch("piccolo.apps.migrations.commands.new.now") def test_filename(self, now: MagicMock): now.return_value = datetime.datetime( year=2022, month=1, day=10, hour=7, minute=15, second=20, microsecond=3000, ) # Try with an app name which already contains valid characters for a # Python module. migration_meta = _generate_migration_meta( app_config=AppConfig( app_name="app_name", migrations_folder_path="/tmp/", ) ) self.assertEqual( migration_meta.migration_filename, "app_name_2022_01_10t07_15_20_003000", ) self.assertEqual( migration_meta.migration_path, "/tmp/app_name_2022_01_10t07_15_20_003000.py", ) # Try with an app name with invalid characters for a Python module. migration_meta = _generate_migration_meta( app_config=AppConfig( app_name="App-Name!", migrations_folder_path="/tmp/", ) ) self.assertEqual( migration_meta.migration_filename, "app_name_2022_01_10t07_15_20_003000", ) self.assertEqual( migration_meta.migration_path, "/tmp/app_name_2022_01_10t07_15_20_003000.py", )
null
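For reference, the programmatic equivalent of what these tests drive; in a project this is normally reached through the Piccolo CLI rather than called directly.

from piccolo.apps.migrations.commands.new import new
from piccolo.utils.sync import run_sync

run_sync(new(app_name="music", auto=True))  # auto-migrate a single app
run_sync(new(app_name="all", auto=True))    # auto-migrate every registered app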
get output file name
#!/usr/bin/env python3 # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; specifically version 2 of the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # # See LICENSE for more details. # # Copyright: RedHat 2015 # Author: Cleber Rosa <[email protected]> import argparse import datetime import getpass import json import os import subprocess import sys class Parser(argparse.ArgumentParser): def __init__(self): super(Parser, self).__init__( prog='avocado-run-testplan', description='Tracks manual test plans progress and results') self.add_argument('-t', '--template', type=argparse.FileType('r'), help='Template file with the predefined test plan') self.add_argument('-o', '--output', help='Output (test plan results) file location') self.add_argument('-i', '--input', type=argparse.FileType('r'), help=('A previously saved result file to use. This ' 'will show a human readable report for the ' 'given result file')) RESULT_MAP = {"P": "PASS", "p": "PASS", "F": "FAIL", "f": "FAIL", "S": "SKIP", "s": "SKIP"} class App: def __init__(self): self.parser = Parser() self.json = None self.datetime = None self.results = [] self.args = None self.user_identification = None def run(self): self.args, _ = self.parser.parse_known_args() if not (self.args.template or self.args.input): self.parser.print_usage() return 0 if self.args.input: self.report() else: try: self.run_test_plan() except KeyboardInterrupt: print("\nTest Plan interrupted by the user") return 1 def run_test_plan(self): self.json = json.load(self.args.template) self.user_identification = None self.datetime = datetime.datetime.now() print("Name: %s" % self.json.get("name")) print("Description: %s\n" % self.json.get("description")) test_count = len(self.json.get("tests")) current = 1 for test in self.json.get("tests"): print("Test %d/%d: %s" % (current, test_count, test.get("name"))) print("Description: %s\n" % test.get("description")) current += 1 result = None while True: result = input("Result ([P]ass, [F]ail, [S]kip): ") if result in RESULT_MAP.keys(): notes = input("Additional Notes: ") break print("") self.results.append({"name": test.get("name"), "result": RESULT_MAP.get(result), "notes": notes.strip()}) user = input("Your identification [%s]: " % getpass.getuser()) if not user: user = getpass.getuser() self.user_identification = user self.save() return 0 def METHOD_NAME(self, suffix='json'): """ Return the user given or default output file name """ if self.args.output: return self.args.output name = self.json.get("name") name = name.strip() name = name.replace(" ", "_") return "%s_%s_%s.%s" % (name, self.user_identification, self.datetime.isoformat(), suffix) def result_to_output_format(self): return {"name": self.json.get("name"), "user_identification": self.user_identification, "datetime": self.datetime.isoformat(), "results": self.results} def save(self): """ Save the test plan execution result to a file """ filename = self.METHOD_NAME() with open(filename, 'w') as output: json.dump(self.result_to_output_format(), output) print("Wrote results to: %s" % filename) def report(self): """ Write the test plan execution result to a human readable report """ if self.args.input: data = json.load(self.args.input) else: data = self.result_to_output_format() print("Test Plan: %s" % 
data.get("name")) print("Run by '%s' at %s" % (data.get("user_identification"), data.get("datetime"))) print("") for result in data.get("results"): print("%s: '%s': %s" % (result.get("result"), result.get("name"), result.get("notes"))) print("") for name in sorted(os.listdir(os.path.pardir)): path = os.path.join(os.path.pardir, name) if not os.path.isdir(path): continue proc = subprocess.Popen("cd '%s' && git rev-parse HEAD" % path, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) out = proc.communicate()[0].strip().decode() if not proc.poll(): print("%s: %s" % (name, out)) return 0 if __name__ == '__main__': app = App() sys.exit(app.run())
null
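The script above only documents its template format implicitly, through the json.get calls in run_test_plan. A minimal template it would accept, with invented names and descriptions, looks roughly like this:

import json

template = {
    "name": "Release smoke tests",
    "description": "Manual checks before tagging a release",
    "tests": [
        {"name": "install", "description": "Package installs cleanly"},
        {"name": "help output", "description": "--help prints usage and exits 0"},
    ],
}
with open("testplan.json", "w") as fh:
    json.dump(template, fh, indent=2)
# then: avocado-run-testplan -t testplan.json -o results.json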
get all permissions
from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User, Permission from django.db.models.base import Model from django.contrib.auth.backends import ModelBackend from tendenci.apps.perms.object_perms import ObjectPermission from tendenci.apps.perms.utils import can_view class ObjectPermBackend(ModelBackend): """ Custom backend that supports tendenci's version of group permissions and row level permissions, most of the code is copied from django with a few modifications """ supports_object_permissions = True supports_anonymous_user = True def authenticate(self, request=None, username=None, password=None, user=None): """ Modified version of django's authenticate. Will accept a user object, bypassing the password check. Returns the user for auto_login purposes """ if user: if hasattr(user, 'auto_login'): if not user.is_anonymous and user.auto_login: return user else: try: user = User.objects.get(username=username) if user.check_password(password): return user except User.DoesNotExist: return None return None def get_group_permissions(self, user_obj): """ Returns a set of permission strings that this user has through his/her groups. """ if not hasattr(user_obj, '_group_perm_cache'): # tendenci user_groups group_perms = Permission.objects.filter(group_permissions__members=user_obj, ).values_list('content_type__app_label', 'codename' ).order_by() group_perms_1 = ["%s.%s" % (ct, name) for ct, name in group_perms] # django auth groups group_perms = Permission.objects.filter(group__user=user_obj, ).values_list('content_type__app_label', 'codename' ).order_by() group_perms_2 = ["%s.%s" % (ct, name) for ct, name in group_perms] user_obj._group_perm_cache = set(group_perms_1 + group_perms_2) return user_obj._group_perm_cache def METHOD_NAME(self, user_obj): if user_obj.is_anonymous: return set() if not hasattr(user_obj, '_perm_cache'): user_obj._perm_cache = set(["%s.%s" % (p.content_type.app_label, p.codename) for p in user_obj.user_permissions.select_related()]) user_obj._perm_cache.update(self.get_group_permissions(user_obj)) return user_obj._perm_cache def get_group_object_permissions(self, user_obj, obj): if not obj.pk: return [] app_label = obj._meta.app_label user_obj_attr = '_%s_%d_group_object_perm_cache' % ( app_label, obj.pk ) if not hasattr(user_obj, user_obj_attr): content_type = ContentType.objects.get_for_model(obj) filters = { 'group__members': user_obj, 'content_type': content_type, 'object_id': obj.pk } group_object_perms = ObjectPermission.objects.filter(**filters) user_obj._group_object_perm_cache = set([u"%s.%s.%s" % (p.object_id, p.content_type.app_label, p.codename) for p in group_object_perms]) return user_obj._group_object_perm_cache def get_all_object_permissions(self, user_obj, obj): if not obj.pk: return [] app_label = obj._meta.app_label user_obj_attr = '_%s_%d_object_perm_cache' % ( app_label, obj.pk ) if not hasattr(user_obj, user_obj_attr): content_type = ContentType.objects.get_for_model(obj) filters = { 'content_type': content_type, 'object_id': obj.pk, 'user': user_obj } perms = ObjectPermission.objects.filter(**filters) user_obj._object_perm_cache = set([u"%s.%s.%s" % (p.object_id, p.content_type.app_label, p.codename) for p in perms]) user_obj._object_perm_cache.update(self.get_group_object_permissions(user_obj, obj)) return user_obj._object_perm_cache def has_perm(self, user, perm, obj=None): # check codename, return false if its a malformed codename try: perm_type = perm.split('.')[-1].split('_')[0] 
perm.split('.')[1] # codename except IndexError: return False # check group and user permissions, it check the regular users permissions and # the custom groups user permissions if perm in self.METHOD_NAME(user): return True if not obj: return False # they are non-admin, should not view any content with status=False - GJQ if hasattr(obj, "status") and not obj.status: return False # object anonymous and use bits if perm_type == 'view': has_attr_aov = hasattr(obj, "allow_anonymous_view") has_attr_auv = hasattr(obj, "allow_user_view") has_attr_amv = hasattr(obj, "allow_member_view") obj_is_active = hasattr(obj, "status_detail") and getattr(obj, 'status_detail') in ['active', 'published'] if all([has_attr_aov, has_attr_auv, has_attr_amv, obj_is_active]): if obj.allow_anonymous_view: return True if user.is_authenticated and obj.allow_user_view: return True if user.profile.is_member and obj.allow_member_view: return True if perm_type == 'change': has_attr_aue = hasattr(obj, "allow_user_edit") has_attr_ame = hasattr(obj, "allow_member_edit") if all([has_attr_aue, has_attr_ame]): if user.is_authenticated and obj.allow_user_edit: return True if user.profile.is_member and obj.allow_member_edit: return True # no anonymous user currently if not user.is_authenticated: return False # check creator and owner if perm_type not in ('approve', 'delete'): # Non-admin creator or owner shouldn't be able to approve or delete their own items if hasattr(obj, 'creator'): if obj.creator_id == user.id: return True if hasattr(obj, 'owner'): if obj.owner_id == user.id: return True if not isinstance(obj, Model): return False # lets check the search index for view permissions # before we ever hit the database, faster if 'view' in perm: try: # test for an index and make the query from haystack import connections site = connections['default'].unified_index() site.get_index(obj.__class__) if can_view(user, obj): return True except AssertionError: raise except: pass # check the permissions on the object level of groups or user perm = '%s.%s' % (obj.pk, perm) if perm in self.get_all_object_permissions(user, obj): return True def has_module_perms(self, user_obj, app_label): """ Returns True if user_obj has any permissions in the given app_label. """ for perm in self.METHOD_NAME(user_obj): if perm[:perm.index('.')] == app_label: return True return False def get_user(self, user_id): try: return User.objects.get(pk=user_id) except User.DoesNotExist: return None
null
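To put the backend above in play it has to be listed in Django's AUTHENTICATION_BACKENDS. The dotted path below is an assumption about where the class lives, and the permission string is an invented example.

# settings.py
AUTHENTICATION_BACKENDS = [
    "tendenci.apps.perms.backends.ObjectPermBackend",   # assumed module path
    "django.contrib.auth.backends.ModelBackend",
]

# Object-level checks then go through the standard Django call:
#     user.has_perm("articles.view_article", obj=article)   # invented perm/model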
name
######################################################################### # # Copyright (C) 2023 Open Source Geospatial Foundation - all rights reserved # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ######################################################################### import logging from django.conf import settings DEFAULT_FACET_PAGE_SIZE = 10 # Well known types of facet - not an enum bc it needs to be extensible FACET_TYPE_PLACE = "place" FACET_TYPE_USER = "user" FACET_TYPE_THESAURUS = "thesaurus" FACET_TYPE_CATEGORY = "category" FACET_TYPE_BASE = "base" FACET_TYPE_KEYWORD = "keyword" logger = logging.getLogger(__name__) class FacetProvider: """ Provides access to the facet information and the related topics """ def __init__(self, **kwargs): self.config = kwargs.get("config", {}).copy() def __str__(self): return f"{self.__class__.__name__}[{self.METHOD_NAME}]" @property def METHOD_NAME(self) -> str: """ Get the name of the facet, to be used as a key for this provider. You may want to override this method in order to have an optimized logic :return: The name of the provider as a str """ self.get_info()["name"] def get_info(self, lang="en", **kwargs) -> dict: """ Get the basic info for this provider, as a dict with these keys: - 'name': the name of the provider (the one returned by name()) - 'filter': the filtering key to be used in a filter query - 'label': a generic label for the facet; the client should try and localize it whenever possible - 'localized_label': a localized label for the facet (localized according to the `lang` param) - 'type': the facet type (e.g. user, region, thesaurus, ...) - 'hierarchical': boolean value telling if the facet items are hierarchically organized - "order": an optional integer suggesting the relative ordering of the facets :param lang: lanuage for label localization :return: a dict """ pass def get_facet_items( self, queryset, start: int = 0, end: int = DEFAULT_FACET_PAGE_SIZE, lang="en", topic_contains: str = None, keys: set = {}, **kwargs, ) -> (int, list): """ Return the items of the facets, in a tuple: - int, total number of items matched - list, topic records. 
A topic record is a dict having these keys: - key: the key of the items that should be used for filtering - label: a generic label for the item; the client should try and localize it whenever possible - localized_label: a localized label for the item - count: the count of such topic in the current facet - other facet specific keys :param queryset: the prefiltered queryset (may be filtered for authorization or other filters) :param start: int: pagination, the index of the initial returned item :param end: int: pagination, the index of the last returned item :param lang: the preferred language for the labels :param topic_contains: only returns matching topics :param keys: only returns topics with given keys, even if their count is 0 :return: a tuple int:total count of record, list of items """ pass def get_topics(self, keys: list, lang="en", **kwargs) -> list: """ Return the topics with the requested ids as a list - list, topic records. A topic record is a dict having these keys: - key: the key of the items that should be used for filtering - label: a generic label for the item; the client should try and localize it whenever possible - localized_label: a localized label for the item - other facet specific keys :param keys: the list of the keys of the topics, as returned by the get_facet_items() method :param lang: the preferred language for the labels :return: list of items """ pass @classmethod def register(cls, registry, **kwargs) -> None: """ Perform registration of instances of this Provider :param registry: the registry where instances shall be registered :param kwargs: other args that may be needed by Providers """ pass class FacetsRegistry: def __init__(self): self.facet_providers = None def _load_facets_configuration(self) -> None: """ Facet loading is done lazily because some FacetProvider may need to access the DB, which may not have been initialized/created yet """ from django.utils.module_loading import import_string self.facet_providers = dict() logger.info("Initializing Facets") for providerconf in getattr(settings, "FACET_PROVIDERS", []): clz = providerconf["class"] provider = import_string(clz) provider.register(self, config=providerconf.get("config", {})) def register_facet_provider(self, provider: FacetProvider): logger.info(f"Registering {provider}") self.facet_providers[provider.get_info()["name"]] = provider def get_providers(self): if self.facet_providers is None: self._load_facets_configuration() return self.facet_providers.values() def get_provider(self, METHOD_NAME): if self.facet_providers is None: self._load_facets_configuration() return self.facet_providers.get(METHOD_NAME, None) facet_registry = FacetsRegistry()
null
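A skeletal provider written against the interface above. Everything except the base class and the registry hook is invented for illustration, the import path is assumed, and a real implementation would also supply get_topics and localized labels.

from geonode.facets.providers import FacetProvider  # assumed import path

class LicenseFacetProvider(FacetProvider):
    """Toy facet grouping resources by a hypothetical 'license' field."""

    def get_info(self, lang="en", **kwargs) -> dict:
        return {"name": "license", "filter": "filter{license}", "label": "License",
                "localized_label": "License", "type": "base", "hierarchical": False}

    def get_facet_items(self, queryset, start=0, end=10, lang="en", **kwargs):
        counted = {}
        for value in queryset.values_list("license", flat=True):  # assumes such a field exists
            counted[value] = counted.get(value, 0) + 1
        items = [{"key": k, "label": str(k), "count": c} for k, c in counted.items()]
        return len(items), items[start:end]

    @classmethod
    def register(cls, registry, **kwargs) -> None:
        registry.register_facet_provider(cls(config=kwargs.get("config", {})))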
set powermeter unit price
import asyncio import json import logging from homematicip.aio.class_maps import ( TYPE_CLASS_MAP, TYPE_GROUP_MAP, TYPE_RULE_MAP, TYPE_SECURITY_EVENT_MAP, ) from homematicip.aio.connection import AsyncConnection from homematicip.aio.securityEvent import AsyncSecurityEvent from homematicip.base.enums import * from homematicip.home import Home, OAuthOTK LOGGER = logging.getLogger(__name__) class AsyncHome(Home): """this class represents the 'Async Home' of the homematic ip""" _typeClassMap = TYPE_CLASS_MAP _typeGroupMap = TYPE_GROUP_MAP _typeSecurityEventMap = TYPE_SECURITY_EVENT_MAP _typeRuleMap = TYPE_RULE_MAP def __init__(self, loop, websession=None): super().__init__(connection=AsyncConnection(loop, websession)) async def init(self, access_point_id, lookup=True): await self._connection.init(access_point_id, lookup) async def get_current_state(self, clearConfig: bool = False): """downloads the current configuration and parses it into self Args: clearConfig(bool): if set to true, this function will remove all old objects from self.devices, self.client, ... to have a fresh config instead of reparsing them """ LOGGER.debug("get_current_state") json_state = await self.download_configuration() return self.update_home(json_state, clearConfig) async def download_configuration(self): return await self._connection.api_call(*super().download_configuration()) async def enable_events(self) -> asyncio.Task: """Connects to the websocket. Returns a listening task.""" return await self._connection.ws_connect( on_message=self._ws_on_message, on_error=self._ws_on_error ) async def disable_events(self): await self._connection.close_websocket_connection() async def get_OAuth_OTK(self): token = OAuthOTK(self._connection) token.from_json(await self._connection.api_call("home/getOAuthOTK")) return token async def activate_absence_with_duration(self, duration): return await self._connection.api_call( *super().activate_absence_with_duration(duration) ) async def METHOD_NAME(self, price): return await self._connection.api_call( *super().METHOD_NAME(price) ) async def set_intrusion_alert_through_smoke_detectors(self, activate=True): return await self._connection.api_call( *super().set_intrusion_alert_through_smoke_detectors(activate) ) async def set_timezone(self, timezone): return await self._connection.api_call(*super().set_timezone(timezone)) async def set_zones_device_assignment(self, internal_devices, external_devices): return await self._connection.api_call( *super().set_zones_device_assignment(internal_devices, internal_devices) ) async def set_pin(self, newPin, oldPin=None): if newPin is None: newPin = "" data = {"pin": newPin} if oldPin: self._connection.headers["PIN"] = str(oldPin) result = await self._connection.api_call("home/setPin", body=json.dumps(data)) if oldPin: del self._connection.headers["PIN"] return result async def get_security_journal(self): journal = await self._connection.api_call( "home/security/getSecurityJournal", json.dumps(self._connection.clientCharacteristics), ) if journal is None or "errorCode" in journal: LOGGER.error( "Could not get the security journal. 
Error: %s", journal["errorCode"] ) return None ret = [] for entry in journal["entries"]: try: eventType = SecurityEventType(entry["eventType"]) if eventType in self._typeSecurityEventMap: j = self._typeSecurityEventMap[eventType](self._connection) except: j = AsyncSecurityEvent(self._connection) LOGGER.warning("There is no class for %s yet", entry["eventType"]) j.from_json(entry) ret.append(j) return ret async def activate_absence_with_period(self, endtime): return await self._connection.api_call( *super().activate_absence_with_period(endtime) ) async def activate_absence_permanent(self): return await self._connection.api_call(*super().activate_absence_permanent()) async def deactivate_absence(self): return await self._connection.api_call(*super().deactivate_absence()) async def activate_vacation(self, endtime, temperature): return await self._connection.api_call( *super().activate_vacation(endtime, temperature) ) async def deactivate_vacation(self): return await self._connection.api_call(*super().deactivate_vacation()) async def set_zone_activation_delay(self, delay): return await self._connection.api_call( *super().set_zone_activation_delay(delay) ) async def set_security_zones_activation(self, internal=True, external=True): return await self._connection.api_call( *super().set_security_zones_activation(internal, external) ) async def delete_group(self, group): return await group.delete() async def set_location(self, city, latitude, longitude): return await self._connection.api_call( *super().set_location(city, latitude, longitude) )
null
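A small asyncio sketch that reaches the method the label refers to (set_powermeter_unit_price). The module path and the access point id are placeholders.

import asyncio
from homematicip.aio.home import AsyncHome   # assumed module path

async def main():
    home = AsyncHome(asyncio.get_running_loop())
    await home.init("3014-XXXX-XXXX")              # hypothetical access point id
    await home.get_current_state()
    await home.set_powermeter_unit_price(0.32)     # the method shown as METHOD_NAME above

asyncio.run(main())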
to json
############################################################## # Copyright 2023 Lawrence Livermore National Security, LLC # (c.f. AUTHORS, NOTICE.LLNS, COPYING) # # This file is part of the Flux resource manager framework. # For details, see https://github.com/flux-framework. # # SPDX-License-Identifier: LGPL-3.0 ############################################################## import argparse import json import logging import math import sys import flux import flux.job import flux.util LOGGER = logging.getLogger("flux-update") class JobspecUpdates: """ Convenience class for building a jobspec-update payload from a set of KEY=VALUE pairs on the command line, and a method to send the update as a request to the job manager. """ # Mapping of short key names, i.e. as given on the command line, # to full dotted-path location in jobspec. # # Note: If a key doesn't exist in this mapping, but also does not start # with 'attributes.', 'resources.' or 'tasks.', then 'attributes.system' # is assumed. # key_aliases = {"name": "attributes.system.job.name"} def __init__(self, jobid, flux_handle=None): self._flux_handle = flux_handle self.jobid = jobid self.updates = None self.jobspec = None @property def flux_handle(self): if self._flux_handle is None: self._flux_handle = flux.Flux() return self._flux_handle def _apply_jobspec_updates(self, eventlog): """ Apply jobspec updates from eventlog to internal jobspec: """ for entry in eventlog.splitlines(): event = flux.job.EventLogEvent(entry) if event.name == "jobspec-update": for key, value in event.context.items(): self.jobspec.setattr(key, value) def _fetch_jobspec(self, key): """ Fetch dotted key 'key' in jobspec for this job, fetching jobspec and eventlog (to apply jobspec-updates) if necessary. """ if self.jobspec is None: lookup = flux.job.job_kvs_lookup( self.flux_handle, jobid=self.jobid, keys=["jobspec", "eventlog"] ) self.jobspec = flux.job.JobspecV1(**lookup["jobspec"]) self._apply_jobspec_updates(lookup["eventlog"]) return self.jobspec.getattr(key) def update_attributes_system_duration(self, value): """ Handle a duration update. If update begins with "+" or "-", then get duration from jobspec and increase or decrease by the amount of the remaining argument. O/w, treat value as an explicit new duration. """ result = None if value.startswith(("-", "+")): # relative update, fetch value first duration = self._fetch_jobspec("attributes.system.duration") if duration == 0: raise ValueError( f"duration for {self.jobid} is unlimited, " f"can't update by {value}" ) arg = flux.util.parse_fsd(value[1:]) if value.startswith("-"): result = duration - arg if result <= 0.0: duration = flux.util.fsd(duration) raise ValueError( f"current duration for {self.jobid} ({duration})" f" cannot be reduced by {value[1:]}" ) else: result = duration + arg else: result = flux.util.parse_fsd(value) # An unlimited duration is represented as 0. in jobspec, so # check for infinity here and replace with 0. # if math.isinf(result): result = 0.0 return result def add_update(self, key, value): """ Append an update to the current updates object. """ if self.updates is None: self.updates = {} # Handle any special keys aliases if key in self.key_aliases: key = self.key_aliases[key] # If key doesn't start with attributes, resources, or tasks, # assume 'attributes.system.' 
for convenience: if not key.startswith(("attributes.", "resources.", "tasks.")): key = f"attributes.system.{key}" try: # Use any function update_attributes_system_blah() if # registered to process the value: # function_signature = "update_" + key.replace(".", "_") value = getattr(self, function_signature)(value) except AttributeError: # Otherwise, attempt to load value as JSON: # try: value = json.loads(value) except json.decoder.JSONDecodeError: # Otherwise, load value as string: # value = str(value) self.updates[key] = value def items(self): """ Convenience wrapper to return a copy of the current update dictionary key, value pairs """ return self.updates.items() def METHOD_NAME(self): return json.dumps(self.updates) def send_rpc(self): payload = {"id": self.jobid, "updates": self.updates} return self.flux_handle.rpc("job-manager.update", payload) def parse_args(): parser = argparse.ArgumentParser( prog="flux-update", formatter_class=flux.util.help_formatter() ) parser.add_argument( "-n", "--dry-run", action="store_true", help="Do not apply any updates, just emit update payload to stdout", ) parser.add_argument( "-v", "--verbose", action="store_true", default=0, help="Be more verbose. Log updated items after success.", ) parser.add_argument( "jobid", metavar="JOBID", type=flux.job.JobID, help="Target jobid", ) parser.add_argument( "updates", metavar="KEY=VALUE", type=str, nargs="+", help="Requested jobspec updates in KEY=VALUE form", ) return parser.parse_args() @flux.util.CLIMain(LOGGER) def main(): sys.stdout = open( sys.stdout.fileno(), "w", encoding="utf8", errors="surrogateescape" ) sys.stderr = open( sys.stderr.fileno(), "w", encoding="utf8", errors="surrogateescape" ) args = parse_args() updates = JobspecUpdates(args.jobid) for arg in args.updates: key, _, value = arg.partition("=") updates.add_update(key, value) if args.dry_run: print(updates.METHOD_NAME()) sys.exit(0) updates.send_rpc().get() if args.verbose: for key, value in updates.items(): LOGGER.info(f"updated {key} to {value}") if __name__ == "__main__": main() # vi: ts=4 sw=4 expandtab
null
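The JobspecUpdates class is also usable outside the CLI wrapper, inside a running Flux instance. A sketch; the import name and the jobid are hypothetical, since the file above is installed as a command-line script.

import flux.job
from flux_update import JobspecUpdates   # hypothetical import of the script above

updates = JobspecUpdates(flux.job.JobID("f3Kjb9pHXpZP"))  # hypothetical jobid
updates.add_update("duration", "+1h")     # relative bump handled by update_attributes_system_duration
updates.add_update("name", "longer-run")  # alias expanded to attributes.system.job.name
updates.send_rpc().get()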
mfcalc
class MultiFieldSolverTimeControls: def METHOD_NAME(self, fnumb="", freq="", **kwargs): """Specifies a calculation frequency for a field in an ANSYS Multi-field APDL Command: MFCALC solver analysis. Parameters ---------- fnumb Field number set by the MFELEM command. freq Perform calculation every Nth ANSYS Multi-field solver time step. Defaults to 1 for every time step. Notes ----- This command only applies to a harmonic analysis of the specified field. It is useful when a field contributes negligible field interaction within a single ANSYS Multi-field solver time step. This command is also valid in PREP7. See Multi-field Commands in the Coupled-Field Analysis Guide for a list of all ANSYS Multi-field solver commands and their availability for MFS and MFX analyses. Distributed ANSYS Restriction: This command is not supported in Distributed ANSYS. """ command = f"MFCALC,{fnumb},{freq}" return self.run(command, **kwargs) def mfdtime(self, dtime="", dtmin="", dtmax="", carry="", **kwargs): """Sets time step sizes for an ANSYS Multi-field solver analysis. APDL Command: MFDTIME Parameters ---------- dtime Multi-field time step size. If automatic time stepping is being used [see Notes below], DTIME is the starting time step. dtmin Minimum time step. Defaults to DTIME. dtmax Maximum time step. Defaults to DTIME. carry Time step carryover key. OFF - Use DTIME as the starting time step for the next restart run (default). ON - Use the final time step from the previous run as the starting time step for the next restart run. Notes ----- This command specifies time step sizes for an ANSYS Multi-field solver analysis. If either DTMIN or DTMAX is not equal to DTIME, auto time- stepping is turned on for the multi-field loop. ANSYS will automatically adjust the time step size for the next multi-field step between DTMIN and DTMAX, based on the status of the current convergence, the number of target stagger iterations (specified by MFITER), and the actual number of iterations needed to reach convergence at the current time step. If auto time-stepping is turned off, the time step size must be evenly divisible into the end time (specified by MFTIME) minus the start time (0 for a new analysis or a restart time specified by MFRSTART). You can use a smaller time step within each ANSYS field analysis. This is called subcycling. Use the DELTIM and AUTOTS commands to subcycle a structural, thermal, or electromagnetic analysis. This command is also valid in PREP7. See Multi-field Commands in the Coupled-Field Analysis Guide for a list of all ANSYS Multi-field solver commands and their availability for MFS and MFX analyses. Distributed ANSYS Restriction: This command is not supported in Distributed ANSYS. """ command = f"MFDTIME,{dtime},{dtmin},{dtmax},{carry}" return self.run(command, **kwargs) def mfoutput(self, freq="", **kwargs): """Specifies results file output frequency for an ANSYS APDL Command: MFOUTPUT Multi-field solver analysis. Parameters ---------- freq N N - Write solution every Nth (and the last) time step. Defaults to 1, for every time step. -N - Writes up to -N equally spaced results (for multifield auto time stepping). NONE - Suppresses writing of results for all multifield time steps. ALL - Writes results for every multifield time step (default). LAST - Writes results for only the last multifield time step. %array% - Where %array% is the name of an n X 1 X 1 dimensional array parameter defining n key times, the data for the specified solution results item is written at those key times. 
Key times in the array parameter must appear in ascending order. Value must be greater than or equal to the ending time values for the load step. For restart runs (see MFRSTART command), either change the parameter values to fall between the beginning and ending time values of the load step, or erase the current settings and reissue the command with a new array parameter. - For more information about defining array parameters, see the ``*DIM`` command documentation. Notes ----- A MFOUTPUT setting overrides any other output frequency setting (OUTRES). To select the solution items, use the OUTRES command. For the case of Freq = -n and Freq = %array%, the results at the time points which first time reaches or exceeds the targeting ouptupt time points will be written. This command is also valid in PREP7. See Multi-field Commands in the Coupled-Field Analysis Guide for a list of all ANSYS Multi-field solver commands and their availability for MFS and MFX analyses. Distributed ANSYS Restriction: This command is not supported in Distributed ANSYS. """ command = f"MFOUTPUT,{freq}" return self.run(command, **kwargs) def mfrstart(self, time="", **kwargs): """Specifies restart status for an ANSYS Multi-field solver analysis. APDL Command: MFRSTART Parameters ---------- time Restart time 0 - New analysis (Default) -1 - Restart from the last result set from a previous run. n - Specify any positive number for the actual time point from which the ANSYS Multi-field solver will restart. ANSYS checks the availability of the result set and database file. Notes ----- For MFX analyses, ANSYS always passes an actual time value to CFX (zero for a new analysis or a positive value for a restart run) and CFX verifies the consistency with the initial results file. For more details about ANSYS restart capabilities, please see Restarting an Analysis in the Basic Analysis Guide. See Multi-field Commands in the Coupled-Field Analysis Guide for a list of all ANSYS Multi-field solver commands and their availability for MFS and MFX analyses. Distributed ANSYS Restriction: This command is not supported in Distributed ANSYS. """ command = f"MFRSTART,{time}" return self.run(command, **kwargs) def mftime(self, time="", **kwargs): """Sets end time for an ANSYS Multi-field solver analysis. APDL Command: MFTIME Parameters ---------- time End time of an ANSYS Multi-field solver analysis. Defaults to 1. Notes ----- A MFTIME setting overrides any other end time setting (TIME). This command is also valid in PREP7. See Multi-field Commands in the Coupled-Field Analysis Guide for a list of all ANSYS Multi-field solver commands and their availability for MFS and MFX analyses. Distributed ANSYS Restriction: This command is not supported in Distributed ANSYS. """ command = f"MFTIME,{time}" return self.run(command, **kwargs)
null
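These mixin methods are issued from a live PyMAPDL session. A minimal sketch; a licensed MAPDL installation is required, the field setup is elided, and the numeric arguments are illustrative only.

from ansys.mapdl.core import launch_mapdl

mapdl = launch_mapdl()
# ... define the coupled fields (MFELEM etc.), then configure the multi-field run:
mapdl.mfcalc(fnumb=2, freq=3)                        # evaluate field 2 every 3rd multi-field step
mapdl.mfdtime(dtime=0.01, dtmin=0.001, dtmax=0.05)   # auto time stepping between the bounds
mapdl.mftime(1.0)                                    # end time of the multi-field analysis
mapdl.exit()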
make ellipses
""" =============== GMM covariances =============== Demonstration of several covariances types for Gaussian mixture models. See :ref:`gmm` for more information on the estimator. Although GMM are often used for clustering, we can compare the obtained clusters with the actual classes from the dataset. We initialize the means of the Gaussians with the means of the classes from the training set to make this comparison valid. We plot predicted labels on both training and held out test data using a variety of GMM covariance types on the iris dataset. We compare GMMs with spherical, diagonal, full, and tied covariance matrices in increasing order of performance. Although one would expect full covariance to perform best in general, it is prone to overfitting on small datasets and does not generalize well to held out test data. On the plots, train data is shown as dots, while test data is shown as crosses. The iris dataset is four-dimensional. Only the first two dimensions are shown here, and thus some points are separated in other dimensions. """ # Author: Ron Weiss <[email protected]>, Gael Varoquaux # Modified by Thierry Guillemot <[email protected]> # License: BSD 3 clause import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np from sklearn import datasets from sklearn.mixture import GaussianMixture from sklearn.model_selection import StratifiedKFold colors = ["navy", "turquoise", "darkorange"] def METHOD_NAME(gmm, ax): for n, color in enumerate(colors): if gmm.covariance_type == "full": covariances = gmm.covariances_[n][:2, :2] elif gmm.covariance_type == "tied": covariances = gmm.covariances_[:2, :2] elif gmm.covariance_type == "diag": covariances = np.diag(gmm.covariances_[n][:2]) elif gmm.covariance_type == "spherical": covariances = np.eye(gmm.means_.shape[1]) * gmm.covariances_[n] v, w = np.linalg.eigh(covariances) u = w[0] / np.linalg.norm(w[0]) angle = np.arctan2(u[1], u[0]) angle = 180 * angle / np.pi # convert to degrees v = 2.0 * np.sqrt(2.0) * np.sqrt(v) ell = mpl.patches.Ellipse( gmm.means_[n, :2], v[0], v[1], angle=180 + angle, color=color ) ell.set_clip_box(ax.bbox) ell.set_alpha(0.5) ax.add_artist(ell) ax.set_aspect("equal", "datalim") iris = datasets.load_iris() # Break up the dataset into non-overlapping training (75%) and testing # (25%) sets. skf = StratifiedKFold(n_splits=4) # Only take the first fold. train_index, test_index = next(iter(skf.split(iris.data, iris.target))) X_train = iris.data[train_index] y_train = iris.target[train_index] X_test = iris.data[test_index] y_test = iris.target[test_index] n_classes = len(np.unique(y_train)) # Try GMMs using different types of covariances. estimators = { cov_type: GaussianMixture( n_components=n_classes, covariance_type=cov_type, max_iter=20, random_state=0 ) for cov_type in ["spherical", "diag", "tied", "full"] } n_estimators = len(estimators) plt.figure(figsize=(3 * n_estimators // 2, 6)) plt.subplots_adjust( bottom=0.01, top=0.95, hspace=0.15, wspace=0.05, left=0.01, right=0.99 ) for index, (name, estimator) in enumerate(estimators.items()): # Since we have class labels for the training data, we can # initialize the GMM parameters in a supervised manner. estimator.means_init = np.array( [X_train[y_train == i].mean(axis=0) for i in range(n_classes)] ) # Train the other parameters using the EM algorithm. 
estimator.fit(X_train) h = plt.subplot(2, n_estimators // 2, index + 1) METHOD_NAME(estimator, h) for n, color in enumerate(colors): data = iris.data[iris.target == n] plt.scatter( data[:, 0], data[:, 1], s=0.8, color=color, label=iris.target_names[n] ) # Plot the test data with crosses for n, color in enumerate(colors): data = X_test[y_test == n] plt.scatter(data[:, 0], data[:, 1], marker="x", color=color) y_train_pred = estimator.predict(X_train) train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100 plt.text(0.05, 0.9, "Train accuracy: %.1f" % train_accuracy, transform=h.transAxes) y_test_pred = estimator.predict(X_test) test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100 plt.text(0.05, 0.8, "Test accuracy: %.1f" % test_accuracy, transform=h.transAxes) plt.xticks(()) plt.yticks(()) plt.title(name) plt.legend(scatterpoints=1, loc="lower right", prop=dict(size=12)) plt.show()
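The ellipse geometry in the plotting helper above comes straight from an eigendecomposition of a 2x2 covariance block; a standalone sketch of that step, using a made-up covariance matrix:

# Standalone illustration of the covariance-to-ellipse math used in the helper above.
import numpy as np

cov = np.array([[2.0, 0.6],
                [0.6, 1.0]])                      # made-up 2x2 covariance
v, w = np.linalg.eigh(cov)                        # eigenvalues (ascending) and eigenvectors
u = w[0] / np.linalg.norm(w[0])                   # same row convention as the helper above
angle = 180.0 * np.arctan2(u[1], u[0]) / np.pi    # convert to degrees
width, height = 2.0 * np.sqrt(2.0) * np.sqrt(v)   # axis lengths passed to the Ellipse patch
print(f"angle={angle:.1f} deg, width={width:.2f}, height={height:.2f}")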
null
log binary
"""Support functions used in library.""" import asyncio import binascii import functools import logging from os import environ, path from typing import Any, List, Sequence, Union import warnings from google.protobuf.text_format import MessageToString from pydantic import BaseModel import pyatv from pyatv import exceptions _PROTOBUF_LINE_LENGTH = 150 _BINARY_LINE_LENGTH = 512 def _shorten(text: Union[str, bytes], length: int) -> str: if isinstance(text, str): return text if len(text) < length else (text[: length - 3] + "...") return str(text if len(text) < length else (text[: length - 3] + b"...")) def _log_value(value): if value is None: return "" if isinstance(value, bytes): return binascii.hexlify(bytearray(value or b"")).decode() return str(value) def prettydataclass(max_length: int = 150): """Prettify dataclasses. Prettify an existing dataclass by replacing __repr__ with a method that shortens variables to a max length, greatly reducing output for long strings in debug logs. """ def _repr(self) -> str: def _format(value: Any) -> str: if isinstance(value, (str, bytes)): return _shorten(value, max_length) return value return ( self.__class__.__name__ + "(" + ", ".join( [ f"{f}={_format(getattr(self, f))}" for f in self.__dataclass_fields__.keys() ] ) + ")" ) def _wrap(cls): setattr(cls, "__repr__", _repr) return cls return _wrap async def error_handler(func, fallback, *args, **kwargs): """Call a function and re-map exceptions to match pyatv interface.""" try: return await func(*args, **kwargs) except (OSError, asyncio.TimeoutError) as ex: raise exceptions.ConnectionFailedError(str(ex)) from ex except exceptions.BackOffError: raise except exceptions.NoCredentialsError: raise except Exception as ex: raise fallback(str(ex)) from ex # Special log method to avoid hexlify conversion if debug is on def METHOD_NAME(logger, message, level=logging.DEBUG, **kwargs): """Log binary data if debug is enabled.""" if logger.isEnabledFor(level): override_length = int(environ.get("PYATV_BINARY_MAX_LINE", 0)) line_length = override_length or _BINARY_LINE_LENGTH output = ( f"{k}={_shorten(_log_value(v), line_length)}" for k, v in sorted(kwargs.items()) ) logger.debug("%s (%s)", message, ", ".join(output)) def log_protobuf(logger, text, message): """Log protobuf message and shorten line length.""" if logger.isEnabledFor(logging.DEBUG): override_length = int(environ.get("PYATV_PROTOBUF_MAX_LINE", 0)) line_length = override_length or _PROTOBUF_LINE_LENGTH lines = MessageToString(message, print_unknown_fields=True).splitlines() msg_str = "\n".join([_shorten(x, line_length) for x in lines]) logger.debug("%s: %s", text, msg_str) def _running_in_pyatv_repo() -> bool: """Return pyatv is run via pytest inside its own repo.""" current_test = environ.get("PYTEST_CURRENT_TEST") if current_test: pyatv_path = path.dirname(path.dirname(pyatv.__file__)) test_file = current_test.split("::")[0] abs_path = path.join(pyatv_path, test_file) return path.exists(abs_path) return False # https://stackoverflow.com/questions/2536307/ # decorators-in-the-python-standard-lib-deprecated-specifically def deprecated(func): """Decorate functions that are deprecated.""" if _running_in_pyatv_repo(): return func @functools.wraps(func) def new_func(*args, **kwargs): # Tests typically call deprecated methods, yielding warnings. Suppress these # warnings when running tests with pytest. 
if not _running_in_pyatv_repo(): warnings.simplefilter("always", DeprecationWarning) # turn off filter warnings.warn( f"Call to deprecated function {func.__name__}.", category=DeprecationWarning, stacklevel=2, ) warnings.simplefilter("default", DeprecationWarning) # reset filter return func(*args, **kwargs) return new_func def map_range( value: float, in_min: float, in_max: float, out_min: float, out_max: float ) -> float: """Map a value in one range to another.""" if in_max - in_min <= 0.0: raise ValueError("invalid input range") if out_max - out_min <= 0.0: raise ValueError("invalid output range") if value < in_min or value > in_max: raise ValueError("input value out of range") return (value - in_min) * (out_max - out_min) / (in_max - in_min) + out_min def shift_hex_identifier(identifier: str) -> str: """Repeatably modify a unique identifier to avoid collisions.""" assert len(identifier) >= 2 first, rest = identifier[:2], identifier[2:] shifted = f"{(int(first, 16) + 1) % 256:02x}" if identifier.isupper(): shifted = shifted.upper() return shifted + rest def stringify_model(model: BaseModel) -> Sequence[str]: """Recursively traverse a pydantic model and print values. This method will traverse a model and present each field with a "dotted" string path, current value and data type. It is supposed to be used with pyatv.settings. It is assumed optional field does not contain other models (only basic types). """ def _recurse_into( current_model: BaseModel, prefix: str, output: List[str] ) -> Sequence[str]: for name, field in current_model.model_fields.items(): if field.annotation.__dict__.get("__origin__") is Union: field_types = ", ".join( arg.__name__ for arg in field.annotation.__args__ ) output.append( f"{prefix}{name} = {getattr(current_model, name)} ({field_types})" ) elif BaseModel in field.annotation.__mro__: _recurse_into( getattr(current_model, name), (prefix or "") + f"{name}.", output ) elif field.default is not None: output.append( f"{prefix}{name} = " f"{getattr(current_model, name)} " f"({field.annotation.__name__})" ) return output return _recurse_into(model, "", []) def update_model_field( model: BaseModel, field: str, value: Union[str, int, float, None] ) -> None: """Update a field in a model using dotting string path.""" splitted_path = field.split(".", maxsplit=1) next_field = splitted_path[0] if not hasattr(model, next_field): raise AttributeError(f"{model} has no field {next_field}") if len(splitted_path) > 1: update_model_field(getattr(model, next_field), splitted_path[1], value) else: model.model_validate({field: value}) setattr(model, field, value)
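Two of the helpers above, map_range and shift_hex_identifier, are easy to exercise in isolation; the expected values in the comments follow directly from the code, and the import path is an assumption.

# Quick checks for map_range and shift_hex_identifier defined above.
# The pyatv.support import path is assumed for this sketch.
from pyatv.support import map_range, shift_hex_identifier

print(map_range(5.0, 0.0, 10.0, 0.0, 100.0))   # 50.0 -- linear rescale between the two ranges
print(shift_hex_identifier("aa:bb:cc"))        # "ab:bb:cc" -- first hex byte bumped by one
print(shift_hex_identifier("FF:00:11"))        # "00:00:11" -- wraps modulo 256, keeps upper case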
null
check activity dates
# Copyright Swiss Data Science Center (SDSC). A partnership between # École Polytechnique Fédérale de Lausanne (EPFL) and # Eidgenössische Technische Hochschule Zürich (ETHZ). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Checks needed to determine integrity of datasets.""" import itertools import click from renku.command.command_builder import inject from renku.command.util import WARNING from renku.core.interface.activity_gateway import IActivityGateway from renku.core.util import communication from renku.domain_model.project_context import project_context @inject.autoparams("activity_gateway") def check_migrated_activity_ids(fix, activity_gateway: IActivityGateway, **_): """Check that activity ids were correctly migrated in the past.""" activities = activity_gateway.get_all_activities(include_deleted=True) wrong_activities = [a for a in activities if not a.id.startswith("/activities/")] if fix: current_database = project_context.database for activity in wrong_activities: communication.info(f"Fixing activity '{activity.id}'") activity_gateway.remove(activity, keep_reference=False) # NOTE: Modify id on activity and children activity.unfreeze() activity.id = f"/activities/{activity.id}" activity._p_oid = current_database.hash_id(activity.id) activity.freeze() for attribute in itertools.chain( activity.usages, activity.hidden_usages, activity.generations, activity.parameters ): object.__setattr__(attribute, "id", f"/activities/{attribute.id}") # type: ignore activity.association.id = f"/activities/{activity.association.id}" activity_gateway.add(activity) wrong_activities = [] if not wrong_activities: return True, False, None problems = ( WARNING + "There are invalid activity ids in the project (use 'renku doctor --fix' to fix them):" + "\n\n\t" + "\n\t".join(click.style(a.id, fg="yellow") for a in wrong_activities) + "\n" ) return False, True, problems @inject.autoparams("activity_gateway") def METHOD_NAME(fix, activity_gateway: IActivityGateway, **_): """Check activities have correct start/end/delete dates. Args: fix(bool): Whether to fix found issues. activity_gateway(IActivityGateway): Injected ActivityGateway. _: keyword arguments. Returns: Tuple[bool, Optional[str]]: Tuple of whether there are activities with invalid dates, if they can be automatically fixed and a string of the problem. 
""" invalid_activities = [] for activity in activity_gateway.get_all_activities(include_deleted=True): plan = activity.association.plan if ( activity.started_at_time < plan.date_created or activity.ended_at_time < activity.started_at_time or (activity.invalidated_at and activity.invalidated_at < activity.ended_at_time) ): invalid_activities.append(activity) if not invalid_activities: return True, False, None if not fix: ids = [a.id for a in invalid_activities] message = ( WARNING + "The following activity have incorrect start, end, or delete date (use 'renku doctor --fix' to fix them):" + "\n\t" + "\n\t".join(ids) ) return False, True, message fix_activity_dates(activities=invalid_activities) project_context.database.commit() communication.info("Activity dates were fixed") return True, False, None def fix_activity_dates(activities): """Fix activities' start/end/delete dates.""" for activity in activities: plan = activity.association.plan activity.unfreeze() if activity.started_at_time < plan.date_created: activity.started_at_time = plan.date_created if activity.ended_at_time < activity.started_at_time: activity.ended_at_time = activity.started_at_time if activity.invalidated_at and activity.invalidated_at < activity.ended_at_time: activity.invalidated_at = activity.ended_at_time activity.freeze()
null
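The activity-date check above enforces the ordering plan.date_created <= started_at_time <= ended_at_time <= invalidated_at. A simplified, dependency-free illustration of the same clamping rule, with SimpleNamespace standing in for the real activity objects:

# Simplified stand-in for the clamping performed by fix_activity_dates above.
from datetime import datetime, timedelta
from types import SimpleNamespace

plan_created = datetime(2023, 1, 1)
activity = SimpleNamespace(
    started_at_time=plan_created - timedelta(days=1),   # starts before the plan exists -> invalid
    ended_at_time=plan_created - timedelta(hours=12),
    invalidated_at=None,
)

if activity.started_at_time < plan_created:
    activity.started_at_time = plan_created
if activity.ended_at_time < activity.started_at_time:
    activity.ended_at_time = activity.started_at_time
if activity.invalidated_at and activity.invalidated_at < activity.ended_at_time:
    activity.invalidated_at = activity.ended_at_time

print(plan_created <= activity.started_at_time <= activity.ended_at_time)   # True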
find all classifier components
import streamlit as st from sparknlp.annotator import * import nlu from nlu.pipe.utils.resolution.storage_ref_utils import StorageRefUtils from nlu.universe.feature_node_ids import NLP_NODE_IDS class StreamlitUtilsOS(): classifers_OS = [ClassifierDLModel, LanguageDetectorDL, MultiClassifierDLModel, NerDLModel, NerCrfModel, YakeKeywordExtraction, PerceptronModel, SentimentDLModel, SentimentDetectorModel, ViveknSentimentModel, DependencyParserModel, TypedDependencyParserModel, T5Transformer, MarianTransformer, NerConverter] @staticmethod def get_classifier_cols(pipe): classifier_cols = [] for c in pipe.components: if type(c.model) in StreamlitUtilsOS.classifers_OS: classifier_cols += pipe.anno2final_cols[c.model] return classifier_cols @staticmethod def get_embed_cols(pipe): classifier_cols = [] embedders = StreamlitUtilsOS.find_all_embed_components(pipe) for c in embedders: classifier_cols += pipe.anno2final_cols[c.model] return classifier_cols @staticmethod def find_embed_col(df, search_multi=False): """Find col that contains embed""" if not search_multi: for c in df.columns: if 'embed' in c: return c else: e_cols = [] for c in df.columns: if 'embed' in c: e_cols.append(c) return e_cols @staticmethod def find_embed_component(p): """Find first embed component_to_resolve in component_list""" for c in p.components: if 'embed' in c.out_types[0]: return c st.warning("No Embed model_anno_obj in component_list") return None @staticmethod def METHOD_NAME(pipe): """Find ALL classifier component_to_resolve in component_list""" classifier_comps = [] for c in pipe.components: if type(c.model) in StreamlitUtilsOS.classifers_OS: classifier_comps.append(c) return classifier_comps @staticmethod def find_all_embed_components(p): """Find ALL embed component_to_resolve in component_list""" cs = [] for c in p.components: if 'embed' in c.out_types[0] and 'chunk' not in c.out_types[0]: cs.append(c) if len(cs) == 0: st.warning("No Embed model_anno_obj in component_list") return cs @staticmethod def extract_name(component_or_pipe): name = '' if hasattr(component_or_pipe, 'info'): if hasattr(component_or_pipe, 'nlu_ref'): name = component_or_pipe.nlu_ref elif hasattr(component_or_pipe, 'storage_ref'): name = component_or_pipe.storage_ref elif hasattr(component_or_pipe, 'nlp_ref'): name = component_or_pipe.nlp_ref elif hasattr(component_or_pipe, 'nlu_ref'): name = component_or_pipe.nlu_ref return name @staticmethod def find_ner_model(p): """Find NER component_to_resolve in component_list""" from sparknlp.annotator import NerDLModel, NerCrfModel for c in p.components: if isinstance(c.model, (NerDLModel, NerCrfModel)): return c.model st.warning("No NER model_anno_obj in component_list") return None @staticmethod def get_NER_tags_in_pipe(p): """Get NER tags in component_list, used for showing visualizable tags""" n = StreamlitUtilsOS.find_ner_model(p) if n is None: return [] classes_predicted_by_ner_model = n.getClasses() split_iob_tags = lambda s: s.split('-')[1] if '-' in s else '' classes_predicted_by_ner_model = list(map(split_iob_tags, classes_predicted_by_ner_model)) while '' in classes_predicted_by_ner_model: classes_predicted_by_ner_model.remove('') classes_predicted_by_ner_model = list(set(classes_predicted_by_ner_model)) return classes_predicted_by_ner_model @staticmethod def get_manifold_algo(algo, dim, n_jobs=None): from sklearn.manifold import TSNE, Isomap, LocallyLinearEmbedding, MDS, SpectralEmbedding from sklearn.decomposition import TruncatedSVD, DictionaryLearning, FactorAnalysis, FastICA, KernelPCA, 
PCA, \ LatentDirichletAllocation # manifold if algo == 'TSNE': return TSNE(n_components=dim, n_jobs=n_jobs) if algo == 'ISOMAP': return Isomap(n_components=dim, n_jobs=n_jobs) if algo == 'LLE': return LocallyLinearEmbedding(n_components=dim, n_jobs=n_jobs) if algo == 'Spectral Embedding': return SpectralEmbedding(n_components=dim, n_jobs=n_jobs) if algo == 'MDS': return MDS(n_components=dim, n_jobs=n_jobs) # Matrix Decomposition if algo == 'PCA': return PCA(n_components=dim) # No hyper if algo == 'SVD aka LSA': return TruncatedSVD(n_components=dim) # No hyper if algo == 'DictionaryLearning': return DictionaryLearning(n_components=dim, n_jobs=n_jobs) if algo == 'FactorAnalysis': return FactorAnalysis(n_components=dim) # no hyper if algo == 'FastICA': return FastICA(n_components=dim) # no hyper if algo == 'KernelPCA': return KernelPCA(n_components=dim, n_jobs=n_jobs) # not applicable because negative values, todo we could just take absolute values of all embeds.. if algo == 'LatentDirichletAllocation': return LatentDirichletAllocation(n_components=dim) # if algo =='NMF': return NMF(n_components=dim) @staticmethod @st.cache(allow_output_mutation=True, hash_funcs={"_thread.RLock": lambda _: None}) def get_pipe(model='ner'): return nlu.load(model) @staticmethod def merge_token_classifiers_with_embed_pipe(embed_pipe, token_pipe): """Merge token feature generators into embed component_list. i.e. Pos/Dep_depdency/Untyped_dep if not already present in component_list""" for c in token_pipe.components: if c.name == NLP_NODE_IDS.POS: for emb_c in embed_pipe.components: if emb_c.name == NLP_NODE_IDS.POS: embed_pipe.is_fitted = False embed_pipe.fit() return embed_pipe # only merge if pos not already in component_list embed_pipe.components.append(c) embed_pipe.is_fitted = False embed_pipe.fit() return embed_pipe @staticmethod def extract_all_sentence_storage_refs_or_nlu_refs(e_coms): """extract either NLU_ref or storage_ref as fallback for a list of embedding components""" loaded_storage_refs = [] loaded_embed_nlu_refs = [] for c in e_coms: if not hasattr(c, 'nlu_ref'): continue r = c.nlu_ref if 'en.' not in r and 'embed_sentence.' not in r and 'ner' not in r: loaded_embed_nlu_refs.append('en.embed_sentence.' + r) elif 'en.' in r and 'embed_sentence.' not in r and 'ner' not in r: r = r.split('en.')[0] loaded_embed_nlu_refs.append('en.embed_sentence.' + r) else: loaded_embed_nlu_refs.append(StorageRefUtils.extract_storage_ref(c)) loaded_storage_refs.append(StorageRefUtils.extract_storage_ref(c)) return loaded_embed_nlu_refs, loaded_storage_refs
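The NER-tag extraction above reduces to stripping IOB prefixes and de-duplicating; the same step in isolation, with a made-up tag set:

# Standalone version of the IOB-tag cleanup done in get_NER_tags_in_pipe above.
classes_predicted_by_ner_model = ["O", "B-PER", "I-PER", "B-LOC", "I-LOC"]   # made-up tags

split_iob_tags = lambda s: s.split('-')[1] if '-' in s else ''
tags = sorted({t for t in map(split_iob_tags, classes_predicted_by_ner_model) if t})
print(tags)   # ['LOC', 'PER']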
null
to str
# coding: utf-8 """ Kubernetes No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: release-1.28 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from kubernetes.client.configuration import Configuration class V1TokenReviewSpec(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'audiences': 'list[str]', 'token': 'str' } attribute_map = { 'audiences': 'audiences', 'token': 'token' } def __init__(self, audiences=None, token=None, local_vars_configuration=None): # noqa: E501 """V1TokenReviewSpec - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._audiences = None self._token = None self.discriminator = None if audiences is not None: self.audiences = audiences if token is not None: self.token = token @property def audiences(self): """Gets the audiences of this V1TokenReviewSpec. # noqa: E501 Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver. # noqa: E501 :return: The audiences of this V1TokenReviewSpec. # noqa: E501 :rtype: list[str] """ return self._audiences @audiences.setter def audiences(self, audiences): """Sets the audiences of this V1TokenReviewSpec. Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver. # noqa: E501 :param audiences: The audiences of this V1TokenReviewSpec. # noqa: E501 :type: list[str] """ self._audiences = audiences @property def token(self): """Gets the token of this V1TokenReviewSpec. # noqa: E501 Token is the opaque bearer token. # noqa: E501 :return: The token of this V1TokenReviewSpec. # noqa: E501 :rtype: str """ return self._token @token.setter def token(self, token): """Sets the token of this V1TokenReviewSpec. Token is the opaque bearer token. # noqa: E501 :param token: The token of this V1TokenReviewSpec. 
# noqa: E501 :type: str """ self._token = token def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def METHOD_NAME(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.METHOD_NAME() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, V1TokenReviewSpec): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, V1TokenReviewSpec): return True return self.to_dict() != other.to_dict()
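A brief usage sketch for the generated model above, assuming the standard kubernetes Python client packaging implied by the imports:

# Construct the spec and round-trip it through the helpers defined above.
from kubernetes.client import V1TokenReviewSpec

spec = V1TokenReviewSpec(token="opaque-bearer-token",
                         audiences=["https://kubernetes.default.svc"])
print(spec.to_dict())   # {'audiences': ['https://kubernetes.default.svc'], 'token': 'opaque-bearer-token'}
print(spec == V1TokenReviewSpec(token="opaque-bearer-token",
                                audiences=["https://kubernetes.default.svc"]))   # True -- __eq__ compares dicts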
null
read encoded uint32
# # This file is part of the PyRDP project. # Copyright (C) 2020 GoSecure Inc. # Licensed under the GPLv3 or later. # """ Common stream reading utilities. All section numbers reference MS-RDPEGDI sections: https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-rdpegdi/745f2eee-d110-464c-8aca-06fc1814f6ad """ from io import BytesIO from pyrdp.core.packing import Uint8, Int8, Uint16LE, Int16LE, Uint32LE def read_encoded_uint16(s: BytesIO) -> int: """Read an encoded UINT16.""" # 2.2.2.2.1.2.1.2 b = Uint8.unpack(s) if b & 0x80: return (b & 0x7F) << 8 | Uint8.unpack(s) else: return b & 0x7F def read_encoded_int16(s: BytesIO) -> int: # 2.2.2.2.1.2.1.3 msb = Uint8.unpack(s) val = msb & 0x3F if msb & 0x80: lsb = Uint8.unpack(s) val = (val << 8) | lsb return -val if msb & 0x40 else val def METHOD_NAME(s: BytesIO) -> int: # 2.2.2.2.1.2.1.4 b = Uint8.unpack(s) n = (b & 0xC0) >> 6 if n == 0: return b & 0x3F elif n == 1: return (b & 0x3F) << 8 | Uint8.unpack(s) elif n == 2: return ((b & 0x3F) << 16 | Uint8.unpack(s) << 8 | Uint8.unpack(s)) else: # 3 return ((b & 0x3F) << 24 | Uint8.unpack(s) << 16 | Uint8.unpack(s) << 8 | Uint8.unpack(s)) def read_color(s: BytesIO): """ 2.2.2.2.1.3.4.1.1 TS_COLORREF -> rgb 2.2.2.2.1.2.4.1 TS_COLOR_QUAD -> bgr """ return Uint32LE.unpack(s) & 0x00FFFFFF def read_utf16_str(s: BytesIO, size: int) -> [int]: return [Uint16LE.unpack(s) for _ in range(size)] # Decode into str? def read_glyph_bitmap(w: int, h: int, s: BytesIO) -> bytes: """Read and inflate a glyph bitmap.""" # Glyph encoding is specified in section 2.2.2.2.1.2.6.1 scanline = ((w + 7) // 8) size = scanline * h packed = s.read(size) pad = 4 - (size % 4) if pad < 4: # Skip alignment padding. s.read(pad) # Convert to 1 byte per pixel format for debugging. # data = bytearray(w * h) # for y in range(h): # line = y * w # for x in range(w): # bits = packed[scanline * y + (x // 8)] # px = (bits >> (8 - (x % 8))) & 1 # data[line + x] = px # return data return packed class Glyph: """ TS_CACHE_GLYPH_DATA (2.2.2.2.1.2.5.1) """ @staticmethod def parse(s: BytesIO) -> 'Glyph': self = Glyph() self.cacheIndex = Uint16LE.unpack(s) self.x = Uint16LE.unpack(s) self.y = Uint16LE.unpack(s) self.w = Uint16LE.unpack(s) self.h = Uint16LE.unpack(s) self.data = read_glyph_bitmap(self.w, self.h, s) return self class GlyphV2: """ TS_CACHE_GLYPH_DATA_REV2 (2.2.2.2.1.2.6.1) """ @staticmethod def parse(s: BytesIO) -> Glyph: self = Glyph() self.cacheIndex = Uint8.unpack(s) self.x = read_encoded_int16(s) self.y = read_encoded_int16(s) self.w = read_encoded_uint16(s) self.h = read_encoded_uint16(s) self.data = read_glyph_bitmap(self.w, self.h, s) return self BOUND_LEFT = 0x01 BOUND_TOP = 0x02 BOUND_RIGHT = 0x04 BOUND_BOTTOM = 0x08 BOUND_DELTA_LEFT = 0x10 BOUND_DELTA_TOP = 0x20 BOUND_DELTA_RIGHT = 0x40 BOUND_DELTA_BOTTOM = 0x80 class Bounds: """A bounding rectangle.""" def __init__(self): self.left = 0 self.top = 0 self.bottom = 0 self.right = 0 def update(self, s: BytesIO): flags = Uint8.unpack(s) if flags & BOUND_LEFT: self.left = Int16LE.unpack(s) elif flags & BOUND_DELTA_LEFT: self.left += Int8.unpack(s) if flags & BOUND_TOP: self.top = Int16LE.unpack(s) elif flags & BOUND_DELTA_TOP: self.top += Int8.unpack(s) if flags & BOUND_RIGHT: self.right = Int16LE.unpack(s) elif flags & BOUND_DELTA_RIGHT: self.right += Int8.unpack(s) if flags & BOUND_BOTTOM: self.bottom = Int16LE.unpack(s) elif flags & BOUND_DELTA_BOTTOM: self.bottom += Int8.unpack(s) def __str__(self): return f'<Bounds {self.left}, {self.top}, {self.right - self.left}, 
{self.bottom - self.top}>'
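The variable-length readers above can be sanity-checked against small hand-made byte strings; the expected values follow from the bit layouts in the code. This sketch assumes the reader functions are in scope (for example, run inside the same module).

# Hand-checked examples for the MS-RDPEGDI variable-length integer readers above.
from io import BytesIO

print(read_encoded_uint16(BytesIO(b"\x05")))       # 5   -- high bit clear: single byte
print(read_encoded_uint16(BytesIO(b"\x81\x02")))   # 258 -- high bit set pulls in a second byte
print(read_encoded_int16(BytesIO(b"\x45")))        # -5  -- bit 0x40 of the first byte carries the sign
print(read_encoded_int16(BytesIO(b"\x05")))        # 5
# The UINT32 reader (masked as METHOD_NAME above) uses the top two bits to select 1-4 bytes.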
null
parse value string
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This script translates synth trace files in format version 2 to format version 3. """ import argparse import json import sys def parse_value_id(value): colonInd = value.find(":") if colonInd == -1: return None tag = value[0:colonInd] if tag == "object": return int(value[colonInd + 1 :]) else: return None def METHOD_NAME(value): colonInd = value.find(":") if colonInd == -1: return None tag = value[0:colonInd] if tag == "string": return value[colonInd + 1 :] else: return None def max_obj_id(trace): maxId = 0 for rec in trace: for key in rec: if key in [ "objID", "propNameID", "propID", "hostObjectID", "propNamesID", "functionID", ]: maxId = max(maxId, rec[key]) elif key in ["value", "retval"]: valueId = parse_value_id(rec[key]) if valueId is not None: maxId = max(maxId, valueId) elif key in ["args", "properties"]: for val in rec[key]: valueId = parse_value_id(val) if valueId is not None: maxId = max(maxId, valueId) return maxId def new_create_string_or_prop_name_id_record(type, str, id): return {"type": type, "objID": id, "encoding": "UTF-8", "chars": str} def new_create_string_record(str, id): return new_create_string_or_prop_name_id_record("CreateStringRecord", str, id) def new_create_prop_name_id_record(str, id): return new_create_string_or_prop_name_id_record("CreatePropNameIDRecord", str, id) def transform_trace_work(trace, maxObjID): curObjID = maxObjID + 1 out = [] for rec in trace: if rec["type"] == "CreateHostFunctionRecord": funcName = "" if "functionName" in rec: funcName = rec["functionName"] out.append(new_create_prop_name_id_record(funcName, curObjID)) rec["propNameID"] = curObjID curObjID += 1 elif rec["type"] in [ "GetPropertyRecord", "SetPropertyRecord", "HasPropertyRecord", ]: out.append(new_create_string_record(rec["propName"], curObjID)) rec["propID"] = curObjID curObjID += 1 elif rec["type"] in ["GetPropertyNativeRecord", "SetPropertyNativeRecord"]: out.append(new_create_prop_name_id_record(rec["propName"], curObjID)) rec["propNameID"] = curObjID curObjID += 1 for key in rec: if key in ["value", "retval"]: valueString = METHOD_NAME(rec[key]) if valueString is not None: out.append(new_create_string_record(valueString, curObjID)) rec[key] = "string:" + str(curObjID) curObjID += 1 elif key in ["args", "properties"]: newArr = [] for val in rec[key]: valueString = METHOD_NAME(val) if valueString is not None: out.append(new_create_string_record(valueString, curObjID)) newArr.append("string:" + str(curObjID)) curObjID += 1 else: newArr.append(val) rec[key] = newArr out.append(rec) return out def transform_trace(trace): return transform_trace_work(trace, max_obj_id(trace)) def transform2to3(jsonInput): out = {} if jsonInput["version"] != 2: print("Expected version 2 trace; got " + str(jsonInput["version"])) return None for key in jsonInput: if key == "version": out[key] = 3 elif key == "trace": out[key] = transform_trace(jsonInput[key]) else: out[key] = jsonInput[key] return out def main() -> None: parser = argparse.ArgumentParser() parser.add_argument( "json_file", nargs="?", type=argparse.FileType("r"), default=sys.stdin ) args = parser.parse_args() jsonInput = json.load(args.json_file) newJson = transform2to3(jsonInput) json.dump(newJson, sys.stdout, indent=4) if __name__ == "__main__": main()
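A tiny end-to-end illustration of the translation above: a made-up version-2 trace with one GetPropertyRecord gains a CreateStringRecord for the property name plus a propID field pointing at it. This assumes transform2to3 is imported or otherwise in scope.

# Made-up minimal version-2 trace run through transform2to3 from above.
import json

v2 = {
    "version": 2,
    "trace": [
        {"type": "GetPropertyRecord", "objID": 1, "propName": "foo"},
    ],
}
v3 = transform2to3(v2)
print(json.dumps(v3, indent=2))
# "version" becomes 3, the trace now starts with
#   {"type": "CreateStringRecord", "objID": 2, "encoding": "UTF-8", "chars": "foo"}
# and the GetPropertyRecord gains "propID": 2.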
null
test validate protected queries no queries
import pytest from sentry.testutils.silo import validate_protected_queries def METHOD_NAME(): validate_protected_queries([]) def test_validate_protected_queries__ok(): queries = [ {"sql": "SELECT * FROM sentry_organization"}, {"sql": "UPDATE sentry_team SET slug = 'best-team' WHERE id = 1"}, ] validate_protected_queries(queries) def test_validate_protected_queries__missing_fences(): queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'oops\' WHERE "id" = 1'}, {"sql": 'UPDATE "sentry_team" SET "slug" = \'frontend\' WHERE "id" = 3'}, ] with pytest.raises(AssertionError): validate_protected_queries(queries) def test_validate_protected_queries__with_single_fence(): queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": "SELECT 'start_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'oops\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_1'"}, {"sql": 'UPDATE "sentry_team" SET "slug" = \'frontend\' WHERE "id" = 3'}, ] validate_protected_queries(queries) def test_validate_protected_queries__multiple_fences(): queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": "SELECT 'start_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'oops\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_1'"}, {"sql": 'UPDATE "sentry_team" SET "slug" = \'frontend\' WHERE "id" = 3'}, {"sql": "SELECT 'start_role_override_2'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'another-oops\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_2'"}, ] validate_protected_queries(queries) def test_validate_protected_queries__nested_fences(): queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": "SELECT 'start_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'safe\' WHERE "id" = 1'}, # Nested role overrides shouldn't happen but we need to handle them just in case. {"sql": "SELECT 'start_role_override_2'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'also-safe\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_2'"}, {"sql": "SELECT 'end_role_override_1'"}, {"sql": 'UPDATE "sentry_team" SET "slug" = \'frontend\' WHERE "id" = 3'}, {"sql": 'UPDATE "sentry_organizationmemberteam" SET "role" = \'member\' WHERE "id" = 3'}, ] validate_protected_queries(queries) queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": "SELECT 'start_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'safe\' WHERE "id" = 1'}, # Nested role overrides shouldn't happen but we need to handle them just in case. 
{"sql": "SELECT 'start_role_override_2'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'also-safe\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_2'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'still-safe\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'not-safe\' WHERE "id" = 1'}, ] with pytest.raises(AssertionError): validate_protected_queries(queries) def test_validate_protected_queries__fenced_and_not(): queries = [ {"sql": 'SAVEPOINT "s123abc"'}, {"sql": 'UPDATE "sentry_useremail" SET "is_verified" = true WHERE "id" = 1'}, {"sql": "SELECT 'start_role_override_1'"}, {"sql": 'UPDATE "sentry_organization" SET "slug" = \'oops\' WHERE "id" = 1'}, {"sql": "SELECT 'end_role_override_1'"}, {"sql": 'UPDATE "sentry_team" SET "slug" = \'frontend\' WHERE "id" = 3'}, # This query is lacking fences {"sql": 'UPDATE "sentry_organization" SET "slug" = \'another-oops\' WHERE "id" = 1'}, ] with pytest.raises(AssertionError): validate_protected_queries(queries)
null
equals
# -*- coding: utf-8 -*- # # ast_source_location.py # # This file is part of NEST. # # Copyright (C) 2004 The NEST Initiative # # NEST is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # # NEST is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with NEST. If not, see <http://www.gnu.org/licenses/>. import sys class ASTSourceLocation: """ This class is used to store information regarding the source position of an element. Attributes: start_line = 0 start_column = 0 end_line = 0 end_column = 0 """ def __init__(self, start_line, start_column, end_line, end_column): """ Standard constructor. :param start_line: The start line of the object :type start_line: int :param start_column: The start column of the object :type start_column: int :param end_line: The end line of the object :type end_line: int :param end_column: The end column of the object :type end_column: int """ self.start_line = start_line self.start_column = start_column self.end_line = end_line self.end_column = end_column @classmethod def make_ast_source_position(cls, start_line, start_column, end_line, end_column): """ Factory method of the ASTSourceLocation class. :param start_line: The start line of the object :type start_line: int :param start_column: The start column of the object :type start_column: int :param end_line: The end line of the object :type end_line: int :param end_column: The end column of the object :type end_column: int :return: a new ASTSourceLocation object :rtype: ASTSourceLocation """ return cls(start_line=start_line, start_column=start_column, end_line=end_line, end_column=end_column) def get_start_line(self): """ Returns the start line of the element. :return: the start line as int :rtype: int """ return self.start_line def get_start_column(self): """ Returns the start column of the element. :return: the start column as int :rtype: int """ return self.start_column def get_end_line(self): """ Returns the end line of the element. :return: the end line as int :rtype: int """ return self.end_line def get_end_column(self): """ Returns the end column of the element. :return: the end column as int :rtype: int """ return self.end_column def METHOD_NAME(self, source_position): """ Checks if the handed over position is equal to this. :param source_position: a source position. :type source_position: ASTSourceLocation :return: True if equal, otherwise False. :rtype: bool """ if not isinstance(source_position, ASTSourceLocation): return False return (self.get_start_line() == source_position.get_start_line() and self.get_start_column() == source_position.get_start_column() and self.get_end_line() == source_position.get_end_line() and self.get_end_column() == source_position.get_end_column()) def before(self, source_position): """ Checks if the handed over position is smaller than this. :param source_position: a source position. 
:type source_position: ASTSourceLocation :return: True if smaller, otherwise False :rtype: bool """ if not isinstance(source_position, ASTSourceLocation): return False # in th case that it is artificially added or that it is predefined, the rule for before does not apply # here we assume that the insertion is added at a correct point. # If both are predefined, then there is no conflict if self.is_predefined_source_position() and source_position.is_predefined_source_position(): return True # IF both are artificial, then its also ok if self.is_added_source_position() and source_position.is_added_source_position(): return True # Predefined are always added at the beginning, if self.is_predefined_source_position(): return True if self.is_added_source_position(): return False if self.get_start_line() < source_position.get_start_line(): return True elif self.get_start_line() == source_position.get_start_line() and \ self.get_start_column() < source_position.get_start_column(): return True else: return False @classmethod def get_predefined_source_position(cls): """ Returns a source position which symbolizes that the corresponding element is predefined. :return: a source position :rtype: ASTSourceLocation """ return cls(-1, -1, -1, -1) @classmethod def get_added_source_position(cls): """ Returns a source position which symbolize that the corresponding element has been added by the solver. :return: a source position. :rtype: ASTSourceLocation """ return cls(sys.maxsize, sys.maxsize, sys.maxsize, sys.maxsize) def is_predefined_source_position(self): """ Indicates whether this represents a predefined source position. :return: True if predefined, otherwise False. :rtype: bool """ return self.METHOD_NAME(ASTSourceLocation.get_predefined_source_position()) def is_added_source_position(self): """ Indicates whether this represents an artificially added source position.. :return: a source position. :rtype: ASTSourceLocation """ return self.METHOD_NAME(ASTSourceLocation.get_added_source_position()) def encloses(self, source_position): """ Checks if the handed over position is enclosed in this source position, e.g., line 0 to 10 encloses lines 0 to 9 etc. :param source_position: a source position :type source_position: ASTSourceLocation :return: True if enclosed, otherwise False. :rtype: bool """ if not isinstance(source_position, ASTSourceLocation): return False if (self.get_start_line() <= source_position.get_start_line() and self.get_end_line() >= source_position.get_end_line() and self.get_start_column() <= source_position.get_start_column() and self.get_end_column() >= source_position.get_end_column()): return True else: return False def __str__(self): """ A string representation of this source position. :return: a string representation :rtype: str """ if self.is_added_source_position(): return '<ADDED_BY_SOLVER>' elif self.is_predefined_source_position(): return '<PREDEFINED>' else: return '[' + str(self.get_start_line()) + ':' + str(self.get_start_column()) + ';' + \ str(self.get_end_line()) + ':' + str(self.get_end_column()) + ']'
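A short usage sketch of the position comparisons defined above; the line and column values are arbitrary.

# Arbitrary positions exercising the comparison helpers above.
a = ASTSourceLocation.make_ast_source_position(start_line=1, start_column=0, end_line=1, end_column=20)
b = ASTSourceLocation.make_ast_source_position(start_line=1, start_column=4, end_line=1, end_column=10)
c = ASTSourceLocation.make_ast_source_position(start_line=3, start_column=0, end_line=3, end_column=5)

print(a.encloses(b))   # True -- b lies completely inside a
print(a.before(c))     # True -- a starts on an earlier line
print(str(ASTSourceLocation.get_predefined_source_position()))   # '<PREDEFINED>'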
null
package
from conan import ConanFile from conan.errors import ConanInvalidConfiguration from conan.tools.build import check_min_cppstd, valid_min_cppstd from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout from conan.tools.files import apply_conandata_patches, copy, export_conandata_patches, get, rmdir import os required_conan_version = ">=1.53.0" class EasyhttpcppConan(ConanFile): name = "easyhttpcpp" description = "A cross-platform HTTP client library with a focus on usability and speed" license = "MIT" topics = ("http", "client", "protocol") homepage = "https://github.com/sony/easyhttpcpp" url = "https://github.com/conan-io/conan-center-index" package_type = "library" settings = "os", "arch", "compiler", "build_type" options = { "shared": [True, False], "fPIC": [True, False], } default_options = { "shared": False, "fPIC": True, } short_paths = True @property def _min_cppstd(self): return "11" def export_sources(self): export_conandata_patches(self) def config_options(self): if self.settings.os == "Windows": del self.options.fPIC def configure(self): if self.options.shared: self.options.rm_safe("fPIC") def layout(self): cmake_layout(self, src_folder="src") def requirements(self): self.requires("poco/1.12.4", transitive_headers=True, transitive_libs=True) @property def _required_poco_components(self): comps = ["enable_data", "enable_data_sqlite", "enable_net"] if self.settings.os == "Windows": comps.append("enable_netssl_win") else: comps.append("enable_netssl") return comps def validate(self): if any([not self.dependencies["poco"].options.get_safe(comp, False) for comp in self._required_poco_components]): raise ConanInvalidConfiguration( f"{self.ref} requires the following poco options enabled: {', '.join(self._required_poco_components)}" ) if self.settings.compiler.get_safe("cppstd"): check_min_cppstd(self, self._min_cppstd) def source(self): get(self, **self.conan_data["sources"][self.version], strip_root=True) def generate(self): tc = CMakeToolchain(self) tc.variables["FORCE_SHAREDLIB"] = self.options.shared if not valid_min_cppstd(self, self._min_cppstd): tc.variables["CMAKE_CXX_STANDARD"] = self._min_cppstd if self.settings.os == "Windows" and self.options.shared: tc.preprocessor_definitions["EASYHTTPCPP_DLL"] = "1" tc.preprocessor_definitions["EASYHTTPCPP_API_EXPORTS"] = "1" tc.generate() deps = CMakeDeps(self) deps.generate() def build(self): apply_conandata_patches(self) cmake = CMake(self) cmake.configure() cmake.build() def METHOD_NAME(self): copy(self, "LICENSE", src=self.source_folder, dst=os.path.join(self.package_folder, "licenses")) cmake = CMake(self) cmake.install() rmdir(self, os.path.join(self.package_folder, "lib", "cmake")) def package_info(self): self.cpp_info.set_property("cmake_file_name", "easyhttpcppeasyhttp") self.cpp_info.set_property("cmake_target_name", "easyhttpcpp::easyhttp") # TODO: back to global scope in conan v2 libsuffix = "" if self.settings.build_type == "Debug": if self.settings.os == "Windows" and not self.options.shared: libsuffix += "md" libsuffix += "d" self.cpp_info.components["easyhttp"].libs = [f"easyhttp{libsuffix}"] if self.settings.os == "Windows" and self.options.shared: self.cpp_info.components["easyhttp"].defines.append("EASYHTTPCPP_DLL") self.cpp_info.components["easyhttp"].requires = [ "poco::poco_foundation", "poco::poco_data", "poco::poco_datasqlite", "poco::poco_net", ] if self.settings.os == "Windows": self.cpp_info.components["easyhttp"].requires.append("poco::poco_netsslwin") else: 
self.cpp_info.components["easyhttp"].requires.append("poco::poco_netssl") # TODO: to remove in conan v2 self.cpp_info.filenames["cmake_find_package"] = "easyhttpcppeasyhttp" self.cpp_info.filenames["cmake_find_package_multi"] = "easyhttpcppeasyhttp" self.cpp_info.names["cmake_find_package"] = "easyhttpcpp" self.cpp_info.names["cmake_find_package_multi"] = "easyhttpcpp" self.cpp_info.components["easyhttp"].names["cmake_find_package"] = "easyhttp" self.cpp_info.components["easyhttp"].names["cmake_find_package_multi"] = "easyhttp" self.cpp_info.components["easyhttp"].set_property("cmake_target_name", "easyhttpcpp::easyhttp")
null
test update settings flow
""" Ory APIs Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501 The version of the OpenAPI document: v1.2.1 Contact: [email protected] Generated by: https://openapi-generator.tech """ import unittest import ory_client from ory_client.api.frontend_api import FrontendApi # noqa: E501 class TestFrontendApi(unittest.TestCase): """FrontendApi unit test stubs""" def setUp(self): self.api = FrontendApi() # noqa: E501 def tearDown(self): pass def test_create_browser_login_flow(self): """Test case for create_browser_login_flow Create Login Flow for Browsers # noqa: E501 """ pass def test_create_browser_logout_flow(self): """Test case for create_browser_logout_flow Create a Logout URL for Browsers # noqa: E501 """ pass def test_create_browser_recovery_flow(self): """Test case for create_browser_recovery_flow Create Recovery Flow for Browsers # noqa: E501 """ pass def test_create_browser_registration_flow(self): """Test case for create_browser_registration_flow Create Registration Flow for Browsers # noqa: E501 """ pass def test_create_browser_settings_flow(self): """Test case for create_browser_settings_flow Create Settings Flow for Browsers # noqa: E501 """ pass def test_create_browser_verification_flow(self): """Test case for create_browser_verification_flow Create Verification Flow for Browser Clients # noqa: E501 """ pass def test_create_native_login_flow(self): """Test case for create_native_login_flow Create Login Flow for Native Apps # noqa: E501 """ pass def test_create_native_recovery_flow(self): """Test case for create_native_recovery_flow Create Recovery Flow for Native Apps # noqa: E501 """ pass def test_create_native_registration_flow(self): """Test case for create_native_registration_flow Create Registration Flow for Native Apps # noqa: E501 """ pass def test_create_native_settings_flow(self): """Test case for create_native_settings_flow Create Settings Flow for Native Apps # noqa: E501 """ pass def test_create_native_verification_flow(self): """Test case for create_native_verification_flow Create Verification Flow for Native Apps # noqa: E501 """ pass def test_disable_my_other_sessions(self): """Test case for disable_my_other_sessions Disable my other sessions # noqa: E501 """ pass def test_disable_my_session(self): """Test case for disable_my_session Disable one of my sessions # noqa: E501 """ pass def test_exchange_session_token(self): """Test case for exchange_session_token Exchange Session Token # noqa: E501 """ pass def test_get_flow_error(self): """Test case for get_flow_error Get User-Flow Errors # noqa: E501 """ pass def test_get_login_flow(self): """Test case for get_login_flow Get Login Flow # noqa: E501 """ pass def test_get_recovery_flow(self): """Test case for get_recovery_flow Get Recovery Flow # noqa: E501 """ pass def test_get_registration_flow(self): """Test case for get_registration_flow Get Registration Flow # noqa: E501 """ pass def test_get_settings_flow(self): """Test case for get_settings_flow Get Settings Flow # noqa: E501 """ pass def test_get_verification_flow(self): """Test case for get_verification_flow Get Verification Flow # noqa: E501 """ pass def test_get_web_authn_java_script(self): """Test case for get_web_authn_java_script Get WebAuthn JavaScript # noqa: E501 """ pass def test_list_my_sessions(self): """Test case for list_my_sessions Get My Active Sessions # noqa: E501 """ pass def test_perform_native_logout(self): 
"""Test case for perform_native_logout Perform Logout for Native Apps # noqa: E501 """ pass def test_to_session(self): """Test case for to_session Check Who the Current HTTP Session Belongs To # noqa: E501 """ pass def test_update_login_flow(self): """Test case for update_login_flow Submit a Login Flow # noqa: E501 """ pass def test_update_logout_flow(self): """Test case for update_logout_flow Update Logout Flow # noqa: E501 """ pass def test_update_recovery_flow(self): """Test case for update_recovery_flow Complete Recovery Flow # noqa: E501 """ pass def test_update_registration_flow(self): """Test case for update_registration_flow Update Registration Flow # noqa: E501 """ pass def METHOD_NAME(self): """Test case for update_settings_flow Complete Settings Flow # noqa: E501 """ pass def test_update_verification_flow(self): """Test case for update_verification_flow Complete Verification Flow # noqa: E501 """ pass if __name__ == '__main__': unittest.main()
null
text leading
# # Part of p5: A Python package based on Processing # Copyright (C) 2017-2019 Abhik Pal # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from typing import Optional from . import p5 __all__ = [ "create_font", "load_font", "text", "text_font", "text_align", "text_leading", "text_size", "text_width", "text_ascent", "text_descent", "text_style", "text_wrap", ] def create_font(name: str, size: int = 10): """Create the given font at the appropriate size. :param name: Filename of the font file (only pil, otf and ttf fonts are supported.) :param size: Font size (only required when `name` refers to a truetype font; defaults to None) :type size: int | None """ return p5.renderer.create_font(name, size) def load_font(font_name): """Loads the given font into a font object""" return p5.renderer.load_font(font_name) def text(*args, wrap_at: Optional[int] = None): """Draw the given text on the screen and save the image. :param text_string: text to display :type text_string: str :param x: x-coordinate of text :type x: float :param y: y-coordinate of text :type y: float :param z: z-coordinate of text :type z: float :param position: position of the text on the screen :type position: tuple :param wrap_at: specifies the text wrapping column (defaults to None) :type wrap_at: int :returns: actual text that was drawn to the image (when wrapping is not set, this is just the unmodified text_string) :rtype: str """ if len(args) == 2: text_string, position = args elif len(args) in {3, 4}: text_string, position = args[0], args[1:] else: raise ValueError("Unexpected number of arguments passed to text()") if len(text_string) == 0: return return p5.renderer.text(text_string, position, wrap_at) def text_font(font, size=None): """Set current text font. :param font: PIL.ImageFont.ImageFont for Vispy, Object|String: a font loaded via loadFont(), or a String representing a web safe font (a font that is generally available across all systems) :type font: PIL.ImageFont.ImageFont or Font Object """ p5.renderer.text_font(font, size) def text_align(align_x: str, align_y: Optional[str] = None): """Set the alignment of drawing text :param align_x: "RIGHT", "CENTER" or "LEFT". :param align_y: "TOP", "CENTER" or "BOTTOM". 
""" p5.renderer.style.text_align_x = align_x if align_y: p5.renderer.style.text_align_y = align_y def METHOD_NAME(leading): """Sets the spacing between lines of text in units of pixels :param leading: the size in pixels for spacing between lines :type align_x: int """ p5.renderer.style.METHOD_NAME = leading def text_size(size: int): """Sets the current font size :param leading: the size of the letters in units of pixels """ # reload the font with new size p5.renderer.text_size(size) def text_width(text: str) -> int: """Calculates and returns the width of any character or text string :param text_string: text :returns: width of any character or text string """ return p5.renderer.text_width(text) def text_ascent() -> float: """Returns ascent of the current font at its current size :returns: ascent of the current font at its current size """ return p5.renderer.text_ascent() def text_descent() -> float: """Returns descent of the current font at its current size :returns: descent of the current font at its current size """ return p5.renderer.text_descent() def text_style(s): """ Sets/Gets the style of the text for system fonts to NORMAL, ITALIC, BOLD or BOLDITALIC For non-system fonts (opentype, truetype, etc.) please load styled fonts instead. :param s: Style for the font :type s: NORMAL | ITALIC | BOLD | BOLDITALIC :returns: Current text style :rtype: NORMAL | ITALIC | BOLD | BOLDITALIC """ return p5.renderer.text_style(s) def text_wrap(wrap_style: str): """ Specifies how lines of text are wrapped within a text box. This requires a wrap_at set on the text area, specified in text() as parameter wrap_at. WORD wrap style only breaks lines at spaces. A single string without spaces that exceeds the boundaries of the canvas or text area is not truncated, and will overflow the desired area, disappearing at the canvas edge. CHAR wrap style breaks lines wherever needed to stay within the text box. WORD is the default wrap style, and both styles will still break lines at any line breaks (\n) specified in the original text. :param wrap_style: One of the wrap style mode 'CHAR' or 'WORD' """ p5.renderer.text_wrap(wrap_style)
null
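A minimal sketch showing the typography helpers above inside a running p5 sketch. size(), background() and run() are assumed to come from the package's sketch API, and the font filename is a placeholder.

# Minimal p5 sketch using the typography helpers above.
# "FreeSans.ttf" is a placeholder font file, not something shipped with the library.
from p5 import *

def setup():
    size(400, 200)
    text_font(create_font("FreeSans.ttf", 24))
    text_align("CENTER", "CENTER")
    text_wrap("WORD")

def draw():
    background(255)
    text("Hello, typography!", (200, 100), wrap_at=300)

run()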
name
#!/usr/bin/env python3 import matplotlib.pyplot as plt import numpy as np import sys from math import * import json import os import csv def data_cpu(name_demo, iteration, ii): cpu = [0]*iteration memory = [0]*iteration i = 0 j = 0 a = name_demo nb = len(name_demo) while i != iteration*ii: b = data["benchmarks"][i]["name"] if b != b[:nb]+"/process_time": i+=1 continue if a[:nb] == b[:nb]: cpu[j] = data["benchmarks"][i]["cpu_time"] memory = data["benchmarks"][i]["memory"] j+=1 i+=1 return cpu, memory def graph(name_demo, iteration, i): cpu, memory = data_cpu(name_demo, iteration, i) moy = 0 h = 0 tmp = 0 if iteration != 1: iteration-=3 while h < iteration: tmp = cpu[h] + tmp h += 1 moy = tmp / iteration return name_demo, moy, memory, np.min(cpu[:iteration]), np.max(cpu[:iteration]) def METHOD_NAME(i): name_data = data["benchmarks"][i]["name"] z = 0 while z in range( len(name_data)): if name_data[z] == '/': break z += 1 name_data = name_data[:z] return name_data def names(): name_datas = [0] * len(data["benchmarks"]) i = 0 j = 0 while i != len(data["benchmarks"]): if METHOD_NAME(i) != name_datas[j-1]: name_datas[j] = METHOD_NAME(i) j+=1 i+=1 name_datas = name_datas[:j] return name_datas, j def iteration(): tmp = [0] i = 0 x = 0 j = 0 while i != len(data["benchmarks"]): tmp = METHOD_NAME(i) if tmp == METHOD_NAME(i+1): j+=1 i+=1 else: break return j+1 def store(cpu, name_demo, memory, min_v, max_v): version = sys.argv[2] csvfile = open('benchmark/'+name_demo+'.csv', 'a+') writer = csv.writer(csvfile) writer.writerow( (version, cpu, memory, min_v, max_v) ) csvfile.close() def nb_data(name_demo): with open('benchmark/'+name_demo+'.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: line_count += 1 return line_count def plot(name_demo): nb = nb_data(name_demo) v = [0]*nb value = [0]*nb value2 = [0]*nb min_v = [0]*nb max_v = [0]*nb fig, ax = plt.subplots(2, sharex='col', sharey='row') with open('benchmark/'+name_demo+'.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count_marks = 0 for row in csv_reader: line_count_marks += 1 BDM_marks = [0] * line_count_marks with open('benchmark/'+name_demo+'.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=',') line_count = 0 for row in csv_reader: v[line_count] = row[0] v[line_count] = v[line_count][:7] value[line_count] = float(row[1])/10**9 value2[line_count] = float(row[2]) min_v[line_count] = float(row[3])/10**9 max_v[line_count] = float(row[4])/10**9 BDM_marks[line_count] = (value[0] / value[line_count]) *1000 line_count += 1 average = np.average(BDM_marks) print("BDM_marks", name_demo, ':', average) xlabels = v ax[0].plot(range(nb), value, 'bo-') ax[0].plot(range(nb), min_v, 'b--') ax[0].plot(range(nb), max_v, 'b--') ax[0].set_ylabel("CPU Time in second") ax[0].set_xlabel("Version") ax[1].set_ylabel("Memory") ax[1].plot(range(nb), value2, 'ro-') ax[0].set_title(name_demo) ax[1].set_xticks(range(nb)) ax[1].set_xticklabels(xlabels, rotation=90) plt.tight_layout() plt.savefig('benchmark/'+name_demo+'.png') return average def main(): i = 0 it = iteration() name_datas, j = names() cpu = [0]*j name_demo = [0]*j memory = [10]*j min_v = [0]*j max_v = [0]*j average = [0] * j while i < j: name_demo[i], cpu[i], memory[i], min_v[i], max_v[i] = graph(name_datas[i], it, j) store(cpu[i], name_demo[i], memory[i], min_v[i], max_v[i]) average[i] = plot(name_demo[i]) i += 1 print("AVERAGE", np.average(average)) return if __name__ == "__main__": file = sys.argv[1] with open(file) as 
read_file: data = json.load(read_file) read_file.close() # try: main() # except: # print("ERROR")
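The script above reads Google-Benchmark-style JSON (sys.argv[1]) plus a version label (sys.argv[2]). A sketch of the input shape it expects, inferred from the field accesses in the code; the script filename in the comment is a placeholder.

# Shape of the JSON consumed above, inferred from the data["benchmarks"][i][...] accesses.
# Invocation is roughly:  python3 <this_script>.py results.json v1.2.3
example_input = {
    "benchmarks": [
        {"name": "demo_foo/process_time", "cpu_time": 1.2e9, "memory": 128},
        {"name": "demo_foo/process_time", "cpu_time": 1.3e9, "memory": 130},
    ]
}
# data_cpu() keeps only "<demo>/process_time" entries, graph() averages cpu_time
# (dropping three iterations when there is more than one), and store()/plot()
# append the results to benchmark/<demo>.csv and redraw benchmark/<demo>.png.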
null
mobile net v2 x1 5
#copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. from __future__ import absolute_import from __future__ import division from __future__ import print_function import paddle from paddle.nn.initializer import KaimingUniform __all__ = [ 'MobileNetV2', 'MobileNetV2_x0_25, ' 'MobileNetV2_x0_5', 'MobileNetV2_x1_0', 'MobileNetV2_x1_5', 'MobileNetV2_x2_0', 'MobileNetV2_scale' ] train_parameters = { "input_size": [3, 224, 224], "input_mean": [0.485, 0.456, 0.406], "input_std": [0.229, 0.224, 0.225], "learning_strategy": { "name": "piecewise_decay", "batch_size": 256, "epochs": [30, 60, 90], "steps": [0.1, 0.01, 0.001, 0.0001] } } class MobileNetV2(): def __init__(self, scale=1.0, change_depth=False): self.params = train_parameters self.scale = scale self.change_depth = change_depth def net(self, input, class_dim=1000): scale = self.scale change_depth = self.change_depth #if change_depth is True, the new depth is 1.4 times as deep as before. bottleneck_params_list = [ (1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2), (6, 64, 4, 2), (6, 96, 3, 1), (6, 160, 3, 2), (6, 320, 1, 1), ] if change_depth == False else [ (1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 5, 2), (6, 64, 7, 2), (6, 96, 5, 1), (6, 160, 3, 2), (6, 320, 1, 1), ] #conv1 input = self.conv_bn_layer( input, num_filters=int(32 * scale), filter_size=3, stride=2, padding=1, if_act=True, name='conv1_1') # bottleneck sequences i = 1 in_c = int(32 * scale) for layer_setting in bottleneck_params_list: t, c, n, s = layer_setting i += 1 input = self.invresi_blocks( input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s, name='conv' + str(i)) in_c = int(c * scale) #last_conv input = self.conv_bn_layer( input=input, num_filters=int(1280 * scale) if scale > 1.0 else 1280, filter_size=1, stride=1, padding=0, if_act=True, name='conv9') input = paddle.nn.functional.adaptive_avg_pool2d(input, 1) output = paddle.static.nn.fc( input, class_dim, weight_attr=paddle.ParamAttr(name='fc10_weights'), bias_attr=paddle.ParamAttr(name='fc10_offset')) return output def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, channels=None, num_groups=1, if_act=True, name=None, use_cudnn=True): conv = paddle.static.nn.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=padding, groups=num_groups, act=None, use_cudnn=use_cudnn, param_attr=paddle.ParamAttr(name=name + '_weights'), bias_attr=False) bn_name = name + '_bn' bn = paddle.static.nn.batch_norm( input=conv, param_attr=paddle.ParamAttr(name=bn_name + "_scale"), bias_attr=paddle.ParamAttr(name=bn_name + "_offset"), moving_mean_name=bn_name + '_mean', moving_variance_name=bn_name + '_variance') if if_act: return paddle.nn.functional.relu6(bn) else: return bn def shortcut(self, input, data_residual): return paddle.add(input, data_residual) def inverted_residual_unit(self, input, num_in_filter, num_filters, ifshortcut, stride, filter_size, padding, expansion_factor, name=None): num_expfilter = int(round(num_in_filter * expansion_factor)) 
channel_expand = self.conv_bn_layer( input=input, num_filters=num_expfilter, filter_size=1, stride=1, padding=0, num_groups=1, if_act=True, name=name + '_expand') bottleneck_conv = self.conv_bn_layer( input=channel_expand, num_filters=num_expfilter, filter_size=filter_size, stride=stride, padding=padding, num_groups=num_expfilter, if_act=True, name=name + '_dwise', use_cudnn=False) linear_out = self.conv_bn_layer( input=bottleneck_conv, num_filters=num_filters, filter_size=1, stride=1, padding=0, num_groups=1, if_act=False, name=name + '_linear') if ifshortcut: out = self.shortcut(input=input, data_residual=linear_out) return out else: return linear_out def invresi_blocks(self, input, in_c, t, c, n, s, name=None): first_block = self.inverted_residual_unit( input=input, num_in_filter=in_c, num_filters=c, ifshortcut=False, stride=s, filter_size=3, padding=1, expansion_factor=t, name=name + '_1') last_residual_block = first_block last_c = c for i in range(1, n): last_residual_block = self.inverted_residual_unit( input=last_residual_block, num_in_filter=last_c, num_filters=c, ifshortcut=True, stride=1, filter_size=3, padding=1, expansion_factor=t, name=name + '_' + str(i + 1)) return last_residual_block def MobileNetV2_x0_25(): model = MobileNetV2(scale=0.25) return model def MobileNetV2_x0_5(): model = MobileNetV2(scale=0.5) return model def MobileNetV2_x1_0(): model = MobileNetV2(scale=1.0) return model def METHOD_NAME(): model = MobileNetV2(scale=1.5) return model def MobileNetV2_x2_0(): model = MobileNetV2(scale=2.0) return model def MobileNetV2_scale(): model = MobileNetV2(scale=1.2, change_depth=True) return model
null
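Note: the factory masked as METHOD_NAME in the row above sits among the other scale factories and builds MobileNetV2(scale=1.5), so by the row's label and the entry in __all__ it presumably restores to MobileNetV2_x1_5. A minimal sketch of the restored factory, assuming the MobileNetV2 class defined in the row is in scope; the usage comment with the tensor name `image` is illustrative only:

def MobileNetV2_x1_5():
    # width multiplier 1.5, mirroring the other *_x<scale> factories in this file
    model = MobileNetV2(scale=1.5)
    return model

# illustrative usage (hypothetical NCHW static-graph input named `image`):
#   logits = MobileNetV2_x1_5().net(image, class_dim=1000)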
tear down
""" Name: r.in.ascii test Purpose: Tests r.in.ascii and its flags/options. Author: Sunveer Singh, Google Code-in 2017 Copyright: (C) 2017 by Sunveer Singh and the GRASS Development Team Licence: This program is free software under the GNU General Public License (>=v2). Read the file COPYING that comes with GRASS for details. """ from grass.gunittest.case import TestCase from grass.gunittest.main import test from grass.script.core import read_command INPUT_NOQUOTES = """north: 4299000.00 south: 4247000.00 east: 528000.00 west: 500000.00 rows: 10 cols: 15 null: -9999 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 """ INPUT_TSV = """north: 4299000.00 south: 4247000.00 east: 528000.00 west: 500000.00 rows: 10 cols: 15 null: -9999 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 1\ 2\ 3\ 4\ 5\ 6\ 7\ 8\ 9\ 10\ 11\ 12\ 13\ 14\ 15 """ INPUT_UNCOMMON = """north: 4299000.00 south: 4247000.00 east: 528000.00 west: 500000.00 rows: 10 cols: 15 null: -9999 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 1@ 2@ 3@ 4@ 5@ 6@ 7@ 8@ 9@ 10@ 11@ 12@ 13@ 14@ 15 """ class SimpleCsvTestCase(TestCase): ascii_test = "ascii" @classmethod def setUpClass(cls): """Use temporary region settings""" cls.use_temp_region() cls.runModule("g.region", n=4299000.00, s=4247000.00, e=528000.00, w=500000.00) @classmethod def tearDownClass(cls): cls.del_temp_region() def METHOD_NAME(self): """Remove the raster map after each test method""" self.runModule("g.remove", flags="f", type="raster", pattern=self.ascii_test) def test_no_text_delimeter(self): """Test loading no quotes""" self.assertModule( "r.in.ascii", input="-", output=self.ascii_test, type="CELL", stdin_=INPUT_NOQUOTES, ) self.assertRasterMinMax( map=self.ascii_test, refmin=1, refmax=15, msg="ascii_test in degrees must be between 1 and 15", ) def test_text_delimeter(self): """Testing with external file""" self.assertModule( "r.in.ascii", input="data/input_ascii.txt", output=self.ascii_test, type="CELL", ) self.assertRasterMinMax( map=self.ascii_test, refmin=1, refmax=5, msg="ascii_test in degrees must be between 1 and 5", ) def test_tsv(self): """Test loading TSV""" self.assertModule( "r.in.ascii", input="-", output=self.ascii_test, type="CELL", stdin_=INPUT_TSV, ) self.assertRasterMinMax( map=self.ascii_test, refmin=1, refmax=15, msg="ascii_test in degrees must be between 1 and 15", ) def test_uncommon_delims(self): """Test loading with uncommon delimiters""" self.assertModule( 
"r.in.ascii", input="-", output=self.ascii_test, type="CELL", stdin_=INPUT_UNCOMMON, ) self.assertRasterMinMax( map=self.ascii_test, refmin=1, refmax=15, msg="ascii_test in degrees must be between 1 and 15", ) if __name__ == "__main__": test()
null
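Note: given the row's label and the docstring "Remove the raster map after each test method", the masked METHOD_NAME is presumably the standard unittest tearDown hook, which the test runner invokes after every test_* method. A minimal sketch of the restored method, assuming the SimpleCsvTestCase context from the row:

def tearDown(self):
    """Remove the raster map after each test method."""
    # g.remove -f deletes the raster created by the preceding test case
    self.runModule("g.remove", flags="f", type="raster", pattern=self.ascii_test)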
setup
import logging import pytest from ocs_ci.ocs import constants, node from ocs_ci.framework.testlib import ManageTest, tier2 log = logging.getLogger(__name__) @tier2 @pytest.mark.polarion_id("OCS-2599") class TestRbdBlockPvc(ManageTest): """ Tests RBD block PVC """ @pytest.fixture(autouse=True) def METHOD_NAME(self, project_factory, pvc_factory, pod_factory): """ Create PVC and pods """ self.pvc_size = 10 self.pvc_obj = pvc_factory( interface=constants.CEPHBLOCKPOOL, size=self.pvc_size, access_mode=constants.ACCESS_MODE_RWX, status=constants.STATUS_BOUND, volume_mode=constants.VOLUME_MODE_BLOCK, size_unit="Mi", ) worker_nodes_list = node.get_worker_nodes() self.pod_objs = [] for node_name in worker_nodes_list: pod_obj = pod_factory( interface=constants.CEPHBLOCKPOOL, pvc=self.pvc_obj, status=constants.STATUS_RUNNING, node_name=node_name, pod_dict_path=constants.CSI_RBD_RAW_BLOCK_POD_YAML, raw_block_pv=True, ) self.pod_objs.append(pod_obj) def test_rbd_block_rwx_pvc(self, pod_factory): """ Test RBD Block volume mode RWX PVC """ # Find initial md5sum value log.info("Find initial md5sum value") for pod_obj in self.pod_objs: # Find initial md5sum pod_obj.md5sum_before_io = pod_obj.exec_sh_cmd_on_pod( command=f"dd iflag=direct if={pod_obj.get_storage_path(storage_type='block')} | md5sum" ) md5sum_values_initial = [pod_obj.md5sum_before_io for pod_obj in self.pod_objs] assert ( len(set(md5sum_values_initial)) == 1 ), "Initial md5sum values from the pods are not same" md5sum_value_initial = md5sum_values_initial[0] # Run IO from each pod and verify md5sum on all pods for io_pod in self.pod_objs: # Run IO from one pod log.info("Run IO from one pod") io_pod.run_io( storage_type="block", size=f"{int(self.pvc_size/2)}M", io_direction="write", runtime=5, end_fsync=1, ) log.info(f"IO started on pod {io_pod.name}") # Wait for IO completion io_pod.get_fio_results() log.info(f"IO completed on pod {io_pod.name}") # Verify md5sum has changed after IO log.info("Verify md5sum has changed after IO. Verify from all pods.") for pod_obj in self.pod_objs: # Find md5sum pod_obj.md5sum_after_io = pod_obj.exec_sh_cmd_on_pod( command=f"dd iflag=direct if={pod_obj.get_storage_path(storage_type='block')} | md5sum" ) assert pod_obj.md5sum_after_io != md5sum_value_initial, ( f"md5sum obtained from the pod {pod_obj.name} has not changed after IO. 
" f"IO was run from pod {io_pod.name}" ) log.info( f"md5sum obtained from the pod {pod_obj.name} has changed after IO from pod {io_pod.name}" ) # Verify the md5sum value obtained from all the pods are same md5sum_values_final = [pod_obj.md5sum_after_io for pod_obj in self.pod_objs] assert ( len(set(md5sum_values_final)) == 1 ), f"md5sum values from the pods after IO are not same-{md5sum_values_final}" log.info( f"md5sum value obtained from all pods after running IO" f" from {io_pod.name} are same - {md5sum_values_final}" ) md5sum_value_initial = md5sum_values_final[0] # Delete pods log.info("Deleting the pods") for pod_obj in self.pod_objs: pod_obj.delete() pod_obj.ocp.wait_for_delete(resource_name=pod_obj.name) log.info("Deleted all the pods") pod_obj_new = pod_factory( interface=constants.CEPHBLOCKPOOL, pvc=self.pvc_obj, status=constants.STATUS_RUNNING, pod_dict_path=constants.CSI_RBD_RAW_BLOCK_POD_YAML, raw_block_pv=True, ) # Find md5sum value and compare log.info("Find md5sum value from new pod") md5sum_new = pod_obj_new.exec_sh_cmd_on_pod( command=f"dd iflag=direct if={pod_obj_new.get_storage_path(storage_type='block')} | md5sum" ) assert ( md5sum_new == md5sum_value_initial ), f"md5sum mismatch on new pod. Expected {md5sum_value_initial}. Obtained {md5sum_new}" # Run IO from new pod log.info("Run IO from new pod") pod_obj_new.run_io( storage_type="block", size=f"{int(self.pvc_size/2)}M", io_direction="write", runtime=30, end_fsync=1, ) log.info(f"IO started on the new pod {pod_obj_new.name}") # Wait for IO completion pod_obj_new.get_fio_results() log.info(f"IO completed on the new pod {pod_obj_new.name}")
null
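Note: the masked METHOD_NAME in the row above is the autouse pytest fixture that provisions the RWX block PVC and one pod per worker node; per the label it presumably restores to setup. A minimal sketch of the restored fixture signature, with the body abridged to the PVC creation shown in the row:

@pytest.fixture(autouse=True)
def setup(self, project_factory, pvc_factory, pod_factory):
    """Create PVC and pods."""
    self.pvc_size = 10
    self.pvc_obj = pvc_factory(
        interface=constants.CEPHBLOCKPOOL,
        size=self.pvc_size,
        access_mode=constants.ACCESS_MODE_RWX,
        status=constants.STATUS_BOUND,
        volume_mode=constants.VOLUME_MODE_BLOCK,
        size_unit="Mi",
    )
    # ...pod creation on each worker node continues as in the row above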
summarize contacts by residue
""" Utility functions for analyzing lattice contacts. Used in: `Fraser JS, van den Bedem H, Samelson AJ, Lang PT, Holton JM, Echols N, Alber T. Accessing protein conformational ensembles using room-temperature X-ray crystallography. Proc Natl Acad Sci U S A. 2011 Sep 27;108(39):16247-52. <http://www.ncbi.nlm.nih.gov/pubmed/21918110>`_ """ from __future__ import absolute_import, division, print_function import operator import sys def find_crystal_contacts(xray_structure, pdb_atoms, # atom_with_labels, not atom! selected_atoms=None, distance_cutoff=3.5, ignore_same_asu=True, ignore_waters=True): from scitbx.array_family import flex sites_frac = xray_structure.sites_frac() unit_cell = xray_structure.unit_cell() pair_asu_table = xray_structure.pair_asu_table( distance_cutoff=distance_cutoff) pair_sym_table = pair_asu_table.extract_pair_sym_table() contacts = [] if (selected_atoms is None): selected_atoms = flex.bool(len(pdb_atoms), True) for i_seq,pair_sym_dict in enumerate(pair_sym_table): if (not selected_atoms[i_seq]): continue site_i = sites_frac[i_seq] atom_i = pdb_atoms[i_seq] resname_i = atom_i.resname atmname_i = atom_i.name chainid_i = atom_i.chain_id for j_seq,sym_ops in pair_sym_dict.items(): site_j = sites_frac[j_seq] atom_j = pdb_atoms[j_seq] resname_j = atom_j.resname atmname_j = atom_j.name chainid_j = atom_j.chain_id for sym_op in sym_ops: if sym_op.is_unit_mx(): if ignore_same_asu : continue elif (chainid_i == chainid_j): continue if (resname_j in ["HOH","WAT"] and ignore_waters): continue site_ji = sym_op * site_j distance = unit_cell.distance(site_i, site_ji) contacts.append((i_seq, j_seq, sym_op, distance)) #print resname_i, atmname_i, resname_j, atmname_j, str(sym_op), distance return contacts def find_crystal_contacts_by_residue(xray_structure, pdb_hierarchy, **kwds): contacts_by_residue = {} atoms = list(pdb_hierarchy.atoms_with_labels()) contacts = find_crystal_contacts(xray_structure, atoms, **kwds) for (i_seq, j_seq, sym_op, distance) in contacts : atom_rec = atoms[i_seq].fetch_labels() residue_key = (atom_rec.chain_id, atom_rec.resname, atom_rec.resid(), atom_rec.altloc) if (not residue_key in contacts_by_residue): contacts_by_residue[residue_key] = [] contacts_by_residue[residue_key].append((j_seq, sym_op, distance)) all_residues = [] for chain in pdb_hierarchy.models()[0].chains(): chain_id = chain.id for residue_group in chain.residue_groups(): resid = residue_group.resid() for atom_group in residue_group.atom_groups(): resname = atom_group.resname altloc = atom_group.altloc residue_key = (chain_id, resname, resid, altloc) residue_contacts = contacts_by_residue.get(residue_key, []) all_residues.append((residue_key, residue_contacts)) return all_residues def extract_closest_contacting_residues(residue_contacts, pdb_atoms): reduced_contacts = [] for (residue_key, contacts) in residue_contacts : if (len(contacts) == 0): reduced_contacts.append((residue_key, None, None, None)) else : contacts.sort(key=operator.itemgetter(2)) (j_seq, sym_op, distance) = contacts[0] atom_rec = pdb_atoms[j_seq].fetch_labels() contact_key = (atom_rec.chain_id, atom_rec.resname, atom_rec.resid(), atom_rec.altloc) reduced_contacts.append((residue_key, contact_key, sym_op, distance)) return reduced_contacts def METHOD_NAME(residue_contacts, pdb_hierarchy, out=sys.stdout): from mmtbx.refinement.print_statistics import make_header summary = extract_closest_contacting_residues(residue_contacts, pdb_hierarchy.atoms()) make_header("Crystal contacts by residue", out=out) print(" %-16s %-16s %-16s 
%-16s" % ("residue", "closest contact", "symop", "distance (A)"), file=out) print("-"*72, file=out) for (residue_key, contact_key, sym_op, distance) in summary : (chain_id, resname, resid, altloc) = residue_key id_str = "%s%5s %3s %s" % (chain_id, resid, resname, altloc) if (contact_key is None): print(" %-16s %-16s %-16s %-4s" % (id_str, "*","*","*"), file=out) else : (chain_id, resname, resid, altloc) = contact_key id_str_2 = "%s%5s %3s %s" % (chain_id, resid, resname, altloc) print(" %-16s %-16s %-16s %-4.2f" % (id_str, id_str_2, sym_op, distance), file=out) def show_contacts(contacts, pdb_atoms): for contact in contacts : (i_seq, j_seq, sym_op, distance) = contact atom_i = pdb_atoms[i_seq] atom_j = pdb_atoms[j_seq] fmt_i = atom_i.id_str()[5:20] fmt_j = atom_j.id_str()[5:20] #fmt_i = "%-2s %4s %3s %4s" % (atom_i.chain_id, atom_i.resid(), # atom_i.resname, atom_i.name) #fmt_j = "%-2s %4s %3s %4s" % (atom_j.chain_id, atom_j.resid(), # atom_j.resname, atom_j.name) print("%s %s %5.2f %s" % (fmt_i,fmt_j,distance,str(sym_op))) def show_contacts_for_pymol(contacts, pdb_atoms, object_name, distance_cutoff=3.5): for contact in contacts : (i_seq, j_seq, sym_op, distance) = contact atom_i = pdb_atoms[i_seq] atom_j = pdb_atoms[j_seq] s1 = "(%s and chain '%s' and resi %d and name %s)" % (object_name, atom_i.chain_id, atom_i.resseq_as_int(), atom_i.name) s2 = "((not %s) and (chain '%s' and resi %d and name %s))" % ( object_name, atom_j.chain_id, atom_j.resseq_as_int(), atom_j.name) print("dist %s, %s within %.1f of %s" % (s1, s2, distance_cutoff+0.1, s1)) def apply_sym_op_to_pdb(pdb_hierarchy, sym_op, unit_cell): #import scitbx.matrix r = sym_op.r() t = sym_op.t() #rt = scitbx.matrix.rt((r.as_double(), t.as_double())) new_hierarchy = pdb_hierarchy.deep_copy() atoms = pdb_hierarchy.atoms() sites_frac = unit_cell.fractionalize(sites_cart=atoms.extract_xyz()) new_sites = sites_frac * r.as_double() + t.as_double() atoms.set_xyz(unit_cell.orthogonalize(sites_frac=new_sites)) return new_hierarchy def apply_biological_unit(pdb_in): atoms = pdb_in.atoms() remark = pdb_in.remark_section() if (remark.size() == 0): raise Sorry("No REMARK records in this PDB file.") return pdb_out if __name__ == "__main__" : pdb_file = sys.argv[1] import iotbx.pdb pdb_in = iotbx.pdb.input(pdb_file) pdb_hierarchy = pdb_in.construct_hierarchy() xrs = pdb_in.xray_structure_simple() residue_contacts = find_crystal_contacts_by_residue(xrs, pdb_hierarchy) METHOD_NAME(residue_contacts, pdb_hierarchy)
null
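Note: the masked METHOD_NAME is the reporting helper invoked at the bottom of the row on the per-residue contact list; per the label it presumably restores to summarize_contacts_by_residue. A sketch of the restored signature and the driver call, assuming the rest of the module from the row:

def summarize_contacts_by_residue(residue_contacts, pdb_hierarchy, out=sys.stdout):
    # prints one line per residue: closest symmetry contact, symop, distance (A)
    ...

# driver at module scope, as in the row:
#   residue_contacts = find_crystal_contacts_by_residue(xrs, pdb_hierarchy)
#   summarize_contacts_by_residue(residue_contacts, pdb_hierarchy)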
move to
# -*- coding: utf-8 -*- """ Created on Fri Aug 3 16:53:34 2018 @author: ep558,wmd22 """ from __future__ import division from __future__ import print_function from builtins import str from past.utils import old_div import nplab.instrument.serial_instrument as si class ParkerStepper(si.SerialInstrument): '''Stepper object for controlling timedelay ''' def __init__(self, port=None,max_steps = 12000000,calibration = 7500.0): '''Setup baud rate, return charcter and timeout ''' self.termination_character = '\r' self.port_settings = {'baudrate':9600,'timeout':1} si.SerialInstrument.__init__(self,port=port) self.calibration = calibration self.max_steps = float(max_steps) self.initialise() def initialise(self): '''Set Calibration and make stepper ready to run ''' self.write("SSA1") self.query("CMDDIR1") #Should be a query? self.write("MPI") self.write("A5") self.write("V4") self.write("8ER3200") self.write("8OSB1") self.write("8OSH0") self.write("8OSC0") self.write("8OSD0") self.write("8FSA0") self.query("8OS") #SHOULD BE A QUERY? self.query("8FS") def moveto(self,newlocation,blocking = True): '''Moves to the requested stepper position Args: newlocation(int): The new postion you want the stepper to move to ''' if newlocation>=self.max_steps or newlocation<0: print('Move failed as new postion was out of range') return None self.write("MN") self.write("MPA") self.write("8D"+str(newlocation)) self.write("G") if blocking == True: self.location() def step(self,stepsize,blocking = True): '''Perform a signal step of size x Args: stepsize(int): ''' self.write("MN") self.write("MPI") self.write("8D"+str(stepsize)) self.write("G") if blocking == True: self.location() def loop(self,repeats,start,finish,velocity = 4,acceleration = 5): '''Perform a number of loops using the inbuilt loop function Args: repeats(int): Number of loops start(int) : Start location finish(int): End location velocity(int): Stepper veolocity acceleration(int): Stepper acceleration ''' self.write("A"+str(acceleration)) self.write("V"+str(velocity)) self.write("L"+str(repeats)) self.moveto(start) self.moveto(finish) self.write("N") def location(self): '''Determine the current stepper position in picoseconds and steps Returns: stepper position picoseconds stepper position steps ''' Success = False while Success ==False: try: loc = [old_div(self.int_query("8PR"),(self.calibration)),self.int_query("8PR")] if loc != [old_div(self.int_query("8PR"),(self.calibration)),self.int_query("8PR")]: raise ValueError Success = True except ValueError: Success = False return loc def movepositive(self): '''Move continuesly positive until a stop command is recieved ''' self.write("MC") self.write("H+") self.write("G") def movenegative(self): '''Move continuesly negative until a stop command is recieved ''' self.write("MC") self.write("H-") self.write("G") def stop(self): '''Force the stepper to stop in its current position ''' self.write("S") def home(self, velocity = -3): '''Move the stepper to its home position Args: velocity(int): Stepper velocity Notes: The correct sign (+/-) for the velocity for home movement must be given otherwise the stepper will go to the wrong end of the stage''' self.write("GH"+str(velocity)) def zero(self): '''Set the current stepper position as zero ''' self.write("PZ") def get_qt_ui(self): if not hasattr(self,'ui'): self.ui = Stepper_Ui(self) return self.ui # 'New code starts here from nplab.utils.gui import QtCore, QtGui, QtWidgets, uic from nplab.ui.ui_tools import UiTools import os class Stepper_Ui(QtWidgets.QWidget, 
UiTools): def __init__(self,stepper): super(Stepper_Ui, self).__init__() # assert(stepper==Stepper) # checking if the object is a stepper self.stepper = stepper ui_file = os.path.join(os.path.dirname(__file__),'stepper_GUI.ui') # GUI path . e.g. look into location of the current file and search for the given name uic.loadUi(ui_file, self) #loading the ui file self.move_spinBox.setMaximum(int(self.stepper.max_steps)) self.move_percent_doubleSpinBox.setMaximum(100.0) self.current_button.clicked.connect(self.update_positions) self.setpercent_pushButton.clicked.connect(self.move_to_percent) self.setsteps_pushButton.clicked.connect(self.METHOD_NAME) self.update_positions() def update_positions(self): current_pos = float(self.stepper.location()[1]) self.current_number.setText(str(current_pos)) self.current_percent.setText(str(old_div(100.0*current_pos,self.stepper.max_steps))[:4]+'%') def move_to_percent(self): percent=self.move_percent_doubleSpinBox.value() steps=int(old_div((percent*self.stepper.max_steps),100)) self.stepper.moveto(steps,blocking = False) def METHOD_NAME(self): steps= self.move_spinBox.value() self.stepper.moveto(steps) # self.current_number=self.location() # self.set_number=self.moveto(finish) # set_button
null
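Note: in the Stepper_Ui class the masked METHOD_NAME is the slot wired to setsteps_pushButton; per the label it presumably restores to something like move_to (the underlying ParkerStepper already exposes moveto, so the exact casing here is an assumption). A minimal sketch of the restored slot and its signal hook:

# in __init__: self.setsteps_pushButton.clicked.connect(self.move_to)

def move_to(self):
    # read the absolute step target from the spin box and drive the stage there
    steps = self.move_spinBox.value()
    self.stepper.moveto(steps)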
lower thresholds
# Author: Niels Nuyttens <[email protected]> # # License: Apache Software License 2.0 from __future__ import annotations import sys import typing from collections import namedtuple from enum import Enum from typing import Dict, List, Optional, Union # noqa: TYP001 if typing.TYPE_CHECKING: from typing_extensions import Protocol else: Protocol = object if sys.version_info >= (3, 10): from typing import ParamSpec, TypeGuard # noqa: F401 else: from typing_extensions import ParamSpec, TypeGuard # noqa: F401 if sys.version_info >= (3, 11): from typing import Self else: from typing_extensions import Self import pandas as pd from nannyml.exceptions import InvalidArgumentsException from nannyml.plots import Figure Key = namedtuple('Key', 'properties display_names') class Result(Protocol): """The data that was calculated or estimated.""" data: pd.DataFrame @property def empty(self) -> bool: ... @property def chunk_keys(self) -> pd.Series: ... @property def chunk_start_dates(self) -> pd.Series: ... @property def chunk_end_dates(self) -> pd.Series: ... @property def chunk_start_indices(self) -> pd.Series: ... @property def chunk_end_indices(self) -> pd.Series: ... @property def chunk_indices(self) -> pd.Series: ... @property def chunk_periods(self) -> pd.Series: ... def keys(self) -> List[Key]: ... def values(self, key: Key) -> Optional[pd.Series]: ... def alerts(self, key: Key) -> Optional[pd.Series]: ... def upper_thresholds(self, key: Key) -> Optional[pd.Series]: ... def METHOD_NAME(self, key: Key) -> Optional[pd.Series]: ... def upper_confidence_bounds(self, key: Key) -> Optional[pd.Series]: ... def lower_confidence_bounds(self, key: Key) -> Optional[pd.Series]: ... def sampling_error(self, key: Key) -> Optional[pd.Series]: ... def filter(self, period: str = 'all', metrics: Optional[Union[str, List[str]]] = None, *args, **kwargs) -> Result: ... def to_df(self, multilevel: bool = True) -> pd.DataFrame: ... def plot(self, *args, **kwargs) -> Figure: ... class Metric(Protocol): """Represents any kind of metric (or method) that can be calculated or estimated.""" @property def display_name(self) -> str: ... @property def column_name(self) -> str: ... class Calculator(Protocol): """Calculator base class.""" def fit(self, reference_data: pd.DataFrame, *args, **kwargs) -> Self: """Fits the calculator on reference data.""" def calculate(self, data: pd.DataFrame, *args, **kwargs) -> Result: """Perform a calculation based on analysis data.""" class Estimator(Protocol): """Estimator base class.""" def fit(self, reference_data: pd.DataFrame, *args, **kwargs) -> Self: """Fits the estimator on reference data.""" def estimate(self, data: pd.DataFrame, *args, **kwargs) -> Result: """Perform an estimation based on analysis data.""" ModelOutputsType = Union[str, Dict[str, str]] def model_output_column_names(model_outputs: ModelOutputsType) -> List[str]: """Get model output column nanmes from inputs.""" if model_outputs is None: return [] if isinstance(model_outputs, str): return [model_outputs] elif isinstance(model_outputs, Dict): return [column_name for label, column_name in model_outputs.items()] else: raise InvalidArgumentsException( f"received object of type {type(model_outputs)}. ModelOutputsType should be " f"either a 'str' or a 'Dict[str, str]'" ) def class_labels(model_outputs: ModelOutputsType) -> List[str]: if isinstance(model_outputs, Dict): return sorted(list(model_outputs.keys())) else: raise InvalidArgumentsException( f"received object of type {type(model_outputs)}. 
Multiclass ModelOutputsType should be a 'Dict[str, str]'" ) class ProblemType(str, Enum): """Use cases NannyML supports.""" CLASSIFICATION_BINARY = 'classification_binary' CLASSIFICATION_MULTICLASS = 'classification_multiclass' REGRESSION = 'regression' @staticmethod def parse(problem_type: str): if problem_type in 'classification_binary': return ProblemType.CLASSIFICATION_BINARY elif problem_type in 'classification_multiclass': return ProblemType.CLASSIFICATION_MULTICLASS elif problem_type in 'regression': return ProblemType.REGRESSION else: raise InvalidArgumentsException( f"unknown value for problem_type '{problem_type}'. Value should be one of " f"{[pt.value for pt in ProblemType]}" )
null
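Note: the masked METHOD_NAME on the Result protocol sits directly alongside upper_thresholds and, per the label, presumably restores to lower_thresholds with the same shape. A minimal sketch of the restored protocol member, assuming the Key and pandas imports from the row:

def lower_thresholds(self, key: Key) -> Optional[pd.Series]:
    """Lower threshold values for the metric identified by `key`, if any."""
    ...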
get dev ops configuration
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetDevOpsConfigurationResult', 'AwaitableGetDevOpsConfigurationResult', 'get_dev_ops_configuration', 'get_dev_ops_configuration_output', ] @pulumi.output_type class GetDevOpsConfigurationResult: """ DevOps Configuration resource. """ def __init__(__self__, id=None, name=None, properties=None, system_data=None, type=None): if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if properties and not isinstance(properties, dict): raise TypeError("Expected argument 'properties' to be a dict") pulumi.set(__self__, "properties", properties) if system_data and not isinstance(system_data, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", system_data) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter def properties(self) -> 'outputs.DevOpsConfigurationPropertiesResponse': """ DevOps Configuration properties. """ return pulumi.get(self, "properties") @property @pulumi.getter(name="systemData") def system_data(self) -> 'outputs.SystemDataResponse': """ Metadata pertaining to creation and last modification of the resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetDevOpsConfigurationResult(GetDevOpsConfigurationResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetDevOpsConfigurationResult( id=self.id, name=self.name, properties=self.properties, system_data=self.system_data, type=self.type) def METHOD_NAME(resource_group_name: Optional[str] = None, security_connector_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDevOpsConfigurationResult: """ DevOps Configuration resource. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str security_connector_name: The security connector name. 
""" __args__ = dict() __args__['resourceGroupName'] = resource_group_name __args__['securityConnectorName'] = security_connector_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:security/v20230901preview:getDevOpsConfiguration', __args__, opts=opts, typ=GetDevOpsConfigurationResult).value return AwaitableGetDevOpsConfigurationResult( id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), properties=pulumi.get(__ret__, 'properties'), system_data=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(METHOD_NAME) def get_dev_ops_configuration_output(resource_group_name: Optional[pulumi.Input[str]] = None, security_connector_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDevOpsConfigurationResult]: """ DevOps Configuration resource. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str security_connector_name: The security connector name. """ ...
null
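Note: the masked METHOD_NAME here is the plain (non-output) invoke wrapper that get_dev_ops_configuration_output is lifted from, and it appears in __all__ as get_dev_ops_configuration, so that is presumably the restored name. A minimal sketch of the restored signature, assuming the imports in the row; the body is the pulumi.runtime.invoke call shown above:

def get_dev_ops_configuration(resource_group_name: Optional[str] = None,
                              security_connector_name: Optional[str] = None,
                              opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDevOpsConfigurationResult:
    """DevOps Configuration resource (full invoke body in the row above)."""
    ...

# the output-form wrapper then decorates it:
#   @_utilities.lift_output_func(get_dev_ops_configuration)
#   def get_dev_ops_configuration_output(...): ...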
get chunks
import sys from collections.abc import Callable, Generator, Iterable, Mapping, MutableMapping, MutableSequence from multiprocessing.connection import Connection from multiprocessing.context import BaseContext, Process from multiprocessing.queues import Queue, SimpleQueue from threading import Lock, Semaphore, Thread from types import TracebackType from typing import Any, Generic, TypeVar from weakref import ref from ._base import BrokenExecutor, Executor, Future _T = TypeVar("_T") _threads_wakeups: MutableMapping[Any, Any] _global_shutdown: bool class _ThreadWakeup: _closed: bool _reader: Connection _writer: Connection def close(self) -> None: ... def wakeup(self) -> None: ... def clear(self) -> None: ... def _python_exit() -> None: ... EXTRA_QUEUED_CALLS: int _MAX_WINDOWS_WORKERS: int class _RemoteTraceback(Exception): tb: str def __init__(self, tb: TracebackType) -> None: ... class _ExceptionWithTraceback: exc: BaseException tb: TracebackType def __init__(self, exc: BaseException, tb: TracebackType) -> None: ... def __reduce__(self) -> str | tuple[Any, ...]: ... def _rebuild_exc(exc: Exception, tb: str) -> Exception: ... class _WorkItem(Generic[_T]): future: Future[_T] fn: Callable[..., _T] args: Iterable[Any] kwargs: Mapping[str, Any] def __init__(self, future: Future[_T], fn: Callable[..., _T], args: Iterable[Any], kwargs: Mapping[str, Any]) -> None: ... class _ResultItem: work_id: int exception: Exception result: Any if sys.version_info >= (3, 11): exit_pid: int | None def __init__( self, work_id: int, exception: Exception | None = None, result: Any | None = None, exit_pid: int | None = None ) -> None: ... else: def __init__(self, work_id: int, exception: Exception | None = None, result: Any | None = None) -> None: ... class _CallItem: work_id: int fn: Callable[..., Any] args: Iterable[Any] kwargs: Mapping[str, Any] def __init__(self, work_id: int, fn: Callable[..., Any], args: Iterable[Any], kwargs: Mapping[str, Any]) -> None: ... class _SafeQueue(Queue[Future[Any]]): pending_work_items: dict[int, _WorkItem[Any]] shutdown_lock: Lock thread_wakeup: _ThreadWakeup if sys.version_info >= (3, 9): def __init__( self, max_size: int | None = 0, *, ctx: BaseContext, pending_work_items: dict[int, _WorkItem[Any]], shutdown_lock: Lock, thread_wakeup: _ThreadWakeup, ) -> None: ... else: def __init__( self, max_size: int | None = 0, *, ctx: BaseContext, pending_work_items: dict[int, _WorkItem[Any]] ) -> None: ... def _on_queue_feeder_error(self, e: Exception, obj: _CallItem) -> None: ... def METHOD_NAME(*iterables: Any, chunksize: int) -> Generator[tuple[Any, ...], None, None]: ... def _process_chunk(fn: Callable[..., _T], chunk: Iterable[tuple[Any, ...]]) -> list[_T]: ... if sys.version_info >= (3, 11): def _sendback_result( result_queue: SimpleQueue[_WorkItem[Any]], work_id: int, result: Any | None = None, exception: Exception | None = None, exit_pid: int | None = None, ) -> None: ... else: def _sendback_result( result_queue: SimpleQueue[_WorkItem[Any]], work_id: int, result: Any | None = None, exception: Exception | None = None ) -> None: ... if sys.version_info >= (3, 11): def _process_worker( call_queue: Queue[_CallItem], result_queue: SimpleQueue[_ResultItem], initializer: Callable[..., object] | None, initargs: tuple[Any, ...], max_tasks: int | None = None, ) -> None: ... else: def _process_worker( call_queue: Queue[_CallItem], result_queue: SimpleQueue[_ResultItem], initializer: Callable[..., object] | None, initargs: tuple[Any, ...], ) -> None: ... 
if sys.version_info >= (3, 9): class _ExecutorManagerThread(Thread): thread_wakeup: _ThreadWakeup shutdown_lock: Lock executor_reference: ref[Any] processes: MutableMapping[int, Process] call_queue: Queue[_CallItem] result_queue: SimpleQueue[_ResultItem] work_ids_queue: Queue[int] pending_work_items: dict[int, _WorkItem[Any]] def __init__(self, executor: ProcessPoolExecutor) -> None: ... def run(self) -> None: ... def add_call_item_to_queue(self) -> None: ... def wait_result_broken_or_wakeup(self) -> tuple[Any, bool, str]: ... def process_result_item(self, result_item: int | _ResultItem) -> None: ... def is_shutting_down(self) -> bool: ... def terminate_broken(self, cause: str) -> None: ... def flag_executor_shutting_down(self) -> None: ... def shutdown_workers(self) -> None: ... def join_executor_internals(self) -> None: ... def get_n_children_alive(self) -> int: ... _system_limits_checked: bool _system_limited: bool | None def _check_system_limits() -> None: ... def _chain_from_iterable_of_lists(iterable: Iterable[MutableSequence[Any]]) -> Any: ... class BrokenProcessPool(BrokenExecutor): ... class ProcessPoolExecutor(Executor): _mp_context: BaseContext | None _initializer: Callable[..., None] | None _initargs: tuple[Any, ...] _executor_manager_thread: _ThreadWakeup _processes: MutableMapping[int, Process] _shutdown_thread: bool _shutdown_lock: Lock _idle_worker_semaphore: Semaphore _broken: bool _queue_count: int _pending_work_items: dict[int, _WorkItem[Any]] _cancel_pending_futures: bool _executor_manager_thread_wakeup: _ThreadWakeup _result_queue: SimpleQueue[Any] _work_ids: Queue[Any] if sys.version_info >= (3, 11): def __init__( self, max_workers: int | None = None, mp_context: BaseContext | None = None, initializer: Callable[..., object] | None = None, initargs: tuple[Any, ...] = (), *, max_tasks_per_child: int | None = None, ) -> None: ... else: def __init__( self, max_workers: int | None = None, mp_context: BaseContext | None = None, initializer: Callable[..., object] | None = None, initargs: tuple[Any, ...] = (), ) -> None: ... if sys.version_info >= (3, 9): def _start_executor_manager_thread(self) -> None: ... def _adjust_process_count(self) -> None: ...
null
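Note: this row is a typeshed-style stub of concurrent.futures.process; the masked helper between _SafeQueue and _process_chunk presumably restores to _get_chunks (the leading underscore is inferred from the neighbouring private helpers). A minimal sketch of the restored stub line plus, for intuition only, a hedged pure-Python sketch of the behaviour the real helper provides:

def _get_chunks(*iterables: Any, chunksize: int) -> Generator[tuple[Any, ...], None, None]: ...

# rough behavioural sketch (not part of the stub): re-batch zip(*iterables)
# into chunksize-sized tuples of argument tuples
#   def _get_chunks(*iterables, chunksize):
#       it = zip(*iterables)
#       while chunk := tuple(itertools.islice(it, chunksize)):
#           yield chunk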
survival function
# -*- coding: utf-8 -*- import autograd.numpy as np import warnings from autograd.numpy import abs, log from scipy.special import gammaincinv from autograd_gamma import gammaincc, gammainc, gammaln, gammainccln, gammaincln from lifelines.fitters import ParametricRegressionFitter from lifelines.utils import CensoringType from lifelines.utils.safe_exp import safe_exp from lifelines import utils from lifelines import GeneralizedGammaFitter class GeneralizedGammaRegressionFitter(ParametricRegressionFitter): r""" This class implements a Generalized Gamma model for regression data. The model has parameterized form: The survival function is: .. math:: S(t; x)=\left\{ \begin{array}{} 1-\Gamma_{RL}\left( \frac{1}{{{\lambda }^{2}}};\frac{{e}^{\lambda \left( \frac{\log(t)-\mu }{\sigma} \right)}}{\lambda ^{2}} \right) \textit{ if } \lambda> 0 \\ \Gamma_{RL}\left( \frac{1}{{{\lambda }^{2}}};\frac{{e}^{\lambda \left( \frac{\log(t)-\mu }{\sigma} \right)}}{\lambda ^{2}} \right) \textit{ if } \lambda \le 0 \\ \end{array} \right.\,\! where :math:`\Gamma_{RL}` is the regularized lower incomplete Gamma function, and :math:`\sigma = \sigma(x) = \exp(\alpha x^T), \lambda = \lambda(x) = \beta x^T, \mu = \mu(x) = \gamma x^T`. This model has the Exponential, Weibull, Gamma and Log-Normal as sub-models, and thus can be used as a way to test which model to use: 1. When :math:`\lambda = 1` and :math:`\sigma = 1`, then the data is Exponential. 2. When :math:`\lambda = 1` then the data is Weibull. 3. When :math:`\sigma = \lambda` then the data is Gamma. 4. When :math:`\lambda = 0` then the data is Log-Normal. 5. When :math:`\lambda = -1` then the data is Inverse-Weibull. 6. When :math:`-\sigma = \lambda` then the data is Inverse-Gamma. After calling the ``.fit`` method, you have access to properties like: ``cumulative_hazard_``, ``survival_function_``, A summary of the fit is available with the method ``print_summary()``. Important ------------- The parameterization implemented has :math:`\log\sigma`, thus there is a `ln_sigma_` in the output. Exponentiate this parameter to recover :math:`\sigma`. Important ------------- This model is experimental. It's API may change in the future. Also, it's convergence is not very stable. Parameters ----------- alpha: float, optional (default=0.05) the level in the confidence intervals. penalizer: float or array, optional (default=0.0) the penalizer coefficient to the size of the coefficients. See `l1_ratio`. Must be equal to or greater than 0. Alternatively, penalizer is an array equal in size to the number of parameters, with penalty coefficients for specific variables. For example, `penalizer=0.01 * np.ones(p)` is the same as `penalizer=0.01` Examples -------- .. 
code:: python from lifelines import GeneralizedGammaFitter from lifelines.datasets import load_waltons waltons = load_waltons() ggf = GeneralizedGammaFitter() ggf.fit(waltons['T'], waltons['E']) ggf.plot() ggf.summary Attributes ---------- cumulative_hazard_ : DataFrame The estimated cumulative hazard (with custom timeline if provided) hazard_ : DataFrame The estimated hazard (with custom timeline if provided) survival_function_ : DataFrame The estimated survival function (with custom timeline if provided) cumulative_density_ : DataFrame The estimated cumulative density function (with custom timeline if provided) density_: DataFrame The estimated density function (PDF) (with custom timeline if provided) variance_matrix_ : DataFrame The variance matrix of the coefficients median_: float The median time to event lambda_: float The fitted parameter in the model rho_: float The fitted parameter in the model alpha_: float The fitted parameter in the model durations: array The durations provided event_observed: array The event_observed variable provided timeline: array The time line to use for plotting and indexing entry: array or None The entry array provided, or None """ _fitted_parameter_names = ["sigma_", "mu_", "lambda_"] def _create_initial_point(self, Ts, E, entries, weights, Xs): # detect constant columns constant_col = (Xs.var(0) < 1e-8).idxmax() uni_model = GeneralizedGammaFitter() with warnings.catch_warnings(): warnings.simplefilter("ignore") if utils.CensoringType.is_right_censoring(self): uni_model.fit_right_censoring(Ts[0], event_observed=E, entry=entries, weights=weights) elif utils.CensoringType.is_interval_censoring(self): uni_model.fit_interval_censoring(Ts[0], Ts[1], entry=entries, weights=weights) elif utils.CensoringType.is_left_censoring(self): uni_model.fit_left_censoring(Ts[1], event_observed=E, entry=entries, weights=weights) # we may use these later in log_likelihood_test() self._ll_null_ = uni_model.log_likelihood_ assert self._ll_null_dof == 3 default_point = super(GeneralizedGammaRegressionFitter, self)._create_initial_point(Ts, E, entries, weights, Xs) nested_point = {} nested_point["mu_"] = np.array([0.0] * (len(Xs["mu_"].columns))) if constant_col in Xs["mu_"].columns: nested_point["mu_"][Xs["mu_"].columns.index(constant_col)] = uni_model.mu_ nested_point["sigma_"] = np.array([0.0] * (len(Xs["sigma_"].columns))) if constant_col in Xs["sigma_"].columns: nested_point["sigma_"][Xs["sigma_"].columns.index(constant_col)] = uni_model.ln_sigma_ # this needs to be non-zero because we divide by it nested_point["lambda_"] = np.array([0.01] * (len(Xs["lambda_"].columns))) if constant_col in Xs["lambda_"].columns: nested_point["lambda_"][Xs["lambda_"].columns.index(constant_col)] = uni_model.lambda_ return [nested_point, default_point] def METHOD_NAME(self, params, T, Xs): lambda_ = Xs["lambda_"] @ params["lambda_"] sigma_ = safe_exp(Xs["sigma_"] @ params["sigma_"]) mu_ = Xs["mu_"] @ params["mu_"] Z = (log(T) - mu_) / sigma_ ilambda_2 = 1 / lambda_ ** 2 exp_term = np.clip(safe_exp(lambda_ * Z) * ilambda_2, 1e-300, 1e25) return np.where(lambda_ > 0, gammaincc(ilambda_2, exp_term), gammainc(ilambda_2, exp_term)) def _cumulative_hazard(self, params, T, Xs): lambda_ = Xs["lambda_"] @ params["lambda_"] sigma_ = safe_exp(Xs["sigma_"] @ params["sigma_"]) mu_ = Xs["mu_"] @ params["mu_"] ilambda_2 = 1 / lambda_ ** 2 Z = (log(T) - mu_) / np.clip(sigma_, 0, 1e20) exp_term = np.clip(safe_exp(lambda_ * Z) * ilambda_2, 1e-300, 1e25) return -np.where(lambda_ > 0, gammainccln(ilambda_2, 
exp_term), gammaincln(ilambda_2, exp_term)) def _log_hazard(self, params, T, Xs): lambda_ = Xs["lambda_"] @ params["lambda_"] ln_sigma_ = Xs["sigma_"] @ params["sigma_"] mu_ = Xs["mu_"] @ params["mu_"] ilambda_2 = 1 / lambda_ ** 2 Z = (log(T) - mu_) / np.clip(safe_exp(ln_sigma_), 0, 1e20) exp_term = np.clip(safe_exp(lambda_ * Z) * ilambda_2, 1e-300, 1e25) return ( log(np.abs(lambda_)) - log(T) - ln_sigma_ - gammaln(ilambda_2) + (lambda_ * Z - 2 * log(np.abs(lambda_))) * ilambda_2 - exp_term - np.where(lambda_ > 0, gammainccln(ilambda_2, exp_term), gammaincln(ilambda_2, exp_term)) )
null
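Note: in ParametricRegressionFitter subclasses the survival function is implemented alongside _cumulative_hazard and _log_hazard, so the masked METHOD_NAME here presumably restores to _survival_function (the leading underscore is assumed from the sibling methods). A minimal sketch of the restored signature; the body is the regularized incomplete-gamma expression shown in the row:

def _survival_function(self, params, T, Xs):
    # S(t|x) = 1 - GammaInc_reg(1/lambda^2, exp(lambda*Z)/lambda^2) if lambda > 0,
    #          GammaInc_reg(1/lambda^2, exp(lambda*Z)/lambda^2)     otherwise,
    # with Z = (log T - mu) / sigma, as in the class docstring above
    ...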
find quantized model file
import inspect import re from pathlib import Path import accelerate import torch import transformers from transformers import AutoConfig, AutoModelForCausalLM import modules.shared as shared from modules.logging_colors import logger from gptq_for_llama import llama_inference_offload from gptq_for_llama.modelutils import find_layers from gptq_for_llama.quant import make_quant # This function is a replacement for the load_quant function in the # GPTQ-for_LLaMa repository. It supports more models and branches. def _load_quant(model, checkpoint, wbits, groupsize=-1, faster_kernel=False, exclude_layers=None, kernel_switch_threshold=128, eval=True): exclude_layers = exclude_layers or ['lm_head'] def noop(*args, **kwargs): pass config = AutoConfig.from_pretrained(model, trust_remote_code=shared.args.trust_remote_code) torch.nn.init.kaiming_uniform_ = noop torch.nn.init.uniform_ = noop torch.nn.init.normal_ = noop torch.set_default_dtype(torch.half) transformers.modeling_utils._init_weights = False torch.set_default_dtype(torch.half) model = AutoModelForCausalLM.from_config(config, trust_remote_code=shared.args.trust_remote_code) torch.set_default_dtype(torch.float) if eval: model = model.eval() layers = find_layers(model) for name in exclude_layers: if name in layers: del layers[name] gptq_args = inspect.getfullargspec(make_quant).args make_quant_kwargs = { 'module': model, 'names': layers, 'bits': wbits, } if 'groupsize' in gptq_args: make_quant_kwargs['groupsize'] = groupsize if 'faster' in gptq_args: make_quant_kwargs['faster'] = faster_kernel if 'kernel_switch_threshold' in gptq_args: make_quant_kwargs['kernel_switch_threshold'] = kernel_switch_threshold make_quant(**make_quant_kwargs) del layers if checkpoint.endswith('.safetensors'): from safetensors.torch import load_file as safe_load model.load_state_dict(safe_load(checkpoint), strict=False) else: model.load_state_dict(torch.load(checkpoint), strict=False) model.seqlen = 2048 return model # Used to locate the .pt/.safetensors quantized file def METHOD_NAME(model_name): if shared.args.checkpoint: return Path(shared.args.checkpoint) path_to_model = Path(f'{shared.args.model_dir}/{model_name}') pt_path = None priority_name_list = [ Path(f'{shared.args.model_dir}/{model_name}{hyphen}{shared.args.wbits}bit{group}{ext}') for group in ([f'-{shared.args.groupsize}g', ''] if shared.args.groupsize > 0 else ['']) for ext in ['.safetensors', '.pt'] for hyphen in ['-', f'/{model_name}-', '/'] ] for path in priority_name_list: if path.exists(): pt_path = path break # If the model hasn't been found with a well-behaved name, pick the last .pt # or the last .safetensors found in its folder as a last resort if not pt_path: for ext in ['.pt', '.safetensors']: found = list(path_to_model.glob(f"*{ext}")) if len(found) > 0: if len(found) > 1: logger.warning(f'More than one {ext} model has been found. The last one will be selected. 
It could be wrong.') pt_path = found[-1] break return pt_path # The function that loads the model in modules/models.py def load_quantized(model_name): if shared.args.model_type is None: logger.error("The model could not be loaded because its type could not be inferred from its name.") logger.error("Please specify the type manually using the --model_type argument.") return None # Select the appropriate load_quant function model_type = shared.args.model_type.lower() if shared.args.pre_layer and model_type == 'llama': load_quant = llama_inference_offload.load_quant elif model_type in ('llama', 'opt', 'gptj'): if shared.args.pre_layer: logger.warning("Ignoring --pre_layer because it only works for llama model type.") load_quant = _load_quant else: logger.error("Unknown pre-quantized model type specified. Only 'llama', 'opt' and 'gptj' are supported") exit() # Find the quantized model weights file (.pt/.safetensors) path_to_model = Path(f'{shared.args.model_dir}/{model_name}') pt_path = METHOD_NAME(model_name) if not pt_path: logger.error("Could not find the quantized model in .pt or .safetensors format, exiting...") exit() else: logger.info(f"Found the following quantized model: {pt_path}") # qwopqwop200's offload if model_type == 'llama' and shared.args.pre_layer: if len(shared.args.pre_layer) == 1: pre_layer = shared.args.pre_layer[0] else: pre_layer = shared.args.pre_layer model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, pre_layer) else: threshold = False if model_type == 'gptj' else 128 model = load_quant(str(path_to_model), str(pt_path), shared.args.wbits, shared.args.groupsize, kernel_switch_threshold=threshold) # accelerate offload (doesn't work properly) if shared.args.gpu_memory or torch.cuda.device_count() > 1: if shared.args.gpu_memory: memory_map = list(map(lambda x: x.strip(), shared.args.gpu_memory)) max_cpu_memory = shared.args.cpu_memory.strip() if shared.args.cpu_memory is not None else '99GiB' max_memory = {} for i in range(len(memory_map)): max_memory[i] = f'{memory_map[i]}GiB' if not re.match('.*ib$', memory_map[i].lower()) else memory_map[i] max_memory['cpu'] = f'{max_cpu_memory}GiB' if not re.match('.*ib$', max_cpu_memory.lower()) else max_cpu_memory else: max_memory = accelerate.utils.get_balanced_memory(model) device_map = accelerate.infer_auto_device_map(model, max_memory=max_memory, no_split_module_classes=["LlamaDecoderLayer"]) logger.info("Using the following device map for the quantized model:", device_map) # https://huggingface.co/docs/accelerate/package_reference/big_modeling#accelerate.dispatch_model model = accelerate.dispatch_model(model, device_map=device_map, offload_buffers=True) # No offload elif not shared.args.cpu: model = model.to(torch.device('cuda:0')) return model
null
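Note: the masked METHOD_NAME is the helper the row documents as "Used to locate the .pt/.safetensors quantized file" and later calls as pt_path = METHOD_NAME(model_name); per the label it presumably restores to find_quantized_model_file. A sketch of the restored signature and its call site:

def find_quantized_model_file(model_name):
    # returns a pathlib.Path to the .pt/.safetensors checkpoint, or None if not found
    ...

# inside load_quantized():
#   pt_path = find_quantized_model_file(model_name)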
list share subscription synchronizations
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs __all__ = [ 'ListShareSubscriptionSynchronizationsResult', 'AwaitableListShareSubscriptionSynchronizationsResult', 'list_share_subscription_synchronizations', 'list_share_subscription_synchronizations_output', ] @pulumi.output_type class ListShareSubscriptionSynchronizationsResult: """ A consumer side list of share subscription synchronizations """ def __init__(__self__, next_link=None, value=None): if next_link and not isinstance(next_link, str): raise TypeError("Expected argument 'next_link' to be a str") pulumi.set(__self__, "next_link", next_link) if value and not isinstance(value, list): raise TypeError("Expected argument 'value' to be a list") pulumi.set(__self__, "value", value) @property @pulumi.getter(name="nextLink") def next_link(self) -> Optional[str]: """ The Url of next result page. """ return pulumi.get(self, "next_link") @property @pulumi.getter def value(self) -> Sequence['outputs.ShareSubscriptionSynchronizationResponse']: """ Collection of items of type DataTransferObjects. """ return pulumi.get(self, "value") class AwaitableListShareSubscriptionSynchronizationsResult(ListShareSubscriptionSynchronizationsResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return ListShareSubscriptionSynchronizationsResult( next_link=self.next_link, value=self.value) def METHOD_NAME(account_name: Optional[str] = None, filter: Optional[str] = None, orderby: Optional[str] = None, resource_group_name: Optional[str] = None, share_subscription_name: Optional[str] = None, skip_token: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListShareSubscriptionSynchronizationsResult: """ List synchronizations of a share subscription Azure REST API version: 2021-08-01. :param str account_name: The name of the share account. :param str filter: Filters the results using OData syntax. :param str orderby: Sorts the results using OData syntax. :param str resource_group_name: The resource group name. :param str share_subscription_name: The name of the share subscription. 
:param str skip_token: Continuation token """ __args__ = dict() __args__['accountName'] = account_name __args__['filter'] = filter __args__['orderby'] = orderby __args__['resourceGroupName'] = resource_group_name __args__['shareSubscriptionName'] = share_subscription_name __args__['skipToken'] = skip_token opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:datashare:listShareSubscriptionSynchronizations', __args__, opts=opts, typ=ListShareSubscriptionSynchronizationsResult).value return AwaitableListShareSubscriptionSynchronizationsResult( next_link=pulumi.get(__ret__, 'next_link'), value=pulumi.get(__ret__, 'value')) @_utilities.lift_output_func(METHOD_NAME) def list_share_subscription_synchronizations_output(account_name: Optional[pulumi.Input[str]] = None, filter: Optional[pulumi.Input[Optional[str]]] = None, orderby: Optional[pulumi.Input[Optional[str]]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, share_subscription_name: Optional[pulumi.Input[str]] = None, skip_token: Optional[pulumi.Input[Optional[str]]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListShareSubscriptionSynchronizationsResult]: """ List synchronizations of a share subscription Azure REST API version: 2021-08-01. :param str account_name: The name of the share account. :param str filter: Filters the results using OData syntax. :param str orderby: Sorts the results using OData syntax. :param str resource_group_name: The resource group name. :param str share_subscription_name: The name of the share subscription. :param str skip_token: Continuation token """ ...
null
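Note: as in the other Pulumi row, the masked METHOD_NAME is the plain invoke wrapper; it is listed in __all__ as list_share_subscription_synchronizations, so that is presumably the restored name. A minimal sketch of the restored signature, assuming the imports in the row; the keyword arguments mirror the _output variant shown above:

def list_share_subscription_synchronizations(account_name: Optional[str] = None,
                                             filter: Optional[str] = None,
                                             orderby: Optional[str] = None,
                                             resource_group_name: Optional[str] = None,
                                             share_subscription_name: Optional[str] = None,
                                             skip_token: Optional[str] = None,
                                             opts: Optional[pulumi.InvokeOptions] = None
                                             ) -> AwaitableListShareSubscriptionSynchronizationsResult:
    """List synchronizations of a share subscription (full invoke body in the row above)."""
    ...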
operator getoutput custom type fields container
#------------------------------------------------------------------------------- # Operator #------------------------------------------------------------------------------- class OperatorAbstractAPI: @staticmethod def init_operator_environment(object): pass @staticmethod def finish_operator_environment(object): pass @staticmethod def operator_new(operatorName): raise NotImplementedError @staticmethod def operator_get_specification_if_any(operatorName): raise NotImplementedError @staticmethod def operator_delete(op): raise NotImplementedError @staticmethod def operator_record_instance(op, transfer_ownership): raise NotImplementedError @staticmethod def operator_record_with_new_name(existing_identifier, new_identifier, core): raise NotImplementedError @staticmethod def operator_set_config(op, config): raise NotImplementedError @staticmethod def operator_get_config(op): raise NotImplementedError @staticmethod def operator_by_id(id): raise NotImplementedError @staticmethod def get_operator_id(op): raise NotImplementedError @staticmethod def dpf_operator_by_name(operatorName): raise NotImplementedError @staticmethod def dpf_operator_delete(op): raise NotImplementedError @staticmethod def operator_connect_int(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_bool(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_double(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_string(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_scoping(op, iPin, scoping): raise NotImplementedError @staticmethod def operator_connect_data_sources(op, iPin, dataSources): raise NotImplementedError @staticmethod def operator_connect_field(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_collection(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_meshed_region(op, iPin, dataSources): raise NotImplementedError @staticmethod def operator_connect_vector_int(op, iPin, ptrValue, size): raise NotImplementedError @staticmethod def operator_connect_vector_double(op, iPin, ptrValue, size): raise NotImplementedError @staticmethod def operator_connect_collection_as_vector(op, iPin, collection): raise NotImplementedError @staticmethod def operator_connect_operator_output(op, iPin, value, outputIndex): raise NotImplementedError @staticmethod def operator_connect_streams(op, iPin, streams): raise NotImplementedError @staticmethod def operator_connect_property_field(op, iPin, streams): raise NotImplementedError @staticmethod def operator_connect_string_field(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_custom_type_field(op, iPin, value): raise NotImplementedError @staticmethod def operator_connect_support(op, iPin, support): raise NotImplementedError @staticmethod def operator_connect_time_freq_support(op, iPin, support): raise NotImplementedError @staticmethod def operator_connect_workflow(op, iPin, wf): raise NotImplementedError @staticmethod def operator_connect_cyclic_support(op, iPin, sup): raise NotImplementedError @staticmethod def operator_connect_ians_dispatch(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_data_tree(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_external_data(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_remote_workflow(op, iPin, ptr): raise NotImplementedError @staticmethod def 
operator_connect_operator_as_input(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_any(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_label_space(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_generic_data_container(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_connect_result_info(op, iPin, ptr): raise NotImplementedError @staticmethod def operator_disconnect(op, iPin): raise NotImplementedError @staticmethod def operator_getoutput_fields_container(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_scopings_container(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_field(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_scoping(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_data_sources(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_field_mapping(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_meshes_container(op, iOutput): raise NotImplementedError @staticmethod def METHOD_NAME(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_cyclic_support(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_workflow(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_string_field(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_custom_type_field(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_generic_data_container(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_string(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_bytearray(op, iOutput, size): raise NotImplementedError @staticmethod def operator_getoutput_int(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_double(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_bool(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_time_freq_support(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_meshed_region(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_result_info(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_materials_container(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_streams(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_property_field(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_any_support(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_data_tree(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_operator(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_external_data(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_int_collection(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_double_collection(op, iOutput): raise NotImplementedError @staticmethod def operator_getoutput_as_any(op, iOutput): raise NotImplementedError @staticmethod def operator_has_output_when_evaluated(op, iOutput): raise NotImplementedError @staticmethod def operator_status(op): raise NotImplementedError @staticmethod def operator_run(op): raise NotImplementedError @staticmethod def operator_invalidate(op): raise 
NotImplementedError @staticmethod def operator_derivate(op): raise NotImplementedError @staticmethod def operator_name(op): raise NotImplementedError @staticmethod def operator_get_status(op): raise NotImplementedError @staticmethod def operator_new_on_client(operatorName, client): raise NotImplementedError @staticmethod def operator_get_copy(id, client): raise NotImplementedError @staticmethod def operator_get_id_for_client(wf): raise NotImplementedError
null
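A minimal sketch (not part of the dataset record above) of how a concrete backend might satisfy the abstract operator API: the class name, the dict-based "handles", and the registry are all hypothetical stand-ins, and METHOD_NAME is left as the masked identifier used throughout this record.

class InProcessOperatorAPI(OperatorAbstractAPI):
    """Hypothetical concrete backend that keeps operator handles in a local dict."""

    _operators = {}
    _next_id = 0

    @staticmethod
    def operator_new(operatorName):
        # Allocate a plain dict as a stand-in for a native operator handle.
        InProcessOperatorAPI._next_id += 1
        op = {"id": InProcessOperatorAPI._next_id, "name": operatorName, "inputs": {}}
        InProcessOperatorAPI._operators[op["id"]] = op
        return op

    @staticmethod
    def operator_connect_int(op, iPin, value):
        # Record the pin value on the fake handle.
        op["inputs"][iPin] = value

    @staticmethod
    def operator_delete(op):
        InProcessOperatorAPI._operators.pop(op["id"], None)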
step update
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # # -------------------------------------------------------------------------------------------- # pylint: disable=too-few-public-methods,unnecessary-pass,unused-argument """ Kubernetescluster agentpool tests scenarios """ from azure.cli.testsdk import ScenarioTest from .config import CONFIG def setup_scenario1(test): """Env setup_scenario1""" pass def cleanup_scenario1(test): """Env cleanup_scenario1""" pass def call_scenario1(test): """# Testcase: scenario1""" setup_scenario1(test) step_create(test) METHOD_NAME(test) step_show(test) step_list(test) step_delete(test) cleanup_scenario1(test) def step_create(test, checks=None): """Kubernetescluster agentpool create operation""" if checks is None: checks = [] test.cmd( "az networkcloud kubernetescluster agentpool create --name {name} " "--kubernetes-cluster-name {clusterName} --resource-group {rg} " "--location {location} " "--extended-location name={extendedLocation} type={extendedLocationType} " "--admin-username={adminUsername} " "--ssh-key-values {sshKey} " "--count {count} --mode {mode} --vm-sku-name {vmSkuName} " "--agent-options {agentOptions} --labels {labels} --taints {taints} " "--attached-network-configuration l3-networks={l3Networks} " "--availability-zones {availabilityZones} " "--upgrade-settings max-surge={maxSurge} " "--tags {tags}" ) def METHOD_NAME(test, checks=None): """Kubernetescluster agentpool update operation""" if checks is None: checks = [] test.cmd( "az networkcloud kubernetescluster agentpool update --name {name} " "--kubernetes-cluster-name {clusterName} --resource-group {rg} " "--tags {tagsUpdate}" ) def step_show(test, checks=None): """Kubernetescluster agentpool show operation""" if checks is None: checks = [] test.cmd( "az networkcloud kubernetescluster agentpool show --name {name} " "--kubernetes-cluster-name {clusterName} --resource-group {rg}" ) def step_list(test, checks=None): """Kubernetescluster agentpool list operation""" if checks is None: checks = [] test.cmd( "az networkcloud kubernetescluster agentpool list " "--kubernetes-cluster-name {clusterName} --resource-group {rg}" ) def step_delete(test, checks=None): """Kubernetescluster agentpool delete operation""" if checks is None: checks = [] test.cmd( "az networkcloud kubernetescluster agentpool delete --name {name} " "--kubernetes-cluster-name {clusterName} --resource-group {rg} -y" ) class KubernetesClusterAgentPoolScenarioTest(ScenarioTest): """Kubernetescluster agentpool scenario tests""" def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.kwargs.update( { "name": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "name"), "clusterName": CONFIG.get( "KUBERNETESCLUSTER_AGENTPOOL", "cluster_name" ), "rg": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "resource_group"), "location": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "location"), "extendedLocation": CONFIG.get( "KUBERNETESCLUSTER_AGENTPOOL", "extended_location" ), "extendedLocationType": CONFIG.get("CLUSTER", "extended_location_type"), "tags": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "tags"), "tagsUpdate": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "tags_update"), "adminUsername": CONFIG.get( "KUBERNETESCLUSTER_AGENTPOOL", "admin_username" ), "sshKey": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "ssh_key_values"), "count": 
CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "count"), "mode": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "mode"), "vmSkuName": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "vm_sku_name"), "agentOptions": CONFIG.get( "KUBERNETESCLUSTER_AGENTPOOL", "agent_options" ), "l3Networks": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "l3_networks"), "taints": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "taints"), "labels": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "labels"), "availabilityZones": CONFIG.get( "KUBERNETESCLUSTER_AGENTPOOL", "availability_zones" ), "maxSurge": CONFIG.get("KUBERNETESCLUSTER_AGENTPOOL", "max_surge"), } ) def test_kubernetesclusteragentpool_scenario(self): """test scenario for kubernetes cluster agentpool CRUD operations""" call_scenario1(self)
null
stop
""" Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import abc import asyncio import logging import time from contextlib import suppress from typing import Optional, cast class Job(abc.ABC): """ This is a base class that provides functions for a specific task to ensure regular completion of the loop. A co-routine run must be implemented by a subclass. periodic() will call the co-routine at a regular interval set by self._interval. """ def __init__( self, interval: int, loop: Optional[asyncio.AbstractEventLoop] = None, ) -> None: if loop is None: self._loop = asyncio.get_event_loop() else: self._loop = loop # Task in charge of periodically running the task self._periodic_task = cast(Optional[asyncio.Task], None) # Task in charge of deciding how long to wait until next run self._interval_wait_task = cast(Optional[asyncio.Task], None) self._interval = interval # in seconds self._last_run = cast(Optional[float], None) self._timeout = cast(Optional[float], None) # Condition variable used to control how long the job waits until # executing its task again. self._cond = asyncio.Condition(loop=self._loop) @abc.abstractmethod async def _run(self): """ Once implemented by a subclass, this function will contain the actual work of this Job. """ pass def start(self) -> None: """ kicks off the _periodic while loop """ if self._periodic_task is None: self._periodic_task = self._loop.create_task(self._periodic()) def METHOD_NAME(self) -> None: """ cancels the _periodic while loop """ if self._periodic_task is not None: self._periodic_task.cancel() with suppress(asyncio.CancelledError): # Await task to execute it's cancellation self._loop.run_until_complete(self._periodic_task) self._periodic_task = None def set_timeout(self, timeout: float) -> None: self._timeout = timeout def set_interval(self, interval: int) -> None: """ sets the interval used in _periodic to decide how long to sleep """ self._interval = interval def heartbeat(self) -> None: # record time to keep track of iteration length self._last_run = time.time() def not_completed(self, current_time: float) -> bool: last_time = self._last_run if last_time is None: return True if last_time < current_time - (self._timeout or 120): return True return False async def _sleep_for_interval(self): await asyncio.sleep(self._interval) async with self._cond: self._cond.notify() async def wake_up(self): """ Cancels the _sleep_for_interval task if it exists, and notifies the cond var so that the _periodic loop can continue. """ if self._interval_wait_task is not None: self._interval_wait_task.cancel() async with self._cond: self._cond.notify() async def _periodic(self) -> None: while True: self.heartbeat() try: await self._run() except Exception as exp: # pylint: disable=broad-except logging.exception("Exception from _run: %s", exp) # Wait for self._interval seconds or wake_up is explicitly called self._interval_wait_task = \ self._loop.create_task(self._sleep_for_interval()) async with self._cond: await self._cond.wait()
null
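A hedged usage sketch for the Job base class above (not part of the original record): HeartbeatJob and its print payload are made up for illustration, and METHOD_NAME is this record's masked method (labelled "stop").

class HeartbeatJob(Job):
    """Hypothetical subclass that does some work every `interval` seconds."""

    async def _run(self):
        # The actual periodic work goes here.
        print("heartbeat")


loop = asyncio.new_event_loop()
job = HeartbeatJob(interval=10, loop=loop)
job.start()  # schedules Job._periodic() on the loop
# loop.run_forever() would drive the job; from another coroutine,
# `await job.wake_up()` skips the current sleep, and the masked
# job.METHOD_NAME() (labelled "stop") cancels the periodic task.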
test budget authority endpoint sort year
import pytest
from model_bakery import baker
from rest_framework import status


@pytest.fixture
def model_instances():
    baker.make("accounts.BudgetAuthority", year=2000, amount=2000000, agency_identifier="000")
    baker.make("accounts.BudgetAuthority", year=2001, amount=2001000, agency_identifier="000")
    baker.make("accounts.BudgetAuthority", year=2002, amount=2002000, agency_identifier="000")
    baker.make("accounts.BudgetAuthority", year=2003, amount=1003000, agency_identifier="000")
    baker.make("accounts.BudgetAuthority", year=2000, amount=1000, agency_identifier="002")
    baker.make("accounts.BudgetAuthority", year=2000, amount=2000, fr_entity_code="0202", agency_identifier="002")


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint(model_instances, client):
    resp = client.get("/api/v2/budget_authority/agencies/000/")
    assert resp.status_code == status.HTTP_200_OK
    results = resp.json()["results"]
    assert len(results) == 4
    for result in results:
        assert "year" in result
        assert "total" in result


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_no_records(model_instances, client):
    resp = client.get("/api/v2/budget_authority/agencies/001/")
    assert resp.status_code == status.HTTP_200_OK
    assert not resp.json()["results"]


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_no_frec_sums_all(model_instances, client):
    "If FREC is not specified, all records with that AID should be summed"
    resp = client.get("/api/v2/budget_authority/agencies/002/")
    assert resp.status_code == status.HTTP_200_OK
    results = resp.json()["results"]
    assert len(results) == 1
    assert results[0]["total"] == 3000


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_filters_on_frec(model_instances, client):
    "If FREC is specified, sum only records with that FREC"
    resp = client.get("/api/v2/budget_authority/agencies/002/?frec=0202")
    assert resp.status_code == status.HTTP_200_OK
    results = resp.json()["results"]
    assert len(results) == 1
    assert results[0]["total"] == 2000


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_sorts_year_by_default(model_instances, client):
    resp = client.get("/api/v2/budget_authority/agencies/002/")
    assert resp.status_code == status.HTTP_200_OK
    results = resp.json()["results"]
    years = [r["year"] for r in results]
    assert years == sorted(years)


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_bad_sort_parameters(model_instances, client):
    "Appropriate errors should be thrown if bad sort parameters supplied"
    resp = client.get("/api/v2/budget_authority/agencies/000/?sort=wxyz")
    # Even though I'm raising ParseErrors, which should be 400s,
    # they're being raised as 500s... thus skipping for now
    assert resp.status_code == status.HTTP_400_BAD_REQUEST
    resp = client.get("/api/v2/budget_authority/agencies/002/?sort=year&order=wxyz")
    assert resp.status_code == status.HTTP_400_BAD_REQUEST


@pytest.mark.skip
@pytest.mark.django_db
def METHOD_NAME(model_instances, client):
    "Test support for `sort` and `order` parameters"
    resp = client.get("/api/v2/budget_authority/agencies/000/?sort=year")
    years = [r["year"] for r in resp.json()["results"]]
    assert years == sorted(years)


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_sort_year_desc(model_instances, client):
    resp = client.get("/api/v2/budget_authority/agencies/000/?sort=year&order=desc")
    years = [r["year"] for r in resp.json()["results"]]
    assert years == sorted(years, reverse=True)


@pytest.mark.skip
@pytest.mark.django_db
def test_budget_authority_endpoint_sort_total(model_instances, client):
    resp = client.get("/api/v2/budget_authority/agencies/000/?sort=total&order=desc")
    totals = [r["total"] for r in resp.json()["results"]]
    assert totals == sorted(totals, reverse=True)
null
test compute norm row index for blobs
import unittest from caffe2.python import workspace, brew, model_helper from caffe2.python.modeling.compute_norm_for_blobs import ComputeNormForBlobs import numpy as np class ComputeNormForBlobsTest(unittest.TestCase): def test_compute_norm_for_blobs(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=10, ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm') self.assertEqual(fc1_w_l2_norm.size, 1) self.assertAlmostEqual(fc1_w_l2_norm[0], np.linalg.norm(fc1_w)**2, delta=1e-5) self.assertEqual(len(model.net.Proto().op), 10) assert model.net.output_record() is None def test_compute_norm_for_blobs_modify_output_record(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=10, ) net_modifier(model.net, modify_output_record=True) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm') self.assertEqual(fc1_w_l2_norm.size, 1) self.assertAlmostEqual(fc1_w_l2_norm[0], np.linalg.norm(fc1_w)**2, delta=1e-5) self.assertEqual(len(model.net.Proto().op), 10) assert 'fc1_w' + net_modifier.field_name_suffix() in\ model.net.output_record().field_blobs(),\ model.net.output_record().field_blobs() assert 'fc2_w' + net_modifier.field_name_suffix() in\ model.net.output_record().field_blobs(),\ model.net.output_record().field_blobs() def test_compute_averaged_norm_for_blobs(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=10, compute_averaged_norm=True, ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_averaged_l2_norm = workspace.FetchBlob('fc1_w_averaged_l2_norm') self.assertEqual(fc1_w_averaged_l2_norm.size, 1) self.assertAlmostEqual(fc1_w_averaged_l2_norm[0], np.linalg.norm(fc1_w)**2 / fc1_w.size, delta=1e-5) self.assertEqual(len(model.net.Proto().op), 10) def test_compute_norm_for_blobs_no_print(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=-1, ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = 
workspace.FetchBlob('fc1_w') fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm') self.assertEqual(fc1_w_l2_norm.size, 1) self.assertAlmostEqual(fc1_w_l2_norm[0], np.linalg.norm(fc1_w)**2, delta=1e-5) self.assertEqual(len(model.net.Proto().op), 8) def test_compute_l1_norm_for_blobs(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=10, p=1, ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_l1_norm = workspace.FetchBlob('fc1_w_l1_norm') self.assertEqual(fc1_w_l1_norm.size, 1) self.assertAlmostEqual(fc1_w_l1_norm[0], np.sum(np.abs(fc1_w)), delta=1e-5) self.assertEqual(len(model.net.Proto().op), 10) def test_compute_l1_averaged_norm_for_blobs(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) # no operator name set, will use default brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1) net_modifier = ComputeNormForBlobs( blobs=['fc1_w', 'fc2_w'], logging_frequency=10, p=1, compute_averaged_norm=True, ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_averaged_l1_norm = workspace.FetchBlob('fc1_w_averaged_l1_norm') self.assertEqual(fc1_w_averaged_l1_norm.size, 1) self.assertAlmostEqual(fc1_w_averaged_l1_norm[0], np.sum(np.abs(fc1_w)) / fc1_w.size, delta=1e-5) self.assertEqual(len(model.net.Proto().op), 10) def METHOD_NAME(self): model = model_helper.ModelHelper(name="test") data = model.net.AddExternalInput("data") fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2) net_modifier = ComputeNormForBlobs( blobs=['fc1_w'], logging_frequency=10, compute_averaged_norm=True, row_index=1 ) net_modifier(model.net) workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32)) workspace.RunNetOnce(model.param_init_net) workspace.RunNetOnce(model.net) fc1_w = workspace.FetchBlob('fc1_w') fc1_w_row_1_averaged_l2_norm = workspace.FetchBlob('fc1_w_row_1_averaged_l2_norm') self.assertEqual(fc1_w_row_1_averaged_l2_norm.size, 1) self.assertAlmostEqual(fc1_w_row_1_averaged_l2_norm[0], np.linalg.norm(fc1_w[1])**2 / fc1_w[1].size, delta=1e-5)
null
request path
import logging import uuid from datetime import datetime import architect from django.contrib.auth.models import AnonymousUser, User from django.db import models from dimagi.utils.web import get_ip from corehq.apps.domain.utils import get_domain_from_url from corehq.util.models import ( ForeignValue, NullJsonField, TruncatingCharField, foreign_init, ) log = logging.getLogger(__name__) def make_uuid(): return uuid.uuid4().hex def getdate(): return datetime.utcnow() STANDARD_HEADER_KEYS = [ 'X_FORWARDED_FOR', 'X_FORWARDED_HOST', 'X_FORWARDED_SERVER', 'VIA', 'HTTP_REFERER', 'REQUEST_METHOD', 'QUERY_STRING', 'HTTP_ACCEPT_CHARSET', 'HTTP_CONNECTION', 'HTTP_COOKIE', 'SERVER_NAME', 'SERVER_PORT', 'HTTP_ACCEPT', 'REMOTE_ADDR', 'HTTP_ACCEPT_LANGUAGE', 'CONTENT_TYPE', 'CONTENT_LENGTH', 'HTTP_ACCEPT_ENCODING', 'HTTP_USER_AGENT', # settings.AUDIT_TRACE_ID_HEADER (django-ified) will be added here ] def get_standard_headers(request_meta, exclude=None): exclude = exclude or [] headers = {} for k in STANDARD_HEADER_KEYS: if k not in exclude: header_item = request_meta.get(k, None) if header_item is not None: headers[k] = header_item return headers class UserAgent(models.Model): value = models.CharField(max_length=255, db_index=True, unique=True) class HttpAccept(models.Model): value = models.CharField(max_length=255, db_index=True, unique=True) class ViewName(models.Model): value = models.CharField(max_length=255, db_index=True, unique=True) class AuditEvent(models.Model): id = models.BigAutoField(primary_key=True) user = models.CharField(max_length=255, null=True, blank=True) domain = models.CharField(max_length=126, null=True, blank=True) event_date = models.DateTimeField(default=getdate, db_index=True) path = TruncatingCharField(max_length=255, blank=True, default='') ip_address = models.CharField(max_length=45, blank=True, default='') session_key = models.CharField(max_length=255, blank=True, null=True) user_agent_fk = models.ForeignKey( UserAgent, null=True, db_index=False, on_delete=models.PROTECT) user_agent = ForeignValue(user_agent_fk, truncate=True) couch_id = models.CharField(max_length=126, null=True) @property def doc_type(self): return type(self).__name__ @property def description(self): raise NotImplementedError("abstract property") class Meta: abstract = True indexes = [ models.Index(fields=["user", "event_date"]), models.Index(fields=["domain", "event_date"]), ] def __str__(self): return "[%s] %s" % (self.doc_type, self.description) @classmethod def create_audit(cls, request, user): audit = cls() audit.domain = get_domain(request) audit.path = request.path audit.ip_address = get_ip(request) audit.session_key = request.session.session_key audit.user_agent = request.META.get('HTTP_USER_AGENT') if isinstance(user, AnonymousUser): audit.user = None elif user is None: audit.user = None elif isinstance(user, User): audit.user = user.username else: audit.user = user return audit @architect.install('partition', type='range', subtype='date', constraint='month', column='event_date') @foreign_init class NavigationEventAudit(AuditEvent): """ Audit event to track happenings within the system, ie, view access """ params = TruncatingCharField(max_length=4096, blank=True, default='') view_fk = models.ForeignKey( ViewName, null=True, db_index=False, on_delete=models.PROTECT) view = ForeignValue(view_fk, truncate=True) view_kwargs = NullJsonField(default=dict) headers = NullJsonField(default=dict) status_code = models.SmallIntegerField(default=0) class Meta(AuditEvent.Meta): constraints = [ 
models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name="audit_nav_couch_875bc_idx"), ] @property def description(self): return self.user or "" @property def METHOD_NAME(self): return f"{self.path}?{self.params}" @property def request_method(self): headers = self.headers or {} return headers.get("REQUEST_METHOD", "") @classmethod def audit_view(cls, request, user, view_func, view_kwargs): audit = cls.create_audit(request, user) if request.GET: audit.params = request.META.get("QUERY_STRING", "") audit.view = "%s.%s" % (view_func.__module__, view_func.__name__) audit.headers.update(get_standard_headers(request.META, exclude=['HTTP_USER_AGENT'])) # it's a bit verbose to go to that extreme, TODO: need to have # targeted fields in the META, but due to server differences, it's # hard to make it universal. audit.view_kwargs = view_kwargs return audit ACCESS_LOGIN = 'i' ACCESS_LOGOUT = 'o' ACCESS_FAILED = 'f' ACCESS_CHOICES = { ACCESS_LOGIN: "Login", ACCESS_LOGOUT: "Logout", ACCESS_FAILED: "Login failed", } @architect.install('partition', type='range', subtype='date', constraint='month', column='event_date') @foreign_init class AccessAudit(AuditEvent): access_type = models.CharField(max_length=1, choices=ACCESS_CHOICES.items()) http_accept_fk = models.ForeignKey( HttpAccept, null=True, db_index=False, on_delete=models.PROTECT) http_accept = ForeignValue(http_accept_fk, truncate=True) trace_id = models.CharField(max_length=127, null=True, blank=True) class Meta(AuditEvent.Meta): constraints = [ models.UniqueConstraint(fields=['couch_id'], condition=models.Q(couch_id__isnull=False), name="audit_access_couch_10d1b_idx"), ] # Optional (django-ified) settings.AUDIT_TRACE_ID_HEADER set by AuditcareConfig trace_id_header = None @property def description(self): return f"{ACCESS_CHOICES[self.access_type]}: {self.user or ''}" @classmethod def create_audit(cls, request, user, access_type): '''Creates an instance of a Access log.''' audit = super().create_audit(request, user) audit.http_accept = request.META.get('HTTP_ACCEPT') audit.access_type = access_type if cls.trace_id_header is not None: audit.trace_id = request.META.get(cls.trace_id_header) return audit @classmethod def audit_login(cls, request, user, *args, **kwargs): audit = cls.create_audit(request, user, ACCESS_LOGIN) audit.save() @classmethod def audit_login_failed(cls, request, username, *args, **kwargs): audit = cls.create_audit(request, username, ACCESS_FAILED) audit.save() @classmethod def audit_logout(cls, request, user): audit = cls.create_audit(request, user, ACCESS_LOGOUT) audit.save() def audit_login(sender, *, request, user, **kwargs): AccessAudit.audit_login(request, user) # success def audit_logout(sender, *, request, user, **kwargs): AccessAudit.audit_logout(request, user) def audit_login_failed(sender, *, request, credentials, **kwargs): AccessAudit.audit_login_failed(request, credentials["username"]) def get_domain(request): domain = get_domain_from_url(request.path) domain2 = getattr(request, "domain", None) if domain2: if not domain: domain = domain2 elif domain != domain2: log.error("domain mismatch for request %s: %r != %r", request.path, domain, domain2) return domain
null
test timing
############################ Copyrights and license ############################ # # # Copyright 2020 Steve Kowalik <[email protected]> # # # # This file is part of PyGithub. # # http://pygithub.readthedocs.io/ # # # # PyGithub is free software: you can redistribute it and/or modify it under # # the terms of the GNU Lesser General Public License as published by the Free # # Software Foundation, either version 3 of the License, or (at your option) # # any later version. # # # # PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY # # WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # # FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more # # details. # # # # You should have received a copy of the GNU Lesser General Public License # # along with PyGithub. If not, see <http://www.gnu.org/licenses/>. # # # ################################################################################ from datetime import datetime, timezone from . import Framework class WorkflowRun(Framework.TestCase): def setUp(self): super().setUp() self.repo = self.g.get_repo("PyGithub/PyGithub") self.workflow_run = self.repo.get_workflow_run(3881497935) def testAttributes(self): self.assertEqual( repr(self.workflow_run), 'WorkflowRun(url="https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935", id=3881497935)', ) self.assertEqual(self.workflow_run.id, 3881497935) self.assertEqual(self.workflow_run.name, "CI") self.assertEqual(self.workflow_run.head_branch, "feat/workflow-run") self.assertEqual(self.workflow_run.head_sha, "c6e5cac67a58a4eb11f1f28567a77a6e2cc8ee98") self.assertEqual(self.workflow_run.path, ".github/workflows/ci.yml") self.assertEqual(self.workflow_run.display_title, "TEST PR") self.assertEqual(self.workflow_run.run_number, 930) self.assertEqual(self.workflow_run.run_attempt, 1) self.assertEqual( self.workflow_run.run_started_at, datetime(2023, 1, 10, 8, 24, 19, tzinfo=timezone.utc), ) self.assertEqual(self.workflow_run.event, "pull_request") self.assertEqual(self.workflow_run.status, "completed") self.assertEqual(self.workflow_run.conclusion, "success") self.assertEqual(self.workflow_run.workflow_id, 1903133) self.assertEqual( self.workflow_run.url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935", ) self.assertEqual( self.workflow_run.html_url, "https://github.com/PyGithub/PyGithub/actions/runs/3881497935", ) self.assertEqual(self.workflow_run.pull_requests, []) created_at = datetime(2023, 1, 10, 8, 24, 19, tzinfo=timezone.utc) self.assertEqual(self.workflow_run.created_at, created_at) updated_at = datetime(2023, 1, 10, 8, 28, 20, tzinfo=timezone.utc) self.assertEqual(self.workflow_run.updated_at, updated_at) self.assertEqual( self.workflow_run.jobs_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935/jobs", ) self.assertEqual( self.workflow_run.logs_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935/logs", ) self.assertEqual( self.workflow_run.check_suite_url, "https://api.github.com/repos/PyGithub/PyGithub/check-suites/10279069747", ) self.assertEqual( self.workflow_run.artifacts_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935/artifacts", ) self.assertEqual( self.workflow_run.cancel_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935/cancel", ) self.assertEqual( self.workflow_run.rerun_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/runs/3881497935/rerun", ) self.assertEqual( 
self.workflow_run.workflow_url, "https://api.github.com/repos/PyGithub/PyGithub/actions/workflows/1903133", ) self.assertEqual(self.workflow_run.head_commit.message, "add attribute 'name' on WorkflowRun") self.assertEqual(self.workflow_run.repository.name, "PyGithub") self.assertEqual(self.workflow_run.head_repository.name, "PyGithub") def METHOD_NAME(self): timing = self.workflow_run.timing() self.assertEqual( timing.billable, { "UBUNTU": { "job_runs": [ {"duration_ms": 0, "job_id": 10545727758}, {"duration_ms": 0, "job_id": 10545727888}, {"duration_ms": 0, "job_id": 10545728039}, {"duration_ms": 0, "job_id": 10545728190}, {"duration_ms": 0, "job_id": 10545728356}, ], "jobs": 5, "total_ms": 0, } }, ) self.assertEqual(timing.run_duration_ms, 241000) def test_rerun(self): wr = self.repo.get_workflow_run(3910280793) self.assertFalse(wr.rerun()) def test_rerun_with_successful_run(self): wr = self.repo.get_workflow_run(3881497935) self.assertFalse(wr.rerun()) def test_cancel(self): wr = self.repo.get_workflow_run(3911660493) self.assertFalse(wr.cancel()) def test_delete(self): wr = self.repo.get_workflow_run(3881497935) self.assertFalse(wr.delete()) def test_jobs(self): self.assertListKeyEqual( self.workflow_run.jobs(), lambda j: j.id, [10545727758, 10545727888, 10545728039, 10545728190, 10545728356], )
null
update group
from typing import List, TypeVar import logging from dispatch.case.models import Case from dispatch.database.core import SessionLocal from dispatch.database.core import get_table_name_by_class_instance from dispatch.event import service as event_service from dispatch.incident.models import Incident from dispatch.plugin import service as plugin_service from .enums import GroupType, GroupAction from .models import Group, GroupCreate from .service import create log = logging.getLogger(__name__) Subject = TypeVar("Subject", Case, Incident) def create_group( subject: Subject, group_type: str, group_participants: List[str], db_session: SessionLocal ): """Creates a group.""" plugin = plugin_service.get_active_instance( db_session=db_session, project_id=subject.project.id, plugin_type="participant-group" ) if not plugin: log.warning("Group not created. No group plugin enabled.") return group_name = subject.name if group_type == GroupType.notifications: group_name = f"{subject.name}-{GroupType.notifications}" # we create the external group try: external_group = plugin.instance.create(name=group_name, participants=group_participants) except Exception as e: log.exception(e) return if not external_group: log.error(f"Group not created. Plugin {plugin.plugin.slug} encountered an error.") return external_group.update( { "resource_type": f"{plugin.plugin.slug}-{group_type}-group", "resource_id": external_group["id"], } ) # we create the internal group group_in = GroupCreate( name=external_group["name"], email=external_group["email"], resource_type=external_group["resource_type"], resource_id=external_group["resource_id"], weblink=external_group["weblink"], ) group = create(db_session=db_session, group_in=group_in) subject.groups.append(group) if group_type == GroupType.tactical: subject.tactical_group_id = group.id if group_type == GroupType.notifications: subject.notifications_group_id = group.id db_session.add(subject) db_session.commit() subject_type = get_table_name_by_class_instance(subject) if subject_type == "case": event_service.log_case_event( db_session=db_session, source=plugin.plugin.title, description=f"Case {group_type} group created", case_id=subject.id, ) if subject_type == "incident": event_service.log_incident_event( db_session=db_session, source=plugin.plugin.title, description=f"Incident {group_type} group created", incident_id=subject.id, ) return group def METHOD_NAME( subject: Subject, group: Group, group_action: GroupAction, group_member: str, db_session: SessionLocal, ): """Updates an existing group.""" plugin = plugin_service.get_active_instance( db_session=db_session, project_id=subject.project.id, plugin_type="participant-group" ) if not plugin: log.warning("Group not updated. 
No group plugin enabled.") return # we get the list of group members try: group_members = plugin.instance.list(email=group.email) except Exception as e: log.exception(e) return subject_type = get_table_name_by_class_instance(subject) # we add the member to the group if it's not a member if group_action == GroupAction.add_member and group_member not in group_members: try: plugin.instance.add(email=group.email, participants=[group_member]) except Exception as e: log.exception(e) return if subject_type == "case": event_service.log_case_event( db_session=db_session, source=plugin.plugin.title, description=f"{group_member} added to case group ({group.email})", case_id=subject.id, ) if subject_type == "incident": event_service.log_incident_event( db_session=db_session, source=plugin.plugin.title, description=f"{group_member} added to incident group ({group.email})", incident_id=subject.id, ) # we remove the member from the group if it's a member if group_action == GroupAction.remove_member and group_member in group_members: try: plugin.instance.remove(email=group.email, participants=[group_member]) except Exception as e: log.exception(e) return if subject_type == "case": event_service.log_case_event( db_session=db_session, source=plugin.plugin.title, description=f"{group_member} removed from case group ({group.email})", case_id=subject.id, ) if subject_type == "incident": event_service.log_incident_event( db_session=db_session, source=plugin.plugin.title, description=f"{group_member} removed from incident group ({group.email})", incident_id=subject.id, ) def delete_group(group: Group, project_id: int, db_session: SessionLocal): """Deletes an existing group.""" plugin = plugin_service.get_active_instance( db_session=db_session, project_id=project_id, plugin_type="participant-group" ) if plugin: try: plugin.instance.delete(email=group.email) except Exception as e: log.exception(e) else: log.warning("Group not deleted. No group plugin enabled.")
null
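A hedged calling sketch for the group flows above (not part of the original record): `incident` and `db_session` are assumed to come from the surrounding application, the participant emails are made up, and METHOD_NAME is this record's masked function (labelled "update group").

# Assumes an existing Incident row and an open database session.
group = create_group(
    subject=incident,
    group_type=GroupType.tactical,
    group_participants=["[email protected]", "[email protected]"],
    db_session=db_session,
)
if group:
    # The masked METHOD_NAME adds or removes a member on the existing group.
    METHOD_NAME(
        subject=incident,
        group=group,
        group_action=GroupAction.add_member,
        group_member="[email protected]",
        db_session=db_session,
    )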
filter log events
# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # with the License. A copy of the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. import datetime import json from pcluster.aws.common import AWSClientError, AWSExceptionHandler, Boto3Client from pcluster.utils import datetime_to_epoch class LogsClient(Boto3Client): """Logs Boto3 client.""" def __init__(self): super().__init__("logs") def log_group_exists(self, log_group_name): """Return true if log group exists, false otherwise.""" try: self.describe_log_group(log_group_name) return True except AWSClientError: return False @AWSExceptionHandler.handle_client_exception def delete_log_group(self, log_group_name): """Delete log group by given log group name.""" self._client.delete_log_group(logGroupName=log_group_name) @AWSExceptionHandler.handle_client_exception def describe_log_groups(self, log_group_name_prefix=None): """Return a list of log groups .""" return list(self._paginate_results(self._client.describe_log_groups, logGroupNamePrefix=log_group_name_prefix)) @AWSExceptionHandler.handle_client_exception def describe_log_group(self, log_group_name): """Return log group identified by the given log group name.""" for group in self.describe_log_groups(log_group_name_prefix=log_group_name): if group.get("logGroupName") == log_group_name: return group raise AWSClientError(function_name="describe_log_groups", message=f"Log Group {log_group_name} not found") @AWSExceptionHandler.handle_client_exception def METHOD_NAME(self, log_group_name, start_time=None, end_time=None, log_stream_name_prefix=None): """Return the list of events included in a specific time window for a given group name.""" kwargs = {"logGroupName": log_group_name, "limit": 1} if start_time: kwargs["startTime"] = start_time if end_time: kwargs["endTime"] = end_time if log_stream_name_prefix: kwargs["logStreamNamePrefix"] = log_stream_name_prefix return self._client.METHOD_NAME(**kwargs).get("events") @AWSExceptionHandler.handle_client_exception def get_log_events( self, log_group_name, log_stream_name, start_time=None, end_time=None, limit=None, start_from_head=None, next_token=None, ): """Return the list of events included in a specific time window for a given log stream.""" kwargs = {"logGroupName": log_group_name, "logStreamName": log_stream_name} if start_time: kwargs["startTime"] = start_time if end_time: kwargs["endTime"] = end_time if limit: kwargs["limit"] = limit if start_from_head is not None: kwargs["startFromHead"] = start_from_head if next_token: kwargs["nextToken"] = next_token return self._client.get_log_events(**kwargs) @AWSExceptionHandler.handle_client_exception def create_export_task( self, log_group_name, bucket, bucket_prefix=None, log_stream_name_prefix=None, start_time: datetime.datetime = None, end_time: datetime.datetime = None, ): """Start the task that will export a log group name to an s3 bucket, and return the task ID.""" kwargs = { "logGroupName": log_group_name, "fromTime": start_time and datetime_to_epoch(start_time), "to": end_time and datetime_to_epoch(end_time), "destination": bucket, "destinationPrefix": bucket_prefix, } if 
log_stream_name_prefix: kwargs["logStreamNamePrefix"] = log_stream_name_prefix return self._client.create_export_task(**kwargs).get("taskId") @AWSExceptionHandler.handle_client_exception def get_export_task_status(self, task_id): """Get the status for the CloudWatch export task with the given task_id.""" tasks = self._client.describe_export_tasks(taskId=task_id).get("exportTasks", None) if not tasks: raise AWSClientError(function_name="describe_export_tasks", message=f"Log export task {task_id} not found") if len(tasks) > 2: raise AWSClientError( function_name="describe_export_tasks", message="More than one CloudWatch logs export task with ID={task_id}:\n{tasks}".format( task_id=task_id, tasks=json.dumps(tasks, indent=2) ), ) return tasks[0].get("status").get("code") @AWSExceptionHandler.handle_client_exception def describe_log_streams(self, log_group_name, log_stream_name_prefix=None, next_token=None): """Return a list of log streams in the given log group, filtered by the given prefix.""" kwargs = {"logGroupName": log_group_name} if log_stream_name_prefix: kwargs["logStreamNamePrefix"] = log_stream_name_prefix if next_token: kwargs["nextToken"] = next_token return self._client.describe_log_streams(**kwargs)
null
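A hedged usage sketch for the CloudWatch Logs wrapper above (not part of the original record): the log group and stream names are placeholders, and METHOD_NAME is this record's masked method (labelled "filter log events"), assumed to wrap the underlying filter call as the surrounding methods suggest.

client = LogsClient()
if client.log_group_exists("/aws/parallelcluster/my-cluster"):
    # List streams under a (hypothetical) node prefix, then pull matching events.
    streams = client.describe_log_streams(
        log_group_name="/aws/parallelcluster/my-cluster",
        log_stream_name_prefix="ip-10-0-0",
    )
    events = client.METHOD_NAME(
        log_group_name="/aws/parallelcluster/my-cluster",
        log_stream_name_prefix="ip-10-0-0",
    )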
test nodecode i386
# pylint: disable=missing-class-docstring,no-self-use import gc import logging import os import pickle import unittest from common import slow_test import angr from angr.state_plugins.history import HistoryIter l = logging.getLogger("angr.tests") test_location = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "..", "binaries", "tests") target_addrs = { "i386": [0x080485C9], "x86_64": [0x4006ED], "ppc": [0x1000060C], "armel": [0x85F0], "android/arm": [0x4004CC], "mips": [0x4009FC], } avoid_addrs = { "i386": [0x080485DD, 0x08048564], "x86_64": [0x4006AA, 0x4006FD], "ppc": [0x10000644, 0x1000059C], "armel": [0x86F8, 0x857C], "android/arm": [0x4004F0, 0x400470], "mips": [0x400A10, 0x400774], } corrupt_addrs = { "i386": [0x80486B6, b"bO\xcc", lambda s: s.memory.store(s.regs.esp, s.regs.eax)], "x86_64": [0x400742, b"\x0f\x0b\xb0[\x41", lambda s: s.registers.store("rdx", 8)], "ppc": [0x100006B8, b"\x05\xad\xc2\xea", lambda s: s.registers.store("r5", 8)], "armel": [0x8678, b"\xbdM\xec3", lambda s: s.registers.store("r2", 8)], "mips": [0x400918, b"[\xf8\x96@"[::-1], lambda s: s.registers.store("a2", 8)], } divergences = { "ppc": 0x10000588, "x86_64": 0x40068E, "i386": 0x8048559, "armel": 0x8568, "android/arm": 0x40045C, "mips": 0x40075C, } class TestFauxware(unittest.TestCase): def _run_fauxware(self, arch): p = angr.Project(os.path.join(test_location, arch, "fauxware"), auto_load_libs=False) results = p.factory.simulation_manager().explore(find=target_addrs[arch], avoid=avoid_addrs[arch]) stdin = results.found[0].posix.dumps(0) assert b"\x00\x00\x00\x00\x00\x00\x00\x00\x00SOSNEAKY\x00" == stdin # test the divergence detection ancestor = results.found[0].history.closest_common_ancestor((results.avoid + results.active)[0].history) divergent_point = list(HistoryIter(results.found[0].history, end=ancestor))[0] # p.factory.block(divergent_point.addr).pp() assert divergent_point.recent_bbl_addrs[0] == divergences[arch] def _run_pickling(self, arch): p = angr.Project(os.path.join(test_location, arch, "fauxware"), auto_load_libs=False) pg = p.factory.simulation_manager().run(n=10) pickled = pickle.dumps(pg, pickle.HIGHEST_PROTOCOL) del p del pg gc.collect() pg = pickle.loads(pickled) pg.explore(find=target_addrs[arch], avoid=avoid_addrs[arch]) stdin = pg.found[0].posix.dumps(0) assert b"\x00\x00\x00\x00\x00\x00\x00\x00\x00SOSNEAKY\x00" == stdin @slow_test def _run_fastmem(self, arch): p = angr.Project(os.path.join(test_location, arch, "fauxware"), auto_load_libs=False) p.analyses.CongruencyCheck(throw=True).set_state_options(right_add_options={"FAST_REGISTERS"}).run() def _run_nodecode(self, arch): p = angr.Project(os.path.join(test_location, arch, "fauxware"), auto_load_libs=False) # screw up the instructions and make sure the test fails with nodecode for i, c in enumerate(corrupt_addrs[arch][1]): p.loader.memory[corrupt_addrs[arch][0] + i] = c boned = p.factory.simulation_manager().explore(find=target_addrs[arch], avoid=avoid_addrs[arch]) assert len(boned.errored) >= 1 assert isinstance(boned.errored[0].error, angr.SimIRSBNoDecodeError) assert boned.errored[0].state.addr == corrupt_addrs[arch][0] # hook the instructions with the emulated stuff p.hook( corrupt_addrs[arch][0], corrupt_addrs[arch][2], length=len(corrupt_addrs[arch][1]), ) results = p.factory.simulation_manager().explore(find=target_addrs[arch], avoid=avoid_addrs[arch]) stdin = results.found[0].posix.dumps(0) assert b"\x00\x00\x00\x00\x00\x00\x00\x00\x00SOSNEAKY\x00" == stdin def _run_merge(self, arch): p = 
angr.Project(os.path.join(test_location, arch, "fauxware"), auto_load_libs=False) pg = p.factory.simulation_manager() pg.explore() # release the unmergable data for s in pg.deadended: s.release_plugin("fs") if 3 in s.posix.fd: s.posix.close(3) pg.merge(stash="deadended", merge_key=lambda s: s.addr) path = pg.deadended[[b"Welcome" in s for s in pg.mp_deadended.posix.dumps(1).mp_items].index(True)] yes, no = path.history.merge_conditions inp = path.posix.stdin.content[2][0] # content of second packet try: assert b"SOSNEAKY" in path.solver.eval(inp, cast_to=bytes, extra_constraints=(yes,)) assert b"SOSNEAKY" not in path.solver.eval(inp, cast_to=bytes, extra_constraints=(no,)) except AssertionError: yes, no = no, yes assert b"SOSNEAKY" in path.solver.eval(inp, cast_to=bytes, extra_constraints=(yes,)) assert b"SOSNEAKY" not in path.solver.eval(inp, cast_to=bytes, extra_constraints=(no,)) def test_merge_i386(self): self._run_merge("i386") def test_merge_x86_64(self): self._run_merge("x86_64") def test_merge_ppc(self): self._run_merge("ppc") def test_merge_armel(self): self._run_merge("armel") def test_merge_android(self): self._run_merge("android/arm") def test_merge_mips(self): self._run_merge("mips") def test_fauxware_i386(self): self._run_fauxware("i386") def test_fauxware_x86_64(self): self._run_fauxware("x86_64") def test_fauxware_ppc(self): self._run_fauxware("ppc") def test_fauxware_armel(self): self._run_fauxware("armel") def test_fauxware_android(self): self._run_fauxware("android/arm") def test_fauxware_mips(self): self._run_fauxware("mips") def test_pickling_i386(self): self._run_pickling("i386") def test_pickling_x86_64(self): self._run_pickling("x86_64") def test_pickling_ppc(self): self._run_pickling("ppc") def test_pickling_armel(self): self._run_pickling("armel") def test_pickling_mips(self): self._run_pickling("mips") @slow_test def test_fastmen(self): self._run_fastmem("x86_64") def METHOD_NAME(self): self._run_nodecode("i386") def test_nodecode_x86_64(self): self._run_nodecode("x86_64") def test_nodecode_ppc(self): self._run_nodecode("ppc") def test_nodecode_armel(self): self._run_nodecode("armel") def test_nodecode_mips(self): self._run_nodecode("mips") if __name__ == "__main__": unittest.main()
null
validate
# This code is part of Qiskit.
#
# (C) Copyright IBM 2022.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

"""Runtime options that control the execution environment."""

import re
import logging
from dataclasses import dataclass
from typing import Optional, List

from .exceptions import IBMInputValueError
from .utils.utils import validate_job_tags


@dataclass(init=False)
class RuntimeOptions:
    """Class for representing generic runtime execution options."""

    backend: Optional[str] = None
    image: Optional[str] = None
    log_level: Optional[str] = None
    instance: Optional[str] = None
    job_tags: Optional[List[str]] = None
    max_execution_time: Optional[int] = None
    session_time: Optional[int] = None

    def __init__(
        self,
        backend: Optional[str] = None,
        image: Optional[str] = None,
        log_level: Optional[str] = None,
        instance: Optional[str] = None,
        job_tags: Optional[List[str]] = None,
        max_execution_time: Optional[int] = None,
        session_time: Optional[int] = None,
    ) -> None:
        """RuntimeOptions constructor.

        Args:
            backend: target backend to run on. This is required for ``ibm_quantum`` channel.
            image: the runtime image used to execute the program, specified in
                the form of ``image_name:tag``. Not all accounts are
                authorized to select a different image.
            log_level: logging level to set in the execution environment. The valid
                log levels are: ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR``, and ``CRITICAL``.
                The default level is ``WARNING``.
            instance: The hub/group/project to use, in that format. This is only supported
                for ``ibm_quantum`` channel. If ``None``, a hub/group/project that provides
                access to the target backend is randomly selected.
            job_tags: Tags to be assigned to the job. The tags can subsequently be used
                as a filter in the :meth:`jobs()` function call.
            max_execution_time: Maximum execution time in seconds, which is based on
                quantum time (not wall clock time). Quantum time is the time that
                the QPU complex (including control software, control electronics, QPU,
                and so on) is engaged in processing the job. If a job exceeds this time
                limit, it is forcibly cancelled. Simulator jobs continue to use wall
                clock time because they do not have quantum time.
            session_time: Length of session in seconds.
        """
        self.backend = backend
        self.image = image
        self.log_level = log_level
        self.instance = instance
        self.job_tags = job_tags
        self.max_execution_time = max_execution_time
        self.session_time = session_time

    def METHOD_NAME(self, channel: str) -> None:
        """Validate options.

        Args:
            channel: channel type.

        Raises:
            IBMInputValueError: If one or more option is invalid.
        """
        if self.image and not re.match(
            "[a-zA-Z0-9]+([/.\\-_][a-zA-Z0-9]+)*:[a-zA-Z0-9]+([.\\-_][a-zA-Z0-9]+)*$",
            self.image,
        ):
            raise IBMInputValueError('"image" needs to be in form of image_name:tag')

        if channel == "ibm_quantum" and not self.backend:
            raise IBMInputValueError(
                '"backend" is required field in "options" for "ibm_quantum" channel.'
            )

        if self.instance and channel != "ibm_quantum":
            raise IBMInputValueError('"instance" is only supported for "ibm_quantum" channel.')

        if self.log_level and not isinstance(logging.getLevelName(self.log_level.upper()), int):
            raise IBMInputValueError(
                f"{self.log_level} is not a valid log level. The valid log levels are: `DEBUG`, "
                f"`INFO`, `WARNING`, `ERROR`, and `CRITICAL`."
            )

        if self.job_tags:
            validate_job_tags(self.job_tags, IBMInputValueError)
null
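A hedged usage sketch for RuntimeOptions above (not part of the original record): the backend name is a placeholder, and METHOD_NAME is this record's masked validation method (labelled "validate").

options = RuntimeOptions(
    backend="ibm_placeholder_backend",
    image="my-program:latest",
    log_level="DEBUG",
    max_execution_time=300,
)
# Raises IBMInputValueError if, e.g., the image tag is malformed or the
# backend is missing on the "ibm_quantum" channel.
options.METHOD_NAME(channel="ibm_quantum")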
train docs
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import re import torch from best_download import download_file from lm_eval.base import PerplexityTask from lm_eval.utils import sh def wikitext_detokenizer(string): # contractions string = string.replace("s '", "s'") string = re.sub(r"/' [0-9]/", r"/'[0-9]/", string) # number separators string = string.replace(" @-@ ", "-") string = string.replace(" @,@ ", ",") string = string.replace(" @.@ ", ".") # punctuation # GEO: TODO: What if string ends with punctuation? (e.g. "bla .") Isn't replace(" .", ".") more general? string = string.replace(" : ", ": ") string = string.replace(" ; ", "; ") string = string.replace(" . ", ". ") string = string.replace(" ! ", "! ") string = string.replace(" ? ", "? ") string = string.replace(" , ", ", ") # double brackets string = re.sub(r"\(\s*([^\)]*?)\s*\)", r"(\1)", string) string = re.sub(r"\[\s*([^\]]*?)\s*\]", r"[\1]", string) string = re.sub(r"{\s*([^}]*?)\s*}", r"{\1}", string) string = re.sub(r"\"\s*([^\"]*?)\s*\"", r'"\1"', string) string = re.sub(r"'\s*([^']*?)\s*'", r"'\1'", string) # miscellaneous string = string.replace("= = = =", "====") string = string.replace("= = =", "===") string = string.replace("= =", "==") string = string.replace(" " + chr(176) + " ", chr(176)) string = string.replace(" \n", "\n") string = string.replace("\n ", "\n") string = string.replace(" N ", " 1 ") string = string.replace(" 's", "'s") return string class WikiText(PerplexityTask): VERSION = 0 def __init__(self, cache_dir=""): self.cache_dir = cache_dir super().__init__() def download(self): cache_dir = ( self.cache_dir if self.cache_dir else os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "data") ) if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: if not os.path.exists(cache_dir + "/wikitext/wikitext-2-raw/wiki.valid.raw"): os.makedirs(cache_dir + "/wikitext", exist_ok=True) download_file( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip", local_file=cache_dir + "/wikitext/wikitext-2-raw-v1.zip", expected_checksum="ef7edb566e3e2b2d31b29c1fdb0c89a4cc683597484c3dc2517919c615435a11", ) sh(f"cd {cache_dir}/wikitext && unzip wikitext-2-raw-v1.zip") if torch.distributed.is_initialized(): torch.distributed.barrier() self.cache_dir = cache_dir def fewshot_description(self): # TODO: figure out fewshot description return "" def has_validation_docs(self): return True def has_train_docs(self): return True def has_test_docs(self): return True def docs_for_split(self, split): ret = [] for line in ( open(self.cache_dir + f"/wikitext/wikitext-2-raw/wiki.{split}.raw", encoding='utf-8').read().split("\n") ): rline = line.replace("= = =", "===").replace("= =", "==").strip() if rline.startswith("= ") and rline.strip().endswith(" ="): s = "\n".join(ret) if s.strip(): yield s ret = [] ret.append(line) yield "\n".join(ret) def validation_docs(self): return self.docs_for_split("valid") def 
METHOD_NAME(self): return self.docs_for_split("train") def test_docs(self): return self.docs_for_split("test") def doc_to_target(self, doc): return wikitext_detokenizer(doc) def count_words(self, doc): # count number of words in *original doc before detokenization* return len(re.split(r"\s+", doc)) class WikiText103(WikiText): def download(self): cache_dir = ( self.cache_dir if self.cache_dir else os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir, "data") ) if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0: if not os.path.exists(cache_dir + "/wikitext/wikitext-103-raw/wiki.valid.raw"): os.makedirs(cache_dir + "/wikitext", exist_ok=True) download_file( "https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip", local_file=cache_dir + "/wikitext/wikitext-103-raw-v1.zip", ) sh(f"cd {cache_dir}/wikitext && unzip wikitext-103-raw-v1.zip") if torch.distributed.is_initialized(): torch.distributed.barrier() self.cache_dir = cache_dir def docs_for_split(self, split): ret = [] for line in ( open(self.cache_dir + f"/wikitext/wikitext-103-raw/wiki.{split}.raw", encoding='utf-8').read().split("\n") ): rline = line.replace("= = =", "===").replace("= =", "==").strip() if rline.startswith("= ") and rline.strip().endswith(" ="): s = "\n".join(ret) if s.strip(): yield s ret = [] ret.append(line) yield "\n".join(ret)
null
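A small illustration (not part of the original record) of what wikitext_detokenizer above undoes; the input string is made up.

raw = "The score was 3 @-@ 1 , and the crowd reached 10 @,@ 000 . = = History = ="
print(wikitext_detokenizer(raw))
# -> "The score was 3-1, and the crowd reached 10,000. == History =="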
test get party shipping address
import unittest import frappe from frappe.test_runner import make_test_objects from erpnext.accounts.doctype.payment_entry.payment_entry import get_payment_entry from erpnext.accounts.doctype.purchase_invoice.test_purchase_invoice import make_purchase_invoice from erpnext.accounts.party import get_party_shipping_address from erpnext.accounts.utils import ( get_future_stock_vouchers, get_voucherwise_gl_entries, sort_stock_vouchers_by_posting_date, update_reference_in_payment_entry, ) from erpnext.stock.doctype.item.test_item import make_item from erpnext.stock.doctype.purchase_receipt.test_purchase_receipt import make_purchase_receipt from erpnext.stock.doctype.stock_entry.stock_entry_utils import make_stock_entry class TestUtils(unittest.TestCase): @classmethod def setUpClass(cls): super(TestUtils, cls).setUpClass() make_test_objects("Address", ADDRESS_RECORDS) def METHOD_NAME(self): address = get_party_shipping_address("Customer", "_Test Customer 1") self.assertEqual(address, "_Test Billing Address 2 Title-Billing") def test_get_party_shipping_address2(self): address = get_party_shipping_address("Customer", "_Test Customer 2") self.assertEqual(address, "_Test Shipping Address 2 Title-Shipping") def test_get_voucher_wise_gl_entry(self): pr = make_purchase_receipt( item_code="_Test Item", posting_date="2021-02-01", rate=100, qty=1, warehouse="Stores - TCP1", company="_Test Company with perpetual inventory", ) future_vouchers = get_future_stock_vouchers("2021-01-01", "00:00:00", for_items=["_Test Item"]) voucher_type_and_no = ("Purchase Receipt", pr.name) self.assertTrue( voucher_type_and_no in future_vouchers, msg="get_future_stock_vouchers not returning correct value", ) posting_date = "2021-01-01" gl_entries = get_voucherwise_gl_entries(future_vouchers, posting_date) self.assertTrue( voucher_type_and_no in gl_entries, msg="get_voucherwise_gl_entries not returning expected GLes", ) def test_stock_voucher_sorting(self): vouchers = [] item = make_item().name stock_entry = {"item": item, "to_warehouse": "_Test Warehouse - _TC", "qty": 1, "rate": 10} se1 = make_stock_entry(posting_date="2022-01-01", **stock_entry) se3 = make_stock_entry(posting_date="2022-03-01", **stock_entry) se2 = make_stock_entry(posting_date="2022-02-01", **stock_entry) for doc in (se1, se2, se3): vouchers.append((doc.doctype, doc.name)) vouchers.append(("Stock Entry", "Wat")) sorted_vouchers = sort_stock_vouchers_by_posting_date(list(reversed(vouchers))) self.assertEqual(sorted_vouchers, vouchers) def test_update_reference_in_payment_entry(self): item = make_item().name purchase_invoice = make_purchase_invoice( item=item, supplier="_Test Supplier USD", currency="USD", conversion_rate=82.32, do_not_submit=1 ) purchase_invoice.credit_to = "_Test Payable USD - _TC" purchase_invoice.submit() payment_entry = get_payment_entry(purchase_invoice.doctype, purchase_invoice.name) payment_entry.paid_amount = 15725 payment_entry.deductions = [] payment_entry.save() # below is the difference between base_received_amount and base_paid_amount self.assertEqual(payment_entry.difference_amount, -4855.0) payment_entry.target_exchange_rate = 62.9 payment_entry.save() # below is due to change in exchange rate self.assertEqual(payment_entry.references[0].exchange_gain_loss, -4855.0) payment_entry.references = [] self.assertEqual(payment_entry.difference_amount, 0.0) payment_entry.submit() payment_reconciliation = frappe.new_doc("Payment Reconciliation") payment_reconciliation.company = payment_entry.company payment_reconciliation.party_type = 
"Supplier" payment_reconciliation.party = purchase_invoice.supplier payment_reconciliation.receivable_payable_account = payment_entry.paid_to payment_reconciliation.get_unreconciled_entries() payment_reconciliation.allocate_entries( { "payments": [d.__dict__ for d in payment_reconciliation.payments], "invoices": [d.__dict__ for d in payment_reconciliation.invoices], } ) for d in payment_reconciliation.invoices: # Reset invoice outstanding_amount because allocate_entries will zero this value out. d.outstanding_amount = d.amount for d in payment_reconciliation.allocation: d.difference_account = "Exchange Gain/Loss - _TC" payment_reconciliation.reconcile() payment_entry.load_from_db() self.assertEqual(len(payment_entry.references), 1) self.assertEqual(payment_entry.difference_amount, 0) ADDRESS_RECORDS = [ { "doctype": "Address", "address_type": "Billing", "address_line1": "Address line 1", "address_title": "_Test Billing Address Title", "city": "Lagos", "country": "Nigeria", "links": [ {"link_doctype": "Customer", "link_name": "_Test Customer 2", "doctype": "Dynamic Link"} ], }, { "doctype": "Address", "address_type": "Shipping", "address_line1": "Address line 2", "address_title": "_Test Shipping Address 1 Title", "city": "Lagos", "country": "Nigeria", "links": [ {"link_doctype": "Customer", "link_name": "_Test Customer 2", "doctype": "Dynamic Link"} ], }, { "doctype": "Address", "address_type": "Shipping", "address_line1": "Address line 3", "address_title": "_Test Shipping Address 2 Title", "city": "Lagos", "country": "Nigeria", "is_shipping_address": "1", "links": [ {"link_doctype": "Customer", "link_name": "_Test Customer 2", "doctype": "Dynamic Link"} ], }, { "doctype": "Address", "address_type": "Billing", "address_line1": "Address line 4", "address_title": "_Test Billing Address 2 Title", "city": "Lagos", "country": "Nigeria", "is_shipping_address": "1", "links": [ {"link_doctype": "Customer", "link_name": "_Test Customer 1", "doctype": "Dynamic Link"} ], }, ]
null
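The tests above check that stock vouchers come back ordered by posting date, with vouchers whose posting date is unknown left at the end. A minimal standalone sketch of that ordering idea (a hypothetical helper, not the ERPNext implementation):

from datetime import date

def sort_vouchers_by_posting_date(vouchers, posting_dates):
    # vouchers: list of (voucher_type, name) tuples; posting_dates maps those tuples to a date.
    # Vouchers without a known posting date sort to the end, keeping their relative order.
    return sorted(vouchers, key=lambda v: posting_dates.get(v, date.max))

posting_dates = {
    ("Stock Entry", "SE-1"): date(2022, 1, 1),
    ("Stock Entry", "SE-2"): date(2022, 2, 1),
    ("Stock Entry", "SE-3"): date(2022, 3, 1),
}
shuffled = [("Stock Entry", "Wat"), ("Stock Entry", "SE-3"), ("Stock Entry", "SE-1"), ("Stock Entry", "SE-2")]
print(sort_vouchers_by_posting_date(shuffled, posting_dates))  # SE-1, SE-2, SE-3, then the unknown voucher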
get authorized user
""" Users management Keeps functionality that couples with the following app modules - users, - login - security - resource_manager """ import logging from datetime import datetime import redis.asyncio as aioredis from aiohttp import web from models_library.emails import LowerCaseEmailStr from pydantic import BaseModel, parse_obj_as from servicelib.logging_utils import log_decorator from ..garbage_collector.settings import GUEST_USER_RC_LOCK_FORMAT from ..login.storage import AsyncpgStorage, get_plugin_storage from ..login.utils import ACTIVE, GUEST, get_client_ip, get_random_string from ..redis import get_redis_lock_manager_client from ..security.api import authorized_userid, encrypt_password, is_anonymous, remember from ..users.api import get_user from ..users.exceptions import UserNotFoundError from ._constants import MSG_GUESTS_NOT_ALLOWED from .settings import StudiesDispatcherSettings, get_plugin_settings _logger = logging.getLogger(__name__) class UserInfo(BaseModel): id: int name: str email: str primary_gid: int needs_login: bool = False is_guest: bool = True async def METHOD_NAME(request: web.Request) -> dict: # Returns valid user if it is identified (cookie) and logged in (valid cookie)? user_id = await authorized_userid(request) if user_id is not None: try: user = await get_user(request.app, user_id) return user except UserNotFoundError: return {} return {} async def _create_temporary_guest_user(request: web.Request): db: AsyncpgStorage = get_plugin_storage(request.app) redis_locks_client: aioredis.Redis = get_redis_lock_manager_client(request.app) settings: StudiesDispatcherSettings = get_plugin_settings(app=request.app) random_user_name = get_random_string(min_len=5) email = parse_obj_as(LowerCaseEmailStr, f"{random_user_name}@guest-at-osparc.io") password = get_random_string(min_len=12) expires_at = datetime.utcnow() + settings.STUDIES_GUEST_ACCOUNT_LIFETIME # GUEST_USER_RC_LOCK: # # These locks prevents the GC from deleting a GUEST user in to stages of its lifefime: # # 1. During construction: # - Prevents GC from deleting this GUEST user while it is being created # - Since the user still does not have an ID assigned, the lock is named with his random_user_name # - the timeout here is the TTL of the lock in Redis. in case the webserver is overwhelmed and cannot create # a user during that time or crashes, then redis will ensure the lock disappears and let the garbage collector do its work # MAX_DELAY_TO_CREATE_USER = 3 # secs # # 2. During initialization # - Prevents the GC from deleting this GUEST user, with ID assigned, while it gets initialized and acquires it's first resource # - Uses the ID assigned to name the lock # MAX_DELAY_TO_GUEST_FIRST_CONNECTION = 15 # secs # # # NOTES: # - In case of failure or excessive delay the lock has a timeout that automatically unlocks it # and the GC can clean up what remains # - Notice that the ids to name the locks are unique, therefore the lock can be acquired w/o errors # - These locks are very specific to resources and have timeout so the risk of blocking from GC is small # # (1) read details above async with redis_locks_client.lock( GUEST_USER_RC_LOCK_FORMAT.format(user_id=random_user_name), timeout=MAX_DELAY_TO_CREATE_USER, ): # NOTE: usr Dict is incomplete, e.g. 
does not contain primary_gid usr = await db.create_user( { "name": random_user_name, "email": email, "password_hash": encrypt_password(password), "status": ACTIVE, "role": GUEST, "created_ip": get_client_ip(request), "expires_at": expires_at, } ) user: dict = await get_user(request.app, usr["id"]) # (2) read details above await redis_locks_client.lock( GUEST_USER_RC_LOCK_FORMAT.format(user_id=user["id"]), timeout=MAX_DELAY_TO_GUEST_FIRST_CONNECTION, ).acquire() return user @log_decorator(_logger, level=logging.DEBUG) async def get_or_create_guest_user( request: web.Request, *, allow_anonymous_or_guest_users: bool ) -> UserInfo: """ A user w/o authentication is denoted ANONYMOUS. If allow_anonymous_or_guest_users=True, then these users can be automatically promoted to GUEST. For that, a temporary guest account is created and associated to this user. GUEST users are therefore a special user that is un-identified to us (no email/name, etc) NOTE that if allow_anonymous_or_guest_users=False, GUEST users are NOT allowed in the system either. Arguments: allow_anonymous_or_guest_users -- if True, it will create a temporary GUEST account Raises: web.HTTPUnauthorized if ANONYMOUS users are not allowed (either w/o auth or as GUEST) """ user = None # anonymous = no identity in request is_anonymous_user = await is_anonymous(request) if not is_anonymous_user: # NOTE: covers valid cookie with unauthorized user (e.g. expired guest/banned) user = await METHOD_NAME(request) if not user and allow_anonymous_or_guest_users: _logger.debug("Anonymous user is accepted as guest...") user = await _create_temporary_guest_user(request) is_anonymous_user = True if not allow_anonymous_or_guest_users and (not user or user.get("role") == GUEST): # NOTE: if allow_anonymous_users=False then GUEST users are NOT allowed! raise web.HTTPUnauthorized(reason=MSG_GUESTS_NOT_ALLOWED) assert isinstance(user, dict) # nosec return UserInfo( id=user["id"], name=user["name"], email=user["email"], primary_gid=user["primary_gid"], needs_login=is_anonymous_user, is_guest=user.get("role") == GUEST, ) async def ensure_authentication( user: UserInfo, request: web.Request, response: web.Response ): if user.needs_login: _logger.debug("Auto login for anonymous user %s", user.name) identity = user.email await remember(request, response, identity)
null
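A minimal sketch of the two-stage locking pattern described in the comments above, assuming a Redis server reachable at redis://localhost; the lock names, timeouts and the stand-in user record are illustrative only, not the osparc implementation.

import asyncio
import redis.asyncio as aioredis

async def create_guest(client: aioredis.Redis) -> dict:
    # Stage 1: lock on the random name while the user row is being created,
    # so the garbage collector cannot delete the half-built guest.
    async with client.lock("guest_user_rc_lock:random-name", timeout=3):
        user = {"id": 42, "name": "random-name"}  # stand-in for db.create_user(...)
    # Stage 2: lock on the assigned id until the guest acquires its first resource.
    await client.lock(f"guest_user_rc_lock:{user['id']}", timeout=15).acquire()
    return user

async def main():
    client = aioredis.from_url("redis://localhost")
    print(await create_guest(client))

# asyncio.run(main())  # requires a running Redis instance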
update user permissions
from typing import List, Optional from fastapi import Depends, HTTPException, Security from fastapi.security import SecurityScopes from loguru import logger from sqlalchemy.orm import Session from starlette.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND from fides.api.api import deps from fides.api.models.fides_user import FidesUser from fides.api.models.fides_user_permissions import FidesUserPermissions from fides.api.oauth.roles import APPROVER, OWNER, RoleRegistryEnum from fides.api.oauth.utils import get_current_user, oauth2_scheme, verify_oauth_client from fides.api.schemas.user_permission import ( UserPermissionsCreate, UserPermissionsEdit, UserPermissionsResponse, ) from fides.api.util.api_router import APIRouter from fides.common.api.scope_registry import ( USER_PERMISSION_ASSIGN_OWNERS, USER_PERMISSION_CREATE, USER_PERMISSION_READ, USER_PERMISSION_UPDATE, ) from fides.common.api.v1 import urn_registry as urls from fides.common.api.v1.urn_registry import V1_URL_PREFIX from fides.config import CONFIG router = APIRouter(tags=["User Permissions"], prefix=V1_URL_PREFIX) def validate_user_id(db: Session, user_id: str) -> FidesUser: """Get the user by id, otherwise throw a 404""" user = FidesUser.get_by(db, field="id", value=user_id) if not user: raise HTTPException( status_code=HTTP_404_NOT_FOUND, detail=f"No user found with id {user_id}." ) return user async def owner_role_permission_check( db: Session, roles: List[RoleRegistryEnum], authorization: str ) -> None: """Extra permissions check to assert that the token possesses the USER_PERMISSION_ASSIGN_OWNERS scope if attempting to make another user an owner. """ if OWNER in roles: await verify_oauth_client( security_scopes=SecurityScopes([USER_PERMISSION_ASSIGN_OWNERS]), authorization=authorization, db=db, ) @router.post( urls.USER_PERMISSIONS, dependencies=[Security(verify_oauth_client, scopes=[USER_PERMISSION_CREATE])], status_code=HTTP_201_CREATED, response_model=UserPermissionsResponse, ) async def create_user_permissions( *, db: Session = Depends(deps.get_db), user_id: str, authorization: str = Security(oauth2_scheme), permissions: UserPermissionsCreate, ) -> FidesUserPermissions: """Create user permissions with associated roles.""" user = validate_user_id(db, user_id) if user.permissions is not None: # type: ignore[attr-defined] raise HTTPException( status_code=HTTP_400_BAD_REQUEST, detail="This user already has permissions set.", ) await owner_role_permission_check(db, permissions.roles, authorization) if user.client: # Just in case - this shouldn't happen in practice. user.client.update(db=db, data=permissions.dict()) logger.info("Created FidesUserPermission record") return FidesUserPermissions.create( db=db, data={"user_id": user_id, **permissions.dict()} ) @router.put( urls.USER_PERMISSIONS, dependencies=[Security(verify_oauth_client, scopes=[USER_PERMISSION_UPDATE])], response_model=UserPermissionsResponse, ) async def METHOD_NAME( *, db: Session = Depends(deps.get_db), user_id: str, authorization: str = Security(oauth2_scheme), permissions: UserPermissionsEdit, ) -> FidesUserPermissions: """Update a user's role(s). The UI assigns one role at a time, but multiple roles are technically supported. Users inherit numerous scopes that are associated with their role(s). 
""" user = validate_user_id(db, user_id) logger.info("Updated FidesUserPermission record") await owner_role_permission_check(db, permissions.roles, authorization) if user.client: user.client.update(db=db, data={"roles": permissions.roles}) updated_user_perms = user.permissions.update( # type: ignore[attr-defined] db=db, data={"id": user.permissions.id, "user_id": user_id, "roles": permissions.roles}, # type: ignore[attr-defined] ) if user.systems and APPROVER in user.permissions.roles: # type: ignore[attr-defined] for system in user.systems.copy(): logger.info( "Approvers cannot be system managers. Removing user {} as system manager of {}.", user.id, system.fides_key, ) user.remove_as_system_manager(db, system) return updated_user_perms @router.get( urls.USER_PERMISSIONS, response_model=UserPermissionsResponse, ) async def get_user_permissions( *, db: Session = Depends(deps.get_db), authorization: str = Security(oauth2_scheme), current_user: FidesUser = Depends(get_current_user), user_id: str, ) -> Optional[FidesUserPermissions]: # A user is able to retrieve their own permissions. if current_user and current_user.id == user_id: # The root user is a special case because they aren't persisted in the database. if current_user.id == CONFIG.security.oauth_root_client_id: logger.info("Created FidesUserPermission for root user") return FidesUserPermissions( id=CONFIG.security.oauth_root_client_id, user_id=CONFIG.security.oauth_root_client_id, roles=CONFIG.security.root_user_roles, ) logger.info("Retrieved FidesUserPermission record for current user") return FidesUserPermissions.get_by(db, field="user_id", value=current_user.id) # To look up the permissions of another user, that user must exist and the current user must # have permission to read users. validate_user_id(db, user_id) await verify_oauth_client( security_scopes=SecurityScopes([USER_PERMISSION_READ]), authorization=authorization, db=db, ) logger.info("Retrieved FidesUserPermission record") return FidesUserPermissions.get_by(db, field="user_id", value=user_id)
null
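A hypothetical, framework-free sketch of the extra permission check the endpoint performs: handing out the OWNER role requires a dedicated scope on top of the regular update scope. The scope strings here are placeholders, not the real Fides registry values.

OWNER = "owner"
ASSIGN_OWNERS_SCOPE = "user-permission:assign_owners"

def check_owner_assignment(requested_roles, token_scopes):
    # Only tokens carrying the assign-owners scope may grant the OWNER role.
    if OWNER in requested_roles and ASSIGN_OWNERS_SCOPE not in token_scopes:
        raise PermissionError("token is not allowed to assign the owner role")

check_owner_assignment(["viewer"], {"user-permission:update"})                      # passes
check_owner_assignment(["owner"], {"user-permission:update", ASSIGN_OWNERS_SCOPE})  # passes
# check_owner_assignment(["owner"], {"user-permission:update"})                     # raises PermissionError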
test read entries order
#!/usr/bin/env python from typing import Optional from grr_response_core.lib import rdfvalue from grr_response_server.rdfvalues import objects as rdf_objects APIAuditEntry = rdf_objects.APIAuditEntry def _Date(date: str) -> rdfvalue.RDFDatetime: return rdfvalue.RDFDatetime.FromHumanReadable(date) class DatabaseTestEventsMixin(object): def _MakeEntry( self, http_request_path: str = "/test", router_method_name: str = "TestHandler", username: str = "user", response_code: APIAuditEntry.Code = APIAuditEntry.Code.OK, timestamp: Optional[rdfvalue.RDFDatetime] = None, ) -> APIAuditEntry: self.db.WriteGRRUser(username) return APIAuditEntry( http_request_path=http_request_path, router_method_name=router_method_name, username=username, response_code=response_code, timestamp=timestamp, ) def _WriteEntry(self, **kwargs) -> rdf_objects.APIAuditEntry: entry = self._MakeEntry(**kwargs) self.db.WriteAPIAuditEntry(entry) return entry def testWriteDoesNotMutate(self): entry = self._MakeEntry() copy = entry.Copy() self.db.WriteAPIAuditEntry(entry) self.assertEqual(entry, copy) def testWriteAuditEntry(self): entry = self._WriteEntry() entries = self.db.ReadAPIAuditEntries() self.assertLen(entries, 1) # We should not compare timestamps. entries[0].timestamp = None self.assertCountEqual(entries, [entry]) def testWriteEntriesWithMicrosecondDifference(self): # MySQL TIMESTAMP's valid range starts from 1970-01-01 00:00:01, # hence we have to set the time to at least 1 second from epoch. timestamp = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(1000000 + 1) entry1 = self._WriteEntry(username="user1", timestamp=timestamp) entry2 = self._WriteEntry(username="user2", timestamp=timestamp) timestamp = rdfvalue.RDFDatetime.FromMicrosecondsSinceEpoch(1000000 + 2) entry3 = self._WriteEntry(username="user1", timestamp=timestamp) entries = self.db.ReadAPIAuditEntries() self.assertCountEqual(entries, [entry1, entry2, entry3]) def testReadEntries(self): entry1 = self._WriteEntry() entry2 = self._WriteEntry(response_code=APIAuditEntry.Code.ERROR) entries = self.db.ReadAPIAuditEntries() self.assertLen(entries, 2) # We should not compare timestamps. entries[0].timestamp = None entries[1].timestamp = None self.assertCountEqual(entries, [entry1, entry2]) def METHOD_NAME(self): status_codes = [ APIAuditEntry.Code.OK, APIAuditEntry.Code.ERROR, APIAuditEntry.Code.FORBIDDEN, APIAuditEntry.Code.NOT_FOUND, APIAuditEntry.Code.NOT_IMPLEMENTED, ] for status_code in status_codes: self._WriteEntry(response_code=status_code) entries = self.db.ReadAPIAuditEntries() for entry, status_code in zip(entries, status_codes): self.assertEqual(entry.response_code, status_code) def testReadEntriesFilterUsername(self): entry = self._WriteEntry(username="foo") self._WriteEntry(username="bar") self._WriteEntry(username="foobar") entries = self.db.ReadAPIAuditEntries(username="foo") self.assertLen(entries, 1) # We should not compare timestamps. 
entries[0].timestamp = None self.assertCountEqual(entries, [entry]) def testReadEntriesFilterRouterMethodName(self): self._WriteEntry(router_method_name="foo") self._WriteEntry(router_method_name="bar") self._WriteEntry(router_method_name="foobar") entries = self.db.ReadAPIAuditEntries(router_method_names=["foo", "bar"]) router_method_names = [_.router_method_name for _ in entries] self.assertCountEqual(router_method_names, ["foo", "bar"]) def testReadEntriesFilterTimestamp(self): self._WriteEntry(response_code=APIAuditEntry.Code.OK) ok_timestamp = self.db.Now() self._WriteEntry(response_code=APIAuditEntry.Code.ERROR) error_timestamp = self.db.Now() self._WriteEntry(response_code=APIAuditEntry.Code.NOT_FOUND) not_found_timestamp = self.db.Now() entries = self.db.ReadAPIAuditEntries(min_timestamp=not_found_timestamp) self.assertEmpty(entries) entries = self.db.ReadAPIAuditEntries(max_timestamp=not_found_timestamp) self.assertLen(entries, 3) entries = self.db.ReadAPIAuditEntries(min_timestamp=ok_timestamp) self.assertEqual([e.response_code for e in entries], [APIAuditEntry.Code.ERROR, APIAuditEntry.Code.NOT_FOUND]) entries = self.db.ReadAPIAuditEntries(max_timestamp=error_timestamp) self.assertEqual([e.response_code for e in entries], [APIAuditEntry.Code.OK, APIAuditEntry.Code.ERROR]) entries = self.db.ReadAPIAuditEntries( min_timestamp=ok_timestamp, max_timestamp=error_timestamp) self.assertEqual([e.response_code for e in entries], [APIAuditEntry.Code.ERROR]) def testCountEntries(self): day = _Date("2019-02-02") self._WriteEntry(username="user1", timestamp=_Date("2019-02-02 00:00")) self._WriteEntry(username="user2", timestamp=_Date("2019-02-02 00:00")) self.assertEqual({ ("user1", day): 1, ("user2", day): 1 }, self.db.CountAPIAuditEntriesByUserAndDay()) self._WriteEntry(username="user1", timestamp=_Date("2019-02-02 23:59:59")) self.assertEqual({ ("user1", day): 2, ("user2", day): 1 }, self.db.CountAPIAuditEntriesByUserAndDay()) def testCountEntriesFilteredByTimestamp(self): self._WriteEntry(username="user", timestamp=_Date("2019-02-01")) self._WriteEntry(username="user1", timestamp=_Date("2019-02-02 00:12:00")) self._WriteEntry(username="user2", timestamp=_Date("2019-02-02 00:12:00")) self._WriteEntry(username="user1", timestamp=_Date("2019-02-02 00:12:01")) self._WriteEntry(username="user1", timestamp=_Date("2019-02-03")) self._WriteEntry(username="user1", timestamp=_Date("2019-02-04")) counts = self.db.CountAPIAuditEntriesByUserAndDay( min_timestamp=_Date("2019-02-02"), max_timestamp=_Date("2019-02-03 23:59:59")) self.assertEqual( { ("user1", _Date("2019-02-02")): 2, ("user2", _Date("2019-02-02")): 1, ("user1", _Date("2019-02-03")): 1, }, counts) def testDeleteUsersRetainsApiAuditEntries(self): self._WriteEntry(username="foo") self.db.DeleteGRRUser("foo") entries = self.db.ReadAPIAuditEntries(username="foo") self.assertLen(entries, 1) self.assertEqual(entries[0].username, "foo") def testWriteAndReadWithCommitTimestamp(self): entry = self._MakeEntry(username="foo") before = self.db.Now() self.db.WriteAPIAuditEntry(entry) after = self.db.Now() entries = self.db.ReadAPIAuditEntries(username="foo") self.assertLen(entries, 1) self.assertBetween(entries[0].timestamp, before, after) # This file is a test library and thus does not require a __main__ block.
null
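An illustrative pure-Python sketch of the per-user, per-day aggregation that CountAPIAuditEntriesByUserAndDay is expected to produce in the tests above (not the GRR database implementation):

from collections import Counter
from datetime import datetime

def count_by_user_and_day(entries):
    # entries: iterable of (username, datetime) pairs -> Counter keyed by (username, date)
    return Counter((username, ts.date()) for username, ts in entries)

entries = [
    ("user1", datetime(2019, 2, 2, 0, 0)),
    ("user2", datetime(2019, 2, 2, 0, 0)),
    ("user1", datetime(2019, 2, 2, 23, 59, 59)),
]
print(count_by_user_and_day(entries))  # user1 -> 2 and user2 -> 1 for 2019-02-02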
client
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Issue tracker manager functions.""" import json import jira from clusterfuzz._internal.config import db_config class IssueTrackerManager(object): """Issue tracker manager.""" def __init__(self, project_name): """"Construct an issue tracker manager instance based on parameters.""" self._client = None self.project_name = project_name @property def METHOD_NAME(self): """HTTP Client.""" if self._client is None: self._client = self._create_client() return self._client def _create_client(self): """Return a client object for querying the issue tracker.""" config = db_config.get() credentials = json.loads(config.jira_credentials) jira_url = config.jira_url jira_client = jira.JIRA( jira_url, auth=(credentials['username'], credentials['password'])) return jira_client def save(self, issue): """Save an issue.""" if issue.id == -1: return self._create(issue) return self._update(issue) def create(self): """Create an issue object locally.""" raw_fields = {'id': '-1', 'fields': {'components': [], 'labels': []}} # Create jira issue object jira_issue = jira.resources.Issue({}, jira.resilientsession.ResilientSession(), raw_fields) return jira_issue def _transition_issue_status_if_updated(self, issue): """Transitions the status of the issue if updated. Jira has a separate endpoint to transition status.""" # Brittle - we should be pulling the equivalent of 'new' from the policy. if issue.status == 'Open': return # This assumes the following: # 1. If issue.status is an instance of Resource, the value comes from # Jira directly and has not been changed. # 2. If issue.status is not an instance of Resource, the value is a # string and the issue status should be updated. # Brittle - we should be pulling the equivalent of 'new' from the policy. if not isinstance(issue.status, jira.resources.Resource): self.METHOD_NAME.transition_issue(issue.jira_issue, transition=issue.status) def _add_watchers(self, issue): """Add watchers to the ticket. Jira has a separate endpoint to add watchers.""" # Get watchers from LabelStore. watchers = list(issue.ccs) # Jira weirdness, update watchers this way. for watcher in watchers: self.METHOD_NAME.add_watcher(issue.jira_issue, watcher) def _get_issue_fields(self, issue): """Get issue fields to populate the ticket""" # Get labels from LabelStore. labels = list(issue.labels) # Get components from LabelStore. components = list(issue.components) fields = { 'summary': issue.title, 'description': issue.body, 'labels': labels, 'components': components, } if issue.assignee is not None: if isinstance(issue.assignee, jira.resources.Resource): assignee = {'name': issue.assignee.name} else: assignee = {'name': issue.assignee} fields['assignee'] = assignee # Again brittle - need to pull these strings from policy. 
if 'Critical - P1' in labels: fields['priority'] = {'name': 'Critical - P1'} elif 'Major - P2' in labels: fields['priority'] = {'name': 'Major - P2'} return fields def _create(self, issue): """Create an issue.""" fields = self._get_issue_fields(issue) jira_issue = self.METHOD_NAME.create_issue(fields=fields) self._add_watchers(jira_issue) issue.jira_issue = jira_issue def _update(self, issue): """Update an issue.""" update_fields = self._get_issue_fields(issue) self._transition_issue_status_if_updated(issue) self._add_watchers(issue) issue.jira_issue.update(fields=update_fields) def get_watchers(self, issue): """Retrieve list of watchers.""" if issue.id == -1: return [] watchlist = self.METHOD_NAME.watchers(issue) watchers = [] for watcher in watchlist.watchers: watchers.append(watcher.name) return watchers def get_issue(self, issue_id): """Retrieve an issue object with a specific id.""" issue = self.METHOD_NAME.issue(str(issue_id)) return issue def get_issue_count(self, query_string): """Return number of issues for a given query.""" issues = self.METHOD_NAME.search_issues(query_string) return len(issues) def get_issues(self, query_string, max_results=10000): """Return all issues for a given query.""" issues = self.METHOD_NAME.search_issues(query_string, maxResults=max_results) return issues
null
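The manager above builds its Jira client lazily: the connection is created only on first access and then cached. A minimal, library-agnostic sketch of that pattern, where the factory stand-in takes the place of the real jira.JIRA(url, auth=...) call:

class LazyClient:
    def __init__(self, factory):
        self._factory = factory   # e.g. lambda: jira.JIRA(url, auth=(user, password))
        self._client = None

    @property
    def client(self):
        # Build the client on first access, then reuse the cached instance.
        if self._client is None:
            self._client = self._factory()
        return self._client

lazy = LazyClient(lambda: object())
assert lazy.client is lazy.client  # the factory runs only once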
get object in list by class
from typing import Any import pytest from fastjsonschema.exceptions import JsonSchemaValueException from snuba.clickhouse.translators.snuba.mappers import ( ColumnToColumn, ColumnToFunction, ColumnToIPAddress, ColumnToMapping, ColumnToNullIf, ) from snuba.datasets.configuration.entity_builder import build_entity_from_config from snuba.query.expressions import Column, FunctionCall, Literal from tests.datasets.configuration.utils import ConfigurationTest def METHOD_NAME(object_list: Any, object_class: Any) -> Any: for obj in object_list: if isinstance(obj, object_class): return obj return None class TestEntityConfiguration(ConfigurationTest): def test_entity_loader_fixed_string(self) -> None: entity = build_entity_from_config( "tests/datasets/configuration/entity_with_fixed_string.yaml" ) columns = list(entity.get_data_model()) assert len(columns) == 3 assert columns[0].type.length == 420 # type: ignore assert columns[2].type.length == 69 # type: ignore def test_bad_configuration_broken_query_processor(self) -> None: with pytest.raises(JsonSchemaValueException): build_entity_from_config( "tests/datasets/configuration/broken_entity_bad_query_processor.yaml" ) def test_bad_configuration_broken_validator(self) -> None: with pytest.raises(JsonSchemaValueException): build_entity_from_config( "tests/datasets/configuration/broken_entity_positional_validator_args.yaml" ) def test_entity_loader_for_entity_with_column_mappers(self) -> None: pluggable_entity = build_entity_from_config( "tests/datasets/configuration/entity_with_column_mappers.yaml" ) column_mappers = pluggable_entity.get_all_storage_connections()[ 0 ].translation_mappers.columns # Check that ColumnToIpAdress mapper was successfully loaded from config column_to_ip_address = METHOD_NAME( column_mappers, ColumnToIPAddress ) assert isinstance(column_to_ip_address, ColumnToFunction) # Check that nested expressions were loaded correctly in ColumnToIPAddress assert len(column_to_ip_address.to_function_params) == 2 function_call = next( ( fc for fc in column_to_ip_address.to_function_params if isinstance(fc, FunctionCall) and fc.function_name == "IPv4NumToString" ), None, ) assert function_call is not None assert len(function_call.parameters) == 1 assert any(isinstance(param, Column) for param in function_call.parameters) # Check that ColumnToNullIf mapper was successfully loaded from config column_to_user_null_if = METHOD_NAME( column_mappers, ColumnToNullIf ) assert isinstance(column_to_user_null_if, ColumnToFunction) # Check that expressions were loaded correctly in ColumnToNullIf assert len(column_to_user_null_if.to_function_params) == 2 assert any( isinstance(param, Column) for param in column_to_user_null_if.to_function_params ) assert any( isinstance(param, Literal) for param in column_to_user_null_if.to_function_params ) # Check that other column mappers (which do not contain expressions) were loaded correctly column_to_mapping = METHOD_NAME(column_mappers, ColumnToMapping) assert column_to_mapping is not None assert column_to_mapping.from_col_name == "geo_country_code" column_to_column = METHOD_NAME(column_mappers, ColumnToColumn) assert column_to_column is not None assert column_to_column.from_col_name == "email" def test_entity_loader_no_custom_validators(self) -> None: pluggable_entity = build_entity_from_config( "tests/datasets/configuration/entity_no_custom_validators.yaml" ) entity_validators = set(pluggable_entity.get_validators()) assert len(entity_validators) == len(pluggable_entity._get_builtin_validators()) def 
test_entity_loader_join_relationships(self) -> None: pluggable_entity = build_entity_from_config( "tests/datasets/configuration/entity_join_relationships.yaml" ) relationships = pluggable_entity.get_all_join_relationships() assert len(relationships) == 1 rel = pluggable_entity.get_join_relationship("owns") assert rel is not None assert rel.rhs_entity.value == "events" assert rel.join_type.value == "LEFT" assert len(rel.columns) == 2 assert rel.columns[0][0] == "project_id" assert rel.columns[0][1] == "project_id" assert rel.columns[1][0] == "group_id" assert rel.columns[1][1] == "group_id" assert len(rel.equivalences) == 1 assert rel.equivalences[0][0] == "offset" assert rel.equivalences[0][1] == "offset"
null
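A standalone sketch of the helper these tests exercise: return the first object in a list that is an instance of a given class, or None when nothing matches.

from typing import Any, Iterable, Optional, Type

def first_instance_of(objects: Iterable[Any], cls: Type[Any]) -> Optional[Any]:
    for obj in objects:
        if isinstance(obj, cls):
            return obj
    return None

print(first_instance_of([1, "a", 2.5], float))  # 2.5
print(first_instance_of([1, 2, 3], str))        # None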
threshold
""" The RegressionFDR class implements the 'Knockoff' approach for controlling false discovery rates (FDR) in regression analysis. The knockoff approach does not require standard errors. Thus one application is to provide inference for parameter estimates that are not smooth functions of the data. For example, the knockoff approach can be used to do inference for parameter estimates obtained from the LASSO, of from stepwise variable selection. The knockoff approach controls FDR for parameter estimates that may be dependent, such as coefficient estimates in a multiple regression model. The knockoff approach is applicable whenever the test statistic can be computed entirely from x'y and x'x, where x is the design matrix and y is the vector of responses. Reference --------- Rina Foygel Barber, Emmanuel Candes (2015). Controlling the False Discovery Rate via Knockoffs. Annals of Statistics 43:5. https://candes.su.domains/publications/downloads/FDR_regression.pdf """ import numpy as np import pandas as pd from statsmodels.iolib import summary2 class RegressionFDR: """ Control FDR in a regression procedure. Parameters ---------- endog : array_like The dependent variable of the regression exog : array_like The independent variables of the regression regeffects : RegressionEffects instance An instance of a RegressionEffects class that can compute effect sizes for the regression coefficients. method : str The approach used to assess and control FDR, currently must be 'knockoff'. Returns ------- Returns an instance of the RegressionFDR class. The `fdr` attribute holds the estimated false discovery rates. Notes ----- This class Implements the knockoff method of Barber and Candes. This is an approach for controlling the FDR of a variety of regression estimation procedures, including correlation coefficients, OLS regression, OLS with forward selection, and LASSO regression. For other approaches to FDR control in regression, see the statsmodels.stats.multitest module. Methods provided in that module use Z-scores or p-values, and therefore require standard errors for the coefficient estimates to be available. The default method for constructing the augmented design matrix is the 'equivariant' approach, set `design_method='sdp'` to use an alternative approach involving semidefinite programming. See Barber and Candes for more information about both approaches. The sdp approach requires that the cvxopt package be installed. 
""" def __init__(self, endog, exog, regeffects, method="knockoff", **kwargs): if hasattr(exog, "columns"): self.xnames = exog.columns else: self.xnames = ["x%d" % j for j in range(exog.shape[1])] exog = np.asarray(exog) endog = np.asarray(endog) if "design_method" not in kwargs: kwargs["design_method"] = "equi" nobs, nvar = exog.shape if kwargs["design_method"] == "equi": exog1, exog2, _ = _design_knockoff_equi(exog) elif kwargs["design_method"] == "sdp": exog1, exog2, _ = _design_knockoff_sdp(exog) endog = endog - np.mean(endog) self.endog = endog self.exog = np.concatenate((exog1, exog2), axis=1) self.exog1 = exog1 self.exog2 = exog2 self.stats = regeffects.stats(self) unq, inv, cnt = np.unique(self.stats, return_inverse=True, return_counts=True) # The denominator of the FDR cc = np.cumsum(cnt) denom = len(self.stats) - cc + cnt denom[denom < 1] = 1 # The numerator of the FDR ii = np.searchsorted(unq, -unq, side='right') - 1 numer = cc[ii] numer[ii < 0] = 0 # The knockoff+ estimated FDR fdrp = (1 + numer) / denom # The knockoff estimated FDR fdr = numer / denom self.fdr = fdr[inv] self.fdrp = fdrp[inv] self._ufdr = fdr self._unq = unq df = pd.DataFrame(index=self.xnames) df["Stat"] = self.stats df["FDR+"] = self.fdrp df["FDR"] = self.fdr self.fdr_df = df def METHOD_NAME(self, tfdr): """ Returns the threshold statistic for a given target FDR. """ if np.min(self._ufdr) <= tfdr: return self._unq[self._ufdr <= tfdr][0] else: return np.inf def summary(self): summ = summary2.Summary() summ.add_title("Regression FDR results") summ.add_df(self.fdr_df) return summ def _design_knockoff_sdp(exog): """ Use semidefinite programming to construct a knockoff design matrix. Requires cvxopt to be installed. """ try: from cvxopt import solvers, matrix except ImportError: raise ValueError("SDP knockoff designs require installation of cvxopt") nobs, nvar = exog.shape # Standardize exog xnm = np.sum(exog**2, 0) xnm = np.sqrt(xnm) exog = exog / xnm Sigma = np.dot(exog.T, exog) c = matrix(-np.ones(nvar)) h0 = np.concatenate((np.zeros(nvar), np.ones(nvar))) h0 = matrix(h0) G0 = np.concatenate((-np.eye(nvar), np.eye(nvar)), axis=0) G0 = matrix(G0) h1 = 2 * Sigma h1 = matrix(h1) i, j = np.diag_indices(nvar) G1 = np.zeros((nvar*nvar, nvar)) G1[i*nvar + j, i] = 1 G1 = matrix(G1) solvers.options['show_progress'] = False sol = solvers.sdp(c, G0, h0, [G1], [h1]) sl = np.asarray(sol['x']).ravel() xcov = np.dot(exog.T, exog) exogn = _get_knmat(exog, xcov, sl) return exog, exogn, sl def _design_knockoff_equi(exog): """ Construct an equivariant design matrix for knockoff analysis. Follows the 'equi-correlated knockoff approach of equation 2.4 in Barber and Candes. Constructs a pair of design matrices exogs, exogn such that exogs is a scaled/centered version of the input matrix exog, exogn is another matrix of the same shape with cov(exogn) = cov(exogs), and the covariances between corresponding columns of exogn and exogs are as small as possible. """ nobs, nvar = exog.shape if nobs < 2*nvar: msg = "The equivariant knockoff can ony be used when n >= 2*p" raise ValueError(msg) # Standardize exog xnm = np.sum(exog**2, 0) xnm = np.sqrt(xnm) exog = exog / xnm xcov = np.dot(exog.T, exog) ev, _ = np.linalg.eig(xcov) evmin = np.min(ev) sl = min(2*evmin, 1) sl = sl * np.ones(nvar) exogn = _get_knmat(exog, xcov, sl) return exog, exogn, sl def _get_knmat(exog, xcov, sl): # Utility function, see equation 2.2 of Barber & Candes. 
nobs, nvar = exog.shape ash = np.linalg.inv(xcov) ash *= -np.outer(sl, sl) i, j = np.diag_indices(nvar) ash[i, j] += 2 * sl umat = np.random.normal(size=(nobs, nvar)) u, _ = np.linalg.qr(exog) umat -= np.dot(u, np.dot(u.T, umat)) umat, _ = np.linalg.qr(umat) ashr, xc, _ = np.linalg.svd(ash, 0) ashr *= np.sqrt(xc) ashr = ashr.T ex = (sl[:, None] * np.linalg.solve(xcov, exog.T)).T exogn = exog - ex + np.dot(umat, ashr) return exogn
null
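An illustrative numpy sketch of the knockoff+ threshold rule from Barber and Candès, not the statsmodels implementation above: choose the smallest magnitude t for which (1 + #{W_j <= -t}) / max(1, #{W_j >= t}) <= q.

import numpy as np

def knockoff_plus_threshold(w, q=0.1):
    # w: vector of knockoff statistics; returns the selection threshold (np.inf if none qualifies).
    for t in np.sort(np.abs(w[w != 0])):
        fdp_estimate = (1 + np.sum(w <= -t)) / max(1, np.sum(w >= t))
        if fdp_estimate <= q:
            return t
    return np.inf

w = np.array([3.1, -0.2, 2.4, -1.5, 0.7, 4.0, -0.1, 1.9])
t = knockoff_plus_threshold(w, q=0.25)
print(t, np.flatnonzero(w >= t))  # selected coefficients are those with W_j >= t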
mode
import torch from torch.distributions import constraints from torch.distributions.categorical import Categorical from torch.distributions.distribution import Distribution __all__ = ["OneHotCategorical", "OneHotCategoricalStraightThrough"] class OneHotCategorical(Distribution): r""" Creates a one-hot categorical distribution parameterized by :attr:`probs` or :attr:`logits`. Samples are one-hot coded vectors of size ``probs.size(-1)``. .. note:: The `probs` argument must be non-negative, finite and have a non-zero sum, and it will be normalized to sum to 1 along the last dimension. :attr:`probs` will return this normalized value. The `logits` argument will be interpreted as unnormalized log probabilities and can therefore be any real number. It will likewise be normalized so that the resulting probabilities sum to 1 along the last dimension. :attr:`logits` will return this normalized value. See also: :func:`torch.distributions.Categorical` for specifications of :attr:`probs` and :attr:`logits`. Example:: >>> # xdoctest: +IGNORE_WANT("non-deterministic") >>> m = OneHotCategorical(torch.tensor([ 0.25, 0.25, 0.25, 0.25 ])) >>> m.sample() # equal probability of 0, 1, 2, 3 tensor([ 0., 0., 0., 1.]) Args: probs (Tensor): event probabilities logits (Tensor): event log probabilities (unnormalized) """ arg_constraints = {"probs": constraints.simplex, "logits": constraints.real_vector} support = constraints.one_hot has_enumerate_support = True def __init__(self, probs=None, logits=None, validate_args=None): self._categorical = Categorical(probs, logits) batch_shape = self._categorical.batch_shape event_shape = self._categorical.param_shape[-1:] super().__init__(batch_shape, event_shape, validate_args=validate_args) def expand(self, batch_shape, _instance=None): new = self._get_checked_instance(OneHotCategorical, _instance) batch_shape = torch.Size(batch_shape) new._categorical = self._categorical.expand(batch_shape) super(OneHotCategorical, new).__init__( batch_shape, self.event_shape, validate_args=False ) new._validate_args = self._validate_args return new def _new(self, *args, **kwargs): return self._categorical._new(*args, **kwargs) @property def _param(self): return self._categorical._param @property def probs(self): return self._categorical.probs @property def logits(self): return self._categorical.logits @property def mean(self): return self._categorical.probs @property def METHOD_NAME(self): probs = self._categorical.probs METHOD_NAME = probs.argmax(axis=-1) return torch.nn.functional.one_hot(METHOD_NAME, num_classes=probs.shape[-1]).to(probs) @property def variance(self): return self._categorical.probs * (1 - self._categorical.probs) @property def param_shape(self): return self._categorical.param_shape def sample(self, sample_shape=torch.Size()): sample_shape = torch.Size(sample_shape) probs = self._categorical.probs num_events = self._categorical._num_events indices = self._categorical.sample(sample_shape) return torch.nn.functional.one_hot(indices, num_events).to(probs) def log_prob(self, value): if self._validate_args: self._validate_sample(value) indices = value.max(-1)[1] return self._categorical.log_prob(indices) def entropy(self): return self._categorical.entropy() def enumerate_support(self, expand=True): n = self.event_shape[0] values = torch.eye(n, dtype=self._param.dtype, device=self._param.device) values = values.view((n,) + (1,) * len(self.batch_shape) + (n,)) if expand: values = values.expand((n,) + self.batch_shape + (n,)) return values class 
OneHotCategoricalStraightThrough(OneHotCategorical): r""" Creates a reparameterizable :class:`OneHotCategorical` distribution based on the straight- through gradient estimator from [1]. [1] Estimating or Propagating Gradients Through Stochastic Neurons for Conditional Computation (Bengio et al, 2013) """ has_rsample = True def rsample(self, sample_shape=torch.Size()): samples = self.sample(sample_shape) probs = self._categorical.probs # cached via @lazy_property return samples + (probs - probs.detach())
null
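A short usage sketch, assuming a PyTorch version that exposes the mode property shown above: the mode of a OneHotCategorical is the one-hot encoding of the most probable category.

import torch
from torch.distributions import OneHotCategorical

d = OneHotCategorical(probs=torch.tensor([0.1, 0.6, 0.3]))
print(d.sample())  # a one-hot vector drawn with the given probabilities
print(d.mode)      # tensor([0., 1., 0.]) -- argmax of probs, one-hot encoded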
get matches
# -*- coding: utf-8 -*- import io import logging import re from babelfish import Language, language_converters from guessit import guessit try: from lxml import etree except ImportError: try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree from requests import Session from zipfile import ZipFile from . import Provider from ..exceptions import ProviderError from ..matches import guess_matches from ..subtitle import Subtitle, fix_line_ending from ..video import Episode logger = logging.getLogger(__name__) class PodnapisiSubtitle(Subtitle): """Podnapisi Subtitle.""" provider_name = 'podnapisi' def __init__(self, language, hearing_impaired, page_link, pid, releases, title, season=None, episode=None, year=None): super(PodnapisiSubtitle, self).__init__(language, hearing_impaired=hearing_impaired, page_link=page_link) self.pid = pid self.releases = releases self.title = title self.season = season self.episode = episode self.year = year @property def id(self): return self.pid @property def info(self): return ' '.join(self.releases) or self.pid def METHOD_NAME(self, video): matches = guess_matches(video, { 'title': self.title, 'year': self.year, 'season': self.season, 'episode': self.episode }) video_type = 'episode' if isinstance(video, Episode) else 'movie' for release in self.releases: matches |= guess_matches(video, guessit(release, {'type': video_type})) return matches class PodnapisiProvider(Provider): """Podnapisi Provider.""" languages = ({Language('por', 'BR'), Language('srp', script='Latn')} | {Language.fromalpha2(l) for l in language_converters['alpha2'].codes}) server_url = 'https://www.podnapisi.net/subtitles/' subtitle_class = PodnapisiSubtitle def __init__(self): self.session = None def initialize(self): self.session = Session() self.session.headers['User-Agent'] = self.user_agent def terminate(self): self.session.close() def query(self, language, keyword, season=None, episode=None, year=None): # set parameters, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164#p212652 params = {'sXML': 1, 'sL': str(language), 'sK': keyword} is_episode = False if season and episode: is_episode = True params['sTS'] = season params['sTE'] = episode if year: params['sY'] = year # loop over paginated results logger.info('Searching subtitles %r', params) subtitles = [] pids = set() while True: # query the server r = self.session.get(self.server_url + 'search/old', params=params, timeout=10) r.raise_for_status() xml = etree.fromstring(r.content) # exit if no results if not int(xml.find('pagination/results').text): logger.debug('No subtitles found') break # loop over subtitles for subtitle_xml in xml.findall('subtitle'): # read xml elements pid = subtitle_xml.find('pid').text # ignore duplicates, see http://www.podnapisi.net/forum/viewtopic.php?f=62&t=26164&start=10#p213321 if pid in pids: continue language = Language.fromietf(subtitle_xml.find('language').text) hearing_impaired = 'n' in (subtitle_xml.find('flags').text or '') page_link = subtitle_xml.find('url').text releases = [] if subtitle_xml.find('release').text: for release in subtitle_xml.find('release').text.split(): release = re.sub(r'\.+$', '', release) # remove trailing dots release = ''.join(filter(lambda x: ord(x) < 128, release)) # remove non-ascii characters releases.append(release) title = subtitle_xml.find('title').text season = int(subtitle_xml.find('tvSeason').text) episode = int(subtitle_xml.find('tvEpisode').text) year = int(subtitle_xml.find('year').text) if is_episode: 
subtitle = self.subtitle_class(language, hearing_impaired, page_link, pid, releases, title, season=season, episode=episode, year=year) else: subtitle = self.subtitle_class(language, hearing_impaired, page_link, pid, releases, title, year=year) logger.debug('Found subtitle %r', subtitle) subtitles.append(subtitle) pids.add(pid) # stop on last page if int(xml.find('pagination/current').text) >= int(xml.find('pagination/count').text): break # increment current page params['page'] = int(xml.find('pagination/current').text) + 1 logger.debug('Getting page %d', params['page']) return subtitles def list_subtitles(self, video, languages): season = episode = None if isinstance(video, Episode): titles = [video.series] + video.alternative_series season = video.season episode = video.episode else: titles = [video.title] + video.alternative_titles for title in titles: subtitles = [s for l in languages for s in self.query(l, title, season=season, episode=episode, year=video.year)] if subtitles: return subtitles return [] def download_subtitle(self, subtitle): # download as a zip logger.info('Downloading subtitle %r', subtitle) r = self.session.get(self.server_url + subtitle.pid + '/download', params={'container': 'zip'}, timeout=10) r.raise_for_status() # open the zip with ZipFile(io.BytesIO(r.content)) as zf: if len(zf.namelist()) > 1: raise ProviderError('More than one file to unzip') subtitle.content = fix_line_ending(zf.read(zf.namelist()[0]))
null
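A standalone sketch of the release-name cleanup performed while parsing results above: strip trailing dots and drop non-ASCII characters before the names are used for matching.

import re

def clean_release(release: str) -> str:
    release = re.sub(r"\.+$", "", release)                 # remove trailing dots
    return "".join(ch for ch in release if ord(ch) < 128)  # keep ASCII characters only

print(clean_release("Show.S01E02.720p..."))  # Show.S01E02.720p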
addcrypted2
# -*- coding: utf-8 -*- import os from base64 import standard_b64decode from functools import wraps from urllib.parse import unquote from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes import flask from flask.json import jsonify from pyload.core.api import Destination from pyload.core.utils.convert import to_str from pyload.core.utils.misc import eval_js #: url_prefix here is intentional since it should not be affected by path prefix bp = flask.Blueprint("flash", __name__, url_prefix="/") #: decorator def local_check(func): @wraps(func) def wrapper(*args, **kwargs): remote_addr = flask.request.environ.get("REMOTE_ADDR", "0") http_host = flask.request.environ.get("HTTP_HOST", "0") if remote_addr in ("127.0.0.1", "::ffff:127.0.0.1", "::1", "localhost") or http_host in ( "127.0.0.1:9666", "[::1]:9666", ): return func(*args, **kwargs) else: return "Forbidden", 403 return wrapper @bp.after_request def add_cors(response): response.headers.update({ 'Access-Control-Max-Age': 1800, 'Access-Control-Allow-Origin': "*", 'Access-Control-Allow-Methods': "OPTIONS, GET, POST" }) return response @bp.route("/flash/", methods=["GET", "POST"], endpoint="index") @bp.route("/flash/<id>", methods=["GET", "POST"], endpoint="index") @local_check def index(id="0"): return "JDownloader\r\n" @bp.route("/flash/add", methods=["POST"], endpoint="add") @local_check def add(): package = flask.request.form.get( "package", flask.request.form.get("source", flask.request.form.get("referer")) ) urls = [url.strip() for url in unquote(flask.request.form["urls"]).replace(' ', '\n').split("\n") if url.strip()] if not urls: return "failed no urls\r\n", 500 pack_password = flask.request.form.get("passwords") api = flask.current_app.config["PYLOAD_API"] try: if package: pack = api.add_package(package, urls, Destination.COLLECTOR) else: pack = api.generate_and_add_packages(urls, Destination.COLLECTOR) except Exception as exc: return "failed " + str(exc) + "\r\n", 500 if pack_password: api.set_package_data(pack, {"password": pack_password}) return "success\r\n" @bp.route("/flash/addcrypted", methods=["POST"], endpoint="addcrypted") @local_check def addcrypted(): api = flask.current_app.config["PYLOAD_API"] package = flask.request.form.get( "package", flask.request.form.get("source", flask.request.form.get("referer")) ) dl_path = api.get_config_value("general", "storage_folder") dlc_path = os.path.join( dl_path, package.replace("/", "").replace("\\", "").replace(":", "") + ".dlc" ) dlc = flask.request.form["crypted"].replace(" ", "+") with open(dlc_path, mode="wb") as fp: fp.write(dlc) pack_password = flask.request.form.get("passwords") try: pack = api.add_package(package, [dlc_path], Destination.COLLECTOR) except Exception as exc: return "failed " + str(exc) + "\r\n", 500 else: if pack_password: api.set_package_data(pack, {"password": pack_password}) return "success\r\n" @bp.route("/flash/addcrypted2", methods=["POST"], endpoint="addcrypted2") @local_check def METHOD_NAME(): package = flask.request.form.get( "package", flask.request.form.get("source", flask.request.form.get("referer")) ) crypted = flask.request.form["crypted"] jk = flask.request.form["jk"] pack_password = flask.request.form.get("passwords") crypted = standard_b64decode(unquote(crypted.replace(" ", "+"))) jk = eval_js(f"{jk} f()") try: IV = key = bytes.fromhex(jk) except Exception: return "Could not decrypt key", 500 cipher = Cipher( algorithms.AES(key), modes.CBC(IV), 
backend=default_backend() ) decryptor = cipher.decryptor() decrypted = decryptor.update(crypted) + decryptor.finalize() urls = to_str(decrypted).replace("\x00", "").replace("\r", "").split("\n") urls = [url for url in urls if url.strip()] api = flask.current_app.config["PYLOAD_API"] try: if package: pack = api.add_package(package, urls, Destination.COLLECTOR) else: pack = api.generate_and_add_packages(urls, Destination.COLLECTOR) except Exception as exc: return "failed " + str(exc) + "\r\n", 500 else: if pack_password: api.set_package_data(pack, {"password": pack_password}) return "success\r\n" @bp.route("/flashgot", methods=["POST"], endpoint="flashgot") @bp.route("/flashgot_pyload", methods=["POST"], endpoint="flashgot") @local_check def flashgot(): if flask.request.referrer not in ( "http://localhost:9666/flashgot", "http://127.0.0.1:9666/flashgot", ): flask.abort(500) package = flask.request.form.get("package") urls = [url for url in flask.request.form["urls"].split("\n") if url.strip()] # folder = flask.request.form.get('dir', None) autostart = int(flask.request.form.get("autostart", 0)) api = flask.current_app.config["PYLOAD_API"] if package: api.add_package(package, urls, Destination.QUEUE if autostart else Destination.COLLECTOR) else: api.generate_and_add_packages(urls, Destination.QUEUE if autostart else Destination.COLLECTOR) @bp.route("/crossdomain.xml", endpoint="crossdomain") @local_check def crossdomain(): rep = '<?xml version="1.0"?>\n' rep += '<!DOCTYPE cross-domain-policy SYSTEM "http://www.macromedia.com/xml/dtds/cross-domain-policy.dtd">\n' rep += "<cross-domain-policy>\n" rep += '<allow-access-from domain="*" />\n' rep += "</cross-domain-policy>" return rep @bp.route("/flash/checkSupportForUrl", methods=["POST"], endpoint="checksupport") @local_check def checksupport(): api = flask.current_app.config["PYLOAD_API"] url = flask.request.form["url"] res = api.check_urls([url]) supported = not res[0][1] is None return str(supported).lower() @bp.route("/jdcheck.js", endpoint="jdcheck") @local_check def jdcheck(): rep = "jdownloader=true;\r\n" rep += "var version='42707';\r\n" return rep
null
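A minimal round-trip sketch of the AES-CBC step used by addcrypted2, with a made-up key/IV; in the real handler both are derived from evaluating the submitted jk JavaScript snippet.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

key = iv = bytes.fromhex("00112233445566778899aabbccddeeff")  # placeholder key/IV
cipher = Cipher(algorithms.AES(key), modes.CBC(iv), backend=default_backend())

def decrypt_urls(crypted: bytes) -> list:
    decryptor = cipher.decryptor()
    plain = decryptor.update(crypted) + decryptor.finalize()
    text = plain.decode(errors="ignore").replace("\x00", "").replace("\r", "")
    return [url for url in text.split("\n") if url.strip()]

blob = b"http://example.com/a\nhttp://example.com/b"
blob += b"\x00" * (-len(blob) % 16)  # zero-pad to the AES block size
encryptor = cipher.encryptor()
print(decrypt_urls(encryptor.update(blob) + encryptor.finalize()))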
validate fileupload notification ttl
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from argparse import ArgumentError from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError from azure.cli.command_modules.iot.custom import SimpleAccessRights, iot_central_app_get def validate_policy_permissions(ns): if ns.permissions is None or ns.permissions == []: raise ArgumentError(None, 'the following arguments are required: --permissions') allowed = [x.value.lower() for x in SimpleAccessRights] for p in ns.permissions: if p not in allowed: raise ValueError('Unrecognized permission "{}"'.format(p)) def validate_retention_days(ns): if ns.retention_day and ns.retention_day not in range(1, 8, 1): raise ArgumentError(None, 'Please specify the retention time for device-to-cloud messages' ' from 1 to 7 days only.') def validate_fileupload_notification_max_delivery_count(ns): if (ns.fileupload_notification_max_delivery_count and ns.fileupload_notification_max_delivery_count not in range(1, 101, 1)): raise ArgumentError(None, 'Please specify the number of retry from 1 to 100 only to deliver a message') def METHOD_NAME(ns): if (ns.fileupload_notification_ttl and ns.fileupload_notification_ttl not in range(1, 49, 1)): raise ArgumentError(None, 'Please specify the period of time from 1 to 48 hours for which a file upload' ' notification is available to consume before it is expired.') def validate_fileupload_sas_ttl(ns): if (ns.fileupload_sas_ttl and ns.fileupload_sas_ttl not in range(1, 25, 1)): raise ArgumentError(None, 'Please specify the period of time from 1 to 24 hours for which the SAS URI' ' generated by IoT Hub for file upload is valid.') def validate_feedback_ttl(ns): if (ns.feedback_ttl and ns.feedback_ttl not in range(1, 49, 1)): raise ArgumentError(None, 'Please specify the period of time from 1 to 48 hours for which a message is' ' available to consume before it is expired by the IoT hub.') def validate_feedback_lock_duration(ns): if (ns.feedback_lock_duration and ns.feedback_lock_duration not in range(5, 301, 1)): raise InvalidArgumentValueError('Please specify the feedback lock duration from 5 to 300 seconds only.') def validate_fileupload_notification_lock_duration(ns): if (ns.fileupload_notification_lock_duration and ns.fileupload_notification_lock_duration not in range(5, 301, 1)): raise InvalidArgumentValueError('Please specify the notification lock duration from 5 to 300 seconds only.') def validate_feedback_max_delivery_count(ns): if (ns.feedback_max_delivery_count and ns.feedback_max_delivery_count not in range(1, 101, 1)): raise ArgumentError(None, 'Please specify the number of retry from 1 to 100 only to deliver a message' ' on the feedback queue.') def validate_c2d_max_delivery_count(ns): if (ns.c2d_max_delivery_count and ns.c2d_max_delivery_count not in range(1, 101, 1)): raise ArgumentError(None, 'Please specify the maximum delivery count from 1 to 100 only for cloud-to-device' ' messages in the device queue.') def validate_c2d_ttl(ns): if (ns.c2d_ttl and ns.c2d_ttl not in range(1, 49, 1)): raise ArgumentError(None, 'Please specify the default time from 1 to 48 hours to live for cloud-to-device' ' messages in the device queue.') def validate_private_endpoint_connection_id(cmd, namespace): ns = 
namespace if ns.connection_id: id_parts = ns.connection_id.split('/') ns.private_endpoint_connection_name = id_parts[-1] ns.account_name = id_parts[-3] ns.resource_group_name = id_parts[-7] del ns.connection_id if ns.account_name and not ns.resource_group_name: ns.resource_group_name = iot_central_app_get(cmd.cli_ctx, ns.account_name).resourceGroup if not all([ns.account_name, ns.resource_group_name, ns.private_endpoint_connection_name]): raise ArgumentUsageError('incorrect usage: [--id ID | --name NAME --account-name NAME]')
null
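These validators all share the same bounds-check shape. A generic sketch of that pattern, with illustrative argument and message strings rather than the actual Azure CLI namespace attributes:

def validate_in_range(value, low, high, message):
    # Skip the check entirely when the argument was not supplied.
    if value is not None and value not in range(low, high + 1):
        raise ValueError(message)

validate_in_range(24, 1, 48, "file upload notification TTL must be between 1 and 48 hours")  # ok
# validate_in_range(49, 1, 48, "...")  # raises ValueError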
affected channels groups
from pubnub import utils from pubnub.endpoints.endpoint import Endpoint from pubnub.errors import PNERR_MESSAGE_MISSING from pubnub.exceptions import PubNubException from pubnub.models.consumer.pubsub import PNPublishResult from pubnub.enums import HttpMethod, PNOperationType from pubnub.endpoints.mixins import TimeTokenOverrideMixin class Publish(Endpoint, TimeTokenOverrideMixin): # /publish/<pub_key>/<sub_key>/<signature>/<channel>/<callback>/<message>[?argument(s)] PUBLISH_GET_PATH = "/publish/%s/%s/0/%s/%s/%s" PUBLISH_POST_PATH = "/publish/%s/%s/0/%s/%s" def __init__(self, pubnub): super(Publish, self).__init__(pubnub) self._channel = None self._message = None self._should_store = None self._use_post = None self._meta = None self._replicate = None self._ptto = None self._ttl = None def channel(self, channel): self._channel = str(channel) return self def message(self, message): self._message = message return self def use_post(self, use_post): self._use_post = bool(use_post) return self def use_compression(self, compress=True): self._use_compression = bool(compress) return self def is_compressable(self): return True def should_store(self, should_store): self._should_store = bool(should_store) return self def meta(self, meta): self._meta = meta return self def ttl(self, ttl): self._ttl = ttl return self def build_data(self): if self._use_post is True: cipher = self.pubnub.config.cipher_key if cipher is not None: return '"' + self.pubnub.config.crypto.encrypt(cipher, utils.write_value_as_string(self._message)) + '"' else: return utils.write_value_as_string(self._message) else: return None def encoded_params(self): if self._meta: return { "meta": utils.url_write(self._meta) } else: return {} def custom_params(self): params = TimeTokenOverrideMixin.custom_params(self) if self._ttl: params['ttl'] = self._ttl if self._meta: params['meta'] = utils.write_value_as_string(self._meta) if self._should_store is not None: if self._should_store: params["store"] = "1" else: params["store"] = "0" # REVIEW: should auth key be assigned here? 
if self.pubnub.config.auth_key is not None: params["auth"] = utils.url_encode(self.pubnub.config.auth_key) return params def build_path(self): if self._use_post: return Publish.PUBLISH_POST_PATH % (self.pubnub.config.publish_key, self.pubnub.config.subscribe_key, utils.url_encode(self._channel), 0) else: cipher = self.pubnub.config.cipher_key stringified_message = utils.write_value_as_string(self._message) if cipher is not None: stringified_message = '"' + self.pubnub.config.crypto.encrypt(cipher, stringified_message) + '"' stringified_message = utils.url_encode(stringified_message) return Publish.PUBLISH_GET_PATH % (self.pubnub.config.publish_key, self.pubnub.config.subscribe_key, utils.url_encode(self._channel), 0, stringified_message) def http_method(self): if self._use_post is True: return HttpMethod.POST else: return HttpMethod.GET def validate_params(self): self.validate_channel() if self._message is None: raise PubNubException(pn_error=PNERR_MESSAGE_MISSING) self.validate_subscribe_key() self.validate_publish_key() def create_response(self, envelope): """ :param envelope: an already serialized json response :return: """ if envelope is None: return None timetoken = int(envelope[2]) res = PNPublishResult(envelope, timetoken) return res def is_auth_required(self): return True def affected_channels(self): return None def METHOD_NAME(self): return None def request_timeout(self): return self.pubnub.config.non_subscribe_request_timeout def connect_timeout(self): return self.pubnub.config.connect_timeout def operation_type(self): return PNOperationType.PNPublishOperation def name(self): return "Publish"
null
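The Publish endpoint above assembles its GET path by URL-encoding the channel and the JSON-stringified message into PUBLISH_GET_PATH. A minimal standalone sketch of that assembly using only the standard library (urllib.parse.quote stands in for utils.url_encode, json.dumps for write_value_as_string; the demo keys and channel are made up):

import json
from urllib.parse import quote

PUBLISH_GET_PATH = "/publish/%s/%s/0/%s/%s/%s"

def build_get_path(pub_key, sub_key, channel, message):
    # stringify the payload roughly the way write_value_as_string would (JSON)
    stringified = json.dumps(message)
    return PUBLISH_GET_PATH % (
        pub_key,
        sub_key,
        quote(channel, safe=""),
        0,
        quote(stringified, safe=""),
    )

print(build_get_path("demo-pub", "demo-sub", "my_channel", {"msg": "hello"}))
# -> /publish/demo-pub/demo-sub/0/my_channel/0/%7B%22msg%22%3A%20%22hello%22%7D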
process
# coding: utf-8 # # Copyright 2021 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Beam DoFns and PTransforms to provide validation of topic models.""" from __future__ import annotations from core.domain import topic_domain from core.jobs import job_utils from core.jobs.decorators import validation_decorators from core.jobs.transforms.validation import base_validation from core.jobs.types import model_property from core.jobs.types import topic_validation_errors from core.platform import models import apache_beam as beam from typing import Iterator, List, Optional, Tuple, Type, Union MYPY = False if MYPY: # pragma: no cover from mypy_imports import topic_models (topic_models,) = models.Registry.import_models([models.Names.TOPIC]) # TODO(#15613): Here we use MyPy ignore because the incomplete typing of # apache_beam library and absences of stubs in Typeshed, forces MyPy to # assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class # cannot subclass 'DoFn' (has type 'Any')), we added an ignore here. @validation_decorators.AuditsExisting(topic_models.TopicModel) class ValidateCanonicalNameMatchesNameInLowercase(beam.DoFn): # type: ignore[misc] """DoFn to validate canonical name matching with lower case name.""" def METHOD_NAME( self, input_model: topic_models.TopicModel ) -> Iterator[topic_validation_errors.ModelCanonicalNameMismatchError]: """Function that validate that canonical name of the model is same as name of the model in lowercase. Args: input_model: datastore_services.Model. TopicModel to validate. Yields: ModelCanonicalNameMismatchError. An error class for name mismatched models. """ model = job_utils.clone_model(input_model) name = model.name if name.lower() != model.canonical_name: yield topic_validation_errors.ModelCanonicalNameMismatchError(model) @validation_decorators.AuditsExisting( topic_models.TopicSnapshotMetadataModel) class ValidateTopicSnapshotMetadataModel( base_validation.BaseValidateCommitCmdsSchema[ topic_models.TopicSnapshotMetadataModel ] ): """Overrides _get_change_domain_class for TopicSnapshotMetadataModel.""" def _get_change_domain_class( self, input_model: topic_models.TopicSnapshotMetadataModel # pylint: disable=unused-argument ) -> Type[topic_domain.TopicChange]: """Returns a change domain class. Args: input_model: datastore_services.Model. Entity to validate. Returns: topic_domain.TopicChange. A domain object class for the changes made by commit commands of the model. """ return topic_domain.TopicChange @validation_decorators.AuditsExisting( topic_models.TopicRightsSnapshotMetadataModel) class ValidateTopicRightsSnapshotMetadataModel( base_validation.BaseValidateCommitCmdsSchema[ topic_models.TopicRightsSnapshotMetadataModel ] ): """Overrides _get_change_domain_class for TopicRightsSnapshotMetadataModel. """ def _get_change_domain_class( self, input_model: topic_models.TopicRightsSnapshotMetadataModel # pylint: disable=unused-argument ) -> Type[topic_domain.TopicRightsChange]: """Returns a change domain class. 
Args: input_model: datastore_services.Model. Entity to validate. Returns: topic_domain.TopicRightsChange. A domain object class for the changes made by commit commands of the model. """ return topic_domain.TopicRightsChange @validation_decorators.AuditsExisting(topic_models.TopicCommitLogEntryModel) class ValidateTopicCommitLogEntryModel( base_validation.BaseValidateCommitCmdsSchema[ topic_models.TopicCommitLogEntryModel ] ): """Overrides _get_change_domain_class for TopicCommitLogEntryModel. """ # Here we use MyPy ignore because the signature of this method doesn't # match with super class's _get_change_domain_class() method. def _get_change_domain_class( # type: ignore[override] self, input_model: topic_models.TopicCommitLogEntryModel ) -> Optional[ Type[Union[topic_domain.TopicRightsChange, topic_domain.TopicChange]] ]: """Returns a change domain class. Args: input_model: datastore_services.Model. Entity to validate. Returns: topic_domain.TopicRightsChange|topic_domain.TopicChange. A domain object class for the changes made by commit commands of the model. """ model = job_utils.clone_model(input_model) if model.id.startswith('rights'): return topic_domain.TopicRightsChange elif model.id.startswith('topic'): return topic_domain.TopicChange else: return None @validation_decorators.RelationshipsOf(topic_models.TopicSummaryModel) def topic_summary_model_relationships( model: Type[topic_models.TopicSummaryModel] ) -> Iterator[ Tuple[ model_property.PropertyType, List[ Type[Union[topic_models.TopicModel, topic_models.TopicRightsModel]] ] ] ]: """Yields how the properties of the model relates to the ID of others.""" yield model.id, [topic_models.TopicModel] yield model.id, [topic_models.TopicRightsModel]
null
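To see how such a DoFn is exercised end to end, here is a minimal Beam pipeline (assuming apache-beam is installed) that applies the same canonical-name check to plain dicts; the dict records and the error payload are stand-ins for Oppia's TopicModel and error classes:

import apache_beam as beam

class ValidateCanonicalName(beam.DoFn):
    def process(self, topic):
        # emit an error record only when canonical_name != name.lower()
        if topic["name"].lower() != topic["canonical_name"]:
            yield {"error": "canonical name mismatch", "id": topic["id"]}

topics = [
    {"id": "t1", "name": "Fractions", "canonical_name": "fractions"},
    {"id": "t2", "name": "Algebra", "canonical_name": "algebra-basics"},
]

with beam.Pipeline() as p:
    (p
     | beam.Create(topics)
     | beam.ParDo(ValidateCanonicalName())
     | beam.Map(print))  # only the t2 record is printed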
rmse
""" A demo for multi-output regression ================================== The demo is adopted from scikit-learn: https://scikit-learn.org/stable/auto_examples/ensemble/plot_random_forest_regression_multioutput.html#sphx-glr-auto-examples-ensemble-plot-random-forest-regression-multioutput-py See :doc:`/tutorials/multioutput` for more information. .. note:: The feature is experimental. For the `multi_output_tree` strategy, many features are missing. """ import argparse from typing import Dict, List, Tuple import numpy as np from matplotlib import pyplot as plt import xgboost as xgb def plot_predt(y: np.ndarray, y_predt: np.ndarray, name: str) -> None: s = 25 plt.scatter(y[:, 0], y[:, 1], c="navy", s=s, edgecolor="black", label="data") plt.scatter( y_predt[:, 0], y_predt[:, 1], c="cornflowerblue", s=s, edgecolor="black" ) plt.xlim([-1, 2]) plt.ylim([-1, 2]) plt.show() def gen_circle() -> Tuple[np.ndarray, np.ndarray]: "Generate a sample dataset that y is a 2 dim circle." rng = np.random.RandomState(1994) X = np.sort(200 * rng.rand(100, 1) - 100, axis=0) y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T y[::5, :] += 0.5 - rng.rand(20, 2) y = y - y.min() y = y / y.max() return X, y def rmse_model(plot_result: bool, strategy: str) -> None: """Draw a circle with 2-dim coordinate as target variables.""" X, y = gen_circle() # Train a regressor on it reg = xgb.XGBRegressor( tree_method="hist", n_estimators=128, n_jobs=16, max_depth=8, multi_strategy=strategy, subsample=0.6, ) reg.fit(X, y, eval_set=[(X, y)]) y_predt = reg.predict(X) if plot_result: plot_predt(y, y_predt, "multi") def custom_rmse_model(plot_result: bool, strategy: str) -> None: """Train using Python implementation of Squared Error.""" def gradient(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray: """Compute the gradient squared error.""" y = dtrain.get_label().reshape(predt.shape) return predt - y def hessian(predt: np.ndarray, dtrain: xgb.DMatrix) -> np.ndarray: """Compute the hessian for squared error.""" return np.ones(predt.shape) def squared_log( predt: np.ndarray, dtrain: xgb.DMatrix ) -> Tuple[np.ndarray, np.ndarray]: grad = gradient(predt, dtrain) hess = hessian(predt, dtrain) # both numpy.ndarray and cupy.ndarray works. return grad, hess def METHOD_NAME(predt: np.ndarray, dtrain: xgb.DMatrix) -> Tuple[str, float]: y = dtrain.get_label().reshape(predt.shape) v = np.sqrt(np.sum(np.power(y - predt, 2))) return "PyRMSE", v X, y = gen_circle() Xy = xgb.DMatrix(X, y) results: Dict[str, Dict[str, List[float]]] = {} # Make sure the `num_target` is passed to XGBoost when custom objective is used. # When builtin objective is used, XGBoost can figure out the number of targets # automatically. booster = xgb.train( { "tree_method": "hist", "num_target": y.shape[1], "multi_strategy": strategy, }, dtrain=Xy, num_boost_round=128, obj=squared_log, evals=[(Xy, "Train")], evals_result=results, custom_metric=METHOD_NAME, ) y_predt = booster.inplace_predict(X) if plot_result: plot_predt(y, y_predt, "multi") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--plot", choices=[0, 1], type=int, default=1) args = parser.parse_args() # Train with builtin RMSE objective # - One model per output. rmse_model(args.plot == 1, "one_output_per_tree") # - One model for all outputs, this is still working in progress, many features are # missing. rmse_model(args.plot == 1, "multi_output_tree") # Train with custom objective. # - One model per output. 
    custom_rmse_model(args.plot == 1, "one_output_per_tree")

    # - One model for all outputs, this is still a work in progress; many features
    # are missing.
    custom_rmse_model(args.plot == 1, "multi_output_tree")
null
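As a quick sanity check on the custom objective above (not part of the original demo), the analytic gradient predt - y with unit hessian corresponds to the loss 0.5 * (predt - y)**2; a numpy-only finite-difference comparison confirms it:

import numpy as np

rng = np.random.default_rng(0)
y = rng.normal(size=(50, 2))
predt = rng.normal(size=(50, 2))
eps = 1e-6

def loss(p):
    return 0.5 * np.sum((p - y) ** 2)

analytic_grad = predt - y  # same formula as gradient() in the demo
numeric_grad = np.empty_like(predt)
for idx in np.ndindex(predt.shape):
    bumped = predt.copy()
    bumped[idx] += eps
    numeric_grad[idx] = (loss(bumped) - loss(predt)) / eps

assert np.allclose(analytic_grad, numeric_grad, atol=1e-4)
print("custom squared-error gradient matches finite differences")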
get message
from django.core.mail import EmailMessage from django.core.cache import cache from django.template.loader import render_to_string from django.conf import settings from django.contrib.admin.models import LogEntry, CHANGE, ADDITION from django.contrib.contenttypes.models import ContentType from sponsors.models import Sponsorship, Contract, BenefitFeature class BaseEmailSponsorshipNotification: subject_template = None message_template = None email_context_keys = None def get_subject(self, context): return render_to_string(self.subject_template, context).strip() def METHOD_NAME(self, context): return render_to_string(self.message_template, context).strip() def get_recipient_list(self, context): raise NotImplementedError def get_attachments(self, context): """ Returns list with attachments tuples (filename, content, mime type) """ return [] def get_email_context(self, **kwargs): return {k: kwargs.get(k) for k in self.email_context_keys} def notify(self, **kwargs): context = self.get_email_context(**kwargs) email = EmailMessage( subject=self.get_subject(context), body=self.METHOD_NAME(context), to=self.get_recipient_list(context), from_email=settings.SPONSORSHIP_NOTIFICATION_FROM_EMAIL, ) for attachment in self.get_attachments(context): email.attach(*attachment) email.send() class AppliedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/psf_new_application_subject.txt" message_template = "sponsors/email/psf_new_application.txt" email_context_keys = ["request", "sponsorship"] def get_recipient_list(self, context): return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL] class AppliedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/sponsor_new_application_subject.txt" message_template = "sponsors/email/sponsor_new_application.txt" email_context_keys = ["sponsorship", "request"] def get_recipient_list(self, context): return context["sponsorship"].verified_emails def get_email_context(self, **kwargs): context = super().get_email_context(**kwargs) context["required_assets"] = BenefitFeature.objects.from_sponsorship(context["sponsorship"]).required_assets() return context class RejectedSponsorshipNotificationToPSF(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/psf_rejected_sponsorship_subject.txt" message_template = "sponsors/email/psf_rejected_sponsorship.txt" email_context_keys = ["sponsorship"] def get_recipient_list(self, context): return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL] class RejectedSponsorshipNotificationToSponsors(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/sponsor_rejected_sponsorship_subject.txt" message_template = "sponsors/email/sponsor_rejected_sponsorship.txt" email_context_keys = ["sponsorship"] def get_recipient_list(self, context): return context["sponsorship"].verified_emails class ContractNotificationToPSF(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/psf_contract_subject.txt" message_template = "sponsors/email/psf_contract.txt" email_context_keys = ["contract"] def get_recipient_list(self, context): return [settings.SPONSORSHIP_NOTIFICATION_TO_EMAIL] def get_attachments(self, context): document = context["contract"].document with document.open("rb") as fd: content = fd.read() return [("Contract.pdf", content, "application/pdf")] class ContractNotificationToSponsors(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/sponsor_contract_subject.txt" message_template = 
"sponsors/email/sponsor_contract.txt" email_context_keys = ["contract"] def get_recipient_list(self, context): return context["contract"].sponsorship.verified_emails def get_attachments(self, context): contract = context["contract"] if contract.document_docx: document = contract.document_docx ext, app_type = "docx", "msword" else: # fallback to PDF for existing contracts document = contract.document ext, app_type = "pdf", "pdf" document = context["contract"].document with document.open("rb") as fd: content = fd.read() return [(f"Contract.{ext}", content, f"application/{app_type}")] def add_log_entry(request, object, acton_flag, message): return LogEntry.objects.log_action( user_id=request.user.id, content_type_id=ContentType.objects.get_for_model(type(object)).pk, object_id=object.pk, object_repr=str(object), action_flag=acton_flag, change_message=message ) class SponsorshipApprovalLogger: def notify(self, request, sponsorship, contract, **kwargs): add_log_entry(request, sponsorship, CHANGE, "Sponsorship Approval") add_log_entry(request, contract, ADDITION, "Created After Sponsorship Approval") class SentContractLogger: def notify(self, request, contract, **kwargs): add_log_entry(request, contract, CHANGE, "Contract Sent") class ExecutedContractLogger: def notify(self, request, contract, **kwargs): add_log_entry(request, contract, CHANGE, "Contract Executed") class ExecutedExistingContractLogger: def notify(self, request, contract, **kwargs): add_log_entry(request, contract, CHANGE, "Existing Contract Uploaded and Executed") class NullifiedContractLogger: def notify(self, request, contract, **kwargs): add_log_entry(request, contract, CHANGE, "Contract Nullified") class SendSponsorNotificationLogger: def notify(self, notification, sponsorship, contact_types, request, **kwargs): contacts = ", ".join(contact_types) msg = f"Notification '{notification.internal_name}' was sent to contacts: {contacts}" add_log_entry(request, sponsorship, CHANGE, msg) class RefreshSponsorshipsCache: def notify(self, *args, **kwargs): # clean up cached used by "sponsors/partials/sponsors-list.html" cache.delete("CACHED_SPONSORS_LIST") class AssetCloseToDueDateNotificationToSponsors(BaseEmailSponsorshipNotification): subject_template = "sponsors/email/sponsor_expiring_assets_subject.txt" message_template = "sponsors/email/sponsor_expiring_assets.txt" email_context_keys = ["sponsorship", "required_assets", "due_date", "days"] def get_recipient_list(self, context): return context["sponsorship"].verified_emails def get_email_context(self, **kwargs): context = super().get_email_context(**kwargs) context["required_assets"] = BenefitFeature.objects.from_sponsorship(context["sponsorship"]).required_assets() return context class ClonedResourcesLogger: def notify(self, request, resource, from_year, **kwargs): msg = f"Cloned from {from_year} sponsorship application config" add_log_entry(request, resource, ADDITION, msg)
null
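All of the notification classes above share one template-method shape: the base class assembles and sends the email while subclasses only declare templates and recipients. A framework-free sketch of that pattern (str.format stands in for Django templates, print for EmailMessage, and every name and address is illustrative):

class BaseNotification:
    subject_template = None
    message_template = None

    def get_subject(self, context):
        return self.subject_template.format(**context).strip()

    def get_message(self, context):
        return self.message_template.format(**context).strip()

    def get_recipient_list(self, context):
        raise NotImplementedError

    def notify(self, **context):
        # the Django version builds an EmailMessage here and calls send()
        print("To:", ", ".join(self.get_recipient_list(context)))
        print("Subject:", self.get_subject(context))
        print(self.get_message(context))

class AppliedNotification(BaseNotification):
    subject_template = "New application from {sponsor}"
    message_template = "{sponsor} applied for the {level} package."

    def get_recipient_list(self, context):
        return ["sponsors@example.org"]

AppliedNotification().notify(sponsor="Acme Corp", level="Gold")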
test check is bool boolean attr
# -*- coding: utf-8 -*- from unittest import mock from django.db import models from django.test import TestCase from tests.testapp.models import ( DummyRelationModel, InheritedFromPostWithUniqField, PostWithUniqField, ReverseModel, SecondDummyRelationModel, ThirdDummyRelationModel, ) from django_extensions.db.fields import UniqueFieldMixin class UniqFieldMixinTestCase(TestCase): def setUp(self): class MockField(UniqueFieldMixin): def __init__(self, **kwargs): for key, value in kwargs.items(): setattr(self, key, value) self.uniq_field = MockField( attname='uniq_field', max_length=255, boolean_attr=True, non_boolean_attr='non_boolean_attr' ) f_dummy = DummyRelationModel.objects.create() s_dummy = SecondDummyRelationModel.objects.create() t_dummy = ThirdDummyRelationModel.objects.create() post = PostWithUniqField.objects.create( uniq_field='test_uniq', common_field='first', another_common_field='second', many_to_one_field=f_dummy, one_to_one_field=s_dummy, ) post.many_to_many_field.add(t_dummy) post.save() ReverseModel.objects.create(post_field=post) self.post = post def tearDown(self): PostWithUniqField.objects.all().delete() DummyRelationModel.objects.all().delete() SecondDummyRelationModel.objects.all().delete() ThirdDummyRelationModel.objects.all().delete() ReverseModel.objects.all().delete() def METHOD_NAME(self): self.assertIsNone(self.uniq_field.check_is_bool('boolean_attr')) def test_check_is_bool_non_boolean_attr(self): with self.assertRaisesMessage( ValueError, "'non_boolean_attr' argument must be True or False", ): self.uniq_field.check_is_bool('non_boolean_attr') def test__get_fields_returns_list_of_tulpes(self): uniq_mixin_fields = UniqueFieldMixin._get_fields(PostWithUniqField) self.assertIsInstance(uniq_mixin_fields, list) for field in uniq_mixin_fields: self.assertIsInstance(field, tuple) def test__get_fields_returns_correct_fields(self): option_fields = PostWithUniqField._meta.get_fields() uniq_mixin_fields = [i[0] for i in UniqueFieldMixin._get_fields(PostWithUniqField)] self.assertEqual(len(option_fields), 9) self.assertEqual(len(uniq_mixin_fields), 7) fields_to_be_excluded_from_uniq_mixin_fields = [ f for f in option_fields if f.is_relation and not f.one_to_one and not (f.many_to_one and f.related_model) ] for field in fields_to_be_excluded_from_uniq_mixin_fields: self.assertNotIn(field, uniq_mixin_fields) def test__get_fields_returns_correct_model(self): post_models = [i[1] for i in UniqueFieldMixin._get_fields(PostWithUniqField)] self.assertTrue(all(model is None for model in post_models)) inherited_post_models = [ i[1] for i in UniqueFieldMixin._get_fields(InheritedFromPostWithUniqField) if i[1] ] self.assertEqual(len(inherited_post_models), 6) self.assertTrue(all(model is PostWithUniqField) for model in inherited_post_models) def test_get_queryset(self): mocked_get_fields = ( (models.CharField, PostWithUniqField), ) with mock.patch( 'django_extensions.db.fields.UniqueFieldMixin._get_fields', return_value=mocked_get_fields ), mock.patch( 'tests.testapp.models.PostWithUniqField._default_manager.all' ) as mocked_qs_all: self.uniq_field.get_queryset(PostWithUniqField, models.CharField) mocked_qs_all.assert_called_with() mocked_get_fields = ( (models.CharField, None), ) with mock.patch( 'django_extensions.db.fields.UniqueFieldMixin._get_fields', return_value=mocked_get_fields ), mock.patch( 'tests.testapp.models.InheritedFromPostWithUniqField._default_manager.all' ) as mocked_qs_all: self.uniq_field.get_queryset(InheritedFromPostWithUniqField, models.CharField) 
mocked_qs_all.assert_called_with() def test_find_unique(self): def filter_func(*args, **kwargs): uniq_field = kwargs.get('uniq_field') if uniq_field == 'a': return mocked_qs return None mocked_qs = mock.Mock(spec=PostWithUniqField.objects) mocked_qs.filter.side_effect = filter_func mocked_qs.exclude.return_value = mocked_qs field = models.CharField with mock.patch( 'django_extensions.db.fields.UniqueFieldMixin.get_queryset', return_value=mocked_qs ) as get_qs: res = self.uniq_field.find_unique(self.post, field, iter('abcde')) get_qs.assert_called_with(PostWithUniqField, field) mocked_qs.exclude.assert_called_with(pk=self.post.pk) self.assertEqual(res, 'b') self.assertTrue(hasattr(self.post, 'uniq_field')) self.assertEqual(self.post.uniq_field, 'b')
null
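A condensed unittest.mock illustration of the side_effect pattern that test_find_unique relies on, detached from Django models (find_unique here is a simplified stand-in, not the real mixin method):

from unittest import mock

def find_unique(queryset, candidates):
    # return the first candidate the queryset does not already contain
    for value in candidates:
        if not queryset.filter(uniq_field=value):
            return value

mocked_qs = mock.Mock()
mocked_qs.filter.side_effect = lambda **kw: kw.get("uniq_field") == "a"  # "a" is taken

assert find_unique(mocked_qs, iter("abc")) == "b"
mocked_qs.filter.assert_any_call(uniq_field="b")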
is unknown
import logging import os from io import StringIO from shlex import quote from tron.config import schema from tron.serialize import filehandler from tron.utils import timeutils from tron.utils.observer import Observable from tron.utils.state import Machine log = logging.getLogger(__name__) class ActionCommand(Observable): """An ActionCommand encapsulates a runnable task that is passed to a node for execution. A Node calls: started (when the command starts) exited (when the command exits) write_<channel> (when output is received) done (when the command is finished) """ PENDING = "pending" RUNNING = "running" EXITING = "exiting" COMPLETE = "complete" FAILSTART = "failstart" STATE_MACHINE = Machine( PENDING, **{ PENDING: {"start": RUNNING, "exit": FAILSTART,}, RUNNING: {"exit": EXITING,}, EXITING: {"close": COMPLETE,}, }, ) STDOUT = ".stdout" STDERR = ".stderr" def __init__(self, id, command, serializer=None): super().__init__() self.id = id self.command = command self.machine = Machine.from_machine(ActionCommand.STATE_MACHINE) self.exit_status = None self.start_time = None self.end_time = None if serializer: self.stdout = serializer.open(self.STDOUT) self.stderr = serializer.open(self.STDERR) else: self.stdout = filehandler.NullFileHandle self.stderr = filehandler.NullFileHandle @property def state(self): return self.machine.state def transition_and_notify(self, target): if self.machine.transition(target): self.notify(self.state) return True def started(self): if self.machine.check("start"): self.start_time = timeutils.current_timestamp() return self.transition_and_notify("start") def exited(self, exit_status): if self.machine.check("exit"): self.end_time = timeutils.current_timestamp() self.exit_status = exit_status return self.transition_and_notify("exit") def write_stderr(self, value): self.stderr.write(value) def write_stdout(self, value): self.stdout.write(value) def done(self): if self.machine.check("close"): self.stdout.close() self.stderr.close() return self.transition_and_notify("close") def handle_errback(self, result): """Handle an unexpected error while being run. This will likely be an interval error. Cleanup the state of this ActionCommand and log something useful for debugging. """ log.error(f"Unknown failure for {self}, {str(result)}") self.exited(result) self.done() @property def METHOD_NAME(self): return self.exit_status is None @property def is_failed(self): return bool(self.exit_status) @property def is_complete(self): """Complete implies done and success.""" return self.machine.state == ActionCommand.COMPLETE @property def is_done(self): """Done implies no more work will be done, but might not be success.""" return self.machine.state in (ActionCommand.COMPLETE, ActionCommand.FAILSTART,) def __repr__(self): return f"ActionCommand {self.id} {self.command}: {self.state}" class StringBufferStore: """A serializer object which can be passed to ActionCommand as a serializer, but stores streams in memory. 
""" def __init__(self): self.buffers = {} def open(self, name): return self.buffers.setdefault(name, StringIO()) def clear(self): self.buffers.clear() class NoActionRunnerFactory: """Action runner factory that does not wrap the action run command.""" @classmethod def create(cls, id, command, serializer): return ActionCommand(id, command, serializer) @classmethod def build_stop_action_command(cls, _id, _command): """It is not possible to stop action commands without a runner.""" raise NotImplementedError("An action_runner is required to stop.") class SubprocessActionRunnerFactory: """Run actions by wrapping them in `action_runner.py`.""" runner_exec_name = "action_runner.py" status_exec_name = "action_status.py" def __init__(self, status_path, exec_path): self.status_path = status_path self.exec_path = exec_path @classmethod def from_config(cls, config): return cls(config.remote_status_path, config.remote_exec_path) def create(self, id, command, serializer): command = self.build_command(id, command, self.runner_exec_name) return ActionCommand(id, command, serializer) def build_command(self, id, command, exec_name): status_path = os.path.join(self.status_path, id) runner_path = os.path.join(self.exec_path, exec_name) return f"{quote(runner_path)} {quote(status_path)} {quote(command)} {quote(id)}" def build_stop_action_command(self, id, command): command = self.build_command(id, command, self.status_exec_name) run_id = f"{id}.{command}" return ActionCommand(run_id, command, StringBufferStore()) def __eq__(self, other): return ( self.__class__ == other.__class__ and self.status_path == other.status_path and self.exec_path == other.exec_path ) def __ne__(self, other): return not self == other def create_action_runner_factory_from_config(config): """A factory-factory method which returns a callable that can be used to create ActionCommand objects. The factory definition should match the constructor for ActionCommand. """ if not config or config.runner_type == schema.ActionRunnerTypes.none.value: return NoActionRunnerFactory() elif config.runner_type == schema.ActionRunnerTypes.subprocess.value: return SubprocessActionRunnerFactory.from_config(config) else: raise ValueError("Unknown runner type: %s", config.runner_type)
null
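The lifecycle an ActionCommand walks through is pending -> running -> exiting -> complete, with failstart when the start itself fails. A toy transition table makes the allowed moves explicit; tron's Machine and Observable classes are deliberately left out of this sketch:

TRANSITIONS = {
    ("pending", "start"): "running",
    ("pending", "exit"): "failstart",
    ("running", "exit"): "exiting",
    ("exiting", "close"): "complete",
}

class ToyCommand:
    def __init__(self):
        self.state = "pending"

    def transition(self, event):
        # move only along edges listed in the table, mirroring Machine.transition
        nxt = TRANSITIONS.get((self.state, event))
        if nxt is None:
            return False
        self.state = nxt
        return True

cmd = ToyCommand()
assert cmd.transition("start")      # started()
assert cmd.transition("exit")       # exited(0)
assert cmd.transition("close")      # done()
assert cmd.state == "complete"
assert not cmd.transition("start")  # no transitions out of "complete"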
parse args
#!/usr/bin/env python3 # -*- coding: utf-8; py-indent-offset: 4 -*- # # Author: Linuxfabrik GmbH, Zurich, Switzerland # Contact: info (at) linuxfabrik (dot) ch # https://www.linuxfabrik.ch/ # License: The Unlicense, see LICENSE file. # https://github.com/Linuxfabrik/monitoring-plugins/blob/main/CONTRIBUTING.rst """See the check's README for more details. """ import argparse # pylint: disable=C0413 import sys # pylint: disable=C0413 import json # pylint: disable=C0413 import lib.args # pylint: disable=C0413 import lib.base # pylint: disable=C0413 import lib.test # pylint: disable=C0413 import lib.url # pylint: disable=C0413 from lib.globals import (STATE_CRIT, STATE_OK, # pylint: disable=C0413 STATE_UNKNOWN, STATE_WARN) __author__ = """Linuxfabrik GmbH, Zurich/Switzerland; originally written by Dominik Riva, Universitätsspital Basel/Switzerland""" __version__ = '2023071203' DESCRIPTION = """This check targets the JSON endpoint of https://www.cometsystem.com/ Web Sensors. """ DEFAULT_INSECURE = False DEFAULT_NO_PROXY = False DEFAULT_TIMEOUT = 5 def METHOD_NAME(): """Parse command line arguments using argparse. """ parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument( '-V', '--version', action='version', version='%(prog)s: v{} by {}'.format(__version__, __author__) ) parser.add_argument( '--always-ok', help='Always returns OK.', dest='ALWAYS_OK', action='store_true', default=False, ) parser.add_argument( '--insecure', help='This option explicitly allows to perform "insecure" SSL connections. ' 'Default: %(default)s', dest='INSECURE', action='store_true', default=DEFAULT_INSECURE, ) parser.add_argument( '--no-proxy', help='Do not use a proxy. Default: %(default)s', dest='NO_PROXY', action='store_true', default=DEFAULT_NO_PROXY, ) parser.add_argument( '--severity', help='Severity for alerting, order matters, first match on part of a channel name wins. ' 'Have a look at the README for details. Example: ' '`--severity temp:high:crit --severity dew:low:crit --severity humi:ok ' '--severity warn`. ' 'Repeating. Default: warn', dest='SEVERITY', action='append', default=None, ) parser.add_argument( '--test', help='For unit tests. Needs "path-to-stdout-file,path-to-stderr-file,expected-retc".', dest='TEST', type=lib.args.csv, ) parser.add_argument( '--timeout', help='Network timeout in seconds. Default: %(default)s (seconds)', dest='TIMEOUT', type=int, default=DEFAULT_TIMEOUT, ) parser.add_argument( '-u', '--url', help='Comet system URL pointing to the JSON file (http://example.com/values.json).', dest='URL', required=True, ) return parser.METHOD_NAME() def get_channel_state(channel, args): """Return the state for a given Web Sensor channel dict. Order of args.SEVERITY matters, first match wins. 
""" if args.SEVERITY is not None: for severity in args.SEVERITY: sev = severity.split(':') if len(sev) == 1 and channel['alarm']: # there is no ":" return lib.base.str2state(sev[0]) if len(sev) == 2: # "humi:warn" if sev[0].lower() in channel['name'].lower() and channel['alarm']: return lib.base.str2state(sev[1]) else: # "humi:low:warn", "temp:high:crit" # alarm == 1: high alarm if sev[0].lower() in channel['name'].lower() \ and sev[1].lower().startswith('high') \ and channel['alarm'] == 1: return lib.base.str2state(sev[2]) # alarm == 2: low alarm if sev[0].lower() in channel['name'].lower() \ and sev[1].lower().startswith('low') \ and channel['alarm'] == 2: return lib.base.str2state(sev[2]) if not channel['alarm']: return STATE_OK return STATE_WARN # the default state for any alarm, no matter if high or low def main(): """The main function. Hier spielt die Musik. """ # parse the command line, exit with UNKNOWN if it fails try: args = METHOD_NAME() except SystemExit: sys.exit(STATE_UNKNOWN) # fetch data if args.TEST is None: if not args.URL.startswith('http'): lib.base.oao( '--url parameter has to start with "http://" or https://".', STATE_UNKNOWN, ) # fetch the URL result = lib.base.coe(lib.url.fetch_json( args.URL, insecure=args.INSECURE, no_proxy=args.NO_PROXY, timeout=args.TIMEOUT, )) else: # do not call the command, put in test data stdout, stderr, retc = lib.test.test(args.TEST) result = json.loads(stdout) if 'ch1' not in result: lib.base.cu('Malformed Comet System Web Sensors status file.') # init some vars msg = '' state = STATE_OK perfdata = '' table_data = [] # analyze data for key, value in result.items(): if key.startswith('ch'): # this is a channel if '%' in value['unit']: perfdata += lib.base.get_perfdata(value['name'], value['aval'], '%', _min=0, _max=100) # pylint: disable=C0301 else: perfdata += lib.base.get_perfdata(value['name'], value['aval']) value['ch'] = key if value['unit'].endswith('C') or value['unit'].endswith('F'): # strip ° value['unit'] = value['unit'][-1] channel_state = get_channel_state(value, args) state = lib.base.get_worst(channel_state, state) if value['alarm'] == 0: value['alarm'] = '' if value['alarm'] == 1: value['alarm'] = 'high' if value['alarm'] == 2: value['alarm'] = 'low' value['aval'] = '{}{}{}'.format( value['aval'], value['unit'], lib.base.state2str(channel_state, prefix=' '), ) table_data.append(value) # build the message msg += ' on {} SN {}.\n\n'.format( result['devname'], result['devsn'], ) if table_data: msg += lib.base.get_table( table_data, ['ch', 'name', 'alarm', 'aval'], ['Ch#', 'Name', 'Alarm', 'Value'], ) if state == STATE_CRIT: msg = 'There are critical errors' + msg elif state == STATE_WARN: msg = 'There are warnings' + msg elif state == STATE_UNKNOWN: msg = 'Unknown states' + msg else: msg = 'Everything is ok' + msg # over and out lib.base.oao(msg, state, perfdata, always_ok=args.ALWAYS_OK) if __name__ == '__main__': try: main() except Exception: # pylint: disable=W0703 lib.base.cu()
null
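A standalone sketch of how the repeated --severity values are split and matched against a channel: lib.base.str2state is replaced by a plain dict, and the channel dict mirrors the name/alarm fields the check reads from the Web Sensor JSON (all values here are illustrative):

STATES = {"ok": 0, "warn": 1, "crit": 2, "unknown": 3}

def channel_state(channel, severities):
    for severity in severities:
        sev = severity.split(":")
        if len(sev) == 1 and channel["alarm"]:                    # "warn"
            return STATES[sev[0]]
        if len(sev) == 2 and sev[0].lower() in channel["name"].lower() and channel["alarm"]:
            return STATES[sev[1]]                                 # "humi:ok"
        if len(sev) == 3 and sev[0].lower() in channel["name"].lower():
            if sev[1].startswith("high") and channel["alarm"] == 1:
                return STATES[sev[2]]                             # "temp:high:crit"
            if sev[1].startswith("low") and channel["alarm"] == 2:
                return STATES[sev[2]]
    return STATES["ok"] if not channel["alarm"] else STATES["warn"]

ch = {"name": "Temperature", "alarm": 1}                 # alarm == 1 means "high"
print(channel_state(ch, ["temp:high:crit", "humi:ok"]))  # -> 2 (crit)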
tear down
import os import shutil import tempfile import logging import subprocess import time import re import pytest from flask import Flask, request, Response from threading import Thread from collections import deque from unittest import TestCase from pathlib import Path from werkzeug.serving import make_server from samcli.cli.global_config import GlobalConfig from samcli.cli.main import TELEMETRY_PROMPT from tests.testing_utils import get_sam_command LOG = logging.getLogger(__name__) TELEMETRY_ENDPOINT_PORT = "18298" TELEMETRY_ENDPOINT_HOST = "localhost" TELEMETRY_ENDPOINT_URL = "http://{}:{}".format(TELEMETRY_ENDPOINT_HOST, TELEMETRY_ENDPOINT_PORT) # Convert line separators to work with Windows \r\n EXPECTED_TELEMETRY_PROMPT = re.sub(r"\n", os.linesep, TELEMETRY_PROMPT) @pytest.mark.xdist_group(name="sam_telemetry") class IntegBase(TestCase): @classmethod def setUpClass(cls): cls.cmd = get_sam_command() def setUp(self): self.maxDiff = None # Show full JSON Diff self.config_dir = tempfile.mkdtemp() self._gc = GlobalConfig() self._gc.config_dir = Path(self.config_dir) def METHOD_NAME(self): self.config_dir and shutil.rmtree(self.config_dir) def run_cmd(self, cmd_list=None, stdin_data="", optout_envvar_value=None): # Any command will work for this test suite cmd_list = cmd_list or [self.cmd, "local", "generate-event", "s3", "put"] env = os.environ.copy() # remove the envvar which usually is set in CI/CD. This interferes with tests env.pop("SAM_CLI_TELEMETRY", None) if optout_envvar_value: # But if the caller explicitly asked us to opt-out via EnvVar, then set it here env["SAM_CLI_TELEMETRY"] = optout_envvar_value env["__SAM_CLI_APP_DIR"] = self.config_dir env["__SAM_CLI_TELEMETRY_ENDPOINT_URL"] = "{}/metrics".format(TELEMETRY_ENDPOINT_URL) process = subprocess.Popen( cmd_list, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env ) return process def unset_config(self): config_file = Path(self.config_dir, "metadata.json") if config_file.exists(): config_file.unlink() def set_config(self, telemetry_enabled=None): self._gc.telemetry_enabled = telemetry_enabled def get_global_config(self): return self._gc class TelemetryServer: """ HTTP Server that can receive and store Telemetry requests. 
Caller can later retrieve the responses for assertion Examples -------- >>> with TelemetryServer() as server: >>> # Server is running now >>> # Set the Telemetry backend endpoint to the server's URL >>> env = os.environ.copy().setdefault("__SAM_CLI_TELEMETRY_ENDPOINT_URL", server.url) >>> # Run SAM CLI command >>> p = subprocess.Popen(["samdev", "local", "generate-event", "s3", "put"], env=env) >>> p.wait() # Wait for process to complete >>> # Get the first metrics request that was sent >>> r = server.get_request(0) >>> assert r.method == 'POST' >>> assert r.body == "{...}" """ def __init__(self): super().__init__() self.flask_app = Flask(__name__) self.flask_app.add_url_rule( "/metrics", endpoint="/metrics", view_func=self._request_handler, methods=["POST"], provide_automatic_options=False, ) # Thread-safe data structure to record requests sent to the server self._requests = deque() def __enter__(self): self.server = make_server(TELEMETRY_ENDPOINT_HOST, TELEMETRY_ENDPOINT_PORT, self.flask_app) self.thread = Thread(target=self.server.serve_forever) self.thread.daemon = True # When test completes, this thread will die automatically self.thread.start() # Start the thread return self def __exit__(self, *args, **kwargs): # Flask will start shutting down only *after* the above request completes. # Just give the server a little bit of time to teardown finish time.sleep(2) self.server.shutdown() self.thread.join() def get_request(self, index): return self._requests[index] def get_all_requests(self): return list(self._requests) def _request_handler(self, **kwargs): """ Handles Flask requests """ # `request` is a variable populated by Flask automatically when handler method is called request_data = { "endpoint": request.endpoint, "method": request.method, "data": request.get_json(), "headers": dict(request.headers), } self._requests.append(request_data) return Response(response={}, status=200)
null
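A trimmed, runnable sketch of the make_server plus background-thread pattern TelemetryServer uses (Flask and Werkzeug assumed installed); the port and payload are arbitrary, and urllib stands in for the SAM CLI posting a metric:

from threading import Thread
from urllib.request import Request, urlopen

from flask import Flask, request
from werkzeug.serving import make_server

app = Flask(__name__)
received = []

@app.route("/metrics", methods=["POST"])
def metrics():
    received.append(request.get_json())  # record the request like _request_handler does
    return "", 200

server = make_server("localhost", 18299, app)
thread = Thread(target=server.serve_forever, daemon=True)
thread.start()

req = Request(
    "http://localhost:18299/metrics",
    data=b'{"metric": "ping"}',
    headers={"Content-Type": "application/json"},
)
urlopen(req).read()

server.shutdown()
thread.join()
print(received)  # -> [{'metric': 'ping'}]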
convert xml to dict
from typing import Union from digitalpy.core.main.controller import ( Controller, ) from digitalpy.core.domain.node import Node from copy import deepcopy from lxml.etree import Element # pylint: disable=no-name-in-module from lxml import etree import xmltodict from ..configuration.xml_serializer_constants import ( XML_SERIALIZER_BUSINESS_RULES_PATH, XML_SERIALIZER, BASE_OBJECT_NAME, ) class XMLSerializationController(Controller): def execute(self, method=None): getattr(self, method)(**self.request.get_values()) return self.response def METHOD_NAME(self, message: str, **kwargs): """converts the provided xml string to a dictionary Args: message (str): xml string to be converted to a dictionary """ self.response.set_value("dict", xmltodict.parse(message)) def convert_node_to_xml(self, node, **kwargs): """converts the provided node to an xml string Args: node (Node): the node to be serialized to xml """ self.response.set_value( "message", self._serialize_node(node, node.__class__.__name__.lower()) ) def _serialize_node( self, node: Node, tag_name: str, level=0 ) -> Union[str, Element]: """the body of the serialization function recursively serializes each node class Args: node (Node): the root node class to be serialized tag_name (str): the name of the root node class to be serialized level (int, optional): _description_. Defaults to 0. Returns: Union[str, Element]: the original call to this method returns a string representing the xml the Element is only returned in the case of recursive calls """ xml = Element(tag_name) # handles text data within tag if hasattr(node, "text"): xml.text = node.text for attribName in node.get_properties(): # below line is required because get_properties function returns only cot property names value = getattr(node, attribName) if hasattr(value, "__dict__"): tagElement = self._serialize_node(value, attribName, level=level + 1) # TODO: modify so double underscores are handled differently try: if attribName[0] == "_": tagElement.tag = "_" + tagElement.tag xml.append(tagElement) except: pass else: xml.append(tagElement) elif value == None: continue elif isinstance(value, list): for element in value: tagElement = self._serialize_node( element, attribName, level=level + 1 ) # TODO: modify so double underscores are handled differently try: if attribName[0] == "_": tagElement.tag = "_" + tagElement.tag xml.append(tagElement) except: pass else: xml.append(tagElement) else: # TODO: modify so double underscores are handled differently # handles instances in which attribute name begins with double underscore try: if attribName[0] == "_": xml.attrib["_" + attribName] = value except: pass else: xml.attrib[attribName] = str(value) if hasattr(node, "xml_string"): # this method combines the xml object parsed from # the model object with the xml_string found in the node # directly, giving priority to the xml object parsed from the model object xml = self._xml_merge(node.xml_string, xml) if level == 0: return etree.tostring(xml) else: return xml def _xml_merge(self, a, b): """credits: https://gist.github.com/dirkjot/bd25b037b33bba6187e99d76792ceb90 this function merges two xml etree elements Args: a (_type_): _description_ b (_type_): _description_ """ def inner(a_parent, b_parent): for bchild in b_parent: achild = a_parent.xpath("./" + bchild.tag) if not achild: a_parent.append(bchild) elif bchild.getchildren(): inner(achild[0], bchild) res = deepcopy(a) inner(res, b) return res
null
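A small round-trip illustration of the two libraries the controller leans on: xmltodict for the XML-to-dict conversion and lxml.etree for building elements going the other way (the <event>/<point> sample is invented):

import xmltodict
from lxml import etree

xml_message = '<event version="2.0"><point lat="1.0" lon="2.0"/></event>'

# xml -> dict, the conversion the controller exposes for incoming messages
as_dict = xmltodict.parse(xml_message)
print(as_dict["event"]["point"]["@lat"])  # attributes are keyed with a leading "@"

# building an element tree by hand, the raw material _serialize_node produces
event = etree.Element("event", version="2.0")
etree.SubElement(event, "point", lat="1.0", lon="2.0")
print(etree.tostring(event).decode())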
get first playing player
#!/usr/bin/env python3 import gi gi.require_version("Playerctl", "2.0") from gi.repository import Playerctl, GLib from gi.repository.Playerctl import Player import argparse import logging import sys import signal import gi import json import os from typing import List logger = logging.getLogger(__name__) def signal_handler(sig, frame): logger.info("Received signal to stop, exiting") sys.stdout.write("\n") sys.stdout.flush() # loop.quit() sys.exit(0) class PlayerManager: def __init__(self, selected_player=None): self.manager = Playerctl.PlayerManager() self.loop = GLib.MainLoop() self.manager.connect( "name-appeared", lambda *args: self.on_player_appeared(*args)) self.manager.connect( "player-vanished", lambda *args: self.on_player_vanished(*args)) signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) signal.signal(signal.SIGPIPE, signal.SIG_DFL) self.selected_player = selected_player self.init_players() def init_players(self): for player in self.manager.props.player_names: if self.selected_player is not None and self.selected_player != player.name: logger.debug(f"{player.name} is not the filtered player, skipping it") continue self.init_player(player) def run(self): logger.info("Starting main loop") self.loop.run() def init_player(self, player): logger.info(f"Initialize new player: {player.name}") player = Playerctl.Player.new_from_name(player) player.connect("playback-status", self.on_playback_status_changed, None) player.connect("metadata", self.on_metadata_changed, None) self.manager.manage_player(player) self.on_metadata_changed(player, player.props.metadata) def get_players(self) -> List[Player]: return self.manager.props.players def write_output(self, text, player): logger.debug(f"Writing output: {text}") output = {"text": text, "class": "custom-" + player.props.player_name, "alt": player.props.player_name} sys.stdout.write(json.dumps(output) + "\n") sys.stdout.flush() def clear_output(self): sys.stdout.write("\n") sys.stdout.flush() def on_playback_status_changed(self, player, status, _=None): logger.debug(f"Playback status changed for player {player.props.player_name}: {status}") self.on_metadata_changed(player, player.props.metadata) def METHOD_NAME(self): players = self.get_players() logger.debug(f"Getting first playing player from {len(players)} players") if len(players) > 0: # if any are playing, show the first one that is playing # reverse order, so that the most recently added ones are preferred for player in players[::-1]: if player.props.status == "Playing": return player # if none are playing, show the first one return players[0] else: logger.debug("No players found") return None def show_most_important_player(self): logger.debug("Showing most important player") # show the currently playing player # or else show the first paused player # or else show nothing current_player = self.METHOD_NAME() if current_player is not None: self.on_metadata_changed(current_player, current_player.props.metadata) else: self.clear_output() def on_metadata_changed(self, player, metadata, _=None): logger.debug(f"Metadata changed for player {player.props.player_name}") player_name = player.props.player_name artist = player.get_artist() title = player.get_title() track_info = "" if player_name == "spotify" and "mpris:trackid" in metadata.keys() and ":ad:" in player.props.metadata["mpris:trackid"]: track_info = "Advertisement" elif artist is not None and title is not None: track_info = f"{artist} - {title}" else: track_info = title if track_info: if 
player.props.status == "Playing": track_info = " " + track_info else: track_info = " " + track_info # only print output if no other player is playing current_playing = self.METHOD_NAME() if current_playing is None or current_playing.props.player_name == player.props.player_name: self.write_output(track_info, player) else: logger.debug(f"Other player {current_playing.props.player_name} is playing, skipping") def on_player_appeared(self, _, player): logger.info(f"Player has appeared: {player.name}") if player is not None and (self.selected_player is None or player.name == self.selected_player): self.init_player(player) else: logger.debug( "New player appeared, but it's not the selected player, skipping") def on_player_vanished(self, _, player): logger.info(f"Player {player.props.player_name} has vanished") self.show_most_important_player() def parse_arguments(): parser = argparse.ArgumentParser() # Increase verbosity with every occurrence of -v parser.add_argument("-v", "--verbose", action="count", default=0) # Define for which player we"re listening parser.add_argument("--player") parser.add_argument("--enable-logging", action="store_true") return parser.parse_args() def main(): arguments = parse_arguments() # Initialize logging if arguments.enable_logging: logfile = os.path.join(os.path.dirname( os.path.realpath(__file__)), "media-player.log") logging.basicConfig(filename=logfile, level=logging.DEBUG, format="%(asctime)s %(name)s %(levelname)s:%(lineno)d %(message)s") # Logging is set by default to WARN and higher. # With every occurrence of -v it's lowered by one logger.setLevel(max((3 - arguments.verbose) * 10, 0)) logger.info("Creating player manager") if arguments.player: logger.info(f"Filtering for player: {arguments.player}") player = PlayerManager(arguments.player) player.run() if __name__ == "__main__": main()
null
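Waybar only cares about the JSON-per-line contract that write_output implements; a dependency-free sketch of just that piece, with the player name and text as placeholders:

import json
import sys

def write_output(text, player_name):
    # one JSON object per line on stdout, flushed immediately so waybar updates
    payload = {"text": text, "class": "custom-" + player_name, "alt": player_name}
    sys.stdout.write(json.dumps(payload) + "\n")
    sys.stdout.flush()

write_output("Artist - Title", "spotify")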
test unknown
import unittest from BaseClasses import PlandoOptions from Options import PlandoBosses class SingleBosses(PlandoBosses): bosses = {"B1", "B2"} locations = {"L1", "L2"} option_vanilla = 0 option_shuffle = 1 @staticmethod def can_place_boss(boss: str, location: str) -> bool: if boss == "b1" and location == "l1": return False return True class MultiBosses(SingleBosses): bosses = SingleBosses.bosses # explicit copy required locations = SingleBosses.locations duplicate_bosses = True option_singularity = 2 # required when duplicate_bosses = True class TestPlandoBosses(unittest.TestCase): def testCI(self): """Bosses, locations and modes are supposed to be case-insensitive""" self.assertEqual(MultiBosses.from_any("L1-B2").value, "l1-b2;vanilla") self.assertEqual(MultiBosses.from_any("ShUfFlE").value, SingleBosses.option_shuffle) def testRandom(self): """Validate random is random""" import random random.seed(0) value1 = MultiBosses.from_any("random") random.seed(0) value2 = MultiBosses.from_text("random") self.assertEqual(value1, value2) for n in range(0, 100): if MultiBosses.from_text("random") != value1: break else: raise Exception("random is not random") def testShuffleMode(self): """Test that simple modes (no Plando) work""" self.assertEqual(MultiBosses.from_any("shuffle"), MultiBosses.option_shuffle) self.assertNotEqual(MultiBosses.from_any("vanilla"), MultiBosses.option_shuffle) def testPlacement(self): """Test that valid placements work and invalid placements fail""" with self.assertRaises(ValueError): MultiBosses.from_any("l1-b1") MultiBosses.from_any("l1-b2;l2-b1") def testMixed(self): """Test that shuffle is applied for remaining locations""" self.assertIn("shuffle", MultiBosses.from_any("l1-b2;l2-b1;shuffle").value) self.assertIn("vanilla", MultiBosses.from_any("l1-b2;l2-b1").value) def METHOD_NAME(self): """Test that unknown values throw exceptions""" # unknown boss with self.assertRaises(ValueError): MultiBosses.from_any("l1-b0") # unknown location with self.assertRaises(ValueError): MultiBosses.from_any("l0-b1") # swapped boss-location with self.assertRaises(ValueError): MultiBosses.from_any("b2-b2") # boss name in place of mode (no singularity) with self.assertRaises(ValueError): SingleBosses.from_any("b1") with self.assertRaises(ValueError): SingleBosses.from_any("l2-b2;b1") # location name in place of mode with self.assertRaises(ValueError): MultiBosses.from_any("l1") with self.assertRaises(ValueError): MultiBosses.from_any("l2-b2;l1") # mode name in place of location with self.assertRaises(ValueError): MultiBosses.from_any("shuffle-b2;vanilla") with self.assertRaises(ValueError): MultiBosses.from_any("shuffle-b2;l2-b2") # mode name in place of boss with self.assertRaises(ValueError): MultiBosses.from_any("l2-shuffle;vanilla") with self.assertRaises(ValueError): MultiBosses.from_any("l1-shuffle;l2-b2") def testOrder(self): """Can't use mode in random places""" with self.assertRaises(ValueError): MultiBosses.from_any("shuffle;l2-b2") def testDuplicateBoss(self): """Can place the same boss twice""" MultiBosses.from_any("l1-b2;l2-b2") with self.assertRaises(ValueError): SingleBosses.from_any("l1-b2;l2-b2") def testDuplicateLocation(self): """Can't use the same location twice""" with self.assertRaises(ValueError): MultiBosses.from_any("l1-b2;l1-b2") def testSingularity(self): """Test automatic singularity mode""" self.assertIn(";singularity", MultiBosses.from_any("b2").value) def testPlandoOptions(self): """Test that plando options verification works""" plandoed_string = "l1-b2;l2-b1" 
mixed_string = "l1-b2;shuffle" regular_string = "shuffle" plandoed = MultiBosses.from_any(plandoed_string) mixed = MultiBosses.from_any(mixed_string) regular = MultiBosses.from_any(regular_string) # plando should work with boss plando plandoed.verify(None, "Player", PlandoOptions.bosses) self.assertTrue(plandoed.value.startswith(plandoed_string)) # plando should fall back to default without boss plando plandoed.verify(None, "Player", PlandoOptions.items) self.assertEqual(plandoed, MultiBosses.option_vanilla) # mixed should fall back to mode mixed.verify(None, "Player", PlandoOptions.items) # should produce a warning and still work self.assertEqual(mixed, MultiBosses.option_shuffle) # mode stuff should just work regular.verify(None, "Player", PlandoOptions.items) self.assertEqual(regular, MultiBosses.option_shuffle)
null
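A toy parser for the "location-boss;...;mode" strings the tests above exercise, independent of the Archipelago option classes and with the validation rules heavily simplified:

BOSSES = {"b1", "b2"}
LOCATIONS = {"l1", "l2"}
MODES = {"vanilla", "shuffle"}

def parse(value):
    placements, mode = [], "vanilla"
    parts = value.lower().split(";")          # case-insensitive, like from_any
    for i, part in enumerate(parts):
        if "-" in part:
            loc, boss = part.split("-")
            if loc not in LOCATIONS or boss not in BOSSES:
                raise ValueError(f"unknown placement: {part}")
            placements.append((loc, boss))
        else:
            if part not in MODES:
                raise ValueError(f"unknown mode: {part}")
            if i != len(parts) - 1:
                raise ValueError("mode must come last")
            mode = part
    return placements, mode

print(parse("L1-B2;l2-b1;ShUfFlE"))  # -> ([('l1', 'b2'), ('l2', 'b1')], 'shuffle')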
then conditional stdout does not match regexp
import json import re import textwrap import jsonschema # type: ignore import yaml from behave import then, when from hamcrest import assert_that, contains_string, equal_to, not_ from features.steps.shell import when_i_run_command from features.util import SafeLoaderWithoutDatetime, process_template_vars @then("I will see the following on {stream}") def then_i_will_see_on_stream(context, stream): content = getattr(context.process, stream).strip() text = process_template_vars(context, context.text) if not text == content: raise AssertionError( "Expected to find exactly:\n{}\nBut got:\n{}".format( textwrap.indent(text, " "), textwrap.indent(content, " ") ) ) @then("if `{value1}` in `{value2}` and stdout matches regexp") def then_conditional_stdout_matches_regexp(context, value1, value2): """Only apply regex assertion if value1 in value2.""" if value1 in value2.split(" or "): then_stream_matches_regexp(context, "stdout") @then("if `{value1}` in `{value2}` and stdout does not match regexp") def METHOD_NAME(context, value1, value2): """Only apply regex assertion if value1 in value2.""" if value1 in value2.split(" or "): then_stream_does_not_match_regexp(context, "stdout") @then("if `{value1}` not in `{value2}` and stdout matches regexp") def then_not_in_conditional_stdout_does_not_match_regexp( context, value1, value2 ): """Only apply regex assertion if value1 not in value2.""" if value1 not in value2.split(" or "): then_stream_matches_regexp(context, "stdout") @then("{stream} does not match regexp") def then_stream_does_not_match_regexp(context, stream): text = process_template_vars(context, context.text) content = getattr(context.process, stream).strip() if re.compile(text).search(content) is not None: raise AssertionError( "Expected to NOT match regexp:\n{}\nBut got:\n{}".format( textwrap.indent(text, " "), textwrap.indent(content, " ") ) ) @then("{stream} matches regexp") def then_stream_matches_regexp(context, stream): content = getattr(context.process, stream).strip() text = process_template_vars(context, context.text) if re.compile(text).search(content) is None: raise AssertionError( "Expected to match regexp:\n{}\nBut got:\n{}".format( textwrap.indent(text, " "), textwrap.indent(content, " ") ) ) @then("{stream} contains substring") def then_stream_contains_substring(context, stream): content = getattr(context.process, stream).strip() text = process_template_vars(context, context.text) if text not in content: raise AssertionError( ( "Expected to find substring:\n{}\n" + "But couldn't find it in:\n{}" ).format( textwrap.indent(text, " "), textwrap.indent(content, " ") ) ) @then("{stream} does not contain substring") def then_stream_not_contains_substring(context, stream): content = getattr(context.process, stream).strip() text = process_template_vars(context, context.text) assert_that(content, not_(contains_string(text))) if text in content: raise AssertionError( ( "Expected to NOT find substring:\n{}\n" + "But did find it in:\n{}" ).format( textwrap.indent(text, " "), textwrap.indent(content, " ") ) ) @then("I will see the uaclient version on stdout") def then_i_will_see_the_uaclient_version_on_stdout(context): python_import = "from uaclient.version import get_version" cmd = "python3 -c '{}; print(get_version())'".format(python_import) actual_version = context.process.stdout.strip() when_i_run_command(context, cmd, "as non-root") expected_version = context.process.stdout.strip() assert_that(expected_version, equal_to(actual_version)) @then("stdout is a {output_format} matching the 
`{schema}` schema") def stdout_matches_the_json_schema(context, output_format, schema): if output_format == "json": instance = json.loads(context.process.stdout.strip()) elif output_format == "yaml": instance = yaml.load( context.process.stdout.strip(), SafeLoaderWithoutDatetime ) with open("features/schemas/{}.json".format(schema), "r") as schema_file: jsonschema.validate(instance=instance, schema=json.load(schema_file)) @then("the {output_format} API response data matches the `{schema}` schema") def api_response_matches_schema(context, output_format, schema): if output_format == "json": instance = json.loads(context.process.stdout.strip()) elif output_format == "yaml": instance = yaml.load( context.process.stdout.strip(), SafeLoaderWithoutDatetime ) with open("features/schemas/{}.json".format(schema), "r") as schema_file: jsonschema.validate( instance=instance.get("data", {}).get("attributes"), schema=json.load(schema_file), ) @when("I verify root and non-root `{cmd}` calls have the same output") def root_vs_nonroot_cmd_comparison(context, cmd): when_i_run_command(context, cmd, "with sudo") root_status_stdout = context.process.stdout.strip() root_status_stderr = context.process.stderr.strip() when_i_run_command(context, cmd, "as non-root") nonroot_status_stdout = context.process.stdout.strip() nonroot_status_stderr = context.process.stderr.strip() assert_that(root_status_stdout, nonroot_status_stdout) assert root_status_stderr == nonroot_status_stderr
null
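The heart of these steps is a plain re.search over the captured stream. A tiny standalone version of the "matches regexp" assertion, with behave and hamcrest removed and the sample output invented:

import re
import textwrap

def assert_matches(content, pattern):
    # same shape as then_stream_matches_regexp, minus the behave context object
    if re.compile(pattern).search(content.strip()) is None:
        raise AssertionError(
            "Expected to match regexp:\n{}\nBut got:\n{}".format(
                textwrap.indent(pattern, "  "), textwrap.indent(content, "  ")
            )
        )

assert_matches("esm-infra    yes   enabled", r"esm-infra\s+yes\s+enabled")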
json
import typing as t from functools import wraps import pydantic from pydantic.fields import FieldInfo from sqlglot import exp from sqlmesh.core import dialect as d if t.TYPE_CHECKING: Model = t.TypeVar("Model", bound="PydanticModel") DEFAULT_ARGS = {"exclude_none": True, "by_alias": True} PYDANTIC_MAJOR_VERSION = int(pydantic.__version__.split(".")[0]) if PYDANTIC_MAJOR_VERSION >= 2: def field_validator(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: # Pydantic v2 doesn't support "always" argument. The validator behaves as if "always" is True. kwargs.pop("always", None) return pydantic.field_validator(*args, **kwargs) # type: ignore def model_validator(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: # Pydantic v2 doesn't support "always" argument. The validator behaves as if "always" is True. kwargs.pop("always", None) return pydantic.model_validator(*args, **kwargs) # type: ignore def field_serializer(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: return pydantic.field_serializer(*args, **kwargs) # type: ignore else: def field_validator(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: mode = kwargs.pop("mode", "after") return pydantic.validator(*args, **kwargs, pre=mode.lower() == "before", allow_reuse=True) def model_validator(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: mode = kwargs.pop("mode", "after") return pydantic.root_validator( *args, **kwargs, pre=mode.lower() == "before", allow_reuse=True ) def field_serializer(*args: t.Any, **kwargs: t.Any) -> t.Callable[[t.Any], t.Any]: def _decorator(func: t.Callable[[t.Any], t.Any]) -> t.Callable[[t.Any], t.Any]: @wraps(func) def _wrapper(*args: t.Any, **kwargs: t.Any) -> t.Any: return func(*args, **kwargs) return _wrapper return _decorator def _expression_encoder(e: exp.Expression) -> str: return e.meta.get("sql") or e.sql(dialect=e.meta.get("dialect")) AuditQueryTypes = t.Union[exp.Subqueryable, d.JinjaQuery] ModelQueryTypes = t.Union[exp.Subqueryable, d.JinjaQuery, d.MacroFunc] class PydanticModel(pydantic.BaseModel): if PYDANTIC_MAJOR_VERSION >= 2: model_config = pydantic.ConfigDict( # type: ignore arbitrary_types_allowed=True, extra="forbid", # type: ignore # Even though Pydantic v2 kept support for json_encoders, the functionality has been # crippled badly. Here we need to enumerate all different ways of how sqlglot expressions # show up in pydantic models. json_encoders={ exp.Expression: _expression_encoder, exp.DataType: _expression_encoder, exp.Tuple: _expression_encoder, AuditQueryTypes: _expression_encoder, # type: ignore ModelQueryTypes: _expression_encoder, # type: ignore }, protected_namespaces=(), ) else: class Config: arbitrary_types_allowed = True extra = "forbid" json_encoders = {exp.Expression: _expression_encoder} underscore_attrs_are_private = True smart_union = True def dict( self, **kwargs: t.Any, ) -> t.Dict[str, t.Any]: kwargs.update(DEFAULT_ARGS) return super().model_dump(**kwargs) if PYDANTIC_MAJOR_VERSION >= 2 else super().dict(**kwargs) # type: ignore def METHOD_NAME( self, **kwargs: t.Any, ) -> str: kwargs.update(DEFAULT_ARGS) if PYDANTIC_MAJOR_VERSION >= 2: # Pydantic v2 doesn't support arbitrary arguments for json.dump(). 
kwargs.pop("sort_keys", None) return super().model_dump_json(**kwargs) # type: ignore return super().METHOD_NAME(**kwargs) # type: ignore def copy(self: "Model", **kwargs: t.Any) -> "Model": return ( super().model_copy(**kwargs) if PYDANTIC_MAJOR_VERSION >= 2 else super().copy(**kwargs) # type: ignore ) @property def fields_set(self: "Model") -> t.Set[str]: return self.__pydantic_fields_set__ if PYDANTIC_MAJOR_VERSION >= 2 else self.__fields_set__ # type: ignore @classmethod def parse_obj(cls: t.Type["Model"], obj: t.Any) -> "Model": return ( super().model_validate(obj) if PYDANTIC_MAJOR_VERSION >= 2 else super().parse_obj(obj) # type: ignore ) @classmethod def parse_raw(cls: t.Type["Model"], b: t.Union[str, bytes], **kwargs: t.Any) -> "Model": return ( super().model_validate_json(b, **kwargs) if PYDANTIC_MAJOR_VERSION >= 2 else super().parse_raw(b, **kwargs) # type: ignore ) @classmethod def missing_required_fields( cls: t.Type["PydanticModel"], provided_fields: t.Set[str] ) -> t.Set[str]: return cls.required_fields() - provided_fields @classmethod def extra_fields(cls: t.Type["PydanticModel"], provided_fields: t.Set[str]) -> t.Set[str]: return provided_fields - cls.all_fields() @classmethod def all_fields(cls: t.Type["PydanticModel"]) -> t.Set[str]: return cls._fields() @classmethod def all_field_infos(cls: t.Type["PydanticModel"]) -> t.Dict[str, FieldInfo]: return cls.model_fields if PYDANTIC_MAJOR_VERSION >= 2 else cls.__fields__ # type: ignore @classmethod def required_fields(cls: t.Type["PydanticModel"]) -> t.Set[str]: return cls._fields(lambda field: field.is_required() if PYDANTIC_MAJOR_VERSION >= 2 else field.required) # type: ignore @classmethod def _fields( cls: t.Type["PydanticModel"], predicate: t.Callable[[t.Any], bool] = lambda _: True, ) -> t.Set[str]: return { field_info.alias if field_info.alias else field_name for field_name, field_info in cls.all_field_infos().items() # type: ignore if predicate(field_info) } def model_validator_v1_args(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: @wraps(func) def wrapper(cls: t.Type, values: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: is_values_dict = isinstance(values, dict) values_dict = values if is_values_dict else values.__dict__ result = func(cls, values_dict, *args, **kwargs) if is_values_dict: return result else: values.__dict__.update(result) return values return wrapper def field_validator_v1_args(func: t.Callable[..., t.Any]) -> t.Callable[..., t.Any]: @wraps(func) def wrapper(cls: t.Type, v: t.Any, values: t.Any, *args: t.Any, **kwargs: t.Any) -> t.Any: values_dict = values if isinstance(values, dict) else values.data return func(cls, v, values_dict, *args, **kwargs) return wrapper
null
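A minimal usage sketch for the version-agnostic PydanticModel helpers in the row above. It assumes the classes are in scope as defined; the subclass and field names are made up for illustration, and the masked serializer method is deliberately not called.

import typing as t

class Widget(PydanticModel):                       # PydanticModel defined in the row above
    name: str
    size: t.Optional[int] = None

w = Widget(name="gear")
print(w.dict())                                    # {'name': 'gear'} -- DEFAULT_ARGS drops None fields
print(w.fields_set)                                # {'name'} on both pydantic v1 and v2
print(Widget.required_fields())                    # {'name'}
print(Widget.missing_required_fields({"size"}))    # {'name'}
w2 = w.copy()                                      # dispatches to model_copy() on v2, copy() on v1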
extract symbols
# Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Provides a container for DescriptorProtos.""" __author__ = '[email protected] (Matt Toia)' import warnings class Error(Exception): pass class DescriptorDatabaseConflictingDefinitionError(Error): """Raised when a proto is added with the same name & different descriptor.""" class DescriptorDatabase(object): """A container accepting FileDescriptorProtos and maps DescriptorProtos.""" def __init__(self): self._file_desc_protos_by_file = {} self._file_desc_protos_by_symbol = {} def Add(self, file_desc_proto): """Adds the FileDescriptorProto and its types to this database. Args: file_desc_proto: The FileDescriptorProto to add. Raises: DescriptorDatabaseConflictingDefinitionError: if an attempt is made to add a proto with the same name but different definition than an existing proto in the database. """ proto_name = file_desc_proto.name if proto_name not in self._file_desc_protos_by_file: self._file_desc_protos_by_file[proto_name] = file_desc_proto elif self._file_desc_protos_by_file[proto_name] != file_desc_proto: raise DescriptorDatabaseConflictingDefinitionError( '%s already added, but with different descriptor.' % proto_name) else: return # Add all the top-level descriptors to the index. package = file_desc_proto.package for message in file_desc_proto.message_type: for name in METHOD_NAME(message, package): self._AddSymbol(name, file_desc_proto) for enum in file_desc_proto.enum_type: self._AddSymbol(('.'.join((package, enum.name))), file_desc_proto) for enum_value in enum.value: self._file_desc_protos_by_symbol[ '.'.join((package, enum_value.name))] = file_desc_proto for extension in file_desc_proto.extension: self._AddSymbol(('.'.join((package, extension.name))), file_desc_proto) for service in file_desc_proto.service: self._AddSymbol(('.'.join((package, service.name))), file_desc_proto) def FindFileByName(self, name): """Finds the file descriptor proto by file name. 
Typically the file name is a relative path ending to a .proto file. The proto with the given name will have to have been added to this database using the Add method or else an error will be raised. Args: name: The file name to find. Returns: The file descriptor proto matching the name. Raises: KeyError if no file by the given name was added. """ return self._file_desc_protos_by_file[name] def FindFileContainingSymbol(self, symbol): """Finds the file descriptor proto containing the specified symbol. The symbol should be a fully qualified name including the file descriptor's package and any containing messages. Some examples: 'some.package.name.Message' 'some.package.name.Message.NestedEnum' 'some.package.name.Message.some_field' The file descriptor proto containing the specified symbol must be added to this database using the Add method or else an error will be raised. Args: symbol: The fully qualified symbol name. Returns: The file descriptor proto containing the symbol. Raises: KeyError if no file contains the specified symbol. """ try: return self._file_desc_protos_by_symbol[symbol] except KeyError: # Fields, enum values, and nested extensions are not in # _file_desc_protos_by_symbol. Try to find the top level # descriptor. Non-existent nested symbol under a valid top level # descriptor can also be found. The behavior is the same with # protobuf C++. top_level, _, _ = symbol.rpartition('.') try: return self._file_desc_protos_by_symbol[top_level] except KeyError: # Raise the original symbol as a KeyError for better diagnostics. raise KeyError(symbol) def FindFileContainingExtension(self, extendee_name, extension_number): # TODO(jieluo): implement this API. return None def FindAllExtensionNumbers(self, extendee_name): # TODO(jieluo): implement this API. return [] def _AddSymbol(self, name, file_desc_proto): if name in self._file_desc_protos_by_symbol: warn_msg = ('Conflict register for file "' + file_desc_proto.name + '": ' + name + ' is already defined in file "' + self._file_desc_protos_by_symbol[name].name + '"') warnings.warn(warn_msg, RuntimeWarning) self._file_desc_protos_by_symbol[name] = file_desc_proto def METHOD_NAME(desc_proto, package): """Pulls out all the symbols from a descriptor proto. Args: desc_proto: The proto to extract symbols from. package: The package containing the descriptor type. Yields: The fully qualified name found in the descriptor. """ message_name = package + '.' + desc_proto.name if package else desc_proto.name yield message_name for nested_type in desc_proto.nested_type: for symbol in METHOD_NAME(nested_type, message_name): yield symbol for enum_type in desc_proto.enum_type: yield '.'.join((message_name, enum_type.name))
null
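A short, hypothetical exercise of the DescriptorDatabase in the row above; the masked generator is assumed to be the module-level symbol extractor, judging by the row label, and the proto names here are placeholders.

from google.protobuf import descriptor_pb2

db = DescriptorDatabase()                          # class defined in the row above
fdp = descriptor_pb2.FileDescriptorProto()
fdp.name = 'example/thing.proto'
fdp.package = 'example'
fdp.message_type.add(name='Thing')                 # one top-level message

db.Add(fdp)                                        # indexes 'example.Thing' via the masked extractor
assert db.FindFileByName('example/thing.proto') is fdp
assert db.FindFileContainingSymbol('example.Thing') is fdp
assert db.FindFileContainingSymbol('example.Thing.some_field') is fdp  # falls back to the top-level name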
shop filter
from module.base.button import ButtonGrid from module.base.decorator import cached_property, del_cached_property from module.base.timer import Timer from module.logger import logger from module.map_detection.utils import Points from module.ocr.ocr import Digit, DigitYuv, Ocr from module.shop.assets import * from module.shop.base import ShopItemGrid from module.shop.clerk import ShopClerk from module.shop.shop_status import ShopStatus from module.ui.scroll import Scroll MEDAL_SHOP_SCROLL = Scroll(MEDAL_SHOP_SCROLL_AREA, color=(247, 211, 66)) MEDAL_SHOP_SCROLL.edge_threshold = 0.15 MEDAL_SHOP_SCROLL.drag_threshold = 0.15 class ShopPriceOcr(DigitYuv): def after_process(self, result): result = Ocr.after_process(self, result) # '100' detected as '00' on retrofit blueprint if result == '00': result = '100' return Digit.after_process(self, result) PRICE_OCR = ShopPriceOcr([], letter=(255, 223, 57), threshold=32, name='Price_ocr') TEMPLATE_MEDAL_ICON = Template('./assets/shop/cost/Medal.png') TEMPLATE_MEDAL_ICON_2 = Template('./assets/shop/cost/Medal_2.png') class MedalShop2(ShopClerk, ShopStatus): @cached_property def METHOD_NAME(self): """ Returns: str: """ return self.config.MedalShop2_Filter.strip() def _get_medals(self): """ Returns: np.array: [[x1, y1], [x2, y2]], location of the medal icon upper-left corner. """ left_column = self.image_crop((472, 348, 1170, 648)) medals = TEMPLATE_MEDAL_ICON_2.match_multi(left_column, similarity=0.5, threshold=5) medals = Points([(0., m.area[1]) for m in medals]).group(threshold=5) logger.attr('Medals_icon', len(medals)) return medals def wait_until_medal_appear(self, skip_first_screenshot=True): """ After entering medal shop page, items are not loaded that fast, wait until any medal icon appears """ timeout = Timer(1, count=3).start() while 1: if skip_first_screenshot: skip_first_screenshot = False else: self.device.screenshot() medals = self._get_medals() if timeout.reached(): break if len(medals): break @cached_property def shop_grid(self): return self.shop_medal_grid() def shop_medal_grid(self): """ Returns: ButtonGrid: """ # (472, 348, 1170, 648) medals = self._get_medals() count = len(medals) if count == 0: logger.warning('Unable to find medal icon, assume item list is at top') origin_y = 246 delta_y = 213 row = 2 elif count == 1: y_list = medals[:, 1] # +256, top of the crop area in _get_medals() # -125, from the top of medal icon to the top of shop item origin_y = y_list[0] + 348 - 127 delta_y = 213 row = 1 elif count == 2: y_list = medals[:, 1] y1, y2 = y_list[0], y_list[1] origin_y = min(y1, y2) + 348 - 127 delta_y = abs(y1 - y2) row = 2 else: logger.warning(f'Unexpected medal icon match result: {[m.area for m in medals]}') origin_y = 246 delta_y = 213 row = 2 # Make up a ButtonGrid # Original grid is: # shop_grid = ButtonGrid( # origin=(476, 246), delta=(156, 213), button_shape=(98, 98), grid_shape=(5, 2), name='SHOP_GRID') shop_grid = ButtonGrid( origin=(476, origin_y), delta=(156, delta_y), button_shape=(98, 98), grid_shape=(5, row), name='SHOP_GRID') return shop_grid shop_template_folder = './assets/shop/medal' @cached_property def shop_medal_items(self): """ Returns: ShopItemGrid: """ shop_grid = self.shop_grid shop_medal_items = ShopItemGrid( shop_grid, templates={}, amount_area=(60, 74, 96, 95), price_area=(52, 132, 132, 162)) shop_medal_items.load_template_folder(self.shop_template_folder) shop_medal_items.load_cost_template_folder('./assets/shop/cost') shop_medal_items.similarity = 0.85 # Lower the threshold for consistent matches of 
PR/DRBP shop_medal_items.cost_similarity = 0.5 shop_medal_items.price_ocr = PRICE_OCR return shop_medal_items def shop_items(self): """ Shared alias name for all shops, so to use @Config must define a unique alias as cover Returns: ShopItemGrid: """ return self.shop_medal_items def shop_currency(self): """ Ocr shop medal currency Then return medal count Returns: int: medal amount """ self._currency = self.status_get_medal() logger.info(f'Medal: {self._currency}') return self._currency def shop_has_loaded(self, items): """ If any item parsed with a default price of 5000; then shop cannot be safely bought from yet Returns: bool """ for item in items: if int(item.price) == 5000: return False return True def shop_interval_clear(self): """ Clear interval on select assets for shop_buy_handle """ super().shop_interval_clear() self.interval_clear(SHOP_BUY_CONFIRM_SELECT) self.interval_clear(SHOP_BUY_CONFIRM_AMOUNT) def shop_buy_handle(self, item): """ Handle shop_medal buy interface if detected Args: item: Item to handle Returns: bool: whether interface was detected and handled """ if self.appear(SHOP_BUY_CONFIRM_SELECT, offset=(20, 20), interval=3): self.shop_buy_select_execute(item) self.interval_reset(SHOP_BUY_CONFIRM_SELECT) return True if self.appear(SHOP_BUY_CONFIRM_AMOUNT, offset=(20, 20), interval=3): self.shop_buy_amount_execute(item) self.interval_reset(SHOP_BUY_CONFIRM_AMOUNT) return True return False def run(self): """ Run Medal Shop """ # Base case; exit run if filter empty if not self.METHOD_NAME: return # When called, expected to be in # correct Medal Shop interface logger.hr('Medal Shop', level=1) self.wait_until_medal_appear() # Execute buy operations MEDAL_SHOP_SCROLL.set_top(main=self) while 1: self.shop_buy() if MEDAL_SHOP_SCROLL.at_bottom(main=self): logger.info('Medal shop reach bottom, stop') break else: MEDAL_SHOP_SCROLL.next_page(main=self, page=0.66) del_cached_property(self, 'shop_grid') del_cached_property(self, 'shop_medal_items') continue
null
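The run() loop in the row above drops its cached grid properties after every scroll; the snippet below is a framework-free sketch of that cache-and-invalidate pattern, using functools.cached_property as a stand-in for module.base.decorator.cached_property.

from functools import cached_property

def del_cached_property(obj, name):
    # stand-in for module.base.decorator.del_cached_property: forget a cached value if present
    obj.__dict__.pop(name, None)

class GridHolder:
    @cached_property
    def shop_grid(self):
        print('recomputing grid layout')
        return object()                 # placeholder for a ButtonGrid

holder = GridHolder()
_ = holder.shop_grid                    # computed once ("recomputing grid layout")
_ = holder.shop_grid                    # served from the instance cache, no recompute
del_cached_property(holder, 'shop_grid')
_ = holder.shop_grid                    # recomputed for the next shop page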
get aws configs
""" Copyright 2021 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import os import subprocess import boto3 from cli.style import print_error_msg from utils.common import run_command def set_aws_configs(params: set, configs: dict): """Sets AWS configuration Args: params (set): List of aws configuration attributes configs (dict): Configuration map for a particular component """ for k, v in configs.items(): if k not in params: continue cmd = ["aws", "configure", "set", k, v] proc_inst = run_command(cmd) if proc_inst.returncode != 0: print_error_msg(f"Failed configuring aws with {k}") def METHOD_NAME(): """Gets AWS configuration from environment""" env_params_cfg_map = ( ('AWS_ACCESS_KEY_ID', 'aws_access_key_id'), ('AWS_SECRET_ACCESS_KEY', 'aws_secret_access_key'), ('AWS_DEFAULT_REGION', 'region'), ) configs = {} for env_param, cfg_key in env_params_cfg_map: cmd = ["aws", "configure", "get", cfg_key] proc_inst = run_command(cmd) val = None if proc_inst.returncode == 0: val = proc_inst.stdout.strip() if not val: val = os.environ.get(env_param) configs[cfg_key] = val return configs def check_elastic_role_not_exists(): elastic_role = 'AWSServiceRoleForAmazonElasticsearchService' client = boto3.client('iam') try: client.get_role(RoleName=elastic_role) except client.exceptions.NoSuchEntityException: return True return False def get_gateways(gateway_prefix: str = "agw"): client = boto3.client('ec2') gateways = [] try: instance_info = client.describe_instances( Filters=[ { 'Name': 'tag:Name', 'Values': [f'{gateway_prefix}*'], }, { 'Name': 'instance-state-name', 'Values': ['running'], }, ], ) for reservation in instance_info["Reservations"]: gateway_id = "" hostname = "" for instance in reservation["Instances"]: for tags in instance["Tags"]: if tags["Key"] == "Name": gateway_id = tags["Value"] break gateway_ip = instance['PrivateIpAddress'] if gateway_id and gateway_ip: gateways.append((gateway_id, gateway_ip)) except client.exceptions.NoSuchEntityException: pass return gateways def get_bastion_ip(gateway_prefix: str = "agw"): client = boto3.client('ec2') gateways = [] try: instance_info = client.describe_instances( Filters=[ { 'Name': 'tag:Name', 'Values': ['*Bridge'], }, { 'Name': 'instance-state-name', 'Values': ['running'], }, ], ) for reservation in instance_info["Reservations"]: for instance in reservation["Instances"]: return instance['PublicIpAddress'] except client.exceptions.NoSuchEntityException: pass return '' def verify_resources_exist(uuid: str = None): """Check if resources with a specific uuid exist Args: uuid (str, optional): unique id to identify cluster. Defaults to None. """ client = boto3.client('resourcegroupstaggingapi') tagFilters = [] if uuid: tag_filters = [{ 'Key': 'magma-uuid', 'Values': [uuid], }] resources = client.get_resources( TagFilters=tag_filters, ) for resource in resources['ResourceTagMappingList']: print(resource["ResourceARN"])
null
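A hedged sketch of how the helpers in the row above compose. It assumes the aws CLI is installed (set_aws_configs shells out to `aws configure set`), uses placeholder credentials, and only describes what the masked reader (presumably get_aws_configs, per the row label) would return.

import os

os.environ.setdefault('AWS_DEFAULT_REGION', 'us-west-2')        # the env fallback path of the reader
set_aws_configs(
    {'aws_access_key_id', 'aws_secret_access_key'},             # attributes allowed through
    {
        'aws_access_key_id': 'EXAMPLEKEY',                      # placeholder, not a real credential
        'aws_secret_access_key': 'EXAMPLESECRET',
        'unrelated_key': 'silently skipped (not in params)',
    },
)
# The masked reader would now report the key id and secret via `aws configure get`
# and pick up the region from AWS_DEFAULT_REGION, since no region was configured.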
icon
# /*########################################################################## # # Copyright (c) 2016 European Synchrotron Radiation Facility # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # # ###########################################################################*/ """ This module contains :class:`SafeFileIconProvider`. """ __authors__ = ["V. Valls"] __license__ = "MIT" __date__ = "31/10/2017" import sys import logging from silx.gui import qt _logger = logging.getLogger(__name__) class SafeFileIconProvider(qt.QFileIconProvider): """ This class reimplements :class:`qt.QFileIconProvider` to avoid blocking access to the file system. It avoids using `qt.QFileInfo.absoluteFilePath` or `qt.QFileInfo.canonicalPath` to reach drive icons, as those calls are known to freeze the file system when network drives are involved. Computer root and drive root paths are filtered. Other paths are not filtered, since a drive has to be synchronized anyway before it can be accessed. 
""" WIN32_DRIVE_UNKNOWN = 0 """The drive type cannot be determined.""" WIN32_DRIVE_NO_ROOT_DIR = 1 """The root path is invalid; for example, there is no volume mounted at the specified path.""" WIN32_DRIVE_REMOVABLE = 2 """The drive has removable media; for example, a floppy drive, thumb drive, or flash card reader.""" WIN32_DRIVE_FIXED = 3 """The drive has fixed media; for example, a hard disk drive or flash drive.""" WIN32_DRIVE_REMOTE = 4 """The drive is a remote (network) drive.""" WIN32_DRIVE_CDROM = 5 """The drive is a CD-ROM drive.""" WIN32_DRIVE_RAMDISK = 6 """The drive is a RAM disk.""" def __init__(self): qt.QFileIconProvider.__init__(self) self.__filterDirAndFiles = False if sys.platform == "win32": self._windowsTypes = {} item = "Drive", qt.QStyle.SP_DriveHDIcon self._windowsTypes[self.WIN32_DRIVE_UNKNOWN] = item item = "Invalid root", qt.QStyle.SP_DriveHDIcon self._windowsTypes[self.WIN32_DRIVE_NO_ROOT_DIR] = item item = "Removable", qt.QStyle.SP_DriveNetIcon self._windowsTypes[self.WIN32_DRIVE_REMOVABLE] = item item = "Drive", qt.QStyle.SP_DriveHDIcon self._windowsTypes[self.WIN32_DRIVE_FIXED] = item item = "Remote", qt.QStyle.SP_DriveNetIcon self._windowsTypes[self.WIN32_DRIVE_REMOTE] = item item = "CD-ROM", qt.QStyle.SP_DriveCDIcon self._windowsTypes[self.WIN32_DRIVE_CDROM] = item item = "RAM disk", qt.QStyle.SP_DriveHDIcon self._windowsTypes[self.WIN32_DRIVE_RAMDISK] = item def __windowsDriveTypeId(self, info): try: import ctypes path = info.filePath() dtype = ctypes.cdll.kernel32.GetDriveTypeW(path) except Exception: _logger.warning("Impossible to identify drive %s" % path) _logger.debug("Backtrace", exc_info=True) return self.WIN32_DRIVE_UNKNOWN return dtype def __windowsDriveIcon(self, info): dtype = self.__windowsDriveTypeId(info) default = self._windowsTypes[self.WIN32_DRIVE_UNKNOWN] driveInfo = self._windowsTypes.get(dtype, default) style = qt.QApplication.instance().style() METHOD_NAME = style.standardIcon(driveInfo[1]) return METHOD_NAME def __windowsDriveType(self, info): dtype = self.__windowsDriveTypeId(info) default = self._windowsTypes[self.WIN32_DRIVE_UNKNOWN] driveInfo = self._windowsTypes.get(dtype, default) return driveInfo[0] def METHOD_NAME(self, info): if isinstance(info, qt.QFileIconProvider.IconType): # It's another C++ method signature: # QIcon QFileIconProvider::icon(QFileIconProvider::IconType type) return super(SafeFileIconProvider, self).METHOD_NAME(info) style = qt.QApplication.instance().style() path = info.filePath() if path in ["", "/"]: # That's the computer root on Windows or Linux result = style.standardIcon(qt.QStyle.SP_ComputerIcon) elif sys.platform == "win32" and path[-2] == ":": # That's a drive on Windows result = self.__windowsDriveIcon(info) elif self.__filterDirAndFiles: if info.isDir(): result = style.standardIcon(qt.QStyle.SP_DirIcon) else: result = style.standardIcon(qt.QStyle.SP_FileIcon) else: result = qt.QFileIconProvider.METHOD_NAME(self, info) return result def type(self, info): path = info.filePath() if path in ["", "/"]: # That's the computer root on Windows or Linux result = "Computer" elif sys.platform == "win32" and path[-2] == ":": # That's a drive on Windows result = self.__windowsDriveType(info) elif self.__filterDirAndFiles: if info.isDir(): result = "Directory" else: result = info.suffix() else: result = qt.QFileIconProvider.type(self, info) return result
null
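A usage sketch, not taken from silx itself: plugging the provider from the row above into a Qt file model so drive icons resolve without touching a possibly networked file system. The widget wiring is illustrative.

from silx.gui import qt

app = qt.QApplication([])
model = qt.QFileSystemModel()
model.setIconProvider(SafeFileIconProvider())       # class defined in the row above
model.setRootPath(qt.QDir.rootPath())

view = qt.QTreeView()
view.setModel(model)
view.show()
app.exec_()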
system data
# coding=utf-8 # *** WARNING: this file was generated by pulumi. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from . import outputs __all__ = [ 'GetFirewallRuleResult', 'AwaitableGetFirewallRuleResult', 'get_firewall_rule', 'get_firewall_rule_output', ] @pulumi.output_type class GetFirewallRuleResult: """ Represents a server firewall rule. """ def __init__(__self__, end_ip_address=None, id=None, name=None, start_ip_address=None, METHOD_NAME=None, type=None): if end_ip_address and not isinstance(end_ip_address, str): raise TypeError("Expected argument 'end_ip_address' to be a str") pulumi.set(__self__, "end_ip_address", end_ip_address) if id and not isinstance(id, str): raise TypeError("Expected argument 'id' to be a str") pulumi.set(__self__, "id", id) if name and not isinstance(name, str): raise TypeError("Expected argument 'name' to be a str") pulumi.set(__self__, "name", name) if start_ip_address and not isinstance(start_ip_address, str): raise TypeError("Expected argument 'start_ip_address' to be a str") pulumi.set(__self__, "start_ip_address", start_ip_address) if METHOD_NAME and not isinstance(METHOD_NAME, dict): raise TypeError("Expected argument 'system_data' to be a dict") pulumi.set(__self__, "system_data", METHOD_NAME) if type and not isinstance(type, str): raise TypeError("Expected argument 'type' to be a str") pulumi.set(__self__, "type", type) @property @pulumi.getter(name="endIpAddress") def end_ip_address(self) -> str: """ The end IP address of the server firewall rule. Must be IPv4 format. """ return pulumi.get(self, "end_ip_address") @property @pulumi.getter def id(self) -> str: """ Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName} """ return pulumi.get(self, "id") @property @pulumi.getter def name(self) -> str: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="startIpAddress") def start_ip_address(self) -> str: """ The start IP address of the server firewall rule. Must be IPv4 format. """ return pulumi.get(self, "start_ip_address") @property @pulumi.getter(name="systemData") def METHOD_NAME(self) -> 'outputs.SystemDataResponse': """ The system metadata relating to this resource. """ return pulumi.get(self, "system_data") @property @pulumi.getter def type(self) -> str: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type") class AwaitableGetFirewallRuleResult(GetFirewallRuleResult): # pylint: disable=using-constant-test def __await__(self): if False: yield self return GetFirewallRuleResult( end_ip_address=self.end_ip_address, id=self.id, name=self.name, start_ip_address=self.start_ip_address, METHOD_NAME=self.METHOD_NAME, type=self.type) def get_firewall_rule(firewall_rule_name: Optional[str] = None, resource_group_name: Optional[str] = None, server_name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallRuleResult: """ Gets information about a server firewall rule. :param str firewall_rule_name: The name of the server firewall rule. :param str resource_group_name: The name of the resource group. The name is case insensitive. 
:param str server_name: The name of the server. """ __args__ = dict() __args__['firewallRuleName'] = firewall_rule_name __args__['resourceGroupName'] = resource_group_name __args__['serverName'] = server_name opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts) __ret__ = pulumi.runtime.invoke('azure-native:dbformysql/v20220101:getFirewallRule', __args__, opts=opts, typ=GetFirewallRuleResult).value return AwaitableGetFirewallRuleResult( end_ip_address=pulumi.get(__ret__, 'end_ip_address'), id=pulumi.get(__ret__, 'id'), name=pulumi.get(__ret__, 'name'), start_ip_address=pulumi.get(__ret__, 'start_ip_address'), METHOD_NAME=pulumi.get(__ret__, 'system_data'), type=pulumi.get(__ret__, 'type')) @_utilities.lift_output_func(get_firewall_rule) def get_firewall_rule_output(firewall_rule_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, server_name: Optional[pulumi.Input[str]] = None, opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFirewallRuleResult]: """ Gets information about a server firewall rule. :param str firewall_rule_name: The name of the server firewall rule. :param str resource_group_name: The name of the resource group. The name is case insensitive. :param str server_name: The name of the server. """ ...
null
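An illustrative call of the generated invoke wrappers in the row above; it only works inside a Pulumi program, and the resource names are placeholders.

import pulumi

rule = get_firewall_rule(
    firewall_rule_name='allow-office',              # placeholder names
    resource_group_name='example-rg',
    server_name='example-mysql-server',
)
pulumi.export('ruleRange', f'{rule.start_ip_address}-{rule.end_ip_address}')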
extract tag
"""Generate gemoji index.""" import os import requests import json import argparse import shutil import zipfile import gen_emoji1 import gen_gemoji import gen_twemoji import gen_joypixels current_dir = os.path.dirname(os.path.abspath(__file__)) GITHUB_API_HEADER = 'application/vnd.github.v3+json' GITHUB_API = 'https://api.github.com' GEMOJI = 'github/gemoji' EMOJIONE = 'joypixels/emojione' JOYPIXELS = 'joypixels/emoji-toolkit' TWEMOJI = 'jdecked/twemoji' def url_join(*args): """Join URL parts.""" return '/'.join(args) def get_github_emoji(): """Get GitHub's usable emoji.""" try: resp = requests.get( url_join(GITHUB_API, 'emojis'), headers={'Accept': GITHUB_API_HEADER}, timeout=30 ) except Exception: return None return json.loads(resp.text) def METHOD_NAME(repo, file_location): """Extract tag from zip.""" with zipfile.ZipFile(file_location, "r") as z: z.extractall(os.path.dirname(file_location)) repo_dir = None base = os.path.dirname(file_location) for x in os.listdir(base): fullname = os.path.join(base, x) if os.path.isdir(fullname) and x.startswith(repo.replace('/', '-')): repo_dir = fullname break if repo_dir: os.rename(repo_dir, os.path.join(base, repo.replace('/', '-'))) def download_tag(repo, tag, url): """Download tag.""" destination = os.path.join(current_dir, 'tags', repo.replace('/', '-')) if not os.path.exists(destination): os.makedirs(destination) else: print('Removing old tag...') shutil.rmtree(destination) os.makedirs(destination) file_location = os.path.join(destination, os.path.basename(url) + '.zip') print('Downloading: %s --> %s' % (url, file_location)) resp = requests.get( url, headers={'Accept': GITHUB_API_HEADER}, stream=True ) with open(file_location, 'wb') as f: for chunk in resp.iter_content(chunk_size=1024): if chunk: f.write(chunk) METHOD_NAME(repo, file_location) def select_tag(repo, no_download): """Get GitHub's usable emoji.""" resp = requests.get( url_join(GITHUB_API, 'repos', repo, 'releases'), headers={'Accept': GITHUB_API_HEADER}, timeout=50 ) assert resp.status_code == 200, "API call failed to get tag list!" 
tags = json.loads(resp.text) print('Select %s tag to use:' % repo) num_tags = len(tags) text = [] for index in range(num_tags): text.append(' [%d] %s' % (index, tags[index]['tag_name'])) if (index + 1) % 4 == 0: text.append('\n') if len(text) == 0 or text[-1] != '\n': text.append('\n') print(''.join(text)) user_input = None while user_input is None: try: user_input = int(input('Select Tag > ')) except Exception: user_input = None if user_input is not None and (user_input < 0 or user_input >= num_tags): user_input = None if not no_download: download_tag(repo, tags[user_input]['tag_name'], tags[user_input]['zipball_url']) return tags[user_input]['tag_name'] if __name__ == "__main__": parser = argparse.ArgumentParser(prog='gen_emoji', description='Generate emoji db files.') # Flag arguments parser.add_argument('--tag', default=None, help="Tag to use.") parser.add_argument('--joypixels-tag', default=None, help="Joypixels tag to use for Twemoji.") parser.add_argument('--joypixels-no-download', action='store_true', default=False, help="Skip Joypixels download.") parser.add_argument('--gemoji', action='store_true', default=False, help="Get Gemoji.") parser.add_argument('--emojione', action='store_true', default=False, help="Get Emojione.") parser.add_argument('--twemoji', action='store_true', default=False, help="Get Twemoji.") parser.add_argument('--no-download', action='store_true', default=False, help="Skip download and use local.") args = parser.parse_args() os.chdir(current_dir) if args.gemoji: if args.tag is None: tag = select_tag(GEMOJI, args.no_download) else: tag = args.tag gen_gemoji.parse(GEMOJI.replace('/', '-'), tag) if args.emojione: if args.tag is None: tag = select_tag(EMOJIONE, args.no_download) else: tag = args.tag gen_emoji1.parse(EMOJIONE.replace('/', '-'), tag) if args.twemoji: if args.joypixels_tag is None: jtag = select_tag(JOYPIXELS, args.joypixels_no_download) else: jtag = args.joypixels_tag db, aliases = gen_joypixels.parse(JOYPIXELS.replace('/', '-'), jtag) if args.tag is None: tag = select_tag(TWEMOJI, args.no_download) else: tag = args.tag gen_twemoji.parse(TWEMOJI.replace('/', '-'), tag, jtag, db, aliases)
null
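A sketch of driving the download helpers in the row above without the argparse entry point. The tag and zipball URL are placeholders, and the masked helper is assumed to be the zip extractor that download_tag relies on.

repo = 'github/gemoji'
tag = 'v4.0.0'                                      # placeholder; normally chosen via select_tag()
zipball = 'https://api.github.com/repos/%s/zipball/%s' % (repo, tag)
download_tag(repo, tag, zipball)                    # downloads the zipball, then the masked extractor unpacks it
# afterwards tags/github-gemoji/github-gemoji/ holds the release, ready for
# gen_gemoji.parse('github-gemoji', tag)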