label | code | code_compressed
---|---|---|
write phenotype file | import csv
import hashlib
import io
import json
import requests
import shutil
from typing import Dict
from typing import List
from typing import Optional
from typing import TextIO
import numpy as np
from base.webqtlConfig import TMPDIR
from base.trait import create_trait
from utility.redis_tools import get_redis_conn
from utility.tools import locate, get_setting, GN3_LOCAL_URL
from wqflask.database import database_connection
def run_rqtl(trait_name, vals, samples, dataset, pair_scan, mapping_scale, model, method, num_perm, perm_strata_list, do_control, control_marker, manhattan_plot, cofactors):
"""Run R/qtl by making a request to the GN3 endpoint and reading in the output file(s)"""
pheno_file = METHOD_NAME(trait_name, samples, vals, dataset, cofactors, perm_strata_list)
if dataset.group.genofile:
geno_file = locate(dataset.group.genofile, "genotype")
else:
geno_file = locate(dataset.group.name + ".geno", "genotype")
post_data = {
"pheno_file": pheno_file,
"geno_file": geno_file,
"model": model,
"method": method,
"nperm": num_perm,
"scale": mapping_scale
}
if pair_scan:
post_data["pairscan"] = True
if cofactors:
covarstruct_file = write_covarstruct_file(cofactors)
post_data["covarstruct"] = covarstruct_file
if do_control == "true" and control_marker:
post_data["control"] = control_marker
if not manhattan_plot and not pair_scan:
post_data["interval"] = True
if cofactors:
post_data["addcovar"] = True
if perm_strata_list:
post_data["pstrata"] = True
rqtl_output = requests.post(GN3_LOCAL_URL + "api/rqtl/compute", data=post_data).json()
if num_perm > 0:
return rqtl_output['perm_results'], rqtl_output['suggestive'], rqtl_output['significant'], rqtl_output['results']
else:
return rqtl_output['results']
def get_hash_of_textio(the_file: TextIO) -> str:
"""Given a StringIO, return the hash of its contents"""
the_file.seek(0)
hash_of_file = hashlib.md5(the_file.read().encode()).hexdigest()
hash_of_file = hash_of_file.replace("/", "_")  # Replace / with _ to prevent the filename from being interpreted as a directory path
return hash_of_file
def write_covarstruct_file(cofactors: str) -> str:
"""
Given list of cofactors (as comma-delimited string), write
a comma-delimited file where the first column consists of cofactor names
and the second column indicates whether they're numerical or categorical
"""
trait_datatype_json = None
with database_connection(get_setting("SQL_URI")) as conn, conn.cursor() as cursor:
cursor.execute("SELECT value FROM TraitMetadata WHERE type='trait_data_type'")
trait_datatype_json = json.loads(cursor.fetchone()[0])
covar_struct_file = io.StringIO()
writer = csv.writer(covar_struct_file, delimiter="\t", quoting=csv.QUOTE_NONE)
for cofactor in cofactors.split(","):
datatype = trait_datatype_json[cofactor] if cofactor in trait_datatype_json else "numerical"
cofactor_name = cofactor.split(":")[0]
writer.writerow([cofactor_name, datatype])
hash_of_file = get_hash_of_textio(covar_struct_file)
file_path = TMPDIR + hash_of_file + ".csv"
with open(file_path, "w") as fd:
covar_struct_file.seek(0)
shutil.copyfileobj(covar_struct_file, fd)
return file_path
def METHOD_NAME(trait_name: str,
samples: List[str],
vals: List,
dataset_ob,
cofactors: Optional[str] = None,
perm_strata_list: Optional[List] = None) -> TextIO:
"""Given trait name, sample list, value list, dataset ob, and optional string
representing cofactors, return the file's full path/name
"""
cofactor_data = cofactors_to_dict(cofactors, dataset_ob, samples)
pheno_file = io.StringIO()
writer = csv.writer(pheno_file, delimiter="\t", quoting=csv.QUOTE_NONE)
header_row = ["Samples", trait_name]
header_row += [cofactor for cofactor in cofactor_data]
if perm_strata_list:
header_row.append("Strata")
writer.writerow(header_row)
for i, sample in enumerate(samples):
this_row = [sample]
if vals[i] != "x":
this_row.append(str(round(float(vals[i]), 3)))
else:
this_row.append("NA")
for cofactor in cofactor_data:
this_row.append(cofactor_data[cofactor][i])
if perm_strata_list:
this_row.append(perm_strata_list[i])
writer.writerow(this_row)
hash_of_file = get_hash_of_textio(pheno_file)
file_path = TMPDIR + hash_of_file + ".csv"
with open(file_path, "w") as fd:
pheno_file.seek(0)
shutil.copyfileobj(pheno_file, fd)
return file_path
def cofactors_to_dict(cofactors: str, dataset_ob, samples) -> Dict:
"""Given a string of cofactors, the trait being mapped's dataset ob,
and list of samples, return cofactor data as a Dict
"""
cofactor_dict = {}
if cofactors:
dataset_ob.group.get_samplelist(redis_conn=get_redis_conn())
sample_list = dataset_ob.group.samplelist
for cofactor in cofactors.split(","):
cofactor_name, cofactor_dataset = cofactor.split(":")
if cofactor_dataset == dataset_ob.name:
cofactor_dict[cofactor_name] = []
trait_ob = create_trait(dataset=dataset_ob,
name=cofactor_name)
sample_data = trait_ob.data
for index, sample in enumerate(samples):
if sample in sample_data:
sample_value = str(round(float(sample_data[sample].value), 3))
cofactor_dict[cofactor_name].append(sample_value)
else:
cofactor_dict[cofactor_name].append("NA")
return cofactor_dict | null |
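# --- Illustrative sketch, not part of the original module ---
# Shows how get_hash_of_textio() above derives a deterministic temp-file name
# from an in-memory phenotype file; the sample row and the TMPDIR value used
# here are hypothetical. Runnable with the standard library only.
import hashlib
import io

_buf = io.StringIO()
_buf.write("Samples\ttrait1\nBXD1\t12.345\n")
_buf.seek(0)
_name = hashlib.md5(_buf.read().encode()).hexdigest().replace("/", "_")
print("/tmp/" + _name + ".csv")  # mirrors how file_path is built above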
set reset address | #
# This file is part of LiteX.
#
# Copyright (c) 2018 Jean-François Nguyen <[email protected]>
# Copyright (c) 2018-2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import os
import subprocess
from migen import *
from litex.gen import *
from litex import get_data_mod
from litex.soc.interconnect import wishbone
from litex.soc.cores.cpu import CPU, CPU_GCC_TRIPLE_RISCV32
# Variants -----------------------------------------------------------------------------------------
CPU_VARIANTS = ["standard"]
# Minerva ------------------------------------------------------------------------------------------
class Minerva(CPU):
category = "softcore"
family = "riscv"
name = "minerva"
human_name = "Minerva"
variants = CPU_VARIANTS
data_width = 32
endianness = "little"
gcc_triple = CPU_GCC_TRIPLE_RISCV32
linker_output_format = "elf32-littleriscv"
nop = "nop"
io_regions = {0x8000_0000: 0x8000_0000} # Origin, Length.
# GCC Flags.
@property
def gcc_flags(self):
flags = "-march=rv32im "
flags += "-mabi=ilp32 "
flags += "-D__minerva__ "
return flags
def __init__(self, platform, variant="standard"):
self.platform = platform
self.variant = variant
self.reset = Signal()
self.interrupt = Signal(32)
self.ibus = ibus = wishbone.Interface()
self.dbus = dbus = wishbone.Interface()
self.periph_buses = [self.ibus, self.dbus] # Peripheral buses (Connected to main SoC's bus).
self.memory_buses = [] # Memory buses (Connected directly to LiteDRAM).
# # #
self.cpu_params = dict(
# Clk / Rst.
i_clk = ClockSignal("sys"),
i_rst = ResetSignal("sys") | self.reset,
# IRQ.
i_timer_interrupt = 0,
i_software_interrupt = 0,
i_external_interrupt = self.interrupt,
# Ibus.
o_ibus__stb = ibus.stb,
o_ibus__cyc = ibus.cyc,
o_ibus__cti = ibus.cti,
o_ibus__bte = ibus.bte,
o_ibus__we = ibus.we,
o_ibus__adr = ibus.adr,
o_ibus__dat_w = ibus.dat_w,
o_ibus__sel = ibus.sel,
i_ibus__ack = ibus.ack,
i_ibus__err = ibus.err,
i_ibus__dat_r = ibus.dat_r,
# Dbus.
o_dbus__stb = dbus.stb,
o_dbus__cyc = dbus.cyc,
o_dbus__cti = dbus.cti,
o_dbus__bte = dbus.bte,
o_dbus__we = dbus.we,
o_dbus__adr = dbus.adr,
o_dbus__dat_w = dbus.dat_w,
o_dbus__sel = dbus.sel,
i_dbus__ack = dbus.ack,
i_dbus__err = dbus.err,
i_dbus__dat_r = dbus.dat_r,
)
def METHOD_NAME(self, reset_address):
self.reset_address = reset_address
@staticmethod
def elaborate(reset_address, with_icache, with_dcache, with_muldiv, verilog_filename):
cli_params = []
cli_params.append("--reset-addr={}".format(reset_address))
if with_icache:
cli_params.append("--with-icache")
if with_dcache:
cli_params.append("--with-dcache")
if with_muldiv:
cli_params.append("--with-muldiv")
cli_params.append("generate")
cli_params.append("--type=v")
sdir = get_data_mod("cpu", "minerva").data_location
if subprocess.call(["python3", os.path.join(sdir, "cli.py"), *cli_params],
stdout=open(verilog_filename, "w")):
raise OSError("Unable to elaborate Minerva CPU, please check your Amaranth/Yosys install")
def do_finalize(self):
assert hasattr(self, "reset_address")
verilog_filename = os.path.join(self.platform.output_dir, "gateware", "minerva.v")
self.elaborate(
reset_address = self.reset_address,
with_icache = True,
with_dcache = True,
with_muldiv = True,
verilog_filename = verilog_filename)
self.platform.add_source(verilog_filename)
self.specials += Instance("minerva_cpu", **self.cpu_params) | null |
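# --- Illustrative sketch, not part of the original module ---
# Reproduces, standalone, the Amaranth CLI parameter list that elaborate()
# above assembles; the reset address shown is a placeholder value.
reset_address = 0x0000_0000  # placeholder
cli_params = ["--reset-addr={}".format(reset_address),
              "--with-icache", "--with-dcache", "--with-muldiv",
              "generate", "--type=v"]
print("python3 cli.py " + " ".join(cli_params))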
ishow image2 | # -*- coding: utf-8 -*-
import logging
import utool as ut
import wbia.plottool as pt # NOQA
from wbia.plottool import interact_annotations
print, rrr, profile = ut.inject2(__name__)
logger = logging.getLogger('wbia')
# DESTROY_OLD_WINDOW = True
DESTROY_OLD_WINDOW = False
def METHOD_NAME(ibs, gid, fnum=None, dodraw=True):
r"""
Args:
ibs (IBEISController): wbia controller object
gid (int):
dodraw (bool):
CommandLine:
python -m wbia.viz.interact.interact_annotations2 --test-ishow_image2 --show
Example:
>>> # DISABLE_DOCTEST
>>> from wbia.viz.interact.interact_annotations2 import * # NOQA
>>> import wbia
>>> # build test data
>>> ibs = wbia.opendb('testdb1')
>>> gid = 2
>>> dodraw = True
>>> # execute function
>>> self = ishow_image2(ibs, gid, dodraw)
>>> # verify results
>>> result = str(self)
>>> print(result)
>>> pt.show_if_requested()
"""
self = ANNOTATION_Interaction2(ibs, gid, fnum=fnum, dodraw=dodraw)
return self
class ANNOTATION_Interaction2(object):
def __init__(
self,
ibs,
gid,
next_callback=None,
prev_callback=None,
rows_updated_callback=None,
reset_window=True,
dodraw=True,
fnum=None,
):
"""
TODO: rename to interact image annotations?
"""
self.ibs = ibs
self.gid = gid
self.rows_updated_callback = rows_updated_callback
img = ibs.get_images(self.gid)
self.aid_list = ibs.get_image_aids(self.gid)
bbox_list = ibs.get_annot_bboxes(self.aid_list)
# verts_list = ibs.get_annot_verts(self.aid_list) # TODO
theta_list = ibs.get_annot_thetas(self.aid_list)
species_list = ibs.get_annot_species_texts(self.aid_list)
# valid_species = ibs.get_all_species_texts()
valid_species = [tup[1] for tup in ibs.get_working_species()]
metadata_list = [ibs.get_annot_lazy_dict(aid) for aid in self.aid_list]
for metadata in metadata_list:
# eager eval on name
metadata['name']
if True:
interact_annotations.rrr()
self.interact_ANNOTATIONS = interact_annotations.AnnotationInteraction(
img,
bbox_list=bbox_list,
theta_list=theta_list,
species_list=species_list,
metadata_list=metadata_list,
commit_callback=self.commit_callback,
# TODO: get default species in a better way
default_species=self.ibs.cfg.detect_cfg.species_text,
next_callback=next_callback,
prev_callback=prev_callback,
fnum=fnum,
valid_species=valid_species,
# figure_to_use=None if reset_window else self.interact_ANNOTATIONS.fig,
)
if dodraw:
self.interact_ANNOTATIONS.start()
# pt.update()
def commit_callback(
self,
unchanged_indices,
deleted_indices,
changed_indices,
changed_annottups,
new_annottups,
):
"""
TODO: Rename to commit_callback
Callback from interact_annotations to ibs for when data is modified
"""
logger.info('[interact_annot2] enter commit_callback')
logger.info(
'[interact_annot2] nUnchanged=%d, nDelete=%d, nChanged=%d, nNew=%d'
% (
len(unchanged_indices),
len(deleted_indices),
len(changed_indices),
len(new_annottups),
)
)
rows_updated = False
# Delete annotations
if len(deleted_indices) > 0:
rows_updated = True
deleted_aids = [self.aid_list[del_index] for del_index in deleted_indices]
logger.info('[interact_annot2] deleted_indexes: {!r}'.format(deleted_indices))
logger.info('[interact_annot2] deleted_aids: {!r}'.format(deleted_aids))
self.ibs.delete_annots(deleted_aids)
# Set/Change annotations
if len(changed_annottups) > 0:
changed_aid = [self.aid_list[index] for index in changed_indices]
bbox_list1 = [bbox for (bbox, t, s) in changed_annottups]
theta_list1 = [t for (bbox, t, s) in changed_annottups]
species_list1 = [s for (bbox, t, s) in changed_annottups]
logger.info('[interact_annot2] changed_indexes: {!r}'.format(changed_indices))
logger.info('[interact_annot2] changed_aid: {!r}'.format(changed_aid))
self.ibs.set_annot_species(changed_aid, species_list1)
self.ibs.set_annot_thetas(changed_aid, theta_list1, delete_thumbs=False)
self.ibs.set_annot_bboxes(changed_aid, bbox_list1, delete_thumbs=True)
# Add annotations
if len(new_annottups) > 0:
# new_annottups is a list of tuples [((x, y, w, h), theta, species), ...]
rows_updated = True
bbox_list2 = [bbox for (bbox, t, s) in new_annottups]
theta_list2 = [t for (bbox, t, s) in new_annottups]
species_list2 = [s for (bbox, t, s) in new_annottups]
gid_list = [self.gid] * len(new_annottups)
new_aids = self.ibs.add_annots(
gid_list,
bbox_list=bbox_list2,
theta_list=theta_list2,
species_list=species_list2,
)
logger.info('[interact_annot2] new_indexes: {!r}'.format(new_annottups))
logger.info('[interact_annot2] new_aids: {!r}'.format(new_aids))
logger.info('[interact_annot2] about to exit callback')
if rows_updated and self.rows_updated_callback is not None:
self.rows_updated_callback()
logger.info('[interact_annot2] exit callback')
def update_image_and_callbacks(self, gid, nextcb, prevcb, do_save=True):
if do_save:
# save the current changes when pressing next or previous
self.interact_ANNOTATIONS.save_and_exit(None, do_close=False)
if DESTROY_OLD_WINDOW:
ANNOTATION_Interaction2.__init__(
self,
self.ibs,
gid,
next_callback=nextcb,
prev_callback=prevcb,
rows_updated_callback=self.rows_updated_callback,
reset_window=False,
)
else:
if True:
self.interact_ANNOTATIONS.rrr()
ibs = self.ibs
self.gid = gid
img = ibs.get_images(self.gid)
self.aid_list = ibs.get_image_aids(self.gid)
bbox_list = ibs.get_annot_bboxes(self.aid_list)
theta_list = ibs.get_annot_thetas(self.aid_list)
species_list = ibs.get_annot_species_texts(self.aid_list)
metadata_list = [ibs.get_annot_lazy_dict(aid) for aid in self.aid_list]
self.interact_ANNOTATIONS.update_image_and_callbacks(
img,
bbox_list=bbox_list,
theta_list=theta_list,
species_list=species_list,
metadata_list=metadata_list,
next_callback=nextcb,
prev_callback=prevcb,
) | null |
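# --- Illustrative sketch, not part of the original module ---
# Demonstrates the (bbox, theta, species) tuple shape that commit_callback()
# above unpacks; the sample annotations below are made up.
changed_annottups = [((0, 0, 10, 10), 0.0, 'zebra_plains'),
                     ((5, 5, 20, 20), 1.57, 'giraffe')]
bbox_list1 = [bbox for (bbox, t, s) in changed_annottups]
theta_list1 = [t for (bbox, t, s) in changed_annottups]
species_list1 = [s for (bbox, t, s) in changed_annottups]
print(bbox_list1, theta_list1, species_list1)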
create job | import os
import time
from typing import Dict, Union
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from mage_ai.services.k8s.config import K8sExecutorConfig
from mage_ai.services.k8s.constants import DEFAULT_NAMESPACE, KUBE_POD_NAME_ENV_VAR
from mage_ai.shared.hash import merge_dict
class JobManager():
def __init__(
self,
job_name: str = 'mage-job',
namespace: str = DEFAULT_NAMESPACE,
logger=None,
logging_tags: Dict = None,
):
self.job_name = job_name
self.namespace = namespace
self.logger = logger
self.logging_tags = logging_tags or dict()
self.load_config()
self.batch_api_client = client.BatchV1Api()
self.api_version = 'batch/v1'
self.core_api_client = client.CoreV1Api()
self.pod_config = self.core_api_client.read_namespaced_pod(
name=os.getenv(KUBE_POD_NAME_ENV_VAR),
namespace=self.namespace,
)
@classmethod
def load_config(cls) -> bool:
try:
config.load_incluster_config()
return True
except Exception:
pass
try:
config.load_kube_config()
return True
except Exception:
pass
return False
def run_job(
self,
command: str,
k8s_config: Union[K8sExecutorConfig, Dict] = None,
):
if not self.job_exists():
if type(k8s_config) is dict:
k8s_config = K8sExecutorConfig.load(config=k8s_config)
job = self.create_job_object(
command,
k8s_config=k8s_config,
)
self.METHOD_NAME(job)
api_response = None
job_completed = False
while not job_completed:
api_response = self.batch_api_client.read_namespaced_job_status(
name=self.job_name,
namespace=self.namespace
)
if api_response.status.succeeded is not None or \
api_response.status.failed is not None:
job_completed = True
time.sleep(5)
# self._print(f'Job {self.job_name} status={api_response.status}')
self.delete_job()
self._print(f'Job {self.job_name} status={api_response.status}')
if api_response.status.succeeded is None:
raise Exception(f'Failed to execute k8s job {self.job_name}')
def create_job_object(
self,
command: str,
k8s_config: K8sExecutorConfig = None,
) -> client.V1Job:
# Configure the Pod template container
mage_server_container_spec = self.pod_config.spec.containers[0]
container_kwargs = dict(
name='mage-job-container',
image=mage_server_container_spec.image,
image_pull_policy='IfNotPresent',
command=command.split(' ') if isinstance(command, str) else command,
env=mage_server_container_spec.env,
volume_mounts=mage_server_container_spec.volume_mounts,
)
if k8s_config and (k8s_config.resource_limits or k8s_config.resource_requests):
resource_kwargs = dict()
if k8s_config.resource_limits:
resource_kwargs['limits'] = k8s_config.resource_limits
if k8s_config.resource_requests:
resource_kwargs['requests'] = k8s_config.resource_requests
container_kwargs['resources'] = client.V1ResourceRequirements(
**resource_kwargs,
)
if k8s_config and k8s_config.container_config:
container_kwargs = merge_dict(container_kwargs, k8s_config.container_config)
container = client.V1Container(
**container_kwargs,
)
# Create and configure the Pod spec section
pod_spec = dict(
containers=[container],
image_pull_secrets=self.pod_config.spec.image_pull_secrets,
restart_policy='Never',
volumes=self.pod_config.spec.volumes,
)
if k8s_config and k8s_config.service_account_name:
pod_spec['service_account_name'] = k8s_config.service_account_name
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={'name': self.job_name}),
spec=client.V1PodSpec(**pod_spec),
)
# Create the specification of the Job
spec = client.V1JobSpec(template=template, backoff_limit=0)
# Instantiate the job object
job = client.V1Job(
api_version=self.api_version,
kind='Job',
metadata=client.V1ObjectMeta(name=self.job_name),
spec=spec)
return job
def METHOD_NAME(self, job):
api_response = self.batch_api_client.create_namespaced_job(
body=job,
namespace=self.namespace,
)
self._print(f"Job created. status='{api_response.status}'")
def delete_job(self):
try:
api_response = self.batch_api_client.delete_namespaced_job(
name=self.job_name,
namespace=self.namespace,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=0))
self._print("Job deleted. status='%s'" % str(api_response.status))
except Exception as e:
self._print(f'Failed to delete job {self.job_name} with error {e}')
def job_exists(self):
try:
self.batch_api_client.read_namespaced_job(
name=self.job_name,
namespace=self.namespace
)
return True
except ApiException:
pass
return False
def _print(self, message, **kwargs):
if self.logger is None:
print(message, **kwargs)
else:
self.logger.info(message, **merge_dict(self.logging_tags, kwargs)) | null |
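# --- Illustrative sketch, not part of the original module ---
# Builds, standalone, the same kind of V1Job object that create_job_object()
# above assembles; the image and command are placeholders and nothing is
# submitted to a cluster.
from kubernetes import client

_container = client.V1Container(
    name='mage-job-container',
    image='example/mage:latest',  # placeholder image
    command=['echo', 'hello'],
)
_template = client.V1PodTemplateSpec(
    metadata=client.V1ObjectMeta(labels={'name': 'mage-job'}),
    spec=client.V1PodSpec(containers=[_container], restart_policy='Never'),
)
_job = client.V1Job(
    api_version='batch/v1',
    kind='Job',
    metadata=client.V1ObjectMeta(name='mage-job'),
    spec=client.V1JobSpec(template=_template, backoff_limit=0),
)
print(_job.metadata.name)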
check handshake timeout | import ssl
import sys
from collections import deque
from collections.abc import Callable
from enum import Enum
from typing import Any, ClassVar
from typing_extensions import Literal, TypeAlias
from . import constants, events, futures, protocols, transports
def _create_transport_context(server_side: bool, server_hostname: str | None) -> ssl.SSLContext: ...
if sys.version_info >= (3, 11):
SSLAgainErrors: tuple[type[ssl.SSLWantReadError], type[ssl.SSLSyscallError]]
class SSLProtocolState(Enum):
UNWRAPPED: str
DO_HANDSHAKE: str
WRAPPED: str
FLUSHING: str
SHUTDOWN: str
class AppProtocolState(Enum):
STATE_INIT: str
STATE_CON_MADE: str
STATE_EOF: str
STATE_CON_LOST: str
def add_flowcontrol_defaults(high: int | None, low: int | None, kb: int) -> tuple[int, int]: ...
else:
_UNWRAPPED: Literal["UNWRAPPED"]
_DO_HANDSHAKE: Literal["DO_HANDSHAKE"]
_WRAPPED: Literal["WRAPPED"]
_SHUTDOWN: Literal["SHUTDOWN"]
if sys.version_info < (3, 11):
class _SSLPipe:
max_size: ClassVar[int]
_context: ssl.SSLContext
_server_side: bool
_server_hostname: str | None
_state: str
_incoming: ssl.MemoryBIO
_outgoing: ssl.MemoryBIO
_sslobj: ssl.SSLObject | None
_need_ssldata: bool
_handshake_cb: Callable[[BaseException | None], None] | None
_shutdown_cb: Callable[[], None] | None
def __init__(self, context: ssl.SSLContext, server_side: bool, server_hostname: str | None = None) -> None: ...
@property
def context(self) -> ssl.SSLContext: ...
@property
def ssl_object(self) -> ssl.SSLObject | None: ...
@property
def need_ssldata(self) -> bool: ...
@property
def wrapped(self) -> bool: ...
def do_handshake(self, callback: Callable[[BaseException | None], object] | None = None) -> list[bytes]: ...
def shutdown(self, callback: Callable[[], object] | None = None) -> list[bytes]: ...
def feed_eof(self) -> None: ...
def feed_ssldata(self, data: bytes, only_handshake: bool = False) -> tuple[list[bytes], list[bytes]]: ...
def feed_appdata(self, data: bytes, offset: int = 0) -> tuple[list[bytes], int]: ...
class _SSLProtocolTransport(transports._FlowControlMixin, transports.Transport):
_sendfile_compatible: ClassVar[constants._SendfileMode]
_loop: events.AbstractEventLoop
if sys.version_info >= (3, 11):
_ssl_protocol: SSLProtocol | None
else:
_ssl_protocol: SSLProtocol
_closed: bool
def __init__(self, loop: events.AbstractEventLoop, ssl_protocol: SSLProtocol) -> None: ...
def get_extra_info(self, name: str, default: Any | None = None) -> dict[str, Any]: ...
@property
def _protocol_paused(self) -> bool: ...
def write(self, data: bytes | bytearray | memoryview) -> None: ...
def can_write_eof(self) -> Literal[False]: ...
if sys.version_info >= (3, 11):
def get_write_buffer_limits(self) -> tuple[int, int]: ...
def get_read_buffer_limits(self) -> tuple[int, int]: ...
def set_read_buffer_limits(self, high: int | None = None, low: int | None = None) -> None: ...
def get_read_buffer_size(self) -> int: ...
if sys.version_info >= (3, 11):
_SSLProtocolBase: TypeAlias = protocols.BufferedProtocol
else:
_SSLProtocolBase: TypeAlias = protocols.Protocol
class SSLProtocol(_SSLProtocolBase):
_server_side: bool
_server_hostname: str | None
_sslcontext: ssl.SSLContext
_extra: dict[str, Any]
_write_backlog: deque[tuple[bytes, int]]
_write_buffer_size: int
_waiter: futures.Future[Any]
_loop: events.AbstractEventLoop
_app_transport: _SSLProtocolTransport
_transport: transports.BaseTransport | None
_ssl_handshake_timeout: int | None
_app_protocol: protocols.BaseProtocol
_app_protocol_is_buffer: bool
if sys.version_info >= (3, 11):
max_size: ClassVar[int]
else:
_sslpipe: _SSLPipe | None
_session_established: bool
_call_connection_made: bool
_in_handshake: bool
_in_shutdown: bool
if sys.version_info >= (3, 11):
def __init__(
self,
loop: events.AbstractEventLoop,
app_protocol: protocols.BaseProtocol,
sslcontext: ssl.SSLContext,
waiter: futures.Future[Any],
server_side: bool = False,
server_hostname: str | None = None,
call_connection_made: bool = True,
ssl_handshake_timeout: int | None = None,
ssl_shutdown_timeout: float | None = None,
) -> None: ...
else:
def __init__(
self,
loop: events.AbstractEventLoop,
app_protocol: protocols.BaseProtocol,
sslcontext: ssl.SSLContext,
waiter: futures.Future[Any],
server_side: bool = False,
server_hostname: str | None = None,
call_connection_made: bool = True,
ssl_handshake_timeout: int | None = None,
) -> None: ...
def _set_app_protocol(self, app_protocol: protocols.BaseProtocol) -> None: ...
def _wakeup_waiter(self, exc: BaseException | None = None) -> None: ...
def connection_lost(self, exc: BaseException | None) -> None: ...
def eof_received(self) -> None: ...
def _get_extra_info(self, name: str, default: Any | None = None) -> Any: ...
def _start_shutdown(self) -> None: ...
if sys.version_info >= (3, 11):
def _write_appdata(self, list_of_data: list[bytes]) -> None: ...
else:
def _write_appdata(self, data: bytes) -> None: ...
def _start_handshake(self) -> None: ...
def METHOD_NAME(self) -> None: ...
def _on_handshake_complete(self, handshake_exc: BaseException | None) -> None: ...
def _fatal_error(self, exc: BaseException, message: str = "Fatal error on transport") -> None: ...
def _abort(self) -> None: ...
if sys.version_info >= (3, 11):
def get_buffer(self, n: int) -> memoryview: ...
else:
def _finalize(self) -> None: ...
def _process_write_backlog(self) -> None: ... | null |
test non image upload | # Copyright (c) 2012-2016 Seafile Ltd.
import os.path
from django.test import TestCase
from django.urls import reverse
from django.conf import settings
from seahub.base.accounts import User
from seahub.avatar.settings import AVATAR_DEFAULT_URL, AVATAR_MAX_AVATARS_PER_USER
from seahub.avatar.util import get_primary_avatar
from seahub.avatar.models import Avatar
from seahub.test_utils import Fixtures
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
def upload_helper(o, filename):
f = open(os.path.join(o.testdatapath, filename), "rb")
response = o.client.post(reverse('avatar_add'), {
'avatar': f,
}, follow=True)
f.close()
return response
class AvatarTestCase(TestCase, Fixtures):
"""
Helper base class for all the following test cases.
"""
def setUp(self):
self.testdatapath = os.path.join(os.path.dirname(__file__), "testdata")
self.user = self.create_user('[email protected]', 'testpassword', is_active=True)
response = self.client.post('/accounts/login/', {
'username': '[email protected]',
'password': 'testpassword',
},
)
self.assertEqual(response.status_code, 302)
self.assertTrue(response['Location'].endswith(settings.LOGIN_REDIRECT_URL))
Image.init()
def tearDown(self):
self.user.delete()
class AvatarUploadTests(AvatarTestCase):
def METHOD_NAME(self):
response = upload_helper(self, "nonimagefile")
self.assertEqual(response.status_code, 200)
self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
def testNormalImageUpload(self):
response = upload_helper(self, "test.png")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.redirect_chain), 1)
self.assertEqual(response.context['upload_avatar_form'].errors, {})
avatar = get_primary_avatar(self.user)
self.assertNotEqual(avatar, None)
def testImageWithoutExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithoutext")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
def testImageWithWrongExtension(self):
# use with AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')
response = upload_helper(self, "imagefilewithwrongext.ogg")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
def testImageTooBig(self):
# use with AVATAR_MAX_SIZE = 1024 * 1024
response = upload_helper(self, "testbig.png")
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.redirect_chain), 0) # Redirect only if it worked
self.assertNotEqual(response.context['upload_avatar_form'].errors, {})
def testDefaultUrl(self):
response = self.client.get(reverse('avatar_render_primary', kwargs={
'user': self.user.username,
'size': 80,
}))
loc = response['Location']
base_url = getattr(settings, 'STATIC_URL', None)
if not base_url:
base_url = settings.MEDIA_URL
self.assertTrue(base_url in loc)
self.assertTrue(loc.endswith(AVATAR_DEFAULT_URL))
def testNonExistingUser(self):
a = get_primary_avatar("[email protected]")
self.assertEqual(a, None)
def testThereCanBeOnlyOnePrimaryAvatar(self):
for i in range(1, 10):
self.testNormalImageUpload()
count = Avatar.objects.filter(emailuser=self.user, primary=True).count()
self.assertEqual(count, 1)
# def testDeleteAvatar(self):
# self.testNormalImageUpload()
# avatar = Avatar.objects.filter(emailuser=self.user)
# self.failUnlessEqual(len(avatar), 1)
# response = self.client.post(reverse('avatar_delete'), {
# 'choices': [avatar[0].id],
# }, follow=True)
# self.failUnlessEqual(response.status_code, 200)
# self.failUnlessEqual(len(response.redirect_chain), 1)
# count = Avatar.objects.filter(emailuser=self.user).count()
# self.failUnlessEqual(count, 0)
# def testDeletePrimaryAvatarAndNewPrimary(self):
# self.testThereCanBeOnlyOnePrimaryAvatar()
# primary = get_primary_avatar(self.emailuser)
# oid = primary.id
# response = self.client.post(reverse('avatar_delete'), {
# 'choices': [oid],
# })
# primaries = Avatar.objects.filter(emailuser=self.user, primary=True)
# self.failUnlessEqual(len(primaries), 1)
# self.failIfEqual(oid, primaries[0].id)
# avatars = Avatar.objects.filter(emailuser=self.user)
# self.failUnlessEqual(avatars[0].id, primaries[0].id)
# def testTooManyAvatars(self):
# for i in range(0, AVATAR_MAX_AVATARS_PER_USER):
# self.testNormalImageUpload()
# count_before = Avatar.objects.filter(emailuser=self.user).count()
# response = upload_helper(self, "test.png")
# print response.redirect_chain
# count_after = Avatar.objects.filter(emailuser=self.user).count()
# self.failUnlessEqual(response.status_code, 200)
# self.failUnlessEqual(len(response.redirect_chain), 0) # Redirect only if it worked
# self.failIfEqual(response.context['upload_avatar_form'].errors, {})
# self.failUnlessEqual(count_before, count_after)
# def testAvatarOrder
# def testReplaceAvatarWhenMaxIsOne
# def testHashFileName
# def testHashUserName
# def testChangePrimaryAvatar
# def testDeleteThumbnailAndRecreation
# def testAutomaticThumbnailCreation | null |
test time range indicator 114 | # Copyright iris-grib contributors
#
# This file is part of iris-grib and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for :func:`iris_grib._grib1_load_rules.grib1_convert`."""
# Import iris_grib.tests first so that some things can be initialised before
# importing anything else
import iris_grib.tests as tests
import eccodes
from unittest import mock
from iris.aux_factory import HybridPressureFactory
from iris.exceptions import TranslationError
from iris.fileformats.rules import Reference
from iris_grib import GribWrapper
from iris_grib._grib1_load_rules import grib1_convert
from iris_grib.tests.unit import TestField
class TestBadEdition(tests.IrisGribTest):
def test(self):
message = mock.Mock(edition=2)
emsg = 'GRIB edition 2 is not supported'
with self.assertRaisesRegex(TranslationError, emsg):
grib1_convert(message)
class TestBoundedTime(TestField):
@staticmethod
def is_forecast_period(coord):
return (coord.standard_name == 'forecast_period' and
coord.units == 'hours')
@staticmethod
def is_time(coord):
return (coord.standard_name == 'time' and
coord.units == 'hours since epoch')
def assert_bounded_message(self, **kwargs):
attributes = {'productDefinitionTemplateNumber': 0,
'edition': 1, '_forecastTime': 15,
'_forecastTimeUnit': 'hours',
'phenomenon_bounds': lambda u: (80, 120),
'_phenomenonDateTime': -1,
'table2Version': 9999,
'_originatingCentre': 'xxx',
}
attributes.update(kwargs)
message = mock.Mock(**attributes)
self._test_for_coord(message, grib1_convert, self.is_forecast_period,
expected_points=[35],
expected_bounds=[[15, 55]])
self._test_for_coord(message, grib1_convert, self.is_time,
expected_points=[100],
expected_bounds=[[80, 120]])
def assert_bounded_message_3hours(self, **kwargs):
attributes = {'productDefinitionTemplateNumber': 0,
'edition': 1, '_forecastTime': 252,
'_forecastTimeUnit': '3 hours',
'phenomenon_bounds': lambda u: (252, 258),
'_phenomenonDateTime': -1,
'table2Version': 9999,
'_originatingCentre': 'xxx',
}
attributes.update(kwargs)
message = mock.Mock(**attributes)
self._test_for_coord(message, grib1_convert, self.is_forecast_period,
expected_points=[255],
expected_bounds=[[252, 258]])
self._test_for_coord(message, grib1_convert, self.is_time,
expected_points=[255],
expected_bounds=[[252, 258]])
def test_time_range_indicator_2(self):
self.assert_bounded_message(timeRangeIndicator=2)
self.assert_bounded_message_3hours(timeRangeIndicator=2)
def test_time_range_indicator_3(self):
self.assert_bounded_message(timeRangeIndicator=3)
self.assert_bounded_message_3hours(timeRangeIndicator=3)
def test_time_range_indicator_4(self):
self.assert_bounded_message(timeRangeIndicator=4)
self.assert_bounded_message_3hours(timeRangeIndicator=4)
def test_time_range_indicator_5(self):
self.assert_bounded_message(timeRangeIndicator=5)
self.assert_bounded_message_3hours(timeRangeIndicator=5)
def test_time_range_indicator_51(self):
self.assert_bounded_message(timeRangeIndicator=51)
self.assert_bounded_message_3hours(timeRangeIndicator=51)
def test_time_range_indicator_113(self):
self.assert_bounded_message(timeRangeIndicator=113)
self.assert_bounded_message_3hours(timeRangeIndicator=113)
def METHOD_NAME(self):
self.assert_bounded_message(timeRangeIndicator=114)
self.assert_bounded_message_3hours(timeRangeIndicator=114)
def test_time_range_indicator_115(self):
self.assert_bounded_message(timeRangeIndicator=115)
self.assert_bounded_message_3hours(timeRangeIndicator=115)
def test_time_range_indicator_116(self):
self.assert_bounded_message(timeRangeIndicator=116)
self.assert_bounded_message_3hours(timeRangeIndicator=116)
def test_time_range_indicator_117(self):
self.assert_bounded_message(timeRangeIndicator=117)
self.assert_bounded_message_3hours(timeRangeIndicator=117)
def test_time_range_indicator_118(self):
self.assert_bounded_message(timeRangeIndicator=118)
self.assert_bounded_message_3hours(timeRangeIndicator=118)
def test_time_range_indicator_123(self):
self.assert_bounded_message(timeRangeIndicator=123)
self.assert_bounded_message_3hours(timeRangeIndicator=123)
def test_time_range_indicator_124(self):
self.assert_bounded_message(timeRangeIndicator=124)
self.assert_bounded_message_3hours(timeRangeIndicator=124)
def test_time_range_indicator_125(self):
self.assert_bounded_message(timeRangeIndicator=125)
self.assert_bounded_message_3hours(timeRangeIndicator=125)
class Test_GribLevels(tests.IrisTest):
def test_grib1_hybrid_height(self):
gm = eccodes.codes_grib_new_from_samples('regular_gg_ml_grib1')
gw = GribWrapper(gm)
results = grib1_convert(gw)
factory, = results[0]
self.assertEqual(factory.factory_class, HybridPressureFactory)
delta, sigma, ref = factory.args
self.assertEqual(delta, {'long_name': 'level_pressure'})
self.assertEqual(sigma, {'long_name': 'sigma'})
self.assertEqual(ref, Reference(name='surface_pressure'))
coords_and_dims = results[8]
coord, = [co for co, _ in coords_and_dims
if co.name() == 'model_level_number']
self.assertEqual(coord.units, '1')
self.assertEqual(coord.attributes['positive'], 'up')
coord, = [co for co, _ in coords_and_dims
if co.name() == 'level_pressure']
self.assertEqual(coord.units, 'Pa')
coord, = [co for co, _ in coords_and_dims
if co.name() == 'sigma']
self.assertEqual(coord.units, '1')
if __name__ == "__main__":
tests.main() | null |
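# --- Illustrative note, not part of the original test module ---
# In the fixtures above the expected coordinate point is the midpoint of the
# expected bounds, e.g. forecast_period bounds [15, 55] -> point 35 and time
# bounds [80, 120] -> point 100. A quick arithmetic check:
for lower, upper, point in [(15, 55, 35), (80, 120, 100), (252, 258, 255)]:
    assert (lower + upper) / 2 == point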
get logging | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.profiles import get_sdk
from ._client_factory import generic_data_service_factory
from .profiles import CUSTOM_DATA_STORAGE
class ServiceProperties(object):
def __init__(self, cli_ctx, name, service, account_name=None, account_key=None, connection_string=None,
sas_token=None):
self.cli_ctx = cli_ctx
self.name = name
self.client = generic_data_service_factory(cli_ctx, service, name=account_name, key=account_key,
connection_string=connection_string, sas_token=sas_token)
if not self.client:
from knack.util import CLIError
raise CLIError('Failed to initialize data client.')
def get_service_properties(self):
return getattr(self.client, 'get_{}_service_properties'.format(self.name))
def set_service_properties(self):
return getattr(self.client, 'set_{}_service_properties'.format(self.name))
def METHOD_NAME(self, timeout=None):
return self.get_service_properties()(timeout=timeout).__dict__['logging']
def set_logging(self, read, write, delete, retention, timeout=None):
t_logging, t_retention_policy = get_sdk(self.cli_ctx, CUSTOM_DATA_STORAGE, 'Logging', 'RetentionPolicy',
mod='common.models')
retention_policy = t_retention_policy(enabled=retention != 0, days=retention)
logging = t_logging(delete, read, write, retention_policy)
return self.set_service_properties()(logging=logging, timeout=timeout)
def get_cors(self, timeout=None):
return self.get_service_properties()(timeout=timeout).__dict__['cors']
def add_cors(self, origins, methods, max_age, exposed_headers=None, allowed_headers=None, timeout=None):
from azure.common import AzureHttpError
t_cors_rule = get_sdk(self.cli_ctx, CUSTOM_DATA_STORAGE, 'CorsRule', mod='common.models')
cors = self.get_cors(timeout)
new_rule = t_cors_rule(origins, methods, max_age, exposed_headers, allowed_headers)
cors.append(new_rule)
try:
return self.set_service_properties()(cors=cors, timeout=timeout)
except AzureHttpError as ex:
# The service issue: https://msazure.visualstudio.com/DefaultCollection/One/_workitems/edit/1247479.
# This workaround can be removed once the service is updated.
if ex.status_code == 400 and len(cors) > 5:
from knack.util import CLIError
raise CLIError('Failed to add CORS rules. No more than 5 CORS rule can be added.')
raise ex
def clear_cors(self, timeout=None):
return self.set_service_properties()(cors=[], timeout=timeout)
def get_metrics(self, interval, timeout=None):
props = self.get_service_properties()(timeout=timeout)
metrics = {}
if interval == 'both':
metrics['hour'] = props.__dict__['hour_metrics']
metrics['minute'] = props.__dict__['minute_metrics']
else:
metrics[interval] = props.__dict__['{}_metrics'.format(interval)]
return metrics
def set_metrics(self, retention, hour, minute, api=None, timeout=None):
t_metrics, t_retention_policy = get_sdk(self.cli_ctx, CUSTOM_DATA_STORAGE, 'Metrics', 'RetentionPolicy',
mod='common.models')
retention_policy = t_retention_policy(enabled=retention != 0, days=retention)
hour_metrics = t_metrics(hour, api, retention_policy) if hour is not None else None
minute_metrics = t_metrics(minute, api, retention_policy) if minute is not None else None
return self.set_service_properties()(
hour_metrics=hour_metrics, minute_metrics=minute_metrics, timeout=timeout) | null |
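# --- Illustrative sketch, not part of the original module ---
# Demonstrates the getattr-based dispatch used above: the service name is
# spliced into the SDK method name at call time. The client below is a stub,
# not the real storage data client.
class _StubBlobClient:
    def get_blob_service_properties(self, timeout=None):
        return {'logging': 'stub-logging-settings'}

_name = 'blob'
_getter = getattr(_StubBlobClient(), 'get_{}_service_properties'.format(_name))
print(_getter(timeout=30))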
check distribution | import copy
import unittest
import numpy as np
from pydrake.common import RandomGenerator
import pydrake.common.schema as mut
import pydrake.math
class TestSchema(unittest.TestCase):
def METHOD_NAME(self, dut):
"""Confirms that a subclass instance of Distribution
* binds the base methods, and
* supports copy/deepcopy (even though the superclass does not).
"""
self.assertIsInstance(dut, mut.Distribution)
copy.copy(dut)
copy.deepcopy(dut)
dut.Sample(generator=RandomGenerator())
dut.Mean()
dut.ToSymbolic()
def test_deterministic(self):
mut.Deterministic()
mut.Deterministic(0.5)
dut = mut.Deterministic(value=1.0)
dut.value = 2.0
self.METHOD_NAME(dut)
def test_gaussian(self):
mut.Gaussian()
mut.Gaussian(0.5, 0.2)
dut = mut.Gaussian(mean=1.0, stddev=0.1)
dut.mean = 2.0
dut.stddev = 0.2
self.METHOD_NAME(dut)
def test_uniform(self):
mut.Uniform()
mut.Uniform(-0.5, 0.5)
dut = mut.Uniform(min=-1.0, max=1.0)
dut.min = -2.0
dut.max = 2.0
self.METHOD_NAME(dut)
def test_uniform_discrete(self):
mut.UniformDiscrete()
mut.UniformDiscrete([0.0, 1.0])
dut = mut.UniformDiscrete(values=[0.0, 0.1])
dut.values = [0.0, 0.2]
self.METHOD_NAME(dut)
def test_distribution_variant(self):
"""Confirms that the free functions that operate on a variant are
bound."""
items = [
mut.Deterministic(1.0),
mut.Gaussian(1.0, 0.1),
mut.Uniform(-1.0, 1.0),
mut.UniformDiscrete([0.0, 1.0]),
]
for item in items:
copied = mut.ToDistribution(item)
self.METHOD_NAME(copied)
mut.Sample(var=item, generator=RandomGenerator())
mut.Mean(var=item)
mut.ToSymbolic(var=item)
if mut.IsDeterministic(var=item):
mut.GetDeterministicValue(var=item)
def _check_distribution_vector(self, dut):
"""Confirms that a subclass instance of DistributionVector
* binds the base methods, and
* supports copy/deepcopy (even though the superclass does not).
"""
self.assertIsInstance(dut, mut.DistributionVector)
copy.copy(dut)
copy.deepcopy(dut)
dut.Sample(generator=RandomGenerator())
dut.Mean()
dut.ToSymbolic()
def test_deterministic_vector(self):
mut.DeterministicVectorX()
mut.DeterministicVectorX([0.1, 0.2])
dut = mut.DeterministicVectorX(value=[0.0, 1.0])
dut.value = [0.0, 2.0]
self._check_distribution_vector(dut)
def test_gaussian_vector(self):
mut.GaussianVectorX()
mut.GaussianVectorX([-0.5, 0.5], [0.2, 0.2])
dut = mut.GaussianVectorX(mean=[-1.0, 1.0], stddev=[0.1, 0.1])
dut.mean = [-2.0, 2.0]
dut.stddev = [0.2, 0.2]
self._check_distribution_vector(dut)
def test_uniform_vector(self):
mut.UniformVectorX()
mut.UniformVectorX([-0.5, -5.0], [0.5, 5.0])
dut = mut.UniformVectorX(min=[-1.0, -10.0], max=[1.0, 10.0])
dut.min = [-2.0, -20.0]
dut.max = [2.0, 20.0]
self._check_distribution_vector(dut)
def test_sized_vectors(self):
"""Spot check the fixed-size stochastic vectors."""
for size in [None, 1, 2, 3, 4, 5, 6]:
vec_data = [1.0] * (size or 3)
for template in [mut.DeterministicVector,
mut.GaussianVector,
mut.UniformVector]:
with self.subTest(template=template, size=size):
dut_cls = template[size]
if template == mut.DeterministicVector:
init_args = [vec_data]
else:
init_args = [vec_data, vec_data]
dut = dut_cls(*init_args)
self._check_distribution_vector(dut)
def test_distribution_vector_variant(self):
"""Confirms that the free functions that operate on a vector variant
are bound."""
items = [
mut.DeterministicVectorX(value=[1.0]),
mut.GaussianVectorX(mean=[1.0], stddev=[0.1]),
mut.UniformVectorX(min=[-1.0], max=[1.0]),
mut.DeterministicVector[3](value=[1.0]*3),
mut.GaussianVector[3](mean=[1.0]*3, stddev=[0.1]*3),
mut.UniformVector[3](min=[-1.0]*3, max=[1.0]*3),
]
for item in items:
copied = mut.ToDistributionVector(item)
self._check_distribution_vector(copied)
if mut.IsDeterministic(vec=item):
mut.GetDeterministicValue(vec=item)
def test_rotation(self):
for dut in [mut.Rotation(),
mut.Rotation(pydrake.math.RotationMatrix()),
mut.Rotation(pydrake.math.RollPitchYaw([0.0, 0.0, 0.0]))]:
# The dut should be the identity.
self.assertTrue(dut.IsDeterministic())
rotmat = dut.GetDeterministicValue()
self.assertTrue(rotmat.IsExactlyIdentity())
# All getter functions work without crashing.
dut.ToSymbolic()
# The class is copyable and has a real repr.
mut.Rotation(other=dut)
copy.copy(dut)
copy.deepcopy(dut)
self.assertIn("value", repr(dut))
# Setters.
dut.set_rpy_deg([0.1, 0.2, 0.3])
np.testing.assert_equal(dut.value.deg, [0.1, 0.2, 0.3])
# Attributes.
self.assertIsInstance(
mut.Rotation(value=mut.Rotation.AngleAxis()).value,
mut.Rotation.AngleAxis)
def test_rotation_nested_classes(self):
# The class is copyable and has a real repr.
for dut_cls in [mut.Rotation.Identity,
mut.Rotation.Uniform,
mut.Rotation.Rpy,
mut.Rotation.AngleAxis]:
dut = dut_cls()
dut_cls(other=dut)
copy.copy(dut)
copy.deepcopy(dut)
self.assertNotIn("0x", repr(dut))
# Properties are bound.
np.testing.assert_equal(mut.Rotation.Rpy(deg=[1, 2, 3]).deg, [1, 2, 3])
self.assertEqual(mut.Rotation.AngleAxis(angle_deg=5).angle_deg, 5)
def test_transform(self):
for dut in [mut.Transform(),
mut.Transform(pydrake.math.RigidTransform())]:
# The dut should be the identity.
self.assertIsNone(dut.base_frame)
self.assertTrue(dut.IsDeterministic())
np.testing.assert_equal(dut.translation, [0, 0, 0])
rotmat = dut.rotation.GetDeterministicValue()
self.assertTrue(rotmat.IsExactlyIdentity())
# All getter functions work without crashing.
dut.ToSymbolic()
dut.Mean()
dut.Sample(generator=RandomGenerator())
# The class is copyable and has a real repr.
mut.Transform(other=dut)
copy.copy(dut)
copy.deepcopy(dut)
self.assertIn("base_frame", repr(dut))
# Setters.
dut.base_frame = "name"
self.assertEqual(dut.base_frame, "name")
dut.translation = [1.0, 2.0, 3.0]
np.testing.assert_equal(dut.translation, [1.0, 2.0, 3.0])
dut.set_rotation_rpy_deg([0.1, 0.2, 0.3])
np.testing.assert_equal(dut.rotation.value.deg, [0.1, 0.2, 0.3])
dut.rotation.value.deg = [0.4, 0.5, 0.6]
np.testing.assert_equal(dut.rotation.value.deg, [0.4, 0.5, 0.6])
# Attributes.
self.assertEqual(mut.Transform(base_frame="base").base_frame, "base") | null |
test no optional args | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the :class:`iris.coord_systems.Geostationary` class."""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import cartopy.crs as ccrs
from iris.coord_systems import GeogCS, Geostationary
class Test(tests.IrisTest):
def setUp(self):
# Set everything to non-default values.
self.latitude_of_projection_origin = 0 # For now, Cartopy needs =0.
self.longitude_of_projection_origin = 123.0
self.perspective_point_height = 9999.0
self.sweep_angle_axis = "x"
self.false_easting = 100.0
self.false_northing = -200.0
self.semi_major_axis = 4000.0
self.semi_minor_axis = 3900.0
self.ellipsoid = GeogCS(self.semi_major_axis, self.semi_minor_axis)
self.globe = ccrs.Globe(
semimajor_axis=self.semi_major_axis,
semiminor_axis=self.semi_minor_axis,
ellipse=None,
)
# Actual and expected coord system can be re-used for
# Geostationary.test_crs_creation and test_projection_creation.
self.expected = ccrs.Geostationary(
central_longitude=self.longitude_of_projection_origin,
satellite_height=self.perspective_point_height,
false_easting=self.false_easting,
false_northing=self.false_northing,
globe=self.globe,
sweep_axis=self.sweep_angle_axis,
)
self.geo_cs = Geostationary(
self.latitude_of_projection_origin,
self.longitude_of_projection_origin,
self.perspective_point_height,
self.sweep_angle_axis,
self.false_easting,
self.false_northing,
self.ellipsoid,
)
def test_crs_creation(self):
res = self.geo_cs.as_cartopy_crs()
self.assertEqual(res, self.expected)
def test_projection_creation(self):
res = self.geo_cs.as_cartopy_projection()
self.assertEqual(res, self.expected)
def test_non_zero_lat(self):
with self.assertRaisesRegex(ValueError, "Non-zero latitude"):
Geostationary(
0.1,
self.longitude_of_projection_origin,
self.perspective_point_height,
self.sweep_angle_axis,
self.false_easting,
self.false_northing,
self.ellipsoid,
)
def test_invalid_sweep(self):
with self.assertRaisesRegex(ValueError, "Invalid sweep_angle_axis"):
Geostationary(
self.latitude_of_projection_origin,
self.longitude_of_projection_origin,
self.perspective_point_height,
"a",
self.false_easting,
self.false_northing,
self.ellipsoid,
)
def test_set_optional_args(self):
# Check that setting the optional (non-ellipse) args works.
crs = Geostationary(
0, 0, 1000, "y", false_easting=100, false_northing=-200
)
self.assertEqualAndKind(crs.false_easting, 100.0)
self.assertEqualAndKind(crs.false_northing, -200.0)
def _check_crs_defaults(self, crs):
# Check for property defaults when no kwargs options were set.
# NOTE: except ellipsoid, which is done elsewhere.
self.assertEqualAndKind(crs.false_easting, 0.0)
self.assertEqualAndKind(crs.false_northing, 0.0)
def METHOD_NAME(self):
# Check expected defaults with no optional args.
crs = Geostationary(0, 0, 1000, "y")
self._check_crs_defaults(crs)
def test_optional_args_None(self):
# Check expected defaults with optional args=None.
crs = Geostationary(
0, 0, 1000, "y", false_easting=None, false_northing=None
)
self._check_crs_defaults(crs)
if __name__ == "__main__":
tests.main() | null |
test preprocess fn returns correct element | # Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from absl.testing import parameterized
import tensorflow as tf
from tensorflow_federated.python.core.backends.native import execution_contexts
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines.stackoverflow import tag_prediction_preprocessing
TEST_DATA = collections.OrderedDict(
creation_date=(['unused date']),
score=([tf.constant(0, dtype=tf.int64)]),
tags=(['B']),
title=(['C']),
tokens=(['A']),
type=(['unused type']),
)
def _compute_length_of_dataset(ds):
return ds.reduce(0, lambda x, _: x + 1)
class ToIDsFnTest(tf.test.TestCase):
def test_word_tokens_to_ids_without_oov(self):
word_vocab = ['A', 'B', 'C']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'A B C', 'title': '', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [1 / 3, 1 / 3, 1 / 3])
def test_word_tokens_to_ids_with_duplicates_without_oov(self):
word_vocab = ['A', 'B', 'C']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'A B C A A C B B B', 'title': '', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [1 / 3, 4 / 9, 2 / 9])
def test_word_tokens_to_ids_with_oov(self):
word_vocab = ['A', 'B']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'A B C', 'title': '', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [1 / 2, 1 / 2])
def test_word_tokens_to_ids_with_duplicates_and_oov(self):
word_vocab = ['A', 'B']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'A B C A C C A B', 'title': '', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [3 / 5, 2 / 5])
def test_word_tokens_all_oov(self):
word_vocab = ['A', 'B']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'C D E F G', 'title': '', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [0, 0])
def test_tag_tokens_to_ids_without_oov(self):
word_vocab = ['A', 'B', 'C']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': '', 'title': '', 'tags': 'D|E|F'}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[1]), [1, 1, 1])
def test_tag_tokens_to_ids_with_oov(self):
word_vocab = ['A', 'B', 'C']
tag_vocab = ['D', 'E']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': '', 'title': '', 'tags': 'D|E|F'}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[1]), [1, 1])
def test_join_word_tokens_with_title(self):
word_vocab = ['A', 'B', 'C']
tag_vocab = ['D', 'E', 'F']
to_ids_fn = tag_prediction_preprocessing.build_to_ids_fn(
word_vocab, tag_vocab
)
data = {'tokens': 'A B C', 'title': 'A B', 'tags': ''}
processed = to_ids_fn(data)
self.assertAllClose(self.evaluate(processed[0]), [2 / 5, 2 / 5, 1 / 5])
class PreprocessFnTest(tf.test.TestCase, parameterized.TestCase):
def test_preprocess_fn_with_empty_word_vocab_raises(self):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(ValueError, 'word_vocab must be non-empty'):
tag_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, word_vocab=[], tag_vocab=['B']
)
def test_preprocess_fn_with_empty_tag_vocab_raises(self):
preprocess_spec = client_spec.ClientSpec(num_epochs=1, batch_size=1)
with self.assertRaisesRegex(ValueError, 'tag_vocab must be non-empty'):
tag_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, word_vocab=['A'], tag_vocab=[]
)
@parameterized.named_parameters(
('num_epochs_1_batch_size_1', 1, 1),
('num_epochs_4_batch_size_2', 4, 2),
('num_epochs_9_batch_size_3', 9, 3),
('num_epochs_12_batch_size_1', 12, 1),
('num_epochs_3_batch_size_5', 3, 5),
('num_epochs_7_batch_size_2', 7, 2),
)
def test_ds_length_is_ceil_num_epochs_over_batch_size(
self, num_epochs, batch_size
):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=num_epochs, batch_size=batch_size
)
preprocess_fn = tag_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, word_vocab=['A'], tag_vocab=['B']
)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
tf.cast(tf.math.ceil(num_epochs / batch_size), tf.int32),
)
def METHOD_NAME(self):
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
word_vocab = ['A', 'B', 'C']
word_vocab_size = len(word_vocab)
tag_vocab = ['A', 'B']
tag_vocab_size = len(tag_vocab)
preprocess_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=1, shuffle_buffer_size=1
)
preprocess_fn = tag_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, word_vocab=word_vocab, tag_vocab=tag_vocab
)
preprocessed_ds = preprocess_fn(ds)
expected_element_x_spec_shape = (None, word_vocab_size)
expected_element_y_spec_shape = (None, tag_vocab_size)
self.assertEqual(
preprocessed_ds.element_spec,
(
tf.TensorSpec(expected_element_x_spec_shape, dtype=tf.float32),
tf.TensorSpec(expected_element_y_spec_shape, dtype=tf.float32),
),
)
element = next(iter(preprocessed_ds))
expected_element_x = tf.constant([[0.5, 0.0, 0.5]])
expected_element_y = tf.constant([[0.0, 1.0]])
self.assertAllClose(
element, (expected_element_x, expected_element_y), rtol=1e-6
)
@parameterized.named_parameters(
('max_elements1', 1),
('max_elements3', 3),
('max_elements7', 7),
('max_elements11', 11),
('max_elements18', 18),
)
def test_ds_length_with_max_elements(self, max_elements):
repeat_size = 10
ds = tf.data.Dataset.from_tensor_slices(TEST_DATA)
preprocess_spec = client_spec.ClientSpec(
num_epochs=repeat_size, batch_size=1, max_elements=max_elements
)
preprocess_fn = tag_prediction_preprocessing.create_preprocess_fn(
preprocess_spec, word_vocab=['A'], tag_vocab=['B']
)
preprocessed_ds = preprocess_fn(ds)
self.assertEqual(
_compute_length_of_dataset(preprocessed_ds),
min(repeat_size, max_elements),
)
if __name__ == '__main__':
execution_contexts.set_sync_local_cpp_execution_context()
tf.test.main() | null |
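# --- Illustrative sketch, not part of the original test module ---
# Re-derives the expected bag-of-words fractions asserted above using plain
# Python: count in-vocabulary tokens and divide by the in-vocabulary total.
# This re-derives the expected values only; it is not the library's
# implementation.
from collections import Counter

def _token_fractions(text, word_vocab):
    tokens = [t for t in text.split() if t in word_vocab]
    counts = Counter(tokens)
    total = len(tokens) or 1
    return [counts[w] / total for w in word_vocab]

assert _token_fractions('A B C A A C B B B', ['A', 'B', 'C']) == [1/3, 4/9, 2/9]
assert _token_fractions('A B C A C C A B', ['A', 'B']) == [3/5, 2/5]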
clone template | # Copyright © Michal Čihař <[email protected]>
#
# SPDX-License-Identifier: GPL-3.0-or-later
"""Translate Toolkit based file-format wrappers for multi string support."""
from __future__ import annotations
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy
from weblate.checks.flags import Flags
from weblate.trans.util import get_string
from .base import TranslationUnit
from .ttkit import CSVUtf8Format
class MultiUnit(TranslationUnit):
def __init__(self, parent, unit, template=None):
super().__init__(parent, None, None)
self.units = [unit]
def merge(self, unit):
self.units.append(unit)
self._invalidate_target()
@cached_property
def locations(self):
return ", ".join(unit.locations for unit in self.units)
@cached_property
def source(self):
return get_string(unit.source for unit in self.units)
@cached_property
def target(self):
return get_string(unit.target for unit in self.units)
@cached_property
def context(self):
# Context should be the same for all units
return self.units[0].context
@cached_property
def id_hash(self):
# The hash should be the same for all units
return self.units[0].id_hash
@cached_property
def notes(self):
return "\n".join(unit.notes for unit in self.units if unit.notes)
def is_translated(self):
return any(unit.is_translated() for unit in self.units)
def is_fuzzy(self, fallback=False):
return any(unit.is_fuzzy(fallback) for unit in self.units)
def has_content(self):
return any(unit.has_content() for unit in self.units)
def is_readonly(self):
return any(unit.is_readonly() for unit in self.units)
def set_target(self, target: str | list[str]):
"""Set translation unit target."""
self._invalidate_target()
# Make sure we have a list
if isinstance(target, str):
target = [target]
# Remove any extra units
while len(target) < len(self.units):
last = self.units.pop()
self.parent.store.removeunit(last.unit)
# Add missing units
while len(target) > len(self.units):
new = self.parent.create_unit(self.context, self.units[0].source)
self.units.append(
self.parent.unit_class(self.parent, new, self.units[0].template)
)
self.parent.store.addunit(new)
for i, value in enumerate(target):
self.units[i].set_target(value)
def set_state(self, state):
for unit in self.units:
unit.set_state(state)
@cached_property
def flags(self):
flags = Flags()
for unit in self.units:
flags.merge(unit.flags)
return flags.format()
def has_unit(self) -> bool:
return all(unit.has_unit() for unit in self.units)
def METHOD_NAME(self):
for unit in self.units:
if not unit.has_unit():
unit.METHOD_NAME()
def untranslate(self, language):
for unit in self.units:
unit.untranslate(language)
class MultiFormatMixin:
has_multiple_strings: bool = True
def merge_multi(self, iterable):
result = {}
for unit in iterable:
id_hash = unit.id_hash
if id_hash in result:
result[id_hash].merge(unit)
else:
if not isinstance(unit, MultiUnit):
unit = MultiUnit(unit.parent, unit)
result[id_hash] = unit
return list(result.values())
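    # Illustrative sketch (not in the original source): given units u1, u2, u3 where
    # u1 and u3 share an id_hash, merge_multi yields two MultiUnit wrappers in
    # first-seen order, one holding [u1, u3] and one holding [u2].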
@cached_property
def template_units(self):
return self.merge_multi(super().template_units)
def _get_all_bilingual_units(self):
return self.merge_multi(super()._get_all_bilingual_units())
def _build_monolingual_unit(self, unit):
try:
matching = self._template_index[unit.id_hash]
except KeyError:
return MultiUnit(self, self.unit_class(self, None, unit.units[0].template))
matching_units = [unit.template for unit in matching.units]
result = MultiUnit(
self, self.unit_class(self, matching_units[0], unit.units[0].template)
)
for extra in matching_units[1:]:
result.merge(self.unit_class(self, extra, unit.units[0].template))
return result
def _get_all_monolingual_units(self):
return self.merge_multi(super()._get_all_monolingual_units())
class MultiCSVUtf8Format(MultiFormatMixin, CSVUtf8Format):
name = gettext_lazy("Multivalue CSV file (UTF-8)")
format_id = "csv-multi-utf-8" | null |
id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetParametersByPathResult',
'AwaitableGetParametersByPathResult',
'get_parameters_by_path',
'get_parameters_by_path_output',
]
@pulumi.output_type
class GetParametersByPathResult:
"""
A collection of values returned by getParametersByPath.
"""
def __init__(__self__, arns=None, METHOD_NAME=None, names=None, path=None, recursive=None, types=None, values=None, with_decryption=None):
if arns and not isinstance(arns, list):
raise TypeError("Expected argument 'arns' to be a list")
pulumi.set(__self__, "arns", arns)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if names and not isinstance(names, list):
raise TypeError("Expected argument 'names' to be a list")
pulumi.set(__self__, "names", names)
if path and not isinstance(path, str):
raise TypeError("Expected argument 'path' to be a str")
pulumi.set(__self__, "path", path)
if recursive and not isinstance(recursive, bool):
raise TypeError("Expected argument 'recursive' to be a bool")
pulumi.set(__self__, "recursive", recursive)
if types and not isinstance(types, list):
raise TypeError("Expected argument 'types' to be a list")
pulumi.set(__self__, "types", types)
if values and not isinstance(values, list):
raise TypeError("Expected argument 'values' to be a list")
pulumi.set(__self__, "values", values)
if with_decryption and not isinstance(with_decryption, bool):
raise TypeError("Expected argument 'with_decryption' to be a bool")
pulumi.set(__self__, "with_decryption", with_decryption)
@property
@pulumi.getter
def arns(self) -> Sequence[str]:
return pulumi.get(self, "arns")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def names(self) -> Sequence[str]:
return pulumi.get(self, "names")
@property
@pulumi.getter
def path(self) -> str:
return pulumi.get(self, "path")
@property
@pulumi.getter
def recursive(self) -> Optional[bool]:
return pulumi.get(self, "recursive")
@property
@pulumi.getter
def types(self) -> Sequence[str]:
return pulumi.get(self, "types")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@property
@pulumi.getter(name="withDecryption")
def with_decryption(self) -> Optional[bool]:
return pulumi.get(self, "with_decryption")
class AwaitableGetParametersByPathResult(GetParametersByPathResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetParametersByPathResult(
arns=self.arns,
METHOD_NAME=self.METHOD_NAME,
names=self.names,
path=self.path,
recursive=self.recursive,
types=self.types,
values=self.values,
with_decryption=self.with_decryption)
def get_parameters_by_path(path: Optional[str] = None,
recursive: Optional[bool] = None,
with_decryption: Optional[bool] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetParametersByPathResult:
"""
Provides SSM Parameters by path.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_parameters_by_path(path="/foo")
```
> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
**Note:** The data source is currently following the behavior of the [SSM API](https://docs.aws.amazon.com/sdk-for-go/api/service/ssm/#Parameter) to return a string value, regardless of parameter type. For type `StringList`, we can use the built-in split() function to get values in a list. Example: `split(",", data.aws_ssm_parameter.subnets.value)`
:param str path: Prefix path of the parameter.
:param bool recursive: Whether to recursively return parameters under `path`. Defaults to `false`.
In addition to all arguments above, the following attributes are exported:
:param bool with_decryption: Whether to return decrypted `SecureString` value. Defaults to `true`.
"""
__args__ = dict()
__args__['path'] = path
__args__['recursive'] = recursive
__args__['withDecryption'] = with_decryption
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ssm/getParametersByPath:getParametersByPath', __args__, opts=opts, typ=GetParametersByPathResult).value
return AwaitableGetParametersByPathResult(
arns=pulumi.get(__ret__, 'arns'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
names=pulumi.get(__ret__, 'names'),
path=pulumi.get(__ret__, 'path'),
recursive=pulumi.get(__ret__, 'recursive'),
types=pulumi.get(__ret__, 'types'),
values=pulumi.get(__ret__, 'values'),
with_decryption=pulumi.get(__ret__, 'with_decryption'))
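# Illustrative usage (not part of the generated module); the result lists are
# positionally aligned, so a name -> value mapping can be built as, e.g.:
#
#   result = get_parameters_by_path(path="/foo", recursive=True)
#   params = dict(zip(result.names, result.values))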
@_utilities.lift_output_func(get_parameters_by_path)
def get_parameters_by_path_output(path: Optional[pulumi.Input[str]] = None,
recursive: Optional[pulumi.Input[Optional[bool]]] = None,
with_decryption: Optional[pulumi.Input[Optional[bool]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetParametersByPathResult]:
"""
Provides SSM Parameters by path.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_parameters_by_path(path="/foo")
```
> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
**Note:** The data source is currently following the behavior of the [SSM API](https://docs.aws.amazon.com/sdk-for-go/api/service/ssm/#Parameter) to return a string value, regardless of parameter type. For type `StringList`, we can use the built-in split() function to get values in a list. Example: `split(",", data.aws_ssm_parameter.subnets.value)`
:param str path: Prefix path of the parameter.
:param bool recursive: Whether to recursively return parameters under `path`. Defaults to `false`.
In addition to all arguments above, the following attributes are exported:
:param bool with_decryption: Whether to return decrypted `SecureString` value. Defaults to `true`.
"""
... | null |
find docs to be removed | import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Type
import numpy as np
from llama_index.bridge.pydantic import Field
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
VectorStore,
VectorStoreQuery,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import (
legacy_metadata_dict_to_node,
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = logging.getLogger(__name__)
class DocArrayVectorStore(VectorStore, ABC):
"""DocArray Vector Store Base Class.
This is an abstract base class for creating a DocArray vector store.
The subclasses should implement _init_index and _find_docs_to_be_removed methods.
"""
# for mypy. will get initialized by the subclass.
_index: Any
_schema: Any
_ref_docs: Dict[str, List[str]]
stores_text: bool = True
flat_metadata: bool = False
def _update_ref_docs(self, docs) -> None: # type: ignore[no-untyped-def]
pass
@abstractmethod
def _init_index(self, **kwargs: Any): # type: ignore[no-untyped-def]
"""Initializes the index.
This method should be overridden by the subclasses.
"""
pass
@abstractmethod
def METHOD_NAME(self, doc_id: str) -> List[str]:
"""Finds the documents to be removed from the vector store.
Args:
doc_id (str): Document ID that should be removed.
Returns:
List[str]: List of document IDs to be removed.
This is an abstract method and needs to be implemented in any concrete subclass.
"""
pass
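    # Illustrative sketch (not from the original source): a concrete subclass would
    # typically track which node ids belong to each ref_doc_id, for example in
    # self._ref_docs = {"doc-1": ["node-a", "node-b"]}, and have this method return
    # self._ref_docs.get(doc_id, []); the ids shown here are hypothetical.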
@property
def client(self) -> Any:
"""Get client."""
return None
def num_docs(self) -> int:
"""Retrieves the number of documents in the index.
Returns:
int: The number of documents in the index.
"""
return self._index.num_docs()
@staticmethod
def _get_schema(**embeddings_params: Any) -> Type:
"""Fetches the schema for DocArray indices.
Args:
**embeddings_params: Variable length argument list for the embedding.
Returns:
DocArraySchema: Schema for a DocArray index.
"""
from docarray import BaseDoc
from docarray.typing import ID, NdArray
class DocArraySchema(BaseDoc):
id: Optional[ID] = None
text: Optional[str] = None
metadata: Optional[dict] = None
embedding: NdArray = Field(**embeddings_params)
return DocArraySchema
def add(
self,
nodes: List[BaseNode],
) -> List[str]:
"""Adds nodes to the vector store.
Args:
nodes (List[BaseNode]): List of nodes with embeddings.
Returns:
List[str]: List of document IDs added to the vector store.
"""
from docarray import DocList
# check to see if empty document list was passed
if len(nodes) == 0:
return []
docs = DocList[self._schema]( # type: ignore[name-defined]
self._schema(
id=node.node_id,
metadata=node_to_metadata_dict(node, flat_metadata=self.flat_metadata),
text=node.get_content(metadata_mode=MetadataMode.NONE),
embedding=node.get_embedding(),
)
for node in nodes
)
self._index.index(docs)
logger.info(f"Successfully added {len(docs)} documents to the index")
if self._ref_docs is not None:
self._update_ref_docs(docs)
return [doc.id for doc in docs]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Deletes a document from the vector store.
Args:
ref_doc_id (str): Document ID to be deleted.
**delete_kwargs (Any): Additional arguments to pass to the delete method.
"""
docs_to_be_removed = self.METHOD_NAME(ref_doc_id)
if not docs_to_be_removed:
logger.warning(f"Document with doc_id {ref_doc_id} not found")
return
del self._index[docs_to_be_removed]
logger.info(f"Deleted {len(docs_to_be_removed)} documents from the index")
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Queries the vector store and retrieves the results.
Args:
query (VectorStoreQuery): Query for the vector store.
Returns:
VectorStoreQueryResult: Result of the query from vector store.
"""
if query.filters:
# only for ExactMatchFilters
filter_query = {
"metadata__" + filter.key: {"$eq": filter.value}
for filter in query.filters.filters
}
query = (
self._index.build_query() # get empty query object
.find(
query=self._schema(embedding=np.array(query.query_embedding)),
search_field="embedding",
limit=query.similarity_top_k,
) # add vector similarity search
.filter(filter_query=filter_query) # add filter search
.build() # build the query
)
# execute the combined query and return the results
docs, scores = self._index.execute_query(query)
else:
docs, scores = self._index.find(
query=self._schema(embedding=np.array(query.query_embedding)),
search_field="embedding",
limit=query.similarity_top_k,
)
nodes, ids = [], []
for doc in docs:
try:
node = metadata_dict_to_node(doc.metadata)
node.text = doc.text
except Exception:
# TODO: legacy metadata support
metadata, node_info, relationships = legacy_metadata_dict_to_node(
doc.metadata
)
node = TextNode(
id_=doc.id,
text=doc.text,
metadata=metadata,
start_char_idx=node_info.get("start", None),
end_char_idx=node_info.get("end", None),
relationships=relationships,
)
nodes.append(node)
ids.append(doc.id)
logger.info(f"Found {len(nodes)} results for the query")
return VectorStoreQueryResult(nodes=nodes, ids=ids, similarities=scores) | null |
enhance | import numpy as np
import torch
import torch.nn.functional as F
from torchaudio.transforms import Resample
from .nsf_hifigan.nvSTFT import STFT
from .nsf_hifigan.models import load_model
class Enhancer:
def __init__(self, enhancer_type, enhancer_ckpt, device=None):
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
self.device = device
if enhancer_type == "nsf-hifigan":
self.enhancer = NsfHifiGAN(enhancer_ckpt, device=self.device)
else:
raise ValueError(f" [x] Unknown enhancer: {enhancer_type}")
self.resample_kernel = {}
self.enhancer_sample_rate = self.enhancer.sample_rate()
self.enhancer_hop_size = self.enhancer.hop_size()
def METHOD_NAME(self, audio, sample_rate, f0, hop_size, adaptive_key=0, silence_front=0): # 1, T # 1, n_frames, 1
# enhancer start time
start_frame = int(silence_front * sample_rate / hop_size)
real_silence_front = start_frame * hop_size / sample_rate
audio = audio[:, int(np.round(real_silence_front * sample_rate)) :]
f0 = f0[:, start_frame:, :]
# adaptive parameters
if adaptive_key == "auto":
adaptive_key = 12 * np.log2(float(torch.max(f0) / 760))
adaptive_key = max(0, np.ceil(adaptive_key))
print("auto_adaptive_key: " + str(int(adaptive_key)))
else:
adaptive_key = float(adaptive_key)
adaptive_factor = 2 ** (-adaptive_key / 12)
adaptive_sample_rate = 100 * int(np.round(self.enhancer_sample_rate / adaptive_factor / 100))
real_factor = self.enhancer_sample_rate / adaptive_sample_rate
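        # Worked example (added, not in the original): adaptive_key == 12 gives
        # adaptive_factor == 0.5, so the DDSP output is resampled to roughly twice the
        # enhancer's native rate (rounded to a multiple of 100 Hz) and f0 is scaled by
        # the matching real_factor further down.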
# resample the ddsp output
if sample_rate == adaptive_sample_rate:
audio_res = audio
else:
key_str = str(sample_rate) + str(adaptive_sample_rate)
if key_str not in self.resample_kernel:
self.resample_kernel[key_str] = Resample(sample_rate, adaptive_sample_rate, lowpass_filter_width=128).to(self.device)
audio_res = self.resample_kernel[key_str](audio)
n_frames = int(audio_res.size(-1) // self.enhancer_hop_size + 1)
# resample f0
if hop_size == self.enhancer_hop_size and sample_rate == self.enhancer_sample_rate and sample_rate == adaptive_sample_rate:
f0_res = f0.squeeze(-1) # 1, n_frames
else:
f0_np = f0.squeeze(0).squeeze(-1).cpu().numpy()
f0_np *= real_factor
time_org = (hop_size / sample_rate) * np.arange(len(f0_np)) / real_factor
time_frame = (self.enhancer_hop_size / self.enhancer_sample_rate) * np.arange(n_frames)
f0_res = np.interp(time_frame, time_org, f0_np, left=f0_np[0], right=f0_np[-1])
f0_res = torch.from_numpy(f0_res).unsqueeze(0).float().to(self.device) # 1, n_frames
# enhance
enhanced_audio, enhancer_sample_rate = self.enhancer(audio_res, f0_res)
# resample the enhanced output
if adaptive_sample_rate != enhancer_sample_rate:
key_str = str(adaptive_sample_rate) + str(enhancer_sample_rate)
if key_str not in self.resample_kernel:
self.resample_kernel[key_str] = Resample(adaptive_sample_rate, enhancer_sample_rate, lowpass_filter_width=128).to(self.device)
enhanced_audio = self.resample_kernel[key_str](enhanced_audio)
# pad the silence frames
if start_frame > 0:
enhanced_audio = F.pad(enhanced_audio, (int(np.round(enhancer_sample_rate * real_silence_front)), 0))
return enhanced_audio, enhancer_sample_rate
class NsfHifiGAN(torch.nn.Module):
def __init__(self, model_path, device=None):
super().__init__()
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
self.device = device
print("| Load HifiGAN: ", model_path)
self.model, self.h = load_model(model_path, device=self.device)
self.stft = STFT(self.h.sampling_rate, self.h.num_mels, self.h.n_fft, self.h.win_size, self.h.hop_size, self.h.fmin, self.h.fmax)
def sample_rate(self):
return self.h.sampling_rate
def hop_size(self):
return self.h.hop_size
def forward(self, audio, f0):
with torch.no_grad():
mel = self.stft.get_mel(audio)
enhanced_audio = self.model(mel, f0[:, : mel.size(-1)])
return enhanced_audio, self.h.sampling_rate | null |
complete | """
Utilities for end-users.
"""
from __future__ import absolute_import
import __main__
from collections import namedtuple
import logging
import traceback
import re
import os
import sys
from parso import split_lines
from jedi import Interpreter
from jedi.api.helpers import get_on_completion_name
READLINE_DEBUG = False
def setup_readline(namespace_module=__main__):
"""
Install Jedi completer to :mod:`readline`.
    This function sets up :mod:`readline` to use Jedi in the Python interactive
shell. If you want to use a custom ``PYTHONSTARTUP`` file (typically
``$HOME/.pythonrc.py``), you can add this piece of code::
try:
from jedi.utils import setup_readline
setup_readline()
except ImportError:
# Fallback to the stdlib readline completer if it is installed.
# Taken from http://docs.python.org/2/library/rlcompleter.html
print("Jedi is not installed, falling back to readline")
try:
import readline
import rlcompleter
readline.parse_and_bind("tab: complete")
except ImportError:
print("Readline is not installed either. No tab completion is enabled.")
This will fallback to the readline completer if Jedi is not installed.
The readline completer will only complete names in the global namespace,
so for example::
ran<TAB>
will complete to ``range``
with both Jedi and readline, but::
range(10).cou<TAB>
    will complete to ``range(10).count`` only with Jedi.
You'll also need to add ``export PYTHONSTARTUP=$HOME/.pythonrc.py`` to
your shell profile (usually ``.bash_profile`` or ``.profile`` if you use
bash).
"""
if READLINE_DEBUG:
logging.basicConfig(
filename='/tmp/jedi.log',
filemode='a',
level=logging.DEBUG
)
class JediRL(object):
def METHOD_NAME(self, text, state):
"""
This complete stuff is pretty weird, a generator would make
a lot more sense, but probably due to backwards compatibility
            this is still the way it works.
The only important part is stuff in the ``state == 0`` flow,
everything else has been copied from the ``rlcompleter`` std.
library module.
"""
if state == 0:
sys.path.insert(0, os.getcwd())
# Calling python doesn't have a path, so add to sys.path.
try:
logging.debug("Start REPL completion: " + repr(text))
interpreter = Interpreter(text, [namespace_module.__dict__])
lines = split_lines(text)
position = (len(lines), len(lines[-1]))
name = get_on_completion_name(
interpreter._module_node,
lines,
position
)
before = text[:len(text) - len(name)]
completions = interpreter.completions()
logging.debug("REPL completions: %s", completions)
except:
logging.error("REPL Completion error:\n" + traceback.format_exc())
raise
finally:
sys.path.pop(0)
self.matches = [before + c.name_with_symbols for c in completions]
try:
return self.matches[state]
except IndexError:
return None
try:
# Need to import this one as well to make sure it's executed before
# this code. This didn't use to be an issue until 3.3. Starting with
# 3.4 this is different, it always overwrites the completer if it's not
# already imported here.
import rlcompleter # noqa: F401
import readline
except ImportError:
print("Jedi: Module readline not available.")
else:
readline.set_completer(JediRL().METHOD_NAME)
readline.parse_and_bind("tab: complete")
# jedi itself does the case matching
readline.parse_and_bind("set completion-ignore-case on")
# because it's easier to hit the tab just once
readline.parse_and_bind("set show-all-if-unmodified")
readline.parse_and_bind("set show-all-if-ambiguous on")
# don't repeat all the things written in the readline all the time
readline.parse_and_bind("set completion-prefix-display-length 2")
# No delimiters, Jedi handles that.
readline.set_completer_delims('')
def version_info():
"""
Returns a namedtuple of Jedi's version, similar to Python's
``sys.version_info``.
"""
Version = namedtuple('Version', 'major, minor, micro')
from jedi import __version__
tupl = re.findall(r'[a-z]+|\d+', __version__)
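    # Added example: with __version__ == '0.10.0', re.findall returns
    # ['0', '10', '0'] and the result is Version(major=0, minor=10, micro=0).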
return Version(*[x if i == 3 else int(x) for i, x in enumerate(tupl)]) | null |
test amx check support | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
from tvm import relay
from tvm import te
import tvm.testing
from tvm.topi.x86.tensor_intrin import dot_32x128x32_u8s8s32_sapphirerapids
from tvm.topi.x86.tensor_intrin import acc_32x32_int32_sapphirerapids
import numpy as np
import pytest
@tvm.testing.requires_llvm
@pytest.mark.skip("skip due to AMX feature not available yet")
def test_amx_u8s8s32_matmul_tensorize():
m = 1024
k = 1024
n = 1024
# --------------------------Config---------------------------
# Skip this test if "-mcpu=sapphirerapids" not supported by LLVM < 12.0
target = "llvm -mcpu=sapphirerapids"
dev = tvm.device(target, 0)
if not tvm.testing.device_enabled(target):
print("skip because %s is not enabled..." % target)
return
amx_init = tvm.get_global_func("runtime.amx_init")
amx_tileconfig = tvm.get_global_func("runtime.amx_tileconfig")
assert amx_init()
assert amx_tileconfig(16, 64) # config tile size to 16 rows by 64 columns.
# --------------------------Compute--------------------------
X = te.placeholder((m, k), name="X", dtype="uint8")
ak = te.reduce_axis((0, k), name="k")
packedW = te.placeholder((n // 16, k // 4, 16, 4), name="packedW", dtype="int8")
C = te.compute(
(m, n),
lambda i, j: te.sum(
X[i, ak].astype("int32")
* packedW[tvm.tir.indexdiv(j, 16), tvm.tir.indexdiv(ak, 4), j % 16, ak % 4].astype(
"int32"
),
axis=ak,
),
name="F",
)
# --------------------------Schedule--------------------------
s = te.create_schedule(C.op)
a_x, a_y = C.op.axis
(a_k,) = C.op.reduce_axis
CF = s.cache_write(C, "amx.tmm")
a_xo, a_xi = s[C].split(a_x, factor=32)
a_yo, a_yi = s[C].split(a_y, factor=32)
s[C].reorder(a_xo, a_yo, a_xi, a_yi)
s[CF].compute_at(s[C], a_yo)
(a_k_f,) = CF.op.reduce_axis
a_x_f, a_y_f = CF.op.axis
a_xo_f, a_xi_f = s[CF].split(a_x_f, factor=32)
a_yo_f, a_yi_f = s[CF].split(a_y_f, factor=32)
a_ko_f, a_ki_f = s[CF].split(a_k_f, factor=128)
s[CF].reorder(a_ko_f, a_xo_f, a_yo_f, a_ki_f, a_xi_f, a_yi_f)
s[CF].tensorize(a_ki_f, dot_32x128x32_u8s8s32_sapphirerapids(LDA=k))
s[C].tensorize(a_xi, acc_32x32_int32_sapphirerapids(LDC=n))
lib = tvm.build(s, [X, packedW, C], target, name="intrinsic")
asm = lib.get_source("asm")
assert "tilezero" in asm
assert "tileloaddt1" in asm
assert "tdpbusd" in asm
assert "tilestored" in asm
# ----------------------- verify correctness --------------------------------
# generate the plain data
a = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
b = np.random.uniform(1, 10, size=(n, k)).astype("int8")
packW = np.random.uniform(1, 10, size=(n // 16, k // 4, 16, 4)).astype("int8")
    # This should occur in the pre_pack (constant folding) stage,
    # from plain data to blocked data (NC16n4c)
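    # Added index example: element b[17][5] is stored at packW[1][1][1][1]; the outer
    # two indices select the 16x4 block and the inner two the position inside it.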
for i_n in range(n):
for i_k in range(k):
packW[i_n // 16][i_k // 4][i_n % 16][i_k % 4] = b[i_n][i_k]
x = tvm.nd.array(a, dev)
w = tvm.nd.array(packW, dev)
y = tvm.nd.array(np.zeros((m, n), dtype="int32"), dev)
t_evaluator = lib.time_evaluator(lib.entry_name, dev, number=100)
result = t_evaluator(x, w, y)
print(result)
tvm.testing.assert_allclose(y.numpy(), np.dot(a.astype("int32"), b.T.astype("int32")), rtol=0)
@tvm.testing.requires_llvm
@pytest.mark.skip("skip due to AMX feature not available yet")
def METHOD_NAME():
amx_init = tvm.get_global_func("runtime.amx_init")
amx_tileconfig = tvm.get_global_func("runtime.amx_tileconfig")
assert amx_init()
assert amx_tileconfig(16, 64)
if __name__ == "__main__":
pytest.main([__file__]) | null |
aborting | """ZMQServer Driver."""
from schema import Use, Or
import zmq
from testplan.common.config import ConfigOption
from testplan.common.utils.documentation_helper import emphasized
from testplan.common.utils.timing import retry_until_timeout
from ..base import Driver, DriverConfig
class ZMQServerConfig(DriverConfig):
"""
Configuration object for
:py:class:`~testplan.testing.multitest.driver.zmq.server.ZMQServer` driver.
"""
@classmethod
def get_options(cls):
"""
Schema for options validation and assignment of default values.
"""
return {
ConfigOption("host", default="localhost"): str,
ConfigOption("port", default=0): Use(int),
ConfigOption("message_pattern", default=zmq.PAIR): Or(
zmq.PAIR, zmq.REP, zmq.PUB, zmq.PUSH
),
}
class ZMQServer(Driver):
"""
The ZMQServer can receive multiple connections from different ZMQClients.
The socket can be of type:
* zmq.PAIR
* zmq.REP
* zmq.PUB
* zmq.PUSH
{emphasized_members_docs}
:param name: Name of ZMQServer.
:type name: ``str``
:param host: Host name to bind to. Default: 'localhost'
:type host: ``str``
:param port: Port number to bind to. Default: 0 (Random port)
:type port: ``int``
:param message_pattern: Message pattern. Default: ``zmq.PAIR``
:type message_pattern: ``int``
"""
CONFIG = ZMQServerConfig
def __init__(
self,
name: str,
host: str = "localhost",
port: int = 0,
message_pattern=zmq.PAIR,
**options
):
options.update(self.filter_locals(locals()))
super(ZMQServer, self).__init__(**options)
self._host: str = None
self._port: int = None
self._zmq_context = None
self._socket = None
@emphasized
@property
def host(self):
"""Target host name."""
return self._host
@emphasized
@property
def port(self):
"""Port number assigned."""
return self._port
@property
def socket(self):
"""
Returns the underlying ``zmq.sugar.socket.Socket`` object.
"""
return self._socket
def send(self, data, timeout=30):
"""
Try to send the message until it either sends or hits timeout.
:param timeout: Timeout to retry sending the message
:type timeout: ``int``
"""
return retry_until_timeout(
exception=zmq.ZMQError,
item=self._socket.send,
kwargs={"data": data, "flags": zmq.NOBLOCK},
timeout=timeout,
raise_on_timeout=True,
)
def receive(self, timeout=30):
"""
        Try to receive the message until it has either been received or
        the timeout is hit.
:param timeout: Timeout to retry receiving the message
:type timeout: ``int``
:return: The received message
:rtype: ``object`` or ``str`` or ``zmq.sugar.frame.Frame``
"""
return retry_until_timeout(
exception=zmq.ZMQError,
item=self._socket.recv,
kwargs={"flags": zmq.NOBLOCK},
timeout=timeout,
raise_on_timeout=True,
)
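    # Illustrative usage (added, not from the original source), assuming the driver
    # has already been started inside a test environment; names and values are
    # examples only:
    #
    #   server = ZMQServer(name="zmq_server", host="127.0.0.1", message_pattern=zmq.PAIR)
    #   server.send(data=b"ping", timeout=5)
    #   reply = server.receive(timeout=5)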
def starting(self):
"""
Start the ZMQServer.
"""
super(ZMQServer, self).starting()
# pylint: disable=abstract-class-instantiated
self._zmq_context = zmq.Context()
self._socket = self._zmq_context.socket(self.cfg.message_pattern)
if self.cfg.port == 0:
port = self._socket.bind_to_random_port(
"tcp://{host}".format(host=self.cfg.host)
)
else:
self._socket.bind(
"tcp://{host}:{port}".format(
host=self.cfg.host, port=self.cfg.port
)
)
port = self.cfg.port
self._host = self.cfg.host
self._port = port
def stopping(self):
"""
Stop the ZMQServer.
"""
super(ZMQServer, self).stopping()
if not self._socket.closed:
self._socket.close()
if not self._zmq_context.closed:
self._zmq_context.term()
def METHOD_NAME(self):
"""Abort logic that stops the server."""
if not self._socket.closed:
self._socket.close()
if not self._zmq_context.closed:
self._zmq_context.term() | null |
last modified | from __future__ import annotations
from itertools import accumulate
from typing import TYPE_CHECKING
from flask import current_app
from sqlalchemy import tuple_, func
from sqlalchemy.orm import defaultload
from timApp.auth.auth_models import BlockAccess
from timApp.auth.get_user_rights_for_item import get_user_rights_for_item
from timApp.item.block import Block, BlockType
from timApp.item.blockrelevance import BlockRelevance
from timApp.timdb.exceptions import TimDbException
from timApp.timdb.sqa import include_if_loaded
from timApp.util.utils import split_location, date_to_relative, cached_property
if TYPE_CHECKING:
from timApp.folder.folder import Folder
from timApp.user.user import User
class ItemBase:
"""An item that can be assigned permissions."""
@property
def owners(self):
return self.block.owners if self.block else None
@property
def block(self) -> Block:
# Relationships are not loaded when constructing an object with __init__.
if not hasattr(self, "_block") or self._block is None:
self._block = Block.query.get(self.id)
return self._block
@property
def id(self):
"""Returns the item id."""
raise NotImplementedError
@property
def METHOD_NAME(self):
return self.block.modified if self.block else None
@property
def parents(self):
return self.block.parents
@property
def children(self):
return self.block.children
@property
def relevance(self) -> BlockRelevance:
return self.block.relevance if self.block else None
class Item(ItemBase):
"""An item that exists in the TIM directory hierarchy. Currently :class:`~.Folder` and :class:`~.DocInfo`."""
@property
def id(self):
raise NotImplementedError
@property
def path(self):
"""Returns the Document path, including the language part in case of a translation."""
raise NotImplementedError
@property
def path_without_lang(self):
"""Returns the Document path without the language part in case of a translation."""
raise NotImplementedError
@property
def url(self):
return current_app.config["TIM_HOST"] + self.url_relative
def get_url_for_view(self, name: str):
return f'{current_app.config["TIM_HOST"]}/{name}/{self.path}'
def get_relative_url_for_view(self, name: str):
return f"/{name}/{self.path}"
@property
def url_relative(self):
return "/view/" + self.path
@property
def location(self):
folder, _ = split_location(self.path_without_lang)
return folder
@property
def title(self):
if self.block is None:
return "All documents"
if not self.block.description:
return self.short_name
return self.block.description
@title.setter
def title(self, value):
self.block.description = value
@property
def short_name(self):
parts = self.path_without_lang.rsplit("/", 1)
return parts[len(parts) - 1]
def parents_to_root(self, include_root=True, eager_load_groups=False):
if not self.path_without_lang:
return []
path_tuples = self.parent_paths()
from timApp.folder.folder import Folder
if not path_tuples:
return [Folder.get_root()]
# TODO: Add an option whether to load relevance eagerly or not;
# currently eager by default is better to speed up search cache processing
# and it doesn't slow down other code much.
crumbs_q = (
Folder.query.filter(tuple_(Folder.location, Folder.name).in_(path_tuples))
.order_by(func.length(Folder.location).desc())
.options(defaultload(Folder._block).joinedload(Block.relevance))
)
if eager_load_groups:
crumbs_q = crumbs_q.options(
defaultload(Folder._block)
.joinedload(Block.accesses)
.joinedload(BlockAccess.usergroup)
)
crumbs = crumbs_q.all()
if include_root:
crumbs.append(Folder.get_root())
return crumbs
def parent_paths(self) -> list[tuple[str, str]]:
path_parts = self.path_without_lang.split("/")
paths = list(p[1:] for p in accumulate("/" + part for part in path_parts[:-1]))
return [split_location(p) for p in paths]
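    # Added example (hypothetical path): for an item at "kurssit/tie/demot" this
    # returns [("", "kurssit"), ("kurssit", "tie")], i.e. one (location, name) pair
    # per ancestor folder, assuming split_location splits off the last path component.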
@cached_property
def parents_to_root_eager(self):
return self.parents_to_root(eager_load_groups=True)
@property
def parent(
self,
) -> Folder: # TODO rename this to parent_folder to distinguish better from "parents" attribute
folder = self.location
from timApp.folder.folder import Folder
return Folder.find_by_path(folder) if folder else Folder.get_root()
@property
def public(self):
return True
def to_json(self, curr_user: User | None = None):
if curr_user is None:
from timApp.auth.sessioninfo import get_current_user_object
curr_user = get_current_user_object()
return {
"name": self.short_name,
"path": self.path,
"title": self.title,
"location": self.location,
"id": self.id,
"modified": date_to_relative(self.METHOD_NAME)
if self.METHOD_NAME
else None,
"owners": self.owners,
"rights": get_user_rights_for_item(self, curr_user),
"unpublished": self.block.is_unpublished() if self.block else False,
"public": self.public,
# We only add tags if they've already been loaded.
**include_if_loaded("tags", self.block),
**include_if_loaded("relevance", self.block),
}
def get_relative_path(self, path: str):
"""Gets the item path relative to the given path.
The item must be under the path; otherwise TimDbException is thrown.
"""
path = path.strip("/")
if not self.path.startswith(path + "/"):
raise TimDbException("Cannot get relative path")
return self.path.replace(path + "/", "", 1)
@staticmethod
def find_by_id(item_id):
b = Block.query.get(item_id)
if b:
if b.type_id == BlockType.Document.value:
from timApp.document.docentry import DocEntry
return DocEntry.find_by_id(item_id)
elif b.type_id == BlockType.Folder.value:
from timApp.folder.folder import Folder
return Folder.get_by_id(item_id)
else:
raise NotImplementedError
return None
@staticmethod
def find_by_path(path: str, fallback_to_id: bool = False) -> Item | None:
"""
Finds an item by path. If the item is not found, None is returned.
:param path: The path of the item to find.
:param fallback_to_id: If True, the path is treated as an ID if the item is not found by path.
:return: The item, or None if not found.
"""
from timApp.document.docentry import DocEntry
doc = DocEntry.find_by_path(path, fallback_to_id)
if doc:
return doc
from timApp.folder.folder import Folder
folder = Folder.find_by_path(path, fallback_to_id)
if folder:
return folder
return None | null |
test handle bar updates indicator | # -------------------------------------------------------------------------------------------------
# Copyright (C) 2015-2023 Nautech Systems Pty Ltd. All rights reserved.
# https://nautechsystems.io
#
# Licensed under the GNU Lesser General Public License Version 3.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at https://www.gnu.org/licenses/lgpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------------------------------
import sys
import pytest
from nautilus_trader.indicators.atr import AverageTrueRange
from nautilus_trader.test_kit.providers import TestInstrumentProvider
from nautilus_trader.test_kit.stubs.data import TestDataStubs
AUDUSD_SIM = TestInstrumentProvider.default_fx_ccy("AUD/USD")
class TestAverageTrueRange:
def setup(self):
# Fixture Setup
self.atr = AverageTrueRange(10)
def test_name_returns_expected_string(self):
# Arrange, Act, Assert
assert self.atr.name == "AverageTrueRange"
def test_str_repr_returns_expected_string(self):
# Arrange, Act, Assert
assert str(self.atr) == "AverageTrueRange(10, SIMPLE, True, 0.0)"
assert repr(self.atr) == "AverageTrueRange(10, SIMPLE, True, 0.0)"
def test_period(self):
# Arrange, Act, Assert
assert self.atr.period == 10
def test_initialized_without_inputs_returns_false(self):
# Arrange, Act, Assert
assert self.atr.initialized is False
def test_initialized_with_required_inputs_returns_true(self):
# Arrange, Act
for _i in range(10):
self.atr.update_raw(1.00000, 1.00000, 1.00000)
# Assert
assert self.atr.initialized is True
def METHOD_NAME(self):
# Arrange
indicator = AverageTrueRange(10)
bar = TestDataStubs.bar_5decimal()
# Act
indicator.handle_bar(bar)
# Assert
assert indicator.has_inputs
assert indicator.value == 2.999999999997449e-05
def test_value_with_no_inputs_returns_zero(self):
# Arrange, Act, Assert
assert self.atr.value == 0.0
def test_value_with_epsilon_input(self):
# Arrange
epsilon = sys.float_info.epsilon
self.atr.update_raw(epsilon, epsilon, epsilon)
# Act, Assert
assert self.atr.value == 0.0
def test_value_with_one_ones_input(self):
# Arrange
self.atr.update_raw(1.00000, 1.00000, 1.00000)
# Act, Assert
assert self.atr.value == 0.0
def test_value_with_one_input(self):
# Arrange
self.atr.update_raw(1.00020, 1.00000, 1.00010)
# Act, Assert
assert self.atr.value == pytest.approx(0.00020)
def test_value_with_three_inputs(self):
# Arrange
self.atr.update_raw(1.00020, 1.00000, 1.00010)
self.atr.update_raw(1.00020, 1.00000, 1.00010)
self.atr.update_raw(1.00020, 1.00000, 1.00010)
# Act, Assert
assert self.atr.value == pytest.approx(0.00020)
def test_value_with_close_on_high(self):
# Arrange
high = 1.00010
low = 1.00000
# Act
for _i in range(1000):
high += 0.00010
low += 0.00010
close = high
self.atr.update_raw(high, low, close)
# Assert
assert self.atr.value == pytest.approx(0.00010, 2)
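        # Added note: because close == high here, each bar's true range is simply
        # high - low == 0.00010 (there is no gap against the previous close), so the
        # smoothed ATR converges to roughly that value.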
def test_value_with_close_on_low(self):
# Arrange
high = 1.00010
low = 1.00000
# Act
for _i in range(1000):
high -= 0.00010
low -= 0.00010
close = low
self.atr.update_raw(high, low, close)
# Assert
assert self.atr.value == pytest.approx(0.00010)
def test_floor_with_ten_ones_inputs(self):
# Arrange
floor = 0.00005
floored_atr = AverageTrueRange(10, value_floor=floor)
for _i in range(20):
floored_atr.update_raw(1.00000, 1.00000, 1.00000)
# Act, Assert
assert floored_atr.value == 5e-05
def test_floor_with_exponentially_decreasing_high_inputs(self):
# Arrange
floor = 0.00005
floored_atr = AverageTrueRange(10, value_floor=floor)
high = 1.00020
low = 1.00000
close = 1.00000
for _i in range(20):
high -= (high - low) / 2
floored_atr.update_raw(high, low, close)
# Act, Assert
assert floored_atr.value == 5e-05
def test_reset_successfully_returns_indicator_to_fresh_state(self):
# Arrange
for _i in range(1000):
self.atr.update_raw(1.00010, 1.00000, 1.00005)
# Act
self.atr.reset()
# Assert
assert not self.atr.initialized
assert self.atr.value == 0 | null |
create assembly file list | #!/usr/bin/env python3
import os
import sys
import re
import json
from vmaf.core.result import Result
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
def print_usage():
print("usage: python " + os.path.basename(sys.argv[0]) + " input_file\n")
print("input_file contains a list of files for assembly (can be xml or json)")
class FileAssembler:
SUPPORTED_FILE_TYPES = ['xml', 'json']
def __init__(self, to_assemble_input):
self.to_assemble_input = to_assemble_input
@staticmethod
def METHOD_NAME(to_assemble_input):
to_assemble_list = []
if isinstance(to_assemble_input, list):
to_assemble_list = to_assemble_input
else:
with open(to_assemble_input, "rt") as input_file:
for line in input_file.readlines():
# match comment
mo = re.match(r"^#", line)
if mo:
print("Skip commented line: {}".format(line))
continue
# match whitespace
mo = re.match(r"[\s]+", line)
if mo:
continue
mo = re.match(r"([\S]+)", line)
if not mo:
print("Invalid file: {}".format(line))
print_usage()
return 1
to_assemble_list.append(line.strip())
return to_assemble_list
def _create_result_list(self, to_assemble_list):
pass
def assemble(self):
"""
Main file assembly logic
"""
to_assemble_list = self.METHOD_NAME(self.to_assemble_input)
self._assert(to_assemble_list)
results = self._create_result_list(to_assemble_list)
combined_result = Result.combine_result(results)
return combined_result
def _assert(self, to_assemble_list):
"""
Perform necessary assertions before parsing any of the files.
"""
# check that the number of files is greater than 0
assert len(to_assemble_list) > 0
# check that the file formats match
assemble_format_list = [os.path.splitext(f)[1].split(".")[1] for f in to_assemble_list]
        assert len(set(assemble_format_list)) == 1, "The file formats for assembly do not match."
# check that the file format is supported for assembly
        assert assemble_format_list[0] in self.SUPPORTED_FILE_TYPES, \
            "The assembly format is not supported, use any of {fmts}".format(fmts=str(self.SUPPORTED_FILE_TYPES))
class XmlAssembler(FileAssembler):
@staticmethod
def _parse_files(to_assemble_list):
to_assemble_xml_strings = []
for to_assemble_xml in to_assemble_list:
with open(to_assemble_xml, 'r') as f:
to_assemble_xml_strings.append(f.read())
return to_assemble_xml_strings
def _create_result_list(self, to_assemble_list):
to_assemble_xml_strings = self._parse_files(to_assemble_list)
results = []
for to_assemble_xml_string in to_assemble_xml_strings:
results.append(Result.from_xml(to_assemble_xml_string))
return results
class JsonAssembler(FileAssembler):
@staticmethod
def _parse_files(to_assemble_list):
to_assemble_json_strings = []
for json_file in to_assemble_list:
with open(json_file, 'r') as f:
to_assemble_json_strings.append(json.load(f))
return to_assemble_json_strings
def _create_result_list(self, to_assemble_list):
to_assemble_jsons = self._parse_files(to_assemble_list)
results = []
for to_assemble_json in to_assemble_jsons:
to_assemble_json_string = json.dumps(to_assemble_json)
results.append(Result.from_json(to_assemble_json_string))
return results
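# Illustrative programmatic use (added, not part of the original script); the input
# file name is hypothetical:
#
#   combined = XmlAssembler("xml_results_to_assemble.txt").assemble()
#   print(combined.to_xml())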
def main():
if len(sys.argv) != 2:
print_usage()
return 2
files_to_assemble_list = sys.argv[1]
desired_file_list = FileAssembler.METHOD_NAME(files_to_assemble_list)
if ".xml" in desired_file_list[0]:
xml_output = XmlAssembler(files_to_assemble_list).assemble().to_xml()
print(xml_output)
elif ".json" in desired_file_list[0]:
json_output = JsonAssembler(files_to_assemble_list).assemble().to_json()
print(json_output)
else:
print_usage()
return 2
if __name__ == "__main__":
ret = main()
exit(ret) | null |
ignore if marked | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
import os
import re
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional
import sentry_sdk
import snowflake
from magma.configuration.service_configs import get_service_config_value
from orc8r.protos.mconfig import mconfigs_pb2
from sentry_sdk.integrations.redis import RedisIntegration
Event = Dict[str, Any]
Hint = Dict[str, Any]
SentryHook = Callable[[Event, Hint], Optional[Event]]
CONTROL_PROXY = 'control_proxy'
SENTRY_CONFIG = 'sentry'
SENTRY_URL = 'sentry_url_python'
SENTRY_EXCLUDED = 'sentry_excluded_errors'
SENTRY_SAMPLE_RATE = 'sentry_sample_rate'
CLOUD_ADDRESS = 'cloud_address'
ORC8R_CLOUD_ADDRESS = 'orc8r_cloud_address'
DEFAULT_SAMPLE_RATE = 1.0
COMMIT_HASH = 'COMMIT_HASH'
HWID = 'hwid'
SERVICE_NAME = 'service_name'
LOGGING_EXTRA = 'extra'
EXCLUDE_FROM_ERROR_MONITORING_KEY = 'exclude_from_error_monitoring'
# Dictionary constant for convenience, must not be mutated
EXCLUDE_FROM_ERROR_MONITORING = {EXCLUDE_FROM_ERROR_MONITORING_KEY: True} # noqa: WPS407
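# Illustrative usage (added, not part of the original module): a caller can keep a
# log record out of Sentry by tagging it with the dictionary above, e.g.
#
#   logging.error("expected hiccup, not worth alerting", extra=EXCLUDE_FROM_ERROR_MONITORING)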
@dataclass
class SharedSentryConfig(object):
"""Sentry configuration shared by all Python services,
taken from shared mconfig or control_proxy.yml"""
dsn: str
sample_rate: float
exclusion_patterns: List[str]
# TODO when control_proxy.yml is outdated move to shared mconfig entirely
def _get_shared_sentry_config(sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> SharedSentryConfig:
"""Get Sentry configs with the following priority
    1) control_proxy.yml (if sentry_url_python is present)
2) shared mconfig (i.e. first try streamed mconfig from orc8r,
if empty: default mconfig in /etc/magma)
Args:
sentry_mconfig (SharedSentryConfig): proto message of shared mconfig
Returns:
        SharedSentryConfig: Sentry DSN, sample rate and exclusion patterns
"""
dsn = get_service_config_value(
CONTROL_PROXY,
SENTRY_URL,
default='',
)
if not dsn:
# Here, we assume that `dsn` and `sample_rate` should be pulled
# from the same source, that is the source where the user has
# entered the `dsn`.
# Without this coupling `dsn` and `sample_rate` could possibly
# be pulled from different sources.
dsn = sentry_mconfig.dsn_python
sample_rate = sentry_mconfig.sample_rate
else:
logging.info("Sentry config: dsn_python and sample_rate are pulled from control_proxy.yml.")
sample_rate = get_service_config_value(
CONTROL_PROXY,
SENTRY_SAMPLE_RATE,
default=DEFAULT_SAMPLE_RATE,
)
# Exclusion patterns only exist in mconfig, not in control_proxy.yml
exclusion_patterns = sentry_mconfig.exclusion_patterns
return SharedSentryConfig(dsn, sample_rate, exclusion_patterns)
def METHOD_NAME(event: Event) -> Optional[Event]:
if event.get(LOGGING_EXTRA) and event.get(LOGGING_EXTRA).get(EXCLUDE_FROM_ERROR_MONITORING_KEY):
return None
return event
def _filter_excluded_messages(event: Event, hint: Hint, patterns_to_exclude: List[str]) -> Optional[Event]:
explicit_message = event.get('message')
log_entry = event.get("logentry")
log_message = log_entry.get('message') if log_entry else None
exc_info = hint.get("exc_info")
exception_message = str(exc_info[1]) if exc_info else None
messages = [msg for msg in (explicit_message, log_message, exception_message) if msg]
if not messages:
return event
for pattern in patterns_to_exclude:
for message in messages:
if re.search(pattern, message):
return None
return event
def _get_before_send_hook(patterns_to_exclude: List[str]) -> SentryHook:
def filter_excluded_and_marked_messages(
event: Event, hint: Hint,
) -> Optional[Event]:
event = METHOD_NAME(event)
if event:
return _filter_excluded_messages(event, hint, patterns_to_exclude)
return None
def filter_marked_messages(
event: Event, _: Hint,
) -> Optional[Event]:
return METHOD_NAME(event)
if patterns_to_exclude:
return filter_excluded_and_marked_messages
return filter_marked_messages
def sentry_init(service_name: str, sentry_mconfig: mconfigs_pb2.SharedSentryConfig) -> None:
"""Initialize connection and start piping errors to sentry.io."""
sentry_config = _get_shared_sentry_config(sentry_mconfig)
if not sentry_config.dsn:
logging.info(
'Sentry disabled because of missing dsn_python. '
'See documentation (Configure > AGW) on how to configure '
'Sentry dsn.',
)
return
sentry_sdk.init(
dsn=sentry_config.dsn,
release=os.getenv(COMMIT_HASH),
traces_sample_rate=sentry_config.sample_rate,
before_send=_get_before_send_hook(sentry_config.exclusion_patterns),
integrations=[
RedisIntegration(),
],
)
cloud_address = get_service_config_value(
CONTROL_PROXY,
CLOUD_ADDRESS,
default=None,
)
sentry_sdk.set_tag(ORC8R_CLOUD_ADDRESS, cloud_address)
sentry_sdk.set_tag(HWID, snowflake.snowflake())
sentry_sdk.set_tag(SERVICE_NAME, service_name) | null |
build list request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import AutomationClientMixinABC, _convert_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-08"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-08"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/providers/Microsoft.Automation/operations")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.automation.AutomationClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> Iterable["_models.Operation"]:
"""Lists all of the available Automation REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.automation.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
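        Added usage note (illustrative, not from the generated client): assuming an
        AutomationClient instance named ``client``, the pager can be iterated directly,
        e.g. ``for operation in client.operations.list(): print(operation.name)``.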
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-08-08"] = kwargs.pop("api_version", _params.pop("api-version", "2022-08-08"))
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def prepare_request(next_link=None):
if not next_link:
request = METHOD_NAME(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = HttpRequest("GET", next_link)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=False, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.Automation/operations"} | null |
get kdb instance | from pathlib import Path
import os, re, errno, sys, subprocess
import kdb
elektra_namespaces = ["user:", "system:", "dir:", "spec:", "cascading:", "proc:"]
parent_key = None # not yet set
dir_file_special_name = "®elektra.value"
xattr_kdb_file = "®elektra.file"
# translates filesystem paths (below the "pid" level) to elektra paths (e.g. '/user:/dir/®elektra.value' -> 'user:/dir', '/cascading:/key' -> '/key', assuming parent_key == "/")
def os_path_to_elektra_path(os_path):
#inject parent_key after namespace into os_path
namespace, *rest = Path(os_path).parts[1:]
os_path = str(Path(namespace, parent_key.name[1:], *rest))
elektra_path = os_path
if Path(elektra_path).name == dir_file_special_name:
elektra_path = str(Path(elektra_path).parent).strip("/")
if re.match("^cascading:|^/cascading:", elektra_path):
elektra_path = re.sub("^cascading:|^/cascading:", "", elektra_path)
if elektra_path == "":
elektra_path = "/"
else:
elektra_path = elektra_path.strip("/") #remove slashes ('/' is reserved for the cascading namespace)
if elektra_path.endswith(":"):
            elektra_path = elektra_path + "/" # special case introduced around elektra v5 (the root of a namespace needs a trailing slash)
return elektra_path
#returns a kdb instance (with mocked argv, envp)
def METHOD_NAME():
config = kdb.KeySet(0)
contract = kdb.KeySet(0)
custom_envp = [ "%s=%s" % (k, v) for (k, v) in os.environ.items() ]
kdb.goptsContract (contract, sys.argv, custom_envp, parent_key, config)
db = kdb.KDB(contract)
#monkey patch db.get as
#- proc:/ keys are only available through a cascading lookup (See man page elektra-namespaces: "Keys in the namespace proc ... are ignored by kdbGet ... ")
#- we don't want spec: keys to appear in the cascading namespace
orig_get = db.get
def patched_get(ks, orig_root):
justified_root = re.sub("^proc:/", "/", str(orig_root))
status = orig_get(ks, justified_root)
if kdb.Key(orig_root).isCascading():
for key_to_remove in ks.filter(lambda key: key.isSpec()):
ks.remove(key_to_remove)
return status
db.get = patched_get
return db
def size_of_file(os_path):
return len(file_contents(os_path))
def is_directory_empty(os_path):
dirs, files = ls(os_path)
return not bool(dirs) and not bool(files)
#performs function of the "kdb file" command
def get_kdb_file(os_path):
elektra_path = os_path_to_elektra_path(os_path)
resolved_file_path = subprocess.check_output(["kdb", "file", elektra_path]).decode().strip()
return resolved_file_path
def update_key_value(os_path: str, new_value: bytes):
# kdb.kdb.KDBException, may be thrown
# validation => whole key needs to be written at once
with METHOD_NAME() as db:
path = os_path_to_elektra_path(os_path)
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
#try to save new_value as UTF-8 string in case it can be decoded as such
try:
new_value_as_string = new_value.decode(encoding="utf-8", errors="strict")
key.value = new_value_as_string
except UnicodeDecodeError:
key.value = new_value
db.set(ks, path) #using key instead of path here deleted the key
#may throw KeyError
def file_contents(os_path):
key, _ = get_key_and_keyset(os_path)
if key.isString():
return key.value.encode(encoding='UTF-8') #return bytes in all cases
elif key.isBinary():
return key.value
else:
raise Error("Unsupported key type")
#creates key, or, if key already exists, does nothing
def create_key(os_path):
path = os_path_to_elektra_path(os_path)
with METHOD_NAME() as db:
ks = kdb.KeySet()
db.get(ks, path)
if not path in ks:
key = kdb.Key(path)
ks.append(key)
keys_modified = db.set(ks, path)
if keys_modified != 1:
raise OSError(errno.EIO)
#could also be an attempt to create an already existing key. in this rare case the error code does not fit.
def get_meta_map(os_path):
key, _ = get_key_and_keyset(os_path)
return { meta.name:meta.value for meta in key.getMeta() }
def has_meta(os_path, name):
try:
meta_map = get_meta_map(os_path)
return name in meta_map
except KeyError:
return False
#get_meta, set_meta may throw KeyError
def get_meta(os_path, name):
return get_meta_map(os_path)[name]
def set_meta(os_path, name, value):
meta_map = get_meta_map(os_path)
meta_map[name] = value
update_meta_map(os_path, meta_map)
def update_meta_map(os_path, new_meta_map):
path = os_path_to_elektra_path(os_path)
with METHOD_NAME() as db:
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
#delete old meta keys
for meta_key in key.getMeta():
key.delMeta(meta_key.name)
#insert new meta keys
for keyname in new_meta_map.keys():
key.setMeta(keyname, new_meta_map[keyname])
db.set(ks, path)
#may throw KeyError
def get_key_and_keyset(os_path):
path = os_path_to_elektra_path(os_path)
with METHOD_NAME() as db:
ks = kdb.KeySet()
db.get(ks, path)
key = ks[path]
return (key, ks)
#returns a tuple indicating whether the path is a dir and whether it is a file
def key_type(os_path):
if os_path in [".", "..", "/", "/user:", "/system:", "/spec:", "/dir:", "/cascading:", "/proc:"]:
return (True, False)
dir_listing, file_listing = ls(os_path)
return (bool(dir_listing), bool(file_listing))
def is_list_prefix(prefix, list_):
if len(prefix) > len(list_):
return False
for (i, item) in enumerate(prefix):
if list_[i] != item:
return False
return True
def is_path_prefix(prefix, path):
#remove (potential) trailing / to cope with special case introduced in os_path_to_elektra_path
prefix = re.sub("/$", "", prefix)
return is_list_prefix(prefix.split("/"), path.split("/"))
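# Illustrative examples (derived from the two helpers above):
#   is_path_prefix("user:/a", "user:/a/b") -> True
#   is_path_prefix("user:/a", "user:/ab")  -> False
# i.e. the comparison works on whole path segments, not on raw string prefixes.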
def _remove_namespace_prefix(elektra_path):
return re.sub("^.*:", "", elektra_path)
#returns tuple of dirs, files of given path (does not include '.', '..')
def ls(os_path):
path = os_path_to_elektra_path(os_path)
root = kdb.Key(path)
is_root_level = len(path) > 1 and path.endswith("/") # special case
with METHOD_NAME() as db:
ks = kdb.KeySet()
db.get(ks, root)
#only retain keys that are below the root (kdb.get does not guarantee this property)
ks_filtered = kdb.KeySet()
for key in ks:
if key.isBelowOrSame(root):
ks_filtered.append(key)
path_without_namespace = _remove_namespace_prefix(path)
result_keys_without_namespace = map(_remove_namespace_prefix, ks_filtered.unpack_names())
below = {name.split(path_without_namespace)[1] for name in result_keys_without_namespace if is_path_prefix(path_without_namespace, name)}
dirs = {name.split("/")[0 if is_root_level else 1] for name in below if "/" in name}
files = {name for name in below if not "/" in name}.difference(dirs)
if '' in files:
files.remove('')
files.add(dir_file_special_name)
return (dirs, files) | null |
get market moves for period | ######## DO AS A CACHING OBJECT
### CREATE A CACHE
from typing import List, Any, Dict
import datetime
import pandas as pd
import numpy as np
from syscore.dateutils import (
get_date_from_period_and_end_date,
get_approx_vol_scalar_versus_daily_vol_for_period,
)
from syscore.cache import Cache
from syscore.constants import arg_not_supplied
from sysdata.data_blob import dataBlob
from sysproduction.data.prices import diagPrices, get_list_of_instruments
class marketMovers(object):
def __init__(self, data: dataBlob):
self._data = data
self._cache = Cache(self)
def get_market_moves_for_dates(
self, start_date: datetime.datetime, end_date: datetime.datetime
) -> pd.DataFrame:
self._end_date = end_date
self._start_date = start_date
list_of_instruments = get_list_of_instruments(self.data, source="multiple")
all_moves = [
self.get_market_move_for_instrument_and_dates(
instrument_code=instrument_code
)
for instrument_code in list_of_instruments
]
all_moves_as_df = pd.DataFrame(all_moves)
all_moves_as_df = all_moves_as_df.dropna()
return all_moves_as_df
def get_market_move_for_instrument_and_dates(self, instrument_code: str) -> dict:
print(instrument_code)
start_date = self.start_date
end_date = self.end_date
price_change = self.get_price_change(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
vol_for_period = self.calculate_vol(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
vol_adjusted = price_change / vol_for_period
percentage_change = self.get_percentage_change(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
return dict(
name=instrument_code, change=percentage_change, vol_adjusted=vol_adjusted
)
def METHOD_NAME(self, period: str) -> pd.DataFrame:
self._end_date = datetime.datetime.now()
print("Getting data for %s" % period)
# ['name', 'change', 'vol_adjusted']
list_of_instruments = get_list_of_instruments(self.data, source="multiple")
all_moves: List[Dict[str, Any]] = []
for instrument_code in list_of_instruments:
try:
market_moves = self.get_market_move_for_instrument_and_period(
instrument_code=instrument_code,
period=period,
)
all_moves.append(market_moves)
except IndexError:
# missing data for this period
pass
all_moves_as_df = pd.DataFrame(all_moves)
all_moves_as_df = all_moves_as_df.dropna()
return all_moves_as_df
def get_market_move_for_instrument_and_period(
self, instrument_code: str, period: str
) -> dict:
print(instrument_code)
start_date = self.start_date_for_period(period)
end_date = self.end_date
price_change = self.get_price_change(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
vol_for_period = self.calculate_vol(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
vol_adjusted = price_change / vol_for_period
percentage_change = self.get_percentage_change(
instrument_code=instrument_code, start_date=start_date, end_date=end_date
)
return dict(
name=instrument_code, change=percentage_change, vol_adjusted=vol_adjusted
)
def get_percentage_change(
self,
instrument_code: str,
start_date: datetime.datetime,
end_date: datetime.date,
) -> float:
price_series = self.get_prices_for_instrument(instrument_code)
change = get_percentage_change_from_series_for_period(
price_series, start_date=start_date, end_date=end_date
)
return change
def get_price_change(
self,
instrument_code: str,
start_date: datetime.datetime,
end_date: datetime.date,
) -> float:
price_series = self.get_prices_for_instrument(instrument_code)
change = get_price_change_from_series_for_period(
price_series, start_date=start_date, end_date=end_date
)
return change
def get_prices_for_instrument(
self,
instrument_code: str,
) -> pd.Series:
return self.cache.get(self._get_prices_for_instrument, instrument_code)
def _get_prices_for_instrument(
self,
instrument_code: str,
) -> pd.Series:
diag_prices = diagPrices(self.data)
price_series = diag_prices.get_adjusted_prices(instrument_code).ffill()
return price_series
def calculate_vol(
self,
instrument_code: str,
start_date: datetime.datetime,
end_date: datetime.date,
) -> float:
vol_scalar = get_approx_vol_scalar_versus_daily_vol_for_period(
start_date, end_date
)
stddev = self.get_stdev_at_start_date_for_instrument(
start_date, instrument_code
)
return stddev * vol_scalar
def get_stdev_at_start_date_for_instrument(
self, start_date: datetime.date, instrument_code: str
):
stdev = get_stdev_at_start_date_for_instrument(
start_date=start_date,
price_series=self.get_prices_for_instrument(instrument_code),
)
return stdev
def start_date_for_period(self, period: str) -> datetime.datetime:
return get_date_from_period_and_end_date(period, end_date=self.end_date)
@property
def data(self) -> dataBlob:
return self._data
@property
def end_date(self) -> datetime.datetime:
return getattr(self, "_end_date", arg_not_supplied)
@property
def start_date(self) -> datetime.datetime:
return getattr(self, "_start_date", arg_not_supplied)
@property
def cache(self) -> Cache:
return self._cache
def get_price_change_from_series_for_period(
price_series: pd.Series, start_date: datetime.date, end_date: datetime.date
) -> float:
price_series_for_period = price_series[start_date:end_date]
if len(price_series_for_period) == 0:
return np.nan
return price_series_for_period[-1] - price_series_for_period[0]
def get_percentage_change_from_series_for_period(
price_series: pd.Series, start_date: datetime.date, end_date: datetime.date
) -> float:
price_series_for_period = price_series[start_date:end_date]
if len(price_series_for_period) == 0:
return np.nan
return 100 * ((price_series_for_period[-1] / price_series_for_period[0]) - 1)
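# Worked example (hypothetical numbers): a price series that moves from 100.0 at
# start_date to 110.0 at end_date gives a price change of 10.0 and a percentage
# change of 10.0 (i.e. 10%).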
def get_stdev_at_start_date_for_instrument(
price_series: pd.Series, start_date: datetime.date
):
price_series_for_period = price_series[:start_date]
daily_price_series = price_series_for_period.resample("1B").ffill()
daily_returns = daily_price_series.diff()
vol_series = daily_returns.ewm(30).std()
return vol_series[-1] | null |
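# Illustrative usage sketch (hypothetical setup, not part of the original file):
# a caller would wrap a dataBlob in marketMovers and request a DataFrame of moves,
# e.g. marketMovers(data).get_market_moves_for_dates(start, end); the result has
# one row per instrument with the columns name, change and vol_adjusted.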
time analysis area weighted | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
from importlib import import_module, reload
################
# Prepare info for reset_colormaps:
# Import and capture colormaps.
from matplotlib import colormaps # isort:skip
_COLORMAPS_ORIG = set(colormaps)
# Import iris.palette, which modifies colormaps.
import iris.palette
# Derive which colormaps have been added by iris.palette.
_COLORMAPS_MOD = set(colormaps)
COLORMAPS_EXTRA = _COLORMAPS_MOD - _COLORMAPS_ORIG
# Touch iris.palette to prevent linters complaining.
_ = iris.palette
################
class Iris:
@staticmethod
def _import(module_name, reset_colormaps=False):
"""
We have experimented with adding sleep() commands into the imported
modules. The results reveal:
ASV avoids invoking `import x` if nothing gets called in the
benchmark (some imports were timed, but only those where calls
happened during import).
Using reload() is not identical to importing, but does produce
results that are very close to expected import times, so this is fine
for monitoring for regressions.
It is also ideal for accurate repetitions, without the need to mess
with the ASV `number` attribute etc, since cached imports are not used
and the repetitions are therefore no faster than the first run.
"""
mod = import_module(module_name)
if reset_colormaps:
# Needed because reload() will attempt to register new colormaps a
# second time, which errors by default.
for cm_name in COLORMAPS_EXTRA:
colormaps.unregister(cm_name)
reload(mod)
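# In short: each time_* benchmark below re-imports a single module through
# _import(), so ASV reports the reload time as a stand-in for a cold import of
# that module.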
def time_iris(self):
self._import("iris")
def time__concatenate(self):
self._import("iris._concatenate")
def time__constraints(self):
self._import("iris._constraints")
def time__data_manager(self):
self._import("iris._data_manager")
def time__deprecation(self):
self._import("iris._deprecation")
def time__lazy_data(self):
self._import("iris._lazy_data")
def time__merge(self):
self._import("iris._merge")
def time__representation(self):
self._import("iris._representation")
def time_analysis(self):
self._import("iris.analysis")
def METHOD_NAME(self):
self._import("iris.analysis._area_weighted")
def time_analysis__grid_angles(self):
self._import("iris.analysis._grid_angles")
def time_analysis__interpolation(self):
self._import("iris.analysis._interpolation")
def time_analysis__regrid(self):
self._import("iris.analysis._regrid")
def time_analysis__scipy_interpolate(self):
self._import("iris.analysis._scipy_interpolate")
def time_analysis_calculus(self):
self._import("iris.analysis.calculus")
def time_analysis_cartography(self):
self._import("iris.analysis.cartography")
def time_analysis_geomerty(self):
self._import("iris.analysis.geometry")
def time_analysis_maths(self):
self._import("iris.analysis.maths")
def time_analysis_stats(self):
self._import("iris.analysis.stats")
def time_analysis_trajectory(self):
self._import("iris.analysis.trajectory")
def time_aux_factory(self):
self._import("iris.aux_factory")
def time_common(self):
self._import("iris.common")
def time_common_lenient(self):
self._import("iris.common.lenient")
def time_common_metadata(self):
self._import("iris.common.metadata")
def time_common_mixin(self):
self._import("iris.common.mixin")
def time_common_resolve(self):
self._import("iris.common.resolve")
def time_config(self):
self._import("iris.config")
def time_coord_categorisation(self):
self._import("iris.coord_categorisation")
def time_coord_systems(self):
self._import("iris.coord_systems")
def time_coords(self):
self._import("iris.coords")
def time_cube(self):
self._import("iris.cube")
def time_exceptions(self):
self._import("iris.exceptions")
def time_experimental(self):
self._import("iris.experimental")
def time_fileformats(self):
self._import("iris.fileformats")
def time_fileformats__ff(self):
self._import("iris.fileformats._ff")
def time_fileformats__ff_cross_references(self):
self._import("iris.fileformats._ff_cross_references")
def time_fileformats__pp_lbproc_pairs(self):
self._import("iris.fileformats._pp_lbproc_pairs")
def time_fileformats_structured_array_identification(self):
self._import("iris.fileformats._structured_array_identification")
def time_fileformats_abf(self):
self._import("iris.fileformats.abf")
def time_fileformats_cf(self):
self._import("iris.fileformats.cf")
def time_fileformats_dot(self):
self._import("iris.fileformats.dot")
def time_fileformats_name(self):
self._import("iris.fileformats.name")
def time_fileformats_name_loaders(self):
self._import("iris.fileformats.name_loaders")
def time_fileformats_netcdf(self):
self._import("iris.fileformats.netcdf")
def time_fileformats_nimrod(self):
self._import("iris.fileformats.nimrod")
def time_fileformats_nimrod_load_rules(self):
self._import("iris.fileformats.nimrod_load_rules")
def time_fileformats_pp(self):
self._import("iris.fileformats.pp")
def time_fileformats_pp_load_rules(self):
self._import("iris.fileformats.pp_load_rules")
def time_fileformats_pp_save_rules(self):
self._import("iris.fileformats.pp_save_rules")
def time_fileformats_rules(self):
self._import("iris.fileformats.rules")
def time_fileformats_um(self):
self._import("iris.fileformats.um")
def time_fileformats_um__fast_load(self):
self._import("iris.fileformats.um._fast_load")
def time_fileformats_um__fast_load_structured_fields(self):
self._import("iris.fileformats.um._fast_load_structured_fields")
def time_fileformats_um__ff_replacement(self):
self._import("iris.fileformats.um._ff_replacement")
def time_fileformats_um__optimal_array_structuring(self):
self._import("iris.fileformats.um._optimal_array_structuring")
def time_fileformats_um_cf_map(self):
self._import("iris.fileformats.um_cf_map")
def time_io(self):
self._import("iris.io")
def time_io_format_picker(self):
self._import("iris.io.format_picker")
def time_iterate(self):
self._import("iris.iterate")
def time_palette(self):
self._import("iris.palette", reset_colormaps=True)
def time_plot(self):
self._import("iris.plot")
def time_quickplot(self):
self._import("iris.quickplot")
def time_std_names(self):
self._import("iris.std_names")
def time_symbols(self):
self._import("iris.symbols")
def time_tests(self):
self._import("iris.tests")
def time_time(self):
self._import("iris.time")
def time_util(self):
self._import("iris.util")
# third-party imports
def time_third_party_cartopy(self):
self._import("cartopy")
def time_third_party_cf_units(self):
self._import("cf_units")
def time_third_party_cftime(self):
self._import("cftime")
def time_third_party_matplotlib(self):
self._import("matplotlib")
def time_third_party_numpy(self):
self._import("numpy")
def time_third_party_scipy(self):
self._import("scipy") | null |
extract data | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MHSMRegionsOperations:
"""MHSMRegionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.keyvault.v2021_12_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_by_resource(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> AsyncIterable["_models.MHSMRegionsListResult"]:
"""The List operation gets information about the regions associated with the managed HSM Pool.
:param resource_group_name: Name of the resource group that contains the managed HSM pool.
:type resource_group_name: str
:param name: Name of the managed HSM Pool.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either MHSMRegionsListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.keyvault.v2021_12_01_preview.models.MHSMRegionsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.MHSMRegionsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-12-01-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def METHOD_NAME(pipeline_response):
deserialized = self._deserialize('MHSMRegionsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ManagedHsmError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, METHOD_NAME
)
list_by_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.KeyVault/managedHSMs/{name}/regions'} # type: ignore | null |
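# Illustrative usage sketch (the attribute name on the client is an assumption,
# not part of the generated file):
#   async for region in client.mhsm_regions.list_by_resource("my-rg", "my-hsm"):
#       print(region)
# AsyncItemPaged drives get_next/extract_data and keeps following next_link until
# the service stops returning one.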
export mod broadcast | # Copyright (c) ONNX Project Contributors
#
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import onnx
from onnx.backend.test.case.base import Base
from onnx.backend.test.case.node import expect
class Mod(Base):
@staticmethod
def export_mod_mixed_sign_float64() -> None:
node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float64)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float64)
z = np.fmod(x, y) # expected output [-0.1, 0.4, 5. , 0.1, -0.4, 3.]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float64")
@staticmethod
def export_mod_mixed_sign_float32() -> None:
node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float32)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float32)
z = np.fmod(
x, y
) # expected output [-0.10000038, 0.39999962, 5. , 0.10000038, -0.39999962, 3.]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float32")
@staticmethod
def export_mod_mixed_sign_float16() -> None:
node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
x = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0]).astype(np.float16)
y = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0]).astype(np.float16)
z = np.fmod(
x, y
) # expected output [-0.10156, 0.3984 , 5. , 0.10156, -0.3984 , 3.]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_float16")
@staticmethod
def export_mod_mixed_sign_int64() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int64")
@staticmethod
def export_mod_mixed_sign_int32() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int32)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int32)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int32")
@staticmethod
def export_mod_mixed_sign_int16() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int16)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int16)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int16")
@staticmethod
def export_mod_mixed_sign_int8() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int8)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int8)
z = np.mod(x, y) # expected output [ 0, -2, 5, 0, 2, 3]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_mixed_sign_int8")
@staticmethod
def export_mod_uint8() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([4, 7, 5]).astype(np.uint8)
y = np.array([2, 3, 8]).astype(np.uint8)
z = np.mod(x, y) # expected output [0, 1, 5]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint8")
@staticmethod
def export_mod_uint16() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([4, 7, 5]).astype(np.uint16)
y = np.array([2, 3, 8]).astype(np.uint16)
z = np.mod(x, y) # expected output [0, 1, 5]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint16")
@staticmethod
def export_mod_uint32() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([4, 7, 5]).astype(np.uint32)
y = np.array([2, 3, 8]).astype(np.uint32)
z = np.mod(x, y) # expected output [0, 1, 5]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint32")
@staticmethod
def export_mod_uint64() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.array([4, 7, 5]).astype(np.uint64)
y = np.array([2, 3, 8]).astype(np.uint64)
z = np.mod(x, y) # expected output [0, 1, 5]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_uint64")
@staticmethod
def export_mod_int64_fmod() -> None:
node = onnx.helper.make_node("Mod", inputs=["x", "y"], outputs=["z"], fmod=1)
x = np.array([-4, 7, 5, 4, -7, 8]).astype(np.int64)
y = np.array([2, -3, 8, -2, 3, 5]).astype(np.int64)
z = np.fmod(x, y) # expected output [ 0, 1, 5, 0, -1, 3]
expect(node, inputs=[x, y], outputs=[z], name="test_mod_int64_fmod")
@staticmethod
def METHOD_NAME() -> None:
node = onnx.helper.make_node(
"Mod",
inputs=["x", "y"],
outputs=["z"],
)
x = np.arange(0, 30).reshape([3, 2, 5]).astype(np.int32)
y = np.array([7]).astype(np.int32)
z = np.mod(x, y)
# array([[[0, 1, 2, 3, 4],
# [5, 6, 0, 1, 2]],
# [[3, 4, 5, 6, 0],
# [1, 2, 3, 4, 5]],
# [[6, 0, 1, 2, 3],
# [4, 5, 6, 0, 1]]], dtype=int32)
expect(node, inputs=[x, y], outputs=[z], name="test_mod_broadcast") | null |
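# Summary of the semantics exercised above: the default integer Mod follows
# numpy.mod and takes the sign of the divisor, while fmod=1 follows numpy.fmod and
# takes the sign of the dividend, e.g. np.mod(-7, 3) == 2 but np.fmod(-7, 3) == -1
# (see the mixed-sign int64 cases above).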
test support mech | # --- BEGIN COPYRIGHT BLOCK ---
# Copyright (C) 2017 Red Hat, Inc.
# All rights reserved.
#
# License: GPL (version 3 or any later version).
# See LICENSE for details.
# --- END COPYRIGHT BLOCK ---
from lib389.topologies import topology_st_gssapi, gssapi_ack
from lib389.idm.user import UserAccounts
from lib389.saslmap import SaslMappings
from lib389._constants import DEFAULT_SUFFIX
import ldap
import subprocess
import os
import pytest
pytestmark = pytest.mark.tier1
@pytest.fixture(scope='module')
def testuser(topology_st_gssapi):
# Create a user
users = UserAccounts(topology_st_gssapi.standalone, DEFAULT_SUFFIX)
user = users.create(properties={
'uid': 'testuser',
'cn' : 'testuser',
'sn' : 'user',
'uidNumber' : '1000',
'gidNumber' : '2000',
'homeDirectory' : '/home/testuser'
})
# Give them a krb princ
user.create_keytab()
return user
@gssapi_ack
def test_gssapi_bind(topology_st_gssapi, testuser):
"""Test that we can bind with GSSAPI
:id: 894a4c27-3d4c-4ba3-aa33-2910032e3783
:setup: standalone gssapi instance
:steps:
1. Bind with sasl/gssapi
:expectedresults:
1. Bind succeeds
"""
conn = testuser.bind_gssapi()
assert(conn.whoami_s() == "dn: %s" % testuser.dn.lower())
@gssapi_ack
def test_invalid_sasl_map(topology_st_gssapi, testuser):
"""Test that auth fails when we can not map a user.
:id: dd4218eb-9237-4611-ba2f-1781391cadd1
:setup: standalone gssapi instance
:steps:
1. Invalidate a sasl map
2. Attempt to bind
:expectedresults:
1. The sasl map is invalid.
2. The bind fails.
"""
saslmaps = SaslMappings(topology_st_gssapi.standalone)
saslmap = saslmaps.get('suffix map')
saslmap.set('nsSaslMapFilterTemplate', '(invalidattr=\\1)')
with pytest.raises(ldap.INVALID_CREDENTIALS):
conn = testuser.bind_gssapi()
saslmap.set('nsSaslMapFilterTemplate', '(uid=\\1)')
@gssapi_ack
def test_missing_user(topology_st_gssapi):
"""Test that binding with no user does not work.
:id: 109b5ab8-6556-4222-92d6-398476a50d30
:setup: standalone gssapi instance
:steps:
1. Create a principal with a name that is not mappable
2. Attempt to bind
:expectedresults:
1. The principal is created
2. The bind fails.
"""
# Make a principal and bind with no user.
st = topology_st_gssapi.standalone
st.realm.create_principal("doesnotexist")
st.realm.create_keytab("doesnotexist", "/tmp/doesnotexist.keytab")
# Now try to bind.
subprocess.call(['kdestroy', '-A'])
os.environ["KRB5_CLIENT_KTNAME"] = "/tmp/doesnotexist.keytab"
conn = ldap.initialize(st.toLDAPURL())
sasltok = ldap.sasl.gssapi()
with pytest.raises(ldap.INVALID_CREDENTIALS):
conn.sasl_interactive_bind_s('', sasltok)
@gssapi_ack
def METHOD_NAME(topology_st_gssapi, testuser):
"""Test that a GSSAPI bind succeeds when GSSAPI is in the allowed SASL mechanisms
:id: 6ec80aca-00c4-4141-b96b-3ae8837fc751
:setup: standalone gssapi instance
:steps:
1. Add GSSAPI to allowed sasl mechanisms.
2. Attempt to bind
:expectedresults:
1. The allowed mechs are changed.
2. The bind succeeds.
"""
topology_st_gssapi.standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'GSSAPI EXTERNAL ANONYMOUS')
conn = testuser.bind_gssapi()
assert(conn.whoami_s() == "dn: %s" % testuser.dn.lower())
@gssapi_ack
def test_rejected_mech(topology_st_gssapi, testuser):
"""Test that a GSSAPI bind fails when GSSAPI is not in the allowed SASL mechanisms.
:id: 7896c756-6f65-4390-a844-12e2eec19675
:setup: standalone gssapi instance
:steps:
1. Add GSSAPI to allowed sasl mechanisms.
2. Attempt to bind
:expectedresults:
1. The allowed mechs are changed.
2. The bind fails.
"""
topology_st_gssapi.standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'EXTERNAL ANONYMOUS')
with pytest.raises(ldap.STRONG_AUTH_NOT_SUPPORTED):
conn = testuser.bind_gssapi()
topology_st_gssapi.standalone.config.set('nsslapd-allowed-sasl-mechanisms', 'GSSAPI EXTERNAL ANONYMOUS')
| null |
xy of hex | import inspect
import numpy as np
import pytest
from pytest import approx
from scipy.spatial import Delaunay, Voronoi
from landlab.graph.voronoi.voronoi_to_graph import (
VoronoiDelaunay,
VoronoiDelaunayToGraph,
)
XY_OF_NODE = {
"rect-horizontal-3-3": [
[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[0.5, 1.0],
[1.5, 1.0],
[2.5, 1.0],
[0.0, 2.0],
[1.0, 2.0],
[2.0, 2.0],
],
"rect-horizontal-4-3": [
[0.0, 0.0],
[1.0, 0.0],
[2.0, 0.0],
[0.5, 1.0],
[1.5, 1.0],
[2.5, 1.0],
[0.0, 2.0],
[1.0, 2.0],
[2.0, 2.0],
[0.0, 3.0],
[1.0, 3.0],
[2.0, 3.0],
],
"rect-vertical-3-3": [
[0.0, 0.0],
[2.0, 0.0],
[1.0, 0.5],
[0.0, 1.0],
[2.0, 1.0],
[1.0, 1.5],
[0.0, 2.0],
[2.0, 2.0],
[1.0, 2.5],
],
"rect-vertical-3-4": [
[0.0, 0.0],
[2.0, 0.0],
[1.0, 0.5],
[3.0, 0.5],
[0.0, 1.0],
[2.0, 1.0],
[1.0, 1.5],
[3.0, 1.5],
[0.0, 2.0],
[2.0, 2.0],
[1.0, 2.5],
[3.0, 2.5],
],
}
@pytest.fixture
def hex_graph():
return VoronoiDelaunayToGraph(XY_OF_NODE["rect-horizontal-3-3"])
@pytest.fixture
def METHOD_NAME():
return XY_OF_NODE["rect-horizontal-3-3"]
def pytest_generate_tests(metafunc):
if "at_property" in metafunc.fixturenames:
props = dict(
inspect.getmembers(
VoronoiDelaunayToGraph, lambda o: isinstance(o, property)
)
)
metafunc.parametrize(
"at_property", [name for name in props.keys() if "_at_" in name]
)
if "of_property" in metafunc.fixturenames:
props = dict(
inspect.getmembers(
VoronoiDelaunayToGraph, lambda o: isinstance(o, property)
)
)
metafunc.parametrize(
"of_property",
[
name
for name in props.keys()
if "_of_" in name and not name.startswith("number")
],
)
def test_voronoi_name_mapping(METHOD_NAME):
"""Test scipy Voronoi names are mapped to landlab-style names."""
voronoi = Voronoi(METHOD_NAME)
delaunay = Delaunay(METHOD_NAME)
graph = VoronoiDelaunay(METHOD_NAME)
voronoi.regions, voronoi.point_region = VoronoiDelaunay._remove_empty_regions(
voronoi.regions, voronoi.point_region
)
assert np.all(graph.x_of_node == approx(voronoi.points[:, 0]))
assert np.all(graph.y_of_node == approx(voronoi.points[:, 1]))
assert np.all(graph.x_of_corner == approx(voronoi.vertices[:, 0]))
assert np.all(graph.y_of_corner == approx(voronoi.vertices[:, 1]))
assert np.all(graph.nodes_at_link == voronoi.ridge_points)
assert tuple(graph.n_corners_at_cell) == tuple(
len(region) for region in voronoi.regions
)
for cell, corners in enumerate(graph.corners_at_cell):
assert np.all(corners[: graph.n_corners_at_cell[cell]] == voronoi.regions[cell])
assert np.all(corners[graph.n_corners_at_cell[cell] :] == -1)
assert np.all(graph.corners_at_face == voronoi.ridge_vertices)
assert np.all(graph.nodes_at_face == voronoi.ridge_points)
assert np.all(graph.cell_at_node == voronoi.point_region)
assert np.all(graph.nodes_at_patch == delaunay.simplices)
def test_at_array_is_int(hex_graph, at_property):
"""Test that _at_ properties are arrays of int."""
assert getattr(hex_graph, at_property).dtype == int
def test_degenerate_case():
xy_of_node = np.array(
[[0, 0], [1, 0], [2, 0], [0, 1], [1, 1], [2, 1], [0, 2], [1, 2], [2, 2]],
dtype=float,
)
VoronoiDelaunay(xy_of_node)
# VoronoiDelaunayToGraph(xy_of_node)
def test_of_array_is_float(hex_graph, of_property):
"""Test that _of_ properties are arrays of float."""
xy_of_node = np.array(
[[0, 0], [2, 0], [4, 0], [1, 1], [3, 1], [5, 1], [0, 2], [2, 2], [4, 2]],
dtype=float,
)
# hex_graph = VoronoiDelaunayToGraph(xy_of_node)
hex_graph = VoronoiDelaunay(xy_of_node)
assert getattr(hex_graph, of_property).dtype == float
@pytest.mark.parametrize(
"element,expected",
[
("nodes", 9),
("links", 17),
("patches", 9),
("corners", 9),
("faces", 10),
("cells", 2),
],
)
def test_element_count_without_perimeter_nodes(hex_graph, element, expected):
assert getattr(hex_graph, f"number_of_{element}") == expected
@pytest.mark.parametrize(
"element,expected",
[
("nodes", 9),
("links", 16),
("patches", 8),
("corners", 8),
("faces", 8),
("cells", 1),
],
)
def test_element_count_with_perimeter_nodes(METHOD_NAME, element, expected):
perimeter_links = [[0, 1], [1, 2], [2, 5], [5, 8], [8, 7], [7, 6], [6, 3], [3, 0]]
graph = VoronoiDelaunayToGraph(METHOD_NAME, perimeter_links=perimeter_links)
assert getattr(graph, f"number_of_{element}") == expected
@pytest.mark.parametrize("at", ("node", "link", "cell", "corner", "face", "cell"))
def test_compact_ids_without_perimeter_nodes(hex_graph, at):
ids = []
for name in hex_graph.ids_with_prefix(at):
ids.append(np.sort(np.unique(getattr(hex_graph, name).reshape((-1,)))))
ids = np.sort(np.unique(np.concatenate(ids)))
ids = ids[ids >= 0]
assert ids[0] >= 0
assert ids[-1] <= hex_graph._mesh.dims[at]
@pytest.mark.parametrize("at", ("node", "link", "cell", "corner", "face", "cell"))
def test_compact_ids_with_perimeter_nodes(METHOD_NAME, at):
perimeter_links = [[0, 1], [1, 2], [2, 5], [5, 8], [8, 7], [7, 6], [6, 3], [3, 0]]
graph = VoronoiDelaunayToGraph(METHOD_NAME, perimeter_links=perimeter_links)
ids = []
for name in graph.ids_with_prefix(at):
ids.append(np.sort(np.unique(getattr(graph, name).reshape((-1,)))))
ids = np.sort(np.unique(np.concatenate(ids)))
ids = ids[ids >= 0]
assert ids[0] >= 0
assert ids[-1] <= graph._mesh.dims[at]
@pytest.mark.parametrize("at", ["node", "link", "patch", "corner", "face", "cell"])
def test_has_prefix(hex_graph, at):
expected = {
"node": ("nodes_at_patch", "nodes_at_face", "node_at_cell", "nodes_at_link"),
"link": ("links_at_patch",),
"patch": (),
"corner": ("corners_at_face", "corners_at_cell"),
"face": ("faces_at_cell",),
"cell": ("cell_at_node",),
}
assert hex_graph.ids_with_prefix(at) == set(expected[at])
@pytest.mark.parametrize("at", ["node", "link", "patch", "corner", "face", "cell"])
def test_has_suffix(hex_graph, at):
expected = {
"node": ("cell_at_node",),
"link": ("nodes_at_link",),
"patch": ("nodes_at_patch", "links_at_patch"),
"corner": (),
"face": ("corners_at_face", "nodes_at_face"),
"cell": (
"n_corners_at_cell",
"faces_at_cell",
"node_at_cell",
"corners_at_cell",
),
}
assert hex_graph.ids_with_suffix(at) == set(expected[at])
@pytest.mark.parametrize(
"n_nodes",
[2**10, 2**11, 2**12, 2**13, 2**14, 2**15], # , 2 ** 16, 2 ** 20]
)
def test_big_graph(n_nodes):
xy_of_node = np.random.rand(2 * n_nodes).reshape((-1, 2))
graph = VoronoiDelaunayToGraph(xy_of_node)
assert graph.number_of_nodes == n_nodes | null |
show bss | #!/usr/bin/python
import dbus
import sys, os
import time
import gobject
from dbus.mainloop.glib import DBusGMainLoop
WPAS_DBUS_SERVICE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_INTERFACE = "fi.w1.wpa_supplicant1"
WPAS_DBUS_OPATH = "/fi/w1/wpa_supplicant1"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.w1.wpa_supplicant1.Interface"
WPAS_DBUS_INTERFACES_OPATH = "/fi/w1/wpa_supplicant1/Interfaces"
WPAS_DBUS_BSS_INTERFACE = "fi.w1.wpa_supplicant1.BSS"
def byte_array_to_string(s):
import urllib
r = ""
for c in s:
if c >= 32 and c < 127:
r += "%c" % c
else:
r += urllib.quote(chr(c))
return r
def list_interfaces(wpas_obj):
ifaces = wpas_obj.Get(WPAS_DBUS_INTERFACE, 'Interfaces',
dbus_interface=dbus.PROPERTIES_IFACE)
for path in ifaces:
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
ifname = if_obj.Get(WPAS_DBUS_INTERFACES_INTERFACE, 'Ifname',
dbus_interface=dbus.PROPERTIES_IFACE)
print ifname
def propertiesChanged(properties):
if properties.has_key("State"):
print "PropertiesChanged: State: %s" % (properties["State"])
def METHOD_NAME(bss):
net_obj = bus.get_object(WPAS_DBUS_SERVICE, bss)
net = dbus.Interface(net_obj, WPAS_DBUS_BSS_INTERFACE)
# Convert the byte-array for SSID and BSSID to printable strings
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'BSSID',
dbus_interface=dbus.PROPERTIES_IFACE)
bssid = ""
for item in val:
bssid = bssid + ":%02x" % item
bssid = bssid[1:]
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'SSID',
dbus_interface=dbus.PROPERTIES_IFACE)
ssid = byte_array_to_string(val)
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'WPA',
dbus_interface=dbus.PROPERTIES_IFACE)
wpa = "no"
if len(val["KeyMgmt"]) > 0:
wpa = "yes"
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'RSN',
dbus_interface=dbus.PROPERTIES_IFACE)
wpa2 = "no"
if len(val["KeyMgmt"]) > 0:
wpa2 = "yes"
freq = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Frequency',
dbus_interface=dbus.PROPERTIES_IFACE)
signal = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Signal',
dbus_interface=dbus.PROPERTIES_IFACE)
val = net_obj.Get(WPAS_DBUS_BSS_INTERFACE, 'Rates',
dbus_interface=dbus.PROPERTIES_IFACE)
if len(val) > 0:
maxrate = val[0] / 1000000
else:
maxrate = 0
print " %s :: ssid='%s' wpa=%s wpa2=%s signal=%d rate=%d freq=%d" % (bssid, ssid, wpa, wpa2, signal, maxrate, freq)
def scanDone(success):
print "Scan done: success=%s" % success
res = if_obj.Get(WPAS_DBUS_INTERFACES_INTERFACE, 'BSSs',
dbus_interface=dbus.PROPERTIES_IFACE)
print "Scanned wireless networks:"
for opath in res:
print opath
METHOD_NAME(opath)
def bssAdded(bss, properties):
print "BSS added: %s" % (bss)
METHOD_NAME(bss)
def bssRemoved(bss):
print "BSS removed: %s" % (bss)
def main():
dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
global bus
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
if len(sys.argv) != 2:
list_interfaces(wpas_obj)
os._exit(1)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
bus.add_signal_receiver(scanDone,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="ScanDone")
bus.add_signal_receiver(bssAdded,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSAdded")
bus.add_signal_receiver(bssRemoved,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="BSSRemoved")
bus.add_signal_receiver(propertiesChanged,
dbus_interface=WPAS_DBUS_INTERFACES_INTERFACE,
signal_name="PropertiesChanged")
ifname = sys.argv[1]
# See if wpa_supplicant already knows about this interface
path = None
try:
path = wpas.GetInterface(ifname)
except dbus.DBusException, exc:
if not str(exc).startswith("fi.w1.wpa_supplicant1.InterfaceUnknown:"):
raise exc
try:
path = wpas.CreateInterface({'Ifname': ifname, 'Driver': 'test'})
time.sleep(1)
except dbus.DBusException, exc:
if not str(exc).startswith("fi.w1.wpa_supplicant1.InterfaceExists:"):
raise exc
global if_obj
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
global iface
iface = dbus.Interface(if_obj, WPAS_DBUS_INTERFACES_INTERFACE)
iface.Scan({'Type': 'active'})
gobject.MainLoop().run()
wpas.RemoveInterface(dbus.ObjectPath(path))
if __name__ == "__main__":
main()
| null |
write string | #!/usr/bin/python3
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF', b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def METHOD_NAME(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
driver_paths = glob.glob(os.path.join('/sys/bus/hid/drivers/razerkbd', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
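# A yielded driver_path typically looks something like (hypothetical device id)
# /sys/bus/hid/drivers/razerkbd/0003:1532:0203.0001, and the helpers above read or
# write sysfs attribute files such as matrix_brightness underneath it.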
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0203), start=1):
found_chroma = True
print("Blackwidow Chroma {0}\n".format(index))
print("Driver version: {0}".format(read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
METHOD_NAME(driver_path, 'matrix_brightness', '255')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Wave Left")
METHOD_NAME(driver_path, 'matrix_effect_wave', '1')
time.sleep(5)
print("Wave Right")
METHOD_NAME(driver_path, 'matrix_effect_wave', '2')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath', b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# Custom LEDs all rows
payload_all = b''
for row in range(0, 6): # each row writes columns 0x00-0x15 (0-21 inclusive, i.e. 22 columns)
payload_all += row.to_bytes(1, byteorder='big') + b'\x00\x15'
for i in range(0, 22):
payload_all += random.choice(COLOURS)
# Custom LEDs M1-5
payload_m1_5 = b''
for row in range(0, 6): # each row writes only column 0 (start and end column are both 0x00, used here for the M1-M5 keys)
payload_m1_5 += row.to_bytes(1, byteorder='big') + b'\x00\x00' + b'\xFF\xFF\xFF'
print("Custom LED matrix colours test. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test. Setting M1-5 to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_m1_5)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
if not args.skip_game_led:
# Game mode test
print("Starting game mode LED tests. Press enter to begin.")
input()
print("Enabling game mode LED")
METHOD_NAME(driver_path, 'game_led_state', '1')
time.sleep(5)
print("Disabling game mode LED")
METHOD_NAME(driver_path, 'game_led_state', '0')
time.sleep(5)
if not args.skip_macro_led:
print("Starting macro LED tests. Press enter to begin.")
input()
print("Enabling macro mode LED")
METHOD_NAME(driver_path, 'macro_led_state', '1')
time.sleep(5)
print("Enabling macro mode LED static effect")
METHOD_NAME(driver_path, 'macro_led_effect', '0')
time.sleep(5)
print("Enabling macro mode LED blinking effect")
METHOD_NAME(driver_path, 'macro_led_effect', '1')
time.sleep(5)
METHOD_NAME(driver_path, 'macro_led_effect', '0')
print("Disabling macro mode LED")
METHOD_NAME(driver_path, 'macro_led_state', '0')
time.sleep(5)
print("Finished")
if not found_chroma:
print("No Blackwidow Chromas found") | null |
break lines | # Copyright 2013-2017 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from wa.utils.terminalsize import get_terminal_size
INDENTATION_FROM_TITLE = 4
class TextFormatter(object):
"""
This is a base class for text formatting. It mainly asks subclasses to implement
two methods, add_item and format_data. The former adds new text to the
formatter, whereas the latter returns the formatted text. The name attribute
represents the name of the formatter.
"""
name = None
data = None
def __init__(self):
pass
def add_item(self, new_data, item_title):
"""
Add new item to the text formatter.
:param new_data: The data to be added
:param item_title: A title for the added data
"""
raise NotImplementedError()
def format_data(self):
"""
It returns a formatted text
"""
raise NotImplementedError()
class DescriptionListFormatter(TextFormatter):
name = 'description_list_formatter'
data = None
def get_text_width(self):
if not self._text_width:
self._text_width, _ = get_terminal_size() # pylint: disable=unpacking-non-sequence
return self._text_width
def set_text_width(self, value):
self._text_width = value
text_width = property(get_text_width, set_text_width)
def __init__(self, title=None, width=None):
super(DescriptionListFormatter, self).__init__()
self.data_title = title
self._text_width = width
self.longest_word_length = 0
self.data = []
def add_item(self, new_data, item_title):
if len(item_title) > self.longest_word_length:
self.longest_word_length = len(item_title)
self.data[len(self.data):] = [(item_title, self._remove_newlines(new_data))]
def format_data(self):
parag_indentation = self.longest_word_length + INDENTATION_FROM_TITLE
string_formatter = '{}:<{}{} {}'.format('{', parag_indentation, '}', '{}')
formatted_data = ''
if self.data_title:
formatted_data += self.data_title
line_width = self.text_width - parag_indentation
for title, paragraph in self.data:
formatted_data += '\n'
title_len = self.longest_word_length - len(title)
title += ':'
if title_len > 0:
title = (' ' * title_len) + title
parag_lines = self.METHOD_NAME(paragraph, line_width).splitlines()
if parag_lines:
formatted_data += string_formatter.format(title, parag_lines[0])
for line in parag_lines[1:]:
formatted_data += '\n' + string_formatter.format('', line)
else:
formatted_data += title[:-1]
self.text_width = None
return formatted_data
# Return the text's paragraphs as a list, such that each index in the
# list is a single text paragraph with no new lines
def _remove_newlines(self, new_data): # pylint: disable=R0201
parag_list = ['']
parag_num = 0
prv_parag = None
# For each paragraph separated by a new line
for paragraph in new_data.splitlines():
if paragraph:
parag_list[parag_num] += ' ' + paragraph
# if the previous line is NOT empty, then add new empty index for
# the next paragraph
elif prv_parag:
parag_num += 1  # move on to the next paragraph slot
parag_list.append('')
prv_parag = paragraph
# sometimes, we end up with an empty string as the last item so we remove it
if not parag_list[-1]:
return parag_list[:-1]
return parag_list
def METHOD_NAME(self, parag_list, line_width): # pylint: disable=R0201
formatted_paragraphs = []
for para in parag_list:
words = para.split()
if words:
formatted_text = words.pop(0)
current_width = len(formatted_text)
# for each word in the paragraph, line width is an accumulation of
# word length + 1 (1 is for the space after each word).
for word in words:
word = word.strip()
if current_width + len(word) + 1 >= line_width:
formatted_text += '\n' + word
current_width = len(word)
else:
formatted_text += ' ' + word
current_width += len(word) + 1
formatted_paragraphs.append(formatted_text)
return '\n\n'.join(formatted_paragraphs) | null |
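# Illustrative usage sketch (hypothetical items, not part of the original module):
#   formatter = DescriptionListFormatter(title='Parameters:', width=60)
#   formatter.add_item('Number of worker threads to spawn.', 'threads')
#   formatter.add_item('Path to the output directory.', 'output')
#   print(formatter.format_data())
# Each title is aligned to the longest title plus INDENTATION_FROM_TITLE and its
# description is wrapped to the remaining line width.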
prepare request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._operations import build_list_request
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.imagebuilder.aio.ImageBuilderClient`'s
:attr:`operations` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def list(self, **kwargs: Any) -> AsyncIterable["_models.Operation"]:
"""Lists available operations for the Microsoft.VirtualMachineImages provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either Operation or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.imagebuilder.models.Operation]
:raises ~azure.core.exceptions.HttpResponseError:
"""
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: Literal["2022-07-01"] = kwargs.pop(
"api_version", _params.pop("api-version", self._config.api_version)
)
cls: ClsType[_models.OperationListResult] = kwargs.pop("cls", None)
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
api_version=api_version,
template_url=self.list.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
# make call to next link with the client's api-version
_parsed_next_link = urllib.parse.urlparse(next_link)
_next_request_params = case_insensitive_dict(
{
key: [urllib.parse.quote(v) for v in value]
for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
}
)
_next_request_params["api-version"] = self._config.api_version
request = HttpRequest(
"GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("OperationListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem) # type: ignore
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = METHOD_NAME(next_link)
_stream = False
pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(get_next, extract_data)
list.metadata = {"url": "/providers/Microsoft.VirtualMachineImages/operations"} | null |
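# Illustrative usage sketch (not part of the generated file); per the class
# docstring, this operation group is reached through the client's `operations`
# attribute:
#   async for op in client.operations.list():
#       print(op)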
num samples | import functools
import operator
import os
import os.path
import sys
import numpy as np
# Bamboo utilities
current_file = os.path.realpath(__file__)
current_dir = os.path.dirname(current_file)
sys.path.insert(0, os.path.join(os.path.dirname(current_dir), 'common_python'))
import tools
# ==============================================
# Objects for Python data reader
# ==============================================
# Note: The Python data reader imports this file as a module and calls
# the functions below to ingest data.
# Data
# Note: The L1 norm is not differentiable at 0, so we make sure values
# are away from 0.
np.random.seed(201910247)
_num_samples = 23
_sample_size = 7
_samples = np.random.normal(size=(_num_samples,_sample_size)).astype(np.float32)
# Sample access functions
def get_sample(index):
return _samples[index,:]
def METHOD_NAME():
return _num_samples
def sample_dims():
return (_sample_size,)
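# Together these three functions form the contract used by the Python data reader
# configured below: get_sample(i) returns one flat sample, the num_samples function
# reports the dataset size, and sample_dims() gives the shape of a single sample
# (here a vector of length 7).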
# ==============================================
# Setup LBANN experiment
# ==============================================
def setup_experiment(lbann, weekly):
"""Construct LBANN experiment.
Args:
lbann (module): Module for LBANN Python frontend
"""
mini_batch_size = METHOD_NAME() // 2
trainer = lbann.Trainer(mini_batch_size)
model = construct_model(lbann)
data_reader = construct_data_reader(lbann)
optimizer = lbann.NoOptimizer()
return trainer, model, data_reader, optimizer, None # Don't request any specific number of nodes
def construct_model(lbann):
"""Construct LBANN model.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Input data
# Note: Sum with a weights layer so that gradient checking will
# verify that error signals are correct.
x_weights = lbann.Weights(optimizer=lbann.SGD(),
initializer=lbann.ConstantInitializer(value=0.0),
name='input_weights')
x = lbann.Sum(lbann.Reshape(lbann.Input(data_field='samples'),
dims=_sample_size),
lbann.WeightsLayer(weights=x_weights,
dims=_sample_size))
x_lbann = x
# Objects for LBANN model
obj = []
metrics = []
callbacks = []
# ------------------------------------------
# Data-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.LogSigmoid(x, data_layout='data_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='data-parallel layout'))
# NumPy implementation
vals = []
for i in range(METHOD_NAME()):
x = get_sample(i).astype(np.float64)
y = x - np.log1p(np.exp(x))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Model-parallel layout
# ------------------------------------------
# LBANN implementation
x = x_lbann
y = lbann.LogSigmoid(x, data_layout='model_parallel')
z = lbann.L2Norm2(y)
obj.append(z)
metrics.append(lbann.Metric(z, name='model-parallel layout'))
# NumPy implementation
vals = []
for i in range(METHOD_NAME()):
x = get_sample(i).astype(np.float64)
y = x - np.log1p(np.exp(x))
z = tools.numpy_l2norm2(y)
vals.append(z)
val = np.mean(vals)
tol = 8 * val * np.finfo(np.float32).eps
callbacks.append(lbann.CallbackCheckMetric(
metric=metrics[-1].name,
lower_bound=val-tol,
upper_bound=val+tol,
error_on_failure=True,
execution_modes='test'))
# ------------------------------------------
# Gradient checking
# ------------------------------------------
callbacks.append(lbann.CallbackCheckGradients(error_on_failure=True))
# ------------------------------------------
# Construct model
# ------------------------------------------
num_epochs = 0
return lbann.Model(num_epochs,
layers=lbann.traverse_layer_graph(x_lbann),
objective_function=obj,
metrics=metrics,
callbacks=callbacks)
def construct_data_reader(lbann):
"""Construct Protobuf message for Python data reader.
The Python data reader will import the current Python file to
access the sample access functions.
Args:
lbann (module): Module for LBANN Python frontend
"""
# Note: The training data reader should be removed when
# https://github.com/LLNL/lbann/issues/1098 is resolved.
message = lbann.reader_pb2.DataReader()
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'train'
)
])
message.reader.extend([
tools.create_python_data_reader(
lbann,
current_file,
'get_sample',
'num_samples',
'sample_dims',
'test'
)
])
return message
# ==============================================
# Setup PyTest
# ==============================================
# Create test functions that can interact with PyTest
for _test_func in tools.create_tests(setup_experiment, __file__):
globals()[_test_func.__name__] = _test_func | null |
status message | # Copyright 2013-2023 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""Query the status of bootstrapping on this machine"""
import platform
from typing import List, Optional, Sequence, Tuple, Union
import spack.util.executable
from ._common import _executables_in_store, _python_import, _try_import_from_store
from .config import ensure_bootstrap_configuration
from .core import clingo_root_spec, patchelf_root_spec
from .environment import (
BootstrapEnvironment,
black_root_spec,
flake8_root_spec,
isort_root_spec,
mypy_root_spec,
pytest_root_spec,
)
ExecutablesType = Union[str, Sequence[str]]
RequiredResponseType = Tuple[bool, Optional[str]]
SpecLike = Union["spack.spec.Spec", str]
def _required_system_executable(exes: ExecutablesType, msg: str) -> RequiredResponseType:
"""Search for an executable is the system path only."""
if isinstance(exes, str):
exes = (exes,)
if spack.util.executable.which_string(*exes):
return True, None
return False, msg
def _required_executable(
exes: ExecutablesType, query_spec: SpecLike, msg: str
) -> RequiredResponseType:
"""Search for an executable in the system path or in the bootstrap store."""
if isinstance(exes, str):
exes = (exes,)
if spack.util.executable.which_string(*exes) or _executables_in_store(exes, query_spec):
return True, None
return False, msg
def _required_python_module(module: str, query_spec: SpecLike, msg: str) -> RequiredResponseType:
"""Check if a Python module is available in the current interpreter or
if it can be loaded from the bootstrap store
"""
if _python_import(module) or _try_import_from_store(module, query_spec):
return True, None
return False, msg
def _missing(name: str, purpose: str, system_only: bool = True) -> str:
"""Message to be printed if an executable is not found"""
msg = '[{2}] MISSING "{0}": {1}'
if not system_only:
return msg.format(name, purpose, "@*y{{B}}")
return msg.format(name, purpose, "@*y{{-}}")
def _core_requirements() -> List[RequiredResponseType]:
_core_system_exes = {
"make": _missing("make", "required to build software from sources"),
"patch": _missing("patch", "required to patch source code before building"),
"bash": _missing("bash", "required for Spack compiler wrapper"),
"tar": _missing("tar", "required to manage code archives"),
"gzip": _missing("gzip", "required to compress/decompress code archives"),
"unzip": _missing("unzip", "required to compress/decompress code archives"),
"bzip2": _missing("bzip2", "required to compress/decompress code archives"),
"git": _missing("git", "required to fetch/manage git repositories"),
}
if platform.system().lower() == "linux":
_core_system_exes["xz"] = _missing("xz", "required to compress/decompress code archives")
# Executables that are not bootstrapped yet
result = [_required_system_executable(exe, msg) for exe, msg in _core_system_exes.items()]
# Python modules
result.append(
_required_python_module(
"clingo", clingo_root_spec(), _missing("clingo", "required to concretize specs", False)
)
)
return result
def _buildcache_requirements() -> List[RequiredResponseType]:
_buildcache_exes = {
"file": _missing("file", "required to analyze files for buildcaches"),
("gpg2", "gpg"): _missing("gpg2", "required to sign/verify buildcaches", False),
}
if platform.system().lower() == "darwin":
_buildcache_exes["otool"] = _missing("otool", "required to relocate binaries")
# Executables that are not bootstrapped yet
result = [_required_system_executable(exe, msg) for exe, msg in _buildcache_exes.items()]
if platform.system().lower() == "linux":
result.append(
_required_executable(
"patchelf",
patchelf_root_spec(),
_missing("patchelf", "required to relocate binaries", False),
)
)
return result
def _optional_requirements() -> List[RequiredResponseType]:
_optional_exes = {
"zstd": _missing("zstd", "required to compress/decompress code archives"),
"svn": _missing("svn", "required to manage subversion repositories"),
"hg": _missing("hg", "required to manage mercurial repositories"),
}
# Executables that are not bootstrapped yet
result = [_required_system_executable(exe, msg) for exe, msg in _optional_exes.items()]
return result
def _development_requirements() -> List[RequiredResponseType]:
# Ensure we trigger environment modifications if we have an environment
if BootstrapEnvironment.spack_yaml().exists():
with BootstrapEnvironment() as env:
env.update_syspath_and_environ()
return [
_required_executable(
"isort", isort_root_spec(), _missing("isort", "required for style checks", False)
),
_required_executable(
"mypy", mypy_root_spec(), _missing("mypy", "required for style checks", False)
),
_required_executable(
"flake8", flake8_root_spec(), _missing("flake8", "required for style checks", False)
),
_required_executable(
"black", black_root_spec(), _missing("black", "required for code formatting", False)
),
_required_python_module(
"pytest", pytest_root_spec(), _missing("pytest", "required to run unit-test", False)
),
]
def METHOD_NAME(section) -> Tuple[str, bool]:
"""Return a status message to be printed to screen that refers to the
section passed as argument and a bool which is True if there are missing
dependencies.
Args:
section (str): either 'core' or 'buildcache' or 'optional' or 'develop'
"""
pass_token, fail_token = "@*g{[PASS]}", "@*r{[FAIL]}"
# Contain the header of the section and a list of requirements
spack_sections = {
"core": ("{0} @*{{Core Functionalities}}", _core_requirements),
"buildcache": ("{0} @*{{Binary packages}}", _buildcache_requirements),
"optional": ("{0} @*{{Optional Features}}", _optional_requirements),
"develop": ("{0} @*{{Development Dependencies}}", _development_requirements),
}
msg, required_software = spack_sections[section]
with ensure_bootstrap_configuration():
missing_software = False
for found, err_msg in required_software():
if not found and err_msg:
missing_software = True
msg += "\n " + err_msg
msg += "\n"
msg = msg.format(pass_token if not missing_software else fail_token)
return msg, missing_software | null |
run fio | # Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs fio against a remote gluster cluster."""
import json
from absl import flags
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import configs
from perfkitbenchmarker.linux_packages import fio
from perfkitbenchmarker.linux_packages import gluster
FLAGS = flags.FLAGS
flags.DEFINE_string('fill_disk_size', '4G',
'Amount to fill the disk before reading.')
flags.DEFINE_string('fill_disk_bs', '128k',
'Block size used to fill the disk before reading.')
flags.DEFINE_integer('fill_disk_iodepth', 64, 'iodepth used to fill the disk.')
flags.DEFINE_string('read_size', '4G', 'Size of the file to read.')
flags.DEFINE_string('read_bs', '512k', 'Block size of the file to read.')
flags.DEFINE_integer('read_iodepth', 1, 'iodepth used in reading the file.')
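# Illustrative invocation (the flag names come from the definitions above; the
# pkb.py entry point and the values are assumptions):
#
#   ./pkb.py --benchmarks=gluster_fio --read_size=8G --read_bs=1M --read_iodepth=8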
BENCHMARK_NAME = 'gluster_fio'
BENCHMARK_CONFIG = """
gluster_fio:
description: >
Runs fio against a remote gluster cluster.
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
gluster_servers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 1
"""
_VOLUME_NAME = 'vol01'
_MOUNT_POINT = '/glusterfs'
_NUM_SECTORS_READ_AHEAD = 16384
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Set up GlusterFS and install fio.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
clients = benchmark_spec.vm_groups['clients']
client_vm = clients[0]
background_tasks.RunThreaded(
lambda vm: vm.Install('fio'), gluster_servers + clients
)
for vm in gluster_servers:
vm.SetReadAhead(_NUM_SECTORS_READ_AHEAD,
[d.GetDevicePath() for d in vm.scratch_disks])
# Set up Gluster
if gluster_servers:
gluster.ConfigureServers(gluster_servers, _VOLUME_NAME)
args = [((client, gluster_servers[0], _VOLUME_NAME, _MOUNT_POINT), {})
for client in clients]
background_tasks.RunThreaded(gluster.MountGluster, args)
gluster_address = gluster_servers[0].internal_ip
client_vm.RemoteCommand('sudo mkdir -p /testdir')
client_vm.RemoteCommand('sudo mount %s:/vol01 /testdir -t glusterfs' %
gluster_address)
def METHOD_NAME(vm, fio_params, metadata):
"""Run fio.
Args:
vm: Virtual machine to run fio on.
fio_params: fio parameters used to create the fio command to run.
metadata: Metadata to add to the results.
Returns:
A list of sample.Sample objects
"""
stdout, _ = vm.RemoteCommand('sudo {0} {1}'.format(fio.GetFioExec(),
fio_params))
job_file_contents = fio.FioParametersToJob(fio_params)
samples = fio.ParseResults(
job_file_contents,
json.loads(stdout),
base_metadata=metadata,
skip_latency_individual_stats=True)
return samples
def Run(benchmark_spec):
"""Run fio against gluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
clients = benchmark_spec.vm_groups['clients']
client_vm = clients[0]
results = []
metadata = {
'fill_disk_size': FLAGS.fill_disk_size,
'fill_disk_bs': FLAGS.fill_disk_bs,
'fill_disk_iodepth': FLAGS.fill_disk_iodepth,
'read_size': FLAGS.read_size,
'read_bs': FLAGS.read_bs,
'read_iodepth': FLAGS.read_iodepth,
}
fio_params = ' '.join([
'--output-format=json', '--name=fill_disk',
'--filename=/testdir/testfile',
'--filesize=%s' % FLAGS.fill_disk_size, '--ioengine=libaio', '--direct=1',
'--verify=0', '--randrepeat=0',
'--bs=%s' % FLAGS.fill_disk_bs,
'--iodepth=%s' % FLAGS.fill_disk_iodepth, '--rw=randwrite'
])
samples = METHOD_NAME(client_vm, fio_params, metadata)
results += samples
# In addition to dropping caches, increase polling to potentially reduce
# variance in network operations
for vm in gluster_servers + clients:
vm.RemoteCommand('sudo /sbin/sysctl net.core.busy_poll=50')
vm.DropCaches()
fio_read_common_params = [
'--output-format=json', '--randrepeat=1', '--ioengine=libaio',
'--gtod_reduce=1', '--filename=/testdir/testfile',
'--bs=%s' % FLAGS.read_bs,
'--iodepth=%s' % FLAGS.read_iodepth,
'--size=%s' % FLAGS.read_size, '--readwrite=randread'
]
fio_params = '--name=first_read ' + ' '.join(fio_read_common_params)
samples = METHOD_NAME(client_vm, fio_params, metadata)
results += samples
# Run the command again. This time, the file should be cached.
fio_params = '--name=second_read ' + ' '.join(fio_read_common_params)
samples = METHOD_NAME(client_vm, fio_params, metadata)
results += samples
return results
def Cleanup(benchmark_spec):
"""Cleanup gluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['clients']
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
for client in clients:
client.RemoteCommand('sudo umount %s' % _MOUNT_POINT)
if gluster_servers:
gluster.DeleteVolume(gluster_servers[0], _VOLUME_NAME) | null |
clear link | """Service for linking the project to Minecraft."""
__all__ = [
"LinkManager",
]
import logging
import os
import platform
from pathlib import Path
from typing import List, Optional, Union
from beet import Cache, CachePin, Context, ErrorMessage, MultiCache, PackOverwrite
from beet.core.utils import FileSystemPath, log_time, remove_path
logger = logging.getLogger("link")
def link_cache_finalizer(cache: Cache):
"""Link cache finalizer."""
LinkManager(cache).clean()
class LinkManager:
cache: Cache
dirty = CachePin[List[str]]("dirty", default_factory=list)
world = CachePin[Optional[str]]("world", None)
minecraft = CachePin[Optional[str]]("minecraft", None)
data_pack = CachePin[Optional[str]]("data_pack", None)
resource_pack = CachePin[Optional[str]]("resource_pack", None)
def __init__(self, arg: Union[Context, MultiCache[Cache], Cache]):
if isinstance(arg, Context):
arg = arg.cache
if isinstance(arg, MultiCache):
arg = arg["link"]
self.cache = arg
self.cache.add_finalizer(link_cache_finalizer)
def clean(self):
"""Remove the previously linked files and folders."""
remove_path(*self.dirty)
self.dirty.clear()
def autosave_handler(self, ctx: Context):
"""Plugin for linking the generated resource pack and data pack to Minecraft."""
to_link = [
(Path(directory), pack)
for directory, pack in zip([self.resource_pack, self.data_pack], ctx.packs)
if directory and pack
]
if to_link:
with log_time("Link project."):
for directory, pack in to_link:
try:
self.dirty.append(str(pack.save(directory)))
except PackOverwrite as exc:
logger.warning(
f"Remove the conflicting pack to set up the link. {exc}"
)
def setup_link(
self,
world: Optional[FileSystemPath] = None,
minecraft: Optional[FileSystemPath] = None,
data_pack: Optional[FileSystemPath] = None,
resource_pack: Optional[FileSystemPath] = None,
):
"""Associate minecraft directories to the project."""
if minecraft:
minecraft = Path(minecraft).resolve()
if not minecraft.is_dir():
raise ErrorMessage(f"The specified Minecraft folder does not exist.")
else:
self.locate_minecraft()
minecraft = Path(self.minecraft) if self.minecraft else None
if world:
world_name = world
world = Path(world).resolve()
if not (world / "level.dat").is_file():
if minecraft and Path(world_name).parts == (world_name,):
world = minecraft / "saves" / world_name
if not world.is_dir():
raise ErrorMessage(
f"Couldn't find {str(world_name)!r} in the Minecraft save folder."
)
else:
raise ErrorMessage(f"The specified world folder is invalid.")
else:
world = None
if data_pack:
data_pack = Path(data_pack).resolve()
if not data_pack.is_dir():
raise ErrorMessage(
f"The specified data packs directory does not exist."
)
elif world:
data_pack = world / "datapacks"
else:
data_pack = None
if data_pack and not world:
world = data_pack.parent
if world and not minecraft:
minecraft = world.parent.parent
if resource_pack:
resource_pack = Path(resource_pack).resolve()
if not resource_pack.is_dir():
raise ErrorMessage(
f"The specified resource packs directory does not exist."
)
elif minecraft:
resource_pack = minecraft / "resourcepacks"
else:
resource_pack = None
if resource_pack and not minecraft:
minecraft = resource_pack.parent
if world:
self.world = str(world)
if minecraft:
self.minecraft = str(minecraft)
if data_pack:
self.data_pack = str(data_pack)
if resource_pack:
self.resource_pack = str(resource_pack)
def METHOD_NAME(self):
"""Clear the link."""
self.cache.clear()
def locate_minecraft(self):
"""Try to find the .minecraft folder."""
locations = [
Path(path)
for path in os.environ.get("MINECRAFT_PATH", "").split(":")
if path
]
system = platform.system()
if system == "Linux":
locations.append(Path("~/.minecraft").expanduser())
locations.append(
Path("~/.var/app/com.mojang.Minecraft/data/minecraft").expanduser()
)
elif system == "Darwin":
locations.append(
Path("~/Library/Application Support/minecraft").expanduser()
)
elif system == "Windows":
locations.append(Path(os.path.expandvars(r"%APPDATA%\.minecraft")))
if path := next((path for path in locations if path and path.is_dir()), None):
self.minecraft = str(path.resolve())
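    # Illustrative override (shell syntax is an assumption, not part of this
    # module): MINECRAFT_PATH is colon-separated and checked before the
    # per-OS defaults above, e.g.
    #
    #   MINECRAFT_PATH=/opt/minecraft:/mnt/games/.minecraft beet watch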
def summary(self) -> str:
"""Return a formatted summary."""
return "\n".join(
f"{title}:\n | directory = {directory}\n"
for title, directory in [
("Minecraft installation", self.minecraft),
("World folder", self.world),
("Data packs directory", self.data_pack),
("Resource packs directory", self.resource_pack),
]
) | null |
enumerate interfaces from client | #!/usr/bin/env python
"""Windows specific actions.
Most of these actions share an interface (in/out rdfvalues) with linux actions
of the same name. Windows-only actions are registered with the server via
libs/server_stubs.py
"""
import binascii
import logging
import pythoncom
import win32api
import win32com.client
import win32file
import win32service
import win32serviceutil
import winreg
import wmi
from grr_response_client import actions
from grr_response_client.client_actions import standard
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib.rdfvalues import client_action as rdf_client_action
from grr_response_core.lib.rdfvalues import client_fs as rdf_client_fs
from grr_response_core.lib.rdfvalues import client_network as rdf_client_network
from grr_response_core.lib.rdfvalues import protodict as rdf_protodict
# Properties to remove from results sent to the server.
# These properties are included with nearly every WMI object and use space.
IGNORE_PROPS = [
"CSCreationClassName", "CreationClassName", "OSName", "OSCreationClassName",
"WindowsVersion", "CSName", "__NAMESPACE", "__SERVER", "__PATH"
]
class GetInstallDate(actions.ActionPlugin):
"""Estimate the install date of this system."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_args):
"""Estimate the install date of this system."""
# Don't use winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
subkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion",
0, winreg.KEY_READ)
install_date = winreg.QueryValueEx(subkey, "InstallDate")
self.SendReply(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(install_date[0]))
def METHOD_NAME(args):
"""Enumerate all MAC addresses of all NICs.
Args:
args: Unused.
Yields:
`rdf_client_network.Interface` instances.
"""
del args # Unused.
pythoncom.CoInitialize()
for interface in (wmi.WMI().Win32_NetworkAdapterConfiguration() or []):
addresses = []
for ip_address in interface.IPAddress or []:
addresses.append(
rdf_client_network.NetworkAddress(human_readable_address=ip_address))
response = rdf_client_network.Interface(ifname=interface.Description)
if interface.MACAddress:
response.mac_address = binascii.unhexlify(
interface.MACAddress.replace(":", ""))
if addresses:
response.addresses = addresses
yield response
class EnumerateInterfaces(actions.ActionPlugin):
"""Enumerate all MAC addresses of all NICs.
Win32_NetworkAdapterConfiguration definition:
http://msdn.microsoft.com/en-us/library/aa394217(v=vs.85).aspx
"""
out_rdfvalues = [rdf_client_network.Interface]
def Run(self, args):
for res in METHOD_NAME(args):
self.SendReply(res)
def EnumerateFilesystemsFromClient(args):
"""List all local filesystems mounted on this system."""
del args # Unused.
for drive in win32api.GetLogicalDriveStrings().split("\x00"):
if not drive:
continue
try:
volume = win32file.GetVolumeNameForVolumeMountPoint(drive).rstrip("\\")
label, _, _, _, fs_type = win32api.GetVolumeInformation(drive)
except win32api.error:
continue
yield rdf_client_fs.Filesystem(
device=volume,
mount_point="/%s:/" % drive[0],
type=fs_type,
label=label)
class EnumerateFilesystems(actions.ActionPlugin):
"""Enumerate all unique filesystems local to the system."""
out_rdfvalues = [rdf_client_fs.Filesystem]
def Run(self, args):
for res in EnumerateFilesystemsFromClient(args):
self.SendReply(res)
class Uninstall(actions.ActionPlugin):
"""Remove the service that starts us at startup."""
out_rdfvalues = [rdf_protodict.DataBlob]
def Run(self, unused_arg):
"""This kills us with no cleanups."""
logging.debug("Disabling service")
win32serviceutil.ChangeServiceConfig(
None,
config.CONFIG["Nanny.service_name"],
startType=win32service.SERVICE_DISABLED)
svc_config = QueryService(config.CONFIG["Nanny.service_name"])
if svc_config[1] == win32service.SERVICE_DISABLED:
logging.info("Disabled service successfully")
self.SendReply(rdf_protodict.DataBlob(string="Service disabled."))
else:
self.SendReply(
rdf_protodict.DataBlob(string="Service failed to disable."))
def QueryService(svc_name):
"""Query service and get its config."""
hscm = win32service.OpenSCManager(None, None,
win32service.SC_MANAGER_ALL_ACCESS)
result = None
try:
hs = win32serviceutil.SmartOpenService(hscm, svc_name,
win32service.SERVICE_ALL_ACCESS)
result = win32service.QueryServiceConfig(hs)
win32service.CloseServiceHandle(hs)
finally:
win32service.CloseServiceHandle(hscm)
return result
def WmiQueryFromClient(args):
"""Run the WMI query and return the data."""
query = args.query
base_object = args.base_object or r"winmgmts:\root\cimv2"
if not query.upper().startswith("SELECT "):
raise RuntimeError("Only SELECT WMI queries allowed.")
for response_dict in RunWMIQuery(query, baseobj=base_object):
yield response_dict
class WmiQuery(actions.ActionPlugin):
"""Runs a WMI query and returns the results to a server callback."""
in_rdfvalue = rdf_client_action.WMIRequest
out_rdfvalues = [rdf_protodict.Dict]
def Run(self, args):
for res in WmiQueryFromClient(args):
self.SendReply(res)
def RunWMIQuery(query, baseobj=r"winmgmts:\root\cimv2"):
"""Run a WMI query and return a result.
Args:
query: the WMI query to run.
baseobj: the base object for the WMI query.
Yields:
rdf_protodict.Dicts containing key value pairs from the resulting COM
objects.
"""
pythoncom.CoInitialize() # Needs to be called if using com from a thread.
wmi_obj = win32com.client.GetObject(baseobj)
# This allows our WMI to do some extra things, in particular
# it gives it access to find the executable path for all processes.
wmi_obj.Security_.Privileges.AddAsString("SeDebugPrivilege")
# Run query
try:
query_results = wmi_obj.ExecQuery(query)
except pythoncom.com_error as e:
raise RuntimeError("Failed to run WMI query \'%s\' err was %s" % (query, e))
# Extract results from the returned COMObject and return dicts.
try:
for result in query_results:
response = rdf_protodict.Dict()
properties = (
list(result.Properties_) +
list(getattr(result, "SystemProperties_", [])))
for prop in properties:
if prop.Name not in IGNORE_PROPS:
# Protodict can handle most of the types we care about, but we may
# get some objects that we don't know how to serialize, so we tell the
# dict to set the value to an error message and keep going
response.SetItem(prop.Name, prop.Value, raise_on_error=False)
yield response
except pythoncom.com_error as e:
raise RuntimeError("WMI query data error on query \'%s\' err was %s" %
(e, query))
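# Illustrative use of the generator above (the query text is an example only):
#
#   for row in RunWMIQuery("SELECT Name, ProcessId FROM Win32_Process"):
#     logging.info("wmi row: %s", row)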
class UpdateAgent(standard.ExecuteBinaryCommand):
"""Updates the GRR agent to a new version."""
# For Windows this is just an alias to ExecuteBinaryCommand. | null |
created date | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetHybridUseBenefitResult',
'AwaitableGetHybridUseBenefitResult',
'get_hybrid_use_benefit',
'get_hybrid_use_benefit_output',
]
@pulumi.output_type
class GetHybridUseBenefitResult:
"""
Response on GET of a hybrid use benefit
"""
def __init__(__self__, METHOD_NAME=None, etag=None, id=None, last_updated_date=None, name=None, provisioning_state=None, sku=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", METHOD_NAME)
if etag and not isinstance(etag, int):
raise TypeError("Expected argument 'etag' to be a int")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_updated_date and not isinstance(last_updated_date, str):
raise TypeError("Expected argument 'last_updated_date' to be a str")
pulumi.set(__self__, "last_updated_date", last_updated_date)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="createdDate")
def METHOD_NAME(self) -> str:
"""
Created date
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def etag(self) -> int:
"""
Indicates the revision of the hybrid use benefit
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastUpdatedDate")
def last_updated_date(self) -> str:
"""
Last updated date
"""
return pulumi.get(self, "last_updated_date")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def sku(self) -> 'outputs.SkuResponse':
"""
Hybrid use benefit SKU
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetHybridUseBenefitResult(GetHybridUseBenefitResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetHybridUseBenefitResult(
METHOD_NAME=self.METHOD_NAME,
etag=self.etag,
id=self.id,
last_updated_date=self.last_updated_date,
name=self.name,
provisioning_state=self.provisioning_state,
sku=self.sku,
type=self.type)
def get_hybrid_use_benefit(plan_id: Optional[str] = None,
scope: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHybridUseBenefitResult:
"""
Gets a given plan ID
:param str plan_id: This is a unique identifier for a plan. Should be a guid.
:param str scope: The scope at which the operation is performed. This is limited to Microsoft.Compute/virtualMachines and Microsoft.Compute/hostGroups/hosts for now
"""
__args__ = dict()
__args__['planId'] = plan_id
__args__['scope'] = scope
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:softwareplan/v20191201:getHybridUseBenefit', __args__, opts=opts, typ=GetHybridUseBenefitResult).value
return AwaitableGetHybridUseBenefitResult(
METHOD_NAME=pulumi.get(__ret__, 'created_date'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
last_updated_date=pulumi.get(__ret__, 'last_updated_date'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
sku=pulumi.get(__ret__, 'sku'),
type=pulumi.get(__ret__, 'type'))
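# Minimal usage sketch inside a Pulumi program (all identifiers below are
# placeholders, not real resources):
#
#   benefit = get_hybrid_use_benefit(
#       plan_id="00000000-0000-0000-0000-000000000000",
#       scope="subscriptions/<subscription-id>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
#   )
#   pulumi.export("hybridUseBenefitName", benefit.name)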
@_utilities.lift_output_func(get_hybrid_use_benefit)
def get_hybrid_use_benefit_output(plan_id: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetHybridUseBenefitResult]:
"""
Gets a given plan ID
:param str plan_id: This is a unique identifier for a plan. Should be a guid.
:param str scope: The scope at which the operation is performed. This is limited to Microsoft.Compute/virtualMachines and Microsoft.Compute/hostGroups/hosts for now
"""
... | null |
tostring | import hashlib
import logging
from gettext import gettext as _
from lxml import etree
LOGGER = logging.getLogger(__name__)
LOGGER_FMT = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
class PidProviderXMLAdapter:
def __init__(self, xml_with_pre, pkg_name=None):
self.xml_with_pre = xml_with_pre
self.pkg_name = pkg_name
def METHOD_NAME(self):
return self.xml_with_pre.METHOD_NAME()
@property
def sps_pkg_name(self):
return self.xml_with_pre.sps_pkg_name
@property
def finger_print(self):
return self.xml_with_pre.finger_print
@property
def related_items(self):
return self.xml_with_pre.related_items
@property
def journal_issn_electronic(self):
return self.xml_with_pre.journal_issn_electronic
@property
def journal_issn_print(self):
return self.xml_with_pre.journal_issn_print
@property
def v2_prefix(self):
return self.xml_with_pre.v2_prefix
@property
def volume(self):
return self.xml_with_pre.volume
@property
def number(self):
return self.xml_with_pre.number
@property
def suppl(self):
return self.xml_with_pre.suppl
@property
def pub_year(self):
return self.xml_with_pre.pub_year
@property
def article_pub_year(self):
return self.xml_with_pre.article_pub_year
@property
def main_doi(self):
return self.xml_with_pre.main_doi
@property
def main_toc_section(self):
return self.xml_with_pre.main_toc_section
@property
def is_aop(self):
return self.xml_with_pre.is_aop
@property
def elocation_id(self):
return self.xml_with_pre.elocation_id
@property
def fpage(self):
return self.xml_with_pre.fpage
@property
def fpage_seq(self):
return self.xml_with_pre.fpage_seq
@property
def lpage(self):
return self.xml_with_pre.lpage
@property
def v2(self):
return self.xml_with_pre.v2
@v2.setter
def v2(self, value):
self.xml_with_pre.v2 = value
@property
def v3(self):
return self.xml_with_pre.v3
@v3.setter
def v3(self, value):
self.xml_with_pre.v3 = value
@property
def aop_pid(self):
return self.xml_with_pre.aop_pid
@aop_pid.setter
def aop_pid(self, value):
self.xml_with_pre.aop_pid = value
@property
def z_links(self):
if not hasattr(self, "_links") or not self._links:
self._links = _str_with_64_char("|".join(self.xml_with_pre.links))
return self._links
@property
def z_collab(self):
if not hasattr(self, "_collab") or not self._collab:
self._collab = _str_with_64_char(self.xml_with_pre.collab)
return self._collab
@property
def z_surnames(self):
if not hasattr(self, "_surnames") or not self._surnames:
self._surnames = _str_with_64_char(
"|".join(
[
_standardize(person.get("surname"))
for person in self.xml_with_pre.authors.get("person")
]
)
)
return self._surnames
@property
def z_article_titles_texts(self):
return _str_with_64_char(
"|".join(sorted(self.xml_with_pre.article_titles_texts or []))
)
@property
def z_partial_body(self):
if not hasattr(self, "_partial_body") or not self._partial_body:
self._partial_body = _str_with_64_char(self.xml_with_pre.partial_body)
return self._partial_body
def query_params(self, filter_by_issue=False, aop_version=False):
"""
Get query parameters
Arguments
---------
filter_by_issue: bool
aop_version: bool
Returns
-------
dict
"""
_params = dict(
z_surnames=self.z_surnames or None,
z_collab=self.z_collab or None,
)
if not any(_params.values()):
_params["main_doi"] = self.main_doi
if not any(_params.values()):
_params["z_links"] = self.z_links
if not any(_params.values()):
_params["pkg_name"] = self.sps_pkg_name
if not any(_params.values()):
_params["z_partial_body"] = self.z_partial_body
_params["elocation_id"] = self.elocation_id
if aop_version:
_params["issue__isnull"] = True
else:
if filter_by_issue:
_params["issue__pub_year"] = self.pub_year
_params["issue__volume"] = self.volume
_params["issue__number"] = self.number
_params["issue__suppl"] = self.suppl
_params["fpage"] = self.fpage
_params["fpage_seq"] = self.fpage_seq
_params["lpage"] = self.lpage
_params["journal__issn_print"] = self.journal_issn_print
_params["journal__issn_electronic"] = self.journal_issn_electronic
_params["article_pub_year"] = self.article_pub_year
_params["z_article_titles_texts"] = self.z_article_titles_texts
LOGGER.info(_params)
return _params
@classmethod
def adapt_query_params(cls, params):
"""
Adapt query parameters
Parameters
----------
params : dict
Returns
-------
dict
"""
_params = params.copy()
LOGGER.info(f"Adapt params input: {_params}")
attr_names = (
"main_doi",
"pkg_name",
"elocation_id",
"issue__volume",
"issue__number",
"issue__suppl",
"fpage",
"fpage_seq",
"lpage",
)
for attr_name in attr_names:
try:
_params[f"{attr_name}__iexact"] = _params.pop(attr_name)
except KeyError:
continue
LOGGER.info(f"Adapt params output: {_params}")
return _params
@property
def query_list(self):
items = []
if self.is_aop:
LOGGER.info("self.is_aop")
# o xml_adapter não contém dados de issue
# não indica na consulta o valor para o atributo issue
# então o registro encontrado pode ou não ter dados de issue
params = self.query_params(aop_version=False)
items.append(params)
else:
            # the xml_adapter has issue data,
            # so the issue data is included in the query
LOGGER.info("not self.is_aop")
params = self.query_params(filter_by_issue=True)
items.append(params)
            # also search for a record whose issue value is None
params = self.query_params(aop_version=True)
items.append(params)
return items
def _standardize(text):
return (text or "").strip().upper()
def _str_with_64_char(text):
"""
>>> import hashlib
>>> m = hashlib.sha256()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\x03\x1e\xdd}Ae\x15\x93\xc5\xfe\\\x00o\xa5u+7\xfd\xdf\xf7\xbcN\x84:\xa6\xaf\x0c\x95\x0fK\x94\x06'
>>> m.digest_size
32
>>> m.block_size
64
hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
"""
if not text:
return None
return hashlib.sha256(_standardize(text).encode("utf-8")).hexdigest() | null |
switch widget | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Main widget to use in plugins that show content that comes from the IPython
console, such as the Variable Explorer or Plots.
"""
# Third party imports
from qtpy.QtWidgets import QStackedWidget, QVBoxLayout
# Local imports
from spyder.api.translations import _
from spyder.api.widgets.main_widget import PluginMainWidget
from spyder.widgets.helperwidgets import PaneEmptyWidget
class ShellConnectMainWidget(PluginMainWidget):
"""
Main widget to use in a plugin that shows console-specific content.
Notes
-----
* This is composed of a QStackedWidget to stack widgets associated to each
shell widget in the console and only show one of them at a time.
* The current widget in the stack will display the content associated to
the console with focus.
"""
def __init__(self, *args, set_layout=True, **kwargs):
super().__init__(*args, **kwargs)
# Widgets
self._stack = QStackedWidget(self)
self._shellwidgets = {}
if set_layout:
# Layout
layout = QVBoxLayout()
layout.addWidget(self._stack)
self.setLayout(layout)
# ---- PluginMainWidget API
# ------------------------------------------------------------------------
def current_widget(self):
"""
Return the current widget in the stack.
Returns
-------
QWidget
The current widget.
"""
return self._stack.currentWidget()
def get_focus_widget(self):
return self.current_widget()
# ---- SpyderWidgetMixin API
# ------------------------------------------------------------------------
def update_style(self):
self._stack.setStyleSheet("QStackedWidget {padding: 0px; border: 0px}")
    # ---- Stack accessors
# ------------------------------------------------------------------------
def count(self):
"""
Return the number of widgets in the stack.
Returns
-------
int
The number of widgets in the stack.
"""
return self._stack.count()
def get_widget_for_shellwidget(self, shellwidget):
"""return widget corresponding to shellwidget."""
shellwidget_id = id(shellwidget)
if shellwidget_id in self._shellwidgets:
return self._shellwidgets[shellwidget_id]
return None
# ---- Public API
# ------------------------------------------------------------------------
def add_shellwidget(self, shellwidget):
"""Create a new widget in the stack and associate it to shellwidget."""
shellwidget_id = id(shellwidget)
if shellwidget_id not in self._shellwidgets:
widget = self.create_new_widget(shellwidget)
self._stack.addWidget(widget)
self._shellwidgets[shellwidget_id] = widget
# Add all actions to new widget for shortcuts to work.
for __, action in self.get_actions().items():
if action:
widget_actions = widget.actions()
if action not in widget_actions:
widget.addAction(action)
self.set_shellwidget(shellwidget)
def remove_shellwidget(self, shellwidget):
"""Remove widget associated to shellwidget."""
shellwidget_id = id(shellwidget)
if shellwidget_id in self._shellwidgets:
widget = self._shellwidgets.pop(shellwidget_id)
self._stack.removeWidget(widget)
self.close_widget(widget)
self.update_actions()
def set_shellwidget(self, shellwidget):
"""Set widget associated with shellwidget as the current widget."""
old_widget = self.current_widget()
widget = self.get_widget_for_shellwidget(shellwidget)
if widget is None:
return
self._stack.setCurrentWidget(widget)
self.METHOD_NAME(widget, old_widget)
self.update_actions()
def add_errored_shellwidget(self, shellwidget):
"""
Create a new PaneEmptyWidget in the stack and associate it to
shellwidget.
This is necessary to show a meaningful message when switching to
consoles with dead kernels.
"""
shellwidget_id = id(shellwidget)
if shellwidget_id not in self._shellwidgets:
widget = PaneEmptyWidget(
self,
"variable-explorer", # TODO: Use custom icon here
_("No connected console"),
_("The current console failed to start, so there is no "
"content to show here.")
)
self._stack.addWidget(widget)
self._shellwidgets[shellwidget_id] = widget
self.set_shellwidget(shellwidget)
def create_new_widget(self, shellwidget):
"""Create a widget to communicate with shellwidget."""
raise NotImplementedError
def close_widget(self, widget):
"""Close the widget."""
raise NotImplementedError
def METHOD_NAME(self, widget, old_widget):
"""Switch the current widget."""
raise NotImplementedError
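    # Sketch of the subclass contract (hypothetical widget names, not part of
    # Spyder itself):
    #
    #   class MyConsoleContentWidget(ShellConnectMainWidget):
    #       def create_new_widget(self, shellwidget):
    #           return MyContentWidget(shellwidget, parent=self)
    #       def close_widget(self, widget):
    #           widget.close()
    #       def METHOD_NAME(self, widget, old_widget):
    #           widget.refresh()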
def refresh(self):
"""Refresh widgets."""
if self.count():
widget = self.current_widget()
widget.refresh()
def is_current_widget_empty(self):
"""Check if the current widget is a PaneEmptyWidget."""
return isinstance(self.current_widget(), PaneEmptyWidget) | null |
on connect | #############################################
# This is a basic script to emulate the hardware of
# an Arduino microcontroller. The VirtualDevice
# service will execute this script when
# createVirtualArduino(port) is called
import time
import math
import threading
from random import randint
from org.myrobotlab.codec.serial import ArduinoMsgCodec
working = False
worker = None
analogReadPollingPins = []
digitalReadPollingPins = []
def work():
"""thread worker function"""
global working, analogReadPollingPins
x = 0
working = True
while(working):
x = x + 0.09
y = int(math.cos(x) * 100 + 150)
# retcmd = "publishPin/" + str(pin) + "/3/"+ str(y) +"\n"
# uart.write(codec.encode(retcmd))
for pinx in digitalReadPollingPins:
retcmd = "publishPin/" + str(pinx) + "/0/"+str(randint(0,1))+"\n"
uart.write(codec.encode(retcmd))
for pinx in analogReadPollingPins:
#retcmd = "publishPin/" + str(pinx) + "/4/"+ str(y) +"\n"
#retcmd = "publishPin/" + str(pinx) + "/" + str(int(pinx)%4) + "/"+ str(y) +"\n"
retcmd = "publishPin/" + str(pinx) + "/1/"+ str(y) +"\n"
uart.write(codec.encode(retcmd))
sleep(0.001)
#print (y)
# TODO -------
# if (digitalReadPollingPins.length() == 0 && analogReadPollingPins.length() == 0
# working = False
print("I am done !")
codec = ArduinoMsgCodec()
virtual = runtime.start("virtual", "VirtualDevice")
logic = virtual.getLogic()
# get uarts and subscribe to them
for uartName in virtual.getUarts().keySet():
uart = virtual.getUart(uartName)
logic.subscribe(uart.getName(), "publishRX")
logic.subscribe(uart.getName(), "onConnect")
logic.subscribe(uart.getName(), "onPortNames")
logic.subscribe(uart.getName(), "onDisconnect")
def onRX(b):
global working, worker, analogReadPollingPins
print("onByte", b)
command = codec.decode(b)
if command != None and len(command) > 0 :
print("decoded", command)
# rstrip strips the \n from the record
command = command.rstrip()
clist = command.split('/')
if command == "getVersion":
uart.write(codec.encode("publishVersion/"+ str(ArduinoMsgCodec.MRLCOMM_VERSION) +"\n"))
elif command.startswith("digitalReadPollingStart"):
print("digitalReadPollingStart")
pin = clist[1]
digitalReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("digitalReadPollingStop"):
print("digitalReadPollingStop")
pin = clist[1]
digitalReadPollingPins.remove(pin)
elif command.startswith("analogReadPollingStart"):
print("analogReadPollingStart")
pin = clist[1]
analogReadPollingPins.append(pin)
if worker == None:
worker = threading.Thread(name='worker', target=work)
worker.setDaemon(True)
worker.start()
elif command.startswith("analogReadPollingStop"):
print("analogReadPollingStop")
pin = clist[1]
analogReadPollingPins.remove(pin)
def off():
    global working, worker
    working = False
    worker = None
def METHOD_NAME(portName):
print("onConnect to ", portName)
# FIXME ??? is this bad algorithm to determine callback method name ?
# seems somebody is expecting it this way
def onOnConnect(portName):
print("onOnConnect connected to ", portName)
def onPortNames(portName):
print("onPortNames TODO - list portNames")
def onOnPortNames(portName):
print("onOnPortNames TODO - list portNames")
def onDisconnect(portName):
print("onDisconnect from ", portName)
def onOnDisconnect(portName):
print("onOnDisconnect from ", portName)
# WHAT THE HECK IS THIS ABOUT ?
# TODO - find out
def serial1RX(data):
print("serial1RX ", data)
def serial2RX(data):
print("serial2RX ", data)
def serial3RX(data):
print("serial3RX ", data)
def serial4RX(data):
print("serial4RX ", data)
def serial5RX(data):
print("serial5RX ", data)
def serial6RX(data):
print("serial6RX ", data)
def serial7RX(data):
print("serial7RX ", data)
def serial8RX(data):
print("serial8RX ", data)
| null |
location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetConfigurationAssignmentsForSubscriptionResult',
'AwaitableGetConfigurationAssignmentsForSubscriptionResult',
'get_configuration_assignments_for_subscription',
'get_configuration_assignments_for_subscription_output',
]
@pulumi.output_type
class GetConfigurationAssignmentsForSubscriptionResult:
"""
Configuration Assignment
"""
def __init__(__self__, filter=None, id=None, METHOD_NAME=None, maintenance_configuration_id=None, name=None, resource_id=None, system_data=None, type=None):
if filter and not isinstance(filter, dict):
raise TypeError("Expected argument 'filter' to be a dict")
pulumi.set(__self__, "filter", filter)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if maintenance_configuration_id and not isinstance(maintenance_configuration_id, str):
raise TypeError("Expected argument 'maintenance_configuration_id' to be a str")
pulumi.set(__self__, "maintenance_configuration_id", maintenance_configuration_id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_id and not isinstance(resource_id, str):
raise TypeError("Expected argument 'resource_id' to be a str")
pulumi.set(__self__, "resource_id", resource_id)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def filter(self) -> Optional['outputs.ConfigurationAssignmentFilterPropertiesResponse']:
"""
Properties of the configuration assignment
"""
return pulumi.get(self, "filter")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified identifier of the resource
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[str]:
"""
Location of the resource
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maintenanceConfigurationId")
def maintenance_configuration_id(self) -> Optional[str]:
"""
The maintenance configuration Id
"""
return pulumi.get(self, "maintenance_configuration_id")
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceId")
def resource_id(self) -> Optional[str]:
"""
The unique resourceId
"""
return pulumi.get(self, "resource_id")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Type of the resource
"""
return pulumi.get(self, "type")
class AwaitableGetConfigurationAssignmentsForSubscriptionResult(GetConfigurationAssignmentsForSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetConfigurationAssignmentsForSubscriptionResult(
filter=self.filter,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
maintenance_configuration_id=self.maintenance_configuration_id,
name=self.name,
resource_id=self.resource_id,
system_data=self.system_data,
type=self.type)
def get_configuration_assignments_for_subscription(configuration_assignment_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetConfigurationAssignmentsForSubscriptionResult:
"""
    Get configuration assignment for resource.
Azure REST API version: 2023-04-01.
:param str configuration_assignment_name: Configuration assignment name
"""
__args__ = dict()
__args__['configurationAssignmentName'] = configuration_assignment_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:maintenance:getConfigurationAssignmentsForSubscription', __args__, opts=opts, typ=GetConfigurationAssignmentsForSubscriptionResult).value
return AwaitableGetConfigurationAssignmentsForSubscriptionResult(
filter=pulumi.get(__ret__, 'filter'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
maintenance_configuration_id=pulumi.get(__ret__, 'maintenance_configuration_id'),
name=pulumi.get(__ret__, 'name'),
resource_id=pulumi.get(__ret__, 'resource_id'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_configuration_assignments_for_subscription)
def get_configuration_assignments_for_subscription_output(configuration_assignment_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetConfigurationAssignmentsForSubscriptionResult]:
"""
    Get configuration assignment for resource.
Azure REST API version: 2023-04-01.
:param str configuration_assignment_name: Configuration assignment name
"""
... | null |
pre operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"eventhubs cluster namespace list",
)
class List(AAZCommand):
"""List all Event Hubs Namespace IDs in an Event Hubs Dedicated Cluster.
"""
_aaz_info = {
"version": "2023-01-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.eventhub/clusters/{}/namespaces", "2023-01-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.cluster_name = AAZStrArg(
options=["-n", "--name", "--cluster-name"],
help="The name of the Event Hubs Cluster.",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
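    # Illustrative CLI call routed to this command class (resource names are
    # placeholders):
    #
    #   az eventhubs cluster namespace list --resource-group my-rg --cluster-name my-cluster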
def _execute_operations(self):
self.METHOD_NAME()
self.ClustersListNamespaces(ctx=self.ctx)()
self.post_operations()
@register_callback
def METHOD_NAME(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ClustersListNamespaces(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.EventHub/clusters/{clusterName}/namespaces",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"clusterName", self.ctx.args.cluster_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-01-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType()
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"] | null |
translate points to crop | # Copyright (C) 2020-2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import numpy as np
import os
import cv2
import torch
from networks.mainnetwork import Network
from dataloaders import helpers
def convert_mask_to_polygon(mask):
contours = None
if int(cv2.__version__.split('.')[0]) > 3:
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[0]
else:
contours = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_KCOS)[1]
contours = max(contours, key=lambda arr: arr.size)
if contours.shape.count(1):
contours = np.squeeze(contours)
if contours.size < 3 * 2:
        raise Exception('Less than three points have been detected. Cannot build a polygon.')
polygon = []
for point in contours:
polygon.append([int(point[0]), int(point[1])])
return polygon
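# Minimal usage sketch for the helper above (a toy binary mask, not real model
# output):
#
#   toy_mask = np.zeros((64, 64), dtype=np.uint8)
#   toy_mask[16:48, 8:40] = 255
#   convert_mask_to_polygon(toy_mask)  # -> [[x0, y0], [x1, y1], ...] around the blob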
class ModelHandler:
def __init__(self):
base_dir = os.environ.get("MODEL_PATH", "/opt/nuclio/iog")
model_path = os.path.join(base_dir, "IOG_PASCAL_SBD.pth")
self.device = torch.device("cpu")
# Number of input channels (RGB + heatmap of IOG points)
self.net = Network(nInputChannels=5, num_classes=1, backbone='resnet101',
output_stride=16, sync_bn=None, freeze_bn=False)
pretrain_dict = torch.load(model_path)
self.net.load_state_dict(pretrain_dict)
self.net.to(self.device)
self.net.eval()
def handle(self, image, bbox, pos_points, neg_points, threshold):
with torch.no_grad():
# extract a crop with padding from the image
crop_padding = 30
crop_bbox = [
max(bbox[0][0] - crop_padding, 0),
max(bbox[0][1] - crop_padding, 0),
min(bbox[1][0] + crop_padding, image.width - 1),
min(bbox[1][1] + crop_padding, image.height - 1)
]
crop_shape = (
int(crop_bbox[2] - crop_bbox[0] + 1), # width
int(crop_bbox[3] - crop_bbox[1] + 1), # height
)
# try to use crop_from_bbox(img, bbox, zero_pad) here
input_crop = np.array(image.crop(crop_bbox)).astype(np.float32)
# resize the crop
input_crop = cv2.resize(input_crop, (512, 512), interpolation=cv2.INTER_NEAREST)
crop_scale = (512 / crop_shape[0], 512 / crop_shape[1])
def METHOD_NAME(points):
points = [
((p[0] - crop_bbox[0]) * crop_scale[0], # x
(p[1] - crop_bbox[1]) * crop_scale[1]) # y
for p in points]
return points
pos_points = METHOD_NAME(pos_points)
neg_points = METHOD_NAME(neg_points)
# Create IOG image
pos_gt = np.zeros(shape=input_crop.shape[:2], dtype=np.float64)
neg_gt = np.zeros(shape=input_crop.shape[:2], dtype=np.float64)
for p in pos_points:
pos_gt = np.maximum(pos_gt, helpers.make_gaussian(pos_gt.shape, center=p))
for p in neg_points:
neg_gt = np.maximum(neg_gt, helpers.make_gaussian(neg_gt.shape, center=p))
iog_image = np.stack((pos_gt, neg_gt), axis=2).astype(dtype=input_crop.dtype)
# Convert iog_image to an image (0-255 values)
cv2.normalize(iog_image, iog_image, 0, 255, cv2.NORM_MINMAX)
# Concatenate input crop and IOG image
input_blob = np.concatenate((input_crop, iog_image), axis=2)
# numpy image: H x W x C
# torch image: C X H X W
input_blob = input_blob.transpose((2, 0, 1))
# batch size is 1
input_blob = np.array([input_blob])
input_tensor = torch.from_numpy(input_blob)
input_tensor = input_tensor.to(self.device)
output_mask = self.net.forward(input_tensor)[4]
output_mask = output_mask.to(self.device)
pred = np.transpose(output_mask.data.numpy()[0, :, :, :], (1, 2, 0))
pred = pred > threshold
pred = np.squeeze(pred)
# Convert a mask to a polygon
pred = np.array(pred, dtype=np.uint8)
pred = cv2.resize(pred, dsize=(crop_shape[0], crop_shape[1]),
interpolation=cv2.INTER_CUBIC)
cv2.normalize(pred, pred, 0, 255, cv2.NORM_MINMAX)
mask = np.zeros((image.height, image.width), dtype=np.uint8)
x = int(crop_bbox[0])
y = int(crop_bbox[1])
mask[y : y + crop_shape[1], x : x + crop_shape[0]] = pred
polygon = convert_mask_to_polygon(mask)
return mask, polygon | null |
test comparison uneven nested | import numpy as np
import pytest
import aesara
from aesara.tensor.type import TensorType
from aesara.typed_list.basic import TypedListVariable
from aesara.typed_list.type import TypedListType
from tests.tensor.utils import random_ranged
class TestTypedListType:
def test_wrong_input_on_creation(self):
        # Typed list type should raise an
# error if the argument passed for
# type is not a valid aesara type
with pytest.raises(TypeError):
TypedListType(None)
def test_wrong_input_on_filter(self):
        # Typed list type should raise an
# error if the argument given to filter
# isn't of the same type as the one
# specified on creation
# list of matrices
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
with pytest.raises(TypeError):
myType.filter([4])
def test_not_a_list_on_filter(self):
        # Typed List Value should raise an error
# if no iterable variable is given on input
# list of matrices
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
with pytest.raises(TypeError):
myType.filter(4)
def test_type_equality(self):
# Typed list types should only be equal
        # when they contain the same aesara
# variables
# list of matrices
myType1 = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
# list of matrices
myType2 = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
# list of scalars
myType3 = TypedListType(TensorType(aesara.config.floatX, shape=()))
assert myType2 == myType1
assert myType3 != myType1
def test_filter_sanity_check(self):
# Simple test on typed list type filter
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([x]), [x])
def test_intern_filter(self):
# Test checking if values contained are themselves
        # filtered. If they weren't, this code would raise
# an exception.
myType = TypedListType(TensorType("float64", shape=(None, None)))
x = np.asarray([[4, 5], [4, 5]], dtype="float32")
assert np.array_equal(myType.filter([x]), [x])
def test_load_alot(self):
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
x = random_ranged(-1000, 1000, [10, 10])
testList = []
for i in range(10000):
testList.append(x)
assert np.array_equal(myType.filter(testList), testList)
def test_basic_nested_list(self):
# Testing nested list with one level of depth
myNestedType = TypedListType(
TensorType(aesara.config.floatX, shape=(None, None))
)
myType = TypedListType(myNestedType)
x = random_ranged(-1000, 1000, [100, 100])
assert np.array_equal(myType.filter([[x]]), [[x]])
def test_comparison_different_depth(self):
        # Nested lists with different depths aren't the same
myNestedType = TypedListType(
TensorType(aesara.config.floatX, shape=(None, None))
)
myNestedType2 = TypedListType(myNestedType)
myNestedType3 = TypedListType(myNestedType2)
assert myNestedType2 != myNestedType3
def test_nested_list_arg(self):
# test for the 'depth' optional argument
myNestedType = TypedListType(
TensorType(aesara.config.floatX, shape=(None, None)), 3
)
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
myManualNestedType = TypedListType(TypedListType(TypedListType(myType)))
assert myNestedType == myManualNestedType
def test_get_depth(self):
        # test case for the get_depth utility function
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
myManualNestedType = TypedListType(TypedListType(TypedListType(myType)))
assert myManualNestedType.get_depth() == 3
def METHOD_NAME(self):
        # test for comparison between unevenly nested lists
myType = TypedListType(TensorType(aesara.config.floatX, shape=(None, None)))
myManualNestedType1 = TypedListType(TypedListType(TypedListType(myType)))
myManualNestedType2 = TypedListType(TypedListType(myType))
assert myManualNestedType1 != myManualNestedType2
assert myManualNestedType2 != myManualNestedType1
def test_variable_is_Typed_List_variable(self):
mySymbolicVariable = TypedListType(
TensorType(aesara.config.floatX, shape=(None, None))
)()
assert isinstance(mySymbolicVariable, TypedListVariable) | null |
test tasks handles nonexisting theme | import pytest
from ....cache.test import assert_invalidates_cache
from ... import THEME_CACHE
from ..css import change_css_source, get_theme_media_map, rebuild_css
from ..tasks import build_single_theme_css, build_theme_css
@pytest.fixture
def assert_snapshot_match(snapshot, theme):
def _assert_snapshot_match(result):
result = result.replace(theme.dirname, "themedir")
snapshot.assert_match(result)
return _assert_snapshot_match
@pytest.fixture
def media_map(theme, image):
return get_theme_media_map(theme)
def test_tasks_builds_single_css_file(theme, image, css_needing_build):
build_single_theme_css(css_needing_build.pk)
css_needing_build.refresh_from_db()
assert css_needing_build.build_file
def test_tasks_skips_single_css_file_that_doesnt_require_build(theme, css):
build_single_theme_css(css.pk)
css.refresh_from_db()
assert not css.build_file
def test_tasks_handles_nonexisting_css_file(db):
build_single_theme_css(1)
def test_tasks_builds_theme_css_files_that_require_it(theme, image, css_needing_build):
build_theme_css(theme.pk)
css_needing_build.refresh_from_db()
assert css_needing_build.build_file
def test_tasks_skips_theme_css_files_that_dont_require_build(theme, css):
build_theme_css(theme.pk)
css.refresh_from_db()
assert not css.build_file
def METHOD_NAME(nonexisting_theme):
build_theme_css(nonexisting_theme.pk)
def test_media_map_for_theme_without_any_media_files_returns_empty_dict(theme):
assert get_theme_media_map(theme) == {}
def test_media_map_for_theme_with_media_files_returns_dict_with_data(
theme, image, media
):
assert get_theme_media_map(theme)
def test_css_file_is_build(media_map, css_needing_build):
rebuild_css(media_map, css_needing_build)
css_needing_build.refresh_from_db()
assert css_needing_build.build_file
def test_build_css_file_is_hashed(media_map, css_needing_build):
rebuild_css(media_map, css_needing_build)
css_needing_build.refresh_from_db()
assert css_needing_build.build_hash
def test_build_css_file_includes_hash_in_filename(media_map, css_needing_build):
rebuild_css(media_map, css_needing_build)
css_needing_build.refresh_from_db()
assert css_needing_build.build_hash in str(css_needing_build.build_file)
def test_build_css_file_has_size_set(media_map, css_needing_build):
rebuild_css(media_map, css_needing_build)
css_needing_build.refresh_from_db()
assert css_needing_build.size
def test_simple_url_to_file_is_replaced_with_valid_url(
assert_snapshot_match, media_map, image
):
css = ".page-header { background-image: url(%s); }" % image.name
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_relative_url_to_file_is_replaced_with_valid_url(
assert_snapshot_match, media_map, image
):
css = ".page-header { background-image: url(./%s); }" % image.name
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_url_to_file_from_create_react_app_is_replaced_with_valid_url(
assert_snapshot_match, media_map, image
):
hashed_name = str(image.file).split("/")[-1]
css = ".page-header { background-image: url(/static/media/%s); }" % hashed_name
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_quoted_url_to_file_is_replaced_with_valid_url(
assert_snapshot_match, media_map, image
):
css = '.page-header { background-image: url("%s"); }' % image.name
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_single_quoted_url_to_file_is_replaced_with_valid_url(
assert_snapshot_match, media_map, image
):
css = ".page-header { background-image: url('%s'); }" % image.name
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_absolute_https_url_to_file_is_not_replaced(media_map):
css = ".page-header { background-image: url(https://cdn.example.com/bg.png); }"
result = change_css_source(media_map, css)
assert result == css
def test_absolute_http_url_to_file_is_not_replaced(media_map):
css = ".page-header { background-image: url(http://cdn.example.com/bg.png); }"
result = change_css_source(media_map, css)
assert result == css
def test_absolute_protocol_relative_url_to_file_is_not_replaced(media_map):
css = ".page-header { background-image: url(://cdn.example.com/bg.png); }"
result = change_css_source(media_map, css)
assert result == css
def test_css_file_with_multiple_different_urls_is_correctly_replaced(
assert_snapshot_match, media_map, image
):
css = (
".page-header { background-image: url(http://cdn.example.com/bg.png); }"
'\n.container { background-image: url("%s"); }'
'\n.alert { background-image: url("%s"); }'
    ) % (image.name, str(image.file).split("/")[-1])
result = change_css_source(media_map, css)
assert_snapshot_match(result)
def test_building_single_theme_css_invalidates_theme_cache(
theme, image, css_needing_build
):
with assert_invalidates_cache(THEME_CACHE):
build_single_theme_css(css_needing_build.pk)
def test_building_theme_css_invalidates_theme_cache(theme):
with assert_invalidates_cache(THEME_CACHE):
build_theme_css(theme.pk) | null |
visit func arg list | # Generated from /Users/mep/LocalStack/localstack/localstack/services/stepfunctions/asl/antlr/ASLIntrinsicParser.g4 by ANTLR 4.12.0
from antlr4 import *
if __name__ is not None and "." in __name__:
from .ASLIntrinsicParser import ASLIntrinsicParser
else:
from ASLIntrinsicParser import ASLIntrinsicParser
# This class defines a complete generic visitor for a parse tree produced by ASLIntrinsicParser.
class ASLIntrinsicParserVisitor(ParseTreeVisitor):
# Visit a parse tree produced by ASLIntrinsicParser#func_decl.
def visitFunc_decl(self, ctx:ASLIntrinsicParser.Func_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#states_func_decl.
def visitStates_func_decl(self, ctx:ASLIntrinsicParser.States_func_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#state_fun_name.
def visitState_fun_name(self, ctx:ASLIntrinsicParser.State_fun_nameContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_list.
def METHOD_NAME(self, ctx:ASLIntrinsicParser.Func_arg_listContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_string.
def visitFunc_arg_string(self, ctx:ASLIntrinsicParser.Func_arg_stringContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_int.
def visitFunc_arg_int(self, ctx:ASLIntrinsicParser.Func_arg_intContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_float.
def visitFunc_arg_float(self, ctx:ASLIntrinsicParser.Func_arg_floatContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_bool.
def visitFunc_arg_bool(self, ctx:ASLIntrinsicParser.Func_arg_boolContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_json_path.
def visitFunc_arg_json_path(self, ctx:ASLIntrinsicParser.Func_arg_json_pathContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#func_arg_func_decl.
def visitFunc_arg_func_decl(self, ctx:ASLIntrinsicParser.Func_arg_func_declContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path.
def visitJson_path(self, ctx:ASLIntrinsicParser.Json_pathContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_part.
def visitJson_path_part(self, ctx:ASLIntrinsicParser.Json_path_partContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_iden.
def visitJson_path_iden(self, ctx:ASLIntrinsicParser.Json_path_idenContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_iden_qual.
def visitJson_path_iden_qual(self, ctx:ASLIntrinsicParser.Json_path_iden_qualContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_qual_void.
def visitJson_path_qual_void(self, ctx:ASLIntrinsicParser.Json_path_qual_voidContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_qual_idx.
def visitJson_path_qual_idx(self, ctx:ASLIntrinsicParser.Json_path_qual_idxContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_qual_query.
def visitJson_path_qual_query(self, ctx:ASLIntrinsicParser.Json_path_qual_queryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_query_cmp.
def visitJson_path_query_cmp(self, ctx:ASLIntrinsicParser.Json_path_query_cmpContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_query_length.
def visitJson_path_query_length(self, ctx:ASLIntrinsicParser.Json_path_query_lengthContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_query_binary.
def visitJson_path_query_binary(self, ctx:ASLIntrinsicParser.Json_path_query_binaryContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#json_path_query_star.
def visitJson_path_query_star(self, ctx:ASLIntrinsicParser.Json_path_query_starContext):
return self.visitChildren(ctx)
# Visit a parse tree produced by ASLIntrinsicParser#identifier.
def visitIdentifier(self, ctx:ASLIntrinsicParser.IdentifierContext):
return self.visitChildren(ctx)
del ASLIntrinsicParser | null
test l3 local parameter id | #
# @file TestL3LocalParameter.py
# @brief L3 Local Parameter unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Sarah Keating
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestL3LocalParameter.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
def isnan(x):
return (x != x)
pass
class TestL3LocalParameter(unittest.TestCase):
global P
P = None
def setUp(self):
self.P = libsbml.LocalParameter(3,1)
if (self.P == None):
pass
pass
def tearDown(self):
_dummyList = [ self.P ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_LocalParameter_NS(self):
self.assertTrue( self.P.getNamespaces() != None )
self.assertTrue( self.P.getNamespaces().getLength() == 1 )
self.assertTrue(( "http://www.sbml.org/sbml/level3/version1/core" == self.P.getNamespaces().getURI(0) ))
pass
def test_L3_LocalParameter_create(self):
self.assertTrue( self.P.getTypeCode() == libsbml.SBML_LOCAL_PARAMETER )
self.assertTrue( self.P.getMetaId() == "" )
self.assertTrue( self.P.getNotes() == None )
self.assertTrue( self.P.getAnnotation() == None )
self.assertTrue( self.P.getId() == "" )
self.assertTrue( self.P.getName() == "" )
self.assertTrue( self.P.getUnits() == "" )
self.assertEqual( True, isnan(self.P.getValue()) )
self.assertEqual( False, self.P.isSetId() )
self.assertEqual( False, self.P.isSetName() )
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( False, self.P.isSetUnits() )
pass
def test_L3_LocalParameter_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(3,1)
sbmlns.addNamespaces(xmlns)
p = libsbml.LocalParameter(sbmlns)
self.assertTrue( p.getTypeCode() == libsbml.SBML_LOCAL_PARAMETER )
self.assertTrue( p.getMetaId() == "" )
self.assertTrue( p.getNotes() == None )
self.assertTrue( p.getAnnotation() == None )
self.assertTrue( p.getLevel() == 3 )
self.assertTrue( p.getVersion() == 1 )
self.assertTrue( p.getNamespaces() != None )
self.assertTrue( p.getNamespaces().getLength() == 2 )
self.assertTrue( p.getId() == "" )
self.assertTrue( p.getName() == "" )
self.assertTrue( p.getUnits() == "" )
self.assertEqual( True, isnan(p.getValue()) )
self.assertEqual( False, p.isSetId() )
self.assertEqual( False, p.isSetName() )
self.assertEqual( False, p.isSetValue() )
self.assertEqual( False, p.isSetUnits() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_LocalParameter_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_L3_LocalParameter_hasRequiredAttributes(self):
p = libsbml.LocalParameter(3,1)
self.assertEqual( False, p.hasRequiredAttributes() )
p.setId( "id")
self.assertEqual( True, p.hasRequiredAttributes() )
_dummyList = [ p ]; _dummyList[:] = []; del _dummyList
pass
def METHOD_NAME(self):
id = "mitochondria";
self.assertEqual( False, self.P.isSetId() )
self.P.setId(id)
self.assertTrue(( id == self.P.getId() ))
self.assertEqual( True, self.P.isSetId() )
if (self.P.getId() == id):
pass
pass
def test_L3_LocalParameter_name(self):
name = "My_Favorite_Factory";
self.assertEqual( False, self.P.isSetName() )
self.P.setName(name)
self.assertTrue(( name == self.P.getName() ))
self.assertEqual( True, self.P.isSetName() )
if (self.P.getName() == name):
pass
self.P.unsetName()
self.assertEqual( False, self.P.isSetName() )
if (self.P.getName() != None):
pass
pass
def test_L3_LocalParameter_units(self):
units = "volume";
self.assertEqual( False, self.P.isSetUnits() )
self.P.setUnits(units)
self.assertTrue(( units == self.P.getUnits() ))
self.assertEqual( True, self.P.isSetUnits() )
if (self.P.getUnits() == units):
pass
self.P.unsetUnits()
self.assertEqual( False, self.P.isSetUnits() )
if (self.P.getUnits() != None):
pass
pass
def test_L3_LocalParameter_value(self):
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( True, isnan(self.P.getValue()) )
self.P.setValue(1.5)
self.assertEqual( True, self.P.isSetValue() )
self.assertTrue( self.P.getValue() == 1.5 )
self.P.unsetValue()
self.assertEqual( False, self.P.isSetValue() )
self.assertEqual( True, isnan(self.P.getValue()) )
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestL3LocalParameter))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
| null |
get class | # ../memory/__init__.py
"""Provides binary/memory based functionality."""
# =============================================================================
# >> IMPORTS
# =============================================================================
# Python
import inspect
# Source.Python
# Loggers
from loggers import _sp_logger
# =============================================================================
# >> FORWARD IMPORTS
# =============================================================================
# Source.Python
# core
from core import AutoUnload
# memory
from _memory import BinaryFile
from _memory import CallingConvention
from _memory import CLASS_INFO
from _memory import Convention
from _memory import DataType
from _memory import EXPOSED_CLASSES
from _memory import Function
from _memory import FunctionInfo
from _memory import NULL
from _memory import Pointer
from _memory import ProcessorRegister
from _memory import Register
from _memory import Registers
from _memory import StackData
from _memory import TYPE_SIZES
from _memory import alloc
from _memory import find_binary
from _memory import get_data_type_size
from _memory import get_object_pointer
from _memory import get_size
from _memory import make_object
# =============================================================================
# >> ALL DECLARATION
# =============================================================================
__all__ = ('BinaryFile',
'CLASS_INFO',
'Callback',
'CallingConvention',
'Convention',
'DataType',
'EXPOSED_CLASSES',
'Function',
'FunctionInfo',
'NULL',
'Pointer',
'ProcessorRegister',
'Register',
'Registers',
'StackData',
'TYPE_SIZES',
'alloc',
'find_binary',
'get_class',
'get_class_info',
'get_class_name',
'get_data_type_size',
'get_function_info',
'get_object_pointer',
'get_size',
'get_virtual_function',
'make_object'
)
# =============================================================================
# >> GLOBAL VARIABLES
# =============================================================================
# Get the sp.memory logger
memory_logger = _sp_logger.memory
# =============================================================================
# >> CLASSES
# =============================================================================
class Callback(AutoUnload, Function):
"""Decorator to create a function in memory to call a Python callback."""
def __init__(self, convention, arg_types, return_type):
"""Initialize the Callback object.
:param Convention|CallingConvention convention: Calling convention
that should be used for this callback.
:param iterable arg_types: Argument types of the callback.
:param return_type: Return type of the callback.
"""
self.callback = None
# Allocate enough space for a jump, so we can hook it later. Then
# convert it to a function. Of course, this isn't a function, but the
# hook will override it.
super().__init__(
alloc(8, False).address, convention, arg_types, return_type)
self.add_pre_hook(self._hook)
def _hook(self, args):
"""Call the callback and get the return value."""
return_value = self.callback(args)
if return_value is not None:
return return_value
if self.return_type == DataType.VOID:
return 0
# We will crash now :(
raise ValueError('Return value is not allowed to be None.')
def __call__(self, *args, **kw):
"""Store the given callback on the first call.
All further calls will call the created callback function.
"""
if self.callback is None:
assert callable(args[0])
self.callback = args[0]
return self
return super().__call__(*args, **kw)
def _unload_instance(self):
"""Remove the hook, restore the allocated space and deallocate it."""
self._delete_hook()
self.dealloc()
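# Editor-added usage sketch (illustrative only, not part of the original
# module): how the Callback decorator above is typically applied. The calling
# convention and argument data types below are assumptions chosen purely for
# demonstration; wrapping the decoration in a function means nothing is
# allocated at import time.
def _example_callback_usage():
    @Callback(Convention.CDECL, (DataType.INT, DataType.INT), DataType.INT)
    def example_sum(args):
        # args holds the raw arguments pushed by the calling code.
        return args[0] + args[1]
    return example_sum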
# =============================================================================
# >> FUNCTIONS
# =============================================================================
def get_virtual_function(obj, function_name, function_index=0):
"""Return a :class:`Function` object.
Create the :class:`Function` object by using
a :class:`FunctionInfo` object.
:param obj: An object of an exposed class.
:param str function_name: See :func:`get_function_info`.
:param int function_index: See :func:`get_function_info`.
:raise ValueError: See :func:`get_class_name`.
"""
return get_object_pointer(obj).make_virtual_function(
get_function_info(obj, function_name, function_index))
def get_function_info(cls, function_name, function_index=0):
"""Return the :class:`FunctionInfo` object of a member function.
:param str cls: See :func:`get_class_info`.
:param str function_name: The name of the member function on the C++ side.
:param int function_index: The index of the member function in the
function info list. This is only required if the function is
overloaded and you want to get a different FunctionInfo object than
the first one.
:raise ValueError: See :func:`get_class_name`.
"""
return get_class_info(cls)[function_name][function_index]
def get_class_info(cls):
"""Return the class info dictionary of a class.
:param str cls: A string that defines the name of the class on the C++
side or an exposed class or an object of an exposed class.
:raise ValueError: See :func:`get_class_name`.
"""
if isinstance(cls, str):
return CLASS_INFO[cls]
if not inspect.isclass(cls):
cls = cls.__class__
return get_class_info(get_class_name(cls))
def get_class_name(cls):
"""Return the name of a class or class object on the C++ side.
:param cls: A class or class object.
:raise ValueError: Raised if the class was not exposed by Source.Python.
"""
if not inspect.isclass(cls):
cls = cls.__class__
for name, possible_cls in EXPOSED_CLASSES.items():
if cls is possible_cls:
return name
for base_class in cls.__bases__:
try:
class_name = get_class_name(base_class)
except ValueError:
continue
else:
return class_name
raise ValueError('Given class was not exposed.')
def METHOD_NAME(classname):
"""Return the class of an exposed class by its C++ class name.
:param str classname: The name of the exposed class on the C++ side.
:raise KeyError: Raised if the `classname` is not the name of an exposed
class.
"""
return EXPOSED_CLASSES[classname] | null |
test read stdout | """
unit tests for the script engine
"""
import contextlib
import logging
import signal
import sys
import pytest
import salt.engines.script as script
from salt.exceptions import CommandExecutionError
from tests.support.mock import Mock, patch
log = logging.getLogger(__name__)
def _check_skip(grains):
if grains["os"] == "MacOS":
return True
return False
pytestmark = [
pytest.mark.skip_initial_gh_actions_failure(skip=_check_skip),
]
@pytest.fixture
def configure_loader_modules(master_opts):
return {script: {"__opts__": master_opts}}
def test__get_serializer():
"""
    Test that a known serializer is returned, and that an exception
    is raised for an unknown serializer
"""
for serializers in ("json", "yaml", "msgpack"):
assert script._get_serializer(serializers)
with pytest.raises(CommandExecutionError):
script._get_serializer("bad")
def METHOD_NAME():
"""
Test we can yield stdout
"""
with patch("subprocess.Popen") as popen_mock:
popen_mock.stdout.readline.return_value = "test"
assert next(script._read_stdout(popen_mock)) == "test"
def test__read_stdout_terminates_properly():
"""
Test that _read_stdout terminates with the sentinel
"""
with patch("subprocess.Popen") as popen_mock:
popen_mock.stdout.readline.return_value = b""
with pytest.raises(StopIteration):
next(script._read_stdout(popen_mock))
@pytest.fixture()
def serializer():
with patch("salt.engines.script._get_serializer", autospec=True) as get_serializer:
serializer = Mock()
get_serializer.return_value = serializer
serializer.deserialize.side_effect = lambda x: x
yield serializer
@pytest.fixture()
def event_send():
event = Mock()
with patch("salt.utils.event.get_master_event", autospec=True) as get_master:
get_master.return_value.fire_event = event
with patch.dict(script.__salt__, {"event.send": event}):
yield event
@pytest.fixture()
def raw_event():
with patch("salt.engines.script._read_stdout", autospec=True) as stdout:
yield stdout
@pytest.fixture()
def proc():
with patch("subprocess.Popen", autospec=True) as popen:
proc = Mock()
proc.wait.return_value = False
proc.pid = 1337
popen.return_value = proc
yield
@pytest.fixture()
def event():
return {"tag": "test", "data": {"foo": "bar", "id": "test"}}
@pytest.fixture()
def new_tag():
return {"tag": "testnew", "data": {"foo": "bar", "id": "test"}}
@pytest.fixture()
def new_event():
return {"tag": "test", "data": {"foo": "baz", "id": "test"}}
@pytest.fixture
def timeout():
"""
This fixture was proposed by waynew to allow testing
an otherwise infinite loop.
Once https://github.com/saltstack/salt/pull/62910 is merged,
this can be migrated.
"""
if sys.platform.startswith("win"):
pytest.skip("SIGALRM is not available on Windows.")
def handler(num, frame):
raise TimeoutError()
@contextlib.contextmanager
def _timeout(t=1):
signal.signal(signal.SIGALRM, handler)
signal.alarm(t)
try:
yield _timeout
except TimeoutError:
pass
finally:
signal.alarm(0)
return _timeout
@pytest.mark.usefixtures("proc", "serializer", "event_send", "raw_event", "timeout")
class TestStart:
def test_start(self, event, raw_event, event_send, timeout):
raw_event.side_effect = ([event],)
try:
with timeout():
script.start("cmd", interval=1.5)
except StopIteration:
log.warning("Timeout failure")
event_send.assert_called_once_with(tag=event["tag"], data=event["data"])
def test_multiple(self, event, new_event, raw_event, event_send, timeout):
raw_event.side_effect = ([event, new_event],)
try:
with timeout():
script.start("cmd", interval=1.5)
except StopIteration:
log.warning("Timeout failure")
assert event_send.call_count == 2
event_send.assert_any_call(tag=event["tag"], data=event["data"])
event_send.assert_any_call(tag=new_event["tag"], data=new_event["data"])
def test_onchange_no_change_no_output(self, event, raw_event, event_send, timeout):
raw_event.side_effect = 110 * ([event],)
try:
with timeout():
script.start("cmd", onchange=True, interval=0.01)
except StopIteration:
log.warning("Timeout failure")
event_send.assert_called_once_with(tag=event["tag"], data=event["data"])
def test_start_onchange_no_change_multiple(
self, event, new_tag, raw_event, event_send, timeout
):
raw_event.side_effect = 110 * ([event, new_tag],)
try:
with timeout():
script.start("cmd", onchange=True, interval=0.01)
except StopIteration:
log.warning("Timeout failure")
assert event_send.call_count == 2
event_send.assert_any_call(tag=event["tag"], data=event["data"])
event_send.assert_any_call(tag=new_tag["tag"], data=new_tag["data"])
def test_start_onchange_with_change(
self, event, new_event, raw_event, event_send, timeout
):
raw_event.side_effect = 50 * [[event]] + 60 * [[new_event]]
try:
with timeout():
script.start("cmd", onchange=True, interval=0.01)
except StopIteration:
log.warning("Timeout failure")
assert event_send.call_count == 2
event_send.assert_any_call(tag=event["tag"], data=event["data"])
event_send.assert_called_with(tag=new_event["tag"], data=new_event["data"])
def test_start_onchange_new_tag(
self, event, new_tag, raw_event, event_send, timeout
):
raw_event.side_effect = 50 * [[event]] + 60 * [[new_tag]]
try:
with timeout():
script.start("cmd", onchange=True, interval=0.01)
except StopIteration:
log.warning("Timeout failure")
event_send.assert_any_call(tag=event["tag"], data=event["data"])
event_send.assert_called_with(tag=new_tag["tag"], data=new_tag["data"]) | null |
setup http manager | from __future__ import annotations
import json
import logging
import os
from abc import abstractmethod
from pathlib import Path
from typing import Any
import urllib3
from checkov.common.util.data_structures_utils import merge_dicts
from checkov.common.util.http_utils import get_user_agent_header
class BaseVCSDAL:
def __init__(self) -> None:
self.api_url = ""
self.graphql_api_url = ""
self.token = "" # nosec
self.current_repository = ""
self.current_branch = ""
self.repo_owner = ""
self.org = ""
self.default_branch_cache: dict[str, Any] = {}
self.request_lib_http = None
self._organization_security = None
self.org_complementary_metadata: dict[str, Any] = {}
self.repo_complementary_metadata: dict[str, Any] = {}
self.http: urllib3.PoolManager | None = None
self.METHOD_NAME(ca_certificate=os.getenv('BC_CA_BUNDLE', None))
self.discover()
self.setup_conf_dir()
@abstractmethod
def discover(self) -> None:
"""
        Discover parameters from the execution context of checkov, usually from environment variables.
"""
self.default_branch_cache = {}
def METHOD_NAME(self, ca_certificate: str | None = None) -> None:
"""
bridgecrew uses both the urllib3 and requests libraries, while checkov uses the requests library.
:param ca_certificate: an optional CA bundle to be used by both libraries.
"""
if self.http:
return
if ca_certificate:
os.environ['REQUESTS_CA_BUNDLE'] = ca_certificate
try:
parsed_url = urllib3.util.parse_url(os.environ['https_proxy'])
self.http = urllib3.ProxyManager(os.environ['https_proxy'], cert_reqs='REQUIRED',
ca_certs=ca_certificate,
proxy_headers=urllib3.make_headers(proxy_basic_auth=parsed_url.auth)) # type:ignore[no-untyped-call]
except KeyError:
self.http = urllib3.PoolManager(cert_reqs='REQUIRED', ca_certs=ca_certificate)
else:
try:
parsed_url = urllib3.util.parse_url(os.environ['https_proxy'])
self.http = urllib3.ProxyManager(os.environ['https_proxy'],
proxy_headers=urllib3.make_headers(proxy_basic_auth=parsed_url.auth)) # type:ignore[no-untyped-call]
except KeyError:
self.http = urllib3.PoolManager()
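    # Editor-added note on the setup above (example values are illustrative):
    # with `https_proxy=http://user:pass@proxy:8080` in the environment the pool
    # is a urllib3.ProxyManager carrying the parsed basic-auth proxy headers;
    # with `BC_CA_BUNDLE=/path/to/ca.pem` that bundle is used for urllib3
    # certificate verification and, via REQUESTS_CA_BUNDLE, for requests.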
def _request(self, endpoint: str, allowed_status_codes: list[int]) -> dict[str, Any] | None:
if allowed_status_codes is None:
allowed_status_codes = [200]
if not self.token:
return None
url_endpoint = f"{self.api_url}/{endpoint}"
try:
headers = self._headers()
if self.http:
request = self.http.request("GET", url_endpoint, headers=headers) # type:ignore[no-untyped-call]
if request.status in allowed_status_codes:
data: dict[str, Any] = json.loads(request.data.decode("utf8"))
if isinstance(data, dict) and 'errors' in data.keys():
return None
return data
except Exception:
logging.debug(f"Query failed to run by returning code of {url_endpoint}", exc_info=True)
return None
@abstractmethod
def _headers(self) -> dict[str, Any]:
return merge_dicts({"Accept": "application/vnd.github.v3+json",
"Authorization": f"token {self.token}"}, get_user_agent_header())
def _graphql_headers(self) -> dict[str, str]:
return {"Authorization": f"bearer {self.token}"}
def _request_graphql(self, query: str, variables: dict[str, Any]) -> Any:
if not self.token:
return
headers = self._graphql_headers()
body = json.dumps({'query': query, 'variables': variables})
try:
if self.http:
request = self.http.request("POST", self.graphql_api_url, body=body, headers=headers) # type:ignore[no-untyped-call]
if request.status == 200:
data = json.loads(request.data.decode("utf8"))
if isinstance(data, dict) and 'errors' in data.keys():
logging.debug("received errors %s", data)
return None
return data
else:
logging.debug("Query failed to run by returning code of {}. {}".format(request.data, query))
except Exception:
logging.debug(f"Query failed {query}", exc_info=True)
@staticmethod
def persist(path: str | Path, conf: dict[str, Any] | list[dict[str, Any]]) -> None:
BaseVCSDAL.ensure_dir(path)
with open(path, "w+", encoding='utf-8') as f:
logging.debug(f"Persisting to {path}")
json.dump(conf, f, ensure_ascii=False, indent=4)
@staticmethod
def ensure_dir(file_path: str | Path) -> None:
if not os.path.exists(file_path):
directory_path = os.path.dirname(file_path)
if not os.path.exists(directory_path):
os.makedirs(directory_path)
@abstractmethod
def setup_conf_dir(self) -> None:
pass | null |
test fulltext index query | import pytest
from resotocore.db import EstimatedSearchCost, EstimatedQueryCostRating
from resotocore.db.arango_query import to_query, query_cost, fulltext_term_combine
from resotocore.db.graphdb import GraphDB
from resotocore.db.model import QueryModel
from resotocore.model.model import Model
from resotocore.query.model import Query, Sort
from resotocore.query.query_parser import parse_query
def test_sort_order_for_synthetic_prop(foo_model: Model, graph_db: GraphDB) -> None:
def check_sort_in_query(q: Query, expected_sort: str) -> None:
query_str, _ = to_query(graph_db, QueryModel(q, foo_model))
assert f"SORT {expected_sort}" in query_str, f"Expected {expected_sort} in {query_str}"
check_sort_in_query(Query.by("foo").add_sort(Sort("reported.age")), "m0.reported.ctime desc")
check_sort_in_query(Query.by("foo").add_sort(Sort("some.age")), "m0.some.age asc")
check_sort_in_query(Query.by("foo").add_sort(Sort("reported.ctime")), "m0.reported.ctime asc")
check_sort_in_query(Query.by("foo").add_sort(Sort("metadata.expired")), "m0.metadata.expired asc")
@pytest.mark.asyncio
async def test_query_cost(foo_model: Model, graph_db: GraphDB) -> None:
async def cost(query_str: str) -> EstimatedSearchCost:
query = parse_query(query_str)
return await query_cost(graph_db, QueryModel(query, foo_model), False)
c1 = await cost("aggregate(sum(1) as count):is(base) sort count asc")
assert c1.full_collection_scan is False
assert c1.rating is EstimatedQueryCostRating.simple
c2 = await cost("is(base) sort count asc")
assert c2.full_collection_scan is False
assert c2.rating is EstimatedQueryCostRating.simple
c3 = await cost("all sort reported.name asc")
assert c3.full_collection_scan is True
assert c3.rating is EstimatedQueryCostRating.complex
c4 = await cost("all {parents: <-[0:]-} sort reported.name asc")
assert c4.full_collection_scan is True
assert c4.rating is EstimatedQueryCostRating.bad
def test_fulltext_term() -> None:
part = parse_query('(a>0 and ("foo" and (b>1 and c>2 and "d")))').parts[0]
ft, remaining = fulltext_term_combine(part.term)
assert str(remaining) == "((b > 1 and c > 2) and a > 0)"
assert str(ft) == '("d" and "foo")'
    # the fulltext terms are or-combined with non-fulltext terms
ft, remaining = fulltext_term_combine(parse_query('(a>0 and "b") or ("c" and "d")').parts[0].term)
assert ft is None # fulltext index can not be utilized
ft, remaining = fulltext_term_combine(parse_query('a>0 {c: <--} "fulltext"').parts[0].term)
assert ft is None # fulltext index can not be utilized
ft, remaining = fulltext_term_combine(parse_query('a>0 {c: <-- "fulltext" }').parts[0].term)
assert ft is None # fulltext index can not be utilized
ft, remaining = fulltext_term_combine(parse_query('"a" and "b" or "c" and "d"').parts[0].term)
assert str(ft) == '((("a" and "b") or "c") and "d")'
def METHOD_NAME(foo_model: Model, graph_db: GraphDB) -> None:
def query_string(query: str) -> str:
query_str, _ = to_query(graph_db, QueryModel(parse_query(query), foo_model))
return query_str
single_ft_index = (
"LET m0=(FOR ft in search_ns SEARCH ANALYZER(PHRASE(ft.flat, @b0), 'delimited') "
"SORT BM25(ft) DESC RETURN ft) "
'FOR result in m0 RETURN UNSET(result, ["flat"])'
)
assert query_string('"a"') == single_ft_index
assert query_string('"some other fulltext string"') == single_ft_index
# and/or is combined correctly
assert (
"ANALYZER((((PHRASE(ft.flat, @b0)) and (PHRASE(ft.flat, @b1))) or "
"(PHRASE(ft.flat, @b2))) and (PHRASE(ft.flat, @b3)), 'delimited')"
) in query_string('"a" and "b" or "c" and "d"')
def test_ancestors_kind_lookup(foo_model: Model, graph_db: GraphDB) -> None:
# 1234 is coerced to a string
query = "ancestors.account.reported.name==1234"
assert to_query(graph_db, QueryModel(parse_query(query), foo_model))[1] == {"b0": "1234"}
def test_escape_property_path(foo_model: Model, graph_db: GraphDB) -> None:
raw = "metadata.replace.with.filter.sort.bla==true"
query = to_query(graph_db, QueryModel(parse_query(raw), foo_model))[0]
    # aql keywords are escaped with backticks
assert "m0.metadata.`replace`.`with`.`filter`.`sort`.bla" in query
def test_with_query_with_limit(foo_model: Model, graph_db: GraphDB) -> None:
query = "is(foo) with(empty, -->) limit 2"
query_str, _ = to_query(graph_db, QueryModel(parse_query(query), foo_model))
# make sure, there is no limit in the filter statement
assert "LET filter0 = (FOR m0 in `ns` FILTER @b0 IN m0.kinds RETURN m0)" in query_str
# make sure the limit is applied to the with statement
assert "FILTER counter1==1 LIMIT 0, 2 RETURN l0_l0_res" in query_str
def test_context(foo_model: Model, graph_db: GraphDB) -> None:
query = 'is(foo) and nested[*].{name=true and inner[*].{name=true}} and parents[*].{some_int="23"}'
aql, bind_vars = to_query(graph_db, QueryModel(parse_query(query).on_section("reported"), foo_model))
# query unfolds all nested loops
assert aql == (
"LET filter0 = (LET nested_distinct0 = (FOR m0 in `ns` FOR pre0 IN TO_ARRAY(m0.reported.nested) "
"FOR pre1 IN TO_ARRAY(pre0.inner) "
"FOR pre2 IN TO_ARRAY(m0.reported.parents) "
"FILTER ((@b0 IN m0.kinds) and ((pre0.name == @b1) and (pre1.name == @b2))) and (pre2.some_int == @b3) "
"RETURN DISTINCT m0) FOR m1 in nested_distinct0 "
'RETURN m1) FOR result in filter0 RETURN UNSET(result, ["flat"])'
)
# coercing works correctly for context terms
assert bind_vars["b1"] == "true" # true is coerced to a string
assert bind_vars["b2"] == "true" # inner true is coerced to a string
assert bind_vars["b3"] == 23 # 23 is coerced to an int
# fixed index works as well
query = "is(foo) and inner[1].{name=true and inner[0].name==true}"
aql, bind_vars = to_query(graph_db, QueryModel(parse_query(query).on_section("reported"), foo_model))
assert aql == (
"LET filter0 = (FOR m0 in `ns` FILTER (@b0 IN m0.kinds) and "
"((m0.reported.inner[1].name == @b1) and (m0.reported.inner[1].inner[0].name == @b2)) RETURN m0) "
'FOR result in filter0 RETURN UNSET(result, ["flat"])'
)
def test_usage(foo_model: Model, graph_db: GraphDB) -> None:
q, b = to_query(graph_db, QueryModel(parse_query("with_usage(3w, cpu, mem) is(foo)"), foo_model))
assert q == (
"LET filter0 = (FOR m0 in `ns` FILTER @b0 IN m0.kinds RETURN m0)\n"
"let with_usage0 = (\n"
" for r in filter0\n"
" let resource=r\n"
" let resource_usage = first(\n"
" for m in ns_usage\n"
" filter m.at>=@b1 and m.at<=@b2 and m.id==r._key\n"
" collect aggregate cpu_min = MIN(m.v.cpu.min), cpu_avg = AVG(m.v.cpu.avg), cpu_max = MAX(m.v.cpu.max), mem_min = MIN(m.v.mem.min), mem_avg = AVG(m.v.mem.avg), mem_max = MAX(m.v.mem.max), count = sum(1)\n" # noqa: E501
" return {usage:{cpu: {min: cpu_min, avg: cpu_avg, max: cpu_max},mem: {min: mem_min, avg: mem_avg, max: mem_max},entries:count,start:@b3,duration:@b4}}\n" # noqa: E501
" )\n"
" return resource_usage.usage.entries ? merge(resource, resource_usage) : resource\n"
")\n"
' FOR result in with_usage0 RETURN UNSET(result, ["flat"])'
) | null |
convert token to id | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization classes for LayoutXLM model."""
from typing import List, Optional
import sentencepiece as spm
from .. import AddedToken, PretrainedTokenizer
from ..tokenizer_utils import _is_control, _is_punctuation, _is_whitespace
SPIECE_UNDERLINE = "▁"
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"layoutxlm-base-uncased": 514,
    # FIXME(wj-Mcat): why is this model name not in the init-configuration?
# "layoutxlm-wo-backbone-base-uncased": 514
}
def _is_end_of_word(text):
"""Checks whether the last character in text is one of a punctuation, control or whitespace character."""
last_char = text[-1]
return bool(_is_control(last_char) | _is_punctuation(last_char) | _is_whitespace(last_char))
def _is_start_of_word(text):
"""Checks whether the first character in text is one of a punctuation, control or whitespace character."""
first_char = text[0]
return bool(_is_control(first_char) | _is_punctuation(first_char) | _is_whitespace(first_char))
class LayoutXLMTokenizer(PretrainedTokenizer):
resource_files_names = {"vocab_file": "sentencepiece.bpe.model"}
pretrained_resource_files_map = {
"vocab_file": {
"layoutxlm-base-uncased": "https://bj.bcebos.com/paddlenlp/models/transformers/layoutxlm_base/sentencepiece.bpe.model",
}
}
pretrained_init_configuration = {
"layoutxlm-base-uncased": {"do_lower_case": False},
}
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
SPECIAL_TOKENS_ATTRIBUTES = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
"additional_special_tokens",
]
def __init__(
self,
vocab_file,
bos_token="<s>",
eos_token="</s>",
sep_token="</s>",
cls_token="<s>",
unk_token="<unk>",
pad_token="<pad>",
mask_token="<mask>",
**kwargs
):
mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
self._bos_token = bos_token
self._eos_token = eos_token
self._sep_token = sep_token
self._cls_token = cls_token
self._unk_token = unk_token
self._pad_token = pad_token
self._mask_token = mask_token
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(vocab_file)
self.vocab_file = vocab_file
self.tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
self.offset = 1
self.tokens_to_ids["<mask>"] = len(self.sp_model) + self.offset
self.ids_to_tokens = {v: k for k, v in self.tokens_to_ids.items()}
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
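    # Editor-added illustrative note (token ids taken from tokens_to_ids above,
    # the sequences A and B are placeholders): a single sequence is encoded as
    #     <s> A </s>              ->  [0] + A + [2]
    # and a sequence pair as
    #     <s> A </s></s> B </s>   ->  [0] + A + [2, 2] + B + [2]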
def get_special_tokens_mask(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
) -> List[int]:
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model."
)
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
@property
def vocab_size(self):
return len(self.sp_model) + self.offset + 1 # Add the <mask> token
def get_vocab(self):
vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def METHOD_NAME(self, token):
"""Converts a token (str) in an id using the vocab."""
if token in self.tokens_to_ids:
return self.tokens_to_ids[token]
spm_id = self.sp_model.PieceToId(token)
# Need to return unknown token if the SP model returned 0
return spm_id + self.offset if spm_id else self.unk_token_id
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (str) using the vocab."""
if index in self.ids_to_tokens:
return self.ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.offset)
def convert_tokens_to_string(self, tokens):
"""Converts a sequence of tokens (strings for sub-words) in a single string."""
out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
return out_string
def num_special_tokens_to_add(self, pair=False):
token_ids_0 = []
token_ids_1 = []
return len(self.build_inputs_with_special_tokens(token_ids_0, token_ids_1 if pair else None)) | null |
apply | # -----------------------------------------------------------------------------
# BSD 3-Clause License
#
# Copyright (c) 2021-2022, Science and Technology Facilities Council
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# Authors: R. W. Ford and N. Nobre, STFC Daresbury Lab
'''Module containing a class that provides functionality to transform
a PSyIR MIN or MAX operator to PSyIR code. This could be useful if the
operator is not supported by the back-end or if the performance of the
inline code is better than the intrinsic. This utility transformation
should not be called directly by the user, rather it provides
functionality that can be specialised by MIN and MAX-specific
transformations.
'''
from __future__ import absolute_import
from psyclone.psyir.nodes import BinaryOperation, NaryOperation, Assignment, \
Reference, IfBlock
from psyclone.psyir.symbols import DataSymbol, REAL_TYPE
from psyclone.psyir.transformations.intrinsics.operator2code_trans import \
Operator2CodeTrans
class MinOrMax2CodeTrans(Operator2CodeTrans):
'''Provides a utility transformation from a PSyIR MIN or MAX Operator
node to equivalent code in a PSyIR tree. Validity checks are also
performed (by the parent class). This utility transformation is
not designed to be called directly by the user, rather it should
be specialised to provide MIN or MAX transformations.
The transformation replaces
.. code-block:: python
R = [MIN or MAX](A, B, C ...)
with the following logic:
.. code-block:: python
R = A
if B [< or >] R:
R = B
if C [< or >] R:
R = C
...
'''
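    # Editor-added illustrative note: a concrete MIN specialisation would,
    # roughly, subclass this transformation and set
    # `self._operator_name = "MIN"` together with
    # `self._compare_operator = BinaryOperation.Operator.LT` (a MAX
    # specialisation would use GT), so each generated IfBlock keeps the
    # smallest (or largest) value seen so far.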
def __init__(self):
super(MinOrMax2CodeTrans, self).__init__()
self._classes = (BinaryOperation, NaryOperation)
self._compare_operator = None
def METHOD_NAME(self, node, options=None):
'''Apply this utility transformation to the specified node. This node
must be a MIN or MAX BinaryOperation or NaryOperation. The
operation is converted to equivalent inline code. This is
implemented as a PSyIR transform from:
.. code-block:: python
R = ... [MIN or MAX](A, B, C ...) ...
to:
.. code-block:: python
res = A
tmp = B
IF tmp [< or >] res:
res = tmp
tmp = C
IF tmp [< or >] res:
res = tmp
...
R = ... res ...
where ``A``, ``B``, ``C`` ... could be arbitrarily complex
PSyIR expressions and the ``...`` before and after ``[MIN or
MAX](A, B, C ...)`` can be arbitrary PSyIR code.
This transformation requires the operation node to be a
descendent of an assignment and will raise an exception if
this is not the case.
:param node: a MIN or MAX Binary- or Nary-Operation node.
:type node: :py:class:`psyclone.psyir.nodes.BinaryOperation` or \
:py:class:`psyclone.psyir.nodes.NaryOperation`
:param options: a dictionary with options for transformations.
:type options: Optional[Dict[str, Any]]
'''
# pylint: disable=too-many-locals
self.validate(node)
symbol_table = node.scope.symbol_table
assignment = node.ancestor(Assignment)
# Create a temporary result variable. There is an assumption
# here that the Operator returns a PSyIR real type. This
        # might not be what is wanted (e.g. the args might be PSyIR
# integers), or there may be errors (arguments are of
# different types) but this can't be checked as we don't have
# appropriate methods to query nodes (see #658).
res_var_symbol = symbol_table.new_symbol(
f"res_{self._operator_name.lower()}",
symbol_type=DataSymbol, datatype=REAL_TYPE)
# Create a temporary variable. Again there is an
# assumption here about the datatype - please see previous
# comment (associated issue #658).
tmp_var_symbol = symbol_table.new_symbol(
f"tmp_{self._operator_name.lower()}",
symbol_type=DataSymbol, datatype=REAL_TYPE)
# Replace operation with a temporary (res_var).
node.replace_with(Reference(res_var_symbol))
# res_var=A (child[0] of node)
lhs = Reference(res_var_symbol)
new_assignment = Assignment.create(lhs, node.children[0].detach())
assignment.parent.children.insert(assignment.position, new_assignment)
# For each of the remaining arguments (B,C...)
for expression in node.pop_all_children():
# tmp_var=(B or C or ...)
lhs = Reference(tmp_var_symbol)
new_assignment = Assignment.create(lhs, expression)
assignment.parent.children.insert(assignment.position,
new_assignment)
# if_condition: tmp_var [< or >] res_var
lhs = Reference(tmp_var_symbol)
rhs = Reference(res_var_symbol)
if_condition = BinaryOperation.create(
self._compare_operator, lhs, rhs)
# then_body: res_var=tmp_var
lhs = Reference(res_var_symbol)
rhs = Reference(tmp_var_symbol)
then_body = [Assignment.create(lhs, rhs)]
# if [if_condition] then [then_body]
if_stmt = IfBlock.create(if_condition, then_body)
assignment.parent.children.insert(assignment.position, if_stmt) | null |
new | # -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) The GTG Team
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""Everything related to saved searches."""
from gi.repository import GObject
from uuid import uuid4, UUID
from typing import Optional
import logging
from lxml.etree import Element, SubElement
from GTG.core.base_store import BaseStore
log = logging.getLogger(__name__)
class SavedSearch(GObject.Object):
"""A saved search."""
__gtype_name__ = 'gtg_SavedSearch'
__slots__ = ['id', 'name', 'query', 'icon', 'children']
def __init__(self, id: UUID, name: str, query: str) -> None:
self.id = id
self.name = name
self.query = query
self.icon = None
self.children = []
self.parent = None
def __str__(self) -> str:
"""String representation."""
return f'Saved Search: {self.name} ({self.id})'
def __repr__(self) -> str:
"""String representation."""
return (f'Saved Search "{self.name}" '
f'with query "{self.query}" and id "{self.id}"')
def __eq__(self, other) -> bool:
"""Equivalence."""
return self.id == other.id
class SavedSearchStore(BaseStore):
"""A list of saved searches."""
__gtype_name__ = 'gtg_SavedSearchStore'
#: Tag to look for in XML
XML_TAG = 'savedSearch'
def __str__(self) -> str:
"""String representation."""
return f'Saved Search Store. Holds {len(self.lookup)} search(es)'
def find(self, name: str) -> Optional[SavedSearch]:
"""Get a saved search by name."""
for search in self.data:
if search.name == name:
return search
def from_xml(self, xml: Element) -> None:
"""Load searches from an LXML element."""
elements = list(xml.iter(self.XML_TAG))
# Do parent searches first
for element in elements:
search_id = element.get('id')
name = element.get('name')
query = element.get('query')
search = SavedSearch(id=search_id, name=name, query=query)
self.add(search)
log.debug('Added %s', search)
for element in elements:
parent_name = element.get('parent')
if parent_name and parent_name != 'search':
tid = element.get('id')
parent = self.find(parent_name)
if parent:
self.parent(tid, parent.id)
log.debug('Added %s as child of %s', element, parent)
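    # Editor-added illustrative note: an element consumed by from_xml above
    # looks like
    #     <savedSearch id="..." name="Work" query="@work" parent="Projects"/>
    # (attribute names come from the code; the values here are made up).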
def to_xml(self) -> Element:
"""Save searches to an LXML element."""
root = Element('searchlist')
parent_map = {}
for search in self.data:
for child in search.children:
parent_map[child.id] = search.name
for search in self.lookup.values():
element = SubElement(root, self.XML_TAG)
element.set('id', str(search.id))
element.set('name', search.name)
element.set('query', search.query)
try:
element.set('parent', str(parent_map[search.id]))
except KeyError:
# Toplevel search
pass
return root
    def METHOD_NAME(self, name: str, query: str, parent: Optional[UUID] = None) -> SavedSearch:
"""Create a new saved search and add it to the store."""
search_id = uuid4()
search = SavedSearch(id=search_id, name=name, query=query)
if parent:
self.add(search, parent)
else:
self.data.append(search)
self.lookup[search_id] = search
return search | null |
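# Minimal usage sketch (illustrative only; assumes BaseStore provides the add()/parent()
# behaviour used above and that the query string follows GTG's saved-search syntax):
#
#     store = SavedSearchStore()
#     search = store.METHOD_NAME('Urgent', '@urgent')
#     xml_root = store.to_xml()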
solution for delegated puzzle | """
Pay to delegated puzzle or hidden puzzle
In this puzzle program, the solution must choose either a hidden puzzle or a
delegated puzzle on a given public key.
The given public key is morphed by adding an offset from the hash of the hidden puzzle
and itself, giving a new so-called "synthetic" public key which has the hidden puzzle
hidden inside of it.
If the hidden puzzle path is taken, the hidden puzzle and original public key will be revealed
which proves that it was hidden there in the first place.
This roughly corresponds to bitcoin's taproot.
Note:
p2_delegated_puzzle_or_hidden_puzzle is essentially the "standard coin" in chia.
DEFAULT_HIDDEN_PUZZLE_HASH from this puzzle is used with
calculate_synthetic_secret_key in the wallet's standard pk_to_sk finder.
This is important because it allows sign_coin_spends to function properly via the
following mechanism:
- A 'standard coin' coin exists in the blockchain with some puzzle hash.
- The user's wallet contains a primary sk/pk pair which are used to derive to one
level a set of auxiliary sk/pk pairs which are used for specific coins. These
can be used for signing in AGG_SIG_ME, but the standard coin uses a key further
derived from one of these via calculate_synthetic_secret_key as described in
https://chialisp.com/docs/standard_transaction. Therefore, when a wallet needs
to find a secret key for signing based on a public key, it needs to try repeating
this derivation as well and see if the G1Element (pk) associated with any of the
derived secret keys matches the pk requested by the coin.
- Python code previously appeared which was written like:
    delegated_puzzle_solution = Program.to((1, condition_args))
    solutions = Program.to([[], delegated_puzzle_solution, []])
In context, delegated_puzzle_solution here is any *chialisp program*, here one
simply quoting a list of conditions, and the following argument is the arguments
to this program, which here are unused. Secondly, the actual arguments to the
p2_delegated_puzzle_or_hidden_puzzle are given. The first argument determines
whether a hidden or revealed puzzle is used. If the puzzle is hidden, then what
is required is a signature given a specific synthetic key since the key cannot be
derived inline without the puzzle. In that case, the first argument is this key.
In most cases, the puzzle will be revealed, and this argument will be the nil object,
() (represented here by an empty python list).
The second and third arguments are a chialisp program and its corresponding
arguments, which will be run inside the standard coin puzzle. This interacts with
sign_coin_spend in that the AGG_SIG_ME condition added by the inner puzzle asks the
surrounding system to provide a signature over the provided program with a synthetic
key whose derivation is within. Any wallets which intend to use standard coins in
this way must try to resolve a public key to a secret key via this derivation.
"""
from __future__ import annotations
import hashlib
from typing import Union
from blspy import G1Element, PrivateKey
from clvm.casts import int_from_bytes
from chia.types.blockchain_format.program import Program
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.wallet.util.curry_and_treehash import calculate_hash_of_quoted_mod_hash, curry_and_treehash
from .load_clvm import load_clvm_maybe_recompile
from .p2_conditions import puzzle_for_conditions
DEFAULT_HIDDEN_PUZZLE = Program.from_bytes(bytes.fromhex("ff0980"))
DEFAULT_HIDDEN_PUZZLE_HASH = DEFAULT_HIDDEN_PUZZLE.get_tree_hash() # this puzzle `(x)` always fails
MOD = load_clvm_maybe_recompile("p2_delegated_puzzle_or_hidden_puzzle.clsp")
QUOTED_MOD_HASH = calculate_hash_of_quoted_mod_hash(MOD.get_tree_hash())
PublicKeyProgram = Union[bytes, Program]
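# Order of the BLS12-381 subgroup (the scalar field modulus); offsets and secret
# exponents below are reduced modulo this value.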
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
def calculate_synthetic_offset(public_key: G1Element, hidden_puzzle_hash: bytes32) -> int:
blob = hashlib.sha256(bytes(public_key) + hidden_puzzle_hash).digest()
offset = int_from_bytes(blob)
offset %= GROUP_ORDER
return offset
def calculate_synthetic_public_key(public_key: G1Element, hidden_puzzle_hash: bytes32) -> G1Element:
synthetic_offset: PrivateKey = PrivateKey.from_bytes(
calculate_synthetic_offset(public_key, hidden_puzzle_hash).to_bytes(32, "big")
)
return public_key + synthetic_offset.get_g1()
def calculate_synthetic_secret_key(secret_key: PrivateKey, hidden_puzzle_hash: bytes32) -> PrivateKey:
secret_exponent = int.from_bytes(bytes(secret_key), "big")
public_key = secret_key.get_g1()
synthetic_offset = calculate_synthetic_offset(public_key, hidden_puzzle_hash)
synthetic_secret_exponent = (secret_exponent + synthetic_offset) % GROUP_ORDER
blob = synthetic_secret_exponent.to_bytes(32, "big")
synthetic_secret_key = PrivateKey.from_bytes(blob)
return synthetic_secret_key
def puzzle_for_synthetic_public_key(synthetic_public_key: G1Element) -> Program:
return MOD.curry(bytes(synthetic_public_key))
def puzzle_hash_for_synthetic_public_key(synthetic_public_key: G1Element) -> bytes32:
public_key_hash = Program.to(bytes(synthetic_public_key)).get_tree_hash()
return curry_and_treehash(QUOTED_MOD_HASH, public_key_hash)
def puzzle_for_public_key_and_hidden_puzzle_hash(public_key: G1Element, hidden_puzzle_hash: bytes32) -> Program:
synthetic_public_key = calculate_synthetic_public_key(public_key, hidden_puzzle_hash)
return puzzle_for_synthetic_public_key(synthetic_public_key)
def puzzle_hash_for_public_key_and_hidden_puzzle_hash(public_key: G1Element, hidden_puzzle_hash: bytes32) -> bytes32:
synthetic_public_key = calculate_synthetic_public_key(public_key, hidden_puzzle_hash)
return puzzle_hash_for_synthetic_public_key(synthetic_public_key)
def puzzle_for_public_key_and_hidden_puzzle(public_key: G1Element, hidden_puzzle: Program) -> Program:
return puzzle_for_public_key_and_hidden_puzzle_hash(public_key, hidden_puzzle.get_tree_hash())
def puzzle_for_pk(public_key: G1Element) -> Program:
return puzzle_for_public_key_and_hidden_puzzle_hash(public_key, DEFAULT_HIDDEN_PUZZLE_HASH)
def puzzle_hash_for_pk(public_key: G1Element) -> bytes32:
return puzzle_hash_for_public_key_and_hidden_puzzle_hash(public_key, DEFAULT_HIDDEN_PUZZLE_HASH)
def METHOD_NAME(delegated_puzzle: Program, solution: Program) -> Program:
return Program.to([[], delegated_puzzle, solution])
def solution_for_hidden_puzzle(
hidden_public_key: G1Element,
hidden_puzzle: Program,
solution_to_hidden_puzzle: Program,
) -> Program:
return Program.to([hidden_public_key, hidden_puzzle, solution_to_hidden_puzzle])
def solution_for_conditions(conditions) -> Program:
delegated_puzzle = puzzle_for_conditions(conditions)
return METHOD_NAME(delegated_puzzle, Program.to(0)) | null |
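if __name__ == "__main__":
    # Illustrative consistency check only (not part of the original module). It assumes
    # blspy is installed and the chia imports above resolve; the seed value is arbitrary.
    # It demonstrates the property described in the module docstring: the synthetic secret
    # key and synthetic public key are derived consistently from the same hidden puzzle hash.
    from blspy import AugSchemeMPL

    _sk = AugSchemeMPL.key_gen(bytes([7] * 32))
    _pk = _sk.get_g1()
    _synthetic_pk = calculate_synthetic_public_key(_pk, DEFAULT_HIDDEN_PUZZLE_HASH)
    _synthetic_sk = calculate_synthetic_secret_key(_sk, DEFAULT_HIDDEN_PUZZLE_HASH)
    assert _synthetic_sk.get_g1() == _synthetic_pk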
get name | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from typing import Optional
import msgpack
from nvflare.fuel.f3.connection import BytesAlike, Connection
from nvflare.fuel.f3.endpoint import Endpoint
from nvflare.fuel.f3.sfm.constants import HandshakeKeys, Types
from nvflare.fuel.f3.sfm.prefix import PREFIX_LEN, Prefix
log = logging.getLogger(__name__)
class SfmConnection:
"""A wrapper of driver connection.
    Driver connection deals with frames. This connection handles messages.
The frame format:
.. code-block::
+--------------------------------------------------------+
| length (4 bytes) |
+----------------------------+---------------------------+
| header_len (2) | type (1) | reserved |
+----------------------------+---------------------------+
| flags (2) | app_id (2) |
+----------------------------+---------------------------+
| stream_id (2) | sequence (2) |
+--------------------------------------------------------+
| Headers |
| header_len bytes |
+--------------------------------------------------------+
| |
| Payload |
| (length-header_len-16) bytes |
| |
+--------------------------------------------------------+
"""
def __init__(self, conn: Connection, local_endpoint: Endpoint):
self.conn = conn
self.local_endpoint = local_endpoint
self.sfm_endpoint = None
self.last_activity = 0
self.sequence = 0
self.lock = threading.Lock()
def METHOD_NAME(self) -> str:
return self.conn.name
def next_sequence(self) -> int:
"""Get next sequence number for the connection.
Sequence is used to detect lost frames.
"""
with self.lock:
self.sequence = (self.sequence + 1) & 0xFFFF
return self.sequence
def send_handshake(self, frame_type: int):
"""Send HELLO/READY frame"""
data = {HandshakeKeys.ENDPOINT_NAME: self.local_endpoint.name, HandshakeKeys.TIMESTAMP: time.time()}
if self.local_endpoint.properties:
data.update(self.local_endpoint.properties)
self.send_dict(frame_type, 1, data)
def send_heartbeat(self, frame_type: int, data: Optional[dict] = None):
"""Send Ping or Pong"""
if frame_type not in (Types.PING, Types.PONG):
log.error(f"Heartbeat type must be PING or PONG, not {frame_type}")
return
if not self.sfm_endpoint:
log.error("Trying to send heartbeat before SFM Endpoint is established")
return
stream_id = self.sfm_endpoint.next_stream_id()
self.send_dict(frame_type, stream_id, data)
def send_data(self, app_id: int, stream_id: int, headers: Optional[dict], payload: BytesAlike):
"""Send user data"""
prefix = Prefix(0, 0, Types.DATA, 0, 0, app_id, stream_id, 0)
self.send_frame(prefix, headers, payload)
def send_dict(self, frame_type: int, stream_id: int, data: dict):
"""Send a dict as payload"""
prefix = Prefix(0, 0, frame_type, 0, 0, 0, stream_id, 0)
payload = msgpack.packb(data)
self.send_frame(prefix, None, payload)
def send_frame(self, prefix: Prefix, headers: Optional[dict], payload: Optional[BytesAlike]):
headers_bytes = self.headers_to_bytes(headers)
header_len = len(headers_bytes) if headers_bytes else 0
length = PREFIX_LEN + header_len
if payload:
length += len(payload)
prefix.length = length
prefix.header_len = header_len
prefix.sequence = self.next_sequence()
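        # Assemble the frame as [prefix (PREFIX_LEN bytes)][headers][payload]. The slice
        # assignments below temporarily resize the bytearray; it ends up `length` bytes long.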
buffer: bytearray = bytearray(length)
offset = 0
prefix.to_buffer(buffer, offset)
offset += PREFIX_LEN
if headers_bytes:
buffer[offset:] = headers_bytes
offset += header_len
if payload:
buffer[offset:] = payload
log.debug(f"Sending frame: {prefix} on {self.conn}")
# Only one thread can send data on a connection. Otherwise, the frames may interleave.
with self.lock:
self.conn.send_frame(buffer)
@staticmethod
def headers_to_bytes(headers: Optional[dict]) -> Optional[bytes]:
if headers:
return msgpack.packb(headers)
else:
return None | null |
critical cancel log | from copy import copy
from sysexecution.order_stacks.order_stack import missingOrder
from sysexecution.orders.named_order_objects import missing_order
from syscore.genutils import quickTimer
from sysexecution.stack_handler.stackHandlerCore import stackHandlerCore
from sysexecution.orders.list_of_orders import listOfOrders
from sysexecution.orders.broker_orders import brokerOrder
from sysproduction.data.broker import dataBroker
class stackHandlerCancelAndModify(stackHandlerCore):
def cancel_and_confirm_all_broker_orders(
self, log_critical_on_timeout: bool = False, wait_time_seconds: int = 60
):
"""
Try cancelling all our orders
We send the cancellations, and then poll for confirmation
If no cancellation comes, then we may send an email
:param log_critical_on_timeout: if the cancellation doesn't come through in time, log critical error
:param wait_time_seconds: Time after cancellation to give up (and send email)
:return: success or failure
"""
list_of_broker_orders = (
self.try_and_cancel_all_broker_orders_and_return_list_of_orders()
)
list_of_uncancelled_broker_orders = self.are_all_orders_cancelled_after_timeout(
list_of_broker_orders, wait_time_seconds=wait_time_seconds
)
if len(list_of_uncancelled_broker_orders) > 0:
            # These orders were not confirmed as cancelled within the time limit
if log_critical_on_timeout:
self.METHOD_NAME(list_of_uncancelled_broker_orders)
else:
self.log.debug("All orders cancelled okay")
def try_and_cancel_all_broker_orders_and_return_list_of_orders(
self,
) -> listOfOrders:
list_of_broker_order_ids = self.broker_stack.get_list_of_order_ids()
list_of_broker_orders = []
for broker_order_id in list_of_broker_order_ids:
broker_order = self.cancel_broker_order_with_id_and_return_order(
broker_order_id
)
if broker_order is not missing_order:
list_of_broker_orders.append(broker_order)
list_of_broker_orders = listOfOrders(list_of_broker_orders)
return list_of_broker_orders
def cancel_broker_order_with_id_and_return_order(
self, broker_order_id: int
) -> brokerOrder:
broker_order = self.broker_stack.get_order_with_id_from_stack(broker_order_id)
if broker_order is missing_order:
return missing_order
if broker_order.fill_equals_desired_trade():
# no need to cancel
return missing_order
log = broker_order.log_with_attributes(self.log)
log.debug("Cancelling order on stack with broker %s" % str(broker_order))
data_broker = self.data_broker
data_broker.cancel_order_on_stack(broker_order)
return broker_order
def are_all_orders_cancelled_after_timeout(
self, list_of_broker_orders: listOfOrders, wait_time_seconds: int = 60
) -> listOfOrders:
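        """Poll until every order is confirmed as cancelled or the timer expires;
        return any orders still not confirmed as cancelled."""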
timer = quickTimer(wait_time_seconds)
while timer.unfinished:
list_of_broker_orders = self.list_of_orders_not_yet_cancelled(
list_of_broker_orders
)
if len(list_of_broker_orders) == 0:
break
return list_of_broker_orders
def list_of_orders_not_yet_cancelled(
self, list_of_broker_orders: listOfOrders
) -> listOfOrders:
new_list_of_orders = copy(list_of_broker_orders)
for broker_order in list_of_broker_orders:
# if an order is cancelled, remove from list
try:
order_is_cancelled = self.check_order_cancelled(broker_order)
except missingOrder:
# Maintains previous behavior by assuming an order was cancelled
# when the corresponding IB order is not found
order_is_cancelled = True
if order_is_cancelled:
log = broker_order.log_with_attributes(self.log)
new_list_of_orders.remove(broker_order)
                log.debug("Order %s successfully cancelled" % broker_order)
new_list_of_orders = listOfOrders(new_list_of_orders)
return new_list_of_orders
def check_order_cancelled(self, broker_order: brokerOrder) -> bool:
data_broker = self.data_broker
order_is_cancelled = data_broker.check_order_is_cancelled(broker_order)
return order_is_cancelled
def METHOD_NAME(self, list_of_broker_orders: listOfOrders):
for broker_order in list_of_broker_orders:
log = broker_order.log_with_attributes(self.log)
log.critical(
"Broker order %s could not be cancelled within time limit; might be a position break"
% broker_order
) | null |
test static bss | #!/usr/bin/env python
import subprocess
from subprocess import Popen
import pytest
import stat
import lief
import pathlib
from utils import get_sample, is_linux, is_x86_64
def test_issue_872(tmp_path):
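    """Regression check for issue 872: adding a new ALLOC section must not move the
    existing .payload section, and the new section must be backed by a segment at the
    requested virtual address."""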
tmp = pathlib.Path(tmp_path)
elf: lief.ELF.Binary = lief.parse(get_sample('ELF/i872_risv.elf'))
payload_sec = elf.get_section(".payload")
offset = payload_sec.offset
new_section = lief.ELF.Section(".new_section")
new_section.virtual_address = 0xa0000000
new_section.add(lief.ELF.SECTION_FLAGS.ALLOC)
new_section.size = 0x1000
new_section.content = [0xa5] * 0x1000
elf.add(new_section)
outpath = tmp / "i872_risv_modified.elf"
elf.write(outpath.as_posix())
modified: lief.ELF.Binary = lief.ELF.parse(outpath.as_posix())
new_offset = modified.get_section(".payload").offset
new_section = modified.get_section(".new_section")
new_segment = modified.segment_from_offset(new_section.offset)
assert offset == new_offset
assert new_section.virtual_address == 0xa0000000
assert new_segment is not None
assert new_segment.virtual_address == 0xa0000000
@pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="incompatible env")
@pytest.mark.parametrize("mode", [
lief.ELF.Binary.PHDR_RELOC.SEGMENT_GAP,
lief.ELF.Binary.PHDR_RELOC.FILE_END,
lief.ELF.Binary.PHDR_RELOC.BSS_END
])
def test_static_musl(tmp_path, mode):
sample = get_sample("ELF/i872_hello_musl.elf")
elf: lief.ELF.Binary = lief.ELF.parse(sample)
elf.relocate_phdr_table(mode)
segment = lief.ELF.Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.content = [0xcc for _ in range(0x2000)]
elf.add(segment)
outpath = tmp_path / "modified.elf"
elf.write(outpath.as_posix())
outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"universal_newlines": True
}
with Popen([outpath.as_posix()], **popen_args) as proc:
stdout = proc.stdout.read()
assert "Hello World" in stdout, f"Error: {stdout}"
@pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="incompatible env")
@pytest.mark.parametrize("mode", [
lief.ELF.Binary.PHDR_RELOC.SEGMENT_GAP,
lief.ELF.Binary.PHDR_RELOC.FILE_END,
lief.ELF.Binary.PHDR_RELOC.BSS_END
])
def test_static_musl_bss(tmp_path, mode):
sample = get_sample("ELF/i872_hello_musl_bss.elf")
elf: lief.ELF.Binary = lief.ELF.parse(sample)
elf.relocate_phdr_table(mode)
segment = lief.ELF.Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.content = [0xcc for _ in range(0x2000)]
elf.add(segment)
outpath = tmp_path / "modified.elf"
elf.write(outpath.as_posix())
outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"universal_newlines": True
}
with Popen([outpath.as_posix()], **popen_args) as proc:
stdout = proc.stdout.read()
assert "Hello World" in stdout, f"Error: {stdout}"
@pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="incompatible env")
@pytest.mark.parametrize("mode", [
lief.ELF.Binary.PHDR_RELOC.SEGMENT_GAP,
lief.ELF.Binary.PHDR_RELOC.FILE_END,
lief.ELF.Binary.PHDR_RELOC.BSS_END
])
def test_static(tmp_path, mode):
sample = get_sample("ELF/i872_hello.elf")
elf: lief.ELF.Binary = lief.ELF.parse(sample)
elf.relocate_phdr_table(mode)
segment = lief.ELF.Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.content = [0xcc for _ in range(0x2000)]
elf.add(segment)
outpath = tmp_path / "modified.elf"
elf.write(outpath.as_posix())
outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"universal_newlines": True
}
with Popen([outpath.as_posix()], **popen_args) as proc:
stdout = proc.stdout.read()
assert "Hello World" in stdout, f"Error: {stdout}"
@pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="incompatible env")
@pytest.mark.parametrize("mode", [
lief.ELF.Binary.PHDR_RELOC.SEGMENT_GAP,
lief.ELF.Binary.PHDR_RELOC.FILE_END,
lief.ELF.Binary.PHDR_RELOC.BSS_END
])
def METHOD_NAME(tmp_path, mode):
sample = get_sample("ELF/i872_hello_bss.elf")
elf: lief.ELF.Binary = lief.ELF.parse(sample)
elf.relocate_phdr_table(mode)
segment = lief.ELF.Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.content = [0xcc for _ in range(0x2000)]
elf.add(segment)
outpath = tmp_path / "modified.elf"
elf.write(outpath.as_posix())
outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"universal_newlines": True
}
with Popen([outpath.as_posix()], **popen_args) as proc:
stdout = proc.stdout.read()
assert "Hello World" in stdout, f"Error: {stdout}"
@pytest.mark.skipif(not (is_linux() and is_x86_64()), reason="incompatible env")
@pytest.mark.parametrize("mode", [
lief.ELF.Binary.PHDR_RELOC.SEGMENT_GAP,
lief.ELF.Binary.PHDR_RELOC.FILE_END,
lief.ELF.Binary.PHDR_RELOC.BSS_END
])
def test_docker_init(tmp_path, mode):
sample = get_sample("ELF/docker-init.elf")
elf: lief.ELF.Binary = lief.ELF.parse(sample)
elf.relocate_phdr_table(mode)
segment = lief.ELF.Segment()
segment.type = lief.ELF.SEGMENT_TYPES.LOAD
segment.content = [0xcc for _ in range(0x2000)]
elf.add(segment)
outpath = tmp_path / "modified.elf"
elf.write(outpath.as_posix())
outpath.chmod(outpath.stat().st_mode | stat.S_IEXEC)
popen_args = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"universal_newlines": True
}
with Popen([outpath.as_posix(), "--version"], **popen_args) as proc:
stdout = proc.stdout.read()
assert "tini version 0.19.0" in stdout, f"Error: {stdout}" | null |
test pickle3 d | import unittest
import numpy as np
import discretize
from discretize.utils import example_simplex_mesh
import os
import pickle
import matplotlib.pyplot as plt
try:
import vtk # NOQA F401
has_vtk = True
except ImportError:
has_vtk = False
class SimplexTests(unittest.TestCase):
def test_init_errors(self):
bad_nodes = np.array(
[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[2, 2, 0],
]
)
simplices = np.array([[0, 1, 2, 3]])
with self.assertRaises(ValueError):
discretize.SimplexMesh(bad_nodes, simplices)
good_nodes = np.array(
[
[0, 0, 0],
[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
]
)
with self.assertRaises(ValueError):
# pass incompatible shaped nodes and simplices
discretize.SimplexMesh(good_nodes, simplices[:, :-1])
with self.assertRaises(ValueError):
# pass bad dimensionality
discretize.SimplexMesh(np.random.rand(10, 4), simplices[:, :-1])
def test_find_containing(self):
n = 4
points, simplices = example_simplex_mesh((n, n))
mesh = discretize.SimplexMesh(points, simplices)
x = np.array([[0.1, 0.2], [0.3, 0.4]])
inds = mesh.point2index(x)
np.testing.assert_equal(inds, [16, 5])
def test_pickle2D(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n))
mesh0 = discretize.SimplexMesh(points, simplices)
byte_string = pickle.dumps(mesh0)
mesh1 = pickle.loads(byte_string)
np.testing.assert_equal(mesh0.nodes, mesh1.nodes)
np.testing.assert_equal(mesh0._simplices, mesh1._simplices)
def METHOD_NAME(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n, n))
mesh0 = discretize.SimplexMesh(points, simplices)
byte_string = pickle.dumps(mesh0)
mesh1 = pickle.loads(byte_string)
np.testing.assert_equal(mesh0.nodes, mesh1.nodes)
np.testing.assert_equal(mesh0._simplices, mesh1._simplices)
def test_image_plotting(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n))
mesh = discretize.SimplexMesh(points, simplices)
cc_dat = np.random.rand(mesh.n_cells)
n_dat = np.random.rand(mesh.n_nodes)
f_dat = np.random.rand(mesh.n_faces)
e_dat = np.random.rand(mesh.n_edges)
ccv_dat = np.random.rand(mesh.n_cells, 2)
mesh.plot_image(cc_dat)
mesh.plot_image(ccv_dat, v_type="CCv", view="vec")
mesh.plot_image(n_dat)
mesh.plot_image(f_dat, v_type="Fx")
mesh.plot_image(f_dat, v_type="Fy")
mesh.plot_image(f_dat, v_type="F")
mesh.plot_image(f_dat, v_type="F", view="vec")
mesh.plot_image(e_dat, v_type="Ex")
mesh.plot_image(e_dat, v_type="Ey")
mesh.plot_image(e_dat, v_type="E")
mesh.plot_image(e_dat, v_type="E", view="vec")
with self.assertRaises(NotImplementedError):
points, simplices = discretize.utils.example_simplex_mesh((n, n, n))
mesh = discretize.SimplexMesh(points, simplices)
cc_dat = np.random.rand(mesh.n_cells)
mesh.plot_image(cc_dat)
plt.close("all")
def test_plot_grid(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n))
mesh = discretize.SimplexMesh(points, simplices)
mesh.plot_grid(nodes=True, faces=True, edges=True, centers=True)
points, simplices = discretize.utils.example_simplex_mesh((n, n, n))
mesh = discretize.SimplexMesh(points, simplices)
mesh.plot_grid(nodes=True, faces=True, edges=True, centers=True)
plt.close("all")
if has_vtk:
def test_2D_vtk(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n))
mesh = discretize.SimplexMesh(points, simplices)
cc_dat = np.random.rand(mesh.n_cells)
vtk_obj = mesh.to_vtk(models={"info": cc_dat})
mesh2, models = discretize.SimplexMesh.vtk_to_simplex_mesh(vtk_obj)
np.testing.assert_equal(mesh.nodes, mesh2.nodes)
np.testing.assert_equal(mesh._simplices, mesh2._simplices)
np.testing.assert_equal(cc_dat, models["info"])
mesh.write_vtk("test.vtu", models={"info": cc_dat})
mesh2, models = discretize.SimplexMesh.read_vtk("test.vtu")
np.testing.assert_equal(mesh.nodes, mesh2.nodes)
np.testing.assert_equal(mesh._simplices, mesh2._simplices)
np.testing.assert_equal(cc_dat, models["info"])
def test_3D_vtk(self):
n = 5
points, simplices = discretize.utils.example_simplex_mesh((n, n, n))
mesh = discretize.SimplexMesh(points, simplices)
cc_dat = np.random.rand(mesh.n_cells)
vtk_obj = mesh.to_vtk(models={"info": cc_dat})
mesh2, models = discretize.SimplexMesh.vtk_to_simplex_mesh(vtk_obj)
np.testing.assert_equal(mesh.nodes, mesh2.nodes)
np.testing.assert_equal(mesh._simplices, mesh2._simplices)
np.testing.assert_equal(cc_dat, models["info"])
mesh.write_vtk("test.vtu", models={"info": cc_dat})
mesh2, models = discretize.SimplexMesh.read_vtk("test.vtu")
np.testing.assert_equal(mesh.nodes, mesh2.nodes)
np.testing.assert_equal(mesh._simplices, mesh2._simplices)
np.testing.assert_equal(cc_dat, models["info"])
def tearDown(self):
try:
os.remove("test.vtu")
except FileNotFoundError:
pass | null |
process model name | import json
import os
import re
from typing import Dict
import fsspec
import yaml
from coqpit import Coqpit
from TTS.config.shared_configs import *
from TTS.utils.generic_utils import find_module
def read_json_with_comments(json_path):
"""for backward compat."""
# fallback to json
with fsspec.open(json_path, "r", encoding="utf-8") as f:
input_str = f.read()
# handle comments
input_str = re.sub(r"\\\n", "", input_str)
input_str = re.sub(r"//.*\n", "\n", input_str)
data = json.loads(input_str)
return data
def register_config(model_name: str) -> Coqpit:
"""Find the right config for the given model name.
Args:
model_name (str): Model name.
Raises:
ModuleNotFoundError: No matching config for the model name.
Returns:
Coqpit: config class.
"""
config_class = None
config_name = model_name + "_config"
paths = ["TTS.tts.configs", "TTS.vocoder.configs", "TTS.encoder.configs", "TTS.vc.configs"]
for path in paths:
try:
config_class = find_module(path, config_name)
except ModuleNotFoundError:
pass
if config_class is None:
raise ModuleNotFoundError(f" [!] Config for {model_name} cannot be found.")
return config_class
def METHOD_NAME(config_dict: Dict) -> str:
"""Format the model name as expected. It is a band-aid for the old `vocoder` model names.
Args:
config_dict (Dict): A dictionary including the config fields.
Returns:
        str: Formatted model name.
"""
model_name = config_dict["model"] if "model" in config_dict else config_dict["generator_model"]
model_name = model_name.replace("_generator", "").replace("_discriminator", "")
return model_name
def load_config(config_path: str) -> Coqpit:
"""Import `json` or `yaml` files as TTS configs. First, load the input file as a `dict` and check the model name
to find the corresponding Config class. Then initialize the Config.
Args:
config_path (str): path to the config file.
Raises:
TypeError: given config file has an unknown type.
Returns:
Coqpit: TTS config object.
"""
config_dict = {}
ext = os.path.splitext(config_path)[1]
if ext in (".yml", ".yaml"):
with fsspec.open(config_path, "r", encoding="utf-8") as f:
data = yaml.safe_load(f)
elif ext == ".json":
try:
with fsspec.open(config_path, "r", encoding="utf-8") as f:
data = json.load(f)
except json.decoder.JSONDecodeError:
# backwards compat.
data = read_json_with_comments(config_path)
else:
raise TypeError(f" [!] Unknown config file type {ext}")
config_dict.update(data)
model_name = METHOD_NAME(config_dict)
config_class = register_config(model_name.lower())
config = config_class()
config.from_dict(config_dict)
return config
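# Illustrative usage (the path is a placeholder):
#
#     config = load_config("/path/to/config.json")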
def check_config_and_model_args(config, arg_name, value):
"""Check the give argument in `config.model_args` if exist or in `config` for
the given value.
Return False if the argument does not exist in `config.model_args` or `config`.
This is to patch up the compatibility between models with and without `model_args`.
TODO: Remove this in the future with a unified approach.
"""
if hasattr(config, "model_args"):
if arg_name in config.model_args:
return config.model_args[arg_name] == value
if hasattr(config, arg_name):
return config[arg_name] == value
return False
def get_from_config_or_model_args(config, arg_name):
"""Get the given argument from `config.model_args` if exist or in `config`."""
if hasattr(config, "model_args"):
if arg_name in config.model_args:
return config.model_args[arg_name]
return config[arg_name]
def get_from_config_or_model_args_with_default(config, arg_name, def_val):
"""Get the given argument from `config.model_args` if exist or in `config`."""
if hasattr(config, "model_args"):
if arg_name in config.model_args:
return config.model_args[arg_name]
if hasattr(config, arg_name):
return config[arg_name]
return def_val | null |
test query assertions | import unittest
from stix_shifter.stix_translation import stix_translation
translation = stix_translation.StixTranslation()
MODULE = 'reversinglabs'
def METHOD_NAME(query, queries):
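    """Assert that the translation result is a dict whose 'queries' list contains
    exactly the single expected query string."""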
assert isinstance(query, dict) is True
assert 'queries' in query
assert query['queries'] == [queries]
class TestReversingLabsStixToQuery(unittest.TestCase, object):
@staticmethod
def get_query_translation_result(stix_pattern, options={}):
return translation.translate(MODULE, 'query', MODULE, stix_pattern, options)
def test_ipv4_query(self):
stix_pattern = "[ipv4-addr:value='194.147.78.155']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '194.147.78.155', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_ipv6_query(self):
stix_pattern = "[ipv6-addr:value = '3001:0:0:0:0:0:0:2']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '3001:0:0:0:0:0:0:2', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_multi_ipv4_expression_query(self):
stix_pattern = "([ipv4-addr:value = '194.147.78.155'] OR [ipv4-addr:value = '198.51.100.10'])"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '198.51.100.10', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_url_query(self):
stix_pattern = "[url:value='https://test.com']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'https://test.com', 'dataType': 'url'}"
METHOD_NAME(query, queries)
def test_NOT_and_not_equals_operators(self):
search_string1 = "www.example.com"
search_string2 = "www.example.ca"
stix_pattern = "[url:value != '{}' OR url:value NOT = '{}']".format(search_string1, search_string2)
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'www.example.com', 'dataType': 'url'}"
METHOD_NAME(query, queries)
def test_file_hash_query(self):
stix_pattern = "[file:hashes.'SHA-1'='D5DD920BE5BCFEB904E95DA4B6D0CCCA0727D692']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'D5DD920BE5BCFEB904E95DA4B6D0CCCA0727D692', 'dataType': 'hash'}"
METHOD_NAME(query, queries)
def test_file_hash_md5_query(self):
stix_pattern = "[file:hashes.'MD5'='16cda323189d8eba4248c0a2f5ad0d8f']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '16cda323189d8eba4248c0a2f5ad0d8f', 'dataType': 'hash'}"
METHOD_NAME(query, queries)
def test_generic_filehash_query(self):
stix_pattern = "[file:hashes.'SHA-256' = 'd7fc5162511d42d22462ad5b4c716b73903a677806119f9ad0314763ccd719ca']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'd7fc5162511d42d22462ad5b4c716b73903a677806119f9ad0314763ccd719ca', 'dataType': 'hash'}"
METHOD_NAME(query, queries)
def test_domain_query(self):
stix_pattern = "[domain-name:value='test.com']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'test.com', 'dataType': 'domain'}"
METHOD_NAME(query, queries)
def test_multi_expression_query(self):
stix_pattern = "[domain-name:value='test.com' OR ipv4-addr:value='194.147.78.155']"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': 'test.com', 'dataType': 'domain'}"
METHOD_NAME(query, queries)
def test_not_comp_exp(self):
"""
Test with NOT operator
:return:
"""
stix_pattern = "[ipv4-addr:value NOT = '172.31.60.104'] START t'2020-05-01T08:43:10.003Z' " \
"STOP t'2020-10-30T10:43:10.003Z'"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '172.31.60.104', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_in_comp_exp(self):
"""
Test with IN operator
"""
stix_pattern = "[ipv4-addr:value IN ('172.31.60.104','94.147.78.155')]"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '(172.31.60.104 OR 94.147.78.155)', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_one_obser_is_super_set_operator_network(self):
"""
to test single observation with an un-supported operator
"""
stix_pattern = "[ipv4-addr:value ISSUPERSET '172.217.0.0/24'] " \
"START t'2019-04-10T08:43:10.003Z' STOP t'2019-04-23T10:43:10.003Z'"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
assert query['success'] is False
assert query['code'] == 'mapping_error'
def test_like_comp_exp(self):
"""
Test with LIKE operator
"""
stix_pattern = "[ipv4-addr:value LIKE '172.31.60.104'] START t'2020-10-01T08:43:10.003Z' " \
"STOP t'2020-10-30T10:43:10.003Z'"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '%172.31.60.104%', 'dataType': 'ip'}"
METHOD_NAME(query, queries)
def test_matches_comp_exp(self):
"""
Test with MATCHES operator
:return:
"""
stix_pattern = "[ipv4-addr:value MATCHES '\\\\d+'] START t'2020-10-01T08:43:10.003Z' STOP " \
"t'2020-10-30T10:43:10.003Z'"
query = TestReversingLabsStixToQuery.get_query_translation_result(stix_pattern)
queries = "{'data': '.*\\\\\\\\d+.*', 'dataType': 'ip'}"
METHOD_NAME(query, queries) | null |
slack | # coding=utf-8
#####################################################
# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
#####################################################
# noqa: E128,E201
from ..client import BaseClient
from ..client import createApiClient
from ..client import config
from ..client import createTemporaryCredentials
from ..client import createSession
_defaultConfig = config
class Notify(BaseClient):
"""
The notification service listens for tasks with associated notifications
and handles requests to send emails and post pulse messages.
"""
classOptions = {
}
serviceName = 'notify'
apiVersion = 'v1'
def ping(self, *args, **kwargs):
"""
Ping Server
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
def lbheartbeat(self, *args, **kwargs):
"""
Load Balancer Heartbeat
Respond without doing anything.
This endpoint is used to check that the service is up.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["lbheartbeat"], *args, **kwargs)
def version(self, *args, **kwargs):
"""
Taskcluster Version
Respond with the JSON version object.
https://github.com/mozilla-services/Dockerflow/blob/main/docs/version_object.md
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["version"], *args, **kwargs)
def email(self, *args, **kwargs):
"""
Send an Email
Send an email to `address`. The content is markdown and will be rendered
to HTML, but both the HTML and raw markdown text will be sent in the
email. If a link is included, it will be rendered to a nice button in the
HTML version of the email
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
def pulse(self, *args, **kwargs):
"""
Publish a Pulse Message
Publish a message on pulse with the given `routingKey`.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
def matrix(self, *args, **kwargs):
"""
Post Matrix Message
Post a message to a room in Matrix. Optionally includes formatted message.
The `roomId` in the scopes is a fully formed `roomId` with leading `!` such
as `!foo:bar.com`.
Note that the matrix client used by taskcluster must be invited to a room before
it can post there!
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["matrix"], *args, **kwargs)
def METHOD_NAME(self, *args, **kwargs):
"""
Post Slack Message
Post a message to a Slack channel.
The `channelId` in the scopes is a Slack channel ID, starting with a capital C.
The Slack app can post into public channels by default but will need to be added
to private channels before it can post messages there.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["slack"], *args, **kwargs)
def addDenylistAddress(self, *args, **kwargs):
"""
Denylist Given Address
Add the given address to the notification denylist. Addresses in the denylist will be ignored
by the notification service.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
def deleteDenylistAddress(self, *args, **kwargs):
"""
Delete Denylisted Address
Delete the specified address from the notification denylist.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
def listDenylist(self, *args, **kwargs):
"""
List Denylisted Notifications
Lists all the denylisted addresses.
By default this end-point will try to return up to 1000 addresses in one
request. But it **may return less**, even if more tasks are available.
It may also return a `continuationToken` even though there are no more
results. However, you can only be sure to have seen all results if you
keep calling `list` with the last `continuationToken` until you
get a result without a `continuationToken`.
If you are not interested in listing all the members at once, you may
use the query-string option `limit` to return fewer.
This method is ``experimental``
"""
return self._makeApiCall(self.funcinfo["listDenylist"], *args, **kwargs)
def heartbeat(self, *args, **kwargs):
"""
Heartbeat
Respond with a service heartbeat.
This endpoint is used to check on backing services this service
depends on.
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["heartbeat"], *args, **kwargs)
funcinfo = {
"addDenylistAddress": {
'args': [],
'input': 'v1/notification-address.json#',
'method': 'post',
'name': 'addDenylistAddress',
'route': '/denylist/add',
'stability': 'experimental',
},
"deleteDenylistAddress": {
'args': [],
'input': 'v1/notification-address.json#',
'method': 'delete',
'name': 'deleteDenylistAddress',
'route': '/denylist/delete',
'stability': 'experimental',
},
"email": {
'args': [],
'input': 'v1/email-request.json#',
'method': 'post',
'name': 'email',
'route': '/email',
'stability': 'experimental',
},
"heartbeat": {
'args': [],
'method': 'get',
'name': 'heartbeat',
'route': '/__heartbeat__',
'stability': 'stable',
},
"lbheartbeat": {
'args': [],
'method': 'get',
'name': 'lbheartbeat',
'route': '/__lbheartbeat__',
'stability': 'stable',
},
"listDenylist": {
'args': [],
'method': 'get',
'name': 'listDenylist',
'output': 'v1/notification-address-list.json#',
'query': ['continuationToken', 'limit'],
'route': '/denylist/list',
'stability': 'experimental',
},
"matrix": {
'args': [],
'input': 'v1/matrix-request.json#',
'method': 'post',
'name': 'matrix',
'route': '/matrix',
'stability': 'experimental',
},
"ping": {
'args': [],
'method': 'get',
'name': 'ping',
'route': '/ping',
'stability': 'stable',
},
"pulse": {
'args': [],
'input': 'v1/pulse-request.json#',
'method': 'post',
'name': 'pulse',
'route': '/pulse',
'stability': 'experimental',
},
"slack": {
'args': [],
'input': 'v1/slack-request.json#',
'method': 'post',
'name': 'slack',
'route': '/slack',
'stability': 'experimental',
},
"version": {
'args': [],
'method': 'get',
'name': 'version',
'route': '/__version__',
'stability': 'stable',
},
}
__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify'] | null |
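# Illustrative usage sketch (rootUrl and credentials are placeholders; the payload
# fields follow the v1/email-request.json# schema referenced in funcinfo):
#
#     notify = Notify({'rootUrl': 'https://tc.example.com', 'credentials': {...}})
#     notify.email({'address': 'user@example.com',
#                   'subject': 'Task finished',
#                   'content': 'The task completed **successfully**.'})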
install | """AsyncIO support for zmq
Requires asyncio and Python 3.
"""
# Copyright (c) PyZMQ Developers.
# Distributed under the terms of the Modified BSD License.
import asyncio
import selectors
import sys
import warnings
from asyncio import Future, SelectorEventLoop
from weakref import WeakKeyDictionary
import zmq as _zmq
from zmq import _future
# registry of asyncio loop : selector thread
_selectors: WeakKeyDictionary = WeakKeyDictionary()
class ProactorSelectorThreadWarning(RuntimeWarning):
"""Warning class for notifying about the extra thread spawned by tornado
We automatically support proactor via tornado's AddThreadSelectorEventLoop"""
def _get_selector_windows(
asyncio_loop,
) -> asyncio.AbstractEventLoop:
"""Get selector-compatible loop
Returns an object with ``add_reader`` family of methods,
either the loop itself or a SelectorThread instance.
Workaround Windows proactor removal of
*reader methods, which we need for zmq sockets.
"""
if asyncio_loop in _selectors:
return _selectors[asyncio_loop]
# detect add_reader instead of checking for proactor?
if hasattr(asyncio, "ProactorEventLoop") and isinstance(
asyncio_loop, asyncio.ProactorEventLoop # type: ignore
):
try:
from tornado.platform.asyncio import AddThreadSelectorEventLoop
except ImportError:
raise RuntimeError(
"Proactor event loop does not implement add_reader family of methods required for zmq."
" zmq will work with proactor if tornado >= 6.1 can be found."
" Use `asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())`"
" or install 'tornado>=6.1' to avoid this error."
)
warnings.warn(
"Proactor event loop does not implement add_reader family of methods required for zmq."
" Registering an additional selector thread for add_reader support via tornado."
" Use `asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())`"
" to avoid this warning.",
RuntimeWarning,
# stacklevel 5 matches most likely zmq.asyncio.Context().socket()
stacklevel=5,
)
selector_loop = _selectors[asyncio_loop] = AddThreadSelectorEventLoop(asyncio_loop) # type: ignore
# patch loop.close to also close the selector thread
loop_close = asyncio_loop.close
def _close_selector_and_loop():
# restore original before calling selector.close,
# which in turn calls eventloop.close!
asyncio_loop.close = loop_close
_selectors.pop(asyncio_loop, None)
selector_loop.close()
asyncio_loop.close = _close_selector_and_loop # type: ignore # mypy bug - assign a function to method
return selector_loop
else:
return asyncio_loop
def _get_selector_noop(loop) -> asyncio.AbstractEventLoop:
"""no-op on non-Windows"""
return loop
if sys.platform == "win32":
_get_selector = _get_selector_windows
else:
_get_selector = _get_selector_noop
class _AsyncIO:
_Future = Future
_WRITE = selectors.EVENT_WRITE
_READ = selectors.EVENT_READ
def _default_loop(self):
if sys.version_info >= (3, 7):
try:
return asyncio.get_running_loop()
except RuntimeError:
warnings.warn(
"No running event loop. zmq.asyncio should be used from within an asyncio loop.",
RuntimeWarning,
stacklevel=4,
)
# get_event_loop deprecated in 3.10:
return asyncio.get_event_loop()
class Poller(_AsyncIO, _future._AsyncPoller):
"""Poller returning asyncio.Future for poll results."""
def _watch_raw_socket(self, loop, socket, evt, f):
"""Schedule callback for a raw socket"""
selector = _get_selector(loop)
if evt & self._READ:
selector.add_reader(socket, lambda *args: f())
if evt & self._WRITE:
selector.add_writer(socket, lambda *args: f())
def _unwatch_raw_sockets(self, loop, *sockets):
"""Unschedule callback for a raw socket"""
selector = _get_selector(loop)
for socket in sockets:
selector.remove_reader(socket)
selector.remove_writer(socket)
class Socket(_AsyncIO, _future._AsyncSocket):
"""Socket returning asyncio Futures for send/recv/poll methods."""
_poller_class = Poller
def _get_selector(self, io_loop=None):
if io_loop is None:
io_loop = self._get_loop()
return _get_selector(io_loop)
def _init_io_state(self, io_loop=None):
"""initialize the ioloop event handler"""
self._get_selector(io_loop).add_reader(
self._fd, lambda: self._handle_events(0, 0)
)
def _clear_io_state(self):
"""clear any ioloop event handler
called once at close
"""
loop = self._current_loop
if loop and not loop.is_closed() and self._fd != -1:
self._get_selector(loop).remove_reader(self._fd)
Poller._socket_class = Socket
class Context(_zmq.Context[Socket]):
"""Context for creating asyncio-compatible Sockets"""
_socket_class = Socket
# avoid sharing instance with base Context class
_instance = None
class ZMQEventLoop(SelectorEventLoop):
"""DEPRECATED: AsyncIO eventloop using zmq_poll.
pyzmq sockets should work with any asyncio event loop as of pyzmq 17.
"""
def __init__(self, selector=None):
_deprecated()
return super().__init__(selector)
_loop = None
def _deprecated():
if _deprecated.called: # type: ignore
return
_deprecated.called = True # type: ignore
warnings.warn(
"ZMQEventLoop and zmq.asyncio.install are deprecated in pyzmq 17. Special eventloop integration is no longer needed.",
DeprecationWarning,
stacklevel=3,
)
_deprecated.called = False # type: ignore
def METHOD_NAME():
"""DEPRECATED: No longer needed in pyzmq 17"""
_deprecated()
__all__ = [
"Context",
"Socket",
"Poller",
"ZMQEventLoop",
"install",
] | null |
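# Illustrative usage sketch (run inside an asyncio program; the inproc address is a placeholder):
#
#     import asyncio
#     import zmq
#     import zmq.asyncio
#
#     async def main():
#         ctx = zmq.asyncio.Context()
#         push, pull = ctx.socket(zmq.PUSH), ctx.socket(zmq.PULL)
#         push.bind("inproc://example")
#         pull.connect("inproc://example")
#         await push.send(b"ping")
#         print(await pull.recv())
#
#     asyncio.run(main())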
report | #!/usr/bin/env python3
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import collections
import argparse
import multiprocessing
import mooseutils
# List of available languages and an associated function for testing if a filename is that language
LANGUAGES = collections.OrderedDict()
LANGUAGES['C++'] = lambda f: f.endswith(('.C', '.h'))
LANGUAGES['Python'] = lambda f: f.endswith('.py')
LANGUAGES['Input'] = lambda f: f.endswith(('.i', '.hit'))
LANGUAGES['Markdown'] = lambda f: f.endswith('.md')
LANGUAGES['Make'] = lambda f: f.endswith(('Makefile', '.mk'))
LANGUAGES['YAML'] = lambda f: f.endswith('.yml')
def get_options():
"""Return the command-line options"""
parser = argparse.ArgumentParser(description='Tool for listing author line counts.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('locations', nargs='*', type=str, default=[mooseutils.git_root_dir()],
help='The repository directory to consider.')
parser.add_argument('-j', '--num-threads', type=int, default=os.cpu_count(),
help="The number of threads to use for computing the counts.")
parser.add_argument('--exclude', nargs=1, type=str, default='contrib',
help="Exclude pattern passed to git ls-files call.")
parser.add_argument('-l', '--languages', nargs='+', type=str, choices=list(LANGUAGES.keys()),
default=list(LANGUAGES.keys()),
help="Limit the analysis the the listed languages.")
return parser.parse_args()
def target(filename):
"""Helper for counting the lines, by author of the given filename"""
return mooseutils.git_lines(filename)
def update_count(c, lang, counts):
"""
    Add the counts from 'target' to the total count
Input:
c[dict]: Local counts with authors as keys, returned from 'target' function
lang[str]: The language key that the 'c' count dict is associated
counts[dict of dict]: The global count by author, then language
"""
for key, value in c.items():
counts[key][lang] += value
def METHOD_NAME(counts, commits, merges):
"""
Prints the global count in a table on the screen
"""
titles = list(list(counts.values())[0].keys()) + ['Total', 'Commits', 'Merges']
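    # Fixed-width table: a 25-character name column plus 10 characters per numeric column.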
row_format = '{:>25}'
row_format += "{:>10}" * (len(titles))
n = 25 + 10 * len(titles)
totals = {k:0 for k in titles}
print('-'*n)
print(row_format.format("Name", *titles))
print('-'*n)
for author, row in reversed(sorted(counts.items(), key=lambda item:sum(item[1].values()))):
row['Total'] = sum(row.values())
values = ['{:,}'.format(row[key]) for key in titles if key not in ('Commits', 'Merges')]
c = commits.get(author, 0)
m = merges.get(author, 0)
values += [c, m]
row['Commits'] = c
row['Merges'] = m
for key in titles:
totals[key] += row[key]
print(row_format.format(author, *values))
print('-'*n)
print(row_format.format('TOTAL', *['{:,}'.format(totals[key]) for key in titles]))
if __name__ == '__main__':
args = get_options()
    # Populate desired languages
lang = collections.OrderedDict()
for key in args.languages:
lang[key] = LANGUAGES[key]
# List all files in the repository
all_files = set()
for location in args.locations:
        all_files.update(mooseutils.git_ls_files(os.path.abspath(location), exclude=args.exclude))
# Group filenames by extension
groups = collections.defaultdict(list)
for filename in all_files:
for key, func in lang.items():
if func(filename):
groups[key].append(filename)
# Report author counts by file type
counts = collections.defaultdict(lambda: {g:0 for g in lang.keys()})
for group, files in groups.items():
print('Counting {} lines...'.format(group), end='')
with multiprocessing.Pool(processes=args.num_threads) as pool:
for c in pool.imap_unordered(target, files):
update_count(c, group, counts)
print('done')
# Compute number of commits per user
commits = dict()
merges = dict()
for location in args.locations:
commits.update(mooseutils.git_committers(location, '--no-merges'))
merges.update(mooseutils.git_committers(location, '--merges'))
METHOD_NAME(counts, commits, merges) | null |
get map of attnum and table oid | import warnings
from sqlalchemy import and_, asc, cast, select, text, exists, Identity
from db.columns.exceptions import DynamicDefaultWarning
from db.connection import execute_msar_func_with_engine
from db.tables.operations.select import reflect_table_from_oid
from db.utils import execute_statement, get_pg_catalog_table
def get_column_attnum_from_names_as_map(table_oid, column_names, engine, metadata, connection_to_use=None):
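    """Return a map of column name -> attnum for the given column names."""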
statement = _get_columns_attnum_from_names(table_oid, column_names, engine, metadata=metadata)
attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
name_attnum_map = {attnum_tuple['attname']: attnum_tuple['attnum'] for attnum_tuple in attnums_tuple}
return name_attnum_map
def get_columns_attnum_from_names(table_oid, column_names, engine, metadata, connection_to_use=None):
"""
    Returns the list of attnums for the given column names.
    The order follows the column order in the table, not the order of the column names argument.
"""
statement = _get_columns_attnum_from_names(table_oid, column_names, engine=engine, metadata=metadata)
attnums_tuple = execute_statement(engine, statement, connection_to_use).fetchall()
attnums = [attnum_tuple[0] for attnum_tuple in attnums_tuple]
return attnums
def get_column_attnum_from_name(table_oid, column_name, engine, metadata, connection_to_use=None):
statement = _get_columns_attnum_from_names(table_oid, [column_name], engine=engine, metadata=metadata)
return execute_statement(engine, statement, connection_to_use).scalar()
def _get_columns_attnum_from_names(table_oid, column_names, engine, metadata):
pg_attribute = get_pg_catalog_table("pg_attribute", engine=engine, metadata=metadata)
sel = select(pg_attribute.c.attnum, pg_attribute.c.attname).where(
and_(
pg_attribute.c.attrelid == table_oid,
pg_attribute.c.attname.in_(column_names)
)
).order_by(asc(pg_attribute.c.attnum))
return sel
def get_column_attnums_from_tables(table_oids, engine, metadata, connection_to_use=None):
pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
sel = select(pg_attribute.c.attnum, pg_attribute.c.attrelid.label('table_oid')).where(
and_(
pg_attribute.c.attrelid.in_(table_oids),
# Ignore system columns
pg_attribute.c.attnum > 0,
# Ignore removed columns
pg_attribute.c.attisdropped.is_(False)
)
)
results = execute_statement(engine, sel, connection_to_use).fetchall()
return results
def METHOD_NAME(table_oids, engine, metadata, connection_to_use=None):
"""
Order determined by the column order in the table.
"""
triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
table_oids, None, engine, metadata, connection_to_use
)
return {
(attnum, table_oid): column_name
for column_name, attnum, table_oid
in triples_of_col_info
}
def get_column_names_from_attnums(table_oid, attnums, engine, metadata, connection_to_use=None):
return list(get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use).values())
def get_map_of_attnum_to_column_name(table_oid, attnums, engine, metadata, connection_to_use=None):
"""
Order determined by the column order in the table.
"""
triples_of_col_info = _get_triples_of_column_name_and_attnum_and_table_oid(
[table_oid], attnums, engine, metadata, connection_to_use
)
return {
attnum: column_name
for column_name, attnum, _
in triples_of_col_info
}
def _get_triples_of_column_name_and_attnum_and_table_oid(
table_oids, attnums, engine, metadata, connection_to_use
):
statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
table_oids, attnums, engine, metadata
)
return execute_statement(engine, statement, connection_to_use).fetchall()
def get_column_default(table_oid, attnum, engine, metadata, connection_to_use=None):
default_dict = get_column_default_dict(
table_oid,
attnum,
engine,
metadata=metadata,
connection_to_use=connection_to_use,
)
if default_dict is not None:
return default_dict['value']
def get_column_default_dict(table_oid, attnum, engine, metadata, connection_to_use=None):
column = get_column_from_oid_and_attnum(
table_oid=table_oid,
attnum=attnum,
engine=engine,
metadata=metadata,
connection_to_use=connection_to_use,
)
default = column.server_default
if default is None:
return
is_dynamic = execute_msar_func_with_engine(
engine, 'is_default_possibly_dynamic', table_oid, attnum
).fetchone()[0]
sql_text = str(default.arg) if not isinstance(default, Identity) else 'identity'
if is_dynamic:
warnings.warn(
"Dynamic column defaults are read only", DynamicDefaultWarning
)
default_value = sql_text
else:
# Defaults are often stored as text with SQL casts appended
# Ex: "'test default string'::character varying" or "'2020-01-01'::date"
# Here, we execute the cast to get the proper python value
default_value = execute_statement(
engine,
select(cast(text(sql_text), column.type)),
connection_to_use
).scalar()
return {"value": default_value, "is_dynamic": is_dynamic}
def determine_whether_column_contains_data(
table_oid, column_name, engine, metadata, connection_to_use=None
):
"""
Given a column, return True if it contains data, False otherwise.
"""
sa_table = reflect_table_from_oid(
table_oid, engine, metadata=metadata, connection_to_use=connection_to_use,
)
sel = select(exists(1).where(sa_table.columns[column_name] != None)) # noqa
contains_data = execute_statement(engine, sel, connection_to_use).scalar()
return contains_data
def get_column_from_oid_and_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
sa_table = reflect_table_from_oid(table_oid, engine, metadata=metadata, connection_to_use=connection_to_use)
column_name = get_column_name_from_attnum(table_oid, attnum, engine, metadata=metadata, connection_to_use=connection_to_use)
sa_column = sa_table.columns[column_name]
return sa_column
def get_column_name_from_attnum(table_oid, attnum, engine, metadata, connection_to_use=None):
statement = _statement_for_triples_of_column_name_and_attnum_and_table_oid(
[table_oid], [attnum], engine, metadata=metadata,
)
column_name = execute_statement(engine, statement, connection_to_use).scalar()
return column_name
def _statement_for_triples_of_column_name_and_attnum_and_table_oid(
table_oids, attnums, engine, metadata
):
"""
Returns (column name, column attnum, column table's oid) tuples for each column that's in the
tables specified via `table_oids`, and, when `attnums` is not None, that has an attnum
specified in `attnums`.
The order is based on the column order in the table and not on the order of the arguments.
"""
pg_attribute = get_pg_catalog_table("pg_attribute", engine, metadata=metadata)
sel = select(pg_attribute.c.attname, pg_attribute.c.attnum, pg_attribute.c.attrelid)
wasnt_dropped = pg_attribute.c.attisdropped.is_(False)
table_oid_matches = pg_attribute.c.attrelid.in_(table_oids)
conditions = [wasnt_dropped, table_oid_matches]
if attnums is not None:
attnum_matches = pg_attribute.c.attnum.in_(attnums)
conditions.append(attnum_matches)
else:
attnum_positive = pg_attribute.c.attnum > 0
conditions.append(attnum_positive)
sel = sel.where(and_(*conditions))
return sel | null |
simple write csv | import concurrent.futures
import csv
import os
from pathlib import Path
import pandas as pd
from typing import List
from typing import Dict
from utils import df_wide_to_long
from utils import iso3_to_iso2
from utils import make_dir
from utils import write_to_csv
def METHOD_NAME(
output_dir: str = None,
name: str = None,
data: List[Dict] | Dict = None,
mode: str = "w",
extension: str = "csv") -> None:
if isinstance(data, dict):
data = [data]
with open(f"{output_dir}/{name}.{extension}", mode=mode) as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=data[0].keys())
writer.writeheader()
writer.writerows(data)
if __name__ == "__main__":
# where to create tables
outputDir = "../data/processed/PIK_PRIMAPv2.4"
outputDir = os.path.abspath(outputDir)
make_dir(path=Path(outputDir).as_posix())
# PRIMPAP dataset
fl = '../data/raw/PIK_PRIMAP-hist/Guetschow-et-al-2022-PRIMAP-hist_v2.4_no_extrap_11-Oct-2022.csv'
fl = os.path.abspath(fl)
# ------------------------------------------
# Publisher table
# ------------------------------------------
publisherDict = {
'id': 'PRIMAP',
        'name': 'Potsdam Real-time Integrated Model for probabilistic Assessment of emissions Paths',
'URL': 'https://www.pik-potsdam.de/paris-reality-check/primap-hist/'
}
METHOD_NAME(
output_dir=outputDir,
name='Publisher',
data=publisherDict,
mode='w'
)
# ------------------------------------------
# DataSource table
# ------------------------------------------
datasourceDict = {
'datasource_id': 'PRIMAP:10.5281/zenodo.7179775:v2.4',
'name': 'PRIMAP-hist CR v2.4',
'publisher': 'PRIMAP',
'published': '2022-10-17',
'URL': 'https://doi.org/10.5281/zenodo.7179775',
'citation': 'Gütschow, J., & Pflüger, M. (2022). The PRIMAP-hist national historical emissions time series v2.4 (1750-2021). Zenodo. doi:10.5281/zenodo.7179775'
}
METHOD_NAME(
output_dir=outputDir,
name='DataSource',
data=datasourceDict,
mode='w'
)
# ------------------------------------------
# EmissionsAgg table
# ------------------------------------------
# create emissionsAgg table
df_pri = pd.read_csv(fl)
# set values
entity = 'KYOTOGHG (AR4GWP100)'
category = 'M.0.EL'
scenario = 'HISTCR'
# filter PRIMAP dataset
filt = (
(df_pri['entity'] == entity) &
(df_pri['category (IPCC2006_PRIMAP)'] == category) &
(df_pri['scenario (PRIMAP-hist)'] == scenario)
)
# filtered dataset
df_pri = df_pri.loc[filt]
# get ISO data
with concurrent.futures.ProcessPoolExecutor(max_workers=8) as executor:
results = [executor.submit(iso3_to_iso2, name, return_input=True)
for name in list(set(df_pri['area (ISO3)']))]
data = [f.result() for f in concurrent.futures.as_completed(results)]
# return ISO as dataframe
df_iso = pd.DataFrame(data, columns=['iso3', 'iso2'])
# merge datasets
df_merged = pd.merge(df_pri, df_iso,
left_on=['area (ISO3)'],
right_on=["iso3"],
how="left")
    # filter out ISO3 code ANT (Netherlands Antilles)
# The Netherlands Antilles dissolved on October 10, 2010
filt = ~(df_merged['area (ISO3)'] == 'ANT')
df_merged = df_merged.loc[filt]
# convert from wide to long dataframe
df_long = df_wide_to_long(df=df_merged,
value_name="emissions",
var_name="year")
# drop custom iso codes, this is not totally necessary
isoCodesToDrop = [
'EARTH',
'ANNEXI',
'NONANNEXI',
'AOSIS',
'BASIC',
'EU27BX',
'LDC',
'UMBRELLA',
'ANT',
]
filt = ~(df_long['area (ISO3)'].isin(isoCodesToDrop))
df_long = df_long.loc[filt]
# filter nan emissions
filt = ~df_long['emissions'].isna()
df_long = df_long.loc[filt]
# rename columns
df_long = df_long.rename(columns={'iso2': 'actor_id'})
def gigagram_to_metric_ton(val):
''' 1 gigagram = 1000 tonnes '''
return val * 1000
# create id columns
df_long['datasource_id'] = datasourceDict['datasource_id']
df_long['emissions_id'] = df_long.apply(lambda row:
f"{row['source']}:{row['actor_id']}:{row['year']}",
axis=1)
# convert emissions to metric tons
df_long['total_emissions'] = df_long['emissions'].apply(
gigagram_to_metric_ton)
# Create EmissionsAgg table
emissionsAggColumns = [
"emissions_id",
"actor_id",
"year",
"total_emissions",
"datasource_id"
]
df_emissionsAgg = df_long[emissionsAggColumns]
# ensure columns have correct types
df_emissionsAgg = df_emissionsAgg.astype({'emissions_id': str,
'actor_id': str,
'year': int,
'total_emissions': int,
'datasource_id': str})
# sort by actor_id and year
df_emissionsAgg = df_emissionsAgg.sort_values(by=['actor_id', 'year'])
# save to csv
df_emissionsAgg.drop_duplicates().to_csv(
f'{outputDir}/EmissionsAgg.csv', index=False)
# =================================================================
# Tags and DataSourceTags
# =================================================================
# dictionary of tag_id : tag_name
tagDict = {
"GHGs_included_CO2_CH4_N2O_F_gases": "GHGs included: CO2, CH4, N2O, and F-gases",
"sectors_energy_IPPU_ag_waste_other": "Sectors: energy, IPPU, agriculture, waste, and other",
"excludes_LULUCF":"Excludes LULUCF",
"GWP_100_SAR_and_AR4": "Uses GWP100 from IPCC SAR and AR4, depending on the gas",
"Excludes_international_aviation_shipping":"Excludes emissions from international aviation and shipping",
'combined_datasets': 'Combined datasets',
'country_or_3rd_party': 'Country-reported data or third party',
'peer_reviewed': 'Peer reviewed',
}
tagDictList = [{"tag_id": key, "tag_name": value} for key, value in tagDict.items()]
METHOD_NAME(outputDir, "Tag", tagDictList)
dataSourceTagDictList = [
{"datasource_id": datasourceDict["datasource_id"], "tag_id": tag["tag_id"]}
for tag in tagDictList
]
METHOD_NAME(outputDir, "DataSourceTag", dataSourceTagDictList)
# ------------------------------------------
# DataSourceQuality table
# ------------------------------------------
DataSourceQualityList = [{
"datasource_id": datasourceDict['datasource_id'],
"score_type": "GHG target",
"score": 0.8,
"notes": "Long time series. not sure if 2021 values are correct. data for all countries. not country reported data"
}]
    METHOD_NAME(outputDir, "DataSourceQuality", DataSourceQualityList) | null
test update weights | import os
import unittest
import torch
import torchvision
from packaging.version import Version
import neural_compressor.adaptor.pytorch as nc_torch
from neural_compressor import PostTrainingQuantConfig, quantization
from neural_compressor.adaptor.torch_utils.model_wrapper import WeightOnlyLinear
from neural_compressor.model import MODELS
from neural_compressor.model import Model as INCModel
from neural_compressor.model.torch_model import PyTorchModel
try:
import intel_pytorch_extension as ipex
TEST_IPEX = True
except:
TEST_IPEX = False
PT_VERSION = nc_torch.get_torch_version()
if PT_VERSION >= Version("1.8.0-rc1"):
FX_MODE = True
else:
FX_MODE = False
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
self.fc1 = torch.nn.Linear(30, 40)
self.fc2 = torch.nn.Linear(40, 30)
self.fc3 = torch.nn.Linear(30, 5)
def forward(self, x):
out = self.fc1(x)
out = self.fc2(out)
out = self.fc3(out)
return out
class TestPytorchModel(unittest.TestCase):
framework = "pytorch"
model = torchvision.models.quantization.resnet18()
lpot_model = MODELS["pytorch"](model)
def test_Model(self):
model = torchvision.models.quantization.resnet18()
inc_model = INCModel(model)
self.assertTrue(isinstance(inc_model, PyTorchModel))
def test_get_all_weight_name(self):
assert len(list(self.lpot_model.get_all_weight_names())) == 62
def test_get_weight(self):
for name, param in self.model.named_parameters():
if name == "layer4.1.conv2.weight":
param.data.fill_(0.0)
if name == "fc.bias":
param.data.fill_(0.1)
assert int(torch.sum(self.lpot_model.get_weight("layer4.1.conv2.weight"))) == 0
assert torch.allclose(torch.sum(torch.tensor(self.lpot_model.get_weight("fc.bias"))), torch.tensor(100.0))
def test_get_input(self):
model = MODELS["pytorch"](torchvision.models.quantization.resnet18())
model.model.eval().fuse_model()
model.register_forward_pre_hook()
rand_input = torch.rand(100, 3, 256, 256).float()
model.model(rand_input)
assert torch.equal(model.get_inputs("x"), rand_input)
model.remove_hooks()
def METHOD_NAME(self):
self.lpot_model.update_weights("fc.bias", torch.zeros([1000]))
assert int(torch.sum(self.lpot_model.get_weight("fc.bias"))) == 0
def test_gradient(self):
with self.assertRaises(AssertionError):
self.lpot_model.get_gradient("fc.bias")
shape = None
for name, tensor in self.lpot_model._model.named_parameters():
if name == "fc.bias":
shape = tensor.shape
tensor.grad = torch.randn(shape)
break
new_grad = torch.zeros(shape)
self.lpot_model.update_gradient("fc.bias", new_grad)
assert torch.equal(torch.tensor(self.lpot_model.get_gradient("fc.bias")), torch.zeros(shape))
rand_input = torch.rand(100, 3, 256, 256).float()
rand_input.grad = torch.ones_like(rand_input)
assert torch.equal(torch.tensor(self.lpot_model.get_gradient(rand_input)), torch.ones_like(rand_input))
def test_report_sparsity(self):
df, total_sparsity = self.lpot_model.report_sparsity()
self.assertTrue(total_sparsity > 0)
self.assertTrue(len(df) == 22)
def test_WeightOnlyLinear(self):
model = Model()
input = torch.randn(1, 30)
conf = PostTrainingQuantConfig(
approach="weight_only",
)
q_model = quantization.fit(model, conf)
out1 = q_model(input)
q_model.save("saved")
model_size1 = os.path.getsize("saved/best_model.pt") / 1024
print("FP32 Model size:{:.3f}M".format(model_size1))
# test compress_bits = [8, 16, 32, 64]
compression_dtype = [torch.int8, torch.int16, torch.int32, torch.int64]
for dtype in compression_dtype:
new_model = Model()
inc_model = INCModel(new_model)
inc_model.export_compressed_model(
qweight_config_path="saved/qconfig.json",
compression_dtype=dtype,
)
out2 = q_model(input)
torch.save(inc_model.state_dict(), "saved/tmp.pt")
model_size2 = os.path.getsize("saved/tmp.pt") / 1024
print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2))
self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear))
self.assertTrue(inc_model.model.fc1.packed_weight.dtype == dtype)
self.assertTrue(inc_model.model.fc1.scale.dtype == torch.float32)
self.assertTrue(model_size1 / model_size2 > 2)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1)))
        # test compression_dim = [0, 1]
compress_dims = [0, 1]
for dim in compress_dims:
new_model = Model()
inc_model = INCModel(new_model)
inc_model.export_compressed_model(
qweight_config_path="saved/qconfig.json",
compression_dim=dim,
)
out2 = q_model(input)
torch.save(inc_model.state_dict(), "saved/tmp.pt")
model_size2 = os.path.getsize("saved/tmp.pt") / 1024
print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2))
self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear))
if dim == 1:
self.assertTrue(inc_model.model.fc1.packed_weight.shape[0] == inc_model.model.fc1.out_features)
else:
self.assertTrue(inc_model.model.fc1.packed_weight.shape[1] == inc_model.model.fc1.in_features)
self.assertTrue(model_size1 / model_size2 > 2)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1)))
# test half dtype
new_model = Model()
inc_model = INCModel(new_model)
inc_model.export_compressed_model(
qweight_config_path="saved/qconfig.json",
scale_dtype=torch.float16,
)
out2 = q_model(input)
torch.save(inc_model.state_dict(), "saved/tmp.pt")
model_size2 = os.path.getsize("saved/tmp.pt") / 1024
print("WeightOnlyLinear Model size:{:.3f}M".format(model_size2))
self.assertTrue(isinstance(inc_model.model.fc1, WeightOnlyLinear))
self.assertTrue(inc_model.model.fc1.scale.dtype == torch.float16)
self.assertTrue(model_size1 / model_size2 > 2)
self.assertTrue(torch.all(torch.isclose(out1, out2, atol=5e-1)))
if __name__ == "__main__":
unittest.main() | null |
test release failure | # SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
from storage.storagetestlib import FakeGuardedLock
from testlib import VdsmTestCase
from vdsm.storage import guarded
class InjectedFailure(Exception):
pass
class TestContext(VdsmTestCase):
def test_empty(self):
with guarded.context([]):
pass
def test_one_vol(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom', 'mode', log),
FakeGuardedLock('02_img', 'img', 'mode', log),
FakeGuardedLock('03_vol', 'vol', 'mode', log)]
expected = [
('acquire', '01_dom', 'dom', 'mode'),
('acquire', '02_img', 'img', 'mode'),
('acquire', '03_vol', 'vol', 'mode'),
('release', '03_vol', 'vol', 'mode'),
('release', '02_img', 'img', 'mode'),
('release', '01_dom', 'dom', 'mode')]
with guarded.context(locks):
self.assertEqual(expected[:3], log)
self.assertEqual(expected, log)
def test_two_vols_different_domains(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log),
FakeGuardedLock('01_dom', 'dom2', 'mode', log),
FakeGuardedLock('02_img', 'img2', 'mode', log),
FakeGuardedLock('03_vol', 'vol2', 'mode', log)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '01_dom', 'dom2', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('acquire', '02_img', 'img2', 'mode'),
('acquire', '03_vol', 'vol1', 'mode'),
('acquire', '03_vol', 'vol2', 'mode'),
('release', '03_vol', 'vol2', 'mode'),
('release', '03_vol', 'vol1', 'mode'),
('release', '02_img', 'img2', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom2', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with guarded.context(locks):
self.assertEqual(expected[:6], log)
self.assertEqual(expected, log)
def test_two_vols_same_image(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log),
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol2', 'mode', log)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('acquire', '03_vol', 'vol1', 'mode'),
('acquire', '03_vol', 'vol2', 'mode'),
('release', '03_vol', 'vol2', 'mode'),
('release', '03_vol', 'vol1', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with guarded.context(locks):
self.assertEqual(expected[:4], log)
self.assertEqual(expected, log)
def test_acquire_failure(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log,
acquire=InjectedFailure)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with self.assertRaises(InjectedFailure):
with guarded.context(locks):
pass
self.assertEqual(expected, log)
    def test_acquire_failure_then_release_failure(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log,
release=InjectedFailure),
FakeGuardedLock('03_vol', 'vol1', 'mode', log,
acquire=InjectedFailure)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with self.assertRaises(InjectedFailure):
with guarded.context(locks):
pass
self.assertEqual(expected, log)
def METHOD_NAME(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log,
release=InjectedFailure)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('acquire', '03_vol', 'vol1', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with self.assertRaises(guarded.ReleaseError):
with guarded.context(locks):
pass
self.assertEqual(expected, log)
def test_fail_inside_context(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('acquire', '03_vol', 'vol1', 'mode'),
('release', '03_vol', 'vol1', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with self.assertRaises(InjectedFailure):
with guarded.context(locks):
raise InjectedFailure()
self.assertEqual(expected, log)
def test_fail_inside_context_with_release_failure(self):
log = []
locks = [
FakeGuardedLock('01_dom', 'dom1', 'mode', log),
FakeGuardedLock('02_img', 'img1', 'mode', log),
FakeGuardedLock('03_vol', 'vol1', 'mode', log,
release=InjectedFailure)]
expected = [
('acquire', '01_dom', 'dom1', 'mode'),
('acquire', '02_img', 'img1', 'mode'),
('acquire', '03_vol', 'vol1', 'mode'),
('release', '02_img', 'img1', 'mode'),
('release', '01_dom', 'dom1', 'mode')]
with self.assertRaises(RuntimeError):
with guarded.context(locks):
raise RuntimeError()
self.assertEqual(expected, log)
def test_deadlock(self):
log = []
locks = [
FakeGuardedLock('00_storage', 'dom', 'shared', log),
            # Attempting to lock the next locks would deadlock in resourceManager.
FakeGuardedLock('02_img.dom', 'img', 'exclusive', log),
FakeGuardedLock('02_img.dom', 'img', 'shared', log),
FakeGuardedLock('03_vol.dom', 'vol', 'exclusive', log),
]
# Creating a context should raise
with self.assertRaises(guarded.Deadlock):
with guarded.context(locks):
pass
# Without locking any of the locks
self.assertEqual([], log) | null |
sample program configs | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestClipOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
opencl_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(device_names=[
"cambricon_mlu", "nvidia_tensorrt", "intel_openvino",
"kunlunxin_xtcl"
])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
if "kunlunxin_xtcl" in self.get_nnadapter_device_name():
in_num = len(program_config.inputs)
if in_num == 3:
return False
return True
def METHOD_NAME(self, draw):
in_shape_tmp = draw(
st.lists(
st.integers(
min_value=1, max_value=8), min_size=1, max_size=4))
in_shape = draw(st.sampled_from([in_shape_tmp, []]))
min_val = float(np.random.randint(0, 100) / 100)
max_val = min_val + 0.5
min_max_shape = draw(st.integers(min_value=1, max_value=1))
case_type = draw(st.sampled_from(["c1", "c2"]))
def generate_input(*args, **kwargs):
return np.random.random(in_shape).astype(np.float32)
def generate_min(*args, **kwargs):
return np.random.random(min_max_shape).astype(np.float32)
def generate_max(*args, **kwargs):
return np.random.random(min_max_shape).astype(np.float32) + 1.0
if case_type == "c1":
clip_op = OpConfig(
type="clip",
inputs={
"X": ["input_data"],
"Min": ["min_data"],
"Max": ["max_data"]
},
outputs={"Out": ["output_data"]},
attrs={"min": 0,
"max": 0})
program_config = ProgramConfig(
ops=[clip_op],
weights={},
inputs={
"input_data":
TensorConfig(data_gen=partial(generate_input)),
"min_data": TensorConfig(data_gen=partial(generate_min)),
"max_data": TensorConfig(data_gen=partial(generate_max)),
},
outputs=["output_data"])
else:
clip_op = OpConfig(
type="clip",
inputs={"X": ["input_data"]},
outputs={"Out": ["output_data"]},
attrs={"min": min_val,
"max": max_val})
program_config = ProgramConfig(
ops=[clip_op],
weights={},
inputs={
"input_data":
TensorConfig(data_gen=partial(generate_input))
},
outputs=["output_data"])
return program_config
def sample_predictor_configs(self):
return self.get_predictor_configs(), ["clip"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
def teller1(program_config, predictor_config):
if "nvidia_tensorrt" in self.get_nnadapter_device_name():
in_num = len(program_config.inputs)
in_shape_size = len(program_config.inputs["input_data"].shape)
if in_num == 3 or in_shape_size == 1:
return True
self.add_ignore_check_case(
teller1, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support '3 input tensors' or 'in_shape_size == 1' on nvidia_tensorrt."
)
def teller2(program_config, predictor_config):
if "intel_openvino" in self.get_nnadapter_device_name():
in_num = len(program_config.inputs)
if in_num == 3:
return True
self.add_ignore_check_case(
teller2, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"Lite does not support '3 input tensors' on intel_openvino.")
def _teller3(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data"].shape)
if target_type not in [
TargetType.ARM, TargetType.Host, TargetType.X86,
TargetType.Metal, TargetType.OpenCL
]:
if len(in_x_shape) == 0:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=100)
if __name__ == "__main__":
unittest.main(argv=['']) | null |
find max ckpt | import os
from .util import get_ngpu
from .util import MAIN_ROOT
from .util import run_cmd
def METHOD_NAME(model_path):
max_ckpt = 0
for filename in os.listdir(model_path):
if filename.endswith('.pdz'):
files = filename[:-4]
a1, a2, it = files.split("_")
if int(it) > max_ckpt:
max_ckpt = int(it)
return max_ckpt
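# Hedged example (assumed filenames): with checkpoints named like
# "snapshot_iter_100.pdz" and "snapshot_iter_250.pdz" under model_path, the
# checkpoint scan above (METHOD_NAME) parses the trailing iteration numbers
# and returns 250.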
class FineTune:
def __init__(self):
self.now_file_path = os.path.dirname(__file__)
self.PYTHONPATH = os.path.join(MAIN_ROOT,
"examples/other/tts_finetune/tts3")
self.BIN_DIR = os.path.join(MAIN_ROOT,
"paddlespeech/t2s/exps/fastspeech2")
self.pretrained_model_dir = os.path.realpath(
"source/model/fastspeech2_aishell3_ckpt_1.1.0")
self.voc_model_dir = os.path.realpath(
"source/model/hifigan_aishell3_ckpt_0.2.0")
self.finetune_config = os.path.join("conf/tts3_finetune.yaml")
def finetune(self, input_dir, exp_dir='temp', epoch=100):
"""
        use commands following examples/other/tts_finetune/tts3/run.sh
"""
newdir_name = "newdir"
new_dir = os.path.join(input_dir, newdir_name)
mfa_dir = os.path.join(exp_dir, 'mfa_result')
dump_dir = os.path.join(exp_dir, 'dump')
output_dir = os.path.join(exp_dir, 'exp')
lang = "zh"
ngpu = get_ngpu()
cmd = f"""
# check oov
python3 {self.PYTHONPATH}/local/check_oov.py \
--input_dir={input_dir} \
--pretrained_model_dir={self.pretrained_model_dir} \
--newdir_name={newdir_name} \
--lang={lang}
# get mfa result
python3 {self.PYTHONPATH}/local/get_mfa_result.py \
--input_dir={new_dir} \
--mfa_dir={mfa_dir} \
--lang={lang}
# generate durations.txt
python3 {self.PYTHONPATH}/local/generate_duration.py \
--mfa_dir={mfa_dir}
# extract feature
python3 {self.PYTHONPATH}/local/extract_feature.py \
--duration_file="./durations.txt" \
--input_dir={new_dir} \
--dump_dir={dump_dir} \
--pretrained_model_dir={self.pretrained_model_dir}
# create finetune env
python3 {self.PYTHONPATH}/local/prepare_env.py \
--pretrained_model_dir={self.pretrained_model_dir} \
--output_dir={output_dir}
# finetune
python3 {self.PYTHONPATH}/local/finetune.py \
--pretrained_model_dir={self.pretrained_model_dir} \
--dump_dir={dump_dir} \
--output_dir={output_dir} \
--ngpu={ngpu} \
    --epoch={epoch} \
--finetune_config={self.finetune_config}
"""
print(cmd)
return run_cmd(cmd, exp_dir)
def synthesize(self, text, wav_name, out_wav_dir, exp_dir='temp'):
voc = "hifigan_aishell3"
dump_dir = os.path.join(exp_dir, 'dump')
output_dir = os.path.join(exp_dir, 'exp')
text_path = os.path.join(exp_dir, 'sentences.txt')
lang = "zh"
ngpu = get_ngpu()
model_path = f"{output_dir}/checkpoints"
ckpt = METHOD_NAME(model_path)
        # generate the corresponding sentence file (wav name plus the text to synthesize)
with open(text_path, "w", encoding='utf8') as f:
f.write(wav_name + " " + text)
cmd = f"""
FLAGS_allocator_strategy=naive_best_fit \
FLAGS_fraction_of_gpu_memory_to_use=0.01 \
python3 {self.BIN_DIR}/../synthesize_e2e.py \
--am=fastspeech2_aishell3 \
--am_config={self.pretrained_model_dir}/default.yaml \
--am_ckpt={output_dir}/checkpoints/snapshot_iter_{ckpt}.pdz \
--am_stat={self.pretrained_model_dir}/speech_stats.npy \
--voc={voc} \
--voc_config={self.voc_model_dir}/default.yaml \
--voc_ckpt={self.voc_model_dir}/snapshot_iter_2500000.pdz \
--voc_stat={self.voc_model_dir}/feats_stats.npy \
--lang={lang} \
--text={text_path} \
--output_dir={out_wav_dir} \
--phones_dict={dump_dir}/phone_id_map.txt \
--speaker_dict={dump_dir}/speaker_id_map.txt \
--spk_id=0 \
--ngpu={ngpu}
"""
out_path = os.path.join(out_wav_dir, f"{wav_name}.wav")
return run_cmd(cmd, out_path) | null |
test upgrade | """
Nest analysis results field
Revision ID: 7emq1brv0zz6
Date: 2022-06-09 20:38:11.017655
"""
import asyncio
import arrow
# Revision identifiers.
from pymongo import UpdateOne
from virtool_core.mongo import buffered_bulk_writer
from virtool.migration import MigrationContext
name = "Nest analysis results field"
created_at = arrow.get("2022-06-09 20:38:11.017655")
revision_id = "7emq1brv0zz6"
alembic_down_revision = "90bf491700cb"
virtool_down_revision = None
async def upgrade(ctx: MigrationContext):
"""
Move the ``subtracted_count`` and ``read_count`` fields from the document to the
``results`` sub-document.
This supports the new jobs API model where only a ``results`` field can be set on
the analysis document by a workflow job.
"""
# We are only interested in analyses that have a non-``None`` ``results`` field
# but no ``results.hits`` field, which indicates it is an older analysis.
query = {
"results": {"$ne": None, "$exists": True},
"results.hits": {"$exists": False},
}
async with buffered_bulk_writer(ctx.mongo.analyses) as writer:
async for document in ctx.mongo.analyses.find(query):
_id = document["_id"]
results = {"hits": document["results"]}
if document["workflow"] == "pathoscope_bowtie":
await writer.add(
UpdateOne(
{"_id": _id},
{
"$set": {
"results": {
**results,
"read_count": document["read_count"],
"subtracted_count": document["subtracted_count"],
}
},
"$unset": {"read_count": "", "subtracted_count": ""},
},
)
)
elif document["workflow"] == "nuvs":
await writer.add(
UpdateOne({"_id": _id}, {"$set": {"results": results}})
)
elif document["workflow"] == "aodp":
await writer.add(
UpdateOne(
{"_id": _id},
{
"$set": {
"results": {
**results,
"join_histogram": document["join_histogram"],
"joined_pair_count": document["joined_pair_count"],
"remainder_pair_count": document[
"remainder_pair_count"
],
}
},
"$unset": {
"join_histogram": "",
"joined_pair_count": "",
"remainder_pair_count": "",
},
},
)
)
if await ctx.mongo.analyses.count_documents(query) > 0:
raise Exception("Some analyses still have a non-nested results field")
async def METHOD_NAME(ctx: MigrationContext, snapshot):
await asyncio.gather(
ctx.mongo.analyses.delete_many({}),
ctx.mongo.analyses.insert_many(
[
{
"_id": "foo",
"read_count": 1209,
"results": [1, 2, 3, 4, 5],
"subtracted_count": 231,
"workflow": "pathoscope_bowtie",
},
{
"_id": "fine",
"results": {
"hits": [1, 2, 3, 4, 5],
"read_count": 1209,
"subtracted_count": 231,
},
"workflow": "pathoscope_bowtie",
},
{
"_id": "bar",
"read_count": 7982,
"results": [9, 8, 7, 6, 5],
"subtracted_count": 112,
"workflow": "pathoscope_bowtie",
},
{"_id": "baz", "results": [9, 8, 7, 6, 5], "workflow": "nuvs"},
{
"_id": "bad",
"join_histogram": [1, 2, 3, 4, 5],
"joined_pair_count": 12345,
"remainder_pair_count": 54321,
"results": [9, 8, 7, 6, 5],
"workflow": "aodp",
},
{
"_id": "missing",
"join_histogram": [1, 2, 3, 4, 5],
"joined_pair_count": 12345,
"remainder_pair_count": 54321,
"workflow": "aodp",
},
{
"_id": "none",
"join_histogram": [1, 2, 3, 4, 5],
"joined_pair_count": 12345,
"remainder_pair_count": 54321,
"results": None,
"workflow": "aodp",
},
]
),
)
await upgrade(ctx)
assert await ctx.mongo.analyses.find().to_list(None) == snapshot | null |
get | import typing
from collections.abc import MutableSet
from typing import List, Optional, Type, Union
from loguru import logger
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
PLUGIN_NAME = 'plex_watchlist'
SUPPORTED_IDS = ['imdb_id', 'tmdb_id', 'tvdb_id', 'plex_guid']
logger = logger.bind(name=PLUGIN_NAME)
if typing.TYPE_CHECKING:
from plexapi.myplex import MyPlexAccount
from plexapi.video import Movie, Show
def import_plexaccount() -> "Type[MyPlexAccount]":
try:
from plexapi.myplex import MyPlexAccount
return MyPlexAccount
except ImportError:
raise plugin.DependencyError('plex_watchlist', 'plexapi', 'plexapi package required')
def to_entry(plex_item: "Union[Movie, Show]") -> Entry:
entry = Entry(
title=f"{plex_item.title} ({plex_item.year})" if plex_item.year else plex_item.title,
url=plex_item.guid,
)
if plex_item.TYPE == 'movie':
entry['movie_name'] = plex_item.title
entry['movie_year'] = plex_item.year
elif plex_item.TYPE == 'show':
entry['series_name'] = plex_item.title
entry['series_year'] = plex_item.year
entry.update(get_supported_ids_from_plex_object(plex_item))
return entry
def get_supported_ids_from_plex_object(plex_item):
ids = {'plex_guid': plex_item.guid}
for guid in plex_item.guids:
x = guid.id.split("://")
try:
value = int(x[1])
except ValueError:
value = x[1]
media_id = f'{x[0]}_id'
if media_id in SUPPORTED_IDS:
ids[media_id] = value
return ids
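# Hedged illustration of get_supported_ids_from_plex_object (made-up guid
# strings): an item whose .guids contain "imdb://tt0133093" and "tmdb://603"
# maps to {"plex_guid": item.guid, "imdb_id": "tt0133093", "tmdb_id": 603},
# since numeric id strings are cast to int and non-numeric ones kept as str.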
class VideoStub:
guid: str
title: str
# plexapi objects are built from XML, so we create a simple stub that works for watchlist calls
def to_plex_item(entry):
item = VideoStub()
item.guid = entry['plex_guid']
item.title = entry['title']
return item
class PlexManagedWatchlist(MutableSet):
def __init__(
self,
username: Optional[str] = None,
password: Optional[str] = None,
token: Optional[str] = None,
filter: Optional[str] = None,
type: Optional[str] = None,
):
self.username = username
self.password = password
self.token = token
self.type = type
self.filter = filter
self._items: Optional[List[Entry]] = None
self._account: Optional[MyPlexAccount] = None
@property
def account(self) -> "MyPlexAccount":
MyPlexAccount = import_plexaccount()
if self._account is None:
self._account = MyPlexAccount(self.username, self.password, self.token)
return self._account
@property
def items(self) -> List[Entry]:
if self._items is None:
watchlist = self.account.watchlist(filter=self.filter, libtype=self.type)
self._items = []
for item in watchlist:
self._items.append(to_entry(item))
return self._items
def __iter__(self):
return iter(self.items)
def __len__(self) -> int:
return len(self.items)
def __contains__(self, entry) -> bool:
return self._find_entry(entry) is not None
def METHOD_NAME(self, entry) -> Optional[Entry]:
return self._find_entry(entry)
def add(self, entry: Entry) -> None:
item = None
if 'plex_guid' in entry:
item = to_plex_item(entry)
else:
logger.debug('Searching for {} with discover', entry['title'])
results = self.account.searchDiscover(entry['title'], libtype=self.type)
matched_entry = self._match_entry(entry, [to_entry(result) for result in results])
if matched_entry:
item = to_plex_item(matched_entry)
if item:
if self.account.onWatchlist(item):
logger.debug(f'"{item.title}" is already on the watchlist')
return
logger.debug(f'Adding "{item.title}" to the watchlist')
self.account.addToWatchlist(item)
def discard(self, entry) -> None:
entry = self._find_entry(entry)
if entry:
item = to_plex_item(entry)
logger.debug('Removing {} from watchlist', entry['title'])
self.account.removeFromWatchlist(item)
@property
def online(self) -> bool:
return True
@property
def immutable(self):
return False
def _find_entry(self, entry):
return self._match_entry(entry, self.items)
def _match_entry(self, entry: Entry, entries: List[Entry]):
for item in entries:
# match on supported ids
if any(entry.METHOD_NAME(id) is not None and entry[id] == item[id] for id in SUPPORTED_IDS):
return item
name = entry.METHOD_NAME('movie_name', None) or entry.METHOD_NAME('series_name', None)
year = entry.METHOD_NAME('movie_year', None) or entry.METHOD_NAME('series_year', None)
_name = item.METHOD_NAME('movie_name', None) or item.METHOD_NAME('series_name', None)
_year = item.METHOD_NAME('movie_year', None) or item.METHOD_NAME('series_year', None)
if (name and year) and (_name == name and _year == year):
return item
# title matching sucks but lets try as last resort
if entry.METHOD_NAME('title').lower() == item['title'].lower():
return item
class PlexWatchlist:
schema = {
'properties': {
'username': {'type': 'string'},
'password': {'type': 'string'},
'token': {'type': 'string'},
'type': {'type': 'string', 'enum': ['movie', 'show']},
'filter': {'type': 'string', 'enum': ['available', 'released']},
},
'anyOf': [{'required': ['token']}, {'required': ['username', 'password']}],
}
@plugin.priority(plugin.PRIORITY_FIRST)
def on_task_start(self, task, config):
import_plexaccount()
def get_list(self, config):
return PlexManagedWatchlist(**config)
@plugin.internet(logger)
def on_task_input(self, task, config):
yaml_list = PlexManagedWatchlist(**config)
yield from yaml_list
@event('plugin.register')
def register_plugin():
plugin.register(PlexWatchlist, PLUGIN_NAME, api_ver=2, interfaces=['task', 'list']) | null |
test reg class | """Tests for distutils.msvc9compiler."""
import sys
import unittest
import os
from distutils.errors import DistutilsPlatformError
from distutils.tests import support
from test.test_support import run_unittest
# A manifest with the only assembly reference being the msvcrt assembly, so
# should have the assembly completely stripped. Note that although the
# manifest has a <security> reference, the assembly is removed - that is
# currently a "feature", not a bug :)
_MANIFEST_WITH_ONLY_MSVC_REFERENCE = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.CRT"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX">
</assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
# A manifest with references to assemblies other than msvcrt. When processed,
# this assembly should be returned with just the msvcrt part removed.
_MANIFEST_WITH_MULTIPLE_REFERENCES = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.CRT"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX">
</assemblyIdentity>
</dependentAssembly>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.MFC"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
_CLEANED_MANIFEST = """\
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1"
manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
</dependency>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.MFC"
version="9.0.21022.8" processorArchitecture="x86"
publicKeyToken="XXXX"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>"""
if sys.platform=="win32":
from distutils.msvccompiler import get_build_version
if get_build_version()>=8.0:
SKIP_MESSAGE = None
else:
SKIP_MESSAGE = "These tests are only for MSVC8.0 or above"
else:
SKIP_MESSAGE = "These tests are only for win32"
@unittest.skipUnless(SKIP_MESSAGE is None, SKIP_MESSAGE)
class msvc9compilerTestCase(support.TempdirManager,
unittest.TestCase):
def test_no_compiler(self):
# makes sure query_vcvarsall raises
# a DistutilsPlatformError if the compiler
# is not found
from distutils.msvc9compiler import query_vcvarsall
def _find_vcvarsall(version):
return None
from distutils import msvc9compiler
old_find_vcvarsall = msvc9compiler.find_vcvarsall
msvc9compiler.find_vcvarsall = _find_vcvarsall
try:
self.assertRaises(DistutilsPlatformError, query_vcvarsall,
'wont find this version')
finally:
msvc9compiler.find_vcvarsall = old_find_vcvarsall
def METHOD_NAME(self):
from distutils.msvc9compiler import Reg
self.assertRaises(KeyError, Reg.get_value, 'xxx', 'xxx')
        # looking for values that should exist on all
        # Windows registry versions.
path = r'Control Panel\Desktop'
v = Reg.get_value(path, u'dragfullwindows')
self.assertTrue(v in (u'0', u'1', u'2'))
import _winreg
HKCU = _winreg.HKEY_CURRENT_USER
keys = Reg.read_keys(HKCU, 'xxxx')
self.assertEqual(keys, None)
keys = Reg.read_keys(HKCU, r'Control Panel')
self.assertTrue('Desktop' in keys)
def test_remove_visual_c_ref(self):
from distutils.msvc9compiler import MSVCCompiler
tempdir = self.mkdtemp()
manifest = os.path.join(tempdir, 'manifest')
f = open(manifest, 'w')
try:
f.write(_MANIFEST_WITH_MULTIPLE_REFERENCES)
finally:
f.close()
compiler = MSVCCompiler()
compiler._remove_visual_c_ref(manifest)
# see what we got
f = open(manifest)
try:
# removing trailing spaces
content = '\n'.join([line.rstrip() for line in f.readlines()])
finally:
f.close()
# makes sure the manifest was properly cleaned
self.assertEqual(content, _CLEANED_MANIFEST)
def test_remove_entire_manifest(self):
from distutils.msvc9compiler import MSVCCompiler
tempdir = self.mkdtemp()
manifest = os.path.join(tempdir, 'manifest')
f = open(manifest, 'w')
try:
f.write(_MANIFEST_WITH_ONLY_MSVC_REFERENCE)
finally:
f.close()
compiler = MSVCCompiler()
got = compiler._remove_visual_c_ref(manifest)
self.assertIs(got, None)
def test_suite():
return unittest.makeSuite(msvc9compilerTestCase)
if __name__ == "__main__":
run_unittest(test_suite()) | null |
act l 3191 | """
https://github.com/csesoc/Circles/wiki/Manual-Fixes-to-Course-Prerequisites
Copy this into a new file for the relevant faculty's fixes:
e.g. COMPFixes.py, ACCTFixes.py, PSYCFixes.py
Apply manual ACTL fixes to processed conditions in conditionsProcessed.json so
that they can be fed into algorithms.
If you make a mistake and need to regenerate conditionsProcessed.json, then you
can run:
python3 -m data.processors.conditionsPreprocessing
To then run this file:
python3 -m data.processors.manualFixes.ACTLFixes
"""
from data.utility import data_helpers
# Reads conditionsProcessed dictionary into 'CONDITIONS'
CONDITIONS = data_helpers.read_data("data/final_data/conditionsProcessed.json")
PROCESSED = "processed"
# Reads coursesProcessed dictionary into 'COURSES' (for updating exclusions)
COURSES = data_helpers.read_data("data/final_data/coursesProcessed.json")
def fix_conditions():
""" Functions to apply manual fixes """
CONDITIONS["ACTL1101"][PROCESSED] = ACTL_1101()
CONDITIONS["ACTL2101"][PROCESSED] = ACTL_2101()
CONDITIONS["ACTL2102"][PROCESSED] = ACTL_2102()
CONDITIONS["ACTL2111"][PROCESSED] = ACTL_2111()
CONDITIONS["ACTL3142"][PROCESSED] = ACTL_3142()
CONDITIONS["ACTL3162"][PROCESSED] = ACTL_3162()
CONDITIONS["ACTL3191"][PROCESSED] = METHOD_NAME()
CONDITIONS["ACTL3192"] = ACTL_3192(CONDITIONS["ACTL3192"])
CONDITIONS["ACTL3202"][PROCESSED] = ACTL_3202()
CONDITIONS["ACTL3303"][PROCESSED] = ACTL_3303()
CONDITIONS["ACTL4001"] = ACTL_4001(CONDITIONS["ACTL4001"])
CONDITIONS["ACTL4003"][PROCESSED] = ACTL_4003()
# Updates the files with the modified dictionaries
data_helpers.write_data(
CONDITIONS, "data/final_data/conditionsProcessed.json")
data_helpers.write_data(COURSES, "data/final_data/coursesProcessed.json")
def ACTL_1101():
"""
"original": "Prerequisite: MATH1151 AND in Actuarial Studies programs.<br/><br/>",
"processed": "MATH1151 && in Actuarial Studies programs"
"""
return "MATH1151 && ACTL#"
def ACTL_2101():
"""
"original": "Prerequisite:Enrolment in Program 3587<br/><br/>",
"processed": "Enrolment in Program 3587"
"""
return "3587"
def ACTL_2102():
"""
"original": "Pre-requisite: (ACTL2131 or MATH2901) and in Actuarial single or dual degrees.<br/><br/>",
"processed": "(ACTL2131 || MATH2901) && in Actuarial single || dual degrees"
"""
return "(ACTL2131 || MATH2901) && ACTL#"
def ACTL_2111():
"""
"original": "Pre-requsite: MATH1251 AND (ACTL1101 OR in MATHE1, MATHM1 or MATHT1 majors)<br/><br/>",
"processed": "MATH1251 && (ACTL1101 || in MATHE1 || MATHM1 || MATHT1 majors)"
"""
return "MATH1251 && (ACTL1101 || (MATHE1 || MATHM1 || MATHT1))"
def ACTL_3142():
"""
"original": "Pre-requisite: ACTL2131 OR (MATH2931 and in B. Data Science and Decisions (3959)) OR (MATH2901 and MATH2931)<br/><br/>",
"processed": "ACTL2131 || (MATH2931 && in B. Data Science && Decisions (3959)) || (MATH2901 && MATH2931)"
"""
return "ACTL2131 || (MATH2931 && 3959) || (MATH2901 && MATH2931)"
def ACTL_3162():
"""
"original": "Pre-requisite: ACTL2102 or (MATH2901 AND MATHE1, MATHM1 or MATHT1 major)<br/><br/>",
"processed": "ACTL2102 || (MATH2901 && MATHE1 || MATHM1 || MATHT1 major)"
"""
return "ACTL2102 || (MATH2901 && (MATHE1 || MATHM1 || MATHT1))"
def METHOD_NAME():
"""
"original": "Pre-requisites: ECON2101 or (ECON1101 and ACTL1101) or (completed at least 84UOC and enrolled in a Commerce Program).<br/><br/>",
"processed": "ECON2101 || (ECON1101 && ACTL1101) || ( 84UOC && a Commerce Program)"
"""
return "ECON2101 || (ECON1101 && ACTL1101) || (84UOC && COMM#)"
def ACTL_3192(condition):
"""
"original": "Pre-requisites: ECON2101 or (ECON1101 and ACTL1101) or (completed at least 84UOC and enrolled in a Commerce Program) and be in good academic standing.<br/><br/>",
"processed": "ECON2101 || (ECON1101 && ACTL1101) || ( 84UOC && a Commerce Program) && be in good academic standing"
"""
return {
"original": condition["original"],
"processed": "ECON2101 || (ECON1101 && ACTL1101) || (84UOC && COMM#)",
"handbook_note": "You must be in good academic standing to enroll in this course."
}
def ACTL_3202():
"""
"original": "Prerequisite: ACTL2101 and enrolment in program 3587<br/><br/>",
"processed": "ACTL2101 && enrolment in program 3587"
"""
return "ACTL2101 && 3587"
def ACTL_3303():
"""
"original": "Prerequisite: ACTL3202 and enrolment in program 3587<br/><br/>",
"processed": "ACTL3202 && enrolment in program 3587"
"""
return "ACTL3202 && 3587"
def ACTL_4001(condition):
"""
"original": "Pre-requisite: ACCT1511 or COMM1140, ACTL3141, ACTL3182, FINS1613 or COMM1180, ACTL3162, ACTL3151, ECON1102, 60+ WAM.<br/>Note: Students in 3587 may complete ACTL3141 as a co-requisite<br/><br/>",
"processed": "ACCT1511 || COMM1140 || ACTL3141 || ACTL3182 || FINS1613 || COMM1180, ACTL3162, ACTL3151, ECON1102, 60WAM. Note: in 3587 may complete ACTL3141 as a []"
"""
return {
"original": condition["original"],
"processed": "(ACCT1511 || COMM1140) && ACTL3141 && ACTL3182 && (FINS1613 || COMM1180) && ACTL3162 && ACTL3151 && ECON1102 && 60WAM",
"handbook_note": "Students in 3587 may complete ACTL3141 as a co-requisite."
}
def ACTL_4003():
"""
"original": "Students must be in Actuarial Studies (Honours).<br/><br/>",
"processed": "must be in Actuarial Studies (Honours)"
"""
return "4520"
if __name__ == "__main__":
fix_conditions() | null |
override vddsdio | # SPDX-FileCopyrightText: 2014-2022 Fredrik Ahlberg, Angus Gratton,
# Espressif Systems (Shanghai) CO LTD, other contributors as noted.
#
# SPDX-License-Identifier: GPL-2.0-or-later
from ..loader import ESPLoader
from ..util import FatalError, NotImplementedInROMError
class ESP8266ROM(ESPLoader):
"""Access class for ESP8266 ROM bootloader"""
CHIP_NAME = "ESP8266"
IS_STUB = False
CHIP_DETECT_MAGIC_VALUE = [0xFFF0C101]
# OTP ROM addresses
ESP_OTP_MAC0 = 0x3FF00050
ESP_OTP_MAC1 = 0x3FF00054
ESP_OTP_MAC3 = 0x3FF0005C
SPI_REG_BASE = 0x60000200
SPI_USR_OFFS = 0x1C
SPI_USR1_OFFS = 0x20
SPI_USR2_OFFS = 0x24
SPI_MOSI_DLEN_OFFS = None
SPI_MISO_DLEN_OFFS = None
SPI_W0_OFFS = 0x40
UART_CLKDIV_REG = 0x60000014
XTAL_CLK_DIVIDER = 2
FLASH_SIZES = {
"512KB": 0x00,
"256KB": 0x10,
"1MB": 0x20,
"2MB": 0x30,
"4MB": 0x40,
"2MB-c1": 0x50,
"4MB-c1": 0x60,
"8MB": 0x80,
"16MB": 0x90,
}
FLASH_FREQUENCY = {
"80m": 0xF,
"40m": 0x0,
"26m": 0x1,
"20m": 0x2,
}
BOOTLOADER_FLASH_OFFSET = 0
MEMORY_MAP = [
[0x3FF00000, 0x3FF00010, "DPORT"],
[0x3FFE8000, 0x40000000, "DRAM"],
[0x40100000, 0x40108000, "IRAM"],
[0x40201010, 0x402E1010, "IROM"],
]
def get_efuses(self):
# Return the 128 bits of ESP8266 efuse as a single Python integer
result = self.read_reg(0x3FF0005C) << 96
result |= self.read_reg(0x3FF00058) << 64
result |= self.read_reg(0x3FF00054) << 32
result |= self.read_reg(0x3FF00050)
return result
def _get_flash_size(self, efuses):
# rX_Y = EFUSE_DATA_OUTX[Y]
r0_4 = (efuses & (1 << 4)) != 0
r3_25 = (efuses & (1 << 121)) != 0
r3_26 = (efuses & (1 << 122)) != 0
r3_27 = (efuses & (1 << 123)) != 0
if r0_4 and not r3_25:
if not r3_27 and not r3_26:
return 1
elif not r3_27 and r3_26:
return 2
if not r0_4 and r3_25:
if not r3_27 and not r3_26:
return 2
elif not r3_27 and r3_26:
return 4
return -1
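    # Hedged reading of _get_flash_size's efuse bits above: r0_4 set with
    # r3_26 set and r3_27 clear decodes to 2 (MB), which
    # get_chip_description() maps to an ESP8285*16 part name; unrecognised
    # combinations fall through to -1.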
def get_chip_description(self):
efuses = self.get_efuses()
is_8285 = (
efuses & ((1 << 4) | 1 << 80)
) != 0 # One or the other efuse bit is set for ESP8285
if is_8285:
flash_size = self._get_flash_size(efuses)
max_temp = (
efuses & (1 << 5)
) != 0 # This efuse bit identifies the max flash temperature
chip_name = {
1: "ESP8285H08" if max_temp else "ESP8285N08",
2: "ESP8285H16" if max_temp else "ESP8285N16",
}.get(flash_size, "ESP8285")
return chip_name
return "ESP8266EX"
def get_chip_features(self):
features = ["WiFi"]
if "ESP8285" in self.get_chip_description():
features += ["Embedded Flash"]
return features
def flash_spi_attach(self, hspi_arg):
if self.IS_STUB:
super(ESP8266ROM, self).flash_spi_attach(hspi_arg)
else:
# ESP8266 ROM has no flash_spi_attach command in serial protocol,
# but flash_begin will do it
self.flash_begin(0, 0)
def flash_set_parameters(self, size):
# not implemented in ROM, but OK to silently skip for ROM
if self.IS_STUB:
super(ESP8266ROM, self).flash_set_parameters(size)
def chip_id(self):
"""
Read Chip ID from efuse - the equivalent of the SDK system_get_chip_id() func
"""
id0 = self.read_reg(self.ESP_OTP_MAC0)
id1 = self.read_reg(self.ESP_OTP_MAC1)
return (id0 >> 24) | ((id1 & 0xFFFFFF) << 8)
def read_mac(self):
"""Read MAC from OTP ROM"""
mac0 = self.read_reg(self.ESP_OTP_MAC0)
mac1 = self.read_reg(self.ESP_OTP_MAC1)
mac3 = self.read_reg(self.ESP_OTP_MAC3)
if mac3 != 0:
oui = ((mac3 >> 16) & 0xFF, (mac3 >> 8) & 0xFF, mac3 & 0xFF)
elif ((mac1 >> 16) & 0xFF) == 0:
oui = (0x18, 0xFE, 0x34)
elif ((mac1 >> 16) & 0xFF) == 1:
oui = (0xAC, 0xD0, 0x74)
else:
raise FatalError("Unknown OUI")
return oui + ((mac1 >> 8) & 0xFF, mac1 & 0xFF, (mac0 >> 24) & 0xFF)
def get_erase_size(self, offset, size):
"""Calculate an erase size given a specific size in bytes.
Provides a workaround for the bootloader erase bug."""
sectors_per_block = 16
sector_size = self.FLASH_SECTOR_SIZE
num_sectors = (size + sector_size - 1) // sector_size
start_sector = offset // sector_size
head_sectors = sectors_per_block - (start_sector % sectors_per_block)
if num_sectors < head_sectors:
head_sectors = num_sectors
if num_sectors < 2 * head_sectors:
return (num_sectors + 1) // 2 * sector_size
else:
return (num_sectors - head_sectors) * sector_size
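    # Hedged worked example for get_erase_size (assuming the base loader's
    # 4 KiB sector size): get_erase_size(offset=0x1000, size=0x11000) gives
    # num_sectors=17, start_sector=1, head_sectors=15; since 17 < 2 * 15 the
    # method reports (17 + 1) // 2 * 0x1000 = 0x9000, a deliberately smaller
    # erase size that works around the bootloader erase bug noted above.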
def METHOD_NAME(self, new_voltage):
raise NotImplementedInROMError(
"Overriding VDDSDIO setting only applies to ESP32"
)
class ESP8266StubLoader(ESP8266ROM):
"""Access class for ESP8266 stub loader, runs on top of ROM."""
FLASH_WRITE_SIZE = 0x4000 # matches MAX_WRITE_BLOCK in stub_loader.c
IS_STUB = True
def __init__(self, rom_loader):
self.secure_download_mode = rom_loader.secure_download_mode
self._port = rom_loader._port
self._trace_enabled = rom_loader._trace_enabled
self.cache = rom_loader.cache
self.flush_input() # resets _slip_reader
def get_erase_size(self, offset, size):
return size # stub doesn't have same size bug as ROM loader
ESP8266ROM.STUB_CLASS = ESP8266StubLoader | null |
get container registry | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetContainerRegistryResult',
'AwaitableGetContainerRegistryResult',
'get_container_registry',
'get_container_registry_output',
]
@pulumi.output_type
class GetContainerRegistryResult:
"""
Container registry resource payload.
"""
def __init__(__self__, id=None, name=None, properties=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource Id for the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ContainerRegistryPropertiesResponse':
"""
Properties of the container registry resource payload.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetContainerRegistryResult(GetContainerRegistryResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetContainerRegistryResult(
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
type=self.type)
def METHOD_NAME(container_registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContainerRegistryResult:
"""
Get the container registries resource.
:param str container_registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
__args__ = dict()
__args__['containerRegistryName'] = container_registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:appplatform/v20230501preview:getContainerRegistry', __args__, opts=opts, typ=GetContainerRegistryResult).value
return AwaitableGetContainerRegistryResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_container_registry_output(container_registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetContainerRegistryResult]:
"""
Get the container registries resource.
:param str container_registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str service_name: The name of the Service resource.
"""
... | null |
spark dbfs datasource | from __future__ import annotations
import logging
import os
import pathlib
import re
from typing import TYPE_CHECKING, cast
import boto3
import botocore
import pytest
from great_expectations.core.util import DBFSPath
from great_expectations.datasource.fluent import SparkDBFSDatasource
from great_expectations.datasource.fluent.data_asset.data_connector import (
DBFSDataConnector,
)
from great_expectations.datasource.fluent.file_path_data_asset import (
_FilePathDataAsset,
)
from great_expectations.datasource.fluent.interfaces import TestConnectionError
from great_expectations.datasource.fluent.spark_file_path_datasource import (
CSVAsset,
)
from tests.test_utils import create_files_in_directory
if TYPE_CHECKING:
from pyfakefs.fake_filesystem import FakeFilesystem
logger = logging.getLogger(__file__)
@pytest.fixture
def METHOD_NAME(fs: FakeFilesystem, test_backends) -> SparkDBFSDatasource:
if "SparkDFDataset" not in test_backends:
pytest.skip("No spark backend selected.")
# Copy boto modules into fake filesystem (see https://github.com/spulec/moto/issues/1682#issuecomment-645016188)
for module in [boto3, botocore]:
module_dir = pathlib.Path(module.__file__).parent
fs.add_real_directory(module_dir, lazy_read=False)
# Copy google credentials into fake filesystem if they exist on your filesystem
google_cred_file = os.getenv("GOOGLE_APPLICATION_CREDENTIALS")
if google_cred_file:
fs.add_real_file(google_cred_file)
base_directory: str = "/dbfs/great_expectations"
fs.create_dir(base_directory)
create_files_in_directory(
directory=base_directory,
file_name_list=[
"alex_20200809_1000.csv",
"eugene_20200809_1500.csv",
"james_20200811_1009.csv",
"abe_20200809_1040.csv",
"will_20200809_1002.csv",
"james_20200713_1567.csv",
"eugene_20201129_1900.csv",
"will_20200810_1001.csv",
"james_20200810_1003.csv",
"alex_20200819_1300.csv",
],
)
return SparkDBFSDatasource( # type: ignore[call-arg]
name="spark_dbfs_datasource",
base_directory=pathlib.Path(base_directory),
)
@pytest.fixture
def csv_asset(METHOD_NAME: SparkDBFSDatasource) -> _FilePathDataAsset:
asset = METHOD_NAME.add_csv_asset(
name="csv_asset",
batching_regex=r"(?P<name>.+)_(?P<timestamp>.+)_(?P<price>\d{4})\.csv",
)
return asset
@pytest.fixture
def bad_regex_config(csv_asset: CSVAsset) -> tuple[re.Pattern, str]:
regex = re.compile(
r"(?P<name>.+)_(?P<ssn>\d{9})_(?P<timestamp>.+)_(?P<price>\d{4})\.csv"
)
data_connector: DBFSDataConnector = cast(
DBFSDataConnector, csv_asset._data_connector
)
test_connection_error_message = f"""No file at base_directory path "{data_connector._base_directory.resolve()}" matched regular expressions pattern "{data_connector._batching_regex.pattern}" and/or glob_directive "**/*" for DataAsset "csv_asset"."""
return regex, test_connection_error_message
@pytest.mark.spark
def test_construct_spark_dbfs_datasource(METHOD_NAME: SparkDBFSDatasource):
assert METHOD_NAME.name == "spark_dbfs_datasource"
@pytest.mark.spark
def test_add_csv_asset_to_datasource(METHOD_NAME: SparkDBFSDatasource):
asset_specified_metadata = {"asset_level_metadata": "my_metadata"}
asset = METHOD_NAME.add_csv_asset(
name="csv_asset",
batching_regex=r"(.+)_(.+)_(\d{4})\.csv",
batch_metadata=asset_specified_metadata,
)
assert asset.name == "csv_asset"
assert asset.batching_regex.match("random string") is None
assert asset.batching_regex.match("alex_20200819_13D0.csv") is None
m1 = asset.batching_regex.match("alex_20200819_1300.csv")
assert m1 is not None
assert asset.batch_metadata == asset_specified_metadata
@pytest.mark.unit
def test_construct_csv_asset_directly():
# noinspection PyTypeChecker
asset = CSVAsset(
name="csv_asset",
batching_regex=r"(.+)_(.+)_(\d{4})\.csv",
)
assert asset.name == "csv_asset"
assert asset.batching_regex.match("random string") is None
assert asset.batching_regex.match("alex_20200819_13D0.csv") is None
m1 = asset.batching_regex.match("alex_20200819_1300.csv")
assert m1 is not None
@pytest.mark.spark
@pytest.mark.xfail(
reason="Accessing objects on pyfakefs.fake_filesystem.FakeFilesystem using Spark is not working (this test is conducted using Jupyter notebook manually)."
)
def test_get_batch_list_from_fully_specified_batch_request(
METHOD_NAME: SparkDBFSDatasource,
):
asset_specified_metadata = {"asset_level_metadata": "my_metadata"}
asset = METHOD_NAME.add_csv_asset(
name="csv_asset",
batching_regex=r"(?P<name>.+)_(?P<timestamp>.+)_(?P<price>\d{4})\.csv",
batch_metadata=asset_specified_metadata,
)
request = asset.build_batch_request(
{"name": "alex", "timestamp": "20200819", "price": "1300"}
)
batches = asset.get_batch_list_from_batch_request(request)
assert len(batches) == 1
batch = batches[0]
assert batch.batch_request.datasource_name == METHOD_NAME.name
assert batch.batch_request.data_asset_name == asset.name
assert batch.batch_request.options == (
"name",
"timestamp",
"price",
"path",
)
assert batch.metadata == {
"path": "alex_20200819_1300.csv",
"name": "alex",
"timestamp": "20200819",
"price": "1300",
**asset_specified_metadata,
}
assert (
batch.id
== "spark_dbfs_datasource-csv_asset-name_alex-timestamp_20200819-price_1300"
)
request = asset.build_batch_request({"name": "alex"})
batches = asset.get_batch_list_from_batch_request(request)
assert len(batches) == 2
@pytest.mark.spark
def test_test_connection_failures(
METHOD_NAME: SparkDBFSDatasource,
bad_regex_config: tuple[re.Pattern, str],
):
regex, test_connection_error_message = bad_regex_config
csv_asset = CSVAsset(
name="csv_asset",
batching_regex=regex,
)
csv_asset._datasource = METHOD_NAME
METHOD_NAME.assets = [
csv_asset,
]
csv_asset._data_connector = DBFSDataConnector(
datasource_name=METHOD_NAME.name,
data_asset_name=csv_asset.name,
batching_regex=re.compile(regex),
base_directory=METHOD_NAME.base_directory,
data_context_root_directory=METHOD_NAME.data_context_root_directory,
glob_directive="*.csv",
file_path_template_map_fn=DBFSPath.convert_to_protocol_version,
)
csv_asset._test_connection_error_message = test_connection_error_message
with pytest.raises(TestConnectionError) as e:
METHOD_NAME.test_connection()
assert str(e.value) == str(test_connection_error_message) | null |
get padding | import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d
from torch.nn.utils import weight_norm, remove_weight_norm
LRELU_SLOPE = 0.1
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def METHOD_NAME(kernel_size, dilation=1):
return (kernel_size * dilation - dilation) // 2
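# Quick sanity check of the helper above: METHOD_NAME(3, 1) == 1 and
# METHOD_NAME(7, 1) == 3, i.e. the "same" padding that keeps the sequence length
# unchanged for a stride-1 (dilated) convolution; padding=3 is exactly what the
# kernel-size-7 conv_pre/conv_post layers below use.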
class ResBlock(torch.nn.Module):
def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock, self).__init__()
self.convs1 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[0],
padding=METHOD_NAME(kernel_size, dilation[0]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[1],
padding=METHOD_NAME(kernel_size, dilation[1]),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=dilation[2],
padding=METHOD_NAME(kernel_size, dilation[2]),
)
),
]
)
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList(
[
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=METHOD_NAME(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=METHOD_NAME(kernel_size, 1),
)
),
weight_norm(
Conv1d(
channels,
channels,
kernel_size,
1,
dilation=1,
padding=METHOD_NAME(kernel_size, 1),
)
),
]
)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for layer in self.convs1:
remove_weight_norm(layer)
for layer in self.convs2:
remove_weight_norm(layer)
class Generator(torch.nn.Module):
def __init__(self, cfg):
super(Generator, self).__init__()
self.num_kernels = len(cfg["resblock_kernel_sizes"])
self.num_upsamples = len(cfg["upsample_rates"])
self.conv_pre = weight_norm(
Conv1d(
cfg.get("model_in_dim", 80),
cfg["upsample_initial_channel"],
7,
1,
padding=3,
)
)
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(
zip(cfg["upsample_rates"], cfg["upsample_kernel_sizes"])
):
self.ups.append(
weight_norm(
ConvTranspose1d(
cfg["upsample_initial_channel"] // (2 ** i),
cfg["upsample_initial_channel"] // (2 ** (i + 1)),
k,
u,
padding=(k - u) // 2,
)
)
)
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = cfg["upsample_initial_channel"] // (2 ** (i + 1))
for k, d in zip(
cfg["resblock_kernel_sizes"], cfg["resblock_dilation_sizes"]
):
self.resblocks.append(ResBlock(ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print("Removing weight norm...")
for layer in self.ups:
remove_weight_norm(layer)
for layer in self.resblocks:
layer.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post) | null |
create score model set | import json
import os
import pickle
from re import finditer
import pandas
import pandas as pd
from KGToolbox.Tools import MakeIndex
from Marie.Util.Web.SPARQLWarehouse import ONTOKIN_ALL_PROPERTIES_ALL_SPECIES
from Marie.Util.location import DATA_DIR
from KGToolbox.Utils import query_blazegraph
"""
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasDipoleMoment>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasDipoleMomentUnits>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasLennardJonesDiameter>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasLennardJonesDiameterUnits>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasLennardJonesWellDepth>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasLennardJonesWellDepthUnits>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasPolarizability>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasPolarizabilityUnits>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasRotationalRelaxationCollisionNumber>
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasRotationalRelaxationCollisionNumberUnits> -
<http://www.theworldavatar.com/ontology/ontokin/OntoKin.owl#hasSpeciesGeometry>
"""
def camel_case_split(identifier):
matches = finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)', identifier)
return [m.group(0) for m in matches]
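# Example: camel_case_split("DipoleMoment") -> ["Dipole", "Moment"], and
# camel_case_split("RotationalRelaxationCollisionNumber") splits into
# ["Rotational", "Relaxation", "Collision", "Number"].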
class OntoKinReader:
def __init__(self):
self.dataset_path = os.path.join(DATA_DIR, 'CrossGraph', 'ontokin')
self.entity2idx = None
self.relation2idx = None
def query_all_species(self):
        # Use one query to find all the properties of all species under the type species.
short_species_list = []
triples = []
tmp = []
value_dictionary = {}
rst = query_blazegraph(query=ONTOKIN_ALL_PROPERTIES_ALL_SPECIES, namespace="ontokin")
non_attributes = ['species', 'label', 'transport']
heads = [h for h in rst['head']['vars'] if h not in non_attributes and '_unit' not in h]
unique_labels = []
for r in rst['results']['bindings']:
row = []
species = r['species']['value'] # .split('/')[-1]
transport = r['transport']['value']
label = r['label']['value']
if "#" in species:
short_species = species.split('#')[-1]
else:
short_species = species.split('/')[-1]
# filter, only put findable species ...
counter = 0
if label not in unique_labels:
short_species_list.append(short_species)
counter += 1
print(f"number of selected iris", counter)
row.append(species)
row.append(label)
for head in heads:
if head in r:
data = r[head]['value']
else:
data = "EMPTY"
new_node = head + '_' + short_species
row.append(new_node)
if head + '_unit' in r:
data_unit = r[head + '_unit']['value']
value_dictionary[new_node] = data + ' ' + data_unit
else:
# insert a new node, with part of the species and the relations
value_dictionary[new_node] = data
triples.append((short_species, head + '_latent', new_node))
tmp.append(row)
unique_labels.append(label)
print('number of unique labels', len(unique_labels))
df_all_species = pd.DataFrame(tmp)
df_all_species.columns = ['species', 'label'] + heads
df_all_species.to_csv(os.path.join(self.dataset_path, 'all_species.tsv'), sep='\t')
with open(os.path.join(self.dataset_path, 'value_dict.json'), 'w') as f:
f.write(json.dumps(value_dictionary))
f.close()
df_triples = pd.DataFrame(triples)
df_triples.to_csv(os.path.join(self.dataset_path, 'ontokin-train.txt'), sep='\t', index=False, header=False)
df_test = df_triples.sample(frac=0.2)
df_test.to_csv(os.path.join(self.dataset_path, 'ontokin-test.txt'), sep='\t', index=False, header=False)
def run(self):
self.query_all_species()
data_folder = 'CrossGraph/ontokin'
MakeIndex.create_indexing(data_dir=data_folder, dataset_name="ontokin")
e2i_path = open(os.path.join(DATA_DIR, f'{data_folder}/entity2idx.pkl'), 'rb')
r2i_path = open(os.path.join(DATA_DIR, f'{data_folder}/relation2idx.pkl'), 'rb')
_full_dir = os.path.join(DATA_DIR, f'{data_folder}')
self.entity2idx = pickle.load(e2i_path)
self.relation2idx = pickle.load(r2i_path)
self.METHOD_NAME()
def METHOD_NAME(self):
property_dict = {"SpeciesGeometry_latent": "geometry",
"RotationalRelaxationCollisionNumber_latent": "rotational relaxation collision number",
"Polarizability_latent": "polarizability",
"LennardJonesDiameter_latent": "lennard jones diameter",
"LennardJonesWellDepth_latent": "lennard jones well depth",
"DipoleMoment_latent": "dipole moment"
}
df_train = pd.read_csv(os.path.join(self.dataset_path, "ontokin-train.txt"), sep='\t', header=None)
question_list = []
for idx, row in df_train.iterrows():
# question head tail rel
h, r, t = row
if "_latent" in r:
h_idx = (self.entity2idx[h])
r_idx = (self.relation2idx[r])
t_idx = (self.entity2idx[t])
q = property_dict[r]
question_list.append((q, h_idx, t_idx, r_idx))
df_questions = pd.DataFrame(question_list)
df_questions.columns = ["question", "head", "tail", "rel"]
df_questions.to_csv(os.path.join(self.dataset_path, "score_model_training.tsv"), sep='\t')
if __name__ == '__main__':
my_ontokin_reader = OntoKinReader()
my_ontokin_reader.run() | null |
test channel properties | # Copyright (C) 2022 ASTRON (Netherlands Institute for Radio Astronomy)
# SPDX-License-Identifier: GPL-3.0-or-later
"""
These tests check that the python bindings for the DPInfo class behave correctly.
This script can be invoked in two ways:
- as standalone from the build/pythondp3/test/integration directory,
using `pytest source/tPyDpInfo.py` (extended with pytest options of your choice)
- using ctest, see pythondp3/test/integration/CMakeLists.txt
"""
import math
import pytest
import sys
# Append current directory to system path in order to import testconfig
sys.path.append(".")
import testconfig as tcf
sys.path.insert(0, tcf.PYTHONDIR)
try:
"The import may fail while running pytest --collect-only"
import dp3
except ImportError:
pass
def test_constructor():
# Constructor using only default values for the arguments
info0 = dp3.DPInfo()
# Check whether the default values for the constructor arguments have been used
assert info0.n_correlations == 0
assert info0.original_n_channels == 0
assert info0.start_channel == 0
assert info0.antenna_set == ""
# Constructor using specific values for the arguments
n_correlations = 4
original_n_channels = 8
start_channel = 1
antenna_set = "LBA"
info1 = dp3.DPInfo(
n_correlations, original_n_channels, start_channel, antenna_set
)
assert info1.n_correlations == n_correlations
assert info1.original_n_channels == original_n_channels
assert info1.start_channel == start_channel
assert info1.antenna_set == antenna_set
def test_antenna_properties():
info = dp3.DPInfo()
# Check default values.
assert info.n_antenna == 0
assert info.antenna_names == []
assert info.antenna_positions == []
assert info.first_antenna_indices == []
assert info.second_antenna_indices == []
# Check that properties are read-only.
with pytest.raises(AttributeError):
info.n_antenna = 3
with pytest.raises(AttributeError):
info.antenna_names = ["very", "nice", "names"]
with pytest.raises(AttributeError):
info.antenna_positions = [[0, 0, 0], [0, 42, 0], [42, 0, 0]]
with pytest.raises(AttributeError):
info.first_antenna_indices = [0, 0, 0]
with pytest.raises(AttributeError):
info.second_antenna_indices = [0, 1, 2]
# Check that set_antennas() yields new property values.
names = ["name_1", "name_2"]
diameters = [42, 43]
positions = [[1, 2, 3], [4, 5, 6]]
first_indices = [0]
second_indices = [1]
info.set_antennas(
names, diameters, positions, first_indices, second_indices
)
assert info.n_antenna == 2
assert info.antenna_names == names
assert info.antenna_positions == positions
assert info.first_antenna_indices == first_indices
assert info.second_antenna_indices == second_indices
def METHOD_NAME():
info = dp3.DPInfo()
# Check default values.
assert info.n_channels == 0
assert info.channel_frequencies == []
assert info.bda_channel_frequencies == [[]]
assert info.channel_widths == []
assert info.bda_channel_widths == [[]]
# Check that properties are read-only.
with pytest.raises(AttributeError):
info.n_channels = 3
with pytest.raises(AttributeError):
info.channel_frequencies = [10.0e6, 11.0e6, 12.0e6]
with pytest.raises(AttributeError):
info.bda_channel_frequencies = [[13.0e6, 14.0e6, 15.0e6]]
with pytest.raises(AttributeError):
        info.channel_widths = [2.0e6, 2.0e6, 2.0e6]
    with pytest.raises(AttributeError):
        info.bda_channel_widths = [[3.0e6, 3.0e6, 3.0e6]]
# Check that set_channels() yields new property values.
frequencies = [42.0e6, 43.0e6]
widths = [1.0e6, 1.0e6]
info.set_channels(frequencies, widths)
assert info.n_channels == 2
assert info.channel_frequencies == frequencies
assert info.bda_channel_frequencies == [frequencies]
assert info.channel_widths == widths
assert info.bda_channel_widths == [widths]
def test_time_properties():
info = dp3.DPInfo()
# Check default values.
assert info.first_time == 0.0
assert info.last_time == 0.0
assert info.time_interval == 1.0
assert info.start_time == -0.5
assert info.n_times == 1
# Check that properties are read-only.
with pytest.raises(AttributeError):
info.first_time = 3.0
with pytest.raises(AttributeError):
info.last_time = 4.0
with pytest.raises(AttributeError):
info.time_interval = 5.0
with pytest.raises(AttributeError):
info.start_time = 6.0
with pytest.raises(AttributeError):
info.n_times = 7
# Check that set_times() yields new property values.
first_time = 42.0
last_time = 141.0
interval = 5.0
info.set_times(first_time, last_time, interval)
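    # Worked arithmetic, consistent with the asserts below: start_time is
    # first_time - interval / 2 = 42.0 - 2.5 = 39.5, and n_times spans
    # first..last inclusive, i.e. (141.0 - 42.0) / 5.0 rounded, plus 1 = 21.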
assert info.first_time == first_time
assert info.last_time == last_time
assert info.time_interval == interval
assert info.start_time == 39.5
assert info.n_times == 21
def test_phase_center():
info = dp3.DPInfo()
assert math.isclose(info.phase_center[0], 0.0, rel_tol=1.0e-9)
assert math.isclose(info.phase_center[1], 0.5 * math.pi, rel_tol=1.0e-9)
for phase_center in [[0.1, 0.2], [-0.1, 0.2], [0.1, -0.2], [-0.1, -0.2]]:
info = dp3.DPInfo()
info.phase_center = phase_center
assert math.isclose(
phase_center[0], info.phase_center[0], rel_tol=1.0e-9
)
assert math.isclose(
phase_center[1], info.phase_center[1], rel_tol=1.0e-9
)
def test_ms_name():
info = dp3.DPInfo()
assert info.ms_name == ""
name = "test_ms_name"
info.ms_name = name
assert info.ms_name == name | null |
splitdrive | """Pathname and path-related operations for the Macintosh."""
import os
import warnings
from stat import *
import genericpath
from genericpath import *
__all__ = ["normcase","isabs","join","splitdrive","split","splitext",
"basename","dirname","commonprefix","getsize","getmtime",
"getatime","getctime", "islink","exists","lexists","isdir","isfile",
"walk","expanduser","expandvars","normpath","abspath",
"curdir","pardir","sep","pathsep","defpath","altsep","extsep",
"devnull","realpath","supports_unicode_filenames"]
# strings representing various path-related bits and pieces
curdir = ':'
pardir = '::'
extsep = '.'
sep = ':'
pathsep = '\n'
defpath = ':'
altsep = None
devnull = 'Dev:Null'
# Normalize the case of a pathname. Dummy in Posix, but <s>.lower() here.
def normcase(path):
return path.lower()
def isabs(s):
"""Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] != ':'
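# Examples for isabs above: isabs("Macintosh HD:Users:file") is True, while
# isabs(":subdir:file") and isabs("plainfile") are both False.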
def join(s, *p):
path = s
for t in p:
if (not s) or isabs(t):
path = t
continue
if t[:1] == ':':
t = t[1:]
if ':' not in path:
path = ':' + path
if path[-1:] != ':':
path = path + ':'
path = path + t
return path
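# Examples for join above: join("Macintosh HD:Docs", "file.txt") gives
# "Macintosh HD:Docs:file.txt", and join("Docs", "file.txt") gives the
# relative path ":Docs:file.txt".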
def split(s):
"""Split a pathname into two parts: the directory leading up to the final
bit, and the basename (the filename, without colons, in that directory).
The result (s, t) is such that join(s, t) yields the original argument."""
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i + 1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
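# Examples for split above: split("Macintosh HD:Docs:file.txt") gives
# ("Macintosh HD:Docs", "file.txt") and split("Disk:file.txt") gives
# ("Disk:", "file.txt").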
def splitext(p):
return genericpath._splitext(p, sep, altsep, extsep)
splitext.__doc__ = genericpath._splitext.__doc__
def METHOD_NAME(p):
"""Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split()
def dirname(s): return split(s)[0]
def basename(s): return split(s)[1]
def ismount(s):
if not isabs(s):
return False
components = split(s)
return len(components) == 2 and components[1] == ''
def islink(s):
"""Return true if the pathname refers to a symbolic link."""
try:
import Carbon.File
return Carbon.File.ResolveAliasFile(s, 0)[2]
except:
return False
# Is `stat`/`lstat` a meaningful difference on the Mac? This is safe in any
# case.
def lexists(path):
"""Test whether a path exists. Returns True for broken symbolic links"""
try:
st = os.lstat(path)
except os.error:
return False
return True
def expandvars(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
def expanduser(path):
"""Dummy to retain interface-compatibility with other operating systems."""
return path
class norm_error(Exception):
"""Path cannot be normalized"""
def normpath(s):
"""Normalize a pathname. Will return the same result for
equivalent paths."""
if ":" not in s:
return ":"+s
comps = s.split(":")
i = 1
while i < len(comps)-1:
if comps[i] == "" and comps[i-1] != "":
if i > 1:
del comps[i-1:i+1]
i = i - 1
else:
# best way to handle this is to raise an exception
raise norm_error, 'Cannot use :: immediately after volume name'
else:
i = i + 1
s = ":".join(comps)
# remove trailing ":" except for ":" and "Volume:"
if s[-1] == ":" and len(comps) > 2 and s != ":"*len(s):
s = s[:-1]
return s
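# Examples for normpath above: normpath("Disk:folder::file") collapses to
# "Disk:file" ("::" steps up one directory), and normpath("file") becomes the
# relative path ":file".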
def walk(top, func, arg):
"""Directory tree walk with callback function.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), call func(arg, dirname, fnames).
dirname is the name of the directory, and fnames a list of the names of
the files and subdirectories in dirname (excluding '.' and '..'). func
may modify the fnames list in-place (e.g. via del or slice assignment),
and walk will only recurse into the subdirectories whose names remain in
fnames; this can be used to implement a filter, or to impose a specific
order of visiting. No semantics are defined for, or required of, arg,
beyond that arg is always passed to func. It can be used, e.g., to pass
a filename pattern, or a mutable object designed to accumulate
statistics. Passing None for arg is common."""
warnings.warnpy3k("In 3.x, os.path.walk is removed in favor of os.walk.",
stacklevel=2)
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name) and not islink(name):
walk(name, func, arg)
def abspath(path):
"""Return an absolute path."""
if not isabs(path):
if isinstance(path, unicode):
cwd = os.getcwdu()
else:
cwd = os.getcwd()
path = join(cwd, path)
return normpath(path)
# realpath is a no-op on systems without islink support
def realpath(path):
path = abspath(path)
try:
import Carbon.File
except ImportError:
return path
if not path:
return path
components = path.split(':')
path = components[0] + ':'
for c in components[1:]:
path = join(path, c)
try:
path = Carbon.File.FSResolveAliasFile(path, 1)[0].as_pathname()
except Carbon.File.Error:
pass
return path
supports_unicode_filenames = True | null |
execute external | import mindsdb_sql
from mindsdb_sql import parse_sql
from mindsdb_sql.planner import utils as planner_utils
from mindsdb.api.mysql.mysql_proxy.classes.sql_query import Column, SQLQuery
from mindsdb.api.mysql.mysql_proxy.utilities import (
ErBadDbError,
SqlApiException,
logger,
)
import mindsdb.utilities.profiler as profiler
from mindsdb.api.mysql.mysql_proxy.executor.executor_commands import ExecuteCommands
class Executor:
"""This class stores initial and intermediate params
between different steps of query execution. And it is also
creates a separate instance of ExecuteCommands to execute the current
query step.
IMPORTANT: A public API of this class is a contract.
And there are at least 2 classes strongly depend on it:
ExecuctorClient
ExecutorService.
These classes do the same work as Executor when
MindsDB works in 'modularity' mode.
Thus please make sure that IF you change the API,
you must update the API of these two classes as well!"""
def __init__(self, session, sqlserver):
self.session = session
self.sqlserver = sqlserver
self.query = None
# returned values
# all this attributes needs to be added in
# self.json() method
self.columns = []
self.params = []
self.data = None
self.state_track = None
self.server_status = None
self.is_executed = False
self.error_message = None
self.error_code = None
# self.predictor_metadata = {}
self.sql = ""
self.sql_lower = ""
self.command_executor = ExecuteCommands(self.session, self)
def change_default_db(self, new_db):
self.command_executor.change_default_db(new_db)
def stmt_prepare(self, sql):
resp = self.METHOD_NAME(sql)
if resp is not None:
# is already executed
self.is_executed = True
return
self.parse(sql)
# if not params
params = planner_utils.get_query_params(self.query)
if len(params) == 0:
# execute immediately
self.do_execute()
else:
# plan query
# TODO less complex.
# planner is inside SQLQuery now.
sqlquery = SQLQuery(self.query, session=self.session, execute=False)
sqlquery.prepare_query()
self.params = [
Column(
alias=p.value,
type="str",
name=p.value,
)
for p in params
]
# TODO:
# select * from mindsdb.models doesn't invoke prepare_steps and columns_list is empty
self.columns = sqlquery.columns_list
def stmt_execute(self, param_values):
if self.is_executed:
return
# fill params
self.query = planner_utils.fill_query_params(self.query, param_values)
# execute query
self.do_execute()
@profiler.profile()
def query_execute(self, sql):
logger.info("%s.query_execute: sql - %s", self.__class__.__name__, sql)
resp = self.METHOD_NAME(sql)
if resp is not None:
# is already executed
self.is_executed = True
return
self.parse(sql)
self.do_execute()
# for awesome Mongo API only
def binary_query_execute(self, sql):
self.sql = sql.to_string()
self.sql_lower = self.sql.lower()
ret = self.command_executor.execute_command(sql)
self.error_code = ret.error_code
self.error_message = ret.error_message
self.data = ret.data
self.server_status = ret.status
if ret.columns is not None:
self.columns = ret.columns
self.state_track = ret.state_track
def METHOD_NAME(self, sql):
# not exec directly in integration
return None
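        # NOTE: because of the early return above, the block below is unreachable
        # in this version; it is the (currently disabled) path that would run the
        # query directly against an external integration.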
# try exec in external integration
if (
isinstance(self.session.database, str)
and len(self.session.database) > 0
and self.session.database.lower()
not in ("mindsdb", "files", "information_schema")
and "@@" not in sql.lower()
and (
(sql.lower().strip().startswith("select") and "from" in sql.lower())
or (
sql.lower().strip().startswith("show")
# and 'databases' in sql.lower()
and "tables" in sql.lower()
)
)
):
datanode = self.session.datahub.get(self.session.database)
if datanode is None:
raise ErBadDbError("Unknown database - %s" % self.session.database)
# try parse or send raw sql
try:
sql = parse_sql(sql, dialect="mindsdb")
except mindsdb_sql.exceptions.ParsingException:
pass
result, column_info = datanode.query(sql)
columns = [
Column(name=col["name"], type=col["type"]) for col in column_info
]
data = []
if len(result) > 0:
# columns = [{
# 'table_name': '',
# 'name': x,
# 'type': TYPES.MYSQL_TYPE_VAR_STRING
# } for x in result[0].keys()]
data = [[str(value) for key, value in x.items()] for x in result]
self.columns = columns
self.data = data
return True
@profiler.profile()
def parse(self, sql):
logger.info("%s.parse: sql - %s", self.__class__.__name__, sql)
self.sql = sql
sql_lower = sql.lower()
self.sql_lower = sql_lower.replace("`", "")
try:
self.query = parse_sql(sql, dialect="mindsdb")
except Exception as mdb_error:
try:
self.query = parse_sql(sql, dialect="mysql")
except Exception:
# not all statements are parsed by parse_sql
logger.warning(f"SQL statement is not parsed by mindsdb_sql: {sql}")
raise SqlApiException(
f"SQL statement cannot be parsed by mindsdb_sql - {sql}: {mdb_error}"
) from mdb_error
# == a place for workarounds ==
# or run sql in integration without parsing
@profiler.profile()
def do_execute(self):
# it can be already run at prepare state
logger.info("%s.do_execute", self.__class__.__name__)
if self.is_executed:
return
ret = self.command_executor.execute_command(self.query)
self.error_code = ret.error_code
self.error_message = ret.error_message
self.is_executed = True
self.data = ret.data
self.server_status = ret.status
if ret.columns is not None:
self.columns = ret.columns
self.state_track = ret.state_track | null |
minion config | import ctypes
import logging
import multiprocessing
import os
import pathlib
import shutil
import stat
import time
from pathlib import Path
import pytest
from pytestshellutils.utils import ports
from saltfactories.utils import random_string
import salt.channel.client
import salt.channel.server
import salt.config
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.master
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
log = logging.getLogger(__name__)
pytestmark = [
pytest.mark.skip_on_spawning_platform(
reason="These tests are currently broken on spawning platforms. Need to be rewritten.",
)
]
@pytest.fixture
def channel_minion_id():
return random_string("Tha-Minion-")
@pytest.fixture
def root_dir(tmp_path):
if salt.utils.platform.is_darwin():
# To avoid 'OSError: AF_UNIX path too long'
_root_dir = pathlib.Path("/tmp").resolve() / tmp_path.name
try:
yield _root_dir
finally:
shutil.rmtree(str(_root_dir), ignore_errors=True)
else:
yield tmp_path
def transport_ids(value):
return "transport({})".format(value)
@pytest.fixture(params=["tcp", "zeromq"], ids=transport_ids)
def transport(request):
return request.param
@pytest.fixture
def master_config(root_dir, transport):
master_conf = salt.config.master_config("")
master_conf["transport"] = transport
master_conf["id"] = "master"
master_conf["root_dir"] = str(root_dir)
master_conf["sock_dir"] = str(root_dir)
master_conf["interface"] = "127.0.0.1"
master_conf["publish_port"] = ports.get_unused_localhost_port()
master_conf["ret_port"] = ports.get_unused_localhost_port()
master_conf["pki_dir"] = str(root_dir / "pki")
os.makedirs(master_conf["pki_dir"])
salt.crypt.gen_keys(master_conf["pki_dir"], "master", 4096)
minions_keys = os.path.join(master_conf["pki_dir"], "minions")
os.makedirs(minions_keys)
yield master_conf
@pytest.fixture
def METHOD_NAME(master_config, channel_minion_id):
minion_conf = salt.config.METHOD_NAME(
"", minion_id=channel_minion_id, cache_minion_id=False
)
minion_conf["transport"] = master_config["transport"]
minion_conf["root_dir"] = master_config["root_dir"]
minion_conf["id"] = channel_minion_id
minion_conf["sock_dir"] = master_config["sock_dir"]
minion_conf["ret_port"] = master_config["ret_port"]
minion_conf["interface"] = "127.0.0.1"
minion_conf["pki_dir"] = os.path.join(master_config["root_dir"], "pki_minion")
os.makedirs(minion_conf["pki_dir"])
minion_conf["master_port"] = master_config["ret_port"]
minion_conf["master_ip"] = "127.0.0.1"
minion_conf["master_uri"] = "tcp://127.0.0.1:{}".format(master_config["ret_port"])
salt.crypt.gen_keys(minion_conf["pki_dir"], "minion", 4096)
minion_pub = os.path.join(minion_conf["pki_dir"], "minion.pub")
pub_on_master = os.path.join(master_config["pki_dir"], "minions", channel_minion_id)
shutil.copyfile(minion_pub, pub_on_master)
return minion_conf
@pytest.fixture
def process_manager():
process_manager = salt.utils.process.ProcessManager()
try:
yield process_manager
finally:
process_manager.terminate()
@pytest.fixture
def master_secrets():
salt.master.SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char,
salt.utils.stringutils.to_bytes(salt.crypt.Crypticle.generate_key_string()),
),
"serial": multiprocessing.Value(
ctypes.c_longlong, lock=False # We'll use the lock from 'secret'
),
}
yield
salt.master.SMaster.secrets.pop("aes")
@salt.ext.tornado.gen.coroutine
def _connect_and_publish(
io_loop, channel_minion_id, channel, server, received, timeout=60
):
log.info("TEST - BEFORE CHANNEL CONNECT")
yield channel.connect()
log.info("TEST - AFTER CHANNEL CONNECT")
def cb(payload):
log.info("TEST - PUB SERVER MSG %r", payload)
received.append(payload)
io_loop.stop()
channel.on_recv(cb)
server.publish({"tgt_type": "glob", "tgt": [channel_minion_id], "WTF": "SON"})
start = time.time()
while time.time() - start < timeout:
yield salt.ext.tornado.gen.sleep(1)
io_loop.stop()
def test_pub_server_channel(
io_loop,
channel_minion_id,
master_config,
METHOD_NAME,
process_manager,
master_secrets,
):
server_channel = salt.channel.server.PubServerChannel.factory(
master_config,
)
server_channel.pre_fork(process_manager)
req_server_channel = salt.channel.server.ReqServerChannel.factory(master_config)
req_server_channel.pre_fork(process_manager)
def handle_payload(payload):
log.info("TEST - Req Server handle payload %r", payload)
req_server_channel.post_fork(handle_payload, io_loop=io_loop)
if master_config["transport"] == "zeromq":
p = Path(str(master_config["sock_dir"])) / "workers.ipc"
mode = os.lstat(p).st_mode
assert bool(os.lstat(p).st_mode & stat.S_IRUSR)
assert not bool(os.lstat(p).st_mode & stat.S_IRGRP)
assert not bool(os.lstat(p).st_mode & stat.S_IROTH)
pub_channel = salt.channel.client.AsyncPubChannel.factory(METHOD_NAME)
received = []
try:
io_loop.add_callback(
_connect_and_publish,
io_loop,
channel_minion_id,
pub_channel,
server_channel,
received,
)
io_loop.start()
assert len(received) == 1
finally:
server_channel.close()
req_server_channel.close()
pub_channel.close() | null |
make grid | #!/usr/bin/env nemesis
"""Python application to create spatial databases for a synthetic event with time-varying Gaussian slip.
"""
import math
import numpy
from spatialdata.spatialdb.SimpleGridDB import SimpleGridDB
from spatialdata.spatialdb.SimpleGridAscii import createWriter
from spatialdata.spatialdb import TimeHistoryIO
from spatialdata.geocoords.CSGeo import CSGeo
from pythia.pyre.applications.Script import Script as Application
class GenerateSlowslip(Application):
"""Python application to create spatial databases for a synthetic
SSE with time-varying Gaussian slip.
"""
import pythia.pyre.inventory
# Python object for managing GenerateSlowslip facilities and properties.
##
# \b Properties
# @li \b rake Rake of fault slip (degrees).
# @li \b slip_center (lon,lat) coordinates of slip center.
# @li \b slip_radius Radius of slip region (degrees).
# @li \b slip_max Maximum slip value (meters).
# @li \b slip_sigma_lon Sigma value for longitude.
# @li \b slip_sigma_lat Sigma value for latitude.
# @li \b slip_times List of times for which to provide amplitudes.
# @li \b slip_time_units Units used for slip times.
# @li \b slip_amplitudes List of slip amplitudes.
# @li \b grid_lon_range Min and max longitude values for grid.
# @li \b grid_lat_range Min and max latitude values for grid.
# @li \b grid_incr Grid increment (degrees) for spatial database.
# @li \b time_db_filename Name of temporal DB output file.
# @li \b database_filename Filename for generated spatial database.
##
# \b Facilities
# @li \b coordsys Coordinate system for output database.
rake = pythia.pyre.inventory.float("rake", default=1.0)
rake.meta['tip'] = "Rake of fault slip (degrees)."
slipCenter = pythia.pyre.inventory.list("slip_center", default=[0.0, 0.0])
slipCenter.meta['tip'] = "(lon,lat) coordinates of slip center."
slipRadius = pythia.pyre.inventory.float("slip_radius", default=1.0)
slipRadius.meta['tip'] = "Radius of slip region (degrees)."
slipMax = pythia.pyre.inventory.float("slip_max", default=5.0)
slipMax.meta['tip'] = "Maximum slip value (meters)."
slipSigmaLon = pythia.pyre.inventory.float("slip_sigma_lon", default=0.2)
slipSigmaLon.meta['tip'] = "Sigma value for longitude."
slipSigmaLat = pythia.pyre.inventory.float("slip_sigma_lat", default=0.2)
slipSigmaLat.meta['tip'] = "Sigma value for latitude."
slipTimes = pythia.pyre.inventory.list("slip_times", default=[0.0, 0.5, 1.0])
slipTimes.meta['tip'] = "List of times for which to provide amplitudes."
slipTimeUnits = pythia.pyre.inventory.str("slip_time_units", default="year")
slipTimeUnits.meta['tip'] = "Units used for slip times."
slipAmplitudes = pythia.pyre.inventory.list("slip_amplitudes", default=[0.0, 0.5, 1.0])
slipAmplitudes.meta['tip'] = "List of slip amplitudes."
gridLonRange = pythia.pyre.inventory.list("grid_lon_range", default=[-123.0, -124.0])
gridLonRange.meta['tip'] = "Min and max longitude values for grid."
gridLatRange = pythia.pyre.inventory.list("grid_lat_range", default=[45.0, 46.0])
gridLatRange.meta['tip'] = "Min and max latitude values for grid."
gridIncr = pythia.pyre.inventory.float("grid_incr", default=0.05)
gridIncr.meta['tip'] = "Sigma value for latitude."
timeDbFilename = pythia.pyre.inventory.str("time_db_filename", default="slip.timedb")
timeDbFilename.meta['tip'] = "Filename of temporal DB output file."
coordsys = pythia.pyre.inventory.facility("coordsys", family="coordsys", factory=CSGeo)
coordsys.meta['tip'] = "Coordinate system for output database."
dbFilename = pythia.pyre.inventory.str("database_filename", default="slip.spatialdb")
dbFilename.meta['tip'] = "Filename for generated spatial database."
# PUBLIC METHODS /////////////////////////////////////////////////////
def __init__(self, name="generate_slowslip"):
Application.__init__(self, name)
self.lon = None
self.lat = None
self.z = None
self.grid = None
self.faultSlip = None
return
def main(self):
self.METHOD_NAME()
self._computeGauss()
self._writeSpatialdb()
self._writeTemporaldb()
return
# PRIVATE METHODS /////////////////////////////////////////////////////
def _configure(self):
"""Setup members using inventory.
"""
Application._configure(self)
def METHOD_NAME(self):
"""Function to create a mesh grid for computations.
"""
lonMin = float(self.gridLonRange[0])
lonMax = float(self.gridLonRange[1])
latMin = float(self.gridLatRange[0])
latMax = float(self.gridLatRange[1])
lonDiff = lonMax - lonMin
latDiff = latMax - latMin
numLon = int(round(lonDiff / self.gridIncr)) + 1
numLat = int(round(latDiff / self.gridIncr)) + 1
self.lon = numpy.linspace(lonMin, lonMax, num=numLon, dtype=numpy.float64)
self.lat = numpy.linspace(latMin, latMax, num=numLat, dtype=numpy.float64)
self.z = numpy.zeros(1, dtype=numpy.float64)
lonGrid, latGrid = numpy.meshgrid(self.lon, self.lat)
zGrid = numpy.zeros_like(lonGrid)
self.grid = numpy.column_stack((
lonGrid.flatten(),
latGrid.flatten(),
zGrid.flatten(),
))
def _computeGauss(self):
"""Function to compute 2D Gaussian slip distribution.
"""
lonShift = self.grid[:, 0] - float(self.slipCenter[0])
latShift = self.grid[:, 1] - float(self.slipCenter[1])
distance = numpy.sqrt(lonShift * lonShift + latShift * latShift)
outside = numpy.where(distance > self.slipRadius)
lonFac = 0.5 * lonShift * lonShift / (self.slipSigmaLon * self.slipSigmaLon)
latFac = 0.5 * latShift * latShift / (self.slipSigmaLat * self.slipSigmaLat)
slip = self.slipMax * numpy.exp(-(lonFac + latFac))
slip[outside] = 0.0
rakeRadians = math.radians(self.rake)
llComp = math.cos(rakeRadians)
udComp = math.sin(rakeRadians)
llSlip = llComp * slip
udSlip = udComp * slip
opSlip = numpy.zeros_like(llSlip)
self.faultSlip = numpy.column_stack((llSlip, udSlip, opSlip))
def _writeSpatialdb(self):
"""Write spatial database with fault slip.
"""
llSlipInfo = {'name': "left-lateral-slip",
'units': "m",
'data': self.faultSlip[:, 0]}
udSlipInfo = {'name': "reverse-slip",
'units': "m",
'data': self.faultSlip[:, 1]}
openInfo = {'name': "fault-opening",
'units': "m",
'data': self.faultSlip[:, 2]}
data = {'num-x': self.lon.shape[0],
'num-y': self.lat.shape[0],
'num-z': 1,
'points': self.grid,
'x': self.lon,
'y': self.lat,
'z': self.z,
'coordsys': self.coordsys,
'data_dim': 2,
'values': [llSlipInfo, udSlipInfo, openInfo]}
writer = createWriter(self.dbFilename)
writer.write(data)
def _writeTemporaldb(self):
"""Write temporal database with time variation of fault slip.
"""
if (len(self.slipTimes) == 1):
return
time = [float(i) for i in self.slipTimes]
timeArr = numpy.array(time, dtype=numpy.float64)
amplitude = [float(i) for i in self.slipAmplitudes]
amplitudeArr = numpy.array(amplitude, dtype=numpy.float64)
TimeHistoryIO.write(timeArr, amplitudeArr, self.slipTimeUnits, self.timeDbFilename)
# ----------------------------------------------------------------------
if __name__ == '__main__':
GenerateSlowslip().run()
# End of file | null |
test create tar gz basic | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import os
import shutil
import stat
import tarfile
import tempfile
import unittest
import pytest
from mozpack.archive import (
DEFAULT_MTIME,
create_tar_bz2_from_files,
create_tar_from_files,
create_tar_gz_from_files,
)
from mozpack.files import GeneratedFile
from mozunit import main
MODE_STANDARD = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
def file_hash(path):
h = hashlib.sha1()
with open(path, "rb") as fh:
while True:
data = fh.read(8192)
if not data:
break
h.update(data)
return h.hexdigest()
class TestArchive(unittest.TestCase):
def _create_files(self, root):
files = {}
for i in range(10):
p = os.path.join(root, "file%02d" % i)
with open(p, "wb") as fh:
fh.write(b"file%02d" % i)
# Need to set permissions or umask may influence testing.
os.chmod(p, MODE_STANDARD)
files["file%02d" % i] = p
for i in range(10):
files["file%02d" % (i + 10)] = GeneratedFile(b"file%02d" % (i + 10))
return files
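    # _create_files() returns 10 on-disk files plus 10 in-memory GeneratedFile
    # entries, which is why the checks below expect exactly 20 tar members.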
def _verify_basic_tarfile(self, tf):
self.assertEqual(len(tf.getmembers()), 20)
names = ["file%02d" % i for i in range(20)]
self.assertEqual(tf.getnames(), names)
for ti in tf.getmembers():
self.assertEqual(ti.uid, 0)
self.assertEqual(ti.gid, 0)
self.assertEqual(ti.uname, "")
self.assertEqual(ti.gname, "")
self.assertEqual(ti.mode, MODE_STANDARD)
self.assertEqual(ti.mtime, DEFAULT_MTIME)
@pytest.mark.xfail(
reason="ValueError is not thrown despite being provided directory."
)
def test_dirs_refused(self):
d = tempfile.mkdtemp()
try:
tp = os.path.join(d, "test.tar")
with open(tp, "wb") as fh:
with self.assertRaisesRegexp(ValueError, "not a regular"):
create_tar_from_files(fh, {"test": d})
finally:
shutil.rmtree(d)
@pytest.mark.xfail(reason="ValueError is not thrown despite uid/gid being set.")
def test_setuid_setgid_refused(self):
d = tempfile.mkdtemp()
try:
uid = os.path.join(d, "setuid")
gid = os.path.join(d, "setgid")
with open(uid, "a"):
pass
with open(gid, "a"):
pass
os.chmod(uid, MODE_STANDARD | stat.S_ISUID)
os.chmod(gid, MODE_STANDARD | stat.S_ISGID)
tp = os.path.join(d, "test.tar")
with open(tp, "wb") as fh:
with self.assertRaisesRegexp(ValueError, "cannot add file with setuid"):
create_tar_from_files(fh, {"test": uid})
with self.assertRaisesRegexp(ValueError, "cannot add file with setuid"):
create_tar_from_files(fh, {"test": gid})
finally:
shutil.rmtree(d)
def test_create_tar_basic(self):
d = tempfile.mkdtemp()
try:
files = self._create_files(d)
tp = os.path.join(d, "test.tar")
with open(tp, "wb") as fh:
create_tar_from_files(fh, files)
# Output should be deterministic.
self.assertEqual(file_hash(tp), "01cd314e277f060e98c7de6c8ea57f96b3a2065c")
with tarfile.open(tp, "r") as tf:
self._verify_basic_tarfile(tf)
finally:
shutil.rmtree(d)
@pytest.mark.xfail(reason="hash mismatch")
def test_executable_preserved(self):
d = tempfile.mkdtemp()
try:
p = os.path.join(d, "exec")
with open(p, "wb") as fh:
fh.write("#!/bin/bash\n")
os.chmod(p, MODE_STANDARD | stat.S_IXUSR)
tp = os.path.join(d, "test.tar")
with open(tp, "wb") as fh:
create_tar_from_files(fh, {"exec": p})
self.assertEqual(file_hash(tp), "357e1b81c0b6cfdfa5d2d118d420025c3c76ee93")
with tarfile.open(tp, "r") as tf:
m = tf.getmember("exec")
self.assertEqual(m.mode, MODE_STANDARD | stat.S_IXUSR)
finally:
shutil.rmtree(d)
def METHOD_NAME(self):
d = tempfile.mkdtemp()
try:
files = self._create_files(d)
gp = os.path.join(d, "test.tar.gz")
with open(gp, "wb") as fh:
create_tar_gz_from_files(fh, files)
self.assertEqual(file_hash(gp), "7c4da5adc5088cdf00911d5daf9a67b15de714b7")
with tarfile.open(gp, "r:gz") as tf:
self._verify_basic_tarfile(tf)
finally:
shutil.rmtree(d)
def test_tar_gz_name(self):
d = tempfile.mkdtemp()
try:
files = self._create_files(d)
gp = os.path.join(d, "test.tar.gz")
with open(gp, "wb") as fh:
create_tar_gz_from_files(fh, files, filename="foobar")
self.assertEqual(file_hash(gp), "721e00083c17d16df2edbddf40136298c06d0c49")
with tarfile.open(gp, "r:gz") as tf:
self._verify_basic_tarfile(tf)
finally:
shutil.rmtree(d)
def test_create_tar_bz2_basic(self):
d = tempfile.mkdtemp()
try:
files = self._create_files(d)
bp = os.path.join(d, "test.tar.bz2")
with open(bp, "wb") as fh:
create_tar_bz2_from_files(fh, files)
self.assertEqual(file_hash(bp), "eb5096d2fbb71df7b3d690001a6f2e82a5aad6a7")
with tarfile.open(bp, "r:bz2") as tf:
self._verify_basic_tarfile(tf)
finally:
shutil.rmtree(d)
if __name__ == "__main__":
main() | null |
main | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2020, John Westcott IV <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: token
author: "John Westcott IV (@john-westcott-iv)"
version_added: "2.3.0"
short_description: create, update, or destroy Automation Platform Controller tokens.
description:
- Create or destroy Automation Platform Controller tokens. See
U(https://www.ansible.com/tower) for an overview.
- In addition, the module sets an Ansible fact which can be passed into other
controller modules as the parameter controller_oauthtoken. See examples for usage.
- Because of the sensitive nature of tokens, the created token value is only available once
through the Ansible fact. (See RETURN for details)
- Due to the nature of tokens this module is not idempotent. A second will
with the same parameters will create a new token.
- If you are creating a temporary token for use with modules you should delete the token
when you are done with it. See the example for how to do it.
options:
description:
description:
- Optional description of this access token.
required: False
type: str
application:
description:
- The application name, ID, or named URL tied to this token.
required: False
type: str
scope:
description:
- Allowed scopes, further restricts user's permissions. Must be a simple space-separated string with allowed scopes ['read', 'write'].
required: False
type: str
choices: ["read", "write"]
existing_token:
description: The data structure produced from token in create mode to be used with state absent.
type: dict
existing_token_id:
description: A token ID (number) which can be used to delete an arbitrary token with state absent.
type: str
state:
description:
- Desired state of the resource.
choices: ["present", "absent"]
default: "present"
type: str
extends_documentation_fragment: awx.awx.auth
'''
EXAMPLES = '''
- block:
- name: Create a new token using an existing token
token:
description: '{{ token_description }}'
scope: "write"
state: present
controller_oauthtoken: "{{ my_existing_token }}"
- name: Delete this token
token:
existing_token: "{{ controller_token }}"
state: absent
- name: Create a new token using username/password
token:
description: '{{ token_description }}'
scope: "write"
state: present
controller_username: "{{ my_username }}"
controller_password: "{{ my_password }}"
- name: Use our new token to make another call
job_list:
controller_oauthtoken: "{{ controller_token }}"
always:
- name: Delete our Token with the token we created
token:
existing_token: "{{ controller_token }}"
state: absent
when: token is defined
- name: Delete a token by its id
token:
existing_token_id: 4
state: absent
'''
RETURN = '''
controller_token:
type: dict
description: An Ansible Fact variable representing a token object which can be used for auth in subsequent modules. See examples for usage.
contains:
token:
description: The token that was generated. This token can never be accessed again, make sure this value is noted before it is lost.
type: str
id:
description: The numeric ID of the token created
type: str
returned: on successful create
'''
from ..module_utils.controller_api import ControllerAPIModule
def return_token(module, last_response):
# A token is special because you can never get the actual token ID back from the API.
# So the default module return would give you an ID but then the token would forever be masked on you.
# This method will return the entire token object we got back so that a user has access to the token
module.json_output['ansible_facts'] = {
'controller_token': last_response,
'tower_token': last_response,
}
module.exit_json(**module.json_output)
def METHOD_NAME():
# Any additional arguments that are not fields of the item can be added here
argument_spec = dict(
description=dict(),
application=dict(),
scope=dict(choices=['read', 'write']),
existing_token=dict(type='dict', no_log=False),
existing_token_id=dict(),
state=dict(choices=['present', 'absent'], default='present'),
)
# Create a module for ourselves
module = ControllerAPIModule(
argument_spec=argument_spec,
mutually_exclusive=[
('existing_token', 'existing_token_id'),
],
# If we are state absent make sure one of existing_token or existing_token_id are present
required_if=[
[
'state',
'absent',
('existing_token', 'existing_token_id'),
True,
],
],
)
# Extract our parameters
description = module.params.get('description')
application = module.params.get('application')
scope = module.params.get('scope')
existing_token = module.params.get('existing_token')
existing_token_id = module.params.get('existing_token_id')
state = module.params.get('state')
if state == 'absent':
if not existing_token:
existing_token = module.get_one(
'tokens',
**{
'data': {
'id': existing_token_id,
}
}
)
# If the state was absent we can let the module delete it if needed, the module will handle exiting from this
module.delete_if_needed(existing_token)
# Attempt to look up the related items the user specified (these will fail the module if not found)
application_id = None
if application:
application_id = module.resolve_name_to_id('applications', application)
# Create the data that gets sent for create and update
new_fields = {}
if description is not None:
new_fields['description'] = description
if application is not None:
new_fields['application'] = application_id
if scope is not None:
new_fields['scope'] = scope
# If the state was present and we can let the module build or update the existing item, this will return on its own
module.create_or_update_if_needed(
None,
new_fields,
endpoint='tokens',
item_type='token',
associations={},
on_create=return_token,
)
if __name__ == '__main__':
METHOD_NAME() | null |
create jax task | # coding=utf-8
# Copyright 2022 The Pax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for trainer_lib."""
import itertools
from absl.testing import absltest
from absl.testing import parameterized
from etils import epath
import jax.numpy as jnp
from paxml import tasks_lib
from paxml import trainer_lib
from praxis import base_hyperparams
from praxis import base_layer
from praxis import base_model
from praxis import optimizers
from praxis import pax_fiddle
from praxis import py_utils
from praxis import pytypes
from praxis import schedules
NestedMap = py_utils.NestedMap
JTensor = pytypes.JTensor
class RunningModeTest(parameterized.TestCase):
def test_unknown_mode(self):
self.assertEqual(
trainer_lib.RunningMode.detect(False, False, False),
trainer_lib.RunningMode.UNKNOWN,
)
@parameterized.parameters(
('has_train', True, False, False),
('has_train', True, True, False),
('has_train', True, False, True),
('has_train', True, True, True),
('has_eval', False, True, False),
('has_eval', True, True, False),
('has_eval', False, True, True),
('has_eval', True, True, True),
('has_decode', False, False, True),
('has_decode', False, True, True),
('has_decode', True, False, True),
('has_decode', True, True, True),
)
def test_valid_modes(
self, running_mode, has_train_metrics, has_eval_metrics, has_test_metrics
):
self.assertTrue(
getattr(
trainer_lib.RunningMode.detect(
has_train_metrics, has_eval_metrics, has_test_metrics
),
running_mode,
)
)
class TestModel(base_model.BaseModel):
"""Simple model for testing.
Attributes:
input_dims: Depth of the input.
output_dims: Depth of the output.
"""
input_dims: int = 0
output_dims: int = 0
def setup(self) -> None:
self.create_variable(
'weights',
base_layer.WeightHParams(shape=[self.input_dims, self.output_dims]),
)
def compute_predictions(self, input_batch: NestedMap) -> JTensor:
ret = jnp.einsum('bi,io->bo', input_batch.inputs, self.theta.weights)
self.add_summary('debug', ret, verbosity=4)
self.add_summary('info', ret, verbosity=3)
return ret
def compute_loss(
self, predictions: JTensor, input_batch: NestedMap
) -> tuple[NestedMap, NestedMap]:
del input_batch
prediction_loss = jnp.sum(predictions)
theta_loss = jnp.max(jnp.abs(self.theta.weights))
    # Here prediction_loss is the main loss to back-prop into, and theta_loss is
    # an eval metric.
per_example_out = NestedMap()
return (
NestedMap(
prediction_loss=(
prediction_loss,
jnp.array(1.0, prediction_loss.dtype),
),
theta_loss=(theta_loss, jnp.array(1.0, theta_loss.dtype)),
),
per_example_out,
)
class TrainerLibTest(parameterized.TestCase):
def METHOD_NAME(self, input_dims: int, output_dims: int):
config = pax_fiddle.Config(tasks_lib.SingleTask, name='task')
config.model = pax_fiddle.Config(
TestModel,
name='test_model',
input_dims=input_dims,
output_dims=output_dims,
)
learner = config.train.learner
learner.loss_name = 'loss'
learner.optimizer = pax_fiddle.Config(optimizers.Adam)
learner.optimizer.lr_schedule = pax_fiddle.Config(schedules.Constant)
return base_hyperparams.instantiate(config)
@parameterized.parameters(itertools.product((True, False), (True, False)))
def test_create_train_state_metadata(self, discard_opt_states, do_eval):
input_dims = 3
output_dims = 5
inputs = jnp.ones((1, input_dims), dtype=jnp.float32)
task = self.METHOD_NAME(input_dims, output_dims)
train_shape_dtype = NestedMap(inputs=inputs)
metadata = trainer_lib.create_train_state_metadata(
task, train_shape_dtype, discard_opt_states, do_eval
)
self.assertTrue((metadata.input_shape_dtype['inputs'] == inputs).all())
var_weight_hparams = task.model.abstract_init_with_metadata(
train_shape_dtype, do_eval=do_eval
)
self.assertEqual(metadata.var_weight_hparams, var_weight_hparams)
padded_global_shapes = task.create_train_state_padded_shapes(
var_weight_hparams, discard_opt_states=discard_opt_states
)
self.assertEqual(metadata.padded_global_shapes, padded_global_shapes)
unpadded_global_shapes = task.create_train_state_unpadded_shapes(
var_weight_hparams, discard_opt_states=discard_opt_states
)
self.assertEqual(metadata.unpadded_global_shapes, unpadded_global_shapes)
partition_specs = task.create_train_state_partition_specs(
var_weight_hparams, discard_opt_states=discard_opt_states
)
self.assertEqual(metadata.partition_specs, partition_specs)
@parameterized.parameters(itertools.product((True, False), (True, False)))
def test_write_post_init_model_hparams_file(
self, discard_opt_states, do_eval
):
input_dims = 3
output_dims = 5
layer_cfg = pax_fiddle.Config(
TestModel,
name='test_model',
input_dims=input_dims,
output_dims=output_dims,
)
model = pax_fiddle.build(layer_cfg)
inputs = jnp.ones((1, input_dims), dtype=jnp.float32)
task = self.METHOD_NAME(input_dims, output_dims)
train_shape_dtype = NestedMap(inputs=inputs)
train_state_metadata = trainer_lib.create_train_state_metadata(
task, train_shape_dtype, discard_opt_states, do_eval
)
job_log_dir = (
epath.Path(absltest.get_default_test_tmpdir())
/ f'model_hparams_{discard_opt_states}_{do_eval}'
)
trainer_lib.write_post_init_model_hparams_file(
model, train_state_metadata, job_log_dir, do_eval
)
params_fpath = job_log_dir / 'post_init_model_params.txt'
with params_fpath.open() as params_file:
hyper_params, param_weights = params_file.read().split('\n\n')
hyper_params_config = model.abstract_init_with_mdl_config(
train_state_metadata.input_shape_dtype, do_eval=do_eval
)
hyper_params_expected = base_hyperparams.nested_struct_to_text(
hyper_params_config
)
param_weights_expected = base_hyperparams.nested_struct_to_text(
train_state_metadata.var_weight_hparams
)
self.assertEqual(hyper_params.strip(), hyper_params_expected.strip())
self.assertEqual(param_weights.strip(), param_weights_expected.strip())
if __name__ == '__main__':
absltest.main() | null |
find most distant | """
"""
from SALib.sample.morris.strategy import Strategy
from scipy.special import comb as nchoosek # type: ignore
from itertools import combinations, islice
import sys
import numpy as np # type: ignore
from typing import List
class BruteForce(Strategy):
"""Implements the brute force optimisation strategy"""
def _sample(
self, input_sample, num_samples, num_params, k_choices, num_groups=None
):
return self.brute_force_most_distant(
input_sample, num_samples, num_params, k_choices, num_groups
)
def brute_force_most_distant(
self,
input_sample: np.ndarray,
num_samples: int,
num_params: int,
k_choices: int,
num_groups: int = None,
) -> List:
"""Use brute force method to find most distant trajectories
Parameters
----------
input_sample : numpy.ndarray
num_samples : int
The number of samples to generate
num_params : int
The number of parameters
k_choices : int
The number of optimal trajectories
num_groups : int, default=None
The number of groups
Returns
-------
list
"""
scores = self.METHOD_NAME(
input_sample, num_samples, num_params, k_choices, num_groups
)
maximum_combo = self.find_maximum(scores, num_samples, k_choices)
return maximum_combo
def METHOD_NAME(
self,
input_sample: np.ndarray,
num_samples: int,
num_params: int,
k_choices: int,
num_groups: int = None,
) -> np.ndarray:
"""
Finds the 'k_choices' most distant choices from the
'num_samples' trajectories contained in 'input_sample'
Parameters
----------
input_sample : numpy.ndarray
num_samples : int
The number of samples to generate
num_params : int
The number of parameters
k_choices : int
The number of optimal trajectories
num_groups : int, default=None
The number of groups
Returns
-------
numpy.ndarray
"""
# Now evaluate the (N choose k_choices) possible combinations
if nchoosek(num_samples, k_choices) >= sys.maxsize:
raise ValueError("Number of combinations is too large")
number_of_combinations = int(nchoosek(num_samples, k_choices))
# First compute the distance matrix for each possible pairing
# of trajectories and store in a shared-memory array
distance_matrix = self.compute_distance_matrix(
input_sample, num_samples, num_params, num_groups
)
# Initialise the output array
chunk = int(1e6)
if chunk > number_of_combinations:
chunk = number_of_combinations
counter = 0
# Generate a list of all the possible combinations
combo_gen = combinations(range(num_samples), k_choices)
scores = np.zeros(number_of_combinations, dtype=np.float32)
# Generate the pairwise indices once
pairwise = np.array([y for y in combinations(range(k_choices), 2)])
mappable = self.mappable
for combos in self.grouper(chunk, combo_gen):
scores[(counter * chunk) : ((counter + 1) * chunk)] = mappable(
combos, pairwise, distance_matrix
)
counter += 1
return scores
@staticmethod
def grouper(n, iterable):
it = iter(iterable)
while True:
chunk = tuple(islice(it, n))
if not chunk:
return
yield chunk
@staticmethod
def mappable(combos, pairwise, distance_matrix):
"""
Obtains scores from the distance_matrix for each pairwise combination
held in the combos array
Parameters
----------
combos : numpy.ndarray
pairwise : numpy.ndarray
distance_matrix : numpy.ndarray
"""
combos = np.array(combos)
# Create a list of all pairwise combinations for each combo in combos
combo_list = combos[
:,
pairwise[
:,
],
]
addresses = (combo_list[:, :, 1], combo_list[:, :, 0])
all_distances = distance_matrix[addresses]
new_scores = np.sqrt(np.einsum("ij,ij->i", all_distances, all_distances))
return new_scores
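# For illustration: for a combo of trajectories (i, j, k) the score is the
# Euclidean norm of its pairwise distances, sqrt(d(i,j)**2 + d(i,k)**2 + d(j,k)**2);
# the einsum above evaluates this for a whole chunk of combos at once.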
def find_maximum(self, scores, N, k_choices):
"""Finds the `k_choices` maximum scores from `scores`
Parameters
----------
scores : numpy.ndarray
N : int
k_choices : int
Returns
-------
list
"""
if not isinstance(scores, np.ndarray):
raise TypeError("Scores input is not a numpy array")
index_of_maximum = int(scores.argmax())
maximum_combo = self.nth(
combinations(range(N), k_choices), index_of_maximum, None
)
return sorted(maximum_combo)
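# Note: the scores array is laid out in the same order as
# combinations(range(N), k_choices), so the argmax index maps back to its
# trajectory combination by advancing the combinations iterator (see nth() below).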
@staticmethod
def nth(iterable, n, default=None):
"""Returns the nth item or a default value
Parameters
----------
iterable : iterable
n : int
default : default=None
The default value to return
"""
if type(n) != int:
raise TypeError("n is not an integer")
return next(islice(iterable, n, None), default) | null |
pop | import time
from timeit import default_timer
import logging
logger = logging.getLogger(__file__)
from collections import OrderedDict, Counter
def isnotebook():
""" returns True if Jupyter notebook is runnung """
# from https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
def time():
""" returns the time. used instead of time.time for rapid portability"""
return default_timer()
def get_unique_name_list_from_class_list(cls_list):
"""
returns a list of names using cls.name if unique or cls.name1, cls.name2... otherwise.
Order of the name list matches order of cls_list, such that iterating over zip(cls_list, name_list) is OK
"""
# cls_list is typically
# cls_modules = [rp.HK, rp.AMS, rp.Scope, rp.Sampler, rp.Asg1, rp.Asg2] + \
# [rp.AuxOutput] * 2 + [rp.IQ] * 3 + [rp.Pid] * 4 + [rp.IIR]
# first, map from list of classes to a list of corresponding names
# e.g. all_names = ['hk, ..., 'pwm', 'pwm', ...
all_names = [cls.__name__.lower() for cls in cls_list]
final_names = []
for name in all_names:
# how many times does the name occur?
occurences = all_names.count(name)
if occurences == 1:
# for single names, leave as-is
final_names.append(name)
else:
# for multiple names, assign name+str(lowest_free_number)
for i in range(occurences):
if not name+str(i) in final_names:
final_names.append(name+str(i))
break
return final_names
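# For illustration (hypothetical classes): cls_list = [Pid, Pid, Scope]
# yields ['pid0', 'pid1', 'scope'].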
def get_class_name_from_module_name(module_name):
""" returns the class name corresponding to a module_name """
return module_name[0].upper() + (module_name[1:]).rstrip('1234567890')
def get_base_module_class(module):
""" returns the base class of module that has the same name as module """
base_module_class_name = get_class_name_from_module_name(module.name)
for base_module_class in type(module).__mro__:
if base_module_class.__name__ == base_module_class_name:
return base_module_class
# see http://stackoverflow.com/questions/3862310/how-can-i-find-all-subclasses-of-a-class-given-its-name
def all_subclasses(cls):
""" returns a list of all subclasses of cls """
return cls.__subclasses__() + [g for s in cls.__subclasses__()
for g in all_subclasses(s)]
def recursive_getattr(root, path):
""" returns root.path (i.e. root.attr1.attr2) """
attribute = root
for name in path.split('.'):
if name != "":
attribute = getattr(attribute, name)
return attribute
def recursive_setattr(root, path, value):
""" returns root.path = value (i.e. root.attr1.attr2 = value) """
attribute = root
names = path.split('.')
for name in names[:-1]:
attribute = getattr(attribute, name)
setattr(attribute, names[-1], value)
def setloglevel(level='info', loggername='pyrpl'):
""" sets the log level to the one specified in config file"""
try:
loglevels = {"notset": logging.NOTSET,
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL}
level = loglevels[level]
except:
pass
else:
logging.getLogger(name=loggername).setLevel(level)
class DuplicateFilter(logging.Filter):
"""
Prevent multiple repeated logging messages from polluting the console
"""
def filter(self, record):
# add other fields if you need more granular comparison, depends on your app
current_log = (record.module, record.levelno, record.msg)
if current_log != getattr(self, "last_log", None):
self.last_log = current_log
return True
return False
def sorted_dict(dict_to_sort=None, sort_by_values=True, **kwargs):
if dict_to_sort is None:
dict_to_sort = kwargs
if not sort_by_values:
return OrderedDict(sorted(dict_to_sort.items()))
else:
return OrderedDict(sorted(dict_to_sort.items(), key=lambda x: x[1]))
def update_with_typeconversion(dictionary, update):
for k, v in update.items():
if k in dictionary:
# perform type conversion if appropriate
v = type(dictionary[k])(v)
dictionary[k] = v
return dictionary
def unique_list(nonunique_list):
""" Returns a list where each element of nonunique_list occurs exactly once.
The last occurrence of an element defines its position in the returned list.
"""
unique_list = []
for attr in reversed(nonunique_list):
# only keep the last occurrence of each element
if attr not in unique_list:
unique_list.insert(0, attr)
return unique_list
class Bijection(dict):
""" This class defines a bijection object based on dict
It can be used exactly like dict, but additionally has a property
'inverse' which contains the inverted {value: key} dict. """
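# Usage sketch: b = Bijection({'a': 1}); b['b'] = 2
# => b.inverse == {1: 'a', 2: 'b'}; deleting entries keeps the inverse in sync.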
def __init__(self, *args, **kwargs):
super(Bijection, self).__init__(*args, **kwargs)
self.inverse = {v: k for k, v in self.items()}
def __setitem__(self, key, value):
super(Bijection, self).__setitem__(key, value)
self.inverse[value] = key
def __delitem__(self, key):
self.inverse.__delitem__(self.__getitem__(key))
super(Bijection, self).__delitem__(key)
def METHOD_NAME(self, key):
self.inverse.METHOD_NAME(self.__getitem__(key))
super(Bijection, self).METHOD_NAME(key)
def update(self, *args, **kwargs):
super(Bijection, self).update(*args, **kwargs)
self.inverse = {v: k for k, v in self.items()} | null |
test cov args with cov bin | # Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Consolidate test configuration from command-line and environment.
"""
from __future__ import annotations
from datetime import timedelta
from pathlib import Path
from legate.tester import FeatureType
from legate.tester.config import Config
from legate.tester.stages import test_stage as m
from legate.tester.stages.util import Shard, StageResult, StageSpec
from legate.tester.test_system import ProcessResult, TestSystem as _TestSystem
from legate.util.types import ArgList, EnvDict
from . import FakeSystem
s = FakeSystem()
class MockTestStage(m.TestStage):
kind: FeatureType = "eager"
name = "mock"
args = ["-foo", "-bar"]
def __init__(self, config: Config, system: _TestSystem) -> None:
self._init(config, system)
def compute_spec(self, config: Config, system: _TestSystem) -> StageSpec:
shards = [Shard([(0,)]), Shard([(1,)]), Shard([(2,)])]
return StageSpec(2, shards)
def shard_args(self, shard: Shard, config: Config) -> ArgList:
return []
def env(self, config: Config, system: _TestSystem) -> EnvDict:
return {}
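# MockTestStage only supplies the abstract pieces (spec, shard args, env) so the
# TestStage base-class behaviour (intro/outro, file_args, cov_args) can be
# exercised below without a real feature stage.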
class TestTestStage:
def test_name(self) -> None:
c = Config([])
stage = MockTestStage(c, s)
assert stage.name == "mock"
def test_intro(self) -> None:
c = Config([])
stage = MockTestStage(c, s)
assert "Entering stage: mock" in stage.intro
def test_outro(self) -> None:
c = Config([])
stage = MockTestStage(c, s)
stage.result = StageResult(
[ProcessResult("invoke", Path("test/file"))],
timedelta(seconds=2.123),
)
outro = stage.outro
assert "Exiting stage: mock" in outro
assert "Passed 1 of 1 tests (100.0%)" in outro
assert "2.123" in outro
def test_file_args_default(self) -> None:
c = Config([])
stage = MockTestStage(c, s)
assert stage.file_args(Path("integration/foo"), c) == []
assert stage.file_args(Path("unit/foo"), c) == []
def test_file_args_v(self) -> None:
c = Config(["test.py", "-v"])
stage = MockTestStage(c, s)
assert stage.file_args(Path("integration/foo"), c) == ["-v"]
assert stage.file_args(Path("unit/foo"), c) == []
def test_file_args_vv(self) -> None:
c = Config(["test.py", "-vv"])
stage = MockTestStage(c, s)
assert stage.file_args(Path("integration/foo"), c) == ["-v", "-s"]
assert stage.file_args(Path("unit/foo"), c) == []
def test_cov_args_without_cov_bin(self) -> None:
c = m.Config(["test.py", "--cov-args", "run -a"])
stage = MockTestStage(c, s)
assert stage.cov_args(c) == []
def METHOD_NAME(self) -> None:
cov_bin = "conda/envs/legate/bin/coverage"
args = ["--cov-bin", cov_bin]
c = m.Config(["test.py"] + args)
expected_result = [cov_bin] + c.cov_args.split()
stage = MockTestStage(c, s)
assert stage.cov_args(c) == expected_result
def test_cov_args_with_cov_bin_args_and_src_path(self) -> None:
cov_bin = "conda/envs/legate/bin/coverage"
cov_args = "run -a"
cov_src_path = "source_path"
args = (
["--cov-bin", cov_bin]
+ ["--cov-args", cov_args]
+ ["--cov-src-path", cov_src_path]
)
c = m.Config(["test.py"] + args)
expected_result = (
[cov_bin] + cov_args.split() + ["--source", cov_src_path]
)
stage = MockTestStage(c, s)
assert stage.cov_args(c) == expected_result | null |
test download json | import json
from functools import partial
from unittest import mock
import pytest
from yapapi.ctx import WorkContext
from yapapi.script import Script
class TestWorkContext:
@pytest.fixture(autouse=True)
def setUp(self):
self._on_download_executed = False
@staticmethod
def _get_work_context(storage=None):
return WorkContext(mock.Mock(), mock.Mock(), storage=storage, emitter=mock.Mock())
@staticmethod
def _assert_dst_path(script: Script, dst_path):
batch = script._evaluate()
transfer_cmd = [cmd for cmd in batch if "transfer" in cmd][0]
assert transfer_cmd["transfer"]["to"] == f"container:{dst_path}"
@staticmethod
def _assert_src_path(script: Script, src_path):
batch = script._evaluate()
transfer_cmd = [cmd for cmd in batch if "transfer" in cmd][0]
assert transfer_cmd["transfer"]["from"] == f"container:{src_path}"
async def _on_download(self, expected, data: bytes):
assert data == expected
self._on_download_executed = True
@pytest.mark.asyncio
async def test_upload_json(self):
storage = mock.AsyncMock()
dst_path = "/test/path"
data = {
"param": "value",
}
ctx = self._get_work_context(storage)
script = ctx.new_script()
script.upload_json(data, dst_path)
await script._before()
storage.upload_bytes.assert_called_with(json.dumps(data).encode("utf-8"))
self._assert_dst_path(script, dst_path)
@pytest.mark.asyncio
async def test_upload_bytes(self):
storage = mock.AsyncMock()
dst_path = "/test/path"
data = b"some byte string"
ctx = self._get_work_context(storage)
script = ctx.new_script()
script.upload_bytes(data, dst_path)
await script._before()
storage.upload_bytes.assert_called_with(data)
self._assert_dst_path(script, dst_path)
@pytest.mark.asyncio
async def test_download_bytes(self):
expected = b"some byte string"
storage = mock.AsyncMock()
storage.new_destination.return_value.download_bytes.return_value = expected
src_path = "/test/path"
ctx = self._get_work_context(storage)
script = ctx.new_script()
script.download_bytes(src_path, partial(self._on_download, expected))
await script._before()
await script._after()
self._assert_src_path(script, src_path)
assert self._on_download_executed
@pytest.mark.asyncio
async def METHOD_NAME(self):
expected = {"key": "val"}
storage = mock.AsyncMock()
storage.new_destination.return_value.download_bytes.return_value = json.dumps(
expected
).encode("utf-8")
src_path = "/test/path"
ctx = self._get_work_context(storage)
script = ctx.new_script()
script.download_json(src_path, partial(self._on_download, expected))
await script._before()
await script._after()
self._assert_src_path(script, src_path)
assert self._on_download_executed
@pytest.mark.parametrize(
"args",
(
("foo", 42),
(),
),
)
def test_start(self, args):
ctx = self._get_work_context()
script = ctx.new_script()
script.start(*args)
batch = script._evaluate()
assert batch == [{"start": {"args": args}}]
@pytest.mark.parametrize(
"kwargs",
(
{"foo": 42},
{},
),
)
def test_deploy(self, kwargs):
ctx = self._get_work_context()
script = ctx.new_script()
script.deploy(**kwargs)
batch = script._evaluate()
assert batch == [{"deploy": kwargs}]
def test_terminate(self):
ctx = self._get_work_context(None)
script = ctx.new_script()
script.terminate()
batch = script._evaluate()
assert batch == [{"terminate": {}}] | null |
parse output | #v 1.0
import sys, os, time, re
import urllib, urllib2
from optparse import OptionParser
from HTMLParser import HTMLParser
from xml.dom.minidom import parseString
#id mapping
def getPDBIdandChain(uniprotID):
#data from http://www.bioinf.org.uk/pdbsws/
dirPath = os.path.dirname(sys.argv[0])
fileName ='pdb_uniprot_chain_map.lst.2'
if len(dirPath) != 0:
fileName = dirPath + "/pdb_uniprot_chain_map.lst.2"
fileName = fileName.replace('\\', '/')
f = open (fileName, 'r')
pdbId = ''
chain = ''
while(1):
line = f.readline()
if not line:
break
line = line.rstrip('\n')
columns = line.split()
if len(columns) != 3:
continue
if columns[2] == uniprotID:
pdbId = columns[0]
chain = columns[1]
f.close()
return pdbId, chain
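# The mapping file read above is expected to hold three whitespace-separated
# columns per line: <pdb_id> <chain> <uniprot_id>; e.g. a (hypothetical) line
# "1abc A P12345" maps UniProt P12345 to PDB entry 1abc, chain A.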
# Redirect handler that prevents urllib2 from following HTTP redirects
class NoRedirectHandler(urllib2.HTTPRedirectHandler):
def http_error_302(self, req, fp, code, msg, headers):
infourl = urllib.addinfourl(fp, headers, req.get_full_url())
infourl.status = code
infourl.code = code
return infourl
http_error_300 = http_error_302
http_error_301 = http_error_302
http_error_303 = http_error_302
http_error_307 = http_error_302
def prepare_optparser():
"""Prepare optparser object. New options will be added in this
function first.
"""
usage = "usage: %prog <-p STRING -c STRING -m STRING> [options]"
description = "SNP2PDBSite."
optparser = OptionParser(version="%prog v1.0", description=description, usage=usage, add_help_option=False)
optparser.add_option("-h","--help",action="help",help="Show this help message and exit.")
optparser.add_option("-p","--protId",dest="protId",type="string",
help="Enter protein (uniprot) ID.")
optparser.add_option("-c", "--chain", dest="chain",type="string",
help="Enter chain.")
optparser.add_option("-m", "--mut", dest="mutation",type="string",
help="Enter mutation.")
return optparser
def opt_validate(optparser):
"""Validate options from a OptParser object.
Return: Validated options object.
"""
(options,args) = optparser.parse_args()
if not (options.protId and options.chain and options.mutation):
optparser.print_help()
sys.exit(1)
if '"' in options.protId:
options.protId = options.protId.replace('"','')
if '"' in options.mutation:
options.mutation = options.mutation.replace('"','')
id, chain = getPDBIdandChain(options.protId)
if not (id and chain):
sys.exit(1)
options.chain = chain
options.protId = id
mutList = list(options.mutation)
mutList[0] = chain
options.mutation = "".join(mutList)
return options
class Snp2pdbsite:
def __init__(self, options):
self.protId = options.protId
if not options.chain:
options.chain = ""
self.chain = options.chain
if not options.mutation:
options.mutation = ""
self.mutation = options.mutation
self.url = 'http://www-bionet.sscc.ru/psd2/rest.php?tool=snp2pdbsite'
def sendQuery(self):
opener = urllib2.build_opener(NoRedirectHandler())
urllib2.install_opener(opener)
# do POST
params = urllib.urlencode({'pdb': self.protId, 'chain': self.chain, 'mut': self.mutation})
req = urllib2.Request(self.url, params)
rsp = urllib2.urlopen(req)
content = rsp.read()
res = content.find('qid=')
content = content[res+5:]
res = content.find('"')
content = content[:res]
self.url = "http://www-bionet.sscc.ru/psd2/rest.php?tool=snp2pdbsite&q=%s" %content
self.getResult()
def getResult(self):
opener = urllib2.build_opener(NoRedirectHandler())
urllib2.install_opener(opener)
content = "working"
while content.find('working') >= 0:
req = urllib2.Request(self.url)
rsp = urllib2.urlopen(req)
content = rsp.read()
time.sleep(0.1)
self.METHOD_NAME(content)
def METHOD_NAME(self, content):
if len(content) == 0:
print "Result is empty"
sys.exit(1)
xmldoc = parseString(content)
itemlist = xmldoc.getElementsByTagName('aa')
if itemlist.length <= 0:
print "Result is empty"
sys.exit(1)
for item in itemlist:
print "PDB_SITE" + ':' + item.getAttribute("pos") + item.getAttribute("aa") + ';'
def main():
opts=opt_validate(prepare_optparser())
g = Snp2pdbsite(opts)
g.sendQuery()
if __name__ == "__main__":
main()
| null |
yield all throttles from module | from importlib import import_module
from unittest import mock
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.test.client import RequestFactory
from django.test.utils import override_settings
import pytest
from freezegun import freeze_time
from rest_framework.test import APIRequestFactory, force_authenticate
from rest_framework.throttling import BaseThrottle
from rest_framework.viewsets import GenericViewSet
from olympia import amo
from olympia.activity.models import ActivityLog
from olympia.amo.tests import TestCase, user_factory
from olympia.api.throttling import GranularIPRateThrottle, GranularUserRateThrottle
def find_all_throttle_classes():
def METHOD_NAME(module):
for name in dir(module):
item = getattr(module, name)
if hasattr(item, 'mro'):
mro = item.mro()
# Our own throttle classes we've defined.
if BaseThrottle in mro and item.__module__.startswith('olympia.'):
yield item
# Every throttle class referenced in viewsets.
if GenericViewSet in mro:
for throttle in getattr(item, 'throttle_classes', []):
if throttle:
yield throttle
def yield_all_throttles():
for app in settings.INSTALLED_APPS:
if not app.startswith('olympia'):
continue
for module_name in ('throttling', 'views'):
try:
module = import_module(f'{app}.{module_name}')
yield from METHOD_NAME(module)
except ModuleNotFoundError:
continue
return sorted(set(yield_all_throttles()), key=lambda x: x.__name__)
@pytest.mark.parametrize('throttle_class', find_all_throttle_classes())
def test_ensure_throttles_inherit_from_granular_user_rate_throttle(throttle_class):
# All throttling classes we can find in addons-server should also be
# children of GranularUserRateThrottle.
assert GranularUserRateThrottle in throttle_class.mro()
class TestGranularUserRateThrottle(TestCase):
def setUp(self):
self.throttle = GranularUserRateThrottle()
def test_backwards_compatible_format(self):
# test that the original DRF rate string format x/timeperiod works
assert self.throttle.parse_rate('1/minute') == (1, 60)
assert self.throttle.parse_rate('24/s') == (24, 1)
assert self.throttle.parse_rate('456/hour') == (456, 3600)
def test_granular_format(self):
assert self.throttle.parse_rate('1/5minute') == (1, 60 * 5)
assert self.throttle.parse_rate('24/1s') == (24, 1)
assert self.throttle.parse_rate('456/7hour') == (456, 7 * 3600)
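# i.e. parse_rate() returns (number_of_requests, period_in_seconds), so
# '456/7hour' allows at most 456 requests per 7 hours.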
@mock.patch('rest_framework.throttling.UserRateThrottle.allow_request')
def test_allow_request_if_api_throttling_setting_is_false(self, allow_request_mock):
request = RequestFactory().get('/test')
view = object()
# Pretend the parent class would always throttle requests if called.
allow_request_mock.return_value = False
# With the setting set to True (the default), throttle as normal.
assert settings.API_THROTTLING is True
assert self.throttle.allow_request(request, view) is False
assert allow_request_mock.call_count == 1
# With the setting set to False, ignore throttling.
with override_settings(API_THROTTLING=False):
assert settings.API_THROTTLING is False
assert self.throttle.allow_request(request, view) is True
# The parent class hasn't been called an additional time.
assert allow_request_mock.call_count == 1
# And again set to True to be sure.
assert settings.API_THROTTLING is True
assert self.throttle.allow_request(request, view) is False
assert allow_request_mock.call_count == 2
@mock.patch('rest_framework.throttling.UserRateThrottle.allow_request')
def test_bypass_if_user_has_permission(self, allow_request_mock):
request = RequestFactory().get('/test')
view = object()
# Pretend the parent class would always throttle requests if called.
allow_request_mock.return_value = False
# No user: throttle as normal.
assert self.throttle.allow_request(request, view) is False
assert allow_request_mock.call_count == 1
# AnonymousUser: throttle as normal.
request.user = AnonymousUser()
allow_request_mock.reset_mock()
assert self.throttle.allow_request(request, view) is False
assert allow_request_mock.call_count == 1
# Regular user: throttle as normal.
request.user = user_factory()
allow_request_mock.reset_mock()
assert self.throttle.allow_request(request, view) is False
assert allow_request_mock.call_count == 1
# User with the right permission: bypass throttling.
self.grant_permission(request.user, 'API:BypassThrottling')
allow_request_mock.reset_mock()
assert self.throttle.allow_request(request, view) is True
assert allow_request_mock.call_count == 0
@freeze_time(as_kwarg='frozen_time')
def test_freeze_time_works_with_throttling(self, frozen_time):
self.throttle = self.throttle.__class__()
old_time = self.throttle.timer()
frozen_time.move_to('2019-04-08 15:16:23.42')
assert self.throttle.timer() == 1554736583.42
assert old_time != 1554736583.42
@mock.patch('rest_framework.throttling.UserRateThrottle.allow_request')
def test_activity_log(self, allow_request_mock):
request = RequestFactory().get('/test')
view = object()
allow_request_mock.return_value = False
assert self.throttle.allow_request(request, view) is False
# Shouldn't be any ActivityLog since there was no user associated with
# that request.
assert not ActivityLog.objects.exists()
allow_request_mock.return_value = True
request.user = user_factory()
assert self.throttle.allow_request(request, view) is True
# Shouldn't be any ActivityLog since the request was not throttled.
assert not ActivityLog.objects.exists()
allow_request_mock.return_value = False
request.user = user_factory()
assert self.throttle.allow_request(request, view) is False
assert ActivityLog.objects.exists()
activity = ActivityLog.objects.get()
assert activity.action == amo.LOG.THROTTLED.id
assert activity.arguments == [self.throttle.scope]
assert activity.user == request.user
activity_str = str(activity)
assert '/user/%d/' % request.user.pk in activity_str
assert self.throttle.scope in activity_str
class TestGranularIPRateThrottle(TestGranularUserRateThrottle):
def setUp(self):
self.throttle = GranularIPRateThrottle()
def test_get_cache_key_returns_even_for_authenticated_users(self):
# Like DRF's AnonRateThrottleTests.test_authenticated_user_not_affected
# except that we should get a cache key regardless of whether the user
# is authenticated or not.
request = APIRequestFactory().get('/')
user = user_factory()
force_authenticate(request, user)
request.user = user
request.META['REMOTE_ADDR'] = '123.45.67.89'
expected_key = 'throttle_anon_123.45.67.89'
assert self.throttle.get_cache_key(request, view={}) == expected_key | null |
to str | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Submarine API
The Submarine REST API allows you to access Submarine resources such as experiments, environments, and notebooks. The API is hosted under the /v1 path on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.9.0-SNAPSHOT
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from submarine.client.configuration import Configuration
class GitCodeSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'branch': 'str',
'password': 'str',
'trust_certs': 'bool',
'url': 'str',
'username': 'str',
}
attribute_map = {
'branch': 'branch',
'password': 'password',
'trust_certs': 'trustCerts',
'url': 'url',
'username': 'username',
}
def __init__(
self,
branch=None,
password=None,
trust_certs=None,
url=None,
username=None,
local_vars_configuration=None,
): # noqa: E501
"""GitCodeSpec - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._branch = None
self._password = None
self._trust_certs = None
self._url = None
self._username = None
self.discriminator = None
if branch is not None:
self.branch = branch
if password is not None:
self.password = password
if trust_certs is not None:
self.trust_certs = trust_certs
if url is not None:
self.url = url
if username is not None:
self.username = username
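# Usage sketch (hypothetical values):
#   spec = GitCodeSpec(url='https://github.com/apache/submarine.git', branch='master')
#   spec.to_dict() -> {'branch': 'master', 'password': None, 'trust_certs': None,
#                      'url': 'https://github.com/apache/submarine.git', 'username': None}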
@property
def branch(self):
"""Gets the branch of this GitCodeSpec. # noqa: E501
:return: The branch of this GitCodeSpec. # noqa: E501
:rtype: str
"""
return self._branch
@branch.setter
def branch(self, branch):
"""Sets the branch of this GitCodeSpec.
:param branch: The branch of this GitCodeSpec. # noqa: E501
:type: str
"""
self._branch = branch
@property
def password(self):
"""Gets the password of this GitCodeSpec. # noqa: E501
:return: The password of this GitCodeSpec. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this GitCodeSpec.
:param password: The password of this GitCodeSpec. # noqa: E501
:type: str
"""
self._password = password
@property
def trust_certs(self):
"""Gets the trust_certs of this GitCodeSpec. # noqa: E501
:return: The trust_certs of this GitCodeSpec. # noqa: E501
:rtype: bool
"""
return self._trust_certs
@trust_certs.setter
def trust_certs(self, trust_certs):
"""Sets the trust_certs of this GitCodeSpec.
:param trust_certs: The trust_certs of this GitCodeSpec. # noqa: E501
:type: bool
"""
self._trust_certs = trust_certs
@property
def url(self):
"""Gets the url of this GitCodeSpec. # noqa: E501
:return: The url of this GitCodeSpec. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this GitCodeSpec.
:param url: The url of this GitCodeSpec. # noqa: E501
:type: str
"""
self._url = url
@property
def username(self):
"""Gets the username of this GitCodeSpec. # noqa: E501
:return: The username of this GitCodeSpec. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this GitCodeSpec.
:param username: The username of this GitCodeSpec. # noqa: E501
:type: str
"""
self._username = username
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.METHOD_NAME()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GitCodeSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, GitCodeSpec):
return True
return self.to_dict() != other.to_dict() | null |
outputs | """
component_wise_divide_fc
========================
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.METHOD_NAME import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class component_wise_divide_fc(Operator):
"""For every two fields with the same label space (from the two input
fields containers), computes component-wise fraction between two
fields of the same dimensionality. If one field's scoping has an
'overall' location, then this field's values are applied on the
other field entirely.
Parameters
----------
fields_containerA : FieldsContainer
fields_containerB : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> # Make input connections
>>> my_fields_containerA = dpf.FieldsContainer()
>>> op.inputs.fields_containerA.connect(my_fields_containerA)
>>> my_fields_containerB = dpf.FieldsContainer()
>>> op.inputs.fields_containerB.connect(my_fields_containerB)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.math.component_wise_divide_fc(
... fields_containerA=my_fields_containerA,
... fields_containerB=my_fields_containerB,
... )
>>> # Get output data
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(
self, fields_containerA=None, fields_containerB=None, config=None, server=None
):
super().__init__(name="component_wise_divide_fc", config=config, server=server)
self._inputs = InputsComponentWiseDivideFc(self)
self._outputs = OutputsComponentWiseDivideFc(self)
if fields_containerA is not None:
self.inputs.fields_containerA.connect(fields_containerA)
if fields_containerB is not None:
self.inputs.fields_containerB.connect(fields_containerB)
@staticmethod
def _spec():
description = """For every two fields with the same label space (from the two input
fields containers), computes component-wise fraction
between two fields of same dimensionality. If one field's
scoping has an 'overall' location, then this field's
values are applied on the other field entirely."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="fields_containerA",
type_names=["fields_container"],
optional=False,
document="""""",
),
1: PinSpecification(
name="fields_containerB",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="fields_container",
type_names=["fields_container"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
This config can then be changed to the user needs and be used to
instantiate the operator. The Configuration allows to customize
how the operation will be processed by the operator.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="component_wise_divide_fc", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsComponentWiseDivideFc
"""
return super().inputs
@property
def METHOD_NAME(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsComponentWiseDivideFc
"""
return super().METHOD_NAME
class InputsComponentWiseDivideFc(_Inputs):
"""Intermediate class used to connect user inputs to
component_wise_divide_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> my_fields_containerA = dpf.FieldsContainer()
>>> op.inputs.fields_containerA.connect(my_fields_containerA)
>>> my_fields_containerB = dpf.FieldsContainer()
>>> op.inputs.fields_containerB.connect(my_fields_containerB)
"""
def __init__(self, op: Operator):
super().__init__(component_wise_divide_fc._spec().inputs, op)
self._fields_containerA = Input(
component_wise_divide_fc._spec().input_pin(0), 0, op, -1
)
self._inputs.append(self._fields_containerA)
self._fields_containerB = Input(
component_wise_divide_fc._spec().input_pin(1), 1, op, -1
)
self._inputs.append(self._fields_containerB)
@property
def fields_containerA(self):
"""Allows to connect fields_containerA input to the operator.
Parameters
----------
my_fields_containerA : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> op.inputs.fields_containerA.connect(my_fields_containerA)
>>> # or
>>> op.inputs.fields_containerA(my_fields_containerA)
"""
return self._fields_containerA
@property
def fields_containerB(self):
"""Allows to connect fields_containerB input to the operator.
Parameters
----------
my_fields_containerB : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> op.inputs.fields_containerB.connect(my_fields_containerB)
>>> # or
>>> op.inputs.fields_containerB(my_fields_containerB)
"""
return self._fields_containerB
class OutputsComponentWiseDivideFc(_Outputs):
"""Intermediate class used to get outputs from
component_wise_divide_fc operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
"""
def __init__(self, op: Operator):
super().__init__(component_wise_divide_fc._spec().METHOD_NAME, op)
self._fields_container = Output(
component_wise_divide_fc._spec().output_pin(0), 0, op
)
self._outputs.append(self._fields_container)
@property
def fields_container(self):
"""Allows to get fields_container output of the operator
Returns
----------
my_fields_container : FieldsContainer
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.math.component_wise_divide_fc()
>>> # Connect inputs : op.inputs. ...
>>> result_fields_container = op.outputs.fields_container()
""" # noqa: E501
return self._fields_container | null |
add to report | # Copyright (c) Facebook, Inc. and its affiliates.
import csv
import json
import logging
import os
import warnings
from dataclasses import dataclass, field
from typing import List
import pytorch_lightning as pl
from mmf.common.registry import registry
from mmf.common.sample import convert_batch_to_sample_list
from mmf.utils.configuration import get_mmf_env
from mmf.utils.distributed import gather_tensor, is_main
from mmf.utils.file_io import PathManager
from mmf.utils.general import ckpt_name_from_core_args, foldername_from_config_override
from mmf.utils.logger import log_class_usage
from mmf.utils.timer import Timer
from omegaconf import OmegaConf
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
DEFAULT_CANDIDATE_FIELDS = [
"id",
"question_id",
"image_id",
"context_tokens",
"captions",
"scores",
]
@registry.register_test_reporter("file")
@registry.register_test_reporter("default")
class TestReporter(Dataset):
@dataclass
class Config:
# A set of fields to be *considered* for exporting by the reporter
# Note that `format_for_prediction` is what ultimately determines the
# exported fields
candidate_fields: List[str] = field(
default_factory=lambda: DEFAULT_CANDIDATE_FIELDS
)
# csv or json
predict_file_format: str = "json"
def __init__(
self,
datamodules: List[pl.LightningDataModule],
config: Config = None,
dataset_type: str = "train",
):
self.test_reporter_config = OmegaConf.merge(
OmegaConf.structured(self.Config), config
)
self.datamodules = datamodules
self.dataset_type = dataset_type
self.config = registry.get("config")
self.report = []
self.timer = Timer()
self.training_config = self.config.training
self.num_workers = self.training_config.num_workers
self.batch_size = self.training_config.batch_size
self.report_folder_arg = get_mmf_env(key="report_dir")
self.experiment_name = self.training_config.experiment_name
self.current_datamodule_idx = -1
self.dataset_names = list(self.datamodules.keys())
self.current_datamodule = self.datamodules[
self.dataset_names[self.current_datamodule_idx]
]
self.current_dataloader = None
self.save_dir = get_mmf_env(key="save_dir")
self.report_folder = ckpt_name_from_core_args(self.config)
self.report_folder += foldername_from_config_override(self.config)
self.report_folder = os.path.join(self.save_dir, self.report_folder)
self.report_folder = os.path.join(self.report_folder, "reports")
if self.report_folder_arg:
self.report_folder = self.report_folder_arg
self.candidate_fields = self.test_reporter_config.candidate_fields
PathManager.mkdirs(self.report_folder)
log_class_usage("TestReporter", self.__class__)
@property
def current_dataset(self):
self._check_current_dataloader()
return self.current_dataloader.dataset
def next_dataset(self, flush_report=True):
if self.current_datamodule_idx >= 0:
if flush_report:
self.flush_report()
else:
self.report = []
self.current_datamodule_idx += 1
if self.current_datamodule_idx == len(self.datamodules):
return False
else:
self.current_datamodule = self.datamodules[
self.dataset_names[self.current_datamodule_idx]
]
logger.info(
f"Predicting for {self.dataset_names[self.current_datamodule_idx]}"
)
return True
def flush_report(self):
if not is_main():
# Empty report in all processes to avoid any leaks
self.report = []
return
name = self.current_datamodule.dataset_name
time_format = "%Y-%m-%dT%H:%M:%S"
time = self.timer.get_time_hhmmss(None, format=time_format)
filename = name + "_"
if len(self.experiment_name) > 0:
filename += self.experiment_name + "_"
filename += self.dataset_type + "_"
filename += time
use_csv_writer = (
self.config.evaluation.predict_file_format == "csv"
or self.test_reporter_config.predict_file_format == "csv"
)
if use_csv_writer:
filepath = os.path.join(self.report_folder, filename + ".csv")
self.csv_dump(filepath)
else:
filepath = os.path.join(self.report_folder, filename + ".json")
self.json_dump(filepath)
logger.info(f"Wrote predictions for {name} to {os.path.abspath(filepath)}")
self.report = []
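# e.g. for dataset "vqa2", experiment "my_exp" and dataset_type "val" this
# writes <report_folder>/vqa2_my_exp_val_<%Y-%m-%dT%H:%M:%S>.json (or .csv);
# the names here are illustrative only.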
def postprocess_dataset_report(self):
self._check_current_dataloader()
if hasattr(self.current_dataset, "on_prediction_end"):
self.report = self.current_dataset.on_prediction_end(self.report)
def csv_dump(self, filepath):
with PathManager.open(filepath, "w") as f:
title = self.report[0].keys()
cw = csv.DictWriter(f, title, delimiter=",", quoting=csv.QUOTE_MINIMAL)
cw.writeheader()
cw.writerows(self.report)
def json_dump(self, filepath):
with PathManager.open(filepath, "w") as f:
json.dump(self.report, f)
def get_dataloader(self):
self.current_dataloader = getattr(
self.current_datamodule, f"{self.dataset_type}_dataloader"
)()
# Make sure to assign dataset to dataloader object as
# required by MMF
if not hasattr(self.current_dataloader, "dataset"):
self.current_dataloader.dataset = getattr(
self.current_datamodule, f"{self.dataset_type}_dataset"
)
return self.current_dataloader
def prepare_batch(self, batch):
self._check_current_dataloader()
if hasattr(self.current_dataset, "prepare_batch"):
batch = self.current_dataset.prepare_batch(batch)
batch = convert_batch_to_sample_list(batch)
batch.dataset_name = self.current_dataset.dataset_name
batch.dataset_type = self.dataset_type
return batch
def __len__(self):
self._check_current_dataloader()
return len(self.current_dataloader)
def _check_current_dataloader(self):
assert self.current_dataloader is not None, (
"Please call `get_dataloader` before accessing any "
+ "'current_dataloader' based function"
)
def METHOD_NAME(self, report, model, *args, **kwargs):
if "execute_on_master_only" in kwargs:
warnings.warn(
"'execute_on_master_only keyword is deprecated and isn't used anymore",
DeprecationWarning,
)
self._check_current_dataloader()
for key in self.candidate_fields:
report = self.reshape_and_gather(report, key)
results = []
if hasattr(self.current_dataset, "format_for_prediction"):
results = self.current_dataset.format_for_prediction(report)
if hasattr(model, "format_for_prediction"):
results = model.format_for_prediction(results, report)
elif hasattr(model.module, "format_for_prediction"):
results = model.module.format_for_prediction(results, report)
self.report = self.report + results
def reshape_and_gather(self, report, key):
if key in report:
num_dims = report[key].dim()
if num_dims == 1:
report[key] = gather_tensor(report[key]).view(-1)
elif num_dims >= 2:
# Collect dims other than batch
other_dims = report[key].size()[1:]
report[key] = gather_tensor(report[key]).view(-1, *other_dims)
return report | null |
toggle logging | # Copyright 2015-2021 Espressif Systems (Shanghai) CO LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import os
import re
from typing import BinaryIO, Callable, Optional, Union # noqa: F401
from serial.tools import miniterm # noqa: F401
from .constants import MATCH_PCADDR
from .output_helpers import lookup_pc_address, red_print, yellow_print
class Logger:
def __init__(self, elf_file, console, timestamps, timestamp_format, pc_address_buffer, enable_address_decoding,
toolchain_prefix):
# type: (str, miniterm.Console, bool, str, bytes, bool, str) -> None
self.log_file = None # type: Optional[BinaryIO]
self._output_enabled = True # type: bool
self.elf_file = elf_file
self.console = console
self.timestamps = timestamps
self.timestamp_format = timestamp_format
self._pc_address_buffer = pc_address_buffer
self.enable_address_decoding = enable_address_decoding
self.toolchain_prefix = toolchain_prefix
@property
def pc_address_buffer(self): # type: () -> bytes
return self._pc_address_buffer
@pc_address_buffer.setter
def pc_address_buffer(self, value): # type: (bytes) -> None
self._pc_address_buffer = value
@property
def output_enabled(self): # type: () -> bool
return self._output_enabled
@output_enabled.setter
def output_enabled(self, value): # type: (bool) -> None
self._output_enabled = value
@property
def log_file(self): # type: () -> Optional[BinaryIO]
return self._log_file
@log_file.setter
def log_file(self, value): # type: (Optional[BinaryIO]) -> None
self._log_file = value
def METHOD_NAME(self): # type: () -> None
if self._log_file:
self.stop_logging()
else:
self.start_logging()
def toggle_timestamps(self): # type: () -> None
self.timestamps = not self.timestamps
def start_logging(self): # type: () -> None
if not self._log_file:
name = 'log.{}.{}.txt'.format(os.path.splitext(os.path.basename(self.elf_file))[0],
datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
try:
self.log_file = open(name, 'wb+')
yellow_print('\nLogging is enabled into file {}'.format(name))
except Exception as e: # noqa
red_print('\nLog file {} cannot be created: {}'.format(name, e))
def stop_logging(self): # type: () -> None
if self._log_file:
try:
name = self._log_file.name
self._log_file.close()
yellow_print('\nLogging is disabled and file {} has been closed'.format(name))
except Exception as e: # noqa
red_print('\nLog file cannot be closed: {}'.format(e))
finally:
self._log_file = None
def print(self, string, console_printer=None): # noqa: E999
# type: (Union[str, bytes], Optional[Callable]) -> None
if console_printer is None:
console_printer = self.console.write_bytes
if self.timestamps and (self._output_enabled or self._log_file):
t = datetime.datetime.now().strftime(self.timestamp_format)
# "string" is not guaranteed to be a full line. Timestamps should be only at the beginning of lines.
if isinstance(string, type(u'')):
search_patt = '\n'
replacement = '\n' + t + ' '
else:
search_patt = b'\n' # type: ignore
replacement = b'\n' + t.encode('ascii') + b' ' # type: ignore
string = string.replace(search_patt, replacement) # type: ignore
if self._output_enabled:
console_printer(string)
if self._log_file:
try:
if isinstance(string, type(u'')):
string = string.encode() # type: ignore
self._log_file.write(string) # type: ignore
except Exception as e:
red_print('\nCannot write to file: {}'.format(e))
# don't fill-up the screen with the previous errors (probably consequent prints would fail also)
self.stop_logging()
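# For illustration: with timestamps enabled and timestamp_format
# '%Y-%m-%d %H:%M:%S', every newline in the incoming chunk gets the current
# timestamp inserted after it, so "...\nI (123) boot:" is emitted as
# "...\n2021-01-01 12:00:00 I (123) boot:" (example values only), both to the
# console and to the log file.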
def output_toggle(self): # type: () -> None
self.output_enabled = not self.output_enabled
yellow_print('\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.'.format(
self.output_enabled))
def handle_possible_pc_address_in_line(self, line): # type: (bytes) -> None
line = self._pc_address_buffer + line
self._pc_address_buffer = b''
if not self.enable_address_decoding:
return
for m in re.finditer(MATCH_PCADDR, line.decode(errors='ignore')):
translation = lookup_pc_address(m.group(), self.toolchain_prefix, self.elf_file)
if translation:
self.print(translation, console_printer=yellow_print) | null |