label
stringlengths 1
61
| code
stringlengths 4k
8k
|
---|---|
set up class | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <[email protected]> (c) 2017-2023
# ryanss <[email protected]> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.dominican_republic import DominicanRepublic, DO, DOM
from tests.common import TestCase
class TestDominicanRepublic(TestCase):
    """Holiday tests for the Dominican Republic entity (aliases DO / DOM)."""

    @classmethod
    def setUpClass(cls):
        # Restored from the METHOD_NAME placeholder: unittest only invokes
        # class-level setup through the setUpClass hook, so the placeholder
        # name prevented the test harness from initializing the entity.
        super().setUpClass(DominicanRepublic)

    def test_country_aliases(self):
        self.assertCountryAliases(DominicanRepublic, DO, DOM)

    def test_2020(self):
        self.assertHolidays(
            ("2020-01-01", "Año Nuevo"),
            ("2020-01-06", "Día de los Santos Reyes"),
            ("2020-01-21", "Día de la Altagracia"),
            ("2020-01-26", "Día de Duarte"),
            ("2020-02-27", "Día de Independencia"),
            ("2020-04-10", "Viernes Santo"),
            ("2020-05-04", "Día del Trabajo"),
            ("2020-06-11", "Corpus Christi"),
            ("2020-08-16", "Día de la Restauración"),
            ("2020-09-24", "Día de las Mercedes"),
            ("2020-11-09", "Día de la Constitución"),
            ("2020-12-25", "Día de Navidad"),
        )

    def test_2021(self):
        self.assertHolidays(
            ("2021-01-01", "Año Nuevo"),
            ("2021-01-04", "Día de los Santos Reyes"),
            ("2021-01-21", "Día de la Altagracia"),
            ("2021-01-25", "Día de Duarte"),
            ("2021-02-27", "Día de Independencia"),
            ("2021-04-02", "Viernes Santo"),
            ("2021-05-01", "Día del Trabajo"),
            ("2021-06-03", "Corpus Christi"),
            ("2021-08-16", "Día de la Restauración"),
            ("2021-09-24", "Día de las Mercedes"),
            ("2021-11-06", "Día de la Constitución"),
            ("2021-12-25", "Día de Navidad"),
        )

    def test_2022(self):
        self.assertHolidays(
            ("2022-01-01", "Año Nuevo"),
            ("2022-01-10", "Día de los Santos Reyes"),
            ("2022-01-21", "Día de la Altagracia"),
            ("2022-01-24", "Día de Duarte"),
            ("2022-02-27", "Día de Independencia"),
            ("2022-04-15", "Viernes Santo"),
            ("2022-05-02", "Día del Trabajo"),
            ("2022-06-16", "Corpus Christi"),
            ("2022-08-15", "Día de la Restauración"),
            ("2022-09-24", "Día de las Mercedes"),
            ("2022-11-06", "Día de la Constitución"),
            ("2022-12-25", "Día de Navidad"),
        )

    def test_movable(self):
        # Dates below pin which occurrences shift off their nominal day and
        # which do not — presumably the statutory nearest-Monday moves;
        # TODO(review): confirm against the DominicanRepublic implementation.
        self.assertHoliday(
            "1996-01-06",
            "1997-01-06",
            "1998-01-05",
            "1998-01-26",
            "1999-01-25",
            "1996-05-01",
            "1998-05-04",
            "1996-11-06",
            "1997-11-10",
            "2000-08-16",
            "2001-08-20",
        )
        self.assertNoHoliday(
            "1998-01-06",
            "1999-01-26",
            "1998-05-01",
            "1997-11-06",
            "2001-08-16",
        )

    def test_l10n_default(self):
        self.assertLocalizedHolidays(
            ("2022-01-01", "Año Nuevo"),
            ("2022-01-10", "Día de los Santos Reyes"),
            ("2022-01-21", "Día de la Altagracia"),
            ("2022-01-24", "Día de Duarte"),
            ("2022-02-27", "Día de Independencia"),
            ("2022-04-15", "Viernes Santo"),
            ("2022-05-02", "Día del Trabajo"),
            ("2022-06-16", "Corpus Christi"),
            ("2022-08-15", "Día de la Restauración"),
            ("2022-09-24", "Día de las Mercedes"),
            ("2022-11-06", "Día de la Constitución"),
            ("2022-12-25", "Día de Navidad"),
        )

    def test_l10n_en_us(self):
        self.assertLocalizedHolidays(
            "en_US",
            ("2022-01-01", "New Year's Day"),
            ("2022-01-10", "Epiphany"),
            ("2022-01-21", "Lady of Altagracia"),
            ("2022-01-24", "Juan Pablo Duarte Day"),
            ("2022-02-27", "Independence Day"),
            ("2022-04-15", "Good Friday"),
            ("2022-05-02", "Labor Day"),
            ("2022-06-16", "Feast of Corpus Christi"),
            ("2022-08-15", "Restoration Day"),
            ("2022-09-24", "Our Lady of Mercedes Day"),
            ("2022-11-06", "Constitution Day"),
            ("2022-12-25", "Christmas Day"),
        )

    def test_l10n_uk(self):
        self.assertLocalizedHolidays(
            "uk",
            ("2022-01-01", "Новий рік"),
            ("2022-01-10", "Богоявлення"),
            ("2022-01-21", "День Богоматері Альтаграсія"),
            ("2022-01-24", "День Дуарте"),
            ("2022-02-27", "День незалежності"),
            ("2022-04-15", "Страсна пʼятниця"),
            ("2022-05-02", "День праці"),
            ("2022-06-16", "Свято Тіла і Крові Христових"),
            ("2022-08-15", "День реставрації"),
            ("2022-09-24", "День Богоматері Милосердя"),
            ("2022-11-06", "День Конституції"),
            ("2022-12-25", "Різдво Христове"),
        )
str indent | import re, inspect, textwrap, pydoc
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from six import iteritems
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders docstring sections as Sphinx/reST."""

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # Sections become rubrics instead of underlined headers.
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        """Return *doc* (a list of lines) indented by *indent* spaces.

        Restored from the METHOD_NAME placeholder; only used internally by
        the rendering methods below.
        """
        return [' ' * indent + line for line in doc]

    def _str_signature(self):
        # The original body contained an unconditional ``return ['']``
        # followed by unreachable code rendering ``self['Signature']``; the
        # dead branch was removed and the effective behavior (no signature
        # output) is kept.
        return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        """Render a parameter-like section (Parameters, Returns, ...) as a field list."""
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    def _str_section(self, name):
        """Render a free-text section under a rubric, dedenting its content."""
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the base class's header lines, keep only the entries.
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        """Render the parsed ``index`` mapping as a Sphinx index directive."""
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out
        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in iteritems(idx):
            if section == 'default':
                continue
            elif section == 'refguide':
                out += [' single: %s' % (', '.join(references))]
            else:
                out += [' %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                # Normalize a single reference string to a one-element list.
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
        return out

    def __str__(self, indent=0, func_role="obj"):
        """Assemble all sections and indent the whole document by *indent*."""
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Attributes', 'Methods',
                           'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_section('Examples')
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """FunctionDoc parser combined with the Sphinx-flavoured renderer."""
    pass
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """ClassDoc parser combined with the Sphinx-flavoured renderer."""
    pass
def get_doc_object(obj, what=None, doc=None):
    """Return a Sphinx-aware doc object for *obj*.

    ``what`` overrides autodetection ('class', 'module', 'function',
    'method' or 'object'); ``doc`` overrides the docstring text.
    """
    if what is None:
        # Autodetect the kind of object being documented.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        else:
            what = 'function' if callable(obj) else 'object'
    if what == 'class':
        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, '', doc=doc)
    # Modules and plain objects: render the raw docstring text only.
    return SphinxDocString(pydoc.getdoc(obj) if doc is None else doc)
|
set mkl envs | #!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import subprocess as sp
DEFAULT_SEASTAR_PORT = "3333"
JEMALLOC_244 = "libjemalloc.so.2.4.4"
JEMALLOC_251 = "libjemalloc.so.2.5.1"


def gen_cluster_info(workspace):
    """Parse TF_CONFIG, write the seastar endpoint-map file, return cluster info.

    Returns ``(ps_hosts, worker_hosts, chief_hosts, job_name, task_index)``,
    or None when TF_CONFIG has no cluster/task section.  Side effects: sets
    TF_SEASTAR_ENDPOINT_MAP_PATH and writes ``/tmp/.endpoint_map``.
    """
    tf_config_json = os.environ.get("TF_CONFIG", "{}")
    print("TF_CONFIG=", tf_config_json)
    tf_config = json.loads(tf_config_json)
    cluster = tf_config.get("cluster", {})
    if not cluster:
        # BUG FIX: ``.get(..., {})`` never returns None, so the original
        # ``is None`` guard could not fire; test for emptiness instead so
        # the early return actually protects the code below.
        print("TF_CONFIG cluster is empty")
        return
    ps_hosts = []
    worker_hosts = []
    chief_hosts = []
    node_list = []
    for key, value in cluster.items():
        if "ps" == key:
            ps_hosts = value
        elif "worker" == key:
            worker_hosts = value
        elif "chief" == key:
            chief_hosts = value
        node_list.extend(value)
    os.environ['TF_SEASTAR_ENDPOINT_MAP_PATH'] = '/tmp/'
    print("Start to gen endpoint_map file.")
    # Every node is mapped host:port -> host:DEFAULT_SEASTAR_PORT.
    endpoint_map_path = "/tmp/.endpoint_map"
    with open(endpoint_map_path, 'w') as fout:
        for node in node_list:
            host = node[0:node.index(':')]
            fout.write(node + "=" + host + ":" + DEFAULT_SEASTAR_PORT + "\n")
    os.system("ls -ltr /tmp/.endpoint_map")
    task = tf_config.get("task", {})
    if not task:
        # Same dead-guard fix as for ``cluster`` above.
        print("TF_CONFIG task is empty")
        return
    task_index = task['index']
    job_name = task['type']
    return ps_hosts, worker_hosts, chief_hosts, job_name, task_index
def copy_python_binary(local_dir):
    """Copy the system python into *local_dir* as ``python_bin``; return cp's exit code."""
    destination = os.path.join(local_dir, "python_bin")
    return sp.call("cp /usr/bin/python " + destination, shell=True)
def set_jemalloc_version(workspace):
    """Map MEM_USAGE_STRATEGY to shell exports selecting a jemalloc build.

    Returns a ';'-terminated shell snippet exporting JEMALLOC_VERSION and,
    for most strategies, MALLOC_CONF; returns "" for strategy "close".
    The original eight near-identical branches are collapsed into a
    dispatch table; output is byte-identical per strategy.
    """
    strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
    if strategy == "close":
        # Explicitly disable jemalloc selection.
        return ""
    default = (
        JEMALLOC_251,
        "background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000",
    )
    # strategy -> (jemalloc library name, MALLOC_CONF value or None)
    table = {
        "xmin": (JEMALLOC_244, "decay_time:0"),
        "xmid": (JEMALLOC_244, None),
        "min": (JEMALLOC_251, "dirty_decay_ms:0,muzzy_decay_ms:0"),
        "mid": (JEMALLOC_251, "background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000"),
        "max": default,
        "244": (JEMALLOC_244, None),
        "251": (JEMALLOC_251, "background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000"),
    }
    version, malloc_conf = table.get(strategy, default)
    cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, version) + ";"
    if malloc_conf is not None:
        cmd_str += "export MALLOC_CONF=" + malloc_conf + ";"
    return cmd_str
def pip_install_requirements(workspace):
    """pip-install ``workspace/requirements.txt`` if present; 0 when absent."""
    requirements_path = os.path.join(workspace, "requirements.txt")
    if not os.path.exists(requirements_path):
        # Nothing to install counts as success.
        return 0
    print("try to install requirements.txt from " + requirements_path)
    return sp.call("$(which pip) install -r " + requirements_path, shell=True)
def run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
    # Build and execute the training command: cd into the workspace,
    # optionally select a jemalloc build, then run the user script with
    # the given env-var prefixes. Returns the shell exit code.
    cmd_str = "cd " + workspace + ";"
    if set_jemalloc_version_cmd:
        cmd_str += set_jemalloc_version_cmd
        # NOTE(review): the flattened source makes the nesting of this line
        # ambiguous; it is assumed to belong inside the guard since
        # LD_PRELOAD only matters once JEMALLOC_VERSION was exported above.
        # Confirm against the original script.
        cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
    cmd_str += " ".join(tf_envs) + " $(which python) -u "
    cmd_str += tf_script + " " + " ".join(tf_args)
    print("run tensorflow command:", cmd_str)
    return sp.call(cmd_str, shell=True)
def set_mkl_envs(job_name):
    """Return MKL/OpenMP env settings for a TF role.

    Workers get 6 OpenMP threads; every other role (ps/chief/evaluator or
    unknown) gets 1 — the original four branches were identical apart from
    that value.  Renamed from the METHOD_NAME placeholder to match the
    (commented-out) call site ``set_mkl_envs(job_name)`` below.
    """
    omp_threads = "6" if job_name == "worker" else "1"
    return [
        "OMP_NUM_THREADS=" + omp_threads,
        "KMP_BLOCKTIME=0",
        "MKL_ENABLE_INSTRUCTIONS=AVX2",
    ]
def set_network_threads(job_name):
    """Return seastar network-thread env overrides for the peer role."""
    overrides = {
        "ps": "WORKER_DEFAULT_CORE_NUM=24",
        "worker": "PS_DEFAULT_CORE_NUM=24",
    }
    env = overrides.get(job_name)
    # A fresh list per call, matching the original append-based version.
    return [env] if env is not None else []
if __name__ == "__main__":
    # Launcher entry point: validate required env vars, derive the cluster
    # layout from TF_CONFIG, then exec the user's training script.
    print("start launching tensorflow job")
    if "TF_WORKSPACE" not in os.environ:
        print("TF_WORKSPACE env should be set.")
        exit(1)
    workspace = os.environ.get("TF_WORKSPACE", "")
    if "TF_SCRIPT" not in os.environ:
        print("TF_SCRIPT env should be set.")
        exit(1)
    tf_script = os.environ.get("TF_SCRIPT", "")
    # jemalloc libraries default to living next to the workspace.
    if "JEMALLOC_PATH" not in os.environ:
        jemalloc_path = workspace
    else:
        jemalloc_path = os.environ.get("JEMALLOC_PATH", "")
    #ret_code = copy_python_binary(workspace)
    #if (ret_code != 0):
    #    exit(ret_code)
    tf_args = sys.argv[1:]
    tf_envs = []
    #tf_envs.append("TF_SEASTAR_ENDPOINT_MAP_PATH=/tmp/")
    if "TF_CONFIG" in os.environ:
        # gen_cluster_info also writes the seastar endpoint map file.
        ps_hosts, worker_hosts, chief_hosts, job_name, task_index = gen_cluster_info(workspace)
        os.environ["TASK_INDEX"] = str(task_index)
        os.environ["JOB_NAME"] = str(job_name)
        #tf_envs.extend(set_mkl_envs(job_name))
    set_jemalloc_version_cmd = set_jemalloc_version(jemalloc_path)
    ret_code = pip_install_requirements(workspace)
    if (ret_code != 0):
        exit(ret_code)
    ret_code = run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd)
    if (ret_code != 0):
        exit(ret_code)
asym enc | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from cryptography.exceptions import InvalidKey, InvalidSignature
from cryptography.hazmat.primitives import asymmetric, ciphers, hashes, padding
from cryptography.x509 import Certificate
# Trailing bytes of a SHA-256 digest used as a short session-key id.
HASH_LENGTH = 4  # Adjustable to avoid collision
NONCE_LENGTH = 16  # For AES, this is 128 bits (i.e. block size)
KEY_LENGTH = 32  # AES 256. Choose from 16, 24, 32
# CellCipher message header: key-hash tail + nonce.
HEADER_LENGTH = HASH_LENGTH + NONCE_LENGTH
PADDING_LENGTH = NONCE_LENGTH * 8  # PKCS7 padding size, in bits
# 256 bytes matches a 2048-bit RSA key — confirm against the keys in use.
KEY_ENC_LENGTH = 256
SIGNATURE_LENGTH = 256
# SimpleCellCipher message header: nonce + wrapped key + signature.
SIMPLE_HEADER_LENGTH = NONCE_LENGTH + KEY_ENC_LENGTH + SIGNATURE_LENGTH
def get_hash(value):
    """Return the SHA-256 digest (bytes) of *value*."""
    # Renamed the local from ``hash`` so the builtin is not shadowed.
    digest = hashes.Hash(hashes.SHA256())
    digest.update(value)
    return digest.finalize()
class SessionKeyUnavailable(Exception):
    """Raised when no session key has been established or matches a message."""
    pass
class InvalidCertChain(Exception):
    """Raised for certificates failing root-CA chain validation."""
    # NOTE(review): not raised in the visible code — presumably part of the
    # public API; confirm before removing.
    pass
def METHOD_NAME(k, m):
    """RSA-OAEP (SHA-256) encrypt message *m* with public key *k*.

    Name is the dataset's anonymized placeholder (asymmetric encrypt); it is
    called from other parts of this file and therefore kept unchanged.
    """
    oaep = asymmetric.padding.OAEP(
        mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return k.encrypt(m, oaep)
def _asym_dec(k, m):
    """RSA-OAEP (SHA-256) decrypt message *m* with private key *k*."""
    oaep = asymmetric.padding.OAEP(
        mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()),
        algorithm=hashes.SHA256(),
        label=None,
    )
    return k.decrypt(m, oaep)
def _sign(k, m):
    """Sign *m* with RSA private key *k* using PSS(MGF1/SHA-256, max salt)."""
    pss = asymmetric.padding.PSS(
        mgf=asymmetric.padding.MGF1(hashes.SHA256()),
        salt_length=asymmetric.padding.PSS.MAX_LENGTH,
    )
    return k.sign(data=m, padding=pss, algorithm=hashes.SHA256())
def _verify(k, m, s):
    """Verify PSS/SHA-256 signature *s* over *m* with public key *k*.

    Returns None on success; callers in this file catch InvalidSignature
    on failure.
    """
    pss = asymmetric.padding.PSS(
        mgf=asymmetric.padding.MGF1(hashes.SHA256()),
        salt_length=asymmetric.padding.PSS.MAX_LENGTH,
    )
    k.verify(s, m, pss, hashes.SHA256())
def _sym_enc(k, n, m):
    """AES-CBC encrypt *m* with key *k* and IV *n*, PKCS7-padding first."""
    padder = padding.PKCS7(PADDING_LENGTH).padder()
    padded = padder.update(m) + padder.finalize()
    encryptor = ciphers.Cipher(
        ciphers.algorithms.AES(k), ciphers.modes.CBC(n)
    ).encryptor()
    return encryptor.update(padded) + encryptor.finalize()
def _sym_dec(k, n, m):
    """AES-CBC decrypt *m* with key *k* and IV *n*, then strip PKCS7 padding."""
    decryptor = ciphers.Cipher(
        ciphers.algorithms.AES(k), ciphers.modes.CBC(n)
    ).decryptor()
    padded = decryptor.update(m) + decryptor.finalize()
    unpadder = padding.PKCS7(PADDING_LENGTH).unpadder()
    return unpadder.update(padded) + unpadder.finalize()
class SessionKeyManager:
    """Tracks negotiated AES session keys, indexed by a truncated SHA-256 hash."""

    def __init__(self, root_ca):
        self.key_hash_dict = dict()
        self.root_ca = root_ca
        self.root_ca_pub_key = root_ca.public_key()

    def validate_cert_chain(self, cert):
        """Verify *cert* was signed by the root CA (raises on failure)."""
        self.root_ca_pub_key.verify(
            cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
        )

    def key_request(self, remote_cert, local_cert, local_pri_key):
        """Create a session key, store it, and wrap it for the remote peer.

        Returns ``key_enc + signature`` bytes on success, or False when the
        remote cert fails chain validation.
        """
        session_key = os.urandom(KEY_LENGTH)
        signature = _sign(local_pri_key, session_key)
        try:
            self.validate_cert_chain(remote_cert)
        except InvalidSignature:
            return False
        remote_pub_key = remote_cert.public_key()
        # METHOD_NAME is the file's anonymized asymmetric-encrypt helper.
        key_enc = METHOD_NAME(remote_pub_key, session_key)
        self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
        key_response = key_enc + signature
        return key_response

    def process_key_response(self, remote_cert, local_cert, local_pri_key, key_response):
        """Unwrap and verify a peer's key response; store the key on success."""
        key_enc, signature = key_response[:KEY_ENC_LENGTH], key_response[KEY_ENC_LENGTH:]
        try:
            session_key = _asym_dec(local_pri_key, key_enc)
            self.validate_cert_chain(remote_cert)
            public_key = remote_cert.public_key()
            _verify(public_key, session_key, signature)
            self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
        except (InvalidKey, InvalidSignature):
            return False
        return True

    def key_available(self):
        return bool(self.key_hash_dict)

    def get_key(self, key_hash):
        return self.key_hash_dict.get(key_hash)

    def get_latest_key(self):
        """Return the most recently stored session key without removing it.

        The original used an obscure chained assignment
        (``k, v = _, d[k] = d.popitem()``) to peek at the last entry; this
        pops and re-inserts explicitly — same effect, readable, and the
        KeyError is chained onto the domain exception.
        """
        try:
            key_hash, session_key = self.key_hash_dict.popitem()
        except KeyError as err:
            raise SessionKeyUnavailable("No session key established yet") from err
        self.key_hash_dict[key_hash] = session_key
        return session_key
class CellCipher:
    """Symmetric message cipher keyed by the newest SessionKeyManager key."""

    def __init__(self, session_key_manager: SessionKeyManager):
        self.session_key_manager = session_key_manager

    def encrypt(self, message):
        """Return ``nonce || key-hash-tail || AES-CBC ciphertext``."""
        session_key = self.session_key_manager.get_latest_key()
        digest = get_hash(session_key)
        nonce = os.urandom(NONCE_LENGTH)
        return nonce + digest[-HASH_LENGTH:] + _sym_enc(session_key, nonce, message)

    def decrypt(self, message):
        """Split the header, look up the session key by hash tail, decrypt."""
        nonce = message[:NONCE_LENGTH]
        key_hash = message[NONCE_LENGTH:HEADER_LENGTH]
        body = message[HEADER_LENGTH:]
        session_key = self.session_key_manager.get_key(key_hash)
        if session_key is None:
            raise SessionKeyUnavailable("No session key found for received message")
        return _sym_dec(session_key, nonce, body)
class SimpleCellCipher:
    """Per-peer hybrid cipher: a fresh AES key RSA-wrapped and cached per certificate."""

    def __init__(self, root_ca: Certificate, pri_key: asymmetric.rsa.RSAPrivateKey, cert: Certificate):
        self._root_ca = root_ca
        self._root_ca_pub_key = root_ca.public_key()
        self._pri_key = pri_key
        self._cert = cert
        self._pub_key = cert.public_key()
        # Fail fast if our own cert is not rooted in the CA.
        self._validate_cert_chain(self._cert)
        # Caches so the expensive RSA wrap/unwrap runs once per peer:
        # _cached_enc: hash(cert) -> (key, key_enc, signature)
        # _cached_dec: hash(key_enc) -> key
        self._cached_enc = dict()
        self._cached_dec = dict()

    def _validate_cert_chain(self, cert: Certificate):
        # Raises if *cert* was not signed by the root CA.
        self._root_ca_pub_key.verify(
            cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
        )

    def encrypt(self, message: bytes, target_cert: Certificate):
        """Return ``nonce || key_enc || signature || AES ciphertext`` for the peer."""
        # NOTE(review): keyed on Python's hash() of the cert object — assumes
        # Certificate defines a stable, value-based __hash__; confirm.
        cert_hash = hash(target_cert)
        secret = self._cached_enc.get(cert_hash)
        if secret is None:
            # First message to this peer: mint a key, wrap it, sign the wrap.
            self._validate_cert_chain(target_cert)
            key = os.urandom(KEY_LENGTH)
            remote_pub_key = target_cert.public_key()
            key_enc = METHOD_NAME(remote_pub_key, key)
            signature = _sign(self._pri_key, key_enc)
            self._cached_enc[cert_hash] = (key, key_enc, signature)
        else:
            (key, key_enc, signature) = secret
        nonce = os.urandom(NONCE_LENGTH)
        ct = nonce + key_enc + signature + _sym_enc(key, nonce, message)
        return ct

    def decrypt(self, message: bytes, origin_cert: Certificate):
        """Verify the wrapped key's signature, unwrap (or reuse) it, and decrypt."""
        nonce, key_enc, signature = (
            message[:NONCE_LENGTH],
            message[NONCE_LENGTH : NONCE_LENGTH + KEY_ENC_LENGTH],
            message[NONCE_LENGTH + KEY_ENC_LENGTH : SIMPLE_HEADER_LENGTH],
        )
        key_hash = hash(key_enc)
        dec = self._cached_dec.get(key_hash)
        if dec is None:
            # First message carrying this wrapped key: verify then unwrap.
            self._validate_cert_chain(origin_cert)
            public_key = origin_cert.public_key()
            _verify(public_key, key_enc, signature)
            key = _asym_dec(self._pri_key, key_enc)
            self._cached_dec[key_hash] = key
        else:
            key = dec
        return _sym_dec(key, nonce, message[SIMPLE_HEADER_LENGTH:])
test monitors pg | import os
import json
from bgpcfgd.template import TemplateFabric
from bgpcfgd.config import ConfigMgr
from .util import load_constants_dir_mappings
# Location of the FRR Jinja2 templates shipped with the docker-fpm-frr image.
TEMPLATE_PATH = os.path.abspath('../../dockers/docker-fpm-frr/frr')
def load_tests(peer_type, template_name):
    """Collect (casename, param-file, result-file) triples for a template.

    Returns ``(template_path, tests)`` where template_path is relative to
    the bgpd/templates directory for *peer_type*.
    """
    constants = load_constants_dir_mappings()
    path = "tests/data/%s/%s" % (constants[peer_type], template_name)
    tests = []
    for name in os.listdir(path):
        # Each param_<case>.json pairs with a result_<case>.conf golden file.
        if not (os.path.isfile(os.path.join(path, name)) and name.startswith("param_")):
            continue
        casename = name.replace("param_", "").replace(".json", "")
        tests.append((
            casename,
            os.path.join(path, name),
            os.path.join(path, "result_%s.conf" % casename),
        ))
    tmpl_path = os.path.join("bgpd", "templates", constants[peer_type], "%s.j2" % template_name)
    return tmpl_path, tests
def load_json(fname):
    """Load a template-params JSON file.

    Keys containing '|' inside CONFIG_DB__* tables are converted to tuples,
    mirroring how compound ConfigDB keys are represented in code.
    """
    with open(fname) as fp:
        raw_params = json.load(fp)
    params = {}
    for table_key, table_entries in raw_params.items():
        if not table_key.startswith("CONFIG_DB__"):
            params[table_key] = table_entries
            continue
        converted = {}
        for entry_key, entry_value in table_entries.items():
            key = tuple(entry_key.split('|')) if '|' in entry_key else entry_key
            converted[key] = entry_value
        params[table_key] = converted
    return params
def compress_comments(raw_config):
    """Normalize an FRR config: drop blank lines, collapse runs of bare '!'.

    Content lines and comments with text are kept verbatim; any pending run
    of empty '!' separators collapses to a single '!' before them (and at
    the end of the output).
    """
    output = []
    pending_separators = 0
    for line in raw_config.split('\n'):
        stripped = line.strip()
        if stripped == '':
            # Blank lines are dropped entirely.
            continue
        if stripped == '!':
            # Bare separator: coalesce with neighbours, emit later.
            pending_separators += 1
            continue
        # Content line or a comment with text: flush one separator, keep line.
        if pending_separators > 0:
            output.append("!")
            pending_separators = 0
        output.append(line)
    if pending_separators > 0:
        output.append("!")
    return "\n".join(output) + "\n"
def write_result(fname, raw_result):
    """Write *raw_result* to *fname* with comment runs compressed first."""
    compressed = compress_comments(raw_result)
    with open(fname, 'w') as fp:
        fp.write(compressed)
def run_tests(test_name, template_fname, tests):
    # Render the template for each case and compare with the stored golden
    # result; both sides are normalized via ConfigMgr.to_canonical so
    # cosmetic differences don't fail the test.
    tf = TemplateFabric(TEMPLATE_PATH)
    template = tf.from_file(template_fname)
    for case_name, param_fname, result_fname in tests:
        params = load_json(param_fname)
        raw_generated_result = str(template.render(params))
        # "None" leaking into the output means a template variable was unset.
        assert "None" not in raw_generated_result, "Test %s.%s" % (test_name, case_name)
        # this is used only for initial generation write_result(result_fname, raw_generated_result)
        canonical_generated_result = ConfigMgr.to_canonical(raw_generated_result)
        with open(result_fname) as result_fp:
            raw_saved_result = result_fp.read()
        canonical_saved_result = ConfigMgr.to_canonical(raw_saved_result)
        assert canonical_saved_result == canonical_generated_result, "Test %s.%s" % (test_name, case_name)
# Tests: one (peer type, template) combination per function so pytest
# reports each template separately.
def test_general_policies():
    test_data = load_tests("general", "policies.conf")
    run_tests("general_policies", *test_data)


def test_general_pg():
    test_data = load_tests("general", "peer-group.conf")
    run_tests("general_pg", *test_data)


def test_general_instance():
    test_data = load_tests("general", "instance.conf")
    run_tests("general_instance", *test_data)


def test_internal_policies():
    test_data = load_tests("internal", "policies.conf")
    run_tests("internal_policies", *test_data)


def test_internal_pg():
    test_data = load_tests("internal", "peer-group.conf")
    run_tests("internal_pg", *test_data)


def test_internal_instance():
    test_data = load_tests("internal", "instance.conf")
    run_tests("internal_instance", *test_data)


def test_dynamic_policies():
    test_data = load_tests("dynamic", "policies.conf")
    run_tests("dynamic_policies", *test_data)


def test_dynamic_pg():
    test_data = load_tests("dynamic", "peer-group.conf")
    run_tests("dynamic_pg", *test_data)


def test_dynamic_instance():
    test_data = load_tests("dynamic", "instance.conf")
    run_tests("dynamic_instance", *test_data)


def test_monitors_policies():
    test_data = load_tests("monitors", "policies.conf")
    run_tests("monitors_policies", *test_data)


def test_monitors_pg():
    # Restored from the METHOD_NAME placeholder: pytest only collects
    # functions named test_*, and this matches the sibling naming scheme.
    test_data = load_tests("monitors", "peer-group.conf")
    run_tests("monitors_pg", *test_data)


def test_monitors_instance():
    test_data = load_tests("monitors", "instance.conf")
    run_tests("monitors_instance", *test_data)


def test_voq_chassis_policies():
    test_data = load_tests("voq_chassis", "policies.conf")
    run_tests("voq_chassis_policies", *test_data)


def test_voq_chassis_pg():
    test_data = load_tests("voq_chassis", "peer-group.conf")
    run_tests("voq_chassis_pg", *test_data)


def test_voq_chassis_instance():
    test_data = load_tests("voq_chassis", "instance.conf")
    run_tests("voq_chassis_instance", *test_data)


def test_sentinel_policies():
    test_data = load_tests("sentinels", "policies.conf")
    run_tests("sentinel_policies", *test_data)


def test_sentinel_pg():
    test_data = load_tests("sentinels", "peer-group.conf")
    run_tests("sentinel_pg", *test_data)


def test_sentinel_instance():
    test_data = load_tests("sentinels", "instance.conf")
    run_tests("sentinel_instance", *test_data)
webhook payment gateway initialize tokenization response | import json
import graphene
import mock
import pytest
from ....core.models import EventDelivery
from ....payment.interface import (
PaymentGatewayInitializeTokenizationRequestData,
PaymentGatewayInitializeTokenizationResponseData,
PaymentGatewayInitializeTokenizationResult,
)
from ....settings import WEBHOOK_SYNC_TIMEOUT
# Subscription query installed on the webhook in the subscription-payload
# tests: it reshapes the delivered payload into user/channel/data objects.
# (GraphQL is whitespace-insensitive; the original indentation was lost in
# flattening and is not reconstructed here to keep the literal as shown.)
PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION = """
subscription {
event {
... on PaymentGatewayInitializeTokenizationSession{
user{
id
}
channel{
id
}
data
}
}
}
"""
@pytest.fixture
def METHOD_NAME():
    """Webhook response payload for a successfully initialized tokenization.

    Name is the dataset's anonymized fixture placeholder; the tests below
    request it by this name, so it is kept unchanged.
    """
    success = PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED
    return {"result": success.name, "data": {"foo": "bar"}}
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_with_static_payload(
    mock_request,
    customer_user,
    webhook_plugin,
    payment_gateway_initialize_tokenization_app,
    METHOD_NAME,  # anonymized fixture: the success response payload
    channel_USD,
):
    """Without a subscription query the static payload is sent and the
    app's success response is mapped onto the plugin return value."""
    # given
    mock_request.return_value = METHOD_NAME
    plugin = webhook_plugin()
    expected_data = {"foo": "bar"}
    request_data = PaymentGatewayInitializeTokenizationRequestData(
        user=customer_user,
        app_identifier=payment_gateway_initialize_tokenization_app.identifier,
        channel=channel_USD,
        data=expected_data,
    )
    previous_value = PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
        error="Payment gateway initialize tokenization failed to deliver.",
        data=None,
    )
    # when
    response = plugin.payment_gateway_initialize_tokenization(
        request_data, previous_value
    )
    # then
    delivery = EventDelivery.objects.get()
    # Static payload shape: flat user_id / channel_slug / data keys.
    assert json.loads(delivery.payload.payload) == {
        "user_id": graphene.Node.to_global_id("User", customer_user.pk),
        "channel_slug": channel_USD.slug,
        "data": expected_data,
    }
    mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
    assert response == PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED,
        error=None,
        data=METHOD_NAME["data"],
    )
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_with_subscription_payload(
    mock_request,
    customer_user,
    webhook_plugin,
    payment_gateway_initialize_tokenization_app,
    METHOD_NAME,  # anonymized fixture: the success response payload
    channel_USD,
):
    """With a subscription query installed, the delivered payload follows
    the query's shape (nested user/channel objects) instead of the static one."""
    # given
    mock_request.return_value = METHOD_NAME
    webhook = payment_gateway_initialize_tokenization_app.webhooks.first()
    webhook.subscription_query = PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION
    webhook.save()
    plugin = webhook_plugin()
    expected_data = {"foo": "bar"}
    request_data = PaymentGatewayInitializeTokenizationRequestData(
        user=customer_user,
        app_identifier=payment_gateway_initialize_tokenization_app.identifier,
        channel=channel_USD,
        data=expected_data,
    )
    previous_value = PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
        error="Payment gateway initialize tokenization failed to deliver.",
        data=None,
    )
    # when
    response = plugin.payment_gateway_initialize_tokenization(
        request_data, previous_value
    )
    # then
    delivery = EventDelivery.objects.get()
    # Subscription payload shape: nested objects with global IDs.
    assert json.loads(delivery.payload.payload) == {
        "user": {"id": graphene.Node.to_global_id("User", customer_user.pk)},
        "data": expected_data,
        "channel": {"id": graphene.Node.to_global_id("Channel", channel_USD.pk)},
    }
    mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
    assert response == PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED,
        error=None,
        data=METHOD_NAME["data"],
    )
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_missing_correct_response_from_webhook(
    mock_request,
    customer_user,
    webhook_plugin,
    payment_gateway_initialize_tokenization_app,
    channel_USD,
):
    """A webhook that returns no parseable response maps to FAILED_TO_DELIVER."""
    # given
    mock_request.return_value = None
    webhook = payment_gateway_initialize_tokenization_app.webhooks.first()
    webhook.subscription_query = PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION
    webhook.save()
    plugin = webhook_plugin()
    expected_data = {"foo": "bar"}
    request_data = PaymentGatewayInitializeTokenizationRequestData(
        user=customer_user,
        app_identifier=payment_gateway_initialize_tokenization_app.identifier,
        channel=channel_USD,
        data=expected_data,
    )
    previous_value = PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
        error="Payment gateway initialize tokenization failed to deliver.",
        data=None,
    )
    # when
    response = plugin.payment_gateway_initialize_tokenization(
        request_data, previous_value
    )
    # then
    delivery = EventDelivery.objects.get()
    mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
    assert response == PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
        # NOTE(review): "Failed to delivery request." mirrors the exact
        # message produced by the plugin — do not "fix" the grammar here
        # without changing the plugin too.
        error="Failed to delivery request.",
        data=None,
    )
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_failure_from_app(
    mock_request,
    customer_user,
    webhook_plugin,
    payment_gateway_initialize_tokenization_app,
    channel_USD,
):
    """An explicit failure response from the app is propagated: result
    FAILED_TO_INITIALIZE with the app-provided error message."""
    # given
    expected_error_msg = "Expected error msg."
    mock_request.return_value = {
        "result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
        "error": expected_error_msg,
        "data": None,
    }
    plugin = webhook_plugin()
    expected_data = {"foo": "bar"}
    request_data = PaymentGatewayInitializeTokenizationRequestData(
        user=customer_user,
        app_identifier=payment_gateway_initialize_tokenization_app.identifier,
        channel=channel_USD,
        data=expected_data,
    )
    previous_value = PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
        error="Payment gateway initialize tokenization failed to deliver.",
        data=None,
    )
    # when
    response = plugin.payment_gateway_initialize_tokenization(
        request_data, previous_value
    )
    # then
    delivery = EventDelivery.objects.get()
    # No subscription query on this webhook, so the static payload is used.
    assert json.loads(delivery.payload.payload) == {
        "user_id": graphene.Node.to_global_id("User", customer_user.pk),
        "channel_slug": channel_USD.slug,
        "data": expected_data,
    }
    mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
    assert response == PaymentGatewayInitializeTokenizationResponseData(
        result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE,
        error=expected_error_msg,
        data=None,
    )
test mobile get unauthenticated user | """
Tests for reset deadlines endpoint.
"""
import datetime
import ddt
from django.urls import reverse
from django.utils import timezone
from edx_toggles.toggles.testutils import override_waffle_flag
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.util.testing import EventTestMixin
from lms.djangoapps.course_home_api.tests.utils import BaseCourseHomeTests
from lms.djangoapps.courseware.tests.helpers import MasqueradeMixin
from openedx.core.djangoapps.schedules.models import Schedule
from openedx.features.course_experience import RELATIVE_DATES_DISABLE_RESET_FLAG, RELATIVE_DATES_FLAG
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class ResetCourseDeadlinesViewTests(EventTestMixin, BaseCourseHomeTests, MasqueradeMixin):
"""
Tests for reset deadlines endpoint.
"""
def setUp(self): # pylint: disable=arguments-differ
# Need to supply tracker name for the EventTestMixin. Also, EventTestMixin needs to come
# first in class inheritance so the setUp call here appropriately works
super().setUp('openedx.features.course_experience.api.v1.views.tracker')
self.course = CourseFactory.create(self_paced=True, start=timezone.now() - datetime.timedelta(days=1000))
def test_reset_deadlines(self):
enrollment = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
enrollment.schedule.save()
# Test body with incorrect body param (course_key is required)
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course': self.course.id})
assert response.status_code == 400
assert enrollment.schedule == Schedule.objects.get(id=enrollment.schedule.id)
self.assert_no_events_were_emitted()
# Test correct post body
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 200
assert enrollment.schedule.start_date < Schedule.objects.get(id=enrollment.schedule.id).start_date
self.assert_event_emitted(
'edx.ui.lms.reset_deadlines.clicked',
courserun_key=str(self.course.id),
is_masquerading=False,
is_staff=False,
org_key=self.course.org,
user_id=self.user.id,
)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
@override_waffle_flag(RELATIVE_DATES_DISABLE_RESET_FLAG, active=True)
def test_reset_deadlines_disabled(self):
enrollment = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
enrollment.schedule.save()
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 200
assert enrollment.schedule == Schedule.objects.get(id=enrollment.schedule.id)
self.assert_no_events_were_emitted()
def test_reset_deadlines_with_masquerade(self):
""" Staff users should be able to masquerade as a learner and reset the learner's schedule """
student_username = self.user.username
student_user_id = self.user.id
student_enrollment = CourseEnrollment.enroll(self.user, self.course.id)
student_enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
student_enrollment.schedule.save()
staff_enrollment = CourseEnrollment.enroll(self.staff_user, self.course.id)
staff_enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=30)
staff_enrollment.schedule.save()
self.switch_to_staff()
self.update_masquerade(course=self.course, username=student_username)
self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
updated_schedule = Schedule.objects.get(id=student_enrollment.schedule.id)
assert updated_schedule.start_date.date() == datetime.datetime.today().date()
updated_staff_schedule = Schedule.objects.get(id=staff_enrollment.schedule.id)
assert updated_staff_schedule.start_date == staff_enrollment.schedule.start_date
self.assert_event_emitted(
'edx.ui.lms.reset_deadlines.clicked',
courserun_key=str(self.course.id),
is_masquerading=True,
is_staff=False,
org_key=self.course.org,
user_id=student_user_id,
)
def test_post_unauthenticated_user(self):
self.client.logout()
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 401
def test_mobile_get_banner_info(self):
response = self.client.get(reverse('course-experience-course-deadlines-mobile', args=[self.course.id]))
assert response.status_code == 200
self.assertContains(response, 'missed_deadlines')
self.assertContains(response, 'missed_gated_content')
self.assertContains(response, 'content_type_gating_enabled')
self.assertContains(response, 'verified_upgrade_link')
def test_mobile_get_unknown_course(self):
url = reverse('course-experience-course-deadlines-mobile', args=['course-v1:unknown+course+2T2020'])
response = self.client.get(url)
assert response.status_code == 404
def METHOD_NAME(self):
self.client.logout()
response = self.client.get(reverse('course-experience-course-deadlines-mobile', args=[self.course.id]))
assert response.status_code == 401 |
run test | #
# This script needs to be run on startup
# qemu -kernel ${KERNEL} -s -S
# and then:
# gdb ${KERNEL}.vmlinux -x ${QEMU_SRC}/tests/guest-debug/test-gdbstub.py
import gdb
failcount = 0
def report(cond, msg):
"Report success/fail of test"
if cond:
print ("PASS: %s" % (msg))
else:
print ("FAIL: %s" % (msg))
global failcount
failcount += 1
def check_step():
"Step an instruction, check it moved."
start_pc = gdb.parse_and_eval('$pc')
gdb.execute("si")
end_pc = gdb.parse_and_eval('$pc')
return not (start_pc == end_pc)
def check_break(sym_name):
"Setup breakpoint, continue and check we stopped."
sym, ok = gdb.lookup_symbol(sym_name)
bp = gdb.Breakpoint(sym_name)
gdb.execute("c")
# hopefully we came back
end_pc = gdb.parse_and_eval('$pc')
print ("%s == %s %d" % (end_pc, sym.value(), bp.hit_count))
bp.delete()
# can we test we hit bp?
return end_pc == sym.value()
# We need to do hbreak manually as the python interface doesn't export it
def check_hbreak(sym_name):
"Setup hardware breakpoint, continue and check we stopped."
sym, ok = gdb.lookup_symbol(sym_name)
gdb.execute("hbreak %s" % (sym_name))
gdb.execute("c")
# hopefully we came back
end_pc = gdb.parse_and_eval('$pc')
print ("%s == %s" % (end_pc, sym.value()))
if end_pc == sym.value():
gdb.execute("d 1")
return True
else:
return False
class WatchPoint(gdb.Breakpoint):
def get_wpstr(self, sym_name):
"Setup sym and wp_str for given symbol."
self.sym, ok = gdb.lookup_symbol(sym_name)
wp_addr = gdb.parse_and_eval(sym_name).address
self.wp_str = '*(%(type)s)(&%(address)s)' % dict(
type = wp_addr.type, address = sym_name)
return(self.wp_str)
def __init__(self, sym_name, type):
wp_str = self.get_wpstr(sym_name)
super(WatchPoint, self).__init__(wp_str, gdb.BP_WATCHPOINT, type)
def stop(self):
end_pc = gdb.parse_and_eval('$pc')
print ("HIT WP @ %s" % (end_pc))
return True
def do_one_watch(sym, wtype, text):
wp = WatchPoint(sym, wtype)
gdb.execute("c")
report_str = "%s for %s (%s)" % (text, sym, wp.sym.value())
if wp.hit_count > 0:
report(True, report_str)
wp.delete()
else:
report(False, report_str)
def check_watches(sym_name):
"Watch a symbol for any access."
# Should hit for any read
do_one_watch(sym_name, gdb.WP_ACCESS, "awatch")
# Again should hit for reads
do_one_watch(sym_name, gdb.WP_READ, "rwatch")
# Finally when it is written
do_one_watch(sym_name, gdb.WP_WRITE, "watch")
class CatchBreakpoint(gdb.Breakpoint):
def __init__(self, sym_name):
super(CatchBreakpoint, self).__init__(sym_name)
self.sym, ok = gdb.lookup_symbol(sym_name)
def stop(self):
end_pc = gdb.parse_and_eval('$pc')
print ("CB: %s == %s" % (end_pc, self.sym.value()))
if end_pc == self.sym.value():
report(False, "Hit final catchpoint")
def METHOD_NAME():
"Run through the tests one by one"
print ("Checking we can step the first few instructions")
step_ok = 0
for i in range(3):
if check_step():
step_ok += 1
report(step_ok == 3, "single step in boot code")
print ("Checking HW breakpoint works")
break_ok = check_hbreak("kernel_init")
report(break_ok, "hbreak @ kernel_init")
# Can't set this up until we are in the kernel proper
# if we make it to run_init_process we've over-run and
# one of the tests failed
print ("Setup catch-all for run_init_process")
cbp = CatchBreakpoint("run_init_process")
cpb2 = CatchBreakpoint("try_to_run_init_process")
print ("Checking Normal breakpoint works")
break_ok = check_break("wait_for_completion")
report(break_ok, "break @ wait_for_completion")
print ("Checking watchpoint works")
check_watches("system_state")
#
# This runs as the script it sourced (via -x)
#
try:
print ("Connecting to remote")
gdb.execute("target remote localhost:1234")
# These are not very useful in scripts
gdb.execute("set pagination off")
gdb.execute("set confirm off")
# Run the actual tests
METHOD_NAME()
except:
print ("GDB Exception: %s" % (sys.exc_info()[0]))
failcount += 1
import code
code.InteractiveConsole(locals=globals()).interact()
raise
# Finally kill the inferior and exit gdb with a count of failures
gdb.execute("kill")
exit(failcount) |
fp16 to fp32 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger: logging.Logger = logging.getLogger()
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
TORCH_HALF_MIN: float = torch.finfo(torch.float16).min
TORCH_HALF_MAX: float = torch.finfo(torch.float16).max
TORCH_BFLOAT16_MIN: float = torch.finfo(torch.bfloat16).min
TORCH_BFLOAT16_MAX: float = torch.finfo(torch.bfloat16).max
def fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
def fp32_to_bf16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_BFLOAT16_MIN, TORCH_BFLOAT16_MAX).bfloat16()
def fp32_to_hfp8_with_clamp(
tensor: torch.Tensor, ebits: int = 4, mbits: int = 3, bias: int = 15
) -> torch.Tensor:
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
return torch.ops.fbgemm.FloatToHFP8Quantized(
tensor.contiguous(),
ebits,
bias,
max_pos,
)
def METHOD_NAME(tensor: torch.Tensor) -> torch.Tensor:
return tensor.float()
def bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.view(torch.bfloat16).float()
def hfp8_to_fp32(tensor: torch.Tensor, ebits: int = 4, bias: int = 15) -> torch.Tensor:
return torch.ops.fbgemm.HFP8QuantizedToFloat(
tensor.contiguous().view(torch.uint8),
ebits,
bias,
)
def measure_fp16_quant_error(input_tensor: torch.Tensor) -> None:
# TODO: log to tensorboard
num_nan_fp32_tensor = torch.numel(input_tensor[torch.isnan(input_tensor)])
logger.info(
"num NaN in fp32 tensor: {}, ratio: {}.".format(
num_nan_fp32_tensor, num_nan_fp32_tensor / torch.numel(input_tensor)
)
)
logger.info(
"fp32 tensor profile: min: {}, max: {}, min abs:{}, max abs:{}.".format(
torch.min(input_tensor),
torch.max(input_tensor),
torch.min(torch.abs(input_tensor)),
torch.max(torch.abs(input_tensor)),
)
)
fp16_tensor = fp32_to_fp16_with_clamp(input_tensor)
num_nan_fp16_tensor = torch.numel(fp16_tensor[torch.isnan(fp16_tensor)])
logger.info(
"num NaN in fp16 tensor: {}, ratio: {}.".format(
num_nan_fp16_tensor, num_nan_fp16_tensor / torch.numel(input_tensor)
)
)
diff = torch.abs(input_tensor - fp16_tensor.float())
rel_diff = diff / torch.abs(input_tensor)
logger.info(
"fp32_to_fp16 abs error: min={}, max={}, avg={}.".format(
torch.min(diff), torch.max(diff), torch.mean(diff)
)
)
rel_diff_not_nan = rel_diff[torch.logical_not(torch.isnan(rel_diff))]
logger.info(
"fp32_to_fp16 rel error: min={}, max={}, avg={}.".format(
torch.min(rel_diff_not_nan),
torch.max(rel_diff_not_nan),
torch.mean(rel_diff_not_nan),
)
)
rel_diff_1_idx = torch.where(rel_diff == 1.0)
fp32_rel_err_1_vals = input_tensor[rel_diff_1_idx]
if torch.numel(fp32_rel_err_1_vals) > 0:
fp32_rel_err_1_vals = torch.abs(fp32_rel_err_1_vals)
logger.info(
"fp32_to_fp16 rel error == 1: fp32 min:{}, fp32 max:{}, fp32 avg:{}.".format(
torch.min(fp32_rel_err_1_vals),
torch.max(fp32_rel_err_1_vals),
torch.mean(fp32_rel_err_1_vals),
)
)
subrange_ratio = torch.numel(fp16_tensor[rel_diff_1_idx]) / torch.numel(
fp16_tensor
)
logger.info("sub fp16 range ratio: {}".format(subrange_ratio)) |
get six digit naics count | from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models.functions import Length
from django.db.models import Q
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.validator.tinyshield import TinyShield
from usaspending_api.references.models import NAICS
from usaspending_api.references.v2.views.filter_tree.filter_tree import DEFAULT_CHILDREN
class NAICSViewSet(APIView):
"""Return a list of NAICS or a filtered list of NAICS"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/references/naics.md"
naics_queryset = NAICS.objects.annotate(text_len=Length("code"))
def METHOD_NAME(self, code: str) -> int:
return self.naics_queryset.filter(code__startswith=code, text_len=6).count()
def _parse_and_validate_request(self, requested_naics: str, request_data) -> dict:
data = {"code": requested_naics, "filter": request_data.get("filter")}
models = [
{"key": "code", "name": "code", "type": "integer", "allow_nulls": True, "optional": True},
{
"key": "filter",
"name": "filter",
"type": "text",
"text_type": "search",
"default": None,
"optional": True,
"allow_nulls": True,
},
]
return TinyShield(models).block(data)
def _fetch_children(self, naics_code) -> list:
length = len(naics_code) + 2
results = [
{
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code) if len(naics.code) < 6 else DEFAULT_CHILDREN,
}
for naics in self.naics_queryset.filter(code__startswith=naics_code, text_len=length)
]
return sorted(results, key=lambda x: x["naics"])
def _filter_search(self, naics_filter: dict) -> dict:
search_filter = Q(description__icontains=naics_filter["description__icontains"])
search_filter |= Q(code__icontains=naics_filter["description__icontains"])
if naics_filter.get("code"):
search_filter &= Q(code__startswith=naics_filter["code"])
tier1_codes = set()
tier2_codes = set()
tier3_codes = set()
naics_list = list(self.naics_queryset.filter(search_filter))
tier3_naics = [naics for naics in naics_list if naics.text_len == 6]
tier2_naics = [naics for naics in naics_list if naics.text_len == 4]
tier1_naics = [naics for naics in naics_list if naics.text_len == 2]
for naics in tier3_naics:
tier3_codes.add(naics.code)
tier2_codes.add(naics.code[:4])
tier1_codes.add(naics.code[:2])
for naics in tier2_naics:
tier2_codes.add(naics.code)
tier1_codes.add(naics.code[:2])
extra_tier2_naics = self.naics_queryset.filter(code__in=tier2_codes, text_len=4)
extra_tier1_naics = self.naics_queryset.filter(code__in=tier1_codes, text_len=2)
tier2 = set(list(tier2_naics)) | set(list(extra_tier2_naics))
tier1 = set(list(tier1_naics)) | set(list(extra_tier1_naics))
tier2_results = {}
for naics in tier2:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": [],
}
tier2_results[naics.code] = result
for naics in tier3_naics:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": DEFAULT_CHILDREN,
}
tier2_results[naics.code[:4]]["children"].append(result)
tier2_results[naics.code[:4]]["children"].sort(key=lambda x: x["naics"])
tier1_results = {}
for naics in tier1:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": [],
}
tier1_results[naics.code] = result
for key in tier2_results.keys():
tier1_results[key[:2]]["children"].append(tier2_results[key])
tier1_results[key[:2]]["children"].sort(key=lambda x: x["naics"])
results = [tier1_results[key] for key in tier1_results.keys()]
return {"results": sorted(results, key=lambda x: x["naics"])}
def _default_view(self) -> dict:
results = [
{
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
}
for naics in self.naics_queryset.filter(text_len=2)
]
return {"results": sorted(results, key=lambda x: x["naics"])}
def _business_logic(self, request_data: dict) -> dict:
naics_filter = {}
code = request_data.get("code")
description = request_data.get("filter")
if not code and not description:
return self._default_view()
if code:
naics_filter.update({"code": code})
if description:
naics_filter.update({"description__icontains": description})
return self._filter_search(naics_filter)
results = []
for naics in self.naics_queryset.filter(Q(**naics_filter)):
if naics.text_len < 6:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": self._fetch_children(naics.code),
}
else:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": DEFAULT_CHILDREN,
}
results.append(result)
return {"results": results}
@cache_response()
def get(self, request: Request, requested_naics: str = None) -> Response:
request_data = self._parse_and_validate_request(requested_naics, request.GET)
results = self._business_logic(request_data)
return Response(results) |
test ties broken alphabetically | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/tournament/canonical-data.json
# File last updated on 2023-07-19
import unittest
from tournament import (
tally,
)
class TournamentTest(unittest.TestCase):
def test_just_the_header_if_no_input(self):
results = []
table = ["Team | MP | W | D | L | P"]
self.assertEqual(tally(results), table)
def test_a_win_is_three_points_a_loss_is_zero_points(self):
results = ["Allegoric Alaskans;Blithering Badgers;win"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 1 | 0 | 0 | 3",
"Blithering Badgers | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_win_can_also_be_expressed_as_a_loss(self):
results = ["Blithering Badgers;Allegoric Alaskans;loss"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 1 | 0 | 0 | 3",
"Blithering Badgers | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_different_team_can_win(self):
results = ["Blithering Badgers;Allegoric Alaskans;win"]
table = [
"Team | MP | W | D | L | P",
"Blithering Badgers | 1 | 1 | 0 | 0 | 3",
"Allegoric Alaskans | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_draw_is_one_point_each(self):
results = ["Allegoric Alaskans;Blithering Badgers;draw"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 0 | 1 | 0 | 1",
"Blithering Badgers | 1 | 0 | 1 | 0 | 1",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_one_match(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Allegoric Alaskans;Blithering Badgers;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 2 | 0 | 0 | 6",
"Blithering Badgers | 2 | 0 | 0 | 2 | 0",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_one_winner(self):
results = [
"Allegoric Alaskans;Blithering Badgers;loss",
"Allegoric Alaskans;Blithering Badgers;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 1 | 0 | 1 | 3",
"Blithering Badgers | 2 | 1 | 0 | 1 | 3",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_two_teams(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Blithering Badgers;Courageous Californians;win",
"Courageous Californians;Allegoric Alaskans;loss",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 2 | 0 | 0 | 6",
"Blithering Badgers | 2 | 1 | 0 | 1 | 3",
"Courageous Californians | 2 | 0 | 0 | 2 | 0",
]
self.assertEqual(tally(results), table)
def test_typical_input(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Devastating Donkeys;Courageous Californians;draw",
"Devastating Donkeys;Allegoric Alaskans;win",
"Courageous Californians;Blithering Badgers;loss",
"Blithering Badgers;Devastating Donkeys;loss",
"Allegoric Alaskans;Courageous Californians;win",
]
table = [
"Team | MP | W | D | L | P",
"Devastating Donkeys | 3 | 2 | 1 | 0 | 7",
"Allegoric Alaskans | 3 | 2 | 0 | 1 | 6",
"Blithering Badgers | 3 | 1 | 0 | 2 | 3",
"Courageous Californians | 3 | 0 | 1 | 2 | 1",
]
self.assertEqual(tally(results), table)
def test_incomplete_competition_not_all_pairs_have_played(self):
results = [
"Allegoric Alaskans;Blithering Badgers;loss",
"Devastating Donkeys;Allegoric Alaskans;loss",
"Courageous Californians;Blithering Badgers;draw",
"Allegoric Alaskans;Courageous Californians;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 3 | 2 | 0 | 1 | 6",
"Blithering Badgers | 2 | 1 | 1 | 0 | 4",
"Courageous Californians | 2 | 0 | 1 | 1 | 1",
"Devastating Donkeys | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def METHOD_NAME(self):
results = [
"Courageous Californians;Devastating Donkeys;win",
"Allegoric Alaskans;Blithering Badgers;win",
"Devastating Donkeys;Allegoric Alaskans;loss",
"Courageous Californians;Blithering Badgers;win",
"Blithering Badgers;Devastating Donkeys;draw",
"Allegoric Alaskans;Courageous Californians;draw",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 3 | 2 | 1 | 0 | 7",
"Courageous Californians | 3 | 2 | 1 | 0 | 7",
"Blithering Badgers | 3 | 0 | 1 | 2 | 1",
"Devastating Donkeys | 3 | 0 | 1 | 2 | 1",
]
self.assertEqual(tally(results), table)
def test_ensure_points_sorted_numerically(self):
results = [
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Blithering Badgers;Devastating Donkeys;win",
]
table = [
"Team | MP | W | D | L | P",
"Devastating Donkeys | 5 | 4 | 0 | 1 | 12",
"Blithering Badgers | 5 | 1 | 0 | 4 | 3",
]
self.assertEqual(tally(results), table) |
get severity level | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
from typing import Sequence, Any
from opentelemetry._logs.severity import SeverityNumber
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.sdk._logs import LogData
from opentelemetry.sdk._logs.export import LogExporter, LogExportResult
from azure.monitor.opentelemetry.exporter import _utils
from azure.monitor.opentelemetry.exporter._constants import (
_EXCEPTION_ENVELOPE_NAME,
_MESSAGE_ENVELOPE_NAME,
)
from azure.monitor.opentelemetry.exporter._generated.models import (
MessageData,
MonitorBase,
TelemetryEventData,
TelemetryExceptionData,
TelemetryExceptionDetails,
TelemetryItem,
)
from azure.monitor.opentelemetry.exporter.export._base import (
BaseExporter,
ExportResult,
)
_logger = logging.getLogger(__name__)
_DEFAULT_SPAN_ID = 0
_DEFAULT_TRACE_ID = 0
__all__ = ["AzureMonitorLogExporter"]
_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE = "APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE"
class AzureMonitorLogExporter(BaseExporter, LogExporter):
"""Azure Monitor Log exporter for OpenTelemetry."""
def export(
self, batch: Sequence[LogData], **kwargs: Any # pylint: disable=unused-argument
) -> LogExportResult:
"""Export log data.
:param batch: OpenTelemetry LogData(s) to export.
:type batch: ~typing.Sequence[~opentelemetry._logs.LogData]
:return: The result of the export.
:rtype: ~opentelemetry.sdk._logs.export.LogData
"""
envelopes = [self._log_to_envelope(log) for log in batch]
try:
result = self._transmit(envelopes)
self._handle_transmit_from_storage(envelopes, result)
return _get_log_export_result(result)
except Exception: # pylint: disable=broad-except
_logger.exception("Exception occurred while exporting the data.")
return _get_log_export_result(ExportResult.FAILED_NOT_RETRYABLE)
def shutdown(self) -> None:
"""Shuts down the exporter.
Called when the SDK is shut down.
"""
self.storage.close()
def _log_to_envelope(self, log_data: LogData) -> TelemetryItem:
if not log_data:
return None
envelope = _convert_log_to_envelope(log_data)
envelope.instrumentation_key = self._instrumentation_key
return envelope
@classmethod
def from_connection_string(
cls, conn_str: str, **kwargs: Any
) -> "AzureMonitorLogExporter":
"""
Create an AzureMonitorLogExporter from a connection string.
This is the recommended way of instantation if a connection string is passed in explicitly.
If a user wants to use a connection string provided by environment variable, the constructor
of the exporter can be called directly.
:param str conn_str: The connection string to be used for authentication.
:keyword str api_version: The service API version used. Defaults to latest.
:returns an instance of ~AzureMonitorLogExporter
:rtype ~azure.monitor.opentelemetry.exporter.AzureMonitorLogExporter
"""
return cls(connection_string=conn_str, **kwargs)
def _log_data_is_event(log_data: LogData):
log_record = log_data.log_record
is_event = log_record.attributes.get(_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE)
return is_event is True
# pylint: disable=protected-access
def _convert_log_to_envelope(log_data: LogData) -> TelemetryItem:
log_record = log_data.log_record
time_stamp = log_record.timestamp if log_record.timestamp is not None else log_record.observed_timestamp
envelope = _utils._create_telemetry_item(time_stamp)
envelope.tags.update(_utils._populate_part_a_fields(log_record.resource))
envelope.tags["ai.operation.id"] = "{:032x}".format(
log_record.trace_id or _DEFAULT_TRACE_ID
)
envelope.tags["ai.operation.parentId"] = "{:016x}".format(
log_record.span_id or _DEFAULT_SPAN_ID
)
properties = _utils._filter_custom_properties(
log_record.attributes,
lambda key, val: not _is_ignored_attribute(key)
)
exc_type = log_record.attributes.get(SpanAttributes.EXCEPTION_TYPE)
exc_message = log_record.attributes.get(SpanAttributes.EXCEPTION_MESSAGE)
# pylint: disable=line-too-long
stack_trace = log_record.attributes.get(SpanAttributes.EXCEPTION_STACKTRACE)
severity_level = METHOD_NAME(log_record.severity_number)
if not log_record.body:
log_record.body = "n/a"
# Event telemetry
if _log_data_is_event(log_data):
envelope.name = 'Microsoft.ApplicationInsights.Event'
data = TelemetryEventData(
name=str(log_record.body)[:32768],
properties=properties,
)
envelope.data = MonitorBase(base_data=data, base_type="EventData")
# Exception telemetry
elif exc_type is not None or exc_message is not None:
envelope.name = _EXCEPTION_ENVELOPE_NAME
has_full_stack = stack_trace is not None
if not exc_message:
exc_message = "Exception"
exc_details = TelemetryExceptionDetails(
type_name=str(exc_type)[:1024],
message=str(exc_message)[:32768],
has_full_stack=has_full_stack,
stack=str(stack_trace)[:32768],
)
data = TelemetryExceptionData(
severity_level=severity_level,
properties=properties,
exceptions=[exc_details],
)
# pylint: disable=line-too-long
envelope.data = MonitorBase(base_data=data, base_type="ExceptionData")
else: # Message telemetry
envelope.name = _MESSAGE_ENVELOPE_NAME
# pylint: disable=line-too-long
# Severity number: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
data = MessageData(
message=str(log_record.body)[:32768],
severity_level=severity_level,
properties=properties,
)
envelope.data = MonitorBase(base_data=data, base_type="MessageData")
return envelope
def _get_log_export_result(result: ExportResult) -> LogExportResult:
if result == ExportResult.SUCCESS:
return LogExportResult.SUCCESS
if result in (
ExportResult.FAILED_RETRYABLE,
ExportResult.FAILED_NOT_RETRYABLE,
):
return LogExportResult.FAILURE
return None
# pylint: disable=line-too-long
# Common schema: https://github.com/microsoft/common-schema/blob/main/Mappings/AzureMonitor-AI.md#messageseveritylevel
# SeverityNumber specs: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
def METHOD_NAME(severity_number: SeverityNumber):
if severity_number.value < 9:
return 0
return int((severity_number.value - 1) / 4 - 1)
def _is_ignored_attribute(key: str) -> bool:
return key in _IGNORED_ATTRS
_IGNORED_ATTRS = frozenset(
(
SpanAttributes.EXCEPTION_TYPE,
SpanAttributes.EXCEPTION_MESSAGE,
SpanAttributes.EXCEPTION_STACKTRACE,
SpanAttributes.EXCEPTION_ESCAPED,
_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE,
)
) |
delete empty color blocks | # Authors: see git history
#
# Copyright (c) 2010 Authors
# Licensed under the GNU GPL version 3.0 or later. See the file LICENSE for details.
from sys import exit
from typing import List
from inkex import errormsg
from ..i18n import _
from ..svg import PIXELS_PER_MM
from ..utils.geometry import Point
from ..utils.threading import check_stop_flag
from .color_block import ColorBlock
def stitch_groups_to_stitch_plan(stitch_groups, collapse_len=None, min_stitch_len=0.1, disable_ties=False): # noqa: C901
"""Convert a collection of StitchGroups to a StitchPlan.
* applies instructions embedded in the StitchGroup such as trim_after and stop_after
* adds tie-ins and tie-offs
* adds jump-stitches between stitch_group if necessary
"""
if not stitch_groups:
errormsg(_("There is no selected stitchable element. Please run "
"Extensions > Ink/Stitch > Troubleshoot > Troubleshoot objects in case you have expected a stitchout."))
exit(1)
if collapse_len is None:
collapse_len = 3.0
collapse_len = collapse_len * PIXELS_PER_MM
stitch_plan = StitchPlan()
color_block = stitch_plan.new_color_block(color=stitch_groups[0].color)
previous_stitch_group = None
need_tie_in = True
for stitch_group in stitch_groups:
check_stop_flag()
if not stitch_group.stitches:
continue
if color_block.color != stitch_group.color:
# add a lock stitch to the last element of the previous group
if not need_tie_in:
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
# end the previous block with a color change
color_block.add_stitch(color_change=True)
# make a new block of our color
color_block = stitch_plan.new_color_block(color=stitch_group.color)
else:
if (len(color_block) and not need_tie_in and
((stitch_group.stitches[0] - color_block.stitches[-1]).length() > collapse_len or
previous_stitch_group.force_lock_stitches)):
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if need_tie_in is True:
lock_stitches = stitch_group.get_lock_stitches("start", disable_ties)
if lock_stitches:
color_block.add_stitch(lock_stitches[0], jump=True)
color_block.add_stitches(stitches=lock_stitches)
else:
color_block.add_stitch(stitch_group.stitches[0], jump=True)
need_tie_in = False
color_block.add_stitches(stitches=stitch_group.stitches)
if stitch_group.trim_after or stitch_group.stop_after:
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if stitch_group.trim_after:
color_block.add_stitch(trim=True)
if stitch_group.stop_after:
color_block.add_stitch(stop=True)
previous_stitch_group = stitch_group
if not need_tie_in:
# tie off at the end if we haven't already
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
if len(color_block) == 0:
# last block ended in a stop, so now we have an empty block
del stitch_plan.color_blocks[-1]
stitch_plan.filter_duplicate_stitches(min_stitch_len)
return stitch_plan
class StitchPlan(object):
    """An ordered collection of ColorBlocks, each holding stitches.

    Provides aggregate statistics (stitch counts, bounding box, estimated
    thread use) across all blocks in the plan.
    """

    def __init__(self):
        self.color_blocks = []

    def new_color_block(self, *args, **kwargs):
        """Create a ColorBlock, append it to the plan, and return it."""
        block = ColorBlock(*args, **kwargs)
        self.color_blocks.append(block)
        return block

    def METHOD_NAME(self):
        """Drop every color block that contains no stitches."""
        self.color_blocks = [block for block in self.color_blocks if len(block) > 0]

    def add_color_block(self, color_block):
        self.color_blocks.append(color_block)

    def filter_duplicate_stitches(self, min_stitch_len):
        """Delegate duplicate-stitch filtering to every color block."""
        for block in self:
            block.filter_duplicate_stitches(min_stitch_len)

    def __iter__(self):
        return iter(self.color_blocks)

    def __len__(self):
        return len(self.color_blocks)

    def __repr__(self):
        return "StitchPlan(%s)" % ", ".join(repr(block) for block in self)

    def __json__(self):
        return dict(
            color_blocks=self.color_blocks,
            num_stops=self.num_stops,
            num_trims=self.num_trims,
            num_stitches=self.num_stitches,
            bounding_box=self.bounding_box,
            estimated_thread=self.estimated_thread,
        )

    @property
    def num_colors(self):
        """Number of unique colors in the stitch plan."""
        unique_colors = {block.color for block in self}
        return len(unique_colors)

    @property
    def num_color_blocks(self):
        return len(self.color_blocks)

    @property
    def num_stops(self):
        return sum(1 for block in self if block.stop_after)

    @property
    def num_trims(self):
        return sum(block.num_trims for block in self)

    @property
    def num_stitches(self):
        return sum(block.num_stitches for block in self)

    @property
    def bounding_box(self):
        # Union of the per-block bounding boxes as (minx, miny, maxx, maxy).
        boxes = [block.bounding_box for block in self]
        minx = min(box[0] for box in boxes)
        miny = min(box[1] for box in boxes)
        maxx = max(box[2] for box in boxes)
        maxy = max(box[3] for box in boxes)
        return minx, miny, maxx, maxy

    @property
    def estimated_thread(self):
        # Sum per-block estimates (in pixels) and convert to meters.
        total_pixels = sum(block.estimated_thread for block in self)
        thread_meter = total_pixels / PIXELS_PER_MM / 1000
        return round(thread_meter, 2)

    @property
    def dimensions(self):
        minx, miny, maxx, maxy = self.bounding_box
        return (maxx - minx, maxy - miny)

    @property
    def extents(self):
        minx, miny, maxx, maxy = self.bounding_box
        return max(-minx, maxx), max(-miny, maxy)

    @property
    def dimensions_mm(self):
        width, height = self.dimensions
        return (width / PIXELS_PER_MM, height / PIXELS_PER_MM)

    @property
    def last_color_block(self):
        return self.color_blocks[-1] if self.color_blocks else None

    def make_offsets(self, offsets: List[Point]):
        """Return a new StitchPlan with every block offset by ``offsets``."""
        offset_plan = StitchPlan()
        offset_plan.color_blocks = [block.make_offsets(offsets) for block in self]
        return offset_plan
test print topic help with devel for | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pydoc
from unittest import mock
import fixtures
from testtools.matchers import Contains, Equals, StartsWith
from snapcraft_legacy.cli._runner import run
from snapcraft_legacy.cli.help import _TOPICS
from tests.legacy import fixture_setup
from . import CommandBaseTestCase
class HelpCommandBaseTestCase(CommandBaseTestCase):
    """Base class for help-command tests: forces a deterministic pydoc pager."""

    def setUp(self):
        super().setUp()
        # pydoc's pager autodetection can fail under test; force the plain
        # pager so help output is captured deterministically.
        patcher = mock.patch("pydoc.pager", new=pydoc.plainpager)
        patcher.start()
        self.addCleanup(patcher.stop)
class HelpCommandTestCase(HelpCommandBaseTestCase):
    """Tests for ``snapcraft help`` with topics, plugins and commands."""
    def test_topic_and_plugin_not_found_exits_with_tip(self):
        # Unknown help targets exit non-zero with a hint for the user.
        fake_logger = fixtures.FakeLogger(level=logging.ERROR)
        self.useFixture(fake_logger)
        result = self.run_command(["help", "does-not-exist"])
        self.assertThat(result.exit_code, Equals(1))
        self.assertThat(
            result.output, Contains("There is no help topic, plugin or command")
        )
    def test_topic_and_plugin_adds_ellipsis_for_long_arg(self):
        # Long unknown arguments are truncated with "..." in the error output.
        fake_logger = fixtures.FakeLogger(level=logging.ERROR)
        self.useFixture(fake_logger)
        result = self.run_command(["help", "1234567890123"])
        self.assertThat(result.exit_code, Equals(1))
        self.assertThat(result.output, Contains("1234567890..."))
    def test_print_module_help_for_valid_plugin_default_base(self):
        # With no base given, help defaults to the 'core20' base.
        result = self.run_command(["help", "nil"])
        expected = "Displaying help for the 'nil' plugin for 'core20'."
        output = result.output[: len(expected)]
        self.assertThat(
            output,
            Equals(expected),
            "The help message does not start with {!r} but with "
            "{!r} instead".format(expected, output),
        )
    def test_print_module_help_for_valid_plugin_with_base(self):
        result = self.run_command(["help", "nil", "--base", "core20"])
        expected = "Displaying help for the 'nil' plugin for 'core20'."
        output = result.output[: len(expected)]
        self.expectThat(
            output,
            Equals(expected),
            "The help message does not start with {!r} but with "
            "{!r} instead".format(expected, output),
        )
    def test_print_module_help_for_valid_plugin_snapcraft_yaml(self):
        # An explicit --base overrides the base declared in snapcraft.yaml.
        self.useFixture(
            fixture_setup.SnapcraftYaml(
                self.path,
                base="core18",
                parts={"part1": {"source": ".", "plugin": "nil"}},
            )
        )
        result = self.run_command(["help", "python", "--base", "core20"])
        expected = (
            "Displaying help for the 'python' plugin for 'core20'.\n\n"
            "The python plugin can be used for"
        )
        output = result.output[: len(expected)]
        self.assertThat(
            output,
            Equals(expected),
            "The help message does not start with {!r} but with "
            "{!r} instead".format(expected, output),
        )
    def test_print_module_named_with_dashes_help_for_valid_plugin(self):
        result = self.run_command(["help", "catkin-tools", "--base", "core20"])
        expected = "Displaying help for the 'catkin-tools' plugin for 'core20'."
        self.assertThat(result.output, StartsWith(expected))
    def test_show_module_help_with_devel_for_valid_plugin(self):
        # --devel shows the raw pydoc module help instead of user-facing help.
        result = self.run_command(["help", "nil", "--devel"])
        expected = (
            "Help on module snapcraft_legacy.plugins.v2.nil in snapcraft_legacy.plugins"
        )
        output = result.output[: len(expected)]
        self.assertThat(
            output,
            Equals(expected),
            "The help message does not start with {!r} but with "
            "{!r} instead".format(expected, output),
        )
    def test_print_topics(self):
        result = self.run_command(["help", "topics"])
        output = result.output.strip().split("\n")
        for t in _TOPICS:
            self.assertTrue(
                t in output, "Missing topic: {!r} in {!r}".format(t, output)
            )
    def test_print_topic_help_for_valid_topic(self):
        result = self.run_command(["help", "sources"])
        expected = "Common 'source' options."
        output = result.output[: len(expected)]
        self.assertThat(
            output,
            Equals(expected),
            "The help message does not start with {!r} but with "
            "{!r} instead".format(expected, output),
        )
    def test_print_generic_help_by_default(self):
        result = self.run_command(["help"])
        self.assertThat(
            result.output, Contains("Snapcraft is a delightful packaging tool.")
        )
        self.assertThat(result.output, Contains("For more help"))
    def test_no_unicode_in_help_strings(self):
        # All help text must be pure ASCII so it renders on any terminal.
        helps = ["topics"]
        for key in _TOPICS.keys():
            helps.append(str(key))
        # Get a list of plugins
        import os
        from pathlib import Path
        import snapcraft_legacy.plugins
        for plugin in Path(snapcraft_legacy.plugins.__path__[0]).glob("*.py"):
            if os.path.isfile(str(plugin)) and not os.path.basename(
                str(plugin)
            ).startswith("_"):
                helps.append(os.path.basename(str(plugin)[:-3]))
        for key in helps:
            result = self.run_command(["help", key])
            # An UnicodeEncodeError will be raised if the help text has
            # non-ASCII characters.
            result.output.encode("ascii")
class TopicWithDevelTestCase(HelpCommandBaseTestCase):
    def METHOD_NAME(self):
        """Each help topic combined with --devel renders pydoc package help."""
        # NOTE(review): this dict must have an entry for every key in _TOPICS,
        # otherwise the loop below raises KeyError — confirm when a new topic
        # is added.
        expected = {
            "sources": "Help on package snapcraft",
            "plugins": "Help on package snapcraft",
        }
        for topic in _TOPICS:
            result = self.run_command(["help", topic, "--devel"])
            output = result.output[: len(expected[topic])]
            self.assertThat(
                output,
                Equals(expected[topic]),
                "The help message does not start with {!r} but with "
                "{!r} instead".format(expected[topic], output),
            )
class TestHelpForCommand(HelpCommandBaseTestCase):
    """Smoke test: every registered command surfaces its own help text."""

    def test_help_for_command(self):
        for name, command in run.commands.items():
            result = self.run_command(["help", name])
            self.assertThat(result.exit_code, Equals(0))
            # Verify that the first line of help text is correct
            # to ensure no name squatting takes place.
            first_line = command.help.split("\n")[0]
            self.assertThat(result.output, Contains(first_line))
test type raises for unknown type of | import pytest
from h import models
from h.models.group import AUTHORITY_PROVIDED_ID_MAX_LENGTH, ReadableBy, WriteableBy
def test_init_sets_given_attributes():
    """Constructor keyword arguments are stored on the Group instance."""
    group = models.Group(name="My group", authority="example.com", enforce_scope=False)
    assert group.name == "My group"
    assert group.authority == "example.com"
    assert not group.enforce_scope
def test_with_short_name():
    """Should raise ValueError if name shorter than 3 characters."""
    with pytest.raises(ValueError):
        models.Group(name="ab")
def test_with_long_name():
    """Should raise ValueError if name longer than 25 characters."""
    with pytest.raises(ValueError):
        models.Group(name="abcdefghijklmnopqrstuvwxyz")
def test_enforce_scope_is_True_by_default(db_session, factories):
    """Groups enforce their scope unless explicitly disabled."""
    user = factories.User()
    group = models.Group(name="Foobar", authority="foobar.com", creator=user)
    db_session.add(group)
    db_session.flush()
    assert group.enforce_scope is True
def test_enforce_scope_can_be_set_False(db_session, factories):
    """``enforce_scope=False`` survives a flush to the database."""
    user = factories.User()
    group = models.Group(
        name="Foobar", authority="foobar.com", creator=user, enforce_scope=False
    )
    db_session.add(group)
    db_session.flush()
    assert not group.enforce_scope
def test_slug(db_session, factories, organization):
    """The slug is derived from the group name (lowercased and hyphenated)."""
    name = "My Hypothesis Group"
    user = factories.User()
    group = models.Group(
        name=name,
        authority="foobar.com",
        creator=user,
        organization=organization,
    )
    db_session.add(group)
    db_session.flush()
    assert group.slug == "my-hypothesis-group"
def test_type_returns_open_for_open_groups(factories):
    """``Group.type`` is derived from the group's access flags."""
    assert factories.OpenGroup().type == "open"
def test_type_returns_private_for_private_groups(factories):
    assert factories.Group().type == "private"
def test_type_returns_restricted_for_restricted_groups(factories):
    assert factories.RestrictedGroup().type == "restricted"
def test_it_returns_None_by_default_for_authority_provided_id():
    group = models.Group(name="abcdefg")
    assert group.authority_provided_id is None
def test_it_returns_None_for_groupid_if_authority_provided_id_is_None(factories):
    group = factories.Group(authority_provided_id=None)
    assert group.groupid is None
def test_it_returns_formatted_groupid_if_authority_provided_id(factories):
    # groupid has the form "group:<authority_provided_id>@<authority>".
    group = factories.Group()
    group.authority_provided_id = "hithere"
    assert group.groupid == f"group:hithere@{group.authority}"
def test_groupid_setter_raises_ValueError_if_groupid_invalid(factories):
    group = factories.Group()
    with pytest.raises(ValueError, match="isn't a valid groupid"):
        group.groupid = "nonsense"
def test_groupid_setter_sets_consistuent_fields(factories):
    # Assigning a groupid splits it into authority_provided_id and authority.
    group = factories.Group()
    group.groupid = "group:[email protected]"
    assert group.authority_provided_id == "onetwo"
    assert group.authority == "threefour.com"
def test_groupid_setter_accepts_None_and_nullifies_authority_provided_id(factories):
    # Clearing groupid nulls authority_provided_id but keeps the authority.
    group = factories.Group()
    group.groupid = "group:[email protected]"
    group.groupid = None
    assert group.groupid is None
    assert group.authority == "threefour.com"
    assert group.authority_provided_id is None
@pytest.mark.parametrize(
    "authority_provided_id", ["%%&whatever", "^flop", "#---", "ßeta"]
)
def test_it_raises_ValueError_if_invalid_authority_provided_id(authority_provided_id):
    """authority_provided_id only accepts a restricted character set."""
    group = models.Group(name="abcdefg")
    with pytest.raises(ValueError, match="authority_provided_id must only contain"):
        group.authority_provided_id = authority_provided_id
def test_it_raises_ValueError_if_authority_provided_id_too_long():
    """Values longer than AUTHORITY_PROVIDED_ID_MAX_LENGTH are rejected."""
    group = models.Group(name="abcdefg")
    with pytest.raises(ValueError, match="characters or fewer"):
        group.authority_provided_id = "a" * (AUTHORITY_PROVIDED_ID_MAX_LENGTH + 1)
def test_it_allows_authority_provided_id_to_be_None():
    group = models.Group(name="abcdefg")
    group.authority_provided_id = None
def METHOD_NAME(factories):
    """Reading ``type`` raises for flag combinations matching no known type."""
    group = factories.Group()
    # Set the group's access flags to an invalid / unused combination.
    group.joinable_by = None
    group.readable_by = ReadableBy.members
    group.writeable_by = WriteableBy.authority
    expected_err = "^This group doesn't seem to match any known type"
    with pytest.raises(ValueError, match=expected_err):
        _ = group.type
def test_you_cannot_set_type(factories):
    """``Group.type`` is a read-only (computed) property."""
    group = factories.Group()

    # The AttributeError message for read-only properties changed in
    # Python 3.11 from "can't set attribute" to "property ... has no
    # setter", so accept either wording.
    with pytest.raises(AttributeError, match="can't set attribute|has no setter"):
        group.type = "open"
def test_repr(db_session, factories, organization):
    """repr shows the group's slug for easy debugging."""
    name = "My Hypothesis Group"
    user = factories.User()
    group = models.Group(
        name=name,
        authority="foobar.com",
        creator=user,
        organization=organization,
    )
    db_session.add(group)
    db_session.flush()
    assert repr(group) == "<Group: my-hypothesis-group>"
def test_group_organization(db_session):
    """A group can be linked to an organization via relationship or FK."""
    name = "My Hypothesis Group"
    org = models.Organization(name="My Organization", authority="foobar.com")
    db_session.add(org)
    db_session.flush()
    group = models.Group(name=name, authority="foobar.com", organization=org)
    db_session.add(group)
    db_session.flush()
    assert group.organization == org
    assert group.organization_id == org.id
def test_created_by(db_session, factories, organization):
    """``Group.created_by`` returns the user's groups in creation order."""
    name_1 = "My first group"
    name_2 = "My second group"
    user = factories.User()
    group_1 = models.Group(
        name=name_1,
        authority="foobar.com",
        creator=user,
        organization=organization,
    )
    group_2 = models.Group(
        name=name_2,
        authority="foobar.com",
        creator=user,
        organization=organization,
    )
    db_session.add_all([group_1, group_2])
    db_session.flush()
    assert models.Group.created_by(db_session, user).all() == [group_1, group_2]
def test_public_group():
    """World-readable groups report themselves as public."""
    group = models.Group(readable_by=ReadableBy.world)
    assert group.is_public
def test_non_public_group():
    group = models.Group(readable_by=ReadableBy.members)
    assert not group.is_public
@pytest.fixture()
def organization(factories):
    # Default Organization for the tests above that need one.
    return factories.Organization()
prep param lists | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
    """
    Utility module that casts its input to half precision::

        def forward(self, input):
            return input.half()
    """

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input.half()
def BN_convert_float(module):
    """
    Recursively cast affine batchnorm submodules back to FP32.

    Utility function for network_to_half(). Retained for legacy purposes.
    """
    is_affine_batchnorm = (
        isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
        and module.affine is True
    )
    if is_affine_batchnorm:
        module.float()
    for child in module.children():
        BN_convert_float(child)
    return module
def network_to_half(network):
    """
    Convert model to half precision in a batchnorm-safe way.
    Retained for legacy purposes. It is recommended to use FP16Model.
    """
    # Cast everything to half, then restore affine batchnorms to FP32 and
    # prepend an input-casting module.
    half_network = BN_convert_float(network.half())
    return nn.Sequential(tofp16(), half_network)
def convert_module(module, dtype):
    """
    Converts a module's immediate (non-recursive) floating-point
    parameters, their gradients, and buffers to ``dtype``.
    """
    for param in module.parameters(recurse=False):
        if param is None:
            continue
        if param.data.dtype.is_floating_point:
            param.data = param.data.to(dtype=dtype)
        grad = param._grad
        if grad is not None and grad.data.dtype.is_floating_point:
            grad.data = grad.data.to(dtype=dtype)
    for buf in module.buffers(recurse=False):
        if buf is None:
            continue
        if buf.data.dtype.is_floating_point:
            buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
    """
    Converts a network's parameters and buffers to ``dtype``, skipping
    affine batchnorm modules (kept in FP32 for numerical stability).
    """
    for module in network.modules():
        skip_module = (
            isinstance(module, torch.nn.modules.batchnorm._BatchNorm)
            and module.affine is True
        )
        if skip_module:
            continue
        convert_module(module, dtype)
        if isinstance(module, (torch.nn.RNNBase, torch.nn.modules.rnn.RNNBase)):
            # Re-compact RNN weights after the dtype change.
            module.flatten_parameters()
    return network
class FP16Model(nn.Module):
    """
    Convert model to half precision in a batchnorm-safe way.
    """
    def __init__(self, network):
        # Imported locally — presumably to avoid an import cycle with the
        # apex package; confirm before moving to module level.
        from apex import deprecated_warning
        deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
        super(FP16Model, self).__init__()
        # Cast parameters/buffers to FP16 except affine batchnorm layers.
        self.network = convert_network(network, dtype=torch.half)
    def forward(self, *inputs):
        # Cast incoming tensors to half to match the converted network.
        inputs = tuple(t.half() for t in inputs)
        return self.network(*inputs)
def backwards_debug_hook(grad):
    """Debug hook that must never fire: master params should not receive
    gradients directly in the backward pass."""
    # Fixed typo in the error message: "recieved" -> "received".
    raise RuntimeError("master_params received a gradient in the backward pass!")
def METHOD_NAME(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.

    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.

    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element.

    Example::

        model_params, master_params = prep_param_lists(model)

    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.

    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    # Only trainable parameters participate in master-weight updates.
    model_params = [param for param in model.parameters() if param.requires_grad]

    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except Exception:
            # Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit before re-raising; Exception is sufficient here.
            # Also fixed the typo "F16_Optimizer" -> "FP16_Optimizer".
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # Pre-allocate the gradient buffer so downstream copies can assume
        # ``.grad`` is populated.
        if master_params.grad is None:
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        # One detached FP32 master copy per model parameter.
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy model gradients to master gradients.

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
    """
    if flat_master:
        # The flattening may incur one more deep copy than is necessary.
        flat_grads = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat_grads)
        return
    for model, master in zip(model_params, master_params):
        if model.grad is None:
            # Mirror the absence of a gradient on the master copy.
            master.grad = None
            continue
        if master.grad is None:
            # Lazily allocate a master gradient buffer of the right shape.
            master.grad = Variable(master.data.new(*master.data.size()))
        master.grad.data.copy_(model.grad.data)
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy master parameters to model parameters.

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
    """
    if flat_master:
        # Split the single flat master tensor back into per-parameter views.
        unflattened = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model, master in zip(model_params, unflattened):
            model.data.copy_(master)
    else:
        for model, master in zip(model_params, master_params):
            model.data.copy_(master.data)
# Backward compatibility fixes

def to_python_float(t):
    """Return a Python scalar from a tensor, or the first element of a
    sequence for objects without ``.item()`` (pre-0.4 PyTorch tensors)."""
    if hasattr(t, 'item'):
        return t.item()
    return t[0]
# torch.nn.utils.clip_grad_norm was renamed to clip_grad_norm_ after
# PyTorch 0.4; expose whichever exists under a single name.
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
    clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
    clip_grad_norm = torch.nn.utils.clip_grad_norm_
test results display | import pytest
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
from grandchallenge.evaluation.models import Evaluation, Phase
from grandchallenge.evaluation.tasks import calculate_ranks
from tests.evaluation_tests.factories import EvaluationFactory, PhaseFactory
from tests.factories import UserFactory
@pytest.mark.django_db
def test_calculate_ranks(django_assert_max_num_queries):
    """Ranks are correct for every scoring method / sort order combination."""
    phase = PhaseFactory()
    results = [
        # Warning: Do not change this values without updating the
        # expected_ranks below.
        {"a": 0.0, "b": 0.0},
        {"a": 0.5, "b": 0.2},
        {"a": 1.0, "b": 0.3},
        {"a": 0.7, "b": 0.4},
        {"a": 0.5, "b": 0.5},
        # Following two are invalid as they are incomplete
        {"a": 1.0},
        {"b": 0.3},
        # Add a valid, but unpublished result
        {"a": 0.1, "b": 0.1},
    ]
    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]
    for e, r in zip(queryset, results, strict=True):
        e.outputs.add(
            ComponentInterfaceValue.objects.create(
                interface=ComponentInterface.objects.get(
                    slug="metrics-json-file"
                ),
                value=r,
            )
        )
    # Unpublish the result
    queryset[-1].published = False
    queryset[-1].save()
    # expected[primary sort][scoring method][secondary sort] gives the
    # ranks/rank_scores for the evaluations above; 0 means unranked
    # (invalid or unpublished results).
    expected = {
        Phase.DESCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [5, 3, 1, 2, 3, 0, 0, 0],
                    "rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [5, 4, 1, 1, 1, 0, 0, 0],
                    "rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [3, 2, 1, 3, 5, 0, 0, 0],
                    "rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
                },
            },
        },
        Phase.ASCENDING: {
            Phase.ABSOLUTE: {
                Phase.DESCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 5, 4, 2, 0, 0, 0],
                    "rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
                },
            },
            Phase.MEDIAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
            Phase.MEAN: {
                Phase.DESCENDING: {
                    "ranks": [2, 2, 5, 2, 1, 0, 0, 0],
                    "rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
                },
                Phase.ASCENDING: {
                    "ranks": [1, 2, 4, 4, 3, 0, 0, 0],
                    "rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
                },
            },
        },
    }
    for score_method in (Phase.ABSOLUTE, Phase.MEDIAN, Phase.MEAN):
        for a_order in (Phase.DESCENDING, Phase.ASCENDING):
            for b_order in (Phase.DESCENDING, Phase.ASCENDING):
                phase.score_jsonpath = "a"
                phase.scoring_method_choice = score_method
                phase.score_default_sort = a_order
                phase.extra_results_columns = [
                    {"path": "b", "title": "b", "order": b_order}
                ]
                phase.save()
                # Guard against query-count regressions in calculate_ranks.
                with django_assert_max_num_queries(9):
                    calculate_ranks(phase_pk=phase.pk)
                assert_ranks(
                    queryset,
                    expected[a_order][score_method][b_order]["ranks"],
                    expected[a_order][score_method][b_order]["rank_scores"],
                )
@pytest.mark.django_db
def METHOD_NAME():
    """Rank visibility follows the phase's result_display_choice setting."""
    phase = PhaseFactory()
    user1 = UserFactory()
    user2 = UserFactory()
    metrics = "metrics"
    creator = "creator"
    results = [
        {metrics: {"b": 0.3}, creator: user1},  # Invalid result
        {metrics: {"a": 0.6}, creator: user1},
        {metrics: {"a": 0.4}, creator: user1},
        {metrics: {"a": 0.2}, creator: user1},
        {metrics: {"a": 0.1}, creator: user2},
        {metrics: {"a": 0.5}, creator: user2},
        {metrics: {"a": 0.3}, creator: user2},
    ]
    queryset = [
        EvaluationFactory(
            submission__phase=phase,
            submission__creator=r[creator],
            status=Evaluation.SUCCESS,
        )
        for r in results
    ]
    for e, r in zip(queryset, results, strict=True):
        e.outputs.add(
            ComponentInterfaceValue.objects.create(
                interface=ComponentInterface.objects.get(
                    slug="metrics-json-file"
                ),
                value=r[metrics],
            )
        )
    # ALL: every valid result is ranked, regardless of creator.
    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [0, 1, 3, 5, 6, 2, 4]
    assert_ranks(queryset, expected_ranks)
    # MOST_RECENT: only each user's latest submission is ranked.
    phase.result_display_choice = Phase.MOST_RECENT
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [0, 0, 0, 2, 0, 0, 1]
    assert_ranks(queryset, expected_ranks)
    # BEST: only each user's best-scoring submission is ranked.
    phase.result_display_choice = Phase.BEST
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [0, 1, 0, 0, 0, 2, 0]
    assert_ranks(queryset, expected_ranks)
    # now test reverse order
    phase.score_default_sort = phase.ASCENDING
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [0, 0, 0, 2, 1, 0, 0]
    assert_ranks(queryset, expected_ranks)
    phase.result_display_choice = Phase.MOST_RECENT
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [0, 0, 0, 1, 0, 0, 2]
    assert_ranks(queryset, expected_ranks)
@pytest.mark.django_db
def test_null_results():
    """Evaluations whose score metric is null are left unranked (rank 0)."""
    phase = PhaseFactory()
    results = [{"a": 0.6}, {"a": None}]
    queryset = [
        EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
        for _ in range(len(results))
    ]
    for e, r in zip(queryset, results, strict=True):
        e.outputs.add(
            ComponentInterfaceValue.objects.create(
                interface=ComponentInterface.objects.get(
                    slug="metrics-json-file"
                ),
                value=r,
            )
        )
    phase.score_jsonpath = "a"
    phase.result_display_choice = Phase.ALL
    phase.save()
    calculate_ranks(phase_pk=phase.pk)
    expected_ranks = [1, 0]
    assert_ranks(queryset, expected_ranks)
def assert_ranks(queryset, expected_ranks, expected_rank_scores=None):
    """Reload each evaluation from the DB, then compare ranks and,
    optionally, rank scores against the expected lists."""
    for evaluation in queryset:
        evaluation.refresh_from_db()
    actual_ranks = [evaluation.rank for evaluation in queryset]
    assert actual_ranks == expected_ranks
    if expected_rank_scores:
        actual_scores = [evaluation.rank_score for evaluation in queryset]
        assert actual_scores == expected_rank_scores
nptensor2np | from typing import Tuple, Type
import numpy as np
from ..image_utils import MAX_VALUES_BY_DTYPE, as_3d
def np_denorm(x: np.ndarray, min_max: Tuple[float, float] = (-1.0, 1.0)) -> np.ndarray:
    """Denormalize from the ``min_max`` range (default [-1,1]) to [0,1].

    Inverse of the z-norm used for generator outputs with a tanh-style
    activation; the result is clipped to [0,1].
    """
    lo, hi = min_max
    rescaled = (x - lo) / (hi - lo)
    return np.clip(rescaled, 0, 1)
def np_norm(x: np.ndarray) -> np.ndarray:
    """Normalize (z-norm) from [0,1] range to [-1,1], clipped to [-1,1]."""
    shifted = (x - 0.5) * 2.0
    return np.clip(shifted, -1, 1)
def np_bgr_to_rgb(img: np.ndarray) -> np.ndarray:
    """Reverse the first (channel) axis of a CHW image: BGR -> RGB."""
    flipped: np.ndarray = img[::-1, ...]
    return flipped
def np_rgb_to_bgr(img: np.ndarray) -> np.ndarray:
    """RGB -> BGR: reverse the first (channel) axis, the same flip as
    np_bgr_to_rgb."""
    out: np.ndarray = img[::-1, ...]
    return out
def np_bgra_to_rgba(img: np.ndarray) -> np.ndarray:
    """Swap the first and third channels of a CHW image (alpha stays last):
    BGRA -> RGBA."""
    channel_order = [2, 1, 0, 3]
    out: np.ndarray = img[channel_order, ...]  # type: ignore
    return out
def np_rgba_to_bgra(img: np.ndarray) -> np.ndarray:
    """RGBA -> BGRA: the same first/third channel swap as np_bgra_to_rgba."""
    out: np.ndarray = img[[2, 1, 0, 3], ...]  # type: ignore
    return out
def np2nptensor(
    img: np.ndarray,
    bgr2rgb=True,
    data_range=1.0,  # pylint: disable=unused-argument
    normalize=False,
    change_range=True,
    add_batch=True,
) -> np.ndarray:
    """Converts a numpy image array into a numpy Tensor array.

    Presumably expects an HWC (or HW grayscale) image — as_3d promotes 2D
    input to 3D; confirm against callers.

    Parameters:
        img (numpy array): the input image numpy array
        bgr2rgb (bool): flip channel order after the CHW transpose
        data_range (float): unused; kept for interface compatibility
        normalize (bool): additionally z-norm the result from [0,1] to [-1,1]
        change_range (bool): rescale from the dtype's max value to [0,1]
        add_batch (bool): choose if new tensor needs batch dimension added
    """
    # check how many channels the image has, then condition. ie. RGB, RGBA, Gray
    # if bgr2rgb:
    #     img = img[
    #         :, :, [2, 1, 0]
    #     ] # BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
    if change_range:
        # Scale integer images to [0,1] using the dtype's maximum value
        # (e.g. uint8 -> /255); unknown dtypes fall back to a divisor of 1.0.
        dtype = img.dtype
        maxval = MAX_VALUES_BY_DTYPE.get(dtype.name, 1.0)
        t_dtype = np.dtype("float32")
        img = img.astype(t_dtype) / maxval  # ie: uint8 = /255
    # "HWC to CHW" and "numpy to tensor"
    img = np.ascontiguousarray(np.transpose(as_3d(img), (2, 0, 1))).astype(np.float32)
    if bgr2rgb:
        # BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
        if (
            img.shape[0] % 3 == 0
        ):  # RGB or MultixRGB (3xRGB, 5xRGB, etc. For video tensors.)
            img = np_bgr_to_rgb(img)
        elif img.shape[0] == 4:  # RGBA
            img = np_bgra_to_rgba(img)
    if add_batch:
        img = np.expand_dims(
            img, axis=0
        )  # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
    if normalize:
        img = np_norm(img)
    return img
def METHOD_NAME(
    img: np.ndarray,
    rgb2bgr=True,
    remove_batch=True,
    data_range=255,
    denormalize=False,
    change_range=True,
    imtype: Type = np.uint8,
) -> np.ndarray:
    """Converts a Tensor array into a numpy image array.

    Parameters:
        img (tensor): the input image tensor array
            4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
        rgb2bgr (bool): flip channel order for 3- or 4-channel images
        remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
        data_range (int): upper bound the output is scaled and clipped to
        denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
        change_range (bool): scale/clip/round values into [0, data_range]
        imtype (type): the desired type of the converted numpy array (np.uint8
            default)
    Output:
        img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
    Raises:
        TypeError: if the input is not 2D, 3D or 4D.
    """
    n_dim = img.ndim
    img = img.astype(np.float32)
    if n_dim in (4, 3):
        # if n_dim == 4, has to convert to 3 dimensions
        if n_dim == 4 and remove_batch:
            # remove a fake batch dimension
            img = img.squeeze(0)
        # NOTE(review): with remove_batch=False a 4D input keeps its batch
        # axis, so shape[0] below is the batch size, not channels — confirm
        # that callers intend this.
        if img.shape[0] == 3 and rgb2bgr:  # RGB
            # RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
            img_np = np_rgb_to_bgr(img)
        elif img.shape[0] == 4 and rgb2bgr:  # RGBA
            # RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
            img_np = np_rgba_to_bgra(img)
        else:
            img_np = img
        img_np = np.transpose(img_np, (1, 2, 0))  # CHW to HWC
    elif n_dim == 2:
        img_np = img
    else:
        raise TypeError(
            f"Only support 4D, 3D and 2D tensor. But received with dimension: {n_dim:d}"
        )
    # if rgb2bgr:
    #     img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
    # TODO: Check: could denormalize in the begining in tensor form instead
    if denormalize:
        img_np = np_denorm(img_np)  # denormalize if needed
    if change_range:
        img_np = np.clip(
            data_range * img_np, 0, data_range  # type: ignore
        ).round()  # np.clip to the data_range
    # has to be in range (0,255) before changing to np.uint8, else np.float32
    return img_np.astype(imtype)
simple policy | """Test the flask oso plugin."""
from pathlib import Path
import pytest
from flask import Flask
from oso import Oso, OsoError
from werkzeug.exceptions import Forbidden
from flask_oso import FlaskOso, authorize, skip_authorization
@pytest.fixture
def flask_app():
    # A bare Flask application for the tests.
    return Flask("test")
@pytest.fixture
def oso():
    # A fresh Oso instance with no policy loaded.
    return Oso()
@pytest.fixture
def user():
    # The default actor used by the flask_oso fixture.
    return "user"
@pytest.fixture
def flask_oso(flask_app, oso, user):
    # A FlaskOso bound to the test app with ``user`` as the current actor.
    fo = FlaskOso(oso=oso, app=flask_app)
    fo.set_get_actor(lambda: user)
    return fo
@pytest.fixture
def METHOD_NAME(oso):
    """Load a simple base policy into oso."""
    oso.load_file(Path(__file__).parent / "simple.polar")
@pytest.fixture
def app_ctx(flask_app):
    # Push a Flask application context for the duration of the test.
    with flask_app.app_context():
        yield
def test_initialization_with_set(flask_app, oso, METHOD_NAME, app_ctx, user):
    """Test that setting oso works correctly."""
    # Establish that an improperly initialized flask oso throws an exception.
    flask_oso = FlaskOso()
    flask_oso.set_get_actor(lambda: user)
    with pytest.raises(OsoError):
        flask_oso.authorize(action="read", resource="resource")
    # Works after set oso.
    flask_oso.set_oso(oso)
    flask_oso.authorize(action="read", resource="resource")
def test_initialization_with_init(flask_app, oso, METHOD_NAME, app_ctx, user):
    """Passing oso to the constructor is equivalent to set_oso()."""
    # Works with oso init.
    flask_oso = FlaskOso(oso=oso)
    flask_oso.set_get_actor(lambda: user)
    flask_oso.authorize(action="read", resource="resource")
def test_authorize(flask_app, flask_oso, METHOD_NAME, app_ctx):
    """Test that authorize function works correctly."""
    # Actor defaults to current actor.
    flask_oso.authorize("resource", action="read")
    # Overridden actor.
    with pytest.raises(Forbidden):
        flask_oso.authorize("resource", actor="other", action="read")
    flask_oso.authorize("other_resource", actor="other_user", action="read")
    # Request method action default
    with flask_app.test_request_context(method="GET"):
        flask_oso.authorize("action_resource")
    with flask_app.test_request_context(method="POST"):
        with pytest.raises(Forbidden):
            flask_oso.authorize("action_resource")
    # Changing the actor getter changes what authorize() checks against.
    flask_oso.set_get_actor(lambda: "other_user")
    flask_oso.authorize("other_resource", action="read")
def test_require_authorization(flask_app, flask_oso, app_ctx, METHOD_NAME):
    """Every route must call authorize once require_authorization is enabled."""
    flask_oso.require_authorization(flask_app)
    flask_app.testing = True
    @flask_app.route("/")
    def hello():
        return "Hello"
    # Don't call authorize.
    with pytest.raises(OsoError):
        with flask_app.test_client() as c:
            c.get("/")
    @flask_app.route("/auth")
    def auth():
        flask_oso.authorize("resource", action="read")
        return "Hello"
    with flask_app.test_client() as c:
        resp = c.get("/auth")
        assert resp.status_code == 200
    # Decorator works
    @flask_app.route("/decorator")
    @authorize(action="read", resource="resource")
    def decorated():
        return "Hello"
    with flask_app.test_client() as c:
        resp = c.get("/decorator")
        assert resp.status_code == 200
    # Skip auth silences error
    # NOTE(review): the view function name shadows the builtin `open`; it is
    # only used as a Flask endpoint name here, so this is harmless but worth
    # renaming in a behavior-affecting change.
    @flask_app.route("/open")
    @skip_authorization
    def open():
        return "open"
    with flask_app.test_client() as c:
        resp = c.get("/open")
        assert resp.status_code == 200
    # 404 doesn't require authorization
    with flask_app.test_client() as c:
        resp = c.get("/nonexistent")
        assert resp.status_code == 404
    # Server error does
    @flask_app.route("/500")
    def server_error():
        raise Exception("You messed this one up")
    flask_app.testing = False
    # Ensure that requiring authorization doesn't interfere with surfacing
    # other exceptions that occur during the request.
    with flask_app.test_client() as c:
        resp = c.get("/500")
        assert resp.status_code == 500
def test_route_authorization(flask_oso, oso, flask_app, app_ctx):
    """Test that route authorization middleware works."""
    flask_oso.perform_route_authorization(app=flask_app)
    flask_app.testing = True
    @flask_app.route("/test_route", methods=("GET",))
    def test():
        return "Test"
    # No allow rule is loaded yet, so the policy query itself errors out.
    with flask_app.test_client() as c:
        with pytest.raises(OsoError) as e:
            c.get("/test_route")
        assert "Query for undefined rule `allow`" in str(e)
    # Add rule to policy.
    oso.load_str('allow("user", "GET", _: Request{path: "/test_route"});')
    # Wrong actor -> 403; matching actor -> 200.
    flask_oso.set_get_actor(lambda: "other_user")
    with flask_app.test_client() as c:
        assert c.get("/test_route").status_code == 403
    flask_oso.set_get_actor(lambda: "user")
    with flask_app.test_client() as c:
        assert c.get("/test_route").status_code == 200
    # Confirm that route authorization doesn't mess with errors.
    with flask_app.test_client() as c:
        assert c.get("/not_a_route").status_code == 404
    with flask_app.test_client() as c:
        assert c.post("/test_route").status_code == 405
# NOTE(review): "authorizaton" is a typo in the test name; left unchanged so
# selecting the test by name keeps working.
def test_route_authorizaton_manual(flask_oso, oso, flask_app, app_ctx):
    """Perform route auth manually."""
    flask_app.testing = True
    from flask import request
    # `request` is a context-local proxy, so it can be captured at decoration
    # time and still resolve to the live request during each call.
    @flask_app.route("/test_route")
    @authorize(resource=request)
    def auth():
        return "authed"
    # Without any allow rule the policy query errors out.
    with flask_app.test_client() as c:
        with pytest.raises(OsoError) as e:
            c.get("/test_route")
        assert "Query for undefined rule `allow`" in str(e)
    # Add rule
    oso.load_str('allow("user", "GET", _: Request{path: "/test_route"});')
    flask_oso.set_get_actor(lambda: "other_user")
    with flask_app.test_client() as c:
        assert c.get("/test_route").status_code == 403
    flask_oso.set_get_actor(lambda: "user")
    with flask_app.test_client() as c:
        assert c.get("/test_route").status_code == 200
def test_custom_unauthorize(flask_oso, oso, flask_app, app_ctx):
    """A user-supplied unauthorized handler replaces the default 403."""
    calls = []

    def record_failure():
        calls.append(True)

    flask_oso.set_unauthorized_action(record_failure)
    # The policy only allows the "not bad" action, so "bad" must be rejected
    # and routed through the custom handler instead of raising Forbidden.
    oso.load_str('allow(_, "not bad", _);')
    flask_oso.authorize(resource="fail!", action="bad")
    assert calls
def test_no_oso_error(flask_app, oso):
    """Test that using authorize without init app throws an error."""
    # Outside of any application context the decorator cannot resolve FlaskOso.
    with pytest.raises(OsoError, match="Application context"):
        @authorize(resource="test")
        def orm_function():
            return "model"
        orm_function()
    with flask_app.app_context():
        with pytest.raises(OsoError, match="init_app"):
            @flask_app.route("/")
            @authorize(resource="test")
            def route():
                return "test"
            flask_app.testing = True
            with flask_app.test_client() as c:
                # NOTE(review): the status code is intentionally not asserted;
                # the request is made only to trigger the decorator, which is
                # expected to raise inside pytest.raises.
                c.get("/").status_code
slice | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# The following code has been taken from
# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0
# licensed as of now. More details on the license can be found
# at https://github.com/facebookresearch/DPR/blob/master/LICENSE
"""
Most of the tokenizers code here is copied from DrQA codebase to avoid adding extra dependency
"""
import copy
import logging
import regex
import spacy
logger = logging.getLogger(__name__)
class Tokens(object):
    """A list of tokenized text plus optional linguistic annotations.

    Each element of ``data`` is a tuple indexed by the column constants
    below (surface text, text with trailing whitespace, character span,
    part-of-speech, lemma and NER tag).
    """

    # Column indices into each token tuple.
    TEXT = 0
    TEXT_WS = 1
    SPAN = 2
    POS = 3
    LEMMA = 4
    NER = 5

    def __init__(self, data, annotators, opts=None):
        self.data = data
        self.annotators = annotators
        self.opts = opts if opts is not None else {}

    def __len__(self):
        """The number of tokens."""
        return len(self.data)

    def METHOD_NAME(self, i=None, j=None):
        """Return a view of the list of tokens from [i, j)."""
        view = copy.copy(self)
        view.data = self.data[i:j]
        return view

    def untokenize(self):
        """Returns the original text (with whitespace reinserted)."""
        pieces = [tok[self.TEXT_WS] for tok in self.data]
        return ''.join(pieces).strip()

    def words(self, uncased=False):
        """Return the surface text of each token.

        Args:
            uncased: lower cases text
        """
        texts = [tok[self.TEXT] for tok in self.data]
        if uncased:
            return [w.lower() for w in texts]
        return texts

    def offsets(self):
        """Returns a list of [start, end) character offsets of each token."""
        return [tok[self.SPAN] for tok in self.data]

    def pos(self):
        """Part-of-speech tags, or None if the annotation was not requested."""
        if 'pos' not in self.annotators:
            return None
        return [tok[self.POS] for tok in self.data]

    def lemmas(self):
        """Lemmatized text, or None if the annotation was not requested."""
        if 'lemma' not in self.annotators:
            return None
        return [tok[self.LEMMA] for tok in self.data]

    def entities(self):
        """NER tags, or None if the annotation was not requested."""
        if 'ner' not in self.annotators:
            return None
        return [tok[self.NER] for tok in self.data]

    def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
        """Return all ngrams of length 1..n.

        Args:
            n: upper limit of ngram length
            uncased: lower cases text
            filter_fn: takes an ngram word list; return True to drop it
            as_strings: return space-joined strings instead of (s, e) spans
        """
        words = self.words(uncased)
        spans = []
        for start in range(len(words)):
            for end in range(start, min(start + n, len(words))):
                gram = words[start:end + 1]
                if filter_fn and filter_fn(gram):
                    continue
                spans.append((start, end + 1))
        if as_strings:
            return [' '.join(words[s:e]) for (s, e) in spans]
        return spans

    def entity_groups(self):
        """Group consecutive tokens sharing the same (non-background) NER tag."""
        entities = self.entities()
        if not entities:
            return None
        # spaCy uses '' for "no entity"; the default elsewhere is 'O'.
        non_ent = self.opts.get('non_ent', 'O')
        groups = []
        idx = 0
        n_tokens = len(entities)
        while idx < n_tokens:
            tag = entities[idx]
            if tag == non_ent:
                idx += 1
                continue
            # Chomp the run of tokens carrying the same tag.
            start = idx
            while idx < n_tokens and entities[idx] == tag:
                idx += 1
            groups.append((self.METHOD_NAME(start, idx).untokenize(), tag))
        return groups
class Tokenizer(object):
    """Base tokenizer class.
    Tokenizers implement tokenize, which should return a Tokens class.
    """
    def tokenize(self, text):
        # Must be overridden by subclasses to return a Tokens instance.
        raise NotImplementedError
    def shutdown(self):
        # Hook for subclasses that hold external resources; no-op by default.
        pass
    def __del__(self):
        # Best-effort resource cleanup when the tokenizer is garbage-collected.
        self.shutdown()
class SimpleTokenizer(Tokenizer):
    """Regex-based tokenizer: alphanumeric runs, or single non-space chars."""

    # Unicode-aware character classes (requires the `regex` module).
    ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
    NON_WS = r'[^\p{Z}\p{C}]'

    def __init__(self, **kwargs):
        """
        Args:
            annotators: None or empty set (only tokenizes).
        """
        pattern = '(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS)
        self._regexp = regex.compile(
            pattern,
            flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
        )
        if len(kwargs.get('annotators', {})) > 0:
            logger.warning('%s only tokenizes! Skipping annotators: %s' %
                           (type(self).__name__, kwargs.get('annotators')))
        self.annotators = set()

    def tokenize(self, text):
        matches = list(self._regexp.finditer(text))
        n_matches = len(matches)
        data = []
        for i, match in enumerate(matches):
            token = match.group()
            span = match.span()
            start_ws = span[0]
            # Whitespace following a token is attached to it: it runs up to
            # the start of the next match (or the token's own end, for the
            # last one).
            if i + 1 < n_matches:
                end_ws = matches[i + 1].span()[0]
            else:
                end_ws = span[1]
            data.append((token, text[start_ws:end_ws], span))
        return Tokens(data, self.annotators)
class SpacyTokenizer(Tokenizer):
    def __init__(self, **kwargs):
        """
        Args:
            annotators: set that can include pos, lemma, and ner.
            model: spaCy model to use (either path, or keyword like 'en').
        """
        model = kwargs.get('model', 'en')
        self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
        # Only load the pipeline components the requested annotators need.
        # NOTE(review): the 'parser'/'tagger'/'entity' load kwargs and the
        # nlp.tagger / nlp.entity calls below follow the spaCy 1.x API —
        # confirm against the spaCy version this project pins.
        nlp_kwargs = {'parser': False}
        if not any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
            nlp_kwargs['tagger'] = False
        if 'ner' not in self.annotators:
            nlp_kwargs['entity'] = False
        self.nlp = spacy.load(model, **nlp_kwargs)
    def tokenize(self, text):
        # We don't treat new lines as tokens.
        clean_text = text.replace('\n', ' ')
        tokens = self.nlp.tokenizer(clean_text)
        if any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
            self.nlp.tagger(tokens)
        if 'ner' in self.annotators:
            self.nlp.entity(tokens)
        data = []
        for i in range(len(tokens)):
            # Get whitespace
            start_ws = tokens[i].idx
            if i + 1 < len(tokens):
                end_ws = tokens[i + 1].idx
            else:
                end_ws = tokens[i].idx + len(tokens[i].text)
            # Tuple layout matches the Tokens.TEXT..NER column constants.
            data.append((
                tokens[i].text,
                text[start_ws: end_ws],
                (tokens[i].idx, tokens[i].idx + len(tokens[i].text)),
                tokens[i].tag_,
                tokens[i].lemma_,
                tokens[i].ent_type_,
            ))
        # Set special option for non-entity tag: '' vs 'O' in spaCy
        return Tokens(data, self.annotators, opts={'non_ent': ''})
build | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.files import apply_conandata_patches, chdir, collect_libs, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
from conans import AutoToolsBuildEnvironment, MSBuild, tools
import os
required_conan_version = ">=1.53.0"
class LibStudXmlConan(ConanFile):
    """Conan recipe for libstudxml: built with MSBuild on MSVC,
    autotools everywhere else."""
    name = "libstudxml"
    description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
    topics = ("xml", "xml-parser", "serialization")
    url = "https://github.com/conan-io/conan-center-index"
    homepage = "https://www.codesynthesis.com/projects/libstudxml/"
    license = "MIT"
    settings = "os", "arch", "compiler", "build_type"
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
    }
    default_options = {
        "shared": False,
        "fPIC": True,
    }
    # Lazily-created, cached AutoToolsBuildEnvironment (see _configure_autotools).
    _autotools = None
    @property
    def _source_subfolder(self):
        return "source_subfolder"
    @property
    def _settings_build(self):
        # Fall back to host settings when no separate build profile exists.
        return getattr(self, "settings_build", self.settings)
    def export_sources(self):
        export_conandata_patches(self)
    def config_options(self):
        # fPIC is meaningless on Windows.
        if self.settings.os == "Windows":
            del self.options.fPIC
    def configure(self):
        # Shared builds are always PIC; drop the redundant option.
        if self.options.shared:
            self.options.rm_safe("fPIC")
    def layout(self):
        pass
    def requirements(self):
        self.requires("expat/2.5.0", transitive_headers=True, transitive_libs=True)
    def validate(self):
        if self.info.settings.compiler == "Visual Studio" and Version(self.info.settings.compiler.version) < "9":
            raise ConanInvalidConfiguration(f"Visual Studio {self.info.settings.compiler.version} is not supported.")
    def build_requirements(self):
        if not is_msvc(self):
            self.tool_requires("gnu-config/cci.20210814")
            self.tool_requires("libtool/2.4.7")
            # The autotools flow needs a POSIX shell on Windows.
            if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
                self.tool_requires("msys2/cci.latest")
    def source(self):
        get(self, **self.conan_data["sources"][self.version],
            destination=self._source_subfolder, strip_root=True)
    def _configure_autotools(self):
        # Create the autotools environment once and cache it for make/install.
        if not self._autotools:
            args = ["--with-external-expat"]
            if self.options.shared:
                args.extend(["--enable-shared", "--disable-static"])
            else:
                args.extend(["--disable-shared", "--enable-static"])
            self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
            self._autotools.configure(configure_dir=self._source_subfolder, args=args)
        return self._autotools
    @property
    def _vc_ver(self):
        # Map the compiler setting to the Visual Studio major version string
        # used in the upstream solution/project file names.
        if self.settings.compiler == "Visual Studio":
            return str(Version(self.settings.compiler.version).major)
        elif self.settings.compiler == "msvc":
            return {
                "170": "11",
                "180": "12",
                "190": "14",
                "191": "15",
                "192": "16",
                "193": "17",
            }[str(self.settings.compiler.version)]
        return None
    def _build_vs(self):
        vc_ver = int(self._vc_ver)
        sln_path = None
        def get_sln_path():
            return os.path.join(self.source_folder, self._source_subfolder, f"libstudxml-vc{vc_ver}.sln")
        sln_path = get_sln_path()
        # Upstream does not ship a solution for every VS version; fall back to
        # the closest older version that exists.
        while not os.path.exists(sln_path):
            vc_ver -= 1
            sln_path = get_sln_path()
        proj_path = os.path.join(self.source_folder, self._source_subfolder, "xml", f"libstudxml-vc{vc_ver}.vcxproj")
        # The upstream project only builds a DLL; patch it for static builds.
        if not self.options.shared:
            replace_in_file(self, proj_path, "DynamicLibrary", "StaticLibrary")
            replace_in_file(self, proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
        msbuild = MSBuild(self)
        msbuild.METHOD_NAME(sln_path, platforms={"x86": "Win32"})
    def _build_autotools(self):
        # Refresh config.guess/config.sub so configure recognises newer targets.
        for gnu_config in [
            self.conf.get("user.gnu-config:config_guess", check_type=str),
            self.conf.get("user.gnu-config:config_sub", check_type=str),
        ]:
            if gnu_config:
                copy(
                    self,
                    os.path.basename(gnu_config),
                    src=os.path.dirname(gnu_config),
                    dst=os.path.join(self.source_folder, self._source_subfolder, "config"),
                )
        if self.settings.compiler.get_safe("libcxx") == "libc++":
            # libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
            # include dir, libc++ ends up including their 'version' file instead, causing a compile error
            rm(self, "version", os.path.join(self.source_folder, self._source_subfolder))
        with chdir(self, os.path.join(self.source_folder, self._source_subfolder)):
            self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
        autotools = self._configure_autotools()
        autotools.make()
    def METHOD_NAME(self):
        # build() entry point: apply patches, then dispatch per toolchain.
        apply_conandata_patches(self)
        if is_msvc(self):
            self._build_vs()
        else:
            self._build_autotools()
    def package(self):
        copy(self, "LICENSE", src=os.path.join(self.source_folder, self._source_subfolder), dst=os.path.join(self.package_folder, "licenses"))
        if is_msvc(self):
            # The MSBuild flow has no install target: copy headers and
            # binaries by hand.
            self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
            self.copy("xml/serializer", dst="include", src=self._source_subfolder)
            self.copy("xml/qname", dst="include", src=self._source_subfolder)
            self.copy("xml/parser", dst="include", src=self._source_subfolder)
            self.copy("xml/forward", dst="include", src=self._source_subfolder)
            self.copy("xml/exception", dst="include", src=self._source_subfolder)
            self.copy("xml/content", dst="include", src=self._source_subfolder)
            self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
            self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
            self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
            self.copy("xml/*.h", dst="include", src=self._source_subfolder)
            # 64-bit outputs live in lib64/bin64 in the upstream layout.
            suffix = ""
            if self.settings.arch == "x86_64":
                suffix = "64"
            if self.options.shared:
                self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
                self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
            else:
                self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
        else:
            autotools = self._configure_autotools()
            autotools.install()
            # Drop libtool/pkg-config leftovers that Conan consumers don't use.
            rm(self, "*.la", os.path.join(self.package_folder, "lib"))
            rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
            rmdir(self, os.path.join(self.package_folder, "share"))
    def package_info(self):
        self.cpp_info.set_property("pkg_config_name", "libstudxml")
        self.cpp_info.libs = collect_libs(self)
        # If built with makefile, static library mechanism is provided by their buildsystem already
        if is_msvc(self) and not self.options.shared:
            self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"]
main | # Developed for module tiericide, this script will quickly print out a market
# conversion map based on patch notes, as well as database conversion mapping.
import argparse
import os.path
import sqlite3
import sys
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(__file__)
# The parent of this scripts/ directory is the project root.
sys.path.append(os.path.realpath(os.path.join(path, "..")))
# change to correct conversion
rename_phrase = " renamed to "
conversion_phrase = " -> "
text = """
Veldspar Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Scordite Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Pyroxeres Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Plagioclase Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Veldspar Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Scordite Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Pyroxeres Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Plagioclase Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Omber Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Kernite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Jaspet Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Hemorphite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Hedbergite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Omber Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Jaspet Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Kernite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Hedbergite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Hemorphite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Gneiss Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Dark Ochre Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Crokite Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Gneiss Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Dark Ochre Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Crokite Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Bistot Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Arkonor Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Spodumain Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Bistot Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
Arkonor Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
Spodumain Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
"""
def METHOD_NAME(old, new):
    """Print market- and database-conversion mappings derived from ``text``.

    Args:
        old: path to the pre-patch SQLite database (holds the old type names).
        new: path to the post-patch SQLite database (holds the new type names).
    """
    # Open both databases and get their cursors. Close them in ``finally``;
    # the previous version leaked both connections.
    old_db = sqlite3.connect(os.path.expanduser(old))
    new_db = sqlite3.connect(os.path.expanduser(new))
    try:
        old_cursor = old_db.cursor()
        new_cursor = new_db.cursor()
        renames = {}
        conversions = {}
        for x in text.splitlines():
            x = x.strip()
            if not x:
                continue
            # Classify the line by which separator phrase it contains.
            if conversion_phrase in x:
                c = x.split(conversion_phrase)
                container = conversions
            elif rename_phrase in x:
                c = x.split(rename_phrase)
                container = renames
            else:
                print("Unknown format: {}".format(x))
                sys.exit()
            old_name, new_name = c[0], c[1]
            if "Blueprint" in old_name or "Blueprint" in new_name:
                print("Blueprint: Skipping this line: %s" % x)
                continue
            # Look up the typeID of each name in its respective database.
            new_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (new_name,))
            row = new_cursor.fetchone()
            new_item = row[0] if row else None
            old_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (old_name,))
            row = old_cursor.fetchone()
            old_item = row[0] if row else None
            if not old_item:
                print("Error finding old item in {} -> {}".format(old_name, new_name))
            if not new_item:
                print("Error finding new item in {} -> {}".format(old_name, new_name))
            # Group every old item under the new item it maps to.
            container.setdefault((new_item, new_name), []).append((old_item, old_name))
        print(" # Renamed items")
        for new, old in renames.items():
            # A rename must be one-to-one.
            if len(old) != 1:
                print("Incorrect length, key: {}, value: {}".format(new, old))
                sys.exit()
            old = old[0]
            print(" \"{}\": \"{}\",".format(old[1], new[1]))
        # Convert modules
        print("\n # Converted items")
        for new, olds in conversions.items():
            for old in olds:
                print(" \"{}\": \"{}\",".format(old[1], new[1]))
        print()
        print()
        # Database conversion map: new typeID -> tuple of old typeIDs.
        for new, old in conversions.items():
            print(" {}: ( # {}".format(new[0], new[1]))
            for item in old:
                print(" {}, # {}".format(item[0], item[1]))
            print(" ),")
    finally:
        old_db.close()
        new_db.close()
# Script entry point: compare the two databases given on the command line.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # -o/--old and -n/--new are paths to the pre- and post-patch SQLite dumps.
    parser.add_argument("-o", "--old", type=str)
    parser.add_argument("-n", "--new", type=str)
    args = parser.parse_args()
    METHOD_NAME(args.old, args.new)
test set api key | import json
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
from urllib.parse import unquote_plus
import pytest
from tribler.gui.utilities import TranslatedString, compose_magnetlink, create_api_key, dict_item_is_any_of, \
duration_to_string, format_api_key, get_i18n_file_path, get_languages_file_content, I18N_DIR, LANGUAGES_FILE, \
quote_plus_unicode, set_api_key, unicode_quoter
def test_quoter_char():
    """An ASCII character survives a quote/unquote round trip."""
    assert unquote_plus(unicode_quoter('A')) == 'A'


def test_quoter_unichar():
    """A unicode character survives a quote/unquote round trip."""
    assert unquote_plus(unicode_quoter('\u9b54')) == '\u9b54'


def test_quoter_reserved():
    """A URI-reserved character is actually escaped, and round-trips."""
    encoded = unicode_quoter('+')
    assert encoded != '+'
    assert unquote_plus(encoded) == '+'


def test_quote_plus_unicode_char():
    """An ASCII string survives a quote/unquote round trip."""
    assert unquote_plus(quote_plus_unicode('Ab09')) == 'Ab09'


def test_quote_plus_unicode_unichar():
    """A unicode string survives a quote/unquote round trip."""
    original = '\u9b54\u11b3\uaf92\u1111'
    assert unquote_plus(quote_plus_unicode(original)) == original


def test_quote_plus_unicode_reserved():
    """A string with reserved characters is escaped and round-trips."""
    original = '+ &'
    encoded = quote_plus_unicode(original)
    assert encoded != original
    assert unquote_plus(encoded) == original


def test_quote_plus_unicode_compound():
    """A mix of unicode, reserved and plain characters round-trips."""
    original = '\u9b54\u11b3+ A5&\uaf92\u1111'
    encoded = quote_plus_unicode(original)
    assert encoded != original
    assert unquote_plus(encoded) == original
def test_compose_magnetlink():
    """compose_magnetlink builds xt/dn/tr parts from infohash, name, trackers."""
    infohash = "DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A"
    name = "Some torrent name"
    trackers = ['http://tracker1.example.com:8080/announce', 'http://tracker1.example.com:8080/announce']
    # No infohash yields an empty link; name and trackers are optional parts.
    expected_link0 = ""
    expected_link1 = "magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A"
    expected_link2 = "magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A&dn=Some+torrent+name"
    expected_link3 = (
        "magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A&dn=Some+torrent+name"
        "&tr=http://tracker1.example.com:8080/announce&tr=http://tracker1.example.com:8080/announce"
    )
    composed_link0 = compose_magnetlink(None)
    composed_link1 = compose_magnetlink(infohash)
    composed_link2 = compose_magnetlink(infohash, name=name)
    composed_link3 = compose_magnetlink(infohash, name=name, trackers=trackers)
    assert composed_link0 == expected_link0
    assert composed_link1 == expected_link1
    assert composed_link2 == expected_link2
    assert composed_link3 == expected_link3
def test_is_dict_has():
    """dict_item_is_any_of is True only when the key maps to a listed value."""
    # Degenerate containers never match.
    assert not dict_item_is_any_of(None, None, None)
    assert not dict_item_is_any_of({}, None, None)

    d = {'k': 'v', 'k1': 'v1'}

    # Missing keys or non-matching values are rejected.
    assert not dict_item_is_any_of(d, 'missed_key', None)
    assert not dict_item_is_any_of(d, 'missed_key', ['any_value'])
    assert not dict_item_is_any_of(d, 'k', ['missed_value'])
    assert not dict_item_is_any_of(d, 'k', ['missed_value', 'missed_value1'])

    # A present key whose value is among the candidates is accepted.
    assert dict_item_is_any_of(d, 'k', ['v'])
    assert dict_item_is_any_of(d, 'k', ['v', 'a'])
    assert dict_item_is_any_of(d, 'k', ['a', 'v'])
def test_create_api_key():
    """A generated API key is a 32-character hex string."""
    key = create_api_key()
    assert len(key) == 32
    assert bytes.fromhex(key).hex() == key
def test_format_api_key():
    """format_api_key normalizes str and bytes keys and rejects other types."""
    assert format_api_key("abcdef") == "abcdef"
    assert format_api_key(b"abcdef") == "abcdef"

    # Any other type must raise with a descriptive message.
    match_str = r"^Got unexpected value type of api_key from gui settings \(should be str or bytes\): int$"
    with pytest.raises(ValueError, match=match_str):
        format_api_key(123)
def METHOD_NAME():
    """set_api_key must persist the key into gui settings as bytes."""
    gui_settings = MagicMock()
    set_api_key(gui_settings, "abcdef")
    gui_settings.setValue.assert_called_once_with("api_key", b"abcdef")
# (seconds, expected human-readable duration) pairs for duration_to_string.
TRANSLATIONS = [
    (0, '0s'),
    (61, '1m 1s'),
    (3800, '1h 3m'),
    (110000, '1d 6h'),
    (1110000, '1w 5d'),
    (91110000, '2y 46w'),
    (11191110000, 'Forever'),
]
@pytest.mark.parametrize('seconds, translation', TRANSLATIONS)
# `tr` is patched to the identity so format strings pass through untranslated.
@patch('tribler.gui.utilities.tr', new=Mock(side_effect=lambda x: x))
def test_duration_to_string(seconds, translation):
    # test if the duration_to_string function returns the correct translation for all possible formats
    assert duration_to_string(seconds) == translation
def test_correct_translation():
    """Interpolation uses the translated string when its keys all resolve."""
    translated = TranslatedString('translated %(key1)s', 'original %(key1)s')
    assert translated % {'key1': '123'} == 'translated 123'
@patch('tribler.gui.utilities.logger.warning')
def test_missed_key_in_translated_string(warning: Mock):
    """A key missing from the translation falls back to the original string."""
    original_string = 'original %(key1)s'
    translated_string = 'translated %(key2)s'
    s = TranslatedString(translated_string, original_string)
    # In this test, we pass the correct param 'key1' presented in the original string but missed in the translation.
    # The KeyError is intercepted, the original string is used instead of the translation, and the error is logged
    # as a warning.
    assert s % {'key1': '123'} == 'original 123'
    warning.assert_called_once_with('KeyError: No value provided for \'key2\' in translation "translated %(key2)s", '
                                    'original string: "original %(key1)s"')
@patch('tribler.gui.utilities.logger.warning')
def test_missed_key_in_both_translated_and_original_strings(warning: Mock):
    """When neither string can interpolate, the original's KeyError propagates."""
    original_string = 'original %(key1)s'
    translated_string = 'translated %(key2)s'
    s = TranslatedString(translated_string, original_string)
    with pytest.raises(KeyError, match=r"^'key1'$"):
        # In this test, we pass an incorrect param 'key3' for interpolation, and also, the translation
        # string (with param 'key2') differs from the original string (with param 'key1'). First,
        # translated string tries to interpolate params and issues a warning that 'key2' is missed.
        # Then, the original string tries to interpolate params and again gets a KeyError because 'key1'
        # is also missed. This second exception is propagated because the main reason for the error is
        # in the outside code that passes an incorrect parameter.
        _ = s % {'key3': '123'}
    warning.assert_called_once_with('KeyError: No value provided for \'key2\' in translation "translated %(key2)s", '
                                    'original string: "original %(key1)s"')
@patch("tribler.gui.utilities.get_base_path")
def test_i18n_file_path_and_languages_content(mock_get_base_path, tmp_path):
    """i18n paths resolve under base_path/I18N_DIR and languages.json round-trips."""
    mock_get_base_path.return_value = tmp_path
    filename = "languages.json"
    expected_path = Path(tmp_path) / I18N_DIR / filename
    assert get_i18n_file_path(filename) == expected_path
    languages_json = {
        "unknown": "Unknown",
        "en": "English",
        "nl": "Dutch"
    }
    # Write a languages file into the mocked base path, then read it back
    # through the public helper.
    language_path = get_i18n_file_path(LANGUAGES_FILE)
    language_path.parents[0].mkdir(parents=True, exist_ok=True)
    language_path.write_text(json.dumps(languages_json))
    assert languages_json == get_languages_file_content()
test writing and reading registers | """Tests for Alazar DLL API
This suite of tests is expected to be executed on a Windows PC with a single
Alazar board installed.
"""
import gc
import logging
import os
from weakref import WeakValueDictionary
import pytest
from pytest import LogCaptureFixture
from qcodes.instrument_drivers.AlazarTech.ATS import AlazarTech_ATS
from qcodes.instrument_drivers.AlazarTech.ats_api import AlazarATSAPI
from qcodes.instrument_drivers.AlazarTech.constants import (
API_SUCCESS,
ERROR_CODES,
Capability,
)
from qcodes.instrument_drivers.AlazarTech.dll_wrapper import DllWrapperMeta
def _skip_if_alazar_dll_and_boards_not_installed():
    """Build a pytest mark that skips this module unless the Alazar DLL
    exists and exactly one board is installed."""
    if not os.path.exists(AlazarTech_ATS.dll_path + '.dll'):
        return pytest.mark.skip(
            "Alazar API DLL was not found in 'AlazarTech_ATS.dll_path'.")
    return pytest.mark.skipif(
        len(AlazarTech_ATS.find_boards()) != 1,
        reason='No, or more than one Alazar boards are installed on this PC.')
# Apply the skip conditions above to every test in this module.
pytestmark = _skip_if_alazar_dll_and_boards_not_installed()
# Set the following constants to correct values, they are used in tests below.
SYSTEM_ID = 1
BOARD_ID = 1
@pytest.fixture
def alazar():
    """Yield a driver instance for the installed board, closing it afterwards."""
    instrument = AlazarTech_ATS('alazar', system_id=SYSTEM_ID, board_id=BOARD_ID)
    yield instrument
    instrument.close()
@pytest.fixture
def alazar_api():
    """Yield a raw Alazar API wrapper loaded from the driver DLL."""
    api = AlazarATSAPI(AlazarTech_ATS.dll_path)
    yield api
def test_alazar_api_singleton_behavior(caplog: LogCaptureFixture) -> None:
    """API wrapper instances are shared per DLL path and are dropped from the
    weak registry once the last reference dies."""
    def using_msg(dll_path):
        return f"Using existing instance for DLL path {dll_path}."
    def creating_msg(dll_path):
        return f"Creating new instance for DLL path {dll_path}."
    assert DllWrapperMeta._instances == WeakValueDictionary()
    with caplog.at_level(logging.DEBUG):
        api1 = AlazarATSAPI(AlazarTech_ATS.dll_path)
    assert DllWrapperMeta._instances == WeakValueDictionary(
        {AlazarTech_ATS.dll_path: api1}
    )
    assert caplog.records[-1].message == creating_msg(AlazarTech_ATS.dll_path)
    caplog.clear()
    # A second construction with the same path must return the same object.
    with caplog.at_level(logging.DEBUG):
        api2 = AlazarATSAPI(AlazarTech_ATS.dll_path)
    assert api2 is api1
    assert DllWrapperMeta._instances == WeakValueDictionary(
        {AlazarTech_ATS.dll_path: api1}
    )
    assert caplog.records[-1].message == using_msg(AlazarTech_ATS.dll_path)
    caplog.clear()
    # Indeed, this actually exposes a vulnerability of the setup. As far as
    # LoadLibrary from ctypes is concerned, both "..\AlazarApi" and
    # "..\AlazarApi.dll" would result in the same loaded library with even
    # the same `_handle` value. But here we will abuse this in order to create
    # a new instance of the Alazar API class by using the same DLL file.
    # This should probably be fixed.
    dll_path_3 = AlazarTech_ATS.dll_path + '.dll'
    with caplog.at_level(logging.DEBUG):
        api3 = AlazarATSAPI(dll_path_3)
    assert api3 is not api1
    assert api3 is not api2
    assert DllWrapperMeta._instances == WeakValueDictionary(
        {AlazarTech_ATS.dll_path: api1, dll_path_3: api3}
    )
    assert caplog.records[-1].message == creating_msg(dll_path_3)
    caplog.clear()
    # Dropping references must shrink the weak registry one entry at a time.
    del api2
    gc.collect()
    assert DllWrapperMeta._instances == WeakValueDictionary(
        {AlazarTech_ATS.dll_path: api1, dll_path_3: api3}
    )
    del api1
    gc.collect()
    assert DllWrapperMeta._instances == WeakValueDictionary({dll_path_3: api3})
    del api3
    gc.collect()
    assert DllWrapperMeta._instances == WeakValueDictionary()
def test_find_boards() -> None:
    """Exactly one board with the expected system/board ids must be present."""
    boards = AlazarTech_ATS.find_boards()
    assert len(boards) == 1
    board = boards[0]
    assert board['system_id'] == SYSTEM_ID
    assert board['board_id'] == BOARD_ID
def test_get_board_info(alazar_api) -> None:
    """get_board_info returns exactly the documented keys for the board."""
    info = AlazarTech_ATS.get_board_info(api=alazar_api,
                                         system_id=SYSTEM_ID,
                                         board_id=BOARD_ID)
    assert {'system_id', 'board_id', 'board_kind',
            'max_samples', 'bits_per_sample'} == set(list(info.keys()))
    assert info['system_id'] == SYSTEM_ID
    assert info['board_id'] == BOARD_ID
def test_idn(alazar) -> None:
    """get_idn returns the full documented key set with Alazar identity values."""
    idn = alazar.get_idn()
    assert {'firmware', 'model', 'serial', 'vendor', 'CPLD_version',
            'driver_version', 'SDK_version', 'latest_cal_date', 'memory_size',
            'asopc_type', 'pcie_link_speed', 'pcie_link_width',
            'bits_per_sample', 'max_samples'
            } == set(list(idn.keys()))
    assert idn['vendor'] == 'AlazarTech'
    # All supported models are named ATS<number>.
    assert idn['model'][:3] == 'ATS'
def test_return_codes_are_correct(alazar_api) -> None:
    """
    Test correctness of the coded return codes (success, failure, unknowns),
    and consistency with what `AlazarErrorToText` function returns.
    """
    for code, msg in ERROR_CODES.items():
        real_msg = alazar_api.error_to_text(code)
        assert real_msg in msg
    assert alazar_api.error_to_text(API_SUCCESS) == 'ApiSuccess'
    # Codes just outside the known range must map to 'Unknown'.
    lower_unknown = API_SUCCESS - 1
    assert alazar_api.error_to_text(lower_unknown) == 'Unknown'
    upper_unknown = max(list(ERROR_CODES.keys())) + 1
    assert alazar_api.error_to_text(upper_unknown) == 'Unknown'
def test_get_channel_info_convenient(alazar) -> None:
    """The convenience wrapper returns plain ints, not ctypes values."""
    bps, max_s = alazar.api.get_channel_info_(alazar._handle)
    assert isinstance(bps, int)
    assert isinstance(max_s, int)
def test_get_cpld_version_convenient(alazar) -> None:
    """The CPLD version string has two dot-separated components."""
    cpld_ver = alazar.api.get_cpld_version_(alazar._handle)
    assert isinstance(cpld_ver, str)
    assert len(cpld_ver.split('.')) == 2
def test_get_driver_version_convenient(alazar_api) -> None:
    """The driver version string has three dot-separated components."""
    driver_ver = alazar_api.get_driver_version_()
    assert isinstance(driver_ver, str)
    assert len(driver_ver.split('.')) == 3
def test_get_sdk_version_convenient(alazar_api) -> None:
    """The SDK version string has three dot-separated components."""
    sdk_ver = alazar_api.get_sdk_version_()
    assert isinstance(sdk_ver, str)
    assert len(sdk_ver.split('.')) == 3
def test_query_capability_convenient(alazar) -> None:
    """query_capability_ returns the raw capability value as an int."""
    cap = Capability.GET_SERIAL_NUMBER
    cap_value = alazar.api.query_capability_(alazar._handle, cap)
    assert isinstance(cap_value, int)
def METHOD_NAME(alazar) -> None:
    """
    The approach is to read the register that includes information about
    trigger holdoff parameter, and write the same value back to the board.
    """
    trigger_holdoff_register_offset = 58
    orig_val = alazar._read_register(trigger_holdoff_register_offset)
    # Writing the value we just read exercises the write path without
    # changing the board state.
    alazar._write_register(trigger_holdoff_register_offset, orig_val)
def test_get_num_channels() -> None:
    """Channel bitmasks map to the expected channel counts; bad masks raise."""
    expected_counts = {1: 1, 8: 1, 3: 2, 10: 2, 15: 4, 255: 8, 65535: 16}
    for mask, n_channels in expected_counts.items():
        assert AlazarTech_ATS.get_num_channels(mask) == n_channels
    for bad_mask in (0, 17, 100):
        # The offending mask value must appear in the error message.
        with pytest.raises(RuntimeError, match=str(bad_mask)):
            AlazarTech_ATS.get_num_channels(bad_mask)
test empty play | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.errors import AnsibleAssertionError, AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from units.mock.loader import DictDataLoader
def METHOD_NAME():
    # A play loaded from an empty dict stringifies to the empty string.
    p = Play.load({})
    assert str(p) == ''
def test_play_with_hosts_string():
    # A bare string host pattern becomes the play's name.
    p = Play.load({'hosts': 'foo'})
    assert str(p) == 'foo'
    # Test the caching since self.name should be set by previous call.
    assert p.get_name() == 'foo'
def test_basic_play():
    """Core play keywords survive a round trip through Play.load()."""
    play = Play.load(
        {
            'name': "test play",
            'hosts': ['foo'],
            'gather_facts': False,
            'connection': 'local',
            'remote_user': "root",
            'become': True,
            'become_user': "testing",
        }
    )
    assert play.name == 'test play'
    assert play.hosts == ['foo']
    assert play.connection == 'local'
def test_play_with_remote_user():
    # The legacy 'user' keyword is mapped onto 'remote_user'.
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        user="testing",
        gather_facts=False,
    ))
    assert p.remote_user == "testing"
def test_play_with_user_conflict():
    # Supplying both 'user' and 'remote_user' is a parser error.
    play_data = dict(
        name="test play",
        hosts=['foo'],
        user="testing",
        remote_user="testing",
    )
    with pytest.raises(AnsibleParserError):
        Play.load(play_data)
def test_play_with_bad_ds_type():
    # A play datastructure must be a dict; the error message embeds the
    # offending value and type (spelling differs between py2 and py3).
    play_data = []
    with pytest.raises(AnsibleAssertionError, match=r"while preprocessing data \(\[\]\), ds should be a dict but was a <(?:class|type) 'list'>"):
        Play.load(play_data)
def test_play_with_tasks():
    # Tasks are parsed into Block objects wrapping the task entries.
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        gather_facts=False,
        tasks=[dict(action='shell echo "hello world"')],
    ))
    assert len(p.tasks) == 1
    assert isinstance(p.tasks[0], Block)
    assert p.tasks[0].has_tasks() is True
def test_play_with_handlers():
    # Handlers are parsed like tasks and also exposed via get_handlers().
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        gather_facts=False,
        handlers=[dict(action='shell echo "hello world"')],
    ))
    assert len(p.handlers) >= 1
    assert len(p.get_handlers()) >= 1
    assert isinstance(p.handlers[0], Block)
    assert p.handlers[0].has_tasks() is True
def test_play_with_pre_tasks():
    # pre_tasks show up both in p.pre_tasks and in the flattened get_tasks().
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        gather_facts=False,
        pre_tasks=[dict(action='shell echo "hello world"')],
    ))
    assert len(p.pre_tasks) >= 1
    assert isinstance(p.pre_tasks[0], Block)
    assert p.pre_tasks[0].has_tasks() is True
    assert len(p.get_tasks()) >= 1
    assert isinstance(p.get_tasks()[0][0], Task)
    assert p.get_tasks()[0][0].action == 'shell'
def test_play_with_post_tasks():
    # post_tasks are parsed into Block objects as well.
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        gather_facts=False,
        post_tasks=[dict(action='shell echo "hello world"')],
    ))
    assert len(p.post_tasks) >= 1
    assert isinstance(p.post_tasks[0], Block)
    assert p.post_tasks[0].has_tasks() is True
def test_play_with_roles(mocker):
    # Patch role-path resolution so no real filesystem lookup happens.
    mocker.patch('ansible.playbook.role.definition.RoleDefinition._load_role_path', return_value=('foo', '/etc/ansible/roles/foo'))
    fake_loader = DictDataLoader({
        '/etc/ansible/roles/foo/tasks.yml': """
        - name: role task
          shell: echo "hello world"
        """,
    })
    mock_var_manager = mocker.MagicMock()
    mock_var_manager.get_vars.return_value = {}
    p = Play.load(dict(
        name="test play",
        hosts=['foo'],
        gather_facts=False,
        roles=['foo'],
    ), loader=fake_loader, variable_manager=mock_var_manager)
    blocks = p.compile()
    # The role's task block plus the implicit meta blocks.
    assert len(blocks) > 1
    assert all(isinstance(block, Block) for block in blocks)
    assert isinstance(p.get_roles()[0], Role)
def test_play_compile():
    """compile() yields the explicit block plus three implicit meta blocks."""
    play = Play.load(
        dict(
            name="test play",
            hosts=['foo'],
            gather_facts=False,
            tasks=[dict(action='shell echo "hello world"')],
        )
    )
    compiled = play.compile()
    # with a single block, there will still be three
    # implicit meta flush_handler blocks inserted
    assert len(compiled) == 4
@pytest.mark.parametrize(
    'value, expected',
    (
        ('my_vars.yml', ['my_vars.yml']),
        (['my_vars.yml'], ['my_vars.yml']),
        (['my_vars1.yml', 'my_vars2.yml'], ['my_vars1.yml', 'my_vars2.yml']),
        (None, []),
    )
)
def test_play_with_vars_files(value, expected):
    # vars_files is stored verbatim; the getter normalizes it to a list.
    play = Play.load({
        'name': 'Play with vars_files',
        'hosts': ['testhost1'],
        'vars_files': value,
    })
    assert play.vars_files == value
    assert play.get_vars_files() == expected
@pytest.mark.parametrize('value', ([], tuple(), set(), {}, '', None, False, 0))
def test_play_empty_hosts(value):
    # Every falsy hosts value is rejected as an empty host list.
    with pytest.raises(AnsibleParserError, match='Hosts list cannot be empty'):
        Play.load({'hosts': value})
@pytest.mark.parametrize('value', ([None], (None,), ['one', None]))
def test_play_none_hosts(value):
    # None entries inside an otherwise valid sequence are rejected.
    with pytest.raises(AnsibleParserError, match="Hosts list cannot contain values of 'None'"):
        Play.load({'hosts': value})
@pytest.mark.parametrize(
    'value',
    (
        {'one': None},
        {'one': 'two'},
        True,
        1,
        1.75,
        AnsibleVaultEncryptedUnicode('secret'),
    )
)
def test_play_invalid_hosts_sequence(value):
    # hosts must be a string or a sequence of strings, not a mapping/scalar.
    with pytest.raises(AnsibleParserError, match='Hosts list must be a sequence or string'):
        Play.load({'hosts': value})
@pytest.mark.parametrize(
    'value',
    (
        [[1, 'two']],
        [{'one': None}],
        [set((None, 'one'))],
        ['one', 'two', {'three': None}],
        ['one', 'two', {'three': 'four'}],
        [AnsibleVaultEncryptedUnicode('secret')],
    )
)
def test_play_invalid_hosts_value(value):
    # Sequence entries must themselves be valid host values.
    with pytest.raises(AnsibleParserError, match='Hosts list contains an invalid host value'):
        Play.load({'hosts': value})
def test_play_with_vars():
    # vars passed to load() are exposed via .vars and get_vars().
    play = Play.load({}, vars={'var1': 'val1'})
    assert play.get_name() == ''
    assert play.vars == {'var1': 'val1'}
    assert play.get_vars() == {'var1': 'val1'}
def test_play_no_name_hosts_sequence():
    # Without an explicit name, the name is derived from the host list.
    play = Play.load({'hosts': ['host1', 'host2']})
    assert play.get_name() == 'host1,host2'
def test_play_hosts_template_expression():
    # Template expressions are kept untemplated in the play name.
    play = Play.load({'hosts': "{{ target_hosts }}"})
    assert play.get_name() == '{{ target_hosts }}'
@pytest.mark.parametrize(
    'call',
    (
        '_load_tasks',
        '_load_pre_tasks',
        '_load_post_tasks',
        '_load_handlers',
        '_load_roles',
    )
)
def test_bad_blocks_roles(mocker, call):
    # Any AssertionError raised by the loaders must be wrapped in a
    # user-facing AnsibleParserError.
    mocker.patch('ansible.playbook.play.load_list_of_blocks', side_effect=AssertionError('Raised intentionally'))
    mocker.patch('ansible.playbook.play.load_list_of_roles', side_effect=AssertionError('Raised intentionally'))
    play = Play.load({})
    with pytest.raises(AnsibleParserError, match='A malformed (block|(role declaration)) was encountered'):
        getattr(play, call)('', None)
roi align common | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def _bilinear(a_np, n, c, y, x, height, width, layout):
    """Bilinearly sample image (n, c) of a_np at fractional (y, x); 0 outside."""
    # Samples more than one pixel outside the image contribute nothing.
    if y < -1 or y > height or x < -1 or x > width:
        return 0
    # Coordinates slightly outside the image are clamped onto the border.
    y = min(max(y, 0), height - 1)
    x = min(max(x, 0), width - 1)
    y0 = int(math.floor(y))
    x0 = int(math.floor(x))
    frac_y = y - y0
    frac_x = x - x0
    total = 0
    # Accumulate the four corner pixels weighted by their overlap area.
    for weight_x, sample_x in ((1 - frac_x, x0), (frac_x, x0 + 1)):
        for weight_y, sample_y in ((1 - frac_y, y0), (frac_y, y0 + 1)):
            if 0 <= sample_y < height and 0 <= sample_x < width:
                if layout == "NCHW":
                    pixel = a_np[n, c, sample_y, sample_x]
                else:
                    pixel = a_np[n, sample_y, sample_x, c]
                total += weight_x * weight_y * pixel
    return total
def METHOD_NAME(
    a_np,
    b_np,
    rois_np,
    channel,
    pooled_size_h,
    pooled_size_w,
    spatial_scale,
    sample_ratio,
    avg_mode,
    max_mode,
    height,
    width,
    layout,
):
    """Common code used by roi align NCHW and NHWC.

    Fills b_np in place with the pooled values for each ROI and returns it.
    Exactly one of avg_mode/max_mode must be truthy.
    """
    num_roi = rois_np.shape[0]
    for i in range(num_roi):
        roi = rois_np[i]
        # Each ROI row is (batch_index, w_start, h_start, w_end, h_end).
        batch_index = int(roi[0])
        roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1:] * spatial_scale
        # Force the ROI to be at least one input pixel in each dimension.
        roi_h = max(roi_end_h - roi_start_h, 1.0)
        roi_w = max(roi_end_w - roi_start_w, 1.0)
        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w
        if sample_ratio > 0:
            roi_bin_grid_h = roi_bin_grid_w = int(sample_ratio)
        else:
            # Adaptive sampling: roughly one sample per input pixel per bin.
            roi_bin_grid_h = int(math.ceil(roi_h / pooled_size_h))
            roi_bin_grid_w = int(math.ceil(roi_w / pooled_size_w))
        count = roi_bin_grid_h * roi_bin_grid_w
        for c in range(channel):
            for ph in range(pooled_size_h):
                for pw in range(pooled_size_w):
                    if avg_mode:
                        total = 0.0
                    if max_mode:
                        total = float("-inf")
                    for iy in range(roi_bin_grid_h):
                        for ix in range(roi_bin_grid_w):
                            # Sample at the center of each sub-bin cell.
                            y = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
                            x = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
                            if avg_mode:
                                total += (
                                    _bilinear(a_np, batch_index, c, y, x, height, width, layout)
                                    / count
                                )
                            if max_mode:
                                total = max(
                                    total,
                                    _bilinear(a_np, batch_index, c, y, x, height, width, layout),
                                )
                    if layout == "NCHW":
                        b_np[i, c, ph, pw] = total
                    else:
                        b_np[i, ph, pw, c] = total
    return b_np
def roi_align_nchw_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Roi align NCHW in python"""
    # Mode may be given as bytes, str, or the numeric code used by TVM.
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, channel, height, width = a_np.shape
    if isinstance(pooled_size, int):
        pooled_h, pooled_w = pooled_size, pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    out_shape = (rois_np.shape[0], channel, pooled_h, pooled_w)
    b_np = np.zeros(out_shape, dtype=a_np.dtype)
    return METHOD_NAME(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NCHW",
    )
def roi_align_nhwc_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
    """Roi align NHWC in python"""
    # Mode may be given as bytes, str, or the numeric code used by TVM.
    avg_mode = mode in (b"avg", "avg", 0)
    max_mode = mode in (b"max", "max", 1)
    assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
    _, height, width, channel = a_np.shape
    if isinstance(pooled_size, int):
        pooled_h, pooled_w = pooled_size, pooled_size
    else:
        pooled_h, pooled_w = pooled_size
    out_shape = (rois_np.shape[0], pooled_h, pooled_w, channel)
    b_np = np.zeros(out_shape, dtype=a_np.dtype)
    return METHOD_NAME(
        a_np,
        b_np,
        rois_np,
        channel,
        pooled_h,
        pooled_w,
        spatial_scale,
        sample_ratio,
        avg_mode,
        max_mode,
        height,
        width,
        "NHWC",
    )
test main | """Test suite for the cProfile module."""
import sys
from test.test_support import run_unittest, TESTFN, unlink
# rip off all interesting stuff from test_profile
import cProfile
from test.test_profile import ProfileTest, regenerate_expected_output
class CProfileTest(ProfileTest):
    """Run the shared profile test suite against the C profiler."""
    # Overrides of ProfileTest hooks for the cProfile implementation.
    profilerclass = cProfile.Profile
    expected_list_sort_output = "{method 'sort' of 'list' objects}"
    # Issue 3895.
    def test_bad_counter_during_dealloc(self):
        """Deallocating a profiler with a bad counter must not crash."""
        import _lsprof
        # Must use a file as StringIO doesn't trigger the bug.
        sys.stderr = open(TESTFN, 'w')
        try:
            # Rebinding obj deallocates the first (enabled) profiler whose
            # timer callback returns a non-number — the issue 3895 scenario.
            obj = _lsprof.Profiler(lambda: int)
            obj.enable()
            obj = _lsprof.Profiler(1)
            obj.disable()
        finally:
            sys.stderr = sys.__stderr__
            unlink(TESTFN)
def METHOD_NAME():
    # Run the cProfile variant of the shared profile test suite.
    run_unittest(CProfileTest)
def main():
    if '-r' not in sys.argv:
        METHOD_NAME()
    else:
        # -r regenerates the auto-generated expected output below the cut.
        regenerate_expected_output(__file__, CProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
CProfileTest.expected_output['print_stats'] = """\
126 function calls (106 primitive calls) in 1.000 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 1.000 1.000 <string>:1(<module>)
28 0.028 0.001 0.028 0.001 profilee.py:110(__getattr__)
1 0.270 0.270 1.000 1.000 profilee.py:25(testfunc)
23/3 0.150 0.007 0.170 0.057 profilee.py:35(factorial)
20 0.020 0.001 0.020 0.001 profilee.py:48(mul)
2 0.040 0.020 0.600 0.300 profilee.py:55(helper)
4 0.116 0.029 0.120 0.030 profilee.py:73(helper1)
2 0.000 0.000 0.140 0.070 profilee.py:84(helper2_indirect)
8 0.312 0.039 0.400 0.050 profilee.py:88(helper2)
8 0.064 0.008 0.080 0.010 profilee.py:98(subhelper)
12 0.000 0.000 0.012 0.001 {hasattr}
4 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
8 0.000 0.000 0.000 0.000 {range}
4 0.000 0.000 0.000 0.000 {sys.exc_info}
"""
CProfileTest.expected_output['print_callers'] = """\
Ordered by: standard name
Function was called by...
ncalls tottime cumtime
<string>:1(<module>) <-
profilee.py:110(__getattr__) <- 16 0.016 0.016 profilee.py:98(subhelper)
12 0.012 0.012 {hasattr}
profilee.py:25(testfunc) <- 1 0.270 1.000 <string>:1(<module>)
profilee.py:35(factorial) <- 1 0.014 0.130 profilee.py:25(testfunc)
20/3 0.130 0.147 profilee.py:35(factorial)
2 0.006 0.040 profilee.py:84(helper2_indirect)
profilee.py:48(mul) <- 20 0.020 0.020 profilee.py:35(factorial)
profilee.py:55(helper) <- 2 0.040 0.600 profilee.py:25(testfunc)
profilee.py:73(helper1) <- 4 0.116 0.120 profilee.py:55(helper)
profilee.py:84(helper2_indirect) <- 2 0.000 0.140 profilee.py:55(helper)
profilee.py:88(helper2) <- 6 0.234 0.300 profilee.py:55(helper)
2 0.078 0.100 profilee.py:84(helper2_indirect)
profilee.py:98(subhelper) <- 8 0.064 0.080 profilee.py:88(helper2)
{hasattr} <- 4 0.000 0.004 profilee.py:73(helper1)
8 0.000 0.008 profilee.py:88(helper2)
{method 'append' of 'list' objects} <- 4 0.000 0.000 profilee.py:73(helper1)
{method 'disable' of '_lsprof.Profiler' objects} <-
{range} <- 8 0.000 0.000 profilee.py:98(subhelper)
{sys.exc_info} <- 4 0.000 0.000 profilee.py:73(helper1)
"""
CProfileTest.expected_output['print_callees'] = """\
Ordered by: standard name
Function called...
ncalls tottime cumtime
<string>:1(<module>) -> 1 0.270 1.000 profilee.py:25(testfunc)
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> 1 0.014 0.130 profilee.py:35(factorial)
2 0.040 0.600 profilee.py:55(helper)
profilee.py:35(factorial) -> 20/3 0.130 0.147 profilee.py:35(factorial)
20 0.020 0.020 profilee.py:48(mul)
profilee.py:48(mul) ->
profilee.py:55(helper) -> 4 0.116 0.120 profilee.py:73(helper1)
2 0.000 0.140 profilee.py:84(helper2_indirect)
6 0.234 0.300 profilee.py:88(helper2)
profilee.py:73(helper1) -> 4 0.000 0.004 {hasattr}
4 0.000 0.000 {method 'append' of 'list' objects}
4 0.000 0.000 {sys.exc_info}
profilee.py:84(helper2_indirect) -> 2 0.006 0.040 profilee.py:35(factorial)
2 0.078 0.100 profilee.py:88(helper2)
profilee.py:88(helper2) -> 8 0.064 0.080 profilee.py:98(subhelper)
8 0.000 0.008 {hasattr}
profilee.py:98(subhelper) -> 16 0.016 0.016 profilee.py:110(__getattr__)
8 0.000 0.000 {range}
{hasattr} -> 12 0.012 0.012 profilee.py:110(__getattr__)
{method 'append' of 'list' objects} ->
{method 'disable' of '_lsprof.Profiler' objects} ->
{range} ->
{sys.exc_info} ->
"""
# Entry point: run the suite, or regenerate expected output with -r.
if __name__ == "__main__":
    main()
get statistics | # -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
#
# 2018-08-01 Cornelius Kölbel, <[email protected]>
# Initial writeup
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This endpoint is used fetch monitoring/statistics data
The code of this module is tested in tests/test_api_monitoring.py
"""
from flask import (Blueprint, request)
from privacyidea.api.lib.utils import getParam, send_result
from privacyidea.api.lib.prepolicy import prepolicy, check_base_action
from privacyidea.lib.utils import parse_legacy_time
from privacyidea.lib.log import log_with
from privacyidea.lib.monitoringstats import (get_stats_keys, get_values,
get_last_value, delete_stats)
from privacyidea.lib.tokenclass import AUTH_DATE_FORMAT
from flask import g
import logging
from privacyidea.lib.policy import ACTION
# Module-level logger and the blueprint under which all monitoring
# endpoints below are registered.
log = logging.getLogger(__name__)
monitoring_blueprint = Blueprint('monitoring_blueprint', __name__)
@monitoring_blueprint.route('/', methods=['GET'])
@monitoring_blueprint.route('/<stats_key>', methods=['GET'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSREAD)
def METHOD_NAME(stats_key=None):
    """
    Return a list of all available statistics keys in the database if no
    *stats_key* is specified.

    If a stats_key is specified it returns the data of this key.
    The parameters "start" and "end" can be used to specify a time window,
    from which the statistics data should be fetched.
    """
    if stats_key is None:
        # No key given: just list all known statistics keys.
        available_keys = get_stats_keys()
        g.audit_object.log({"success": True})
        return send_result(available_keys)
    # Optional time window; values arrive as legacy time strings.
    params = request.all_data
    start = getParam(params, "start")
    if start:
        start = parse_legacy_time(start, return_date=True)
    end = getParam(params, "end")
    if end:
        end = parse_legacy_time(end, return_date=True)
    timeseries = get_values(stats_key=stats_key, start_timestamp=start,
                            end_timestamp=end)
    # Serialize the datetime of each data point to a string.
    serialized = [(timestamp.strftime(AUTH_DATE_FORMAT), value)
                  for timestamp, value in timeseries]
    g.audit_object.log({"success": True})
    return send_result(serialized)
@monitoring_blueprint.route('/<stats_key>', methods=['DELETE'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSDELETE)
def delete_statistics(stats_key):
    """
    Delete the statistics data of a certain stats_key.

    You can specify the start date and the end date when to delete the
    monitoring data.
    You should specify the dates including the timezone. Otherwise your client
    could send its local time and the server would interpret it as its own local
    time which would result in deleting unexpected entries.

    You can specify the dates like 2010-12-31 22:00+0200
    """
    param = request.all_data
    # Optional deletion window; omitted bounds delete from/to the extremes.
    start = getParam(param, "start")
    if start:
        start = parse_legacy_time(start, return_date=True)
    end = getParam(param, "end")
    if end:
        end = parse_legacy_time(end, return_date=True)
    # r is whatever delete_stats reports — presumably the number of deleted
    # entries; it is passed straight through to the client.
    r = delete_stats(stats_key, start, end)
    g.audit_object.log({"success": True})
    return send_result(r)
@monitoring_blueprint.route('/<stats_key>/last', methods=['GET'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSREAD)
def get_statistics_last(stats_key):
    """
    Get the last value of the stats key
    """
    most_recent = get_last_value(stats_key)
    g.audit_object.log({"success": True})
    return send_result(most_recent)
|
on attachment revision post save | import os
from django.conf import settings as django_settings
from django.db import models
from django.db.models import signals
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from wiki import managers
from wiki.decorators import disable_signal_for_loaddata
from wiki.models.article import BaseRevisionMixin
from wiki.models.pluginbase import ReusablePlugin
from . import settings
class IllegalFileExtension(Exception):
    """Raised when the extension of an uploaded file is not allowed."""
class Attachment(ReusablePlugin):
    """A wiki plugin representing a file attachment with revision history."""
    objects = managers.ArticleFkManager()
    # The revision currently served; NULL until a first revision is saved.
    current_revision = models.OneToOneField(
        "AttachmentRevision",
        verbose_name=_("current revision"),
        blank=True,
        null=True,
        related_name="current_set",
        on_delete=models.CASCADE,
        help_text=_(
            "The revision of this attachment currently in use (on all articles using the attachment)"
        ),
    )
    # Filename as originally uploaded by the user.
    original_filename = models.CharField(
        max_length=256, verbose_name=_("original filename"), blank=True, null=True
    )
    def can_write(self, user):
        """Deny anonymous writes unless anonymous uploads are enabled."""
        if not settings.ANONYMOUS and (not user or user.is_anonymous):
            return False
        return ReusablePlugin.can_write(self, user)
    def can_delete(self, user):
        """Deleting is allowed exactly when writing is."""
        return self.can_write(user)
    class Meta:
        verbose_name = _("attachment")
        verbose_name_plural = _("attachments")
        # Matches label of upcoming 0.1 release
        db_table = "wiki_attachments_attachment"
    def __str__(self):
        # Imported here to avoid a circular import at module load time.
        from wiki.models import Article
        try:
            return "%s: %s" % (
                self.article.current_revision.title,
                self.original_filename,
            )
        except Article.DoesNotExist:
            return "Attachment for non-existing article"
def extension_allowed(filename):
    """
    Return the extension of *filename* if uploads with it are allowed.

    Raises IllegalFileExtension when the filename has no extension or the
    extension is not listed in ``settings.FILE_EXTENSIONS`` (compared
    case-insensitively).
    """
    # NB: the previous implementation caught IndexError around
    # filename.split(".")[-1], but split() never raises it — a dot-less
    # filename ended up treated as if its whole name were the extension
    # (a file literally named "pdf" would have passed). Check explicitly.
    _stem, sep, extension = filename.rpartition(".")
    if not sep or not extension:
        raise IllegalFileExtension(
            gettext("No file extension found in filename. That's not okay!")
        )
    allowed = {ext.lower() for ext in settings.FILE_EXTENSIONS}
    if extension.lower() not in allowed:
        raise IllegalFileExtension(
            gettext(
                "The following filename is illegal: {filename:s}. Extension "
                "has to be one of {extensions:s}"
            ).format(filename=filename, extensions=", ".join(settings.FILE_EXTENSIONS))
        )
    return extension
def upload_path(instance, filename):
    """
    Compute the storage path for an attachment revision's file.

    Validates the extension, keeps it consistent with the attachment's
    original filename, and — depending on plugin settings — obscurifies the
    directory and appends an ``.upload`` suffix.
    """
    extension = extension_allowed(filename)
    # Has to match original extension filename.
    # Fix: compare case-insensitively — previously only the new extension was
    # lowercased, so replacing e.g. "photo.JPG" with "photo.jpg" was rejected.
    if instance.id and instance.attachment and instance.attachment.original_filename:
        original_extension = instance.attachment.original_filename.split(".")[-1].lower()
        if not extension.lower() == original_extension:
            raise IllegalFileExtension(
                "File extension has to be '%s', not '%s'."
                % (original_extension, extension.lower())
            )
    elif instance.attachment:
        # First upload: remember the user-facing filename on the attachment.
        instance.attachment.original_filename = filename
    upload_path = settings.UPLOAD_PATH
    upload_path = upload_path.replace("%aid", str(instance.attachment.article.id))
    if settings.UPLOAD_PATH_OBSCURIFY:
        import random
        import hashlib

        # Random token to make the directory unguessable; this is obscurity,
        # not cryptography, so random/md5 are acceptable here.
        m = hashlib.md5(str(random.randint(0, 100000000000000)).encode("ascii"))
        upload_path = os.path.join(upload_path, m.hexdigest())
    if settings.APPEND_EXTENSION:
        filename += ".upload"
    return os.path.join(upload_path, filename)
class AttachmentRevision(BaseRevisionMixin, models.Model):
    """One uploaded file version belonging to an Attachment."""
    attachment = models.ForeignKey("Attachment", on_delete=models.CASCADE)
    file = models.FileField(
        upload_to=upload_path,  # @ReservedAssignment
        max_length=255,
        verbose_name=_("file"),
        storage=settings.STORAGE_BACKEND,
    )
    description = models.TextField(blank=True)
    class Meta:
        verbose_name = _("attachment revision")
        verbose_name_plural = _("attachment revisions")
        ordering = ("created",)
        get_latest_by = "revision_number"
        # Matches label of upcoming 0.1 release
        db_table = "wiki_attachments_attachmentrevision"
    def get_filename(self):
        """Used to retrieve the filename of a revision.

        But attachment.original_filename should always be used in the frontend
        such that filenames stay consistent."""
        # TODO: Perhaps we can let file names change when files are replaced?
        if not self.file:
            return None
        filename = self.file.name.split("/")[-1]
        # Drop the last dot-suffix (with APPEND_EXTENSION this is ".upload").
        return ".".join(filename.split(".")[:-1])
    def get_size(self):
        """Used to retrieve the file size and not cause exceptions."""
        try:
            return self.file.size
        except (ValueError, OSError):
            # No file associated, or the file is missing from storage.
            return None
    def __str__(self):
        return "%s: %s (r%d)" % (
            self.attachment.article.current_revision.title,
            self.attachment.original_filename,
            self.revision_number,
        )
@disable_signal_for_loaddata
def on_revision_delete(instance, *args, **kwargs):
    """pre_delete handler: remove the revision's file and prune empty dirs."""
    if not instance.file:
        return
    # Remove file
    # NOTE(review): the path is split on "/" — assumes POSIX-style storage
    # paths; verify behavior on Windows deployments.
    path = instance.file.path.split("/")[:-1]
    instance.file.delete(save=False)
    # Clean up empty directories
    # Check for empty folders in the path. Delete the first two.
    max_depth = 1
    if len(path) != 0:
        if len(path[-1]) == 32:
            # Path was (most likely) obscurified so we should look 2 levels down
            max_depth = 2
    for depth in range(0, max_depth):
        delete_path = "/".join(path[:-depth] if depth > 0 else path)
        try:
            # NOTE(review): listdir checks MEDIA_ROOT/delete_path but rmdir
            # gets delete_path as-is; these only agree when file.path is
            # absolute (os.path.join then discards MEDIA_ROOT) — confirm.
            if (
                len(os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path)))
                == 0
            ):
                os.rmdir(delete_path)
        except OSError:
            # Raised by os.listdir if directory is missing
            pass
@disable_signal_for_loaddata
def on_attachment_revision_pre_save(**kwargs):
    """pre_save handler: link a new revision to its predecessor and number it."""
    instance = kwargs["instance"]
    if instance._state.adding:
        # Only link when this is a brand-new row, no predecessor was set
        # explicitly, and the attachment already has a current revision
        # other than this instance.
        update_previous_revision = (
            not instance.previous_revision
            and instance.attachment
            and instance.attachment.current_revision
            and instance.attachment.current_revision != instance
        )
        if update_previous_revision:
            instance.previous_revision = instance.attachment.current_revision
        if not instance.revision_number:
            try:
                previous_revision = instance.attachment.attachmentrevision_set.latest()
                instance.revision_number = previous_revision.revision_number + 1
            # NB! The above should not raise the below exception, but somehow
            # it does.
            except (AttachmentRevision.DoesNotExist, Attachment.DoesNotExist):
                # First revision of this attachment.
                instance.revision_number = 1
@disable_signal_for_loaddata
def METHOD_NAME(**kwargs):
    """post_save handler: promote a fresh revision to current if none is set."""
    revision = kwargs["instance"]
    attachment = revision.attachment
    if not attachment.current_revision:
        # When saved from the Django admin, this revision itself becomes
        # the attachment's current revision.
        attachment.current_revision = revision
        attachment.save()
# Wire the handlers above to the ORM lifecycle of AttachmentRevision.
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)
signals.pre_save.connect(on_attachment_revision_pre_save, AttachmentRevision)
signals.post_save.connect(METHOD_NAME, AttachmentRevision)
End of preview. Expand
in Dataset Viewer.
README.md exists but its content is empty.
Use the "Edit dataset card" button to edit it.
- Downloads last month
- 32