label | code |
---|---|
package info | import os
from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import cross_building
from conan.tools.files import apply_conandata_patches, copy, chdir, export_conandata_patches, get, rmdir
from conan.tools.gnu import Autotools, AutotoolsToolchain
from conan.tools.layout import basic_layout
required_conan_version = ">=1.53.0"
class LibcapConan(ConanFile):
name = "libcap"
license = ("GPL-2.0-only", "BSD-3-Clause")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://git.kernel.org/pub/scm/libs/libcap/libcap.git"
description = "This is a library for getting and setting POSIX.1e" \
" (formerly POSIX 6) draft 15 capabilities"
topics = ("capabilities",)
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"psx_syscals": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"psx_syscals": False,
}
def export_sources(self):
export_conandata_patches(self)
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
self.settings.rm_safe("compiler.libcxx")
self.settings.rm_safe("compiler.cppstd")
def layout(self):
basic_layout(self, src_folder="src")
def validate(self):
if self.settings.os != "Linux":
raise ConanInvalidConfiguration(f"{self.name} only supports Linux")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
tc = AutotoolsToolchain(self)
tc.fpic = self.options.get_safe("fPIC", True)
env = tc.environment()
env.define("SHARED", "yes" if self.options.shared else "no")
env.define("PTHREADS", "yes" if self.options.psx_syscals else "no")
env.define("DESTDIR", self.package_folder)
env.define("prefix", "/")
env.define("lib", "lib")
if cross_building(self):
# libcap needs to run an executable that is compiled from sources
# during the build - so it needs a native compiler (it doesn't matter which)
# Assume the `cc` command points to a working C compiler
env.define("BUILD_CC", "cc")
tc.generate(env)
def build(self):
apply_conandata_patches(self)
autotools = Autotools(self)
with chdir(self, os.path.join(self.source_folder, "libcap")):
autotools.make()
def package(self):
copy(self, "License", self.source_folder, os.path.join(self.package_folder, "licenses"))
autotools = Autotools(self)
with chdir(self, os.path.join(self.source_folder, "libcap")):
autotools.make(target="install-common-cap")
install_cap = ("install-shared-cap" if self.options.shared
else "install-static-cap")
autotools.make(target=install_cap)
if self.options.psx_syscals:
autotools.make(target="install-common-psx")
install_psx = ("install-shared-psx" if self.options.shared
else "install-static-psx")
autotools.make(target=install_psx)
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
def METHOD_NAME(self):
self.cpp_info.components["cap"].set_property("pkg_config_name", "libcap")
self.cpp_info.components["cap"].libs = ["cap"]
if self.options.psx_syscals:
self.cpp_info.components["psx"].set_property("pkg_config_name", "libpsx")
self.cpp_info.components["psx"].libs = ["psx"]
self.cpp_info.components["psx"].system_libs = ["pthread"]
self.cpp_info.components["psx"].exelinkflags = ["-Wl,-wrap,pthread_create"]
# trick to avoid conflicts with cap component
self.cpp_info.set_property("pkg_config_name", "libcap-do-not-use") |
test recursive repr | """Unit tests for collections.defaultdict."""
import os
import copy
import pickle
import tempfile
import unittest
from test import support
from collections import defaultdict
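# Module-level default factory used by the shallow/deep copy tests below.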
def foobar():
return list
class TestDefaultDict(unittest.TestCase):
def test_basic(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
d1.default_factory = list
d1[12].append(42)
self.assertEqual(d1, {12: [42]})
d1[12].append(24)
self.assertEqual(d1, {12: [42, 24]})
d1[13]
d1[14]
self.assertEqual(d1, {12: [42, 24], 13: [], 14: []})
self.assertTrue(d1[12] is not d1[13] is not d1[14])
d2 = defaultdict(list, foo=1, bar=2)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, {"foo": 1, "bar": 2})
self.assertEqual(d2["foo"], 1)
self.assertEqual(d2["bar"], 2)
self.assertEqual(d2[42], [])
self.assertIn("foo", d2)
self.assertIn("foo", d2.keys())
self.assertIn("bar", d2)
self.assertIn("bar", d2.keys())
self.assertIn(42, d2)
self.assertIn(42, d2.keys())
self.assertNotIn(12, d2)
self.assertNotIn(12, d2.keys())
d2.default_factory = None
self.assertEqual(d2.default_factory, None)
try:
d2[15]
except KeyError as err:
self.assertEqual(err.args, (15,))
else:
self.fail("d2[15] didn't raise KeyError")
self.assertRaises(TypeError, defaultdict, 1)
def test_missing(self):
d1 = defaultdict()
self.assertRaises(KeyError, d1.__missing__, 42)
d1.default_factory = list
self.assertEqual(d1.__missing__(42), [])
def test_repr(self):
d1 = defaultdict()
self.assertEqual(d1.default_factory, None)
self.assertEqual(repr(d1), "defaultdict(None, {})")
self.assertEqual(eval(repr(d1)), d1)
d1[11] = 41
self.assertEqual(repr(d1), "defaultdict(None, {11: 41})")
d2 = defaultdict(int)
self.assertEqual(d2.default_factory, int)
d2[12] = 42
self.assertEqual(repr(d2), "defaultdict(<class 'int'>, {12: 42})")
def foo(): return 43
d3 = defaultdict(foo)
self.assertTrue(d3.default_factory is foo)
d3[13]
self.assertEqual(repr(d3), "defaultdict(%s, {13: 43})" % repr(foo))
def test_print(self):
d1 = defaultdict()
def foo(): return 42
d2 = defaultdict(foo, {1: 2})
# NOTE: We can't use tempfile.[Named]TemporaryFile since this
# code must exercise the tp_print C code, which only gets
# invoked for *real* files.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d1, file=f)
print(d2, file=f)
f.seek(0)
self.assertEqual(f.readline(), repr(d1) + "\n")
self.assertEqual(f.readline(), repr(d2) + "\n")
finally:
f.close()
finally:
os.remove(tfn)
def test_copy(self):
d1 = defaultdict()
d2 = d1.copy()
self.assertEqual(type(d2), defaultdict)
self.assertEqual(d2.default_factory, None)
self.assertEqual(d2, {})
d1.default_factory = list
d3 = d1.copy()
self.assertEqual(type(d3), defaultdict)
self.assertEqual(d3.default_factory, list)
self.assertEqual(d3, {})
d1[42]
d4 = d1.copy()
self.assertEqual(type(d4), defaultdict)
self.assertEqual(d4.default_factory, list)
self.assertEqual(d4, {42: []})
d4[12]
self.assertEqual(d4, {42: [], 12: []})
# Issue 6637: Copy fails for empty default dict
d = defaultdict()
d['a'] = 42
e = d.copy()
self.assertEqual(e['a'], 42)
def test_shallow_copy(self):
d1 = defaultdict(foobar, {1: 1})
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
d1.default_factory = list
d2 = copy.copy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_deep_copy(self):
d1 = defaultdict(foobar, {1: [1]})
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, foobar)
self.assertEqual(d2, d1)
self.assertTrue(d1[1] is not d2[1])
d1.default_factory = list
d2 = copy.deepcopy(d1)
self.assertEqual(d2.default_factory, list)
self.assertEqual(d2, d1)
def test_keyerror_without_factory(self):
d1 = defaultdict()
try:
d1[(1,)]
except KeyError as err:
self.assertEqual(err.args[0], (1,))
else:
self.fail("expected KeyError")
def METHOD_NAME(self):
# Issue2045: stack overflow when default_factory is a bound method
class sub(defaultdict):
def __init__(self):
self.default_factory = self._factory
def _factory(self):
return []
d = sub()
self.assertTrue(repr(d).startswith(
"defaultdict(<bound method sub._factory of defaultdict(..."))
# NOTE: printing a subclass of a builtin type does not call its
# tp_print slot. So this part is essentially the same test as above.
tfn = tempfile.mktemp()
try:
f = open(tfn, "w+")
try:
print(d, file=f)
finally:
f.close()
finally:
os.remove(tfn)
def test_callable_arg(self):
self.assertRaises(TypeError, defaultdict, {})
def test_pickling(self):
d = defaultdict(int)
d[1]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
s = pickle.dumps(d, proto)
o = pickle.loads(s)
self.assertEqual(d, o)
def test_main():
support.run_unittest(TestDefaultDict)
if __name__ == "__main__":
test_main() |
test rx tx max id | #!/usr/bin/env python
"""
This module is testing the serial interface.
Copyright: 2017 Boris Wenzlaff
"""
import unittest
from unittest.mock import patch
import can
from can.interfaces.serial.serial_can import SerialBus
from .config import IS_PYPY
from .message_helper import ComparingMessagesTestCase
# Mentioned in #1010
TIMEOUT = 0.5 if IS_PYPY else 0.1 # 0.1 is the default set in SerialBus
class SerialDummy:
"""
Dummy to mock the serial communication
"""
msg = None
def __init__(self):
self.msg = bytearray()
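# read() pops bytes off the front of the buffer so data is consumed in FIFO order.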
def read(self, size=1):
return_value = bytearray()
for i in range(size):
return_value.append(self.msg.pop(0))
return bytes(return_value)
def write(self, msg):
self.msg = bytearray(msg)
def reset(self):
self.msg = None
class SimpleSerialTestBase(ComparingMessagesTestCase):
MAX_TIMESTAMP = 0xFFFFFFFF / 1000
def __init__(self):
ComparingMessagesTestCase.__init__(
self, allowed_timestamp_delta=None, preserves_channel=True
)
def test_can_protocol(self):
self.assertEqual(self.bus.protocol, can.CanProtocol.CAN_20)
def test_rx_tx_min_max_data(self):
"""
Tests the transfer from 0x00 to 0xFF for a 1 byte payload
"""
for b in range(0, 256):
msg = can.Message(data=[b])
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
def test_rx_tx_min_max_dlc(self):
"""
Tests the transfer of 1 - 8 byte payloads
"""
payload = bytearray()
for b in range(1, 9):
payload.append(0)
msg = can.Message(data=payload)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
def test_rx_tx_data_none(self):
"""
Tests the transfer without payload
"""
msg = can.Message(data=None)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
def test_rx_tx_min_id(self):
"""
Tests the transfer with the lowest arbitration id
"""
msg = can.Message(arbitration_id=0)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
def METHOD_NAME(self):
"""
Tests the transfer with the highest arbitration id
"""
msg = can.Message(arbitration_id=536870911)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
def test_rx_tx_max_timestamp(self):
"""
Tests the transfer with the highest possible timestamp
"""
msg = can.Message(timestamp=self.MAX_TIMESTAMP)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
self.assertEqual(msg.timestamp, msg_receive.timestamp)
def test_rx_tx_max_timestamp_error(self):
"""
Tests for an exception with an out of range timestamp (max + 1)
"""
msg = can.Message(timestamp=self.MAX_TIMESTAMP + 1)
self.assertRaises(ValueError, self.bus.send, msg)
def test_rx_tx_min_timestamp(self):
"""
Tests the transfer with the lowest possible timestamp
"""
msg = can.Message(timestamp=0)
self.bus.send(msg)
msg_receive = self.bus.recv()
self.assertMessageEqual(msg, msg_receive)
self.assertEqual(msg.timestamp, msg_receive.timestamp)
def test_rx_tx_min_timestamp_error(self):
"""
Tests for an exception with an out of range timestamp (min - 1)
"""
msg = can.Message(timestamp=-1)
self.assertRaises(ValueError, self.bus.send, msg)
def test_when_no_fileno(self):
"""
Tests that the fileno method catches the missing pyserial implementation on the Windows platform
"""
try:
fileno = self.bus.fileno()
except NotImplementedError:
pass # allow it to be left non-implemented for Windows platform
else:
fileno.__gt__ = (
lambda self, compare: True
) # Current platform implements fileno, so get the mock to respond to a greater than comparison
self.assertIsNotNone(fileno)
self.assertFalse(
fileno == -1
) # forcing the value to -1 is the old way of managing fileno on Windows but it is not compatible with notifiers
self.assertTrue(fileno > 0)
class SimpleSerialTest(unittest.TestCase, SimpleSerialTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
SimpleSerialTestBase.__init__(self)
def setUp(self):
self.patcher = patch("serial.Serial")
self.mock_serial = self.patcher.start()
self.serial_dummy = SerialDummy()
self.mock_serial.return_value.write = self.serial_dummy.write
self.mock_serial.return_value.read = self.serial_dummy.read
self.addCleanup(self.patcher.stop)
self.bus = SerialBus("bus", timeout=TIMEOUT)
def tearDown(self):
self.serial_dummy.reset()
class SimpleSerialLoopTest(unittest.TestCase, SimpleSerialTestBase):
def __init__(self, *args, **kwargs):
unittest.TestCase.__init__(self, *args, **kwargs)
SimpleSerialTestBase.__init__(self)
def setUp(self):
self.bus = SerialBus("loop://", timeout=TIMEOUT)
def tearDown(self):
self.bus.shutdown()
if __name__ == "__main__":
unittest.main() |
level moderation | from __future__ import absolute_import
from __future__ import division
from common.models import Student, Class
from django.shortcuts import render
from portal.templatetags import app_tags
import game.messages as messages
import game.permissions as permissions
from game.forms import LevelModerationForm
from game.models import Level
from .helper import renderError
def METHOD_NAME(request):
"""Renders a page with students' scores.
**Context**
``RequestContext``
``form``
Form used to choose a class and level to show. Instance of `forms.ScoreboardForm.`
``studentData``
List of lists containing all the data to be stored in the scoreboard table.
``thead``
List of Strings representing the headers of the scoreboard table.
**Template:**
:template:`game/level_moderation.html`
"""
# Not showing this part to outsiders.
if not permissions.can_see_level_moderation(request.user):
return renderError(
request,
messages.no_permission_level_moderation_title(),
messages.no_permission_level_moderation_page(),
)
teacher = request.user.userprofile.teacher
if teacher.is_admin:
# Making sure the current teacher classes come up first
classes_taught = teacher.school.classes()
[
classes_taught.insert(0, classes_taught.pop(i))
for i in range(len(classes_taught))
if classes_taught[i].teacher.id == teacher.id
]
else:
classes_taught = Class.objects.filter(teacher=teacher)
if len(classes_taught) <= 0:
return renderError(
request,
messages.no_permission_level_moderation_title(),
messages.no_data_to_show_level_moderation(),
)
classes_taught_ids = [class_.id for class_ in classes_taught]
form = LevelModerationForm(
request.POST or None,
classes=classes_taught,
teacher=teacher,
initial={"classes": classes_taught_ids},
)
if request.method == "POST":
if form.is_valid():
class_ids = set(map(int, request.POST.getlist("classes")))
# check user has permission to look at the classes
if not all(class_id in classes_taught_ids for class_id in class_ids):
return renderError(
request,
messages.no_permission_level_moderation_title(),
messages.no_permission_level_moderation_class(),
)
else:
class_ids = []
else:
class_ids = [class_id for class_id in classes_taught_ids]
students = Student.objects.filter(
class_field_id__in=class_ids, new_user__is_active=True
)
owners = [student.user for student in students]
table_headers = [
"Student",
"Level name",
"Shared with",
"Actions",
]
level_data = []
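# Build one table row per level owned by a student in the selected classes.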
for owner in owners:
for level in Level.objects.filter(owner=owner):
users_shared_with = [
user
for user in level.shared_with.all()
if permissions.CanShareLevelWith().can_share_level_with(
user, owner.user
)
and user != owner.user
]
if not users_shared_with:
shared_str = "-"
else:
shared_str = ", ".join(
app_tags.make_into_username(user) for user in users_shared_with
)
student_name = f"{app_tags.make_into_username(owner.user)}"
# If the teacher is an admin, append teacher names or "(you)" to students
if teacher.is_admin:
student_teacher = owner.student.class_field.teacher
if student_teacher == teacher:
student_teacher_name = "you"
else:
student_teacher_name = f"{student_teacher.new_user.first_name} {student_teacher.new_user.last_name}"
student_name += f" ({student_teacher_name})"
level_data.append(
{
"student": student_name,
"id": level.id,
"name": level.name,
"shared_with": shared_str,
}
)
return render(
request,
"game/level_moderation.html",
context={
"form": form,
"levelData": level_data,
"thead": table_headers,
},
) |
test dict redirect | import sys
import unittest
from unittest.mock import Mock
from ..utils import (
build_absolute_uri,
partial_pipeline_data,
sanitize_redirect,
slugify,
user_is_active,
user_is_authenticated,
)
from .models import TestPartial
PY3 = sys.version_info[0] == 3
class SanitizeRedirectTest(unittest.TestCase):
def test_none_redirect(self):
self.assertEqual(sanitize_redirect(["myapp.com"], None), None)
def test_empty_redirect(self):
self.assertEqual(sanitize_redirect(["myapp.com"], ""), None)
def METHOD_NAME(self):
self.assertEqual(sanitize_redirect(["myapp.com"], {}), None)
def test_invalid_redirect(self):
self.assertEqual(sanitize_redirect(["myapp.com"], {"foo": "bar"}), None)
def test_wrong_path_redirect(self):
self.assertEqual(
sanitize_redirect(["myapp.com"], "http://notmyapp.com/path/"), None
)
def test_invalid_evil_redirect(self):
self.assertEqual(sanitize_redirect(["myapp.com"], "///evil.com"), None)
def test_valid_absolute_redirect(self):
self.assertEqual(
sanitize_redirect(["myapp.com"], "http://myapp.com/path/"),
"http://myapp.com/path/",
)
def test_valid_relative_redirect(self):
self.assertEqual(sanitize_redirect(["myapp.com"], "/path/"), "/path/")
def test_multiple_hosts(self):
allowed_hosts = ["myapp1.com", "myapp2.com"]
for host in allowed_hosts:
url = f"http://{host}/path/"
self.assertEqual(sanitize_redirect(allowed_hosts, url), url)
def test_multiple_hosts_wrong_host(self):
self.assertEqual(
sanitize_redirect(
["myapp1.com", "myapp2.com"], "http://notmyapp.com/path/"
),
None,
)
class UserIsAuthenticatedTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_authenticated(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_authenticated(object()), True)
def test_user_has_is_authenticated(self):
class User:
is_authenticated = True
self.assertEqual(user_is_authenticated(User()), True)
def test_user_has_is_authenticated_callable(self):
class User:
def is_authenticated(self):
return True
self.assertEqual(user_is_authenticated(User()), True)
class UserIsActiveTest(unittest.TestCase):
def test_user_is_none(self):
self.assertEqual(user_is_active(None), False)
def test_user_is_not_none(self):
self.assertEqual(user_is_active(object()), True)
def test_user_has_is_active(self):
class User:
is_active = True
self.assertEqual(user_is_active(User()), True)
def test_user_has_is_active_callable(self):
class User:
def is_active(self):
return True
self.assertEqual(user_is_active(User()), True)
class SlugifyTest(unittest.TestCase):
def test_slugify_formats(self):
if PY3:
self.assertEqual(slugify("FooBar"), "foobar")
self.assertEqual(slugify("Foo Bar"), "foo-bar")
self.assertEqual(slugify("Foo (Bar)"), "foo-bar")
else:
self.assertEqual(slugify("FooBar".decode("utf-8")), "foobar")
self.assertEqual(slugify("Foo Bar".decode("utf-8")), "foo-bar")
self.assertEqual(slugify("Foo (Bar)".decode("utf-8")), "foo-bar")
class BuildAbsoluteURITest(unittest.TestCase):
def setUp(self):
self.host = "http://foobar.com"
def tearDown(self):
self.host = None
def test_path_none(self):
self.assertEqual(build_absolute_uri(self.host), self.host)
def test_path_empty(self):
self.assertEqual(build_absolute_uri(self.host, ""), self.host)
def test_path_http(self):
self.assertEqual(
build_absolute_uri(self.host, "http://barfoo.com"), "http://barfoo.com"
)
def test_path_https(self):
self.assertEqual(
build_absolute_uri(self.host, "https://barfoo.com"), "https://barfoo.com"
)
def test_host_ends_with_slash_and_path_starts_with_slash(self):
self.assertEqual(
build_absolute_uri(self.host + "/", "/foo/bar"), "http://foobar.com/foo/bar"
)
def test_absolute_uri(self):
self.assertEqual(
build_absolute_uri(self.host, "/foo/bar"), "http://foobar.com/foo/bar"
)
class PartialPipelineData(unittest.TestCase):
def test_returns_partial_when_uid_and_email_do_match(self):
email = "[email protected]"
backend = self._backend({"uid": email})
backend.strategy.request_data.return_value = {backend.ID_KEY: email}
key, val = ("foo", "bar")
partial = partial_pipeline_data(backend, None, *(), **dict([(key, val)]))
self.assertTrue(key in partial.kwargs)
self.assertEqual(partial.kwargs[key], val)
self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)
def test_clean_pipeline_when_uid_does_not_match(self):
backend = self._backend({"uid": "[email protected]"})
backend.strategy.request_data.return_value = {backend.ID_KEY: "[email protected]"}
key, val = ("foo", "bar")
partial = partial_pipeline_data(backend, None, *(), **dict([(key, val)]))
self.assertIsNone(partial)
self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 1)
def test_kwargs_included_in_result(self):
backend = self._backend()
key, val = ("foo", "bar")
partial = partial_pipeline_data(backend, None, *(), **dict([(key, val)]))
self.assertTrue(key in partial.kwargs)
self.assertEqual(partial.kwargs[key], val)
self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)
def test_update_user(self):
user = object()
backend = self._backend(session_kwargs={"user": None})
partial = partial_pipeline_data(backend, user)
self.assertTrue("user" in partial.kwargs)
self.assertEqual(partial.kwargs["user"], user)
self.assertEqual(backend.strategy.clean_partial_pipeline.call_count, 0)
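# Helper that builds a mocked backend whose strategy's partial_load returns a TestPartial seeded with the given session kwargs.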
def _backend(self, session_kwargs=None):
backend = Mock()
backend.ID_KEY = "email"
backend.name = "mock-backend"
strategy = Mock()
strategy.request = None
strategy.request_data.return_value = {}
strategy.session_get.return_value = object()
strategy.partial_load.return_value = TestPartial.prepare(
backend.name, 0, {"args": [], "kwargs": session_kwargs or {}}
)
backend.strategy = strategy
return backend |
is vertical | # -*- coding: utf-8 -*-
# ***************************************************************************
# * Copyright (c) 2021 sliptonic <[email protected]> *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
import FreeCAD
import Path
import Path.Base.MachineState as PathMachineState
import Part
__title__ = "Feed Rate Helper Utility"
__author__ = "sliptonic (Brad Collette)"
__url__ = "https://www.freecad.org"
__doc__ = "Helper for adding Feed Rate to Path Commands"
"""
TODO: This needs to be able to handle feedrates for axes other than X,Y,Z
"""
if False:
Path.Log.setLevel(Path.Log.Level.DEBUG, Path.Log.thisModule())
Path.Log.trackModule(Path.Log.thisModule())
else:
Path.Log.setLevel(Path.Log.Level.INFO, Path.Log.thisModule())
def setFeedRate(commandlist, ToolController):
"""Set the appropriate feed rate for a list of Path commands using the information from a Tool Controller
Every motion command in the list will have a feed rate parameter added or overwritten based
on the information stored in the tool controller. If a motion is a plunge (vertical) motion, the
VertFeed value will be used, otherwise the HorizFeed value will be used instead."""
def METHOD_NAME(currentposition, command):
x = (
command.Parameters["X"]
if "X" in command.Parameters
else currentposition.x
)
y = (
command.Parameters["Y"]
if "Y" in command.Parameters
else currentposition.y
)
z = (
command.Parameters["Z"]
if "Z" in command.Parameters
else currentposition.z
)
endpoint = FreeCAD.Vector(x, y, z)
if Path.Geom.pointsCoincide(currentposition, endpoint):
return True
return Path.Geom.isVertical(Part.makeLine(currentposition, endpoint))
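# Walk the command list while tracking machine position so each move's start point is known when deciding between vertical and horizontal feed rates.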
machine = PathMachineState.MachineState()
for command in commandlist:
if command.Name not in Path.Geom.CmdMoveAll:
continue
if METHOD_NAME(machine.getPosition(), command):
rate = (
ToolController.VertRapid.Value
if command.Name in Path.Geom.CmdMoveRapid
else ToolController.VertFeed.Value
)
else:
rate = (
ToolController.HorizRapid.Value
if command.Name in Path.Geom.CmdMoveRapid
else ToolController.HorizFeed.Value
)
params = command.Parameters
params["F"] = rate
command.Parameters = params
machine.addCommand(command)
return commandlist |
draw | # -*- coding: utf-8 -*-
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
from collections import defaultdict
import bpy
import sverchok.ui.nodeview_space_menu as sm # import other way breaks showing custom icons
from bpy.props import PointerProperty, EnumProperty, StringProperty, BoolProperty, IntProperty
class AddNodeToolPanel(bpy.types.Panel):
"""Nodes panel under the T panel"""
bl_idname = 'SV_PT_AddNodeToolPanel'
bl_space_type = "NODE_EDITOR"
bl_region_type = "TOOLS"
bl_label = "Sverchok Nodes"
_items: list[sm.AddNode] = []
_categories: dict[sm.Category, sm.AddNode] = dict()
def node_search_update(self, context):
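# Update callback for the search field: rebuild the mapping of categories to matching add-node entries.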
request = context.scene.sv_add_node_panel_settings.node_search
if not request:
AddNodeToolPanel._categories = dict()
return
categories = defaultdict(list)
for cat in sm.get_add_node_menu().walk_categories():
for add_node in cat:
if not isinstance(add_node, sm.AddNode):
continue
if add_node.search_match(request):
categories[cat].append(add_node)
AddNodeToolPanel._categories = categories
def select_category_update(self, context):
cat_name = context.scene.sv_add_node_panel_settings.selected_category
for cat in sm.get_add_node_menu().walk_categories():
if cat.menu_cls.__name__ == cat_name:
items = [n for n in cat if isinstance(n, (sm.AddNode, sm.Separator))]
AddNodeToolPanel._items = items
return
@property
def categories(self):
if not self._categories:
self.node_search_update(bpy.context)
return self._categories
@property
def items(self):
"""After reloading the items will be none. They can't be updated from
registration function. So do this on demand"""
if not self._items:
self.select_category_update(bpy.context)
return self._items
@classmethod
def poll(cls, context):
try:
return context.space_data.node_tree.bl_idname == 'SverchCustomTreeType'
except:
return False
def METHOD_NAME(self, context):
layout = self.layout
row = layout.row(align=True)
row.prop(context.scene.sv_add_node_panel_settings, "node_search", text="")
if context.scene.sv_add_node_panel_settings.node_search:
for cat, add_nodes in self.categories.items():
icon_prop = sm.icon(cat.icon) if cat.icon else {}
layout.label(text=cat.name, **icon_prop)
self.draw_add_node(context, add_nodes)
else:
layout.prop(context.scene.sv_add_node_panel_settings, "selected_category", text="")
self.draw_add_node(context, self.items)
col = layout.column()
col.use_property_split = True
col.use_property_decorate = False
col.prop(context.scene.sv_add_node_panel_settings, 'icons_only')
if context.scene.sv_add_node_panel_settings.icons_only:
col.prop(context.scene.sv_add_node_panel_settings, 'columns_number')
def draw_add_node(self, context, items):
layout = self.layout
if context.scene.sv_add_node_panel_settings.icons_only:
num = context.scene.sv_add_node_panel_settings.columns_number
grid = layout.grid_flow(row_major=True, align=True, columns=num)
grid.scale_x = 1.5
for add_node in items:
if hasattr(add_node, 'draw_icon'):
add_node.draw_icon(grid)
else: # <- separator
grid = layout.grid_flow(row_major=True, align=True, columns=num)
grid.scale_x = 1.5
else:
col = layout.column(align=True)
for add_node in items:
add_node.METHOD_NAME(col)
class AddNodePanelSettings(bpy.types.PropertyGroup):
def categories(self, context):
# this should be a function because new categories can be added
# by Sverchok's extensions after the registration
for i, category in enumerate(sm.get_add_node_menu().walk_categories()):
if any(isinstance(add_node, sm.AddNode) for add_node in category):
identifier = category.menu_cls.__name__
yield identifier, category.name, category.name, i
selected_category: EnumProperty(
name="Category",
description="Select nodes category",
items=categories,
default=1, # it throws errors in the console without this option
update=AddNodeToolPanel.select_category_update,
)
node_search: StringProperty(
name="Search",
description="Enter search term and press Enter to search; clear the"
" field to return to selection of node category.",
update=AddNodeToolPanel.node_search_update,
options={'TEXTEDIT_UPDATE'},
)
icons_only: BoolProperty(
name="Icons only",
description="Show node icon only when icon has an icon, otherwise show it's name",
default=True,
)
columns_number: IntProperty(
name="Columns",
description="Number of icon panels per row; Set to 0 for automatic selection",
default=5,
min=1,
max=12,
)
classes = [AddNodeToolPanel, AddNodePanelSettings]
def register():
for cls in classes:
bpy.utils.register_class(cls)
bpy.types.Scene.sv_add_node_panel_settings = PointerProperty(
type=AddNodePanelSettings)
def unregister():
del bpy.types.Scene.sv_add_node_panel_settings
for cls in classes[::-1]:
bpy.utils.unregister_class(cls) |
test install restart | """
Test the win_wusa execution module
"""
import pytest
import salt.modules.win_wusa as win_wusa
from salt.exceptions import CommandExecutionError
from tests.support.mock import MagicMock, patch
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_unless_on_windows,
]
@pytest.fixture
def configure_loader_modules():
return {win_wusa: {}}
def test_is_installed_false():
"""
test is_installed function when the KB is not installed
"""
mock_retcode = MagicMock(return_value=1)
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
assert win_wusa.is_installed("KB123456") is False
def test_is_installed_true():
"""
test is_installed function when the KB is installed
"""
mock_retcode = MagicMock(return_value=0)
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
assert win_wusa.is_installed("KB123456") is True
def test_list():
"""
test list function
"""
ret = [{"HotFixID": "KB123456"}, {"HotFixID": "KB123457"}]
mock_all = MagicMock(return_value=ret)
with patch("salt.utils.win_pwsh.run_dict", mock_all):
expected = ["KB123456", "KB123457"]
returned = win_wusa.list_()
assert returned == expected
def test_install():
"""
test install function
"""
mock_retcode = MagicMock(return_value=0)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
assert win_wusa.install(path) is True
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/norestart"], ignore_retcode=True
)
def METHOD_NAME():
"""
test install function with restart=True
"""
mock_retcode = MagicMock(return_value=0)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
assert win_wusa.install(path, restart=True) is True
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/forcerestart"], ignore_retcode=True
)
def test_install_already_installed():
"""
test install function when KB already installed
"""
retcode = 2359302
mock_retcode = MagicMock(return_value=retcode)
path = "C:\\KB123456.msu"
name = "KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
with pytest.raises(CommandExecutionError) as excinfo:
win_wusa.install(path)
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/norestart"], ignore_retcode=True
)
assert (
f"{name} is already installed. Additional info follows:\n\n{retcode}"
== excinfo.value.message
)
def test_install_reboot_needed():
"""
test install function when KB needs a reboot
"""
retcode = 3010
mock_retcode = MagicMock(return_value=retcode)
path = "C:\\KB123456.msu"
name = "KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
with pytest.raises(CommandExecutionError) as excinfo:
win_wusa.install(path)
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/norestart"], ignore_retcode=True
)
assert (
f"{name} correctly installed but server reboot is needed to complete installation. Additional info follows:\n\n{retcode}"
== excinfo.value.message
)
def test_install_error_87():
"""
test install function when error 87 returned
"""
retcode = 87
mock_retcode = MagicMock(return_value=retcode)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
with pytest.raises(CommandExecutionError) as excinfo:
win_wusa.install(path)
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/norestart"], ignore_retcode=True
)
assert (
f"Unknown error. Additional info follows:\n\n{retcode}" == excinfo.value.message
)
def test_install_error_other():
"""
test install function on other unknown error
"""
mock_retcode = MagicMock(return_value=1234)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
with pytest.raises(CommandExecutionError) as excinfo:
win_wusa.install(path)
mock_retcode.assert_called_once_with(
["wusa.exe", path, "/quiet", "/norestart"], ignore_retcode=True
)
assert "Unknown error: 1234" == excinfo.value.message
def test_uninstall_kb():
"""
test uninstall function passing kb name
"""
mock_retcode = MagicMock(return_value=0)
kb = "KB123456"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}), patch(
"os.path.exists", MagicMock(return_value=False)
):
assert win_wusa.uninstall(kb) is True
mock_retcode.assert_called_once_with(
[
"wusa.exe",
"/uninstall",
"/quiet",
f"/kb:{kb[2:]}",
"/norestart",
],
ignore_retcode=True,
)
def test_uninstall_path():
"""
test uninstall function passing full path to .msu file
"""
mock_retcode = MagicMock(return_value=0)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}), patch(
"os.path.exists", MagicMock(return_value=True)
):
assert win_wusa.uninstall(path) is True
mock_retcode.assert_called_once_with(
["wusa.exe", "/uninstall", "/quiet", path, "/norestart"],
ignore_retcode=True,
)
def test_uninstall_path_restart():
"""
test uninstall function with full path and restart=True
"""
mock_retcode = MagicMock(return_value=0)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}), patch(
"os.path.exists", MagicMock(return_value=True)
):
assert win_wusa.uninstall(path, restart=True) is True
mock_retcode.assert_called_once_with(
["wusa.exe", "/uninstall", "/quiet", path, "/forcerestart"],
ignore_retcode=True,
)
def test_uninstall_already_uninstalled():
"""
test uninstall function when KB already uninstalled
"""
retcode = 2359303
mock_retcode = MagicMock(return_value=retcode)
kb = "KB123456"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}):
with pytest.raises(CommandExecutionError) as excinfo:
win_wusa.uninstall(kb)
mock_retcode.assert_called_once_with(
[
"wusa.exe",
"/uninstall",
"/quiet",
f"/kb:{kb[2:]}",
"/norestart",
],
ignore_retcode=True,
)
assert (
f"{kb} not installed. Additional info follows:\n\n{retcode}"
== excinfo.value.message
)
def test_uninstall_path_error_other():
"""
test uninstall function with unknown error
"""
mock_retcode = MagicMock(return_value=1234)
path = "C:\\KB123456.msu"
with patch.dict(win_wusa.__salt__, {"cmd.retcode": mock_retcode}), patch(
"os.path.exists", MagicMock(return_value=True)
), pytest.raises(CommandExecutionError) as excinfo:
win_wusa.uninstall(path)
mock_retcode.assert_called_once_with(
["wusa.exe", "/uninstall", "/quiet", path, "/norestart"],
ignore_retcode=True,
)
assert "Unknown error: 1234" == excinfo.value.message |
get entropy | """Calculation of free energy of one-electronic states."""
# Copyright (C) 2018 Atsushi Togo
# All rights reserved.
#
# This file is part of phonopy.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the phonopy project nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from phonopy.units import Kb
def get_free_energy_at_T(tmin, tmax, tstep, eigenvalues, weights, n_electrons):
"""Return free energies at given temperatures."""
free_energies = []
efe = ElectronFreeEnergy(eigenvalues, weights, n_electrons)
temperatures = np.arange(tmin, tmax + 1e-8, tstep)
for T in temperatures:
efe.run(T)
free_energies.append(efe.free_energy)
return temperatures, free_energies
class ElectronFreeEnergy:
r"""Class to calculate free energy of one-electronic states.
Fixed density-of-states approximation for energy and entropy of electrons.
This is supposed to be used for metals, i.e., chemical potential is not
in band gap.
Entropy
-------
.. math::
S_\text{el}(V) = -gk_{\mathrm{B}}\Sigma_i \{ f_i(V) \ln f_i(V) +
[1-f_i(V)]\ln [1-f_i(V)] \}
.. math::
f_i(V) = \left\{ 1 + \exp\left[\frac{\epsilon_i(V) - \mu(V)}{T}\right]
\right\}^{-1}
where :math:`g` is 1 for non-spin polarized systems and 2 for spin
polarized systems.
Energy
------
.. math::
E_\text{el}(V) = g\sum_i f_i(V) \epsilon_i(V)
Attributes
----------
entropy: float
Entropy in eV (T * S).
energy: float
Energy in eV.
free_energy: float
energy - entropy in eV.
mu: float
Chemical potential in eV.
"""
def __init__(self, eigenvalues, weights, n_electrons):
"""Init method.
Parameters
----------
eigenvalues: ndarray
Eigenvalues in eV.
dtype='double'
shape=(spin, kpoints, bands)
weights: ndarray
Geometric k-point weights (number of arms of k-star in BZ).
dtype='int_'
shape=(irreducible_kpoints,)
n_electrons: float
Number of electrons in unit cell.
"""
# shape=(kpoints, spin, bands)
self._eigenvalues = np.array(
eigenvalues.swapaxes(0, 1), dtype="double", order="C"
)
self._weights = weights
self._n_electrons = n_electrons
if self._eigenvalues.shape[1] == 1:
self._g = 2
elif self._eigenvalues.shape[1] == 2:
self._g = 1
else:
raise RuntimeError
self._T = None
self._f = None
self._mu = None
self._entropy = None
self._energy = None
def run(self, T):
"""Calculate free energies.
Parameters
----------
T: float
Temperature in K
"""
if T < 1e-10:
self._T = 1e-10
else:
self._T = T * Kb
self._mu = self._chemical_potential()
self._f = self._occupation_number(self._eigenvalues, self._mu)
self._entropy = self.METHOD_NAME()
self._energy = self._get_energy()
@property
def free_energy(self):
"""Return free energies."""
return self._energy - self._entropy
@property
def energy(self):
"""Return energies."""
return self._energy
@property
def entropy(self):
"""Return entropies."""
return self._entropy
@property
def mu(self):
"""Return chemical potential."""
return self._mu
def METHOD_NAME(self):
S = 0
for f_k, w in zip(self._f.reshape(len(self._weights), -1), self._weights):
_f = np.extract((f_k > 1e-12) * (f_k < 1 - 1e-12), f_k)
S -= (_f * np.log(_f) + (1 - _f) * np.log(1 - _f)).sum() * w
return S * self._g * self._T / self._weights.sum()
def _get_energy(self):
occ_eigvals = self._f * self._eigenvalues
return (
np.dot(
occ_eigvals.reshape(len(self._weights), -1).sum(axis=1), self._weights
)
* self._g
/ self._weights.sum()
)
def _chemical_potential(self):
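# Bisection search for the chemical potential: narrow [emin, emax] until the electron count at mu matches n_electrons.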
emin = np.min(self._eigenvalues)
emax = np.max(self._eigenvalues)
mu = (emin + emax) / 2
for i in range(1000):
n = self._number_of_electrons(mu)
if abs(n - self._n_electrons) < 1e-10:
break
elif n < self._n_electrons:
emin = mu
else:
emax = mu
mu = (emin + emax) / 2
return mu
def _number_of_electrons(self, mu):
eigvals = self._eigenvalues.reshape(len(self._weights), -1)
n = (
np.dot(self._occupation_number(eigvals, mu).sum(axis=1), self._weights)
* self._g
/ self._weights.sum()
)
return n
def _occupation_number(self, e, mu):
de = (e - mu) / self._T
de = np.where(de < 100, de, 100.0) # To avoid overflow
de = np.where(de > -100, de, -100.0) # To avoid underflow
return 1.0 / (1 + np.exp(de)) |
write data | # pywws - Python software for USB Wireless Weather Stations
# http://github.com/jim-easterbrook/pywws
# Copyright (C) 2008-20 pywws contributors
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Low level USB interface to weather station, using PyUSB v0.4.
Introduction
============
This module handles low level communication with the weather station
via the `PyUSB <http://sourceforge.net/apps/trac/pyusb/>`_ library. It
is one of several USB device modules, each of which uses a different
USB library interface. See :ref:`Installation - USB
library<dependencies-usb>` for details.
Testing
=======
Run :py:mod:`pywws.testweatherstation` with increased verbosity so it
reports which USB device access module is being used::
python -m pywws.testweatherstation -vv
18:28:09:pywws.weatherstation.CUSBDrive:using pywws.device_pyusb
0000 55 aa ff ff ff ff ff ff ff ff ff ff ff ff ff ff 05 20 01 41 11 00 00 00 81 00 00 0f 05 00 e0 51
0020 03 27 ce 27 00 00 00 00 00 00 00 12 02 14 18 27 41 23 c8 00 00 00 46 2d 2c 01 64 80 c8 00 00 00
0040 64 00 64 80 a0 28 80 25 a0 28 80 25 03 36 00 05 6b 00 00 0a 00 f4 01 12 00 00 00 00 00 00 00 00
0060 00 00 49 0a 63 12 05 01 7f 00 36 01 60 80 36 01 60 80 bc 00 7b 80 95 28 12 26 6c 28 25 26 c8 01
0080 1d 02 d8 00 de 00 ff 00 ff 00 ff 00 00 11 10 06 01 29 12 02 01 19 32 11 09 09 05 18 12 01 22 13
00a0 14 11 11 04 15 04 11 12 17 05 12 11 09 02 15 26 12 02 11 07 05 11 09 02 15 26 12 02 11 07 05 11
00c0 09 10 09 12 12 02 02 12 38 12 02 07 19 00 11 12 16 03 27 12 02 03 11 00 11 12 16 03 27 11 12 26
00e0 21 32 11 12 26 21 32 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57 12 02 06 19 57
API
===
"""
__docformat__ = "restructuredtext en"
import platform
import usb
class USBDevice(object):
"""Low level USB device access via PyUSB library.
:param idVendor: the USB "vendor ID" number, for example 0x1941.
:type idVendor: int
:param idProduct: the USB "product ID" number, for example 0x8021.
:type idProduct: int
"""
def __init__(self, idVendor, idProduct):
dev = self._find_device(idVendor, idProduct)
if not dev:
raise IOError("Weather station device not found")
self.devh = dev.open()
if not self.devh:
raise IOError("Open device failed")
self.devh.reset()
## if platform.system() is 'Windows':
## self.devh.setConfiguration(1)
try:
self.devh.claimInterface(0)
except usb.USBError:
# claim interface failed, try detaching kernel driver first
if not hasattr(self.devh, 'detachKernelDriver'):
raise RuntimeError(
"Please upgrade pyusb (or python-usb) to 0.4 or higher")
try:
self.devh.detachKernelDriver(0)
self.devh.claimInterface(0)
except usb.USBError:
raise IOError("Claim interface failed")
# device may have data left over from an incomplete read
for i in range(4):
try:
self.devh.interruptRead(0x81, 8, 1200)
except usb.USBError:
break
def __del__(self):
if self.devh:
try:
self.devh.releaseInterface()
except usb.USBError:
# interface was not claimed. No problem
pass
def _find_device(self, idVendor, idProduct):
"""Find a USB device by product and vendor id."""
for bus in usb.busses():
for device in bus.devices:
if (device.idVendor == idVendor and
device.idProduct == idProduct):
return device
return None
def read_data(self, size):
"""Receive data from the device.
If the read fails for any reason, an :obj:`IOError` exception
is raised.
:param size: the number of bytes to read.
:type size: int
:return: the data received.
:rtype: list(int)
"""
result = self.devh.interruptRead(0x81, size, 1200)
if result is None or len(result) < size:
raise IOError('pywws.device_pyusb.USBDevice.read_data failed')
return list(result)
def METHOD_NAME(self, buf):
"""Send data to the device.
If the write fails for any reason, an :obj:`IOError` exception
is raised.
:param buf: the data to send.
:type buf: list(int)
:return: success status.
:rtype: bool
"""
result = self.devh.controlMsg(
usb.ENDPOINT_OUT + usb.TYPE_CLASS + usb.RECIP_INTERFACE,
usb.REQ_SET_CONFIGURATION, buf, value=0x200, timeout=50)
if result != len(buf):
raise IOError('pywws.device_pyusb.USBDevice.write_data failed')
return True |
test gil released inside lifted loop | import ctypes
import ctypes.util
import os
import sys
import threading
import warnings
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba import jit
from numba.core import errors
from numba.tests.support import TestCase, tag
# This CPython API function is a portable way to get the current thread id.
PyThread_get_thread_ident = ctypes.pythonapi.PyThread_get_thread_ident
PyThread_get_thread_ident.restype = ctypes.c_long
PyThread_get_thread_ident.argtypes = []
# A way of sleeping from nopython code
if os.name == 'nt':
sleep = ctypes.windll.kernel32.Sleep
sleep.argtypes = [ctypes.c_uint]
sleep.restype = None
sleep_factor = 1 # milliseconds
else:
sleep = ctypes.CDLL(ctypes.util.find_library("c")).usleep
sleep.argtypes = [ctypes.c_uint]
sleep.restype = ctypes.c_int
sleep_factor = 1000 # microseconds
def f(a, indices):
# If run from one thread at a time, the function will always fill the
# array with identical values.
# If run from several threads at a time, the function will probably
# fill the array with differing values.
for idx in indices:
# Let another thread run
sleep(10 * sleep_factor)
a[idx] = PyThread_get_thread_ident()
f_sig = "void(int64[:], intp[:])"
def lifted_f(a, indices):
"""
Same as f(), but inside a lifted loop
"""
object() # Force object mode
for idx in indices:
# Let another thread run
sleep(10 * sleep_factor)
a[idx] = PyThread_get_thread_ident()
def object_f(a, indices):
"""
Same as f(), but in object mode
"""
for idx in indices:
# Let another thread run
sleep(10 * sleep_factor)
object() # Force object mode
a[idx] = PyThread_get_thread_ident()
class TestGILRelease(TestCase):
def make_test_array(self, n_members):
return np.arange(n_members, dtype=np.int64)
def run_in_threads(self, func, n_threads):
# Run the function in parallel over an array and collect results.
threads = []
# Warm up compilation, since we don't want that to interfere with
# the test proper.
func(self.make_test_array(1), np.arange(1, dtype=np.intp))
arr = self.make_test_array(50)
for i in range(n_threads):
# Ensure different threads write into the array in different
# orders.
indices = np.arange(arr.size, dtype=np.intp)
np.random.shuffle(indices)
t = threading.Thread(target=func, args=(arr, indices))
threads.append(t)
for t in threads:
t.start()
for t in threads:
t.join()
return arr
def check_gil_held(self, func):
arr = self.run_in_threads(func, n_threads=4)
distinct = set(arr)
self.assertEqual(len(distinct), 1, distinct)
def check_gil_released(self, func):
for n_threads in (4, 12, 32):
# Try harder each time. On an empty machine 4 threads seems
# sufficient, but in some contexts (e.g. Travis CI) we need more.
arr = self.run_in_threads(func, n_threads)
distinct = set(arr)
try:
self.assertGreater(len(distinct), 1, distinct)
except AssertionError as e:
failure = e
else:
return
raise failure
def test_gil_held(self):
"""
Test the GIL is held by default, by checking serialized runs
produce deterministic results.
"""
cfunc = jit(f_sig, nopython=True)(f)
self.check_gil_held(cfunc)
def test_gil_released(self):
"""
Test releasing the GIL, by checking parallel runs produce
unpredictable results.
"""
cfunc = jit(f_sig, nopython=True, nogil=True)(f)
self.check_gil_released(cfunc)
def METHOD_NAME(self):
"""
Test the GIL can by released by a lifted loop even though the
surrounding code uses object mode.
"""
cfunc = jit(f_sig, nogil=True)(lifted_f)
self.check_gil_released(cfunc)
def test_gil_released_by_caller(self):
"""
Releasing the GIL in the caller is sufficient to have it
released in a callee.
"""
compiled_f = jit(f_sig, nopython=True)(f)
@jit(f_sig, nopython=True, nogil=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_released(caller)
def test_gil_released_by_caller_and_callee(self):
"""
Same, but with both caller and callee asking to release the GIL.
"""
compiled_f = jit(f_sig, nopython=True, nogil=True)(f)
@jit(f_sig, nopython=True, nogil=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_released(caller)
def test_gil_ignored_by_callee(self):
"""
When only the callee asks to release the GIL, it gets ignored.
"""
compiled_f = jit(f_sig, nopython=True, nogil=True)(f)
@jit(f_sig, nopython=True)
def caller(a, i):
compiled_f(a, i)
self.check_gil_held(caller)
def test_object_mode(self):
"""
When the function is compiled in object mode, a warning is
printed out.
"""
with warnings.catch_warnings(record=True) as wlist:
warnings.simplefilter('always', errors.NumbaWarning)
cfunc = jit(f_sig, nogil=True)(object_f)
self.assertTrue(any(w.category is errors.NumbaWarning
and "Code running in object mode won't allow parallel execution" in str(w.message)
for w in wlist), wlist)
# Just check it doesn't crash.
self.run_in_threads(cfunc, 2)
if __name__ == '__main__':
unittest.main() |
jsemit suite node | """Templetor extension to support javascript templates.
During AJAX development, there is often a need to generate HTML and update
some part of the DOM. It is clumsy to do that in javascript. Even though
there are some javascript template engines, it often ends up in duplication
because of writing a Python template and a Javascript template for doing
the same thing.
This extension adds a new block `jsdef` to Templetor, which provides
a template function just like `def` and also generates an equivalent
javascript function.
USAGE::
import jsdef
render = web.template.render("templates/", extensions=[jsdef.extension])
Sample Template::
$def with (page)
<h1>$page.title</h1>
$jsdef render_books(books):
<ul>
$for book in books:
<li><a href="$book.key">$book.title</a></li>
</ul>
<div id="books">
$:render_books(page.books)
</div>
<script type="text/javascript">
function update_books(books) {
document.getElementById("books").innerHTML = render_books(books);
}
</script>
For more details, see:
http://github.com/anandology/notebook/tree/master/2010/03/jsdef/
"""
__author__ = "Anand Chitipothu <[email protected]>"
__version__ = "0.3"
"""change notes:
0.1: first release
0.2: python to javascript conversion for "and", "or" and "not" keywords
0.3: Added support for elif.
"""
import json
import web
from web.template import (
Template,
Parser,
LineNode,
SuiteNode,
DefNode,
PythonTokenizer,
# INDENT,
)
INDENT = " "
def extension(parser):
r"""jsdef extension. Adds support for `jsdef` block to template parser.::
>>> t = Template("$jsdef hello(name):\n Hello $name!", extensions=[extension])
>>> print(t()) #doctest:+NORMALIZE_WHITESPACE
<script type="text/javascript">
function hello(name){
var self = [], loop;
self.push("Hello "); self.push(websafe(name)); self.push("!\n");
return self.join("");
}
</script>
"""
parser.statement_nodes['jsdef'] = JSDefNode
return parser
class JSDefNode(DefNode):
"""Node to represent jsdef block."""
def __init__(self, *a, **kw):
DefNode.__init__(self, *a, **kw)
self.suite.sections.append(JSNode(self))
self.stmt = self.stmt.replace("jsdef", "def")
class JSNode:
def __init__(self, node):
self.node = node
self._count = 0
def emit(self, indent, text_indent=""):
# Code generation logic is changed in version 0.34
if web.__version__ < "0.34":
return indent[4:] + 'yield "", %s\n' % repr(self.jsemit(self.node, ""))
else:
return indent[4:] + 'self.extend(%s)\n' % repr(self.jsemit(self.node, ""))
def jsemit(self, node, indent):
r"""Emit Javascript for given node.::
>>> jsemit = JSNode(None).jsemit
>>> jsemit(web.template.StatementNode("break"), "")
'break;\n'
>>> jsemit(web.template.AssignmentNode("x = 1"), "")
'var x = 1;\n'
"""
name = "jsemit_" + node.__class__.__name__
if f := getattr(self, name, None):
return f(node, indent)
else:
return ""
def METHOD_NAME(self, node, indent):
return "".join(self.jsemit(s, indent) for s in node.sections)
def jsemit_LineNode(self, node, indent):
text = ["self.push(%s);" % self.jsemit(n, "") for n in node.nodes]
return indent + " ".join(text) + "\n"
def jsemit_TextNode(self, node, indent):
return json.dumps(node.value)
def jsemit_ExpressionNode(self, node, indent):
if node.escape:
return "websafe(%s)" % py2js(node.value)
else:
return py2js(node.value)
def jsemit_AssignmentNode(self, node, indent):
return indent + "var " + py2js(node.code) + ";\n"
def jsemit_StatementNode(self, node, indent):
return indent + py2js(node.stmt) + ";\n"
def jsemit_BlockNode(self, node, indent):
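# Translate Python block statements (if/elif/else/for) into the equivalent JavaScript block wrapped in braces.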
text = ""
jsnames = {"elif": "else if"}
for n in ["if", "elif", "else", "for"]:
if node.stmt.startswith(n):
name = n
break
else:
return ""
expr = node.stmt[len(name) :].strip(": ")
expr = expr and "(" + expr + ")"
jsname = jsnames.get(name, name)
text += indent + f"{jsname} {py2js(expr)} {{\n"
text += self.jsemit(node.suite, indent + INDENT)
text += indent + "}\n"
return text
jsemit_IfNode = jsemit_BlockNode
jsemit_ElseNode = jsemit_BlockNode
jsemit_ElifNode = jsemit_BlockNode
def jsemit_ForNode(self, node, indent):
tok = PythonTokenizer(node.stmt)
tok.consume_till('in')
a = node.stmt[: tok.index].strip() # for i in
a = a[len("for") : -len("in")].strip() # strip `for` and `in`
b = node.stmt[tok.index : -1].strip() # rest of for stmt excluding :
b = web.re_compile(r"loop.setup\((.*)\)").match(b).group(1)
text = ""
text += indent + f"foreach({py2js(b)}, loop, function(loop, {a}) {{\n"
text += self.jsemit(node.suite, indent + INDENT)
text += indent + "});\n"
return text
def jsemit_JSDefNode(self, node, indent):
text = ""
text += '<script type="text/javascript"><!--\n'
text += node.stmt.replace("def ", "function ").strip(": ") + "{\n"
text += ' var self = [], loop;\n'
text += self.jsemit(node.suite, indent + INDENT)
text += ' return self.join("");\n'
text += "}\n"
text += "//--></script>\n"
return text
def tokenize(code):
"""Tokenize python code.::
>>> list(tokenize("x + y"))
['x', ' ', '+', ' ', 'y']
"""
end = 0
tok = PythonTokenizer(code)
try:
while True:
x = next(tok)
begin = x.begin[1]
if begin > end:
yield ' ' * (begin - end)
if x.value:
yield x.value
end = x.end[1]
except StopIteration:
pass
def py2js(expr):
"""Converts a python expression to javascript.::
>>> py2js("x + y")
'x + y'
>>> py2js("x and y")
'x && y'
>>> py2js("x or not y")
'x || ! y'
"""
d = {"and": "&&", "or": "||", "not": "!"}
def f(tokens):
for t in tokens:
yield d.get(t, t)
return "".join(f(tokenize(expr)))
def _testrun(code):
parser = extension(web.template.Parser())
root = parser.parse(code)
node = root.suite
jnode = JSNode(node)
return jnode.jsemit(node, "")
def _test():
r"""
>>> t = _testrun
>>> t("$x")
'self.push(websafe(x));\n'
>>> t("$:x")
'self.push(x);\n'
>>> t("$ x = 1")
'var x = 1;\n'
>>> t("$ x = a and b")
'var x = a && b;\n'
>>> t("$if a or not b: $a")
u'if (a || ! b) {\n self.push(websafe(a));\n}\n'
>>> t("$for i in a and a.data or []: $i")
u'foreach(a && a.data || [], loop, function(loop, i) {\n self.push(websafe(i));\n});\n'
"""
if __name__ == "__main__":
import doctest
doctest.testmod() |
test phaseplot set ylim | import os
import shutil
import tempfile
import unittest
import pytest
import yt
from yt.testing import assert_allclose_units, fake_random_ds
from yt.visualization.api import PhasePlot
class TestPhasePlotAPI:
@classmethod
def setup_class(cls):
cls.ds = fake_random_ds(
16, fields=("density", "temperature"), units=("g/cm**3", "K")
)
def get_plot(self):
return PhasePlot(
self.ds, ("gas", "density"), ("gas", "temperature"), ("gas", "mass")
)
@pytest.mark.parametrize("kwargs", [{}, {"color": "b"}])
@pytest.mark.mpl_image_compare
def test_phaseplot_annotate_text(self, kwargs):
p = self.get_plot()
p.annotate_text(1e-4, 1e-2, "Test text annotation", **kwargs)
p.render()
return p.plots["gas", "mass"].figure
@pytest.mark.mpl_image_compare
def test_phaseplot_set_title(self):
p = self.get_plot()
p.set_title(("gas", "mass"), "Test Title")
p.render()
return p.plots["gas", "mass"].figure
@pytest.mark.mpl_image_compare
def test_phaseplot_set_log(self):
p = self.get_plot()
p.set_log(("gas", "mass"), False)
p.render()
return p.plots["gas", "mass"].figure
@pytest.mark.mpl_image_compare
def test_phaseplot_set_unit(self):
p = self.get_plot()
p.set_unit(("gas", "mass"), "Msun")
p.render()
return p.plots["gas", "mass"].figure
@pytest.mark.mpl_image_compare
def test_phaseplot_set_xlim(self):
p = self.get_plot()
p.set_xlim(1e-3, 1e0)
p.render()
return p.plots["gas", "mass"].figure
@pytest.mark.mpl_image_compare
def METHOD_NAME(self):
p = self.get_plot()
p.set_ylim(1e-2, 1e0)
p.render()
return p.plots["gas", "mass"].figure
def test_set_units():
fields = ("density", "temperature")
units = (
"g/cm**3",
"K",
)
ds = fake_random_ds(16, fields=fields, units=units)
sp = ds.sphere("max", (1.0, "Mpc"))
p1 = yt.ProfilePlot(sp, ("index", "radius"), ("gas", "density"))
p2 = yt.PhasePlot(sp, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"))
# make sure we can set the units using the tuple without erroring out
p1.set_unit(("gas", "density"), "Msun/kpc**3")
p2.set_unit(("gas", "temperature"), "R")
def test_set_labels():
ds = fake_random_ds(16)
ad = ds.all_data()
plot = yt.ProfilePlot(
ad,
("index", "radius"),
[("gas", "velocity_x"), ("gas", "density")],
weight_field=None,
)
# make sure we can set the labels without erroring out
plot.set_ylabel("all", "test ylabel")
plot.set_xlabel("test xlabel")
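# Building the plots from the dataset directly and from ds.all_data() should produce identical profiles.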
def test_create_from_dataset():
ds = fake_random_ds(16)
plot1 = yt.ProfilePlot(
ds,
("index", "radius"),
[("gas", "velocity_x"), ("gas", "density")],
weight_field=None,
)
plot2 = yt.ProfilePlot(
ds.all_data(),
("index", "radius"),
[("gas", "velocity_x"), ("gas", "density")],
weight_field=None,
)
assert_allclose_units(
plot1.profiles[0][("gas", "density")], plot2.profiles[0][("gas", "density")]
)
assert_allclose_units(
plot1.profiles[0]["velocity_x"], plot2.profiles[0]["velocity_x"]
)
plot1 = yt.PhasePlot(ds, ("gas", "density"), ("gas", "velocity_x"), ("gas", "mass"))
plot2 = yt.PhasePlot(
ds.all_data(), ("gas", "density"), ("gas", "velocity_x"), ("gas", "mass")
)
assert_allclose_units(plot1.profile["mass"], plot2.profile["mass"])
class TestAnnotations(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.tmpdir = tempfile.mkdtemp()
cls.curdir = os.getcwd()
os.chdir(cls.tmpdir)
ds = fake_random_ds(16)
ad = ds.all_data()
cls.fields = [
("gas", "velocity_x"),
("gas", "velocity_y"),
("gas", "velocity_z"),
]
cls.plot = yt.ProfilePlot(
ad, ("index", "radius"), cls.fields, weight_field=None
)
@classmethod
def tearDownClass(cls):
os.chdir(cls.curdir)
shutil.rmtree(cls.tmpdir)
def test_annotations(self):
# make sure we can annotate without erroring out
# annotate the plot with only velocity_x
self.plot.annotate_title("velocity_x plot", self.fields[0])
self.plot.annotate_text(1e-1, 1e1, "Annotated velocity_x")
# annotate the plots with velocity_y and velocity_z with
# the same annotations
self.plot.annotate_title("Velocity Plots (Y or Z)", self.fields[1:])
self.plot.annotate_text(1e-1, 1e1, "Annotated vel_y, vel_z", self.fields[1:])
self.plot.save()
def test_annotations_wrong_fields(self):
from yt.utilities.exceptions import YTFieldNotFound
with self.assertRaises(YTFieldNotFound):
self.plot.annotate_title("velocity_x plot", "wrong_field_name")
with self.assertRaises(YTFieldNotFound):
self.plot.annotate_text(1e-1, 1e1, "Annotated text", "wrong_field_name")
def test_phaseplot_set_log():
fields = ("density", "temperature")
units = (
"g/cm**3",
"K",
)
ds = fake_random_ds(16, fields=fields, units=units)
sp = ds.sphere("max", (1.0, "Mpc"))
p1 = yt.ProfilePlot(sp, ("index", "radius"), ("gas", "density"))
p2 = yt.PhasePlot(sp, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"))
# make sure we can set the log-scaling using the tuple without erroring out
p1.set_log(("gas", "density"), False)
p2.set_log(("gas", "temperature"), False)
assert not p1.y_log["gas", "density"]
assert not p2.y_log
    # make sure we can set the log-scaling back to True using the tuple without erroring out
p1.set_log(("gas", "density"), True)
p2.set_log(("gas", "temperature"), True)
assert p1.y_log["gas", "density"]
assert p2.y_log
# make sure we can set the log-scaling using a field object
p1.set_log(ds.fields.gas.density, False)
p2.set_log(ds.fields.gas.temperature, False)
assert not p1.y_log["gas", "density"]
assert not p2.y_log
def test_phaseplot_showhide_colorbar_axes():
fields = ("density", "temperature")
units = (
"g/cm**3",
"K",
)
ds = fake_random_ds(16, fields=fields, units=units)
ad = ds.all_data()
plot = yt.PhasePlot(ad, ("gas", "density"), ("gas", "temperature"), ("gas", "mass"))
# make sure we can hide colorbar
plot.hide_colorbar()
with tempfile.NamedTemporaryFile(suffix="png") as f1:
plot.save(f1.name)
# make sure we can show colorbar
plot.show_colorbar()
with tempfile.NamedTemporaryFile(suffix="png") as f2:
plot.save(f2.name)
# make sure we can hide axes
plot.hide_axes()
with tempfile.NamedTemporaryFile(suffix="png") as f3:
plot.save(f3.name)
# make sure we can show axes
plot.show_axes()
with tempfile.NamedTemporaryFile(suffix="png") as f4:
plot.save(f4.name) |
create response | from pubnub.endpoints.file_operations.file_based_endpoint import FileOperationEndpoint
from pubnub.crypto import PubNubFileCrypto
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.models.consumer.file import PNSendFileResult
from pubnub.endpoints.file_operations.publish_file_message import PublishFileMessage
from pubnub.endpoints.file_operations.fetch_upload_details import FetchFileUploadS3Data
from pubnub.request_handlers.requests_handler import RequestsRequestHandler
from pubnub.endpoints.mixins import TimeTokenOverrideMixin
class SendFileNative(FileOperationEndpoint, TimeTokenOverrideMixin):
def __init__(self, pubnub):
super(SendFileNative, self).__init__(pubnub)
self._file_name = None
self._pubnub = pubnub
self._file_upload_envelope = None
self._message = None
self._should_store = None
self._ttl = 0
self._meta = None
self._cipher_key = None
self._file_object = None
self._replicate = None
self._ptto = None
def file_object(self, fd):
self._file_object = fd
return self
def build_params_callback(self):
return lambda a: {}
def build_path(self):
return self._file_upload_envelope.result.data["url"]
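# Encrypt the file payload with the configured cipher key before upload; return the raw file object when no key is set.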
def encrypt_payload(self):
if self._cipher_key or self._pubnub.config.cipher_key:
try:
payload = self._file_object.read()
except AttributeError:
payload = self._file_object
return PubNubFileCrypto(self._pubnub.config).encrypt(
self._cipher_key or self._pubnub.config.cipher_key,
payload
)
else:
return self._file_object
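# Assemble the multipart form body expected by the pre-signed S3 upload: the returned form fields first, then the (possibly encrypted) file.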
def build_file_upload_request(self):
file = self.encrypt_payload()
multipart_body = {}
for form_field in self._file_upload_envelope.result.data["form_fields"]:
multipart_body[form_field["key"]] = (None, form_field["value"])
multipart_body["file"] = (self._file_name, file, None)
return multipart_body
def http_method(self):
return HttpMethod.POST
def use_compression(self, compress=True):
self._use_compression = bool(compress)
return self
def is_compressable(self):
return True
def custom_params(self):
return {}
def validate_params(self):
self.validate_subscribe_key()
self.validate_channel()
self.validate_file_object()
self.validate_file_name()
def use_base_path(self):
return False
def non_json_response(self):
return True
def is_auth_required(self):
return False
def should_store(self, should_store):
self._should_store = bool(should_store)
return self
def ttl(self, ttl):
self._ttl = ttl
return self
def meta(self, meta):
self._meta = meta
return self
def message(self, message):
self._message = message
return self
def file_name(self, file_name):
self._file_name = file_name
return self
def cipher_key(self, cipher_key):
self._cipher_key = cipher_key
return self
def METHOD_NAME(self, envelope, data=None):
return PNSendFileResult(envelope, self._file_upload_envelope)
def operation_type(self):
return PNOperationType.PNSendFileAction
def request_headers(self):
return {}
def name(self):
return "Send file to S3"
def sync(self):
self._file_upload_envelope = FetchFileUploadS3Data(self._pubnub).\
channel(self._channel).\
file_name(self._file_name).sync()
response_envelope = super(SendFileNative, self).sync()
publish_file_response = PublishFileMessage(self._pubnub).\
channel(self._channel).\
meta(self._meta).\
message(self._message).\
file_id(response_envelope.result.file_id).\
file_name(response_envelope.result.name).\
should_store(self._should_store).\
ttl(self._ttl).\
replicate(self._replicate).\
ptto(self._ptto).\
cipher_key(self._cipher_key).sync()
response_envelope.result.timestamp = publish_file_response.result.timestamp
return response_envelope
def pn_async(self, callback):
return RequestsRequestHandler(self._pubnub).async_file_based_operation(self.sync, callback, "File Download") |
flatten aggregations | from copy import deepcopy
from enum import Enum
from typing import List, Union
from urllib.parse import urlparse
import pandas as pd
from elasticsearch import Elasticsearch
from pandas.io.json import json_normalize
from pydantic import BaseModel, Field, SecretStr
from toucan_connectors.common import nosql_apply_parameters_to_query
from toucan_connectors.toucan_connector import ToucanConnector, ToucanDataSource
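# Return True if the value contains a list anywhere in its nested structure (i.e. it holds aggregation buckets).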
def _is_branch_list(val):
res = False
if isinstance(val, dict):
for k, v in val.items():
if _is_branch_list(v):
res = True
break
elif isinstance(val, list):
res = True
return res
def METHOD_NAME(data, parent=None, neighbours=None):
"""
Read `aggregations` block in data.
Example
Input data:
```
aggregation: {
field1 : {
buckets: [
{key: 'name1', count: 5},
{key: 'name2', count: 10}
]
},
field2: 5,
field3 : {
buckets: [
{key: 'name3', count: 7}
]
},
}
```
Result:
```
[{'field2': 5, 'field1_bucket_key': 'name1', 'field1_bucket_count': 5},
{'field2': 5, 'field1_bucket_key': 'name2', 'field1_bucket_count': 10},
{'field2': 5, 'field3_bucket_key': 'name3', 'field3_bucket_count': 7}]
```
"""
if not neighbours:
neighbours = {}
if isinstance(data, dict):
branch_l = {}
for k, v in deepcopy(data).items():
if _is_branch_list(v):
branch_l[k] = v
data.pop(k)
for k, v in data.items():
new_parent = f'{parent}_{k}' if parent else k
neighbours = METHOD_NAME(v, new_parent, neighbours)
if not branch_l:
return neighbours
else:
res = []
for k, v in branch_l.items():
new_parent = f'{parent}_{k}' if parent else k
if isinstance(v, list): # buckets
new_list = []
for elt in v:
new_elt = METHOD_NAME(elt, new_parent, neighbours)
if isinstance(new_elt, list):
new_list += new_elt
else:
new_list.append(new_elt)
res += new_list
else:
res += METHOD_NAME(v, new_parent, neighbours)
return res
else:
return {**{parent: data}, **neighbours}
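# An Elasticsearch response either carries aggregations (flattened into rows) or plain hits read from each document's _source.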
def _read_response(response):
if 'aggregations' in response:
res = METHOD_NAME(response['aggregations'])
if isinstance(res, dict):
res = [res]
else:
res = [elt['_source'] for elt in response['hits']['hits']]
return res
class ElasticsearchHost(BaseModel):
url: str
port: int = None
scheme: str = None
username: str = None
password: SecretStr = Field(None, description='Your login password')
headers: dict = None
class SearchMethod(str, Enum):
search = 'search'
msearch = 'msearch'
class ElasticsearchDataSource(ToucanDataSource):
search_method: SearchMethod
index: str = None
body: Union[dict, list]
class ElasticsearchConnector(ToucanConnector):
data_source_model: ElasticsearchDataSource
hosts: List[ElasticsearchHost]
def _retrieve_data(self, data_source: ElasticsearchDataSource) -> pd.DataFrame:
data_source.body = nosql_apply_parameters_to_query(data_source.body, data_source.parameters)
connection_params = []
for host in self.hosts:
parsed_url = urlparse(host.url)
h = {'host': parsed_url.hostname}
if parsed_url.path and parsed_url.path != '/':
h['url_prefix'] = parsed_url.path
if parsed_url.scheme == 'https':
h['port'] = host.port or 443
h['use_ssl'] = True
h['scheme'] = parsed_url.scheme
elif host.port:
h['port'] = host.port
h['scheme'] = parsed_url.scheme
if host.username or host.password:
h['http_auth'] = f'{host.username}:{host.password.get_secret_value()}'
if host.headers:
h['headers'] = host.headers
connection_params.append(h)
esclient = Elasticsearch(connection_params)
response = getattr(esclient, data_source.search_method)(
index=data_source.index, body=data_source.body
)
if data_source.search_method == SearchMethod.msearch:
res = []
# The msearch body alternates index and query: `[index, query, index, query, ...]`
queries = data_source.body[1::2]
for query, data in zip(queries, response['responses']):
res += _read_response(data)
else:
res = _read_response(response)
df = json_normalize(res)
return df |
get learning rate | """Training algorithm track submission functions for LibriSpeech."""
import functools
from typing import Dict, Iterator, List, Tuple
from absl import logging
from flax import jax_utils
import jax
from jax import lax
import jax.numpy as jnp
import numpy as np
import optax
from algorithmic_efficiency import spec
_GRAD_CLIP_EPS = 1e-6
def get_batch_size(workload_name):
# Return the global batch size.
del workload_name
return 256
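# Learning rate schedule: linear warmup to base_lr over warmup_steps, then cosine decay over training_steps.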
def METHOD_NAME(step, hyperparams):
warmup_steps = hyperparams.warmup_steps
if step < warmup_steps:
current_lr = (step * hyperparams.base_lr) / warmup_steps
else:
decay_factor = (1 + np.cos(step / hyperparams.training_steps * np.pi)) * 0.5
current_lr = hyperparams.base_lr * decay_factor
return current_lr
def optimizer(hyperparameters: spec.Hyperparameters, num_train_examples: int):
opt_init_fn, opt_update_fn = optax.inject_hyperparams(optax.adamw)(
b1=hyperparameters.beta1,
b2=hyperparameters.beta2,
eps=hyperparameters.epsilon,
weight_decay=hyperparameters.weight_decay,
learning_rate=0.0)
return opt_init_fn, opt_update_fn
def init_optimizer_state(workload: spec.Workload,
model_params: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
rng: spec.RandomState) -> spec.OptimizerState:
del model_state
del rng
params_zeros_like = jax.tree_map(lambda s: jnp.zeros(s.shape_tuple),
workload.param_shapes)
opt_init_fn, opt_update_fn = optimizer(hyperparameters,
workload.num_train_examples)
optimizer_state = opt_init_fn(params_zeros_like)
return jax_utils.replicate(optimizer_state), opt_update_fn
def l2_regularization(params, l2_decay_rank_threshold):
"""Computes the squared l2 norm of the given parameters.
This function will only filter for parameters with
rank >= l2_decay_rank_threshold. So if this threshold is set to 2, then all
1d (and lower) parameter arrays, including all bias and batch norm params,
will be ignored in this computation.
Args:
params: Pytree containing parameters.
l2_decay_rank_threshold: The calculation will only include parameters with
param.ndim >= l2_decay_rank_threshold. Set to 2 to ignore all bias and
batch_norm params in the model.
Returns:
weight_l2: the squared l2 norm of all params matching the threshold.
"""
weight_penalty_params = jax.tree_util.tree_leaves(params)
weight_l2 = sum(
jnp.sum(x**2)
for x in weight_penalty_params
if x.ndim >= l2_decay_rank_threshold)
return weight_l2
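# One pmapped training step: forward/backward pass, cross-device psum of loss and gradients, clipping by global gradient norm, and an optimizer update.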
@functools.partial(
jax.pmap,
axis_name='batch',
in_axes=(None, None, 0, 0, 0, None, 0, 0, None),
static_broadcasted_argnums=(0, 1))
def pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
rng,
lr):
optimizer_state.hyperparams['learning_rate'] = lr
def _loss_fn(params):
"""loss function used for training."""
(logits, logit_paddings), new_model_state = workload.model_fn(
params,
batch,
model_state,
spec.ForwardPassMode.TRAIN,
rng,
update_batch_norm=True)
loss_dict = workload.loss_fn(batch['targets'], (logits, logit_paddings))
summed_loss = loss_dict['summed']
n_valid_examples = loss_dict['n_valid_examples']
return summed_loss, (n_valid_examples, new_model_state)
grad_fn = jax.value_and_grad(_loss_fn, has_aux=True)
(summed_loss, (n_valid_examples, new_model_state)), grad = grad_fn(
current_param_container)
# Get correct global mean loss and grad.
(summed_loss, n_valid_examples, grad) = lax.psum(
(summed_loss, n_valid_examples, grad), axis_name='batch')
loss = summed_loss / n_valid_examples
grad = jax.tree_map(lambda x: x / n_valid_examples, grad)
grad_norm = jnp.sqrt(l2_regularization(grad, 0))
grad_clip = hyperparameters.grad_clip
grad_scaling_factor = grad_clip / (grad_norm + _GRAD_CLIP_EPS)
grad_scaling_factor = jax.lax.clamp(min=0.0, x=grad_scaling_factor, max=1.0)
grad = jax.tree_map(lambda x: x * grad_scaling_factor, grad)
updates, new_optimizer_state = opt_update_fn(grad, optimizer_state,
current_param_container)
updated_params = optax.apply_updates(current_param_container, updates)
return new_model_state, new_optimizer_state, updated_params, loss, grad_norm
def update_params(workload: spec.Workload,
current_param_container: spec.ParameterContainer,
current_params_types: spec.ParameterTypeTree,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
batch: Dict[str, spec.Tensor],
loss_type: spec.LossType,
optimizer_state: spec.OptimizerState,
eval_results: List[Tuple[int, float]],
global_step: int,
rng: spec.RandomState) -> spec.UpdateReturn:
"""Return (updated_optimizer_state, updated_params)."""
del current_params_types
del eval_results
del loss_type
lr = METHOD_NAME(global_step, hyperparameters)
optimizer_state, opt_update_fn = optimizer_state
per_device_rngs = jax.random.split(rng, jax.local_device_count())
outputs = pmapped_train_step(workload,
opt_update_fn,
model_state,
optimizer_state,
current_param_container,
hyperparameters,
batch,
per_device_rngs,
lr)
new_model_state, new_optimizer_state, new_params, loss, grad_norm = outputs
if global_step <= 1000 or global_step % 100 == 0:
logging.info('%d) loss = %0.3f, grad_norm = %0.3f lr = %0.6f',
global_step,
loss.mean(),
grad_norm.mean(),
lr)
if workload.summary_writer is not None:
workload.summary_writer.scalar('train_step_ctc_loss',
loss.mean(),
global_step)
workload.summary_writer.scalar('grad_norm', grad_norm.mean(), global_step)
workload.summary_writer.scalar('learning_rate', lr, global_step)
return (new_optimizer_state, opt_update_fn), new_params, new_model_state
# Not allowed to update the model parameters, hyperparameters, global step, or
# optimizer state.
def data_selection(workload: spec.Workload,
input_queue: Iterator[Dict[str, spec.Tensor]],
optimizer_state: spec.OptimizerState,
current_param_container: spec.ParameterContainer,
model_state: spec.ModelAuxiliaryState,
hyperparameters: spec.Hyperparameters,
global_step: int,
rng: spec.RandomState) -> Dict[str, spec.Tensor]:
"""Select data from the infinitely repeating, pre-shuffled input queue.
Each element of the queue is a batch of training examples and labels.
"""
del optimizer_state
del current_param_container
del global_step
del rng
del hyperparameters
del workload
return next(input_queue) |
test pre phase | import pytest
import rules
from adhocracy4.projects.enums import Access
from adhocracy4.test.helpers import freeze_phase
from adhocracy4.test.helpers import freeze_post_phase
from adhocracy4.test.helpers import freeze_pre_phase
from adhocracy4.test.helpers import setup_phase
from adhocracy4.test.helpers import setup_users
from meinberlin.apps.topicprio import phases
from meinberlin.test.helpers import setup_group_members
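# Permission under test: rating topics during the prioritization phase.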
perm_name = "meinberlin_topicprio.rate_topic"
def test_perm_exists():
assert rules.perm_exists(perm_name)
@pytest.mark.django_db
def METHOD_NAME(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory, topic_factory, phases.PrioritizePhase
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
assert project.is_public
with freeze_pre_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert not rules.has_perm(perm_name, group_member_out, item)
assert not rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item)
@pytest.mark.django_db
def test_phase_active(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory, topic_factory, phases.PrioritizePhase
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
assert project.is_public
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, creator, item)
assert rules.has_perm(perm_name, group_member_out, item)
assert rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item)
@pytest.mark.django_db
def test_phase_active_project_private(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory,
topic_factory,
phases.PrioritizePhase,
module__project__access=Access.PRIVATE,
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
participant = user_factory()
project.participants.add(participant)
assert project.access == Access.PRIVATE
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, participant, item)
assert not rules.has_perm(perm_name, creator, item)
assert not rules.has_perm(perm_name, group_member_out, item)
assert not rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item)
@pytest.mark.django_db
def test_phase_active_project_semipublic(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory,
topic_factory,
phases.PrioritizePhase,
module__project__access=Access.SEMIPUBLIC,
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
participant = user_factory()
project.participants.add(participant)
assert project.access == Access.SEMIPUBLIC
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert rules.has_perm(perm_name, participant, item)
assert not rules.has_perm(perm_name, creator, item)
assert not rules.has_perm(perm_name, group_member_out, item)
assert not rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item)
@pytest.mark.django_db
def test_phase_active_project_draft(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory,
topic_factory,
phases.PrioritizePhase,
module__project__is_draft=True,
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
assert project.is_draft
with freeze_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert not rules.has_perm(perm_name, group_member_out, item)
assert not rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item)
@pytest.mark.django_db
def test_post_phase_project_archived(
phase_factory, topic_factory, user, admin, user_factory, group_factory
):
phase, _, project, item = setup_phase(
phase_factory,
topic_factory,
phases.PrioritizePhase,
module__project__is_archived=True,
)
anonymous, moderator, initiator = setup_users(project)
creator = item.creator
(
project,
group_member_in_org,
group_member_in_pro,
group_member_out,
) = setup_group_members(project, group_factory, user_factory)
assert project.is_archived
with freeze_post_phase(phase):
assert not rules.has_perm(perm_name, anonymous, item)
assert not rules.has_perm(perm_name, user, item)
assert not rules.has_perm(perm_name, creator, item)
assert not rules.has_perm(perm_name, group_member_out, item)
assert not rules.has_perm(perm_name, group_member_in_org, item)
assert rules.has_perm(perm_name, group_member_in_pro, item)
assert rules.has_perm(perm_name, moderator, item)
assert rules.has_perm(perm_name, initiator, item)
assert rules.has_perm(perm_name, admin, item) |
test get | from typing import List
import pytest
@pytest.mark.apitest
class TestFind:
async def test_find(self, fake2, snapshot, spawn_client):
"""
Test that a ``GET /labels`` returns a complete list of labels.
"""
client = await spawn_client(authorize=True, administrator=True)
label_1 = await fake2.labels.create()
label_2 = await fake2.labels.create()
await client.db.samples.insert_many(
[
{
"_id": "foo",
"name": "Foo",
"labels": [
label_1.id,
],
},
{"_id": "bar", "name": "Bar", "labels": [label_1.id, label_2.id]},
{"_id": "baz", "name": "Baz", "labels": [label_2.id]},
],
session=None,
)
resp = await client.get("/labels")
assert resp.status == 200
assert await resp.json() == snapshot
async def test_find_by_name(self, fake2, snapshot, spawn_client):
"""
Test that a ``GET /labels`` with a `find` query returns a particular label. Also test for partial matches.
"""
client = await spawn_client(authorize=True, administrator=True)
label = await fake2.labels.create()
term = label.name[:2].lower()
resp = await client.get(f"/labels?find={term}")
assert resp.status == 200
assert await resp.json() == snapshot
resp = await client.get("/labels?find=Question")
assert resp.status == 200
assert await resp.json() == snapshot
@pytest.mark.apitest
@pytest.mark.parametrize("status", [200, 404])
async def METHOD_NAME(status, fake2, spawn_client, snapshot):
"""
Test that a ``GET /labels/:label_id`` returns the correct label document.
"""
client = await spawn_client(authorize=True, administrator=True)
label_1 = await fake2.labels.create()
label_2 = await fake2.labels.create()
await client.db.samples.insert_many(
[
{"_id": "foo", "name": "Foo", "labels": [label_1.id]},
{"_id": "bar", "name": "Bar", "labels": [label_2.id]},
{"_id": "baz", "name": "Baz", "labels": [label_1.id]},
],
session=None,
)
resp = await client.get(f"/labels/{22 if status == 404 else label_1.id}")
assert resp.status == status
assert await resp.json() == snapshot
@pytest.mark.apitest
@pytest.mark.parametrize("error", [None, "400_exists", "400_color"])
async def test_create(error, fake2, spawn_client, test_random_alphanumeric, resp_is):
"""
Test that a label can be added to the database at ``POST /labels``.
"""
client = await spawn_client(authorize=True, administrator=True)
label = await fake2.labels.create()
data = {"name": "Bug", "color": "#a83432", "description": "This is a bug"}
if error == "400_exists":
data["name"] = label.name
if error == "400_color":
data["color"] = "#1234567"
resp = await client.post("/labels", data)
if error == "400_exists":
await resp_is.bad_request(resp, "Label name already exists")
return
if error == "400_color":
assert resp.status == 400
return
assert resp.status == 201
assert await resp.json() == {
"id": 2,
"name": "Bug",
"color": "#A83432",
"description": "This is a bug",
"count": 0,
}
@pytest.mark.apitest
@pytest.mark.parametrize("error", [None, "404", "400_name", "400_color", "400_null"])
async def test_edit(error, fake2, spawn_client, resp_is, snapshot):
"""
Test that a label can be updated at ``PATCH /labels/:label_id``.
"""
client = await spawn_client(authorize=True, administrator=True)
label_1 = await fake2.labels.create()
label_2 = await fake2.labels.create()
await client.db.samples.insert_many(
[
{"_id": "foo", "name": "Foo", "labels": [label_1.id]},
{"_id": "bar", "name": "Bar", "labels": [label_2.id]},
{"_id": "baz", "name": "Baz", "labels": [label_1.id]},
],
session=None,
)
data = {}
if error is None:
data["name"] = "Summer"
if error == "400_color":
data["color"] = "#123bzp1"
if error == "400_name":
# Name already exists.
data["name"] = label_2.name
if error == "400_null":
data["name"] = None
resp = await client.patch(
f"/labels/{5 if error == '404' else label_1.id}", data=data
)
if error == "404":
await resp_is.not_found(resp)
return
if error == "400_color":
assert resp.status == 400
return
if error == "400_name":
await resp_is.bad_request(resp, "Label name already exists")
return
if error == "400_null":
assert resp.status == 400
return
assert resp.status == 200
assert await resp.json() == snapshot
@pytest.mark.apitest
@pytest.mark.parametrize("status", [204, 404])
async def test_remove(
status,
fake2,
spawn_client,
mock_samples: List[dict],
snapshot,
):
"""
Test that a label can be deleted from the database at ``DELETE /labels/:label_id``.
Test that samples are updated when a label is deleted.
"""
client = await spawn_client(authorize=True, administrator=True)
label_1 = await fake2.labels.create()
label_2 = await fake2.labels.create()
label_3 = await fake2.labels.create()
await client.db.subtraction.insert_many(
[{"_id": "foo", "name": "Foo"}, {"_id": "bar", "name": "Bar"}], session=None
)
mock_samples[0].update({"labels": [label_1.id, label_3.id]})
mock_samples[1].update({"labels": [label_2.id, label_3.id]})
mock_samples[2].update({"labels": [label_1.id]})
await client.db.samples.insert_many(mock_samples, session=None)
resp = await client.delete(f"/labels/{22 if status == 404 else label_1.id}")
assert resp.status == status
assert await resp.json() == snapshot
if status == 204:
label_ids_in_samples = await client.db.samples.distinct("labels")
assert label_1.id not in label_ids_in_samples
assert label_2.id in label_ids_in_samples
assert label_3.id in label_ids_in_samples
@pytest.mark.apitest
@pytest.mark.parametrize("value", ["valid_hex_color", "invalid_hex_color"])
async def test_is_valid_hex_color(value, spawn_client, resp_is):
"""
Tests that when an invalid hex color is used, validators.is_valid_hex_color rejects it and the request fails with a 400 error.
"""
client = await spawn_client(authorize=True)
data = {
"name": "test",
"color": "#fc5203" if value == "valid_hex_color" else "foo",
"description": "test",
}
resp = await client.patch("/labels/00", data=data)
if value == "valid_hex_color":
await resp_is.not_found(resp)
else:
assert resp.status == 400
assert await resp.json() == [
{
"loc": ["color"],
"msg": "The format of the color code is invalid",
"type": "value_error",
"in": "body",
}
] |
pyinit | #-------------------------------------------------------------------------------
# MFMHydroBase
#-------------------------------------------------------------------------------
from PYB11Generator import *
from GenericRiemannHydro import *
from RestartMethods import *
@PYB11template("Dimension")
@PYB11module("SpheralGSPH")
class MFMHydroBase(GenericRiemannHydro):
PYB11typedefs = """
typedef typename %(Dimension)s::Scalar Scalar;
typedef typename %(Dimension)s::Vector Vector;
typedef typename %(Dimension)s::Tensor Tensor;
typedef typename %(Dimension)s::SymTensor SymTensor;
typedef typename Physics<%(Dimension)s>::TimeStepType TimeStepType;
"""
def METHOD_NAME(smoothingScaleMethod = "const SmoothingScaleBase<%(Dimension)s>&",
dataBase = "DataBase<%(Dimension)s>&",
riemannSolver = "RiemannSolverBase<%(Dimension)s>&",
W = "const TableKernel<%(Dimension)s>&",
epsDiffusionCoeff = "const Scalar",
cfl = "const double",
useVelocityMagnitudeForDt = "const bool",
compatibleEnergyEvolution = "const bool",
evolveTotalEnergy = "const bool",
XSPH = "const bool",
correctVelocityGradient = "const bool",
gradType = "const GradientType",
densityUpdate = "const MassDensityType",
HUpdate = "const HEvolutionType",
epsTensile = "const double",
nTensile = "const double",
xmin = "const Vector&",
xmax = "const Vector&"):
"GSPHHydroBase constructor"
#...........................................................................
# Virtual methods
@PYB11virtual
def initializeProblemStartup(dataBase = "DataBase<%(Dimension)s>&"):
"Tasks we do once on problem startup."
return "void"
@PYB11virtual
def registerState(dataBase = "DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&"):
"Register the state Hydro expects to use and evolve."
return "void"
@PYB11virtual
def registerDerivatives(dataBase = "DataBase<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Register the derivatives/change fields for updating state."
return "void"
@PYB11virtual
def preStepInitialize(self,
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Optional hook to be called at the beginning of a time step."
return "void"
@PYB11virtual
def initialize(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Initialize the Hydro before we start a derivative evaluation."
return "void"
@PYB11virtual
@PYB11const
def evaluateDerivatives(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"""Evaluate the derivatives for the principle hydro
mass density, velocity, and specific thermal energy."""
return "void"
@PYB11virtual
@PYB11const
def finalizeDerivatives(time = "const Scalar",
dt = "const Scalar",
dataBase = "const DataBase<%(Dimension)s>&",
state = "const State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Finalize the derivatives."
return "void"
@PYB11virtual
def applyGhostBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Apply boundary conditions to the physics specific fields."
return "void"
@PYB11virtual
def enforceBoundaries(state = "State<%(Dimension)s>&",
derivs = "StateDerivatives<%(Dimension)s>&"):
"Enforce boundary conditions for the physics specific fields."
return "void"
DvolumeDt = PYB11property("const FieldList<%(Dimension)s, Scalar>&", "DvolumeDt", returnpolicy="reference_internal")
#-------------------------------------------------------------------------------
# Inject methods
#-------------------------------------------------------------------------------
PYB11inject(RestartMethods, MFMHydroBase) |
test pickle named sharding | # Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
from absl.testing import parameterized
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax import numpy as jnp
from jax import config
from jax.interpreters import pxla
from jax._src import test_util as jtu
from jax._src.lib import xla_client as xc
import numpy as np
config.parse_flags_with_absl()
def _get_device_by_id(device_id: int) -> xc.Device:
for device in jax.devices():
if device.id == device_id:
return device
raise ValueError(f'Device {device_id} was not found')
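# Make xla_client Device objects picklable by reducing them to their integer id and resolving the id back to a device on unpickling.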
xc.Device.__reduce__ = lambda d: (_get_device_by_id, (d.id,))
if cloudpickle is not None:
def _reduce_mesh(mesh):
# Avoid including mesh._hash in the serialized bytes for Mesh. Without this
# the Mesh would be different among the workers.
return jax.sharding.Mesh, (mesh.devices, mesh.axis_names)
cloudpickle.CloudPickler.dispatch_table[jax.sharding.Mesh] = _reduce_mesh
class CloudpickleTest(jtu.JaxTestCase):
@unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
def testPickleOfJittedFunctions(self):
@jax.jit
def f(x, y):
return x * y
@jax.jit
def g(z):
return f(z, z + 77) # noqa: F821
expected = g(32)
s = cloudpickle.dumps(g)
del f, g
g_unpickled = pickle.loads(s)
actual = g_unpickled(32)
self.assertEqual(expected, actual)
@unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
def testPickleOfPmappedFunctions(self):
@jax.pmap
def f(x, y):
return x * y
@jax.pmap
def g(z):
return f(z, z + 77) # noqa: F821
expected = g(jnp.asarray([[32]]))
s = cloudpickle.dumps(g)
del f, g
g_unpickled = pickle.loads(s)
actual = g_unpickled(jnp.asarray([[32]]))
self.assertEqual(expected, actual)
class PickleTest(jtu.JaxTestCase):
def testPickleOfArray(self):
x = jnp.arange(10.0)
s = pickle.dumps(x)
y = pickle.loads(s)
self.assertArraysEqual(x, y)
self.assertIsInstance(y, type(x))
self.assertEqual(x.aval, y.aval)
def testPickleOfArrayWeakType(self):
x = jnp.array(4.0)
self.assertEqual(x.aval.weak_type, True)
s = pickle.dumps(x)
y = pickle.loads(s)
self.assertArraysEqual(x, y)
self.assertIsInstance(y, type(x))
self.assertEqual(x.aval, y.aval)
@jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])
def testPickleOfKeyArray(self, prng_name):
with jax.default_prng_impl(prng_name):
k1 = jax.random.PRNGKey(72)
s = pickle.dumps(k1)
k2 = pickle.loads(s)
self.assertEqual(k1.dtype, k2.dtype)
with jax.legacy_prng_key('allow'):
self.assertArraysEqual(jax.random.key_data(k1),
jax.random.key_data(k2))
@parameterized.parameters(
(jax.sharding.PartitionSpec(),),
(jax.sharding.PartitionSpec(None),),
(jax.sharding.PartitionSpec('x', None),),
(jax.sharding.PartitionSpec(None, 'y'),),
(jax.sharding.PartitionSpec('x', 'y'),),
(jax.sharding.PartitionSpec(('x', 'y'),),),
)
def testPickleOfPartitionSpecs(self, partition_spec):
restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))
self.assertIsInstance(restored_partition_spec, jax.sharding.PartitionSpec)
self.assertTupleEqual(partition_spec, restored_partition_spec)
def testPickleX64(self):
with jax.experimental.enable_x64():
x = jnp.array(4.0, dtype='float64')
s = pickle.dumps(x)
with jax.experimental.disable_x64():
y = pickle.loads(s)
self.assertEqual(x.dtype, jnp.float64)
self.assertArraysEqual(x, y, check_dtypes=False)
self.assertEqual(y.dtype, jnp.float32)
self.assertEqual(y.aval.dtype, jnp.float32)
self.assertIsInstance(y, type(x))
def testPickleTracerError(self):
with self.assertRaises(jax.errors.ConcretizationTypeError):
jax.jit(pickle.dumps)(0)
def testPickleSharding(self):
sharding = pxla.ShardingSpec((pxla.NoSharding(), pxla.Chunked(
(2, 2)), pxla.Unstacked(3)), (pxla.ShardedAxis(0), pxla.ShardedAxis(1),
pxla.ShardedAxis(2), pxla.Replicated(4)))
self.assertEqual(pickle.loads(pickle.dumps(sharding)), sharding)
def testPickleOpSharding(self):
sharding = pxla.ShardingSpec((pxla.NoSharding(), pxla.Chunked((2, 2))),
(pxla.ShardedAxis(0), pxla.ShardedAxis(1)))
op_sharding = sharding.sharding_proto().to_proto()
self.assertTrue(
xc.HloSharding.from_proto(pickle.loads(pickle.dumps(op_sharding))),
xc.HloSharding.from_proto(op_sharding))
def test_pickle_single_device_sharding(self):
s = jax.sharding.SingleDeviceSharding(jax.devices()[0])
self.assertEqual(s, pickle.loads(pickle.dumps(s)))
def test_pickle_pmap_sharding(self):
ss = pxla.ShardingSpec(
sharding=(pxla.Unstacked(8),),
mesh_mapping=(pxla.ShardedAxis(0),))
s = jax.sharding.PmapSharding(jax.devices(), ss)
self.assertEqual(s, pickle.loads(pickle.dumps(s)))
def test_pickle_gspmd_sharding(self):
s = jax.sharding.GSPMDSharding.get_replicated(jax.devices())
self.assertEqual(s, pickle.loads(pickle.dumps(s)))
@unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
def METHOD_NAME(self):
s = jax.sharding.NamedSharding(
mesh=jax.sharding.Mesh(np.array(jax.devices()), 'd'),
spec=jax.sharding.PartitionSpec('d'))
self.assertEqual(s, pickle.loads(pickle.dumps(s)))
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader()) |
dispatch msc | #!/usr/bin/python
# -*- coding: utf-8; -*-
#
# (c) 2012 Mandriva, http://www.mandriva.com/
#
# This file is part of Pulse 2, http://pulse2.mandriva.org
#
# Pulse 2 is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Pulse 2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pulse 2; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Anticipated execution of scheduled tasks on the scheduler.
"""
import logging
from time import sleep
from pulse2.inventoryserver.utils import InventoryUtils, MMCProxy
logger = logging.getLogger()
class AttemptToScheduler(object):
"""
Triggers early execution of scheduled attempts.
This engine is called when an inventory is received.
"""
# First delay after the inventory reception
# TODO - move the delays in a .ini file ?
FIRST_DELAY = 60
BETWEEN_TASKS_DELAY = 1
def __init__(self, xml_content, uuid):
"""
@param uuid: Host UUID
@type uuid: string
"""
self.xml = xml_content
self.uuid = uuid
mmc = MMCProxy()
if not mmc.failure:
self.proxy = mmc.proxy
self.check_target()
else:
logger.warn("<Inventory2Scheduler> Unable to contact MMC Agent using XMLRPC (check host/port and credentials)")
logger.warn("<Inventory2Scheduler> Scheduler actions aborted")
logger.info("<Inventory2Scheduler> Scheduler actions finished")
def check_target(self):
if InventoryUtils.is_coming_from_pxe(self.xml):
logger.info("<Inventory2Scheduler> Ignoring inventory for %s received (Minimal PXE inventory)" % self.uuid)
return
if self.proxy.msc.is_pull_target(self.uuid):
logger.info("<Inventory2Scheduler> Ignoring inventory for %s received (Client is in Pull mode)" % self.uuid)
return
logger.info("<Inventory2Scheduler> Valid inventory for %s received" % self.uuid)
self.METHOD_NAME()
def METHOD_NAME(self):
"""
Get a filtered list of scheduled tasks and execute each of them.
"""
try:
tasks = self.proxy.msc.checkLightPullCommands(self.uuid)
except Exception, e:
logger.exception("<Inventory2Scheduler> Unable to start Light Pull, error was: %s" % str(e))
return False
#if tasks == False:
if len(tasks) == 0:
logger.debug("<Inventory2Scheduler> Light Pull: No deployments scheduled, skipping")
return
else:
# execute all commands on host :
total = len(tasks)
logger.info("<Inventory2Scheduler> Light Pull: %d deployments to start" % total)
success = self.start_all_tasks_on_host(tasks)
if not success:
return False
def start_all_tasks_on_host(self, tasks):
"""
Start every scheduled command in sequence, applying the configured delays
before and between the executions.
@param tasks: list of ids (coh) command_on_host.id
@type tasks: list
@return: bool
"""
logger.info("<Inventory2Scheduler> Light Pull: Waiting %d seconds before awaking deployments" % self.FIRST_DELAY)
sleep(self.FIRST_DELAY)
for id in tasks:
try:
self.proxy.msc.start_command_on_host(id)
except Exception, e:
logger.exception("<Inventory2Scheduler> Light Pull: Unable to start command %d on host %s, error was: %s" % (id, self.uuid, str(e)))
return False
logger.info("<Inventory2Scheduler> Light Pull: Task %d on host %s successfully re-queued)" % (int(id), self.uuid))
sleep(self.BETWEEN_TASKS_DELAY)
return True |
init state | """ScorerInterface implementation for CTC."""
import numpy as np
import torch
from espnet.nets.ctc_prefix_score import CTCPrefixScore, CTCPrefixScoreTH
from espnet.nets.scorer_interface import BatchPartialScorerInterface
class CTCPrefixScorer(BatchPartialScorerInterface):
"""Decoder interface wrapper for CTCPrefixScore."""
def __init__(self, ctc: torch.nn.Module, eos: int):
"""Initialize class.
Args:
ctc (torch.nn.Module): The CTC implementation.
For example, :class:`espnet.nets.pytorch_backend.ctc.CTC`
eos (int): The end-of-sequence id.
"""
self.ctc = ctc
self.eos = eos
self.impl = None
def METHOD_NAME(self, x: torch.Tensor):
"""Get an initial state for decoding.
Args:
x (torch.Tensor): The encoded feature tensor
Returns: initial state
"""
logp = self.ctc.log_softmax(x.unsqueeze(0)).detach().squeeze(0).cpu().numpy()
# TODO(karita): use CTCPrefixScoreTH
self.impl = CTCPrefixScore(logp, 0, self.eos, np)
return 0, self.impl.initial_state()
def select_state(self, state, i, new_id=None):
"""Select state with relative ids in the main beam search.
Args:
state: Decoder state for prefix tokens
i (int): Index to select a state in the main beam search
new_id (int): New label id to select a state if necessary
Returns:
state: pruned state
"""
if type(state) == tuple:
if len(state) == 2: # for CTCPrefixScore
sc, st = state
return sc[i], st[i]
else: # for CTCPrefixScoreTH (need new_id > 0)
r, log_psi, f_min, f_max, scoring_idmap = state
s = log_psi[i, new_id].expand(log_psi.size(1))
if scoring_idmap is not None:
return r[:, :, i, scoring_idmap[i, new_id]], s, f_min, f_max
else:
return r[:, :, i, new_id], s, f_min, f_max
return None if state is None else state[i]
def score_partial(self, y, ids, state, x):
"""Score new token.
Args:
y (torch.Tensor): 1D prefix token
ids (torch.Tensor): torch.int64 next tokens to score
state: decoder state for prefix tokens
x (torch.Tensor): 2D encoder feature that generates ys
Returns:
tuple[torch.Tensor, Any]:
Tuple of a score tensor for y that has a shape `(len(ids),)`
and next state for ys
"""
prev_score, state = state
presub_score, new_st = self.impl(y.cpu(), ids.cpu(), state)
tscore = torch.as_tensor(
presub_score - prev_score, device=x.device, dtype=x.dtype
)
return tscore, (presub_score, new_st)
def batch_init_state(self, x: torch.Tensor):
"""Get an initial state for decoding.
Args:
x (torch.Tensor): The encoded feature tensor
Returns: initial state
"""
logp = self.ctc.log_softmax(x.unsqueeze(0)) # assuming batch_size = 1
xlen = torch.tensor([logp.size(1)])
self.impl = CTCPrefixScoreTH(logp, xlen, 0, self.eos)
return None
def batch_score_partial(self, y, ids, state, x):
"""Score new token.
Args:
y (torch.Tensor): 1D prefix token
ids (torch.Tensor): torch.int64 next token to score
state: decoder state for prefix tokens
x (torch.Tensor): 2D encoder feature that generates ys
Returns:
tuple[torch.Tensor, Any]:
Tuple of a score tensor for y that has a shape `(len(ids),)`
and next state for ys
"""
batch_state = (
(
torch.stack([s[0] for s in state], dim=2),
torch.stack([s[1] for s in state]),
state[0][2],
state[0][3],
)
if state[0] is not None
else None
)
return self.impl(y, batch_state, ids)
def extend_prob(self, x: torch.Tensor):
"""Extend probs for decoding.
This extension is for streaming decoding
as in Eq (14) in https://arxiv.org/abs/2006.14941
Args:
x (torch.Tensor): The encoded feature tensor
"""
logp = self.ctc.log_softmax(x.unsqueeze(0))
self.impl.extend_prob(logp)
def extend_state(self, state):
"""Extend state for decoding.
This extension is for streaming decoding
as in Eq (14) in https://arxiv.org/abs/2006.14941
Args:
state: The states of hyps
Returns: extended state
"""
new_state = []
for s in state:
new_state.append(self.impl.extend_state(s))
return new_state |
connect | import pytest
import datetime as dt
from typing import Any, Dict
import psycopg
from psycopg.conninfo import conninfo_to_dict
from . import dbapi20
from . import dbapi20_tpc
@pytest.fixture(scope="class")
def with_dsn(request, session_dsn):
request.cls.connect_args = (session_dsn,)
@pytest.mark.usefixtures("with_dsn")
class PsycopgTests(dbapi20.DatabaseAPI20Test):
driver = psycopg
# connect_args = () # set by the fixture
connect_kw_args: Dict[str, Any] = {}
def test_nextset(self):
# tested elsewhere
pass
def test_setoutputsize(self):
# no-op
pass
@pytest.mark.usefixtures("tpc")
@pytest.mark.usefixtures("with_dsn")
class PsycopgTPCTests(dbapi20_tpc.TwoPhaseCommitTests):
driver = psycopg
connect_args = () # set by the fixture
def METHOD_NAME(self):
return psycopg.METHOD_NAME(*self.connect_args)
# Shut up warnings
PsycopgTests.failUnless = PsycopgTests.assertTrue
PsycopgTPCTests.assertEquals = PsycopgTPCTests.assertEqual
@pytest.mark.parametrize(
"typename, singleton",
[
("bytea", "BINARY"),
("date", "DATETIME"),
("timestamp without time zone", "DATETIME"),
("timestamp with time zone", "DATETIME"),
("time without time zone", "DATETIME"),
("time with time zone", "DATETIME"),
("interval", "DATETIME"),
("integer", "NUMBER"),
("smallint", "NUMBER"),
("bigint", "NUMBER"),
("real", "NUMBER"),
("double precision", "NUMBER"),
("numeric", "NUMBER"),
("decimal", "NUMBER"),
("oid", "ROWID"),
("varchar", "STRING"),
("char", "STRING"),
("text", "STRING"),
],
)
def test_singletons(conn, typename, singleton):
singleton = getattr(psycopg, singleton)
cur = conn.cursor()
cur.execute(f"select null::{typename}")
oid = cur.description[0].type_code
assert singleton == oid
assert oid == singleton
assert singleton != oid + 10000
assert oid + 10000 != singleton
@pytest.mark.parametrize(
"ticks, want",
[
(0, "1970-01-01T00:00:00.000000+0000"),
(1273173119.99992, "2010-05-06T14:11:59.999920-0500"),
],
)
def test_timestamp_from_ticks(ticks, want):
s = psycopg.TimestampFromTicks(ticks)
want = dt.datetime.strptime(want, "%Y-%m-%dT%H:%M:%S.%f%z")
assert s == want
@pytest.mark.parametrize(
"ticks, want",
[
(0, "1970-01-01"),
# Returned date is local
(1273173119.99992, ["2010-05-06", "2010-05-07"]),
],
)
def test_date_from_ticks(ticks, want):
s = psycopg.DateFromTicks(ticks)
if isinstance(want, str):
want = [want]
want = [dt.datetime.strptime(w, "%Y-%m-%d").date() for w in want]
assert s in want
@pytest.mark.parametrize(
"ticks, want",
[(0, "00:00:00.000000"), (1273173119.99992, "00:11:59.999920")],
)
def test_time_from_ticks(ticks, want):
s = psycopg.TimeFromTicks(ticks)
want = dt.datetime.strptime(want, "%H:%M:%S.%f").time()
assert s.replace(hour=0) == want
@pytest.mark.parametrize(
"args, kwargs, want",
[
((), {}, ""),
(("",), {}, ""),
(("host=foo user=bar",), {}, "host=foo user=bar"),
(("host=foo",), {"user": "baz"}, "host=foo user=baz"),
(
("host=foo port=5432",),
{"host": "qux", "user": "joe"},
"host=qux user=joe port=5432",
),
(("host=foo",), {"user": None}, "host=foo"),
],
)
def test_connect_args(monkeypatch, pgconn, args, kwargs, want):
the_conninfo: str
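# The bare yield makes fake_connect a generator (mirroring the generator-based connect it replaces); the returned pgconn becomes the generator's result.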
def fake_connect(conninfo):
nonlocal the_conninfo
the_conninfo = conninfo
return pgconn
yield
monkeypatch.setattr(psycopg.connection, "connect", fake_connect)
conn = psycopg.METHOD_NAME(*args, **kwargs)
assert conninfo_to_dict(the_conninfo) == conninfo_to_dict(want)
conn.close()
@pytest.mark.parametrize(
"args, kwargs, exctype",
[
(("host=foo", "host=bar"), {}, TypeError),
(("", ""), {}, TypeError),
((), {"nosuchparam": 42}, psycopg.ProgrammingError),
],
)
def test_connect_badargs(monkeypatch, pgconn, args, kwargs, exctype):
def fake_connect(conninfo):
return pgconn
yield
with pytest.raises(exctype):
psycopg.METHOD_NAME(*args, **kwargs) |
get parent | # -*- coding: utf-8 -*-
#
# ast_input_block.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from pynestml.meta_model.ast_input_port import ASTInputPort
from pynestml.meta_model.ast_node import ASTNode
class ASTInputBlock(ASTNode):
"""
This class is used to store blocks of input definitions.
ASTInputBlock represents the input block, e.g.:
.. code-block:: nestml
input:
spike_in pA <- excitatory spike
current_in pA <- continuous
@attribute inputPort set of input ports.
Grammar:
inputBlock: 'input'
BLOCK_OPEN
(inputPort | NEWLINE)*
BLOCK_CLOSE;
Attributes:
input_definitions = None
"""
def __init__(self, input_definitions=None, *args, **kwargs):
"""
Standard constructor.
Parameters for superclass (ASTNode) can be passed through :python:`*args` and :python:`**kwargs`.
:param input_definitions:
:type input_definitions: List[ASTInputPort]
"""
super(ASTInputBlock, self).__init__(*args, **kwargs)
if input_definitions is None:
input_definitions = []
assert (input_definitions is not None and isinstance(input_definitions, list)), \
'(PyNestML.AST.Input) No or wrong type of input definitions provided (%s)!' % type(input_definitions)
for definition in input_definitions:
assert (definition is not None and isinstance(definition, ASTInputPort)), \
'(PyNestML.AST.Input) No or wrong type of input definition provided (%s)!' % type(definition)
self.input_definitions = input_definitions
def clone(self):
"""
Return a clone ("deep copy") of this node.
:return: new AST node instance
:rtype: ASTInputBlock
"""
input_definitions_dup = [input_definition.clone() for input_definition in self.input_definitions]
dup = ASTInputBlock(input_definitions=input_definitions_dup,
# ASTNode common attributes:
source_position=self.source_position,
scope=self.scope,
comment=self.comment,
pre_comments=[s for s in self.pre_comments],
in_comment=self.in_comment,
implicit_conversion_factor=self.implicit_conversion_factor)
return dup
def get_input_ports(self):
"""
Returns the list of input ports.
:return: a list of input ports
:rtype: list(ASTInputPort)
"""
return self.input_definitions
def METHOD_NAME(self, ast):
"""
Indicates whether this node contains the handed over node.
:param ast: an arbitrary meta_model node.
:type ast: AST_
:return: AST if this or one of the child nodes contains the handed over element.
:rtype: AST_ or None
"""
for port in self.get_input_ports():
if port is ast:
return self
            parent = port.METHOD_NAME(ast)
            if parent is not None:
                return parent
return None
def equals(self, other):
"""
The equals method.
:param other: a different object.
:type other: object
:return: True if equal, otherwise False.
:rtype: bool
"""
if not isinstance(other, ASTInputBlock):
return False
if len(self.get_input_ports()) != len(other.get_input_ports()):
return False
my_input_ports = self.get_input_ports()
your_input_ports = other.get_input_ports()
for i in range(0, len(my_input_ports)):
if not my_input_ports[i].equals(your_input_ports[i]):
return False
return True |
get supported forks | """
Utilities for the EVM tools
"""
import json
import logging
import sys
from typing import Any, Callable, Dict, Optional, Tuple, TypeVar
import coincurve
from ethereum.base_types import U64, U256, Uint
from ethereum.crypto.hash import Hash32
from ..forks import Hardfork
W = TypeVar("W", Uint, U64, U256)
EXCEPTION_MAPS = {
"FrontierToHomesteadAt5": {
"fork_blocks": [("frontier", 0), ("homestead", 5)],
},
"HomesteadToDaoAt5": {
"fork_blocks": [("homestead", 0), ("dao_fork", 5)],
},
"HomesteadToEIP150At5": {
"fork_blocks": [("homestead", 0), ("tangerine_whistle", 5)],
},
"EIP158ToByzantiumAt5": {
"fork_blocks": [("spurious_dragon", 0), ("byzantium", 5)],
},
"ByzantiumToConstantinopleAt5": {
"fork_blocks": [("byzantium", 0), ("constantinople", 5)],
},
"ConstantinopleToIstanbulAt5": {
"fork_blocks": [("constantinople", 0), ("istanbul", 5)],
},
"BerlinToLondonAt5": {
"fork_blocks": [("berlin", 0), ("london", 5)],
},
"EIP150": {
"fork_blocks": [("tangerine_whistle", 0)],
},
"EIP158": {
"fork_blocks": [("spurious_dragon", 0)],
},
"Merge": {
"fork_blocks": [("paris", 0)],
},
"ConstantinopleFix": {
"fork_blocks": [("constantinople", 0)],
},
}
UNSUPPORTED_FORKS = ("constantinople",)
def parse_hex_or_int(value: str, to_type: Callable[[int], W]) -> W:
"""Read a Uint type from a hex string or int"""
    # if the value is a hex string, parse it as base 16
    if isinstance(value, str) and value.startswith("0x"):
        return to_type(int(value[2:], 16))
    # otherwise parse it as a decimal integer
else:
return to_type(int(value))
class FatalException(Exception):
"""Exception that causes the tool to stop"""
pass
def ensure_success(f: Callable, *args: Any) -> Any:
"""
Ensure that the function call succeeds.
Raise a FatalException if it fails.
"""
try:
return f(*args)
except Exception as e:
raise FatalException(e)
def get_module_name(forks: Any, options: Any, stdin: Any) -> Tuple[str, int]:
"""
Get the module name and the fork block for the given state fork.
"""
if options.state_fork.casefold() in UNSUPPORTED_FORKS:
sys.exit(f"Unsupported state fork: {options.state_fork}")
# If the state fork is an exception, use the exception config.
exception_config: Optional[Dict[str, Any]] = None
try:
exception_config = EXCEPTION_MAPS[options.state_fork]
except KeyError:
pass
if exception_config:
if options.input_env == "stdin":
assert stdin is not None
data = stdin["env"]
else:
with open(options.input_env, "r") as f:
data = json.load(f)
block_number = parse_hex_or_int(data["currentNumber"], Uint)
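        # Pick the latest fork whose activation block has already been reached.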
for fork, fork_block in exception_config["fork_blocks"]:
if block_number >= fork_block:
current_fork_module = fork
current_fork_block = fork_block
return current_fork_module, current_fork_block
# If the state fork is not an exception, use the fork name.
for fork in forks:
fork_module = fork.name.split(".")[-1]
key = "".join(x.title() for x in fork_module.split("_"))
if key == options.state_fork:
return fork_module, 0
# Neither in exception nor a standard fork name.
sys.exit(f"Unsupported state fork: {options.state_fork}")
def METHOD_NAME() -> str:
"""
Get the supported forks.
"""
supported_forks = [
fork.title_case_name.replace(" ", "") for fork in Hardfork.discover()
]
# Add the exception forks
supported_forks.extend(EXCEPTION_MAPS.keys())
# Remove the unsupported forks
supported_forks = [
fork
for fork in supported_forks
if fork.casefold() not in UNSUPPORTED_FORKS
]
return "\n".join(supported_forks)
def get_stream_logger(name: str) -> Any:
"""
Get a logger that writes to stdout.
"""
logger = logging.getLogger(name)
if not logger.handlers:
logger.setLevel(level=logging.INFO)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(levelname)s:%(name)s:%(message)s")
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
def secp256k1_sign(msg_hash: Hash32, secret_key: int) -> Tuple[U256, ...]:
"""
Returns the signature of a message hash given the secret key.
"""
private_key = coincurve.PrivateKey.from_int(secret_key)
signature = private_key.sign_recoverable(msg_hash, hasher=None)
return (
U256.from_be_bytes(signature[0:32]),
U256.from_be_bytes(signature[32:64]),
U256(signature[64]),
) |
draw phil object | from __future__ import absolute_import, division, print_function
from wx.lib.agw import customtreectrl
import wx
from libtbx.utils import Sorry
import re
class PhilTreeCtrl(customtreectrl.CustomTreeCtrl):
def __init__(self, *args, **kwds):
kwds = dict(kwds)
kwds['agwStyle'] = wx.TR_HAS_VARIABLE_ROW_HEIGHT|wx.TR_HAS_BUTTONS| \
wx.TR_TWIST_BUTTONS|wx.TR_HIDE_ROOT|wx.TR_SINGLE
customtreectrl.CustomTreeCtrl.__init__(self, *args, **kwds)
self.Bind(wx.EVT_TREE_KEY_DOWN, self.OnChar)
self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnRightClick)
self.Bind(wx.EVT_LEFT_DCLICK, self.OnDoubleClick)
self.AddRoot("(parameters)")
self._nodes_lookup_short = {}
self._nodes_lookup_full = {}
self._node_names_ordered = []
self.ClearSearch()
def DeleteAllItems(self):
customtreectrl.CustomTreeCtrl.DeleteAllItems(self)
self._nodes_lookup_short = {}
self._nodes_lookup_full = {}
self._node_names_ordered = []
def ClearSearch(self):
self._search_results = []
self._current_search_text = None
self._current_search_item = -1
def SaveNode(self, node, phil_object):
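    # Attach the phil object to the tree node and index the node under both
    # its short name and its full dotted path, so searches can match either.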
name = phil_object.name
full_path = phil_object.full_path
self._node_names_ordered.append((name, full_path))
node.SetData(phil_object)
if (name in self._nodes_lookup_short):
self._nodes_lookup_short[name].append(node)
else :
self._nodes_lookup_short[name] = [node]
if (full_path in self._nodes_lookup_full):
self._nodes_lookup_full[full_path].append(node)
else :
self._nodes_lookup_full[full_path] = [node]
def METHOD_NAME(self, phil_root):
assert (phil_root.name == "")
for phil_object in phil_root.objects :
self._DrawPhilObject(phil_object, self.GetRootItem())
def _DrawPhilObject(self, phil_object, current_node):
if (phil_object.is_definition):
node = self.AppendItem(current_node, phil_object.as_str().strip())
self.SaveNode(node, phil_object)
else :
new_node = self.AppendItem(current_node, phil_object.name)
self.SaveNode(new_node, phil_object)
for object_ in phil_object.objects :
self._DrawPhilObject(object_, new_node)
self.Expand(new_node)
def SearchItems(self, search_text, partial=False):
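    # A search term containing "." is matched against full parameter paths,
    # otherwise it is matched against short parameter names.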
if (search_text != self._current_search_text):
results = []
if ("." in search_text):
for name, path in self._node_names_ordered :
if (partial and (search_text in path)) or (path == search_text):
results.extend(self._nodes_lookup_full[path])
else :
for name, path in self._node_names_ordered :
if (partial and (search_text in name)) or (name == search_text):
results.extend(self._nodes_lookup_short[name])
self._search_results = results
self.OnNext(None)
return len(self._search_results)
def OnNext(self, event):
if (len(self._search_results) == 0):
return
self._current_search_item += 1
if (self._current_search_item == len(self._search_results)):
self._current_search_item = 0
node = self._search_results[self._current_search_item]
self.SelectItem(node)
self.ScrollTo(node)
def OnPrevious(self, event):
if (len(self._search_results) == 0):
return
self._current_search_item -= 1
if (self._current_search_item < 0):
self._current_search_item = len(self._search_results) - 1
node = self._search_results[self._current_search_item]
self.SelectItem(node)
self.ScrollTo(node)
def OnChar(self, event):
key = event.GetKeyCode()
if (key == wx.WXK_DOWN):
if (self._current_search_text is not None):
self.OnPrevious(None)
else :
current_item = self.GetSelection()
if (current_item is None):
current_item = self.GetFirstVisibleItem()
item = self.GetNextVisible(current_item)
if (item is not None):
self.SelectItem(item)
self.ScrollTo(item)
elif (key == wx.WXK_UP):
if (self._current_search_text is not None):
self.OnNext(None)
else :
current_item = self.GetSelection()
if (current_item is None):
item = self.GetFirstVisibleItem()
else :
item = self.GetPrevVisible(current_item)
if (item is not None):
self.SelectItem(item)
self.ScrollTo(item)
elif (key == wx.WXK_RETURN):
item = self.GetSelection()
self.EditNode(item)
def OnRightClick(self, event):
item = event.GetItem()
self.EditNode(item)
def OnDoubleClick(self, event):
item = self.GetSelection()
if (item is not None):
self.EditNode(item)
def EditNode(self, item):
if (item is not None):
phil_object = item.GetData()
phil_object.show()
valid_text = re.compile("^[a-zA-Z]+[a-zA-Z0-9_]*$")
valid_text_partial = re.compile("^[a-zA-Z0-9_]*$")
class PhilTreeFrame(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
self.panel = wx.Panel(self)
szr = wx.BoxSizer(wx.VERTICAL)
self.panel.SetSizer(szr)
self.tree = PhilTreeCtrl(self.panel, -1, size=(600,400),
style=wx.SUNKEN_BORDER)
txt1 = wx.StaticText(self.panel, -1, "Search:")
szr2 = wx.FlexGridSizer(cols=2)
szr.Add(szr2)
szr.Add(self.tree, 1, wx.EXPAND, 2)
szr2.Add(txt1, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
szr3 = wx.BoxSizer(wx.HORIZONTAL)
szr2.Add(szr3, 0, wx.ALIGN_CENTER_VERTICAL)
search_box = wx.SearchCtrl(self.panel, style=wx.TE_PROCESS_ENTER,
size=(160,-1))
search_box.ShowCancelButton(True)
szr3.Add(search_box, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.Bind(wx.EVT_TEXT_ENTER, self.OnSearch, search_box)
self.Bind(wx.EVT_SEARCHCTRL_SEARCH_BTN, self.OnSearch, search_box)
self.Bind(wx.EVT_SEARCHCTRL_CANCEL_BTN, self.OnCancel, search_box)
self.partial_box = wx.CheckBox(self.panel, -1, "Include partial matches")
szr3.Add(self.partial_box, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
self.search_result = wx.StaticText(self.panel, -1, "", size=(300,-1))
szr2.Add((1,1))
szr2.Add(self.search_result, 0, wx.ALL|wx.ALIGN_CENTER_VERTICAL, 5)
szr.Layout()
szr.Fit(self.panel)
self.Fit()
self.SetMinSize((480,320))
def DrawPhilTree(self, phil_object):
self.tree.METHOD_NAME(phil_object)
self.Refresh()
def OnSearch(self, event):
search_text = event.GetEventObject().GetValue()
partial = self.partial_box.GetValue()
if ((partial and (not valid_text_partial.match(search_text))) or
(not partial and (not valid_text.match(search_text)))):
self.search_result.SetLabel("Invalid search text!")
self.search_result.SetForegroundColour((200,0,0))
raise Sorry("Invalid search text - only alphanumeric characters ("+
"including) underscore are allowed. If partial matches are not "+
"included, the search text must also begin with a letter.")
n_items = self.tree.SearchItems(search_text, partial)
if (n_items == 0):
self.search_result.SetForegroundColour((200,0,0))
else :
self.search_result.SetForegroundColour((0,0,0))
self.search_result.SetLabel("%d items found" % n_items)
self.panel.Layout()
def OnCancel(self, event):
event.GetEventObject().Clear()
self.tree.ClearSearch()
if (__name__ == "__main__"):
from mmtbx.command_line import fmodel
app = wx.App(0)
frame = PhilTreeFrame(None, -1, "Phenix settings")
frame.DrawPhilTree(fmodel.fmodel_from_xray_structure_master_params)
frame.Show()
app.MainLoop() |
test get running rhel containers both ok | import pytest
from mock.mock import patch
from insights.core.context import HostContext
from insights.core.exceptions import SkipComponent
from insights.parsers.docker_list import DockerListContainers
from insights.parsers.podman_list import PodmanListContainers
from insights.specs.datasources.container import running_rhel_containers
from insights.tests import context_wrap
PODMAN_LIST_CONTAINERS_2_UP = """
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
03e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd "/usr/sbin/httpd -DFOREGROUND" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)
05516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea "/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'" 18 hours ago Up 18 hours ago tender_rosalind 4.751 MB (virtual 200.4 MB)
""".strip()
DOCKER_LIST_CONTAINERS_1_UP = """
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES SIZE
d3e2861336a76e29155836113ff6560cb70780c32f95062642993b2b3d0fc216 rhel7_httpd "/usr/sbin/httpd -DFOREGROUND" 45 seconds ago Up 37 seconds 0.0.0.0:8080->80/tcp angry_saha 796 B (virtual 669.2 MB)
d5516ea08b565e37e2a4bca3333af40a240c368131b77276da8dec629b7fe102 bd8638c869ea "/bin/sh -c 'yum install -y vsftpd-2.2.2-6.el6'" 18 hours ago Exited (137) 18 hours ago tender_rosalind 4.751 MB (virtual 200.4 MB)
""".strip()
FEDORA = """
Fedora release 23 (Twenty Three)
""".strip()
REDHAT_RELEASE7 = """
Red Hat Enterprise Linux release 7.3
""".strip()
def fake_shell_out(cmd, split=True, timeout=None, keep_rc=False, env=None, signum=None):
tmp_cmd = cmd.strip().split()
if 'podman' in tmp_cmd[0]:
return [REDHAT_RELEASE7, ]
if 'docker' in tmp_cmd[0]:
return [FEDORA, ]
raise Exception()
@patch("insights.core.context.HostContext.shell_out", return_value=[REDHAT_RELEASE7, ])
def METHOD_NAME(fso):
p_ctn = PodmanListContainers(context_wrap(PODMAN_LIST_CONTAINERS_2_UP))
d_ctn = DockerListContainers(context_wrap(DOCKER_LIST_CONTAINERS_1_UP))
assert p_ctn is not None
assert d_ctn is not None
broker = {
PodmanListContainers: p_ctn,
DockerListContainers: d_ctn,
HostContext: HostContext()}
ret = running_rhel_containers(broker)
assert len(ret) == 3
assert ('rhel7_httpd', 'podman', '03e2861336a7') in ret
assert ('bd8638c869ea', 'podman', '05516ea08b56') in ret
assert ('rhel7_httpd', 'docker', 'd3e2861336a7') in ret
# the stopped container is not collected
@patch("insights.core.context.HostContext.shell_out", side_effect=fake_shell_out)
def test_get_running_rhel_containers_podman_only(fso):
p_ctn = PodmanListContainers(context_wrap(PODMAN_LIST_CONTAINERS_2_UP))
d_ctn = DockerListContainers(context_wrap(DOCKER_LIST_CONTAINERS_1_UP))
assert p_ctn is not None
assert d_ctn is not None
broker = {
PodmanListContainers: p_ctn,
DockerListContainers: d_ctn,
HostContext: HostContext()}
ret = running_rhel_containers(broker)
assert len(ret) == 2
assert ('rhel7_httpd', 'podman', '03e2861336a7') in ret
assert ('bd8638c869ea', 'podman', '05516ea08b56') in ret
# docker container is from Fedora image, not collected
@patch("insights.core.context.HostContext.shell_out", return_value=[REDHAT_RELEASE7, ])
def test_get_running_rhel_containers_skip_dup(fso):
p_ctn = PodmanListContainers(context_wrap(PODMAN_LIST_CONTAINERS_2_UP))
# use the 'podman list' result as input for docker
d_ctn = DockerListContainers(context_wrap(PODMAN_LIST_CONTAINERS_2_UP))
assert p_ctn is not None
assert d_ctn is not None
broker = {
PodmanListContainers: p_ctn,
DockerListContainers: d_ctn,
HostContext: HostContext()}
ret = running_rhel_containers(broker)
assert len(ret) == 2
assert ('rhel7_httpd', 'podman', '03e2861336a7') in ret
assert ('bd8638c869ea', 'podman', '05516ea08b56') in ret
# duplicated container is removed from docker, not collected
@patch("insights.core.context.HostContext.shell_out", return_value=[FEDORA, ])
def test_get_running_rhel_containers_empty(fso):
p_ctn = PodmanListContainers(context_wrap(PODMAN_LIST_CONTAINERS_2_UP))
d_ctn = DockerListContainers(context_wrap(DOCKER_LIST_CONTAINERS_1_UP))
assert p_ctn is not None
assert d_ctn is not None
broker = {
PodmanListContainers: p_ctn,
DockerListContainers: d_ctn,
HostContext: HostContext()}
with pytest.raises(SkipComponent):
ret = running_rhel_containers(broker)
assert len(ret) == 0 |
prepare | """ ArgumentEffects computes write effect on arguments. """
from pythran.analyses.aliases import Aliases
from pythran.analyses.intrinsics import Intrinsics
from pythran.analyses.global_declarations import GlobalDeclarations
from pythran.passmanager import ModuleAnalysis
from pythran.tables import MODULES
from pythran.graph import DiGraph
# FIXME: investigate why we need to import it that way
from pythran import intrinsic
import gast as ast
from functools import reduce
class FunctionEffects(object):
def __init__(self, node):
self.func = node
if isinstance(node, ast.FunctionDef):
self.update_effects = [False] * len(node.args.args)
elif isinstance(node, intrinsic.Intrinsic):
self.update_effects = [isinstance(x, intrinsic.UpdateEffect)
for x in node.argument_effects]
elif isinstance(node, ast.alias):
self.update_effects = []
elif isinstance(node, intrinsic.Class):
self.update_effects = []
else:
raise NotImplementedError
# Compute the intrinsic effects only once
IntrinsicArgumentEffects = {}
def save_function_effect(module):
""" Recursively save function effect for pythonic functions. """
for intr in module.values():
if isinstance(intr, dict): # Submodule case
save_function_effect(intr)
else:
fe = FunctionEffects(intr)
IntrinsicArgumentEffects[intr] = fe
if isinstance(intr, intrinsic.Class):
save_function_effect(intr.fields)
for module in MODULES.values():
save_function_effect(module)
class ArgumentEffects(ModuleAnalysis):
"""Gathers inter-procedural effects on function arguments."""
def __init__(self):
# There's an edge between src and dest if a parameter of dest is
# modified by src
self.result = DiGraph()
self.node_to_functioneffect = {}
super(ArgumentEffects, self).__init__(Aliases, GlobalDeclarations,
Intrinsics)
def METHOD_NAME(self, node):
"""
        Initialise argument effects, as this analysis is inter-procedural.
        Initialisation is done for Pythonic (intrinsic) functions, and a
        default value is set for user-defined functions.
"""
super(ArgumentEffects, self).METHOD_NAME(node)
for i in self.intrinsics:
fe = IntrinsicArgumentEffects[i]
self.node_to_functioneffect[i] = fe
self.result.add_node(fe)
for n in self.global_declarations.values():
fe = FunctionEffects(n)
self.node_to_functioneffect[n] = fe
self.result.add_node(fe)
def run(self, node):
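        # Fixed-point iteration: whenever a function is found to update one of
        # its parameters, propagate that effect to the corresponding arguments
        # of its callers until no new update effects are discovered.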
result = super(ArgumentEffects, self).run(node)
candidates = set(result)
while candidates:
function = candidates.pop()
for ue in enumerate(function.update_effects):
update_effect_idx, update_effect = ue
if not update_effect:
continue
for pred in result.successors(function):
edge = result.edges[function, pred]
for fp in enumerate(edge["formal_parameters"]):
i, formal_parameter_idx = fp
# propagate the impurity backward if needed.
# Afterward we may need another graph iteration
ith_effectiv = edge["effective_parameters"][i]
if(formal_parameter_idx == update_effect_idx and
not pred.update_effects[ith_effectiv]):
pred.update_effects[ith_effectiv] = True
candidates.add(pred)
self.result = {f.func: f.update_effects for f in result}
return self.result
def argument_index(self, node):
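        # Return the index of the formal parameter aliased by `node` (possibly
        # through subscripting), or -1 if it does not alias any argument.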
while isinstance(node, ast.Subscript):
node = node.value
for node_alias in self.aliases[node]:
while isinstance(node_alias, ast.Subscript):
node_alias = node_alias.value
if node_alias in self.current_arguments:
return self.current_arguments[node_alias]
if node_alias in self.current_subscripted_arguments:
return self.current_subscripted_arguments[node_alias]
return -1
def visit_FunctionDef(self, node):
self.current_function = self.node_to_functioneffect[node]
self.current_arguments = {arg: i
for i, arg
in enumerate(node.args.args)}
self.current_subscripted_arguments = dict()
assert self.current_function in self.result
self.generic_visit(node)
def visit_For(self, node):
ai = self.argument_index(node.iter)
if ai >= 0:
self.current_subscripted_arguments[node.target] = ai
self.generic_visit(node)
def visit_AugAssign(self, node):
n = self.argument_index(node.target)
if n >= 0:
self.current_function.update_effects[n] = True
self.generic_visit(node)
def visit_Assign(self, node):
for t in node.targets:
if isinstance(t, ast.Subscript):
n = self.argument_index(t)
if n >= 0:
self.current_function.update_effects[n] = True
self.generic_visit(node)
def visit_Call(self, node):
for i, arg in enumerate(node.args):
n = self.argument_index(arg)
if n >= 0:
func_aliases = self.aliases[node.func]
# pessimistic case: no alias found
if func_aliases is None:
self.current_function.update_effects[n] = True
continue
# expand argument if any
func_aliases = reduce(
lambda x, y: x + (
# all functions
list(self.node_to_functioneffect.keys())
if (isinstance(y, ast.Name) and
self.argument_index(y) >= 0)
else [y]),
func_aliases,
list())
for func_alias in func_aliases:
# special hook for binded functions
if isinstance(func_alias, ast.Call):
bound_name = func_alias.args[0].id
func_alias = self.global_declarations[bound_name]
if func_alias is intrinsic.UnboundValue:
continue
if func_alias not in self.node_to_functioneffect:
continue
if func_alias is MODULES['functools']['partial']:
base_func_aliases = self.aliases[node.args[0]]
fe = self.node_to_functioneffect[func_alias]
if len(base_func_aliases) == 1:
base_func_alias = next(iter(base_func_aliases))
fe = self.node_to_functioneffect.get(
base_func_alias,
fe)
else:
fe = self.node_to_functioneffect[func_alias]
if not self.result.has_edge(fe, self.current_function):
self.result.add_edge(
fe,
self.current_function,
effective_parameters=[],
formal_parameters=[])
edge = self.result.edges[fe, self.current_function]
edge["effective_parameters"].append(n)
edge["formal_parameters"].append(i)
self.generic_visit(node) |
get formula error | """
This module defines what sandbox functions are made available to the Node controller,
and starts the grist sandbox. See engine.py for the API documentation.
"""
import os
import random
import sys
sys.path.append('thirdparty')
# pylint: disable=wrong-import-position
import logging
import marshal
import functools
import six
import actions
import engine
import formula_prompt
import migrations
import schema
import useractions
import objtypes
from acl_formula import parse_acl_formula
from sandbox import get_default_sandbox
from imports.register import register_import_parsers
# Handler for logging, which flushes each message.
class FlushingStreamHandler(logging.StreamHandler):
def emit(self, record):
super(FlushingStreamHandler, self).emit(record)
self.flush()
# Configure logging module to produce messages with log level and logger name.
logging.basicConfig(format="[%(levelname)s] [%(name)s] %(message)s",
handlers=[FlushingStreamHandler(sys.stderr)],
level=logging.INFO)
# The default level is INFO. If a different level is desired, add a call like this:
# log.setLevel(logging.WARNING)
log = logging.getLogger(__name__)
def table_data_from_db(table_name, table_data_repr):
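  # Decode the marshalled table data coming from the database into an
  # actions.TableData tuple: byte keys become unicode and the "id" column
  # is split out from the remaining columns.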
if table_data_repr is None:
return actions.TableData(table_name, [], {})
table_data_parsed = marshal.loads(table_data_repr)
table_data_parsed = {key.decode("utf8"): value for key, value in table_data_parsed.items()}
id_col = table_data_parsed.pop("id")
return actions.TableData(table_name, id_col,
actions.decode_bulk_values(table_data_parsed, _decode_db_value))
def _decode_db_value(value):
# Decode database values received from SQLite's allMarshal() call. These are encoded by
# marshalling certain types and storing as BLOBs (received in Python as binary strings, as
# opposed to text which is received as unicode). See also encodeValue() in DocStorage.js
t = type(value)
if t == six.binary_type:
return objtypes.decode_object(marshal.loads(value))
else:
return value
def run(sandbox):
eng = engine.Engine()
def export(method):
# Wrap each method so that it logs a message that it's being called.
@functools.wraps(method)
def wrapper(*args, **kwargs):
log.debug("calling %s", method.__name__)
return method(*args, **kwargs)
sandbox.register(method.__name__, wrapper)
def load_and_record_table_data(table_name, table_data_repr):
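    # Parse the marshalled table data and let the engine record stats about
    # it before returning the parsed TableData.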
result = table_data_from_db(table_name, table_data_repr)
eng.record_table_stats(result, table_data_repr)
return result
@export
def apply_user_actions(action_reprs, user=None):
action_group = eng.apply_user_actions([useractions.from_repr(u) for u in action_reprs], user)
result = dict(
rowCount=eng.count_rows(),
**eng.acl_split(action_group).to_json_obj()
)
if action_group.requests:
result["requests"] = action_group.requests
return result
@export
def fetch_table(table_id, formulas=True, query=None):
return actions.get_action_repr(eng.fetch_table(table_id, formulas=formulas, query=query))
@export
def fetch_table_schema():
return eng.fetch_table_schema()
@export
def autocomplete(txt, table_id, column_id, row_id, user):
return eng.autocomplete(txt, table_id, column_id, row_id, user)
@export
def find_col_from_values(values, n, opt_table_id):
return eng.find_col_from_values(values, n, opt_table_id)
@export
def fetch_meta_tables(formulas=True):
return {table_id: actions.get_action_repr(table_data)
for (table_id, table_data) in six.iteritems(eng.fetch_meta_tables(formulas))}
@export
def load_meta_tables(meta_tables, meta_columns):
return eng.load_meta_tables(load_and_record_table_data("_grist_Tables", meta_tables),
load_and_record_table_data("_grist_Tables_column", meta_columns))
@export
def load_table(table_name, table_data):
return eng.load_table(load_and_record_table_data(table_name, table_data))
@export
def get_table_stats():
return eng.get_table_stats()
@export
def create_migrations(all_tables, metadata_only=False):
doc_actions = migrations.create_migrations(
{t: table_data_from_db(t, data) for t, data in six.iteritems(all_tables)}, metadata_only)
return [actions.get_action_repr(action) for action in doc_actions]
@export
def get_version():
return schema.SCHEMA_VERSION
@export
def initialize(doc_url):
if os.environ.get("DETERMINISTIC_MODE"):
random.seed(1)
else:
# Make sure we have randomness, even if we are being cloned from a checkpoint
random.seed()
if doc_url:
os.environ['DOC_URL'] = doc_url
@export
def METHOD_NAME(table_id, col_id, row_id):
return objtypes.encode_object(eng.METHOD_NAME(table_id, col_id, row_id))
@export
def get_formula_prompt(table_id, col_id, description):
return formula_prompt.get_formula_prompt(eng, table_id, col_id, description)
@export
def convert_formula_completion(completion):
return formula_prompt.convert_completion(completion)
@export
def evaluate_formula(table_id, col_id, row_id):
return formula_prompt.evaluate_formula(eng, table_id, col_id, row_id)
export(parse_acl_formula)
export(eng.load_empty)
export(eng.load_done)
register_import_parsers(sandbox)
log.info("Ready") # This log message is significant for checkpointing.
sandbox.run()
def main():
run(get_default_sandbox())
if __name__ == "__main__":
main() |
app | import contextlib
import datetime
import functools
import itertools
import os
import random
import signal as stdlib_signal
import string
import uuid
import aiopg
import psycopg2
import pytest
from psycopg2 import sql
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
from procrastinate import aiopg_connector as aiopg_connector_module
from procrastinate import METHOD_NAME as app_module
from procrastinate import blueprints, builtin_tasks, jobs
from procrastinate import psycopg2_connector as psycopg2_connector_module
from procrastinate import schema, testing
from procrastinate.contrib.sqlalchemy import (
psycopg2_connector as sqlalchemy_psycopg2_connector_module,
)
# Just ensuring the tests are not polluted by environment
for key in os.environ:
if key.startswith("PROCRASTINATE_"):
os.environ.pop(key)
def cursor_execute(cursor, query, *identifiers, format=True):
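    # Safely interpolate SQL identifiers (e.g. database names) into the query
    # before executing it.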
if identifiers:
query = sql.SQL(query).format(
*(sql.Identifier(identifier) for identifier in identifiers)
)
cursor.execute(query)
@contextlib.contextmanager
def db_executor(dbname):
with contextlib.closing(psycopg2.connect("", dbname=dbname)) as connection:
connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with connection.cursor() as cursor:
yield functools.partial(cursor_execute, cursor)
@pytest.fixture
def db_execute():
return db_executor
def db_create(dbname, template=None):
with db_executor("postgres") as execute:
execute("DROP DATABASE IF EXISTS {}", dbname)
if template:
execute("CREATE DATABASE {} TEMPLATE {}", dbname, template)
else:
execute("CREATE DATABASE {}", dbname)
def db_drop(dbname):
with db_executor("postgres") as execute:
execute("DROP DATABASE IF EXISTS {}", dbname)
@pytest.fixture
def db_factory():
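    # Remember every database created through this factory so they can all be
    # dropped at fixture teardown.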
dbs_to_drop = []
def _(dbname, template=None):
db_create(dbname=dbname, template=template)
dbs_to_drop.append(dbname)
yield _
for dbname in dbs_to_drop:
db_drop(dbname=dbname)
@pytest.fixture(scope="session")
def setup_db():
dbname = "procrastinate_test_template"
db_create(dbname=dbname)
connector = aiopg_connector_module.AiopgConnector(dbname=dbname)
connector.open()
schema_manager = schema.SchemaManager(connector=connector)
schema_manager.apply_schema()
# We need to close the psycopg2 underlying connection synchronously
connector.close()
yield dbname
db_drop(dbname=dbname)
@pytest.fixture
def connection_params(setup_db, db_factory):
db_factory(dbname="procrastinate_test", template=setup_db)
yield {"dsn": "", "dbname": "procrastinate_test"}
@pytest.fixture
def sqlalchemy_engine_dsn(setup_db, db_factory):
db_factory(dbname="procrastinate_test", template=setup_db)
yield "postgresql+psycopg2:///procrastinate_test"
@pytest.fixture
async def connection(connection_params):
async with aiopg.connect(**connection_params) as connection:
yield connection
@pytest.fixture
async def not_opened_aiopg_connector(connection_params):
yield aiopg_connector_module.AiopgConnector(**connection_params)
@pytest.fixture
def not_opened_psycopg2_connector(connection_params):
yield psycopg2_connector_module.Psycopg2Connector(**connection_params)
@pytest.fixture
def not_opened_sqlalchemy_psycopg2_connector(sqlalchemy_engine_dsn):
yield sqlalchemy_psycopg2_connector_module.SQLAlchemyPsycopg2Connector(
dsn=sqlalchemy_engine_dsn, echo=True
)
@pytest.fixture
async def aiopg_connector(not_opened_aiopg_connector):
await not_opened_aiopg_connector.open_async()
yield not_opened_aiopg_connector
await not_opened_aiopg_connector.close_async()
@pytest.fixture
def psycopg2_connector(not_opened_psycopg2_connector):
not_opened_psycopg2_connector.open()
yield not_opened_psycopg2_connector
not_opened_psycopg2_connector.close()
@pytest.fixture
def sqlalchemy_psycopg2_connector(not_opened_sqlalchemy_psycopg2_connector):
not_opened_sqlalchemy_psycopg2_connector.open()
yield not_opened_sqlalchemy_psycopg2_connector
not_opened_sqlalchemy_psycopg2_connector.close()
@pytest.fixture
def kill_own_pid():
def f(signal=stdlib_signal.SIGTERM):
os.kill(os.getpid(), signal)
return f
@pytest.fixture
def connector():
return testing.InMemoryConnector()
@pytest.fixture
def reset_builtin_task_names():
builtin_tasks.remove_old_jobs.name = "procrastinate.builtin_tasks.remove_old_jobs"
builtin_tasks.builtin.tasks = {
task.name: task for task in builtin_tasks.builtin.tasks.values()
}
@pytest.fixture
def not_opened_app(connector, reset_builtin_task_names):
return app_module.App(connector=connector)
@pytest.fixture
def METHOD_NAME(not_opened_app):
with not_opened_app.open() as METHOD_NAME:
yield METHOD_NAME
@pytest.fixture
def blueprint():
return blueprints.Blueprint()
@pytest.fixture
def job_manager(METHOD_NAME):
return METHOD_NAME.job_manager
@pytest.fixture
def serial():
return itertools.count(1)
@pytest.fixture
def random_str():
def _(length=8):
return "".join(random.choice(string.ascii_lowercase) for _ in range(length))
return _
@pytest.fixture
def job_factory(serial, random_str):
def factory(**kwargs):
defaults = {
"id": next(serial),
"task_name": f"task_{random_str()}",
"task_kwargs": {},
"lock": str(uuid.uuid4()),
"queueing_lock": None,
"queue": f"queue_{random_str()}",
}
final_kwargs = defaults.copy()
final_kwargs.update(kwargs)
return jobs.Job(**final_kwargs)
return factory
@pytest.fixture
def deferred_job_factory(job_factory, job_manager):
async def factory(*, job_manager=job_manager, **kwargs):
job = job_factory(id=None, **kwargs)
return await job_manager.defer_job_async(job)
return factory
def aware_datetime(
year, month, day, hour=0, minute=0, second=0, microsecond=0, tz_offset=None
):
tzinfo = (
datetime.timezone(datetime.timedelta(hours=tz_offset))
if tz_offset
else datetime.timezone.utc
)
return datetime.datetime(
year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
) |
get open data params schema | # The MIT License (MIT)
# Copyright (c) 2023 by the xcube team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from abc import abstractmethod
import geopandas as gpd
import pandas as pd
from xcube.util.assertions import assert_instance
from xcube.util.fspath import is_local_fs
from xcube.util.jsonschema import JsonObjectSchema
from xcube.util.temp import new_temp_file
from ..accessor import FsDataAccessor
from ...datatype import DataType
from ...datatype import GEO_DATA_FRAME_TYPE
class GeoDataFrameFsDataAccessor(FsDataAccessor):
"""
Extension name: "geodataframe:<format_id>:<protocol>"
"""
@classmethod
def get_data_type(cls) -> DataType:
return GEO_DATA_FRAME_TYPE
@classmethod
@abstractmethod
def get_driver_name(cls) -> str:
"""Get the GeoDataFrame I/O driver name"""
def METHOD_NAME(self, data_id: str = None) \
-> JsonObjectSchema:
return JsonObjectSchema(
properties=dict(
storage_options=self.get_storage_options_schema(),
# TODO: add more, see https://geopandas.org/io.html
),
)
def open_data(self, data_id: str, **open_params) -> gpd.GeoDataFrame:
# TODO: implement me correctly,
# this is not valid for shapefile AND geojson
fs, root, open_params = self.load_fs(open_params)
is_local = is_local_fs(fs)
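        # GeoPandas needs a local file path, so for a remote filesystem the
        # data is first copied to a temporary local file.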
if is_local:
file_path = data_id
else:
_, file_path = new_temp_file()
fs.get_file(data_id, file_path)
return gpd.read_file(file_path,
driver=self.get_driver_name(),
**open_params)
def get_write_data_params_schema(self) -> JsonObjectSchema:
return JsonObjectSchema(
properties=dict(
storage_options=self.get_storage_options_schema(),
# TODO: add more, see https://geopandas.org/io.html
),
)
def write_data(self,
data: gpd.GeoDataFrame,
data_id: str,
**write_params) -> str:
# TODO: implement me correctly,
# this is not valid for shapefile AND geojson
assert_instance(data, (gpd.GeoDataFrame, pd.DataFrame), 'data')
fs, root, write_params = self.load_fs(write_params)
is_local = is_local_fs(fs)
if is_local:
file_path = data_id
else:
_, file_path = new_temp_file()
data.to_file(file_path, driver=self.get_driver_name(), **write_params)
if not is_local:
fs.put_file(file_path, data_id)
return data_id
class GeoDataFrameShapefileFsDataAccessor(GeoDataFrameFsDataAccessor):
"""
Extension name: "geodataframe:shapefile:<protocol>"
"""
@classmethod
def get_format_id(cls) -> str:
return 'shapefile'
@classmethod
def get_driver_name(cls) -> str:
return 'ESRI Shapefile'
class GeoDataFrameGeoJsonFsDataAccessor(GeoDataFrameFsDataAccessor):
"""
Extension name: "geodataframe:geojson:<protocol>"
"""
@classmethod
def get_format_id(cls) -> str:
return 'geojson'
@classmethod
def get_driver_name(cls) -> str:
return 'GeoJSON' |
time einsum i ij j | from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
import numpy as np
class Eindot(Benchmark):
def setup(self):
self.a = np.arange(60000.0).reshape(150, 400)
self.ac = self.a.copy()
self.at = self.a.T
self.atc = self.a.T.copy()
self.b = np.arange(240000.0).reshape(400, 600)
self.c = np.arange(600)
self.d = np.arange(400)
self.a3 = np.arange(480000.).reshape(60, 80, 100)
self.b3 = np.arange(192000.).reshape(80, 60, 40)
def time_dot_a_b(self):
np.dot(self.a, self.b)
def time_dot_d_dot_b_c(self):
np.dot(self.d, np.dot(self.b, self.c))
def time_dot_trans_a_at(self):
np.dot(self.a, self.at)
def time_dot_trans_a_atc(self):
np.dot(self.a, self.atc)
def time_dot_trans_at_a(self):
np.dot(self.at, self.a)
def time_dot_trans_atc_a(self):
np.dot(self.atc, self.a)
def METHOD_NAME(self):
np.einsum('i,ij,j', self.d, self.b, self.c)
def time_einsum_ij_jk_a_b(self):
np.einsum('ij,jk', self.a, self.b)
def time_einsum_ijk_jil_kl(self):
np.einsum('ijk,jil->kl', self.a3, self.b3)
def time_inner_trans_a_a(self):
np.inner(self.a, self.a)
def time_inner_trans_a_ac(self):
np.inner(self.a, self.ac)
def time_matmul_a_b(self):
np.matmul(self.a, self.b)
def time_matmul_d_matmul_b_c(self):
np.matmul(self.d, np.matmul(self.b, self.c))
def time_matmul_trans_a_at(self):
np.matmul(self.a, self.at)
def time_matmul_trans_a_atc(self):
np.matmul(self.a, self.atc)
def time_matmul_trans_at_a(self):
np.matmul(self.at, self.a)
def time_matmul_trans_atc_a(self):
np.matmul(self.atc, self.a)
def time_tensordot_a_b_axes_1_0_0_1(self):
np.tensordot(self.a3, self.b3, axes=([1, 0], [0, 1]))
class Linalg(Benchmark):
params = set(TYPES1) - set(['float16'])
param_names = ['dtype']
def setup(self, typename):
np.seterr(all='ignore')
self.a = get_squares_()[typename]
def time_svd(self, typename):
np.linalg.svd(self.a)
def time_pinv(self, typename):
np.linalg.pinv(self.a)
def time_det(self, typename):
np.linalg.det(self.a)
class LinalgNorm(Benchmark):
params = TYPES1
param_names = ['dtype']
def setup(self, typename):
self.a = get_squares_()[typename]
def time_norm(self, typename):
np.linalg.norm(self.a)
class LinalgSmallArrays(Benchmark):
""" Test overhead of linalg methods for small arrays """
def setup(self):
self.array_5 = np.arange(5.)
self.array_5_5 = np.reshape(np.arange(25.), (5, 5))
def time_norm_small_array(self):
np.linalg.norm(self.array_5)
def time_det_small_array(self):
np.linalg.det(self.array_5_5)
class Lstsq(Benchmark):
def setup(self):
self.a = get_squares_()['float64']
self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b, rcond=-1)
class Einsum(Benchmark):
param_names = ['dtype']
params = [[np.float32, np.float64]]
def setup(self, dtype):
self.one_dim_small = np.arange(600, dtype=dtype)
self.one_dim = np.arange(3000, dtype=dtype)
self.one_dim_big = np.arange(480000, dtype=dtype)
self.two_dim_small = np.arange(1200, dtype=dtype).reshape(30, 40)
self.two_dim = np.arange(240000, dtype=dtype).reshape(400, 600)
self.three_dim_small = np.arange(10000, dtype=dtype).reshape(10,100,10)
self.three_dim = np.arange(24000, dtype=dtype).reshape(20, 30, 40)
# non_contiguous arrays
self.non_contiguous_dim1_small = np.arange(1, 80, 2, dtype=dtype)
self.non_contiguous_dim1 = np.arange(1, 4000, 2, dtype=dtype)
self.non_contiguous_dim2 = np.arange(1, 2400, 2, dtype=dtype).reshape(30, 40)
self.non_contiguous_dim3 = np.arange(1, 48000, 2, dtype=dtype).reshape(20, 30, 40)
# outer(a,b): trigger sum_of_products_contig_stride0_outcontig_two
def time_einsum_outer(self, dtype):
np.einsum("i,j", self.one_dim, self.one_dim, optimize=True)
# multiply(a, b):trigger sum_of_products_contig_two
def time_einsum_multiply(self, dtype):
np.einsum("..., ...", self.two_dim_small, self.three_dim , optimize=True)
# sum and multiply:trigger sum_of_products_contig_stride0_outstride0_two
def time_einsum_sum_mul(self, dtype):
np.einsum(",i...->", 300, self.three_dim_small, optimize=True)
# sum and multiply:trigger sum_of_products_stride0_contig_outstride0_two
def time_einsum_sum_mul2(self, dtype):
np.einsum("i...,->", self.three_dim_small, 300, optimize=True)
# scalar mul: trigger sum_of_products_stride0_contig_outcontig_two
def time_einsum_mul(self, dtype):
np.einsum("i,->i", self.one_dim_big, 300, optimize=True)
# trigger contig_contig_outstride0_two
def time_einsum_contig_contig(self, dtype):
np.einsum("ji,i->", self.two_dim, self.one_dim_small, optimize=True)
# trigger sum_of_products_contig_outstride0_one
def time_einsum_contig_outstride0(self, dtype):
np.einsum("i->", self.one_dim_big, optimize=True)
# outer(a,b): non_contiguous arrays
def time_einsum_noncon_outer(self, dtype):
np.einsum("i,j", self.non_contiguous_dim1, self.non_contiguous_dim1, optimize=True)
# multiply(a, b):non_contiguous arrays
def time_einsum_noncon_multiply(self, dtype):
np.einsum("..., ...", self.non_contiguous_dim2, self.non_contiguous_dim3, optimize=True)
# sum and multiply:non_contiguous arrays
def time_einsum_noncon_sum_mul(self, dtype):
np.einsum(",i...->", 300, self.non_contiguous_dim3, optimize=True)
# sum and multiply:non_contiguous arrays
def time_einsum_noncon_sum_mul2(self, dtype):
np.einsum("i...,->", self.non_contiguous_dim3, 300, optimize=True)
# scalar mul: non_contiguous arrays
def time_einsum_noncon_mul(self, dtype):
np.einsum("i,->i", self.non_contiguous_dim1, 300, optimize=True)
# contig_contig_outstride0_two: non_contiguous arrays
def time_einsum_noncon_contig_contig(self, dtype):
np.einsum("ji,i->", self.non_contiguous_dim2, self.non_contiguous_dim1_small, optimize=True)
# sum_of_products_contig_outstride0_one:non_contiguous arrays
def time_einsum_noncon_contig_outstride0(self, dtype):
np.einsum("i->", self.non_contiguous_dim1, optimize=True)
class LinAlgTransposeVdot(Benchmark):
# Smaller for speed
# , (128, 128), (256, 256), (512, 512),
# (1024, 1024)
params = [[(16, 16), (32, 32),
(64, 64)], TYPES1]
param_names = ['shape', 'npdtypes']
def setup(self, shape, npdtypes):
self.xarg = np.random.uniform(-1, 1, np.dot(*shape)).reshape(shape)
self.xarg = self.xarg.astype(npdtypes)
self.x2arg = np.random.uniform(-1, 1, np.dot(*shape)).reshape(shape)
self.x2arg = self.x2arg.astype(npdtypes)
if npdtypes.startswith('complex'):
self.xarg += self.xarg.T*1j
self.x2arg += self.x2arg.T*1j
def time_transpose(self, shape, npdtypes):
np.transpose(self.xarg)
def time_vdot(self, shape, npdtypes):
np.vdot(self.xarg, self.x2arg) |
test data review cascades | #
# This file is part of HEPData.
# Copyright (C) 2020 CERN.
#
# HEPData is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import logging
import os.path
import shutil
from invenio_db import db
from hepdata.modules.submission.models import DataReview, DataResource, DataSubmission, \
Message, receive_before_flush
def test_data_submission_cascades(app):
# Create a data submission
recid = "12345"
datasubmission = DataSubmission(publication_recid=recid)
db.session.add(datasubmission)
db.session.commit()
# Add a data review with a message
message = Message(user=1, message="Test review message")
datareview = DataReview(publication_recid=recid,
data_recid=datasubmission.id,
messages=[message])
db.session.add(datareview)
db.session.commit()
reviews = DataReview.query.filter_by(publication_recid=recid).all()
assert(len(reviews) == 1)
assert(reviews[0] == datareview)
messages = Message.query.all()
assert(len(messages) == 1)
# Add some data resources with files
files_dir = os.path.join(app.config['CFG_DATADIR'], 'models_test')
os.makedirs(files_dir, exist_ok=True)
resources = []
for i in range(3):
file_path = os.path.join(files_dir, f'file{i}.txt')
f = open(file_path, 'w')
f.close()
dataresource = DataResource(file_location=file_path, file_type="data")
db.session.add(dataresource)
db.session.commit()
resources.append(dataresource)
# Create an extra DataResource linking to file0.txt but
# not linked to the submission
# (because this situation has arisen in prod)
dataresource = DataResource(
file_location=os.path.join(files_dir, 'file0.txt'),
file_type="data"
)
db.session.add(dataresource)
db.session.commit()
assert(len(os.listdir(files_dir)) == 3)
datasubmission.data_file = resources[0].id
datasubmission.resources = resources[1:]
db.session.add(datasubmission)
db.session.commit()
# Check we can find resources in db
dataresources = DataResource.query.filter(
DataResource.id.in_([x.id for x in resources])
).all()
assert(len(dataresources) == 3)
# Delete datasubmission
db.session.delete(datasubmission)
db.session.commit()
# Check that datareview is deleted
reviews = DataReview.query.filter_by(publication_recid=recid).all()
assert(len(reviews) == 0)
# Check that message is deleted
messages = Message.query.all()
assert(len(messages) == 0)
# Check all resources have been deleted
dataresources = DataResource.query.filter(
DataResource.id.in_([x.id for x in resources])
).all()
assert(len(dataresources) == 0)
# Check files are also deleted, apart from file0
# as that's referenced by another DataResource
assert(os.listdir(files_dir) == ['file0.txt'])
# Tidy up
shutil.rmtree(files_dir)
def METHOD_NAME(app):
# Create a data submission
recid = "12345"
datasubmission = DataSubmission(publication_recid=recid)
db.session.add(datasubmission)
db.session.commit()
# Add a data review with a message
message = Message(user=1, message="Test review message")
datareview = DataReview(publication_recid=recid,
data_recid=datasubmission.id,
messages=[message])
db.session.add(datareview)
db.session.commit()
# Check that message is created
reviews = DataReview.query.filter_by(publication_recid=recid).all()
assert(len(reviews) == 1)
assert(len(reviews[0].messages) == 1)
assert(reviews[0].messages[0].message == message.message)
review_messages = list(db.engine.execute("select * from review_messages where datareview_id = %s" % datareview.id))
assert(len(review_messages) == 1)
assert(review_messages[0].datareview_id == datareview.id)
db.session.delete(datareview)
db.session.commit()
# Check that datareview is deleted
reviews = DataReview.query.filter_by(publication_recid=recid).all()
assert(len(reviews) == 0)
# Check that message is deleted
messages = Message.query.filter_by(id=review_messages[0].message_id).all()
assert(len(messages) == 0)
def test_receive_before_flush_errors(app, mocker, caplog):
# Test that errors are logged in receive_before_flush
# We mimic errors by providing unpersisted objects to the DataResource and
# DataReview queries using mocking, so that they cannot successfully be
# deleted from the db
caplog.set_level(logging.ERROR)
recid = "12345"
datasubmission = DataSubmission(publication_recid=recid)
db.session.add(datasubmission)
db.session.commit()
mockResourceFilterBy = mocker.Mock(first=lambda: DataResource())
mockResourceQuery = mocker.Mock(filter_by=lambda id: mockResourceFilterBy)
mockDataResource = mocker.Mock(query=mockResourceQuery)
mocker.patch('hepdata.modules.submission.models.DataResource',
mockDataResource)
mockReviewFilterBy = mocker.Mock(all=lambda: [DataReview()])
mockReviewQuery = mocker.Mock(filter_by=lambda data_recid: mockReviewFilterBy)
mockDataReview = mocker.Mock(query=mockReviewQuery)
mocker.patch('hepdata.modules.submission.models.DataReview',
mockDataReview)
db.session.delete(datasubmission)
db.session.commit()
# Last error logs are what we're looking for
assert(len(caplog.records) == 2)
assert(caplog.records[0].levelname == "ERROR")
assert(caplog.records[0].msg.startswith(
"Unable to delete data resource with id None whilst deleting data submission id 1. Error was: Instance '<DataResource at "
))
assert(caplog.records[0].msg.endswith(
" is not persisted"
))
assert(caplog.records[1].levelname == "ERROR")
assert(caplog.records[1].msg.startswith(
"Unable to delete review with id None whilst deleting data submission id 1. Error was: Instance '<DataReview at "
))
assert(caplog.records[1].msg.endswith(
" is not persisted"
)) |
all classes | from GangaCore.Utility.logging import getLogger
from GangaCore.Core.exceptions import GangaValueError
logger = getLogger()
class PluginManagerError(GangaValueError):
def __init__(self, x):
super(PluginManagerError, self).__init__(self, x)
# Simple Ganga Plugin Mechanism
#
# Any object may be registered (added) in the plugin manager provided that
# you are able to specify the name and the category to which it belongs.
#
# If you do not use categories, all plugins are registered in a flat list. Otherwise
# there is a separate list of names for each category.
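#
# Typical usage (the plugin class name here is illustrative):
#
#   allPlugins.add(MyApplication, "applications", "MyApplication")
#   cls = allPlugins.find("applications", "MyApplication")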
class PluginManager(object):
__slots__ = ('all_dict', 'first', '_prev_found')
def __init__(self):
self.all_dict = {}
self.first = {}
self._prev_found = {}
def find(self, category, name):
"""
Return a plugin added with 'name' in the given 'category'.
If 'name' is None then the default plugin in the category is returned.
Typically the default plugin is the first added.
If plugin not found raise PluginManagerError.
"""
#logger.debug( "Attempting to Find Plugin: %s" % name )
#import traceback
# traceback.print_stack()
# Simple attempt to pre-load and cache Plugin lookups
key = str(category) + "_" + str(name)
if key in self._prev_found:
return self._prev_found[key]
try:
if name is not None:
if category in self.first:
# This is expected to work and is quite verbose when debugging turned on
#logger.debug("Returning based upon Category and Name")
#logger.debug("name: %s cat: %s" % (str(name), str(category)))
if name in self.all_dict[category]:
self._prev_found[key] = self.all_dict[category][name]
return self.all_dict[category][name]
if (name is None) and category is not None:
if (category in self.first):
# This is expected to work and is quite verbose when debugging turned on
#logger.debug("Returning based upon Category ONLY")
#logger.debug("name: %s cat: %s" % (str(name), str(category)))
self._prev_found[key] = self.first[category]
return self.first[category]
elif (name is not None) and (category is not None):
for category_i in self.all_dict:
for this_name in self.all_dict[category_i]:
if name == this_name:
message1 = "Category of %s, has likely changed between ganga versions!" % name
message2 = "Category Requested: %s, Category in which plugin was found: %s" % (
category, category_i)
message3 = "Attempting to use new category %s to load a stored object, this may fail!" % category_i
logger.debug(message1)
logger.debug(message2)
logger.debug(message3)
self._prev_found[key] = self.all_dict[category_i][name]
return self.all_dict[category_i][name]
except KeyError:
logger.debug("KeyError from Config system!")
except:
logger.error("Some Other unexpected ERROR!")
raise
if name is None:
s = "cannot find default plugin for category " + category
else:
s = "cannot find '%s' in a category '%s', or elsewhere" % (name, category)
if name is None and category is None:
s = "Serious Plugin Error has occured"
logger.debug(s)
raise PluginManagerError(s)
def add(self, pluginobj, category, name):
""" Add a pluginobj to the plugin manager with the name and the category labels.
The first plugin is default unless changed explicitly.
"""
cat = self.all_dict.setdefault(category, {})
self.first.setdefault(category, pluginobj)
cat[name] = pluginobj
logger.debug('adding plugin %s (category "%s") ' % (name, category))
def setDefault(self, category, name):
""" Make the plugin 'name' be default in a given 'category'.
You must first add() the plugin object before calling this method. Otherwise
PluginManagerError is raised.
"""
assert(not name is None)
pluginobj = self.find(category, name)
self.first[category] = pluginobj
def allCategories(self):
return self.all_dict
def METHOD_NAME(self, category):
cat = self.all_dict.get(category)
if cat:
return cat
else:
return {}
allPlugins = PluginManager() |
link or merge | from __future__ import annotations
from typing import Any, Generator, List, Optional, Set, Tuple, Union
from astroid import Break, Continue, NodeNG, Raise, Return
class ControlFlowGraph:
"""A graph representing the control flow of a Python program."""
start: CFGBlock
end: CFGBlock
# The unique id of this cfg. Defaults to 0 if not initialized in a CFGVisitor instance.
cfg_id: int
# block_count is used as an "autoincrement" to ensure the block ids are unique.
block_count: int
# blocks (with at least one statement) that will never be executed in runtime.
unreachable_blocks: Set[CFGBlock]
def __init__(self, cfg_id: int = 0) -> None:
self.block_count = 0
self.cfg_id = cfg_id
self.unreachable_blocks = set()
self.start = self.create_block()
self.end = self.create_block()
def create_block(
self, pred: Optional[CFGBlock] = None, edge_label: Optional[Any] = None
) -> CFGBlock:
"""Create a new CFGBlock for this graph.
If pred is specified, set that block as a predecessor of the new block.
If edge_label is specified, set the corresponding edge in the CFG with that label.
"""
new_block = CFGBlock(self.block_count)
self.unreachable_blocks.add(new_block)
self.block_count += 1
if pred:
self.METHOD_NAME(pred, new_block, edge_label)
return new_block
def link(self, source: CFGBlock, target: CFGBlock) -> None:
"""Link source to target."""
if not source.is_jump():
CFGEdge(source, target)
def METHOD_NAME(
self, source: CFGBlock, target: CFGBlock, edge_label: Optional[Any] = None
) -> None:
"""Link source to target, or merge source into target if source is empty.
An "empty" node for this purpose is when source has no statements.
source with a jump statement cannot be further linked or merged to
another target.
If edge_label is specified, set the corresponding edge in the CFG with that label.
"""
if source.is_jump():
return
if source.statements == []:
if source is self.start:
self.start = target
else:
for edge in source.predecessors:
edge.target = target
target.predecessors.append(edge)
# source is a utility block that helps build the cfg that does not
# represent any part of the program so it is redundant.
self.unreachable_blocks.remove(source)
else:
CFGEdge(source, target, edge_label)
def multiple_link_or_merge(self, source: CFGBlock, targets: List[CFGBlock]) -> None:
"""Link source to multiple target, or merge source into targets if source is empty.
An "empty" node for this purpose is when source has no statements.
source with a jump statement cannot be further linked or merged to
another target.
Precondition:
- source != cfg.start
"""
if source.statements == []:
for edge in source.predecessors:
for t in targets:
CFGEdge(edge.source, t)
edge.source.successors.remove(edge)
source.predecessors = []
self.unreachable_blocks.remove(source)
else:
for target in targets:
self.link(source, target)
def get_blocks(self) -> Generator[CFGBlock, None, None]:
"""Generate a sequence of all blocks in this graph."""
yield from self._get_blocks(self.start, set())
def _get_blocks(self, block: CFGBlock, visited: Set[int]) -> Generator[CFGBlock, None, None]:
if block.id in visited:
return
yield block
visited.add(block.id)
for edge in block.successors:
yield from self._get_blocks(edge.target, visited)
def get_blocks_postorder(self) -> Generator[CFGBlock, None, None]:
"""Return the sequence of all blocks in this graph in the order of
a post-order traversal."""
yield from self._get_blocks_postorder(self.start, set())
def _get_blocks_postorder(self, block: CFGBlock, visited) -> Generator[CFGBlock, None, None]:
if block.id in visited:
return
visited.add(block.id)
for succ in block.successors:
yield from self._get_blocks_postorder(succ.target, visited)
yield block
def get_edges(self) -> Generator[CFGEdge, None, None]:
"""Generate a sequence of all edges in this graph."""
yield from self._get_edges(self.start, set())
def _get_edges(self, block: CFGBlock, visited: Set[int]) -> Generator[CFGEdge, None, None]:
if block.id in visited:
return
visited.add(block.id)
for edge in block.successors:
yield edge
yield from self._get_edges(edge.target, visited)
def update_block_reachability(self) -> None:
for block in self.get_blocks():
block.reachable = True
if block in self.unreachable_blocks:
self.unreachable_blocks.remove(block)
class CFGBlock:
"""A node in a control flow graph.
Represents a maximal block of code whose statements are guaranteed to execute in sequence.
"""
# A unique identifier
id: int
# The statements in this block.
statements: List[NodeNG]
# This block's in-edges (from blocks that can execute immediately before this one).
predecessors: List[CFGEdge]
# This block's out-edges (to blocks that can execute immediately after this one).
successors: List[CFGEdge]
# Whether there exists a path from the start block to this block.
reachable: bool
def __init__(self, id_: int) -> None:
"""Initialize a new CFGBlock."""
self.id = id_
self.statements = []
self.predecessors = []
self.successors = []
self.reachable = False
def add_statement(self, statement: NodeNG) -> None:
if not self.is_jump():
self.statements.append(statement)
statement.cfg_block = self
@property
def jump(self) -> Optional[NodeNG]:
if len(self.statements) > 0:
return self.statements[-1]
def is_jump(self) -> bool:
"""Returns True if the block has a statement that branches
the control flow (ex: `break`)"""
return isinstance(self.jump, (Break, Continue, Return, Raise))
class CFGEdge:
"""An edge in a control flow graph.
Edges are directed, and in the future may be augmented with auxiliary metadata about the control flow.
"""
source: CFGBlock
target: CFGBlock
label: Optional[Any]
def __init__(
self, source: CFGBlock, target: CFGBlock, edge_label: Optional[Any] = None
) -> None:
self.source = source
self.target = target
self.label = edge_label
self.source.successors.append(self)
self.target.predecessors.append(self) |
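The block and edge classes above can be exercised by hand; the short sketch below is an illustrative addition (not part of the original module) and assumes CFGBlock and CFGEdge are in scope. The ids and the edge label are arbitrary.

entry_block = CFGBlock(0)
exit_block = CFGBlock(1)
edge = CFGEdge(entry_block, exit_block, edge_label="fallthrough")
# Constructing the edge registers it on both endpoints.
assert edge in entry_block.successors and edge in exit_block.predecessors
# A block with no statements has no jump statement, so it is not a jump block.
assert entry_block.jump is None
assert not entry_block.is_jump()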
create event with body inner | import json
import os
from pytest import fixture
from hypothesis import settings, HealthCheck
from chalice.app import Chalice
# From:
# http://hypothesis.readthedocs.io/en/latest/settings.html#settings-profiles
# On travis we'll have it run through more iterations.
from chalice.deploy import models
settings.register_profile(
'ci', settings(max_examples=2000,
suppress_health_check=[HealthCheck.too_slow]),
)
# When you're developing locally, we'll only run a few examples
# to keep unit tests fast. If you want to run more iterations
# locally just set HYPOTHESIS_PROFILE=ci.
settings.register_profile('dev', settings(max_examples=10))
settings.load_profile(os.getenv('HYPOTHESIS_PROFILE', 'dev'))
print("HYPOTHESIS PROFILE: %s" % os.environ.get("HYPOTHESIS_PROFILE"))
@fixture(autouse=True)
def ensure_no_local_config(no_local_config):
pass
@fixture
def sample_app():
app = Chalice('sample')
@app.route('/')
def foo():
return {}
return app
@fixture
def sample_app_with_auth():
app = Chalice('sampleauth')
@app.authorizer('myauth')
def myauth(auth_request):
pass
@app.route('/', authorizer=myauth)
def foo():
return {}
return app
@fixture
def sample_app_schedule_only():
app = Chalice('schedule_only')
@app.schedule('rate(5 minutes)')
def cron(event):
pass
return app
@fixture
def sample_sqs_event_app():
app = Chalice('sqs-event')
@app.on_sqs_message(queue='myqueue')
def handler(event):
pass
return app
@fixture
def sample_kinesis_event_app():
app = Chalice('kinesis-event')
@app.on_kinesis_record(stream='mystream')
def handler(event):
pass
return app
@fixture
def sample_ddb_event_app():
app = Chalice('ddb-event')
@app.on_dynamodb_record(stream_arn='arn:aws:...:stream')
def handler(event):
pass
return app
@fixture
def sample_app_lambda_only():
app = Chalice('lambda_only')
@app.lambda_function()
def myfunction(event, context):
pass
return app
@fixture
def sample_websocket_app():
app = Chalice('sample')
@app.on_ws_connect()
def connect():
pass
@app.on_ws_message()
def message():
pass
@app.on_ws_disconnect()
def disconnect():
pass
return app
@fixture
def sample_s3_event_app():
app = Chalice('s3-event')
@app.on_s3_event(bucket='mybucket')
def handler(event):
pass
return app
@fixture
def sample_sns_event_app():
app = Chalice('sns-event')
@app.on_sns_message(topic='mytopic')
def handler(event):
pass
return app
@fixture
def sample_cloudwatch_event_app():
app = Chalice('cloudwatch-event')
@app.on_cw_event({'source': {'source': ['aws.ec2']}})
def foo(event):
return event
return app
@fixture
def create_event():
def create_event_inner(uri, method, path, content_type='application/json'):
return {
'requestContext': {
'httpMethod': method,
'resourcePath': uri,
},
'headers': {
'Content-Type': content_type,
},
'pathParameters': path,
'multiValueQueryStringParameters': None,
'body': "",
'stageVariables': {},
}
return create_event_inner
@fixture
def create_websocket_event():
def create_event_inner(
route_key, body='',
endpoint='abcd1234.execute-api.us-west-2.amazonaws.com'):
return {
'requestContext': {
'routeKey': route_key,
'domainName': endpoint,
'stage': 'api',
'connectionId': 'ABCD1234=',
'apiId': 'abcd1234',
},
'body': body,
}
return create_event_inner
@fixture
def create_empty_header_event():
def create_empty_header_event_inner(uri, method, path,
content_type='application/json'):
return {
'requestContext': {
'httpMethod': method,
'resourcePath': uri,
},
'headers': None,
'pathParameters': path,
'multiValueQueryStringParameters': None,
'body': "",
'stageVariables': {},
}
return create_empty_header_event_inner
@fixture
def create_event_with_body(create_event):
def METHOD_NAME(body, uri='/', method='POST',
content_type='application/json'):
event = create_event(uri, method, {}, content_type)
if content_type == 'application/json':
body = json.dumps(body)
event['body'] = body
return event
return METHOD_NAME
@fixture
def lambda_function():
return models.LambdaFunction(
resource_name='foo',
function_name='app-stage-foo',
deployment_package=None,
environment_variables={},
runtime='python2.7',
handler='app.app',
tags={},
timeout=None,
memory_size=None,
role=models.PreCreatedIAMRole(role_arn='foobar'),
security_group_ids=[],
subnet_ids=[],
layers=[],
reserved_concurrency=None,
xray=None,
) |
ballot item retrieve view | # apis_v1/views/views_ballot.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from ballot.controllers import all_ballot_items_retrieve_for_api, ballot_item_highlights_retrieve_for_api, \
ballot_item_options_retrieve_for_api, ballot_items_search_retrieve_for_api
from candidate.controllers import candidate_retrieve_for_api
from config.base import get_environment_variable
from django.http import HttpResponse
import json
from measure.controllers import measure_retrieve_for_api
from office.controllers import office_retrieve_for_api
from ballot.models import OFFICE, CANDIDATE, MEASURE
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, get_voter_device_id
from voter.controllers import email_ballot_data_for_api
logger = wevote_functions.admin.get_logger(__name__)
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
def all_ballot_items_retrieve_view(request): # allBallotItemsRetrieve
"""
Return all the ballot data requested for an election
:param request:
:return:
"""
    # If passed in, we want to look at the specified election
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
use_test_election = positive_value_exists(request.GET.get('use_test_election', False))
if use_test_election:
google_civic_election_id = 2000 # The Google Civic test election
json_data = all_ballot_items_retrieve_for_api(google_civic_election_id, state_code)
return HttpResponse(json.dumps(json_data), content_type='application/json')
def ballot_item_highlights_retrieve_view(request): # ballotItemHighlightsRetrieve
starting_year = request.GET.get('starting_year', 0)
json_data = ballot_item_highlights_retrieve_for_api(starting_year)
response = HttpResponse(json.dumps(json_data), content_type='application/json')
return response
def ballot_item_options_retrieve_view(request): # ballotItemOptionsRetrieve
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
search_string = request.GET.get('search_string', '')
state_code = request.GET.get('state_code', '')
results = ballot_item_options_retrieve_for_api(google_civic_election_id, search_string, state_code)
response = HttpResponse(json.dumps(results['json_data']), content_type='application/json')
return response
def METHOD_NAME(request): # ballotItemRetrieve
kind_of_ballot_item = request.GET.get('kind_of_ballot_item', '')
ballot_item_id = request.GET.get('ballot_item_id', 0)
ballot_item_we_vote_id = request.GET.get('ballot_item_we_vote_id', None)
if not positive_value_exists(kind_of_ballot_item) or kind_of_ballot_item not in (OFFICE, CANDIDATE, MEASURE):
status = 'VALID_BALLOT_ITEM_TYPE_MISSING'
json_data = {
'status': status,
'success': False,
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_id': ballot_item_id,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
if kind_of_ballot_item == OFFICE:
return office_retrieve_for_api(ballot_item_id, ballot_item_we_vote_id)
elif kind_of_ballot_item == CANDIDATE:
return candidate_retrieve_for_api(ballot_item_id, ballot_item_we_vote_id)
elif kind_of_ballot_item == MEASURE:
return measure_retrieve_for_api(ballot_item_id, ballot_item_we_vote_id)
else:
status = 'BALLOT_ITEM_RETRIEVE_UNKNOWN_ERROR'
json_data = {
'status': status,
'success': False,
'kind_of_ballot_item': kind_of_ballot_item,
'ballot_item_id': ballot_item_id,
'ballot_item_we_vote_id': ballot_item_we_vote_id,
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
def ballot_items_search_retrieve_view(request): # ballotItemsSearchRetrieve
"""
Return ballot search results
NOT FULLY IMPLEMENTED -- See ballotItemOptionsRetrieve instead
:param request:
:return:
"""
# google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0)) # Convert to list
search_string = request.GET.get('search_string', '')
# state_code = request.GET.get('state_code', '') # Convert to list
json_data = ballot_items_search_retrieve_for_api(search_string)
return HttpResponse(json.dumps(json_data), content_type='application/json')
def email_ballot_data_view(request): # emailBallotData
"""
:param request:
:return:
"""
voter_device_id = get_voter_device_id(request) # We standardize how we take in the voter_device_id
email_address_array = request.GET.getlist('email_address_array[]', "")
first_name_array = request.GET.getlist('first_name_array[]', "")
last_name_array = request.GET.getlist('last_name_array[]', "")
email_addresses_raw = request.GET.get('email_addresses_raw', "")
invitation_message = request.GET.get('invitation_message', "")
ballot_link = request.GET.get('ballot_link', "")
sender_email_address = request.GET.get('sender_email_address', "")
verification_email_sent = positive_value_exists(request.GET.get('verification_email_sent', False))
hostname = request.GET.get('hostname', "")
results = email_ballot_data_for_api(voter_device_id, email_address_array, first_name_array,
last_name_array, email_addresses_raw,
invitation_message, ballot_link, sender_email_address,
verification_email_sent,
web_app_root_url=hostname)
json_data = {
'status': results['status'],
'success': results['success'],
'voter_device_id': voter_device_id,
'sender_voter_email_address_missing': results['sender_voter_email_address_missing'],
}
return HttpResponse(json.dumps(json_data), content_type='application/json') |
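These views are plain Django view functions, so they are exposed through an ordinary URLconf. The wiring below is a hypothetical sketch; the route names and import path are illustrative, and the real project defines its own apis_v1/urls.py.

from django.urls import path
from apis_v1.views import views_ballot

urlpatterns = [
    path('allBallotItemsRetrieve/', views_ballot.all_ballot_items_retrieve_view),
    path('ballotItemOptionsRetrieve/', views_ballot.ballot_item_options_retrieve_view),
    path('emailBallotData/', views_ballot.email_ballot_data_view),
]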
visit alternate type | #!/usr/bin/env python3
#
# QAPI parser test harness
#
# Copyright (c) 2013 Red Hat Inc.
#
# Authors:
# Markus Armbruster <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
#
import argparse
import difflib
import os
import sys
from io import StringIO
from qapi.error import QAPIError
from qapi.schema import QAPISchema, QAPISchemaVisitor
class QAPISchemaTestVisitor(QAPISchemaVisitor):
def visit_module(self, name):
print('module %s' % name)
def visit_include(self, name, info):
print('include %s' % name)
def visit_enum_type(self, name, info, ifcond, features, members, prefix):
print('enum %s' % name)
if prefix:
print(' prefix %s' % prefix)
for m in members:
print(' member %s' % m.name)
self._print_if(m.ifcond, indent=8)
self._print_features(m.features, indent=8)
self._print_if(ifcond)
self._print_features(features)
def visit_array_type(self, name, info, ifcond, element_type):
if not info:
return # suppress built-in arrays
print('array %s %s' % (name, element_type.name))
self._print_if(ifcond)
def visit_object_type(self, name, info, ifcond, features,
base, members, variants):
print('object %s' % name)
if base:
print(' base %s' % base.name)
for m in members:
print(' member %s: %s optional=%s'
% (m.name, m.type.name, m.optional))
self._print_if(m.ifcond, 8)
self._print_features(m.features, indent=8)
self._print_variants(variants)
self._print_if(ifcond)
self._print_features(features)
def METHOD_NAME(self, name, info, ifcond, features, variants):
print('alternate %s' % name)
self._print_variants(variants)
self._print_if(ifcond)
self._print_features(features)
def visit_command(self, name, info, ifcond, features,
arg_type, ret_type, gen, success_response, boxed,
allow_oob, allow_preconfig, coroutine):
print('command %s %s -> %s'
% (name, arg_type and arg_type.name,
ret_type and ret_type.name))
print(' gen=%s success_response=%s boxed=%s oob=%s preconfig=%s%s'
% (gen, success_response, boxed, allow_oob, allow_preconfig,
" coroutine=True" if coroutine else ""))
self._print_if(ifcond)
self._print_features(features)
def visit_event(self, name, info, ifcond, features, arg_type, boxed):
print('event %s %s' % (name, arg_type and arg_type.name))
print(' boxed=%s' % boxed)
self._print_if(ifcond)
self._print_features(features)
@staticmethod
def _print_variants(variants):
if variants:
print(' tag %s' % variants.tag_member.name)
for v in variants.variants:
print(' case %s: %s' % (v.name, v.type.name))
QAPISchemaTestVisitor._print_if(v.ifcond, indent=8)
@staticmethod
def _print_if(ifcond, indent=4):
# TODO Drop this hack after replacing OrderedDict by plain
# dict (requires Python 3.7)
def _massage(subcond):
if isinstance(subcond, str):
return subcond
if isinstance(subcond, list):
return [_massage(val) for val in subcond]
return {key: _massage(val) for key, val in subcond.items()}
if ifcond.is_present():
print('%sif %s' % (' ' * indent, _massage(ifcond.ifcond)))
@classmethod
def _print_features(cls, features, indent=4):
if features:
for f in features:
print('%sfeature %s' % (' ' * indent, f.name))
cls._print_if(f.ifcond, indent + 4)
def test_frontend(fname):
schema = QAPISchema(fname)
schema.visit(QAPISchemaTestVisitor())
for doc in schema.docs:
if doc.symbol:
print('doc symbol=%s' % doc.symbol)
else:
print('doc freeform')
print(' body=\n%s' % doc.body.text)
for arg, section in doc.args.items():
print(' arg=%s\n%s' % (arg, section.text))
for feat, section in doc.features.items():
print(' feature=%s\n%s' % (feat, section.text))
for section in doc.sections:
print(' section=%s\n%s' % (section.name, section.text))
def open_test_result(dir_name, file_name, update):
mode = 'r+' if update else 'r'
try:
fp = open(os.path.join(dir_name, file_name), mode)
except FileNotFoundError:
if not update:
raise
fp = open(os.path.join(dir_name, file_name), 'w+')
return fp
def test_and_diff(test_name, dir_name, update):
sys.stdout = StringIO()
try:
test_frontend(os.path.join(dir_name, test_name + '.json'))
except QAPIError as err:
errstr = str(err) + '\n'
if dir_name:
errstr = errstr.replace(dir_name + '/', '')
actual_err = errstr.splitlines(True)
else:
actual_err = []
finally:
actual_out = sys.stdout.getvalue().splitlines(True)
sys.stdout.close()
sys.stdout = sys.__stdout__
try:
outfp = open_test_result(dir_name, test_name + '.out', update)
errfp = open_test_result(dir_name, test_name + '.err', update)
expected_out = outfp.readlines()
expected_err = errfp.readlines()
except OSError as err:
print("%s: can't open '%s': %s"
% (sys.argv[0], err.filename, err.strerror),
file=sys.stderr)
return 2
if actual_out == expected_out and actual_err == expected_err:
return 0
print("%s %s" % (test_name, 'UPDATE' if update else 'FAIL'),
file=sys.stderr)
out_diff = difflib.unified_diff(expected_out, actual_out, outfp.name)
err_diff = difflib.unified_diff(expected_err, actual_err, errfp.name)
sys.stdout.writelines(out_diff)
sys.stdout.writelines(err_diff)
if not update:
return 1
try:
outfp.truncate(0)
outfp.seek(0)
outfp.writelines(actual_out)
errfp.truncate(0)
errfp.seek(0)
errfp.writelines(actual_err)
except OSError as err:
print("%s: can't write '%s': %s"
% (sys.argv[0], err.filename, err.strerror),
file=sys.stderr)
return 2
return 0
def main(argv):
parser = argparse.ArgumentParser(
description='QAPI schema tester')
parser.add_argument('-d', '--dir', action='store', default='',
help="directory containing tests")
parser.add_argument('-u', '--update', action='store_true',
help="update expected test results")
parser.add_argument('tests', nargs='*', metavar='TEST', action='store')
args = parser.parse_args()
status = 0
for t in args.tests:
(dir_name, base_name) = os.path.split(t)
dir_name = dir_name or args.dir
test_name = os.path.splitext(base_name)[0]
status |= test_and_diff(test_name, dir_name, args.update)
exit(status)
if __name__ == '__main__':
main(sys.argv)
exit(0) |
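The harness can also be driven programmatically; below is a minimal sketch (an addition, not part of the file, with placeholder paths for a QEMU checkout) that checks one schema against its .out/.err references without updating them.

rc = test_and_diff('comments', 'tests/qapi-schema', update=False)
print('PASS' if rc == 0 else 'MISMATCH OR ERROR (%d)' % rc)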
test deserialize | from pathlib import Path
import pytest
import yaml
from azure.ai.ml import Input, load_job
from azure.ai.ml._restclient.v2023_04_01_preview.models import InputDeliveryMode, JobOutputType, OutputDeliveryMode
from azure.ai.ml._schema import SparkJobSchema
from azure.ai.ml.constants._common import BASE_PATH_CONTEXT_KEY
from azure.ai.ml.entities import SparkJob
from azure.ai.ml.entities._job.to_rest_functions import to_rest_job_object
from azure.ai.ml.exceptions import ValidationException
@pytest.mark.unittest
@pytest.mark.training_experiences_test
class TestSparkJobSchema:
def METHOD_NAME(self):
test_path = "./tests/test_configs/spark_job/spark_job_test.yml"
with open("./tests/test_configs/spark_job/spark_job_rest.json", "r") as f:
target = yaml.safe_load(f)
with open(test_path, "r") as f:
cfg = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: Path(test_path).parent}
schema = SparkJobSchema(context=context)
internal_representation: SparkJob = SparkJob(**schema.load(cfg))
source = internal_representation._to_rest_object()
assert source.name == target["name"]
assert source.properties.conf == target["conf"]
assert source.properties.code_id == target["code"]
def test_invalid_runtime_version(self):
test_path = "./tests/test_configs/spark_job/spark_job_invalid_runtime.yml"
with open(test_path, "r") as f:
cfg = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: Path(test_path).parent}
schema = SparkJobSchema(context=context)
internal_representation: SparkJob = SparkJob(**schema.load(cfg))
with pytest.raises(ValidationException) as ve:
source = internal_representation._to_rest_object()
assert ve.message == "runtime version should be either 3.2 or 3.3"
def test_invalid_instance_type(self):
test_path = "./tests/test_configs/spark_job/spark_job_invalid_instance_type.yml"
with open(test_path, "r") as f:
cfg = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: Path(test_path).parent}
schema = SparkJobSchema(context=context)
internal_representation: SparkJob = SparkJob(**schema.load(cfg))
with pytest.raises(ValidationException) as ve:
source = internal_representation._to_rest_object()
assert (
ve.message
== "Instance type must be specified for the list of standard_e4s_v3,standard_e8s_v3,standard_e16s_v3,standard_e32s_v3,standard_e64s_v3"
)
def test_deserialize_inputs(self):
test_path = "./tests/test_configs/spark_job/spark_job_inputs_outputs_test.yml"
with open("./tests/test_configs/spark_job/spark_job_inputs_outputs_rest.json", "r") as f:
target = yaml.safe_load(f)
with open(test_path, "r") as f:
cfg = yaml.safe_load(f)
context = {BASE_PATH_CONTEXT_KEY: Path(test_path).parent}
schema = SparkJobSchema(context=context)
internal_representation: SparkJob = SparkJob(**schema.load(cfg))
source = internal_representation._to_rest_object()
assert source.properties.inputs["input1"].uri == target["inputs"]["input1"]["uri"]
assert source.properties.inputs["input1"].mode == target["inputs"]["input1"]["mode"]
def test_input_is_loaded_from_dictionary(self):
spark_job = SparkJob(
code="./tests/test_configs/spark_job/basic_spark_job/src",
entry={"file": "./main.py"},
inputs={
"input1": Input(
type="uri_file", path="azureml://datastores/workspaceblobstore/paths/python/data.csv", mode="direct"
)
},
compute="douglassynapse",
environment="AzureML-sklearn-1.0-ubuntu20.04-py38-cpu:33",
resources={
"instance_type": "Standard_E8S_V3",
"runtime_version": "3.2.0",
},
)
assert isinstance(spark_job.inputs["input1"], Input)
def test_standalone_job_inputs_outputs(self):
original_entity = load_job(Path("./tests/test_configs/spark_job/spark_job_inputs_outputs_test.yml"))
rest_representation = to_rest_job_object(original_entity)
assert rest_representation.properties.inputs["input1"].mode == InputDeliveryMode.DIRECT
assert rest_representation.properties.outputs["output1"].mode == OutputDeliveryMode.DIRECT
assert (
rest_representation.properties.inputs["input1"].uri
== "https://azuremlexamples.blob.core.windows.net/datasets/iris.csv"
)
assert rest_representation.properties.outputs["output1"].job_output_type == JobOutputType.URI_FILE |
query list versions | # Parsec Cloud (https://parsec.cloud) Copyright (c) BUSL-1.1 2016-present Scille SAS
from __future__ import annotations
from typing import Dict, Tuple
import triopg
from parsec._parsec import DateTime, DeviceID, OrganizationID, RealmID, VlobID
from parsec.backend.postgresql.utils import (
Q,
q_device,
q_organization_internal_id,
q_realm_internal_id,
q_vlob_encryption_revision_internal_id,
query,
)
from parsec.backend.postgresql.vlob_queries.utils import (
_check_realm_and_read_access,
_get_last_role_granted_on,
_get_realm_id_from_vlob_id,
)
from parsec.backend.vlob import VlobNotFoundError, VlobVersionError
_q_read_data_without_timestamp = Q(
f"""
SELECT
version,
blob,
{ q_device(_id="author", select="device_id") } as author,
created_on
FROM vlob_atom
WHERE
vlob_encryption_revision = {
q_vlob_encryption_revision_internal_id(
organization_id="$organization_id",
realm_id="$realm_id",
encryption_revision="$encryption_revision",
)
}
AND vlob_id = $vlob_id
ORDER BY version DESC
LIMIT 1
"""
)
_q_read_data_with_timestamp = Q(
f"""
SELECT
version,
blob,
{ q_device(_id="author", select="device_id") } as author,
created_on
FROM vlob_atom
WHERE
vlob_encryption_revision = {
q_vlob_encryption_revision_internal_id(
organization_id="$organization_id",
realm_id="$realm_id",
encryption_revision="$encryption_revision",
)
}
AND vlob_id = $vlob_id
AND created_on <= $timestamp
ORDER BY version DESC
LIMIT 1
"""
)
_q_read_data_with_version = Q(
f"""
SELECT
version,
blob,
{ q_device(_id="author", select="device_id") } as author,
created_on
FROM vlob_atom
WHERE
vlob_encryption_revision = {
q_vlob_encryption_revision_internal_id(
organization_id="$organization_id",
realm_id="$realm_id",
encryption_revision="$encryption_revision",
)
}
AND vlob_id = $vlob_id
AND version = $version
"""
)
@query(in_transaction=True)
async def query_read(
conn: triopg._triopg.TrioConnectionProxy,
organization_id: OrganizationID,
author: DeviceID,
encryption_revision: int,
vlob_id: VlobID,
version: int | None = None,
timestamp: DateTime | None = None,
) -> Tuple[int, bytes, DeviceID, DateTime, DateTime]:
realm_id = await _get_realm_id_from_vlob_id(conn, organization_id, vlob_id)
await _check_realm_and_read_access(conn, organization_id, author, realm_id, encryption_revision)
if version is None:
if timestamp is None:
data = await conn.fetchrow(
*_q_read_data_without_timestamp(
organization_id=organization_id.str,
realm_id=realm_id,
encryption_revision=encryption_revision,
vlob_id=vlob_id,
)
)
assert data # _get_realm_id_from_vlob_id checks vlob presence
else:
data = await conn.fetchrow(
*_q_read_data_with_timestamp(
organization_id=organization_id.str,
realm_id=realm_id,
encryption_revision=encryption_revision,
vlob_id=vlob_id,
timestamp=timestamp,
)
)
if not data:
raise VlobVersionError()
else:
data = await conn.fetchrow(
*_q_read_data_with_version(
organization_id=organization_id.str,
realm_id=realm_id,
encryption_revision=encryption_revision,
vlob_id=vlob_id,
version=version,
)
)
if not data:
raise VlobVersionError()
version, blob, vlob_author, created_on = data
assert isinstance(version, int)
assert isinstance(blob, bytes)
vlob_author = DeviceID(vlob_author)
author_last_role_granted_on = await _get_last_role_granted_on(
conn, organization_id, realm_id, vlob_author
)
assert isinstance(author_last_role_granted_on, DateTime)
return version, blob, vlob_author, created_on, author_last_role_granted_on
_q_poll_changes = Q(
f"""
SELECT
index,
vlob_id,
vlob_atom.version
FROM realm_vlob_update
LEFT JOIN vlob_atom ON realm_vlob_update.vlob_atom = vlob_atom._id
WHERE
realm = { q_realm_internal_id(organization_id="$organization_id", realm_id="$realm_id") }
AND index > $checkpoint
ORDER BY index ASC
"""
)
_q_list_versions = Q(
f"""
SELECT
version,
{ q_device(_id="author", select="device_id") } as author,
created_on
FROM vlob_atom
WHERE
organization = { q_organization_internal_id("$organization_id") }
AND vlob_id = $vlob_id
ORDER BY version DESC
"""
)
@query(in_transaction=True)
async def query_poll_changes(
conn: triopg._triopg.TrioConnectionProxy,
organization_id: OrganizationID,
author: DeviceID,
realm_id: RealmID,
checkpoint: int,
) -> Tuple[int, Dict[VlobID, int]]:
await _check_realm_and_read_access(conn, organization_id, author, realm_id, None)
ret = await conn.fetch(
*_q_poll_changes(
organization_id=organization_id.str, realm_id=realm_id, checkpoint=checkpoint
)
)
changes_since_checkpoint: Dict[VlobID, int] = {
VlobID.from_hex(src_id): src_version for _, src_id, src_version in ret
}
new_checkpoint: int = ret[-1][0] if ret else checkpoint
return (new_checkpoint, changes_since_checkpoint)
@query(in_transaction=True)
async def METHOD_NAME(
conn: triopg._triopg.TrioConnectionProxy,
organization_id: OrganizationID,
author: DeviceID,
vlob_id: VlobID,
) -> Dict[int, Tuple[DateTime, DeviceID]]:
realm_id = await _get_realm_id_from_vlob_id(conn, organization_id, vlob_id)
await _check_realm_and_read_access(conn, organization_id, author, realm_id, None)
rows = await conn.fetch(*_q_list_versions(organization_id=organization_id.str, vlob_id=vlob_id))
assert rows
if not rows:
raise VlobNotFoundError(f"Vlob `{vlob_id.hex}` doesn't exist")
return {
row["version"]: (
row["created_on"],
DeviceID(row["author"]),
)
for row in rows
} |
test bosch hale d d he3p | # bluemira is an integrated inter-disciplinary design tool for future fusion
# reactors. It incorporates several modules, some of which rely on other
# codes, to carry out a range of typical conceptual fusion reactor design
# activities.
#
# Copyright (C) 2021-2023 M. Coleman, J. Cook, F. Franza, I.A. Maione, S. McIntosh,
# J. Morris, D. Short
#
# bluemira is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# bluemira is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with bluemira; if not, see <https://www.gnu.org/licenses/>.
import json
import os
import numpy as np
import pytest
from bluemira.base.constants import raw_uc
from bluemira.base.file import get_bluemira_path
from bluemira.plasma_physics.reactions import E_DD_fusion, E_DT_fusion, reactivity
class TestReactionEnergies:
def _msg(self, e, v):
delta = e - v
relate = "higher" if delta > 0 else "lower"
return "E=mc^2 value {0:.2f} MeV {1} than Kikuchi " "reference.".format(
delta * 1e-6, relate
)
def test_DT(self): # noqa :N802
e_dt_kikuchi = (3.5 + 14.1) * 1e6
e, v = E_DT_fusion(), e_dt_kikuchi
assert np.isclose(e, v, rtol=1e-3), self._msg(e, v)
def test_DD(self): # noqa :N802
e_dd_kikuchi = np.array([1.01 + 3.02, 0.82 + 2.45]) * 1e6
e, v = E_DD_fusion(), np.average(e_dd_kikuchi)
assert np.isclose(e, v, rtol=1e-3), self._msg(e, v)
@pytest.fixture
def xfail_DD_He3p_erratum_erratum(request):
"""
As far as I can tell, there is either something wrong with the parameterisation,
or more likely with the data presented in:
H.-S. Bosch and G.M. Hale 1993 Nucl. Fusion 33 1919
"""
t = request.getfixturevalue("temp_kev")
if t == 1.3:
request.node.add_marker(pytest.mark.xfail(reason="Error in erratum data?"))
class TestReactivity:
"""
H.-S. Bosch and G.M. Hale 1993 Nucl. Fusion 33 1919
"""
path = get_bluemira_path("plasma_physics/test_data", subfolder="tests")
filename = "reactivity_Bosch_Hale_1993.json"
file_path = os.path.join(path, filename)
with open(file_path, "r") as file:
data = json.load(file)
temp = np.array(data["temperature_kev"])
sv_DT = np.array(data["sv_DT_m3s"]) # noqa: N815
sv_DHe3 = np.array(data["sv_DHe3_m3s"]) # noqa: N815
sv_DD_He3p = np.array(data["sv_DD_He3p_m3s"]) # noqa: N815
sv_DD_Tp = np.array(data["sv_DD_Tp_m3s"]) # noqa: N815
@pytest.mark.parametrize("method, rtol", [("Bosch-Hale", 0.0025), ("PLASMOD", 0.1)])
@pytest.mark.parametrize("temp_kev, sigmav", np.c_[temp, sv_DT])
def test_Bosch_Hale_DT(self, temp_kev, sigmav, method, rtol):
temp_k = raw_uc(temp_kev, "keV", "K")
result = reactivity(temp_k, reaction="D-T", method=method)
np.testing.assert_allclose(result, sigmav, rtol=rtol, atol=0)
@pytest.mark.parametrize("temp_kev, sigmav", np.c_[temp, sv_DHe3])
def test_Bosch_Hale_DHe(self, temp_kev, sigmav):
temp_k = raw_uc(temp_kev, "keV", "K")
result = reactivity(temp_k, reaction="D-He3", method="Bosch-Hale")
np.testing.assert_allclose(result, sigmav, rtol=0.003, atol=0)
@pytest.mark.parametrize("temp_kev, sigmav", np.c_[temp, sv_DD_He3p])
@pytest.mark.usefixtures("xfail_DD_He3p_erratum_erratum")
def METHOD_NAME(self, temp_kev, sigmav):
temp_k = raw_uc(temp_kev, "keV", "K")
result = reactivity(temp_k, reaction="D-D1", method="Bosch-Hale")
np.testing.assert_allclose(result, sigmav, rtol=0.003, atol=0)
@pytest.mark.parametrize("temp_kev, sigmav", np.c_[temp, sv_DD_Tp])
def test_Bosch_Hale_DD_Tp(self, temp_kev, sigmav):
temp_k = raw_uc(temp_kev, "keV", "K")
result = reactivity(temp_k, reaction="D-D2", method="Bosch-Hale")
np.testing.assert_allclose(result, sigmav, rtol=0.0035, atol=0) |
test signmessage | # This file is part of the Trezor project.
#
# Copyright (C) 2012-2021 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import pytest
from trezorlib import btc, messages
from trezorlib.debuglink import TrezorClientDebugLink as Client
from trezorlib.tools import parse_path
from .signtx import forge_prevtx
VECTORS = ( # path, script_types
# GreenAddress A m/[1,4]/address_index
(
"m/4/255",
(
messages.InputScriptType.SPENDADDRESS,
messages.InputScriptType.SPENDWITNESS,
messages.InputScriptType.SPENDP2SHWITNESS,
),
),
# GreenAddress B m/3'/[1-100]'/[1,4]/address_index
(
"m/3h/100h/4/255",
(
messages.InputScriptType.SPENDADDRESS,
messages.InputScriptType.SPENDWITNESS,
messages.InputScriptType.SPENDP2SHWITNESS,
),
),
    # GreenAddress Sign A m/1195487518
(
"m/1195487518",
(
messages.InputScriptType.SPENDADDRESS,
messages.InputScriptType.SPENDWITNESS,
messages.InputScriptType.SPENDP2SHWITNESS,
),
),
    # GreenAddress Sign B m/1195487518/6/address_index
(
"m/1195487518/6/255",
(
messages.InputScriptType.SPENDADDRESS,
messages.InputScriptType.SPENDWITNESS,
messages.InputScriptType.SPENDP2SHWITNESS,
),
),
# Casa m/49/coin_type/account/change/address_index
(
"m/49/0/63/0/255",
(messages.InputScriptType.SPENDP2SHWITNESS,),
),
)
# 2-of-3 multisig, first path is ours
VECTORS_MULTISIG = ( # paths, address_index
# GreenAddress A m/[1,4]/address_index
(("m/1", "m/1", "m/4"), [255]),
# GreenAddress B m/3'/[1-100]'/[1,4]/address_index
(("m/3h/100h/1", "m/3h/99h/1", "m/3h/98h/1"), [255]),
    # GreenAddress Sign A m/1195487518
(("m/1195487518", "m/1195487518", "m/1195487518"), []),
    # GreenAddress Sign B m/1195487518/6/address_index
(("m/1195487518/6", "m/1195487518/6", "m/1195487518/6"), [255]),
# Unchained hardened m/45'/coin_type'/account'/[0-1000000]/change/address_index
(
("m/45h/0h/63h/1000000", "m/45h/0h/62h/1000000", "m/45h/0h/61h/1000000"),
[0, 255],
),
# Unchained unhardened m/45'/coin_type/account/[0-1000000]/change/address_index
(("m/45h/0/63/1000000", "m/45h/0/62/1000000", "m/45h/0/61/1000000"), [0, 255]),
# Unchained deprecated m/45'/coin_type'/account'/[0-1000000]/address_index
(("m/45h/0h/63h/1000000", "m/45h/0h/62h/1000000", "m/45h/0/61/1000000"), [255]),
# Casa Paths
(("m/45h/0/60/1", "m/45h/1/60/0", "m/45h/2/60/0"), [255]),
)
# Has AlwaysMatchingSchema but let's make sure the nonstandard paths are
# accepted in case we make this more restrictive in the future.
@pytest.mark.parametrize("path, script_types", VECTORS)
def test_getpublicnode(client: Client, path, script_types):
for script_type in script_types:
res = btc.get_public_node(
client, parse_path(path), coin_name="Bitcoin", script_type=script_type
)
assert res.xpub
@pytest.mark.parametrize("path, script_types", VECTORS)
def test_getaddress(client: Client, path, script_types):
for script_type in script_types:
res = btc.get_address(
client,
"Bitcoin",
parse_path(path),
show_display=True,
script_type=script_type,
)
assert res
@pytest.mark.parametrize("path, script_types", VECTORS)
def METHOD_NAME(client: Client, path, script_types):
for script_type in script_types:
sig = btc.sign_message(
client,
coin_name="Bitcoin",
n=parse_path(path),
script_type=script_type,
message="This is an example of a signed message.",
)
assert sig.signature
@pytest.mark.parametrize("path, script_types", VECTORS)
def test_signtx(client: Client, path, script_types):
address_n = parse_path(path)
for script_type in script_types:
address = btc.get_address(client, "Bitcoin", address_n, script_type=script_type)
prevhash, prevtx = forge_prevtx([(address, 390_000)])
inp1 = messages.TxInputType(
address_n=address_n,
amount=390_000,
prev_hash=prevhash,
prev_index=0,
script_type=script_type,
)
out1 = messages.TxOutputType(
address="1MJ2tj2ThBE62zXbBYA5ZaN3fdve5CPAz1",
amount=390_000 - 10_000,
script_type=messages.OutputScriptType.PAYTOADDRESS,
)
_, serialized_tx = btc.sign_tx(
client, "Bitcoin", [inp1], [out1], prev_txes={prevhash: prevtx}
)
assert serialized_tx.hex()
@pytest.mark.multisig
@pytest.mark.parametrize("paths, address_index", VECTORS_MULTISIG)
def test_getaddress_multisig(client: Client, paths, address_index):
pubs = [
messages.HDNodePathType(
node=btc.get_public_node(
client, parse_path(path), coin_name="Bitcoin"
).node,
address_n=address_index,
)
for path in paths
]
multisig = messages.MultisigRedeemScriptType(pubkeys=pubs, m=2)
address = btc.get_address(
client,
"Bitcoin",
parse_path(paths[0]) + address_index,
show_display=True,
multisig=multisig,
script_type=messages.InputScriptType.SPENDMULTISIG,
)
assert address
@pytest.mark.multisig
@pytest.mark.parametrize("paths, address_index", VECTORS_MULTISIG)
def test_signtx_multisig(client: Client, paths, address_index):
pubs = [
messages.HDNodePathType(
node=btc.get_public_node(
client, parse_path(path), coin_name="Bitcoin"
).node,
address_n=address_index,
)
for path in paths
]
signatures = [b""] * 3
multisig = messages.MultisigRedeemScriptType(
pubkeys=pubs, signatures=signatures, m=2
)
address_n = parse_path(paths[0]) + address_index
address = btc.get_address(
client,
"Bitcoin",
address_n,
multisig=multisig,
script_type=messages.InputScriptType.SPENDMULTISIG,
)
prevhash, prevtx = forge_prevtx([(address, 20_000)])
inp1 = messages.TxInputType(
address_n=address_n,
amount=20_000,
prev_hash=prevhash,
prev_index=0,
script_type=messages.InputScriptType.SPENDMULTISIG,
multisig=multisig,
)
out1 = messages.TxOutputType(
address="17kTB7qSk3MupQxWdiv5ZU3zcrZc2Azes1",
amount=10_000,
script_type=messages.OutputScriptType.PAYTOADDRESS,
)
sig, _ = btc.sign_tx(
client, "Bitcoin", [inp1], [out1], prev_txes={prevhash: prevtx}
)
assert sig[0] |
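For reference, parse_path from trezorlib.tools turns the path strings used in the vectors above into plain lists of integers, with hardened components carrying the 0x80000000 flag. A quick illustration, added here and not part of the original test:

assert parse_path("m/4/255") == [4, 255]
assert parse_path("m/3h/100h/4/255") == [3 | 0x80000000, 100 | 0x80000000, 4, 255]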
create user | from __future__ import annotations
import functools
import secrets
from typing import TYPE_CHECKING, List, Optional
from aiohttp import ClientResponseError
from iambic.core.context import ctx
from iambic.core.logger import log
from iambic.core.models import ProposedChange, ProposedChangeType
from iambic.core.utils import GlobalRetryController, snake_to_camelback
from iambic.plugins.v0_1_0.azure_ad.user.models import UserTemplateProperties
if TYPE_CHECKING:
from iambic.plugins.v0_1_0.azure_ad.models import AzureADOrganization
async def list_users(
azure_ad_organization: AzureADOrganization, **kwargs
) -> List[UserTemplateProperties]:
"""
List all users in Azure AD.
Args:
- azure_ad_organization: An instance of the AzureADOrganization class, which provides access to the Azure AD API.
Returns:
- A list of `User` instances, representing the users in Azure AD.
"""
async with GlobalRetryController(
fn_identifier="azure_ad.list_users"
) as retry_controller:
fn = functools.partial(azure_ad_organization.list, "users", **kwargs)
users = await retry_controller(fn)
return [UserTemplateProperties.from_azure_response(user) for user in users]
async def get_user(
azure_ad_organization: AzureADOrganization,
user_id: Optional[str] = None,
username: Optional[str] = None,
allow_template_ref: bool = False,
) -> UserTemplateProperties:
"""
Get Azure AD user.
Args:
- azure_ad_organization: An instance of the AzureADOrganization class, which provides access to the Azure AD API.
- user_id: The user ID to get.
- allow_template_ref: If True, attempt to resolve the user by loading the IAMbic template
Returns:
- The `User` instance, representing the user in Azure AD.
"""
assert user_id or username
if user_id:
async with GlobalRetryController(
fn_identifier="azure_ad.get_user"
) as retry_controller:
fn = functools.partial(azure_ad_organization.get, f"users/{user_id}")
user = await retry_controller(fn)
return UserTemplateProperties.from_azure_response(user)
users = await list_users(
azure_ad_organization,
params={"$filter": f"userPrincipalName eq '{username}'"},
)
if users:
return users[0]
raise Exception(f"User not found with username {username}")
async def METHOD_NAME(
azure_ad_organization: AzureADOrganization,
username: str,
mail_nickname: Optional[str],
display_name: str,
) -> Optional[UserTemplateProperties]:
"""
Create a new user in Azure AD.
Args:
- azure_ad_organization (AzureADOrganization): The Azure AD organization to update the user in.
- username: The name of the user to create.
- mail_nickname: The mail alias for the user.
- display_name: The name to display in the address book for the user.
Returns:
- An instance of the `UserTemplateProperties` class, representing the created user.
"""
if ctx.execute:
log.warning(
"request data",
request_data={
"accountEnabled": True,
"displayName": display_name,
"mailNickname": mail_nickname,
"userPrincipalName": username,
},
)
user = await azure_ad_organization.post(
"users",
json={
"accountEnabled": True,
"displayName": display_name,
"mailNickname": mail_nickname,
"userPrincipalName": username,
"passwordProfile": {
"forceChangePasswordNextSignIn": True,
"forceChangePasswordNextSignInWithMfa": azure_ad_organization.require_user_mfa_on_create,
"password": secrets.token_urlsafe(15),
},
},
)
return UserTemplateProperties.from_azure_response(user)
async def update_user_attributes(
azure_ad_organization: AzureADOrganization,
template_user: UserTemplateProperties,
cloud_user: UserTemplateProperties,
log_params: dict[str, str],
) -> List[ProposedChange]:
"""
Update the name of a user in Azure AD.
Args:
azure_ad_organization (AzureADOrganization): The Azure AD organization to update the user in.
template_user (UserTemplateProperties): The template representation of the user.
cloud_user (UserTemplateProperties): The current representation of the user in the cloud.
log_params (dict): Logging parameters.
Returns:
List[ProposedChange]: A list of proposed changes to be applied.
"""
response: list[ProposedChange] = []
patch_request = {}
for attr, value in cloud_user.dict(
exclude_none=False, exclude={"user_id", "fullname"}
).items():
if (template_value := getattr(template_user, attr)) != value:
response.append(
ProposedChange(
change_type=ProposedChangeType.UPDATE,
resource_id=template_user.user_id,
resource_type=template_user.resource_type,
attribute=attr,
current_value=value,
new_value=template_value,
)
)
attr = (
"username" if attr == "userPrincipalName" else snake_to_camelback(attr)
)
patch_request[attr] = template_value
if ctx.execute and patch_request:
try:
await azure_ad_organization.patch(
f"users/{cloud_user.user_id}",
json=patch_request,
)
except ClientResponseError as err:
log.exception(
"Failed to update user in Azure AD",
**log_params,
)
response[0].exceptions_seen = [str(err)]
return response
async def delete_user(
azure_ad_organization: AzureADOrganization,
user: UserTemplateProperties,
log_params: dict[str, str],
) -> List[ProposedChange]:
"""
Delete a user in Azure AD.
Args:
azure_ad_organization (AzureADOrganization): The Azure AD organization to delete the user from.
user (UserTemplateProperties): The user to delete.
log_params (dict): Logging parameters.
Returns:
List[ProposedChange]: A list of proposed changes to be applied.
"""
response: list[ProposedChange] = [
ProposedChange(
change_type=ProposedChangeType.DELETE,
resource_id=user.user_id,
resource_type=user.resource_type,
attribute="user",
change_summary={"user": user.username},
current_value=user.username,
new_value=None,
)
]
if ctx.execute:
try:
await azure_ad_organization.delete(f"users/{user.user_id}")
except ClientResponseError as err:
log.exception(
"Failed to delete user in Azure AD",
**log_params,
)
response[0].exceptions_seen = [str(err)]
return response |
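A hedged usage sketch of the helpers above (not from the original plugin); it assumes an already-configured AzureADOrganization instance and reuses the same params= passthrough that get_user relies on. The "$top" value and the placeholder organization name are illustrative.

import asyncio

async def show_first_users(azure_ad_organization):
    users = await list_users(azure_ad_organization, params={"$top": 10})
    for user in users:
        print(user.username, user.user_id)

# asyncio.run(show_first_users(my_org))  # my_org is a placeholder instance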
url parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network nic delete",
)
class Delete(AAZCommand):
"""Delete a network interface.
:example: Delete a network interface.
az network nic delete -g MyResourceGroup -n MyNic
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networkinterfaces/{}", "2022-11-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="Name of the network interface (NIC).",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.NetworkInterfacesDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class NetworkInterfacesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.METHOD_NAME,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkInterfaces/{networkInterfaceName}",
**self.METHOD_NAME
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "ODataV4Format"
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_url_param(
"networkInterfaceName", self.ctx.args.name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
test top k zeros | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sorting operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class XlaSortOpTest(xla_test.XLATestCase):
def _assertOpOutputMatchesExpected(self, op, args, expected):
with self.session() as session:
with self.test_scope():
placeholders = [
array_ops.placeholder(dtypes.as_dtype(arg.dtype), arg.shape)
for arg in args
]
feeds = {placeholders[i]: args[i] for i in range(0, len(args))}
output = op(*placeholders)
if isinstance(output, ops.Tensor):
output = [output]
results = session.run(output, feeds)
for result, v in zip(results, expected):
self.assertAllClose(v, result, rtol=1e-3)
def testSort(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
x = np.arange(101, dtype=dtype)
np.random.shuffle(x)
self._assertOpOutputMatchesExpected(
xla.sort, [x], expected=[np.arange(101, dtype=dtype)])
def testKeyValueSort(self):
supported_key_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
supported_value_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32,
dtypes.int64.as_numpy_dtype, dtypes.uint64.as_numpy_dtype])
for key_type in supported_key_types.intersection(self.numeric_types):
for value_type in supported_value_types.intersection(self.numeric_types):
x = np.arange(101, dtype=key_type)
np.random.shuffle(x)
y = (-x).astype(value_type)
self._assertOpOutputMatchesExpected(
xla.key_value_sort, [x, y],
expected=[
np.arange(101, dtype=key_type),
-np.arange(101, dtype=value_type)
])
def testTopK(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
# Use small input size for bfloat16. Otherwise, we'll get duplicate values
# after conversion to bfloat16, so the possible resulting index array is
# no longer unique.
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 20
k_options = [0, 1, 2, 10, 20]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
for x in [np.arange(array_size)]:
np.random.shuffle(x)
for k in k_options:
indices = x.argsort()[::-1][:k]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[x[indices].astype(dtype), indices])
def testTopK2D(self):
supported_types = set(
[dtypes.bfloat16.as_numpy_dtype, np.float32, np.int32, np.uint32])
for dtype in supported_types.intersection(self.numeric_types):
# Use small input size for bfloat16. Otherwise, we'll get duplicate values
# after conversion to bfloat16, so the possible resulting index array is
# no longer unique.
if dtype == dtypes.bfloat16.as_numpy_dtype:
array_size = 10
k_options = [0, 1, 2, 10]
else:
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
for k in k_options:
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = np.sort(x, axis=1)[::, -1:-k - 1:-1]
def topk(v, k=k):
return nn_ops.top_k(v, k=k, sorted=True)
self._assertOpOutputMatchesExpected(
topk, [x.astype(dtype)],
expected=[expected.astype(dtype), indices])
def METHOD_NAME(self):
"""Tests that positive and negative zeros sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=4)
results = sess.run(
topk,
{p: np.array([0., -0., 0., 3., -0., -4., 0., -0.], dtype=bfloat16)})
self.assertAllEqual(
np.array([3., 0., 0., 0.], dtype=bfloat16), results[0])
self.assertEqual(list([3, 0, 2, 6]), list(results[1]))
def testTopKInfinities(self):
"""Tests that positive and negative infinity sort correctly."""
# Only bfloat16 is implemented.
bfloat16 = dtypes.bfloat16.as_numpy_dtype
if bfloat16 not in self.numeric_types:
return
with self.session() as sess:
p = array_ops.placeholder(dtypes.bfloat16)
with self.test_scope():
topk = nn_ops.top_k(p, k=6)
results = sess.run(topk, {
p: np.array(
[1, 2, float("inf"), -float("inf"), -1, -2], dtype=bfloat16)
})
self.assertAllEqual(
np.array(
[float("inf"), 2.0, 1.0, -1.0, -2.0, -float("inf")],
dtype=bfloat16), results[0])
self.assertEqual(list([2, 1, 0, 4, 5, 3]), list(results[1]))
def testInTopK(self):
supported_types = set([np.int32, np.int64])
for dtype in supported_types.intersection(self.numeric_types):
array_size = 200 * 1000
k_options = [0, 1, 2, 10, 20, 100, 1000, 200 * 1000]
batch = 16
for x in [np.arange(batch * array_size)]:
np.random.shuffle(x)
x = np.reshape(x, [batch, array_size])
y = np.random.randint(0, array_size, size=batch)
for k in k_options:
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]
expected = [y[i] in indices[i] for i in range(batch)]
def in_topk(predictions, targets, k=k):
return nn_ops.in_top_k(predictions, targets, k)
self._assertOpOutputMatchesExpected(
in_topk,
[x.astype(np.float32), y.astype(dtype)],
expected=[expected])
if __name__ == "__main__":
test.main() |
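The expected values in testTopK2D are computed with plain NumPy slicing; the standalone example below (an addition, not part of the test file) shows the same argsort pattern on a concrete array.

import numpy as np

x = np.array([[3, 1, 4, 1, 5],
              [9, 2, 6, 5, 3]])
k = 2
indices = x.argsort(axis=1)[::, -1:-k - 1:-1]  # per-row indices of the k largest values
values = np.sort(x, axis=1)[::, -1:-k - 1:-1]  # per-row k largest values, descending
# indices -> [[4, 2], [0, 2]], values -> [[5, 4], [9, 6]]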
list books by author | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Purpose
Shows how to implement routing for a lending library REST API that uses AWS Chalice.
Book, patron, and lending data is accessed through a serverless Amazon Aurora database.
This file is deployed to AWS Lambda as part of the Chalice deployment.
"""
import logging
import urllib.parse
from chalice import Chalice
from chalice.app import RequestTimeoutError
import chalicelib.library_data
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
app = Chalice(app_name='library_api')
app.debug = True # Set this to False for production use.
_STORAGE = None
def get_storage():
"""Creates or gets the storage object that calls the database."""
global _STORAGE
if _STORAGE is None:
_STORAGE = chalicelib.library_data.Storage.from_env()
return _STORAGE
def storage_timeout(func):
def _timeout(*args, **kwargs):
try:
result = func(*args, **kwargs)
except chalicelib.library_data.DataServiceNotReadyException as err:
raise RequestTimeoutError(err)
else:
return result
return _timeout
@app.route('/')
def index():
"""Briefly describes the REST API."""
return {'description': 'A simple lending library REST API that runs entirely on '
'serverless components.'}
@app.route('/books', methods=['GET'])
@storage_timeout
def list_books():
"""
Lists the books in the library.
:return: The list of books.
"""
return {'books': get_storage().get_books()}
@app.route('/books', methods=['POST'])
@storage_timeout
def add_book():
"""
Adds a book to the library. The author is also added.
The book must be in the request body as JSON in the following format:
{
"Books.Title": "Title of the Book",
"Authors.FirstName": "Allison",
"Authors.LastName": "Author"
}
:return: The IDs of the added author and book.
"""
author_id, book_id = get_storage().add_book(app.current_request.json_body)
return {'Authors.AuthorID': author_id, 'Books.BookID': book_id}
@app.route('/books/{author_id}', methods=['GET'])
@storage_timeout
def METHOD_NAME(author_id):
"""
Lists books in the library written by the specified author.
:param author_id: The ID of the author to query.
:return: The list of books written by the specified author.
"""
author_id = int(urllib.parse.unquote(author_id))
return {'books': get_storage().get_books(author_id=author_id)}
@app.route('/authors', methods=['GET'])
@storage_timeout
def list_authors():
"""
Lists the authors in the library.
:return: The list of authors.
"""
return {'authors': get_storage().get_authors()}
@app.route('/patrons', methods=['GET'])
@storage_timeout
def list_patrons():
"""
Lists the patrons of the library.
:return: The list of patrons.
"""
return {'patrons': get_storage().get_patrons()}
@app.route('/patrons', methods=['POST'])
@storage_timeout
def add_patron():
"""
Adds a patron to the library.
Patrons must be in the request body as JSON in the following format:
{
"Patrons.FirstName": "Paulo",
"Patrons.LastName": "Patron"
}
:return: The ID of the added patron.
"""
patron_id = get_storage().add_patron(app.current_request.json_body)
return {'Patrons.PatronID': patron_id}
@app.route('/patrons/{patron_id}', methods=['DELETE'])
@storage_timeout
def delete_patron(patron_id):
"""
Removes a patron from the library.
:param patron_id: The ID of the patron to remove.
"""
patron_id = int(urllib.parse.unquote(patron_id))
get_storage().delete_patron(patron_id)
@app.route('/lending', methods=['GET'])
@storage_timeout
def list_borrowed_books():
"""
Lists the books that are currently lent out from the library.
:return: The list of currently borrowed books.
"""
return {'books': get_storage().get_borrowed_books()}
@app.route('/lending/{book_id}/{patron_id}', methods=['PUT', 'DELETE'])
@storage_timeout
def book_lending(book_id, patron_id):
"""
Borrows or returns a book.
To borrow a book, PUT the book ID and the patron ID.
To return a book, DELETE the bookID and the patron ID.
:param book_id: The ID of the book.
:param patron_id: The ID of the patron.
"""
book_id = int(urllib.parse.unquote(book_id))
patron_id = int(urllib.parse.unquote(patron_id))
if app.current_request.method == 'PUT':
get_storage().borrow_book(book_id, patron_id)
elif app.current_request.method == 'DELETE':
get_storage().return_book(book_id, patron_id) |
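Once deployed with chalice deploy, the routes above become ordinary HTTP endpoints. The client sketch below is illustrative only: the base URL is a placeholder for the API Gateway stage URL printed at deploy time, and requests is not a dependency of this app.

import requests

BASE = "https://abc123.execute-api.us-east-1.amazonaws.com/api"  # placeholder

# Add a book; the payload format matches the add_book docstring above.
requests.post(BASE + "/books", json={
    "Books.Title": "Title of the Book",
    "Authors.FirstName": "Allison",
    "Authors.LastName": "Author",
})

# List all books.
print(requests.get(BASE + "/books").json())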
test copy kms | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Integration tests for gcsio module.
Runs tests against Google Cloud Storage service.
Instantiates a TestPipeline to get options such as GCP project name, but
doesn't actually start a Beam pipeline or test any specific runner.
Options:
--kms_key_name=projects/<project-name>/locations/<region>/keyRings/\
<key-ring-name>/cryptoKeys/<key-name>/cryptoKeyVersions/<version>
Pass a Cloud KMS key name to test GCS operations using customer managed
encryption keys (CMEK).
Cloud KMS permissions:
The project's Cloud Storage service account requires Encrypter/Decrypter
permissions for the key specified in --kms_key_name.
To run these tests manually:
./gradlew :sdks:python:test-suites:dataflow:integrationTest \
-Dtests=apache_beam.io.gcp.gcsio_integration_test:GcsIOIntegrationTest \
-DkmsKeyName=KMS_KEY_NAME
"""
# pytype: skip-file
import logging
import unittest
import uuid
import pytest
from apache_beam.io.filesystems import FileSystems
from apache_beam.testing.test_pipeline import TestPipeline
try:
from apache_beam.io.gcp import gcsio
except ImportError:
gcsio = None # type: ignore
@unittest.skipIf(gcsio is None, 'GCP dependencies are not installed')
class GcsIOIntegrationTest(unittest.TestCase):
INPUT_FILE = 'gs://dataflow-samples/shakespeare/kinglear.txt'
# Larger than 1MB to test maxBytesRewrittenPerCall.
# Also needs to be in a different region than the dest to take effect.
INPUT_FILE_LARGE = 'gs://apache-beam-samples-us-east1/wikipedia_edits/wiki_data-000000000000.json' # pylint: disable=line-too-long
def setUp(self):
self.test_pipeline = TestPipeline(is_integration_test=True)
self.runner_name = type(self.test_pipeline.runner).__name__
if self.runner_name != 'TestDataflowRunner':
# This test doesn't run a pipeline, so it doesn't make sense to try it on
# different runners. Running with TestDataflowRunner makes sense since
# it uses GoogleCloudOptions such as 'project'.
raise unittest.SkipTest('This test only runs with TestDataflowRunner.')
self.project = self.test_pipeline.get_option('project')
self.gcs_tempdir = (
self.test_pipeline.get_option('temp_location') + '/gcs_it-' +
str(uuid.uuid4()))
self.kms_key_name = self.test_pipeline.get_option('kms_key_name')
self.gcsio = gcsio.GcsIO()
def tearDown(self):
FileSystems.delete([self.gcs_tempdir + '/'])
def _verify_copy(self, src, dst, dst_kms_key_name=None):
self.assertTrue(FileSystems.exists(src), 'src does not exist: %s' % src)
self.assertTrue(FileSystems.exists(dst), 'dst does not exist: %s' % dst)
src_checksum = self.gcsio.checksum(src)
dst_checksum = self.gcsio.checksum(dst)
self.assertEqual(src_checksum, dst_checksum)
actual_dst_kms_key = self.gcsio.kms_key(dst)
if actual_dst_kms_key is None:
self.assertEqual(actual_dst_kms_key, dst_kms_key_name)
else:
self.assertTrue(
actual_dst_kms_key.startswith(dst_kms_key_name),
"got: %s, wanted startswith: %s" %
(actual_dst_kms_key, dst_kms_key_name))
def _test_copy(
self,
name,
kms_key_name=None,
max_bytes_rewritten_per_call=None,
src=None):
src = src or self.INPUT_FILE
dst = self.gcs_tempdir + '/%s' % name
extra_kwargs = {}
if max_bytes_rewritten_per_call is not None:
extra_kwargs['max_bytes_rewritten_per_call'] = (
max_bytes_rewritten_per_call)
self.gcsio.copy(src, dst, kms_key_name, **extra_kwargs)
self._verify_copy(src, dst, kms_key_name)
@pytest.mark.it_postcommit
def test_copy(self):
self._test_copy("test_copy")
@pytest.mark.it_postcommit
def METHOD_NAME(self):
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
self._test_copy("test_copy_kms", self.kms_key_name)
@pytest.mark.it_postcommit
def test_copy_rewrite_token(self):
# Tests a multi-part copy (rewrite) operation. This is triggered by a
# combination of 3 conditions:
# - a large enough src
# - setting max_bytes_rewritten_per_call
# - setting kms_key_name
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
rewrite_responses = []
self.gcsio._set_rewrite_response_callback(
lambda response: rewrite_responses.append(response))
self._test_copy(
"test_copy_rewrite_token",
kms_key_name=self.kms_key_name,
max_bytes_rewritten_per_call=50 * 1024 * 1024,
src=self.INPUT_FILE_LARGE)
# Verify that there was a multi-part rewrite.
self.assertTrue(any(not r.done for r in rewrite_responses))
def _test_copy_batch(
self,
name,
kms_key_name=None,
max_bytes_rewritten_per_call=None,
src=None):
num_copies = 10
srcs = [src or self.INPUT_FILE] * num_copies
dsts = [self.gcs_tempdir + '/%s_%d' % (name, i) for i in range(num_copies)]
src_dst_pairs = list(zip(srcs, dsts))
extra_kwargs = {}
if max_bytes_rewritten_per_call is not None:
extra_kwargs['max_bytes_rewritten_per_call'] = (
max_bytes_rewritten_per_call)
result_statuses = self.gcsio.copy_batch(
src_dst_pairs, kms_key_name, **extra_kwargs)
for status in result_statuses:
self.assertIsNone(status[2], status)
for _src, _dst in src_dst_pairs:
self._verify_copy(_src, _dst, kms_key_name)
@pytest.mark.it_postcommit
def test_copy_batch(self):
self._test_copy_batch("test_copy_batch")
@pytest.mark.it_postcommit
def test_copy_batch_kms(self):
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
self._test_copy_batch("test_copy_batch_kms", self.kms_key_name)
@pytest.mark.it_postcommit
def test_copy_batch_rewrite_token(self):
# Tests a multi-part copy (rewrite) operation. This is triggered by a
# combination of 3 conditions:
# - a large enough src
# - setting max_bytes_rewritten_per_call
# - setting kms_key_name
if self.kms_key_name is None:
raise unittest.SkipTest('--kms_key_name not specified')
rewrite_responses = []
self.gcsio._set_rewrite_response_callback(
lambda response: rewrite_responses.append(response))
self._test_copy_batch(
"test_copy_batch_rewrite_token",
kms_key_name=self.kms_key_name,
max_bytes_rewritten_per_call=50 * 1024 * 1024,
src=self.INPUT_FILE_LARGE)
# Verify that there was a multi-part rewrite.
self.assertTrue(any(not r.done for r in rewrite_responses))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main() |
draw buttons | # This file is part of project Sverchok. It's copyrighted by the contributors
# recorded in the version control history of the file, available from
# its original location https://github.com/nortikin/sverchok/commit/master
#
# SPDX-License-Identifier: GPL3
# License-Filename: LICENSE
from typing import List, Dict
import bpy
from bmesh.ops import split_edges
from sverchok.nodes.list_masks.mask_convert import mask_converter_node
from sverchok.node_tree import SverchCustomTreeNode
from sverchok.data_structure import updateNode, fixed_iter
from sverchok.utils.sv_bmesh_utils import empty_bmesh, add_mesh_to_bmesh, pydata_from_bmesh
from sverchok.utils.sv_mesh_utils import polygons_to_edges_np
from sverchok.utils.nodes_mixins.sockets_config import ModifierNode
def split_mesh_elements_node(vertices=None,
edges=None,
faces=None,
face_data=None,
mask=None,
mask_mode='BY_VERTEX',
split_type='VERTS'):
if not vertices:
return [], [], [], []
edges = edges or []
faces = faces or []
face_data = list(fixed_iter(face_data, len(faces))) if face_data else None
mask = mask or []
if split_type == 'VERTS':
if mask_mode != 'BY_VERTEX':
mask, _, _ = mask_converter_node(
vertices, edges, faces,
edges_mask=mask if mask_mode == 'BY_EDGE' else None,
faces_mask=mask if mask_mode == 'BY_FACE' else None,
mode=mask_mode)
vs, es, fs, fds = split_by_vertices(vertices, edges, faces, mask, face_data)
elif split_type == 'EDGES':
if mask_mode != 'BY_EDGE':
_, mask, _ = mask_converter_node(
vertices, edges, faces,
vertices_mask=mask if mask_mode == 'BY_VERTEX' else None,
faces_mask=mask if mask_mode == 'BY_FACE' else None,
mode=mask_mode)
vs, es, fs, fds = split_by_edges(vertices, edges, faces, face_data, mask)
else:
        raise TypeError(f'Unknown "split_type" mode = {split_type}')
return vs, es, fs, fds
def split_by_vertices(verts,
edges=None,
faces=None,
selected_verts: List[bool] = None,
face_data=None):
"""it ignores edges for now"""
edges = edges or []
faces = faces or []
if not selected_verts:
selected_verts = [True] * len(verts)
elif len(selected_verts) != len(verts):
selected_verts = list(fixed_iter(selected_verts, len(verts)))
out_verts = []
out_faces = []
old_new_verts: Dict[int, int] = dict()
for face in faces:
new_face = []
for i in face:
if selected_verts[i]:
out_verts.append(verts[i])
new_face.append(len(out_verts) - 1)
else:
if i in old_new_verts:
new_face.append(old_new_verts[i])
else:
out_verts.append(verts[i])
old_new_verts[i] = len(out_verts) - 1
new_face.append(len(out_verts) - 1)
out_faces.append(new_face)
out_edges = polygons_to_edges_np([out_faces], unique_edges=True)[0]
return out_verts, out_edges, out_faces, face_data
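# Illustrative example (hypothetical data): two triangles sharing an edge, with all
# vertices selected, are split into two fully independent triangles:
#   split_by_vertices(verts, faces=[[0, 1, 2], [1, 3, 2]], selected_verts=[True] * 4)
#   -> 6 output vertices and faces [[0, 1, 2], [3, 4, 5]]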
def split_by_edges(verts, edges=None, faces=None, face_data=None, selected_edges: List[bool] = None):
with empty_bmesh() as bm:
add_mesh_to_bmesh(bm, verts, edges, faces, 'initial_index')
split_edges(bm, edges=[e for e, b in zip(bm.edges, selected_edges) if b])
if face_data:
v, e, f, fd = pydata_from_bmesh(bm, face_data=face_data)
else:
v, e, f = pydata_from_bmesh(bm)
fd = []
return v, e, f, fd
class SvSplitMeshElements(ModifierNode, SverchCustomTreeNode, bpy.types.Node):
"""
Triggers: split rip separate
Split selected mesh elements from each other
"""
bl_idname = 'SvSplitMeshElements'
bl_label = 'Split Mesh Elements'
bl_icon = 'MOD_EDGESPLIT'
select_mode_items = [(n.upper(), n, '', ic, i) for i, (n, ic) in enumerate(zip(
('By_Vertex', 'By_Edge', 'By_Face'), ('VERTEXSEL', 'EDGESEL', 'FACESEL')))]
mask_mode: bpy.props.EnumProperty(items=select_mode_items, update=updateNode)
split_type: bpy.props.EnumProperty(items=[(i.upper(), i, '') for i in ['verts', 'edges']], update=updateNode)
def METHOD_NAME(self, context, layout):
layout.prop(self, 'split_type', expand=True)
def draw_mask_socket(self, socket, context, layout):
row = layout.row()
text = f'. {socket.objects_number}' if socket.objects_number else ""
row.label(text=f'{socket.label or socket.name}{text}')
row.prop(self, 'mask_mode', expand=True, icon_only=True)
def sv_init(self, context):
self.inputs.new('SvVerticesSocket', 'Vertices')
self.inputs.new('SvStringsSocket', 'Edges')
self.inputs.new('SvStringsSocket', 'Faces')
self.inputs.new('SvStringsSocket', 'FaceData')
self.inputs.new('SvStringsSocket', 'Mask').custom_draw = 'draw_mask_socket'
self.outputs.new('SvVerticesSocket', 'Vertices')
self.outputs.new('SvStringsSocket', 'Edges')
self.outputs.new('SvStringsSocket', 'Faces')
self.outputs.new('SvStringsSocket', 'FaceData')
def process(self):
vertices = self.inputs['Vertices'].sv_get(deepcopy=False, default=[])
edges = self.inputs['Edges'].sv_get(deepcopy=False, default=[])
faces = self.inputs['Faces'].sv_get(deepcopy=False, default=[])
face_data = self.inputs['FaceData'].sv_get(deepcopy=False, default=[])
mask = self.inputs['Mask'].sv_get(deepcopy=False, default=[])
out = []
data = [vertices, edges, faces, face_data, mask]
obj_n = max(map(len, data))
iter_data = zip(*[fixed_iter(d, obj_n, None) for d in data])
for v, e, f, fd, m in iter_data:
out.append(split_mesh_elements_node(v, e, f, fd, m, self.mask_mode, self.split_type))
vs, es, fs, fds = list(zip(*out)) if out else ([], [], [], [])
self.outputs['Vertices'].sv_set(vs)
self.outputs['Edges'].sv_set(es)
self.outputs['Faces'].sv_set(fs)
self.outputs['FaceData'].sv_set(fds)
register, unregister = bpy.utils.register_classes_factory([SvSplitMeshElements]) |
reflection added | from ....model.util.HelperModule import get_partial_index
# imports for type hinting in PyCharm -- DO NOT DELETE
from ....model.DioptasModel import DioptasModel
from ....widgets.integration import BatchWidget
from ....widgets.plot_widgets.ImgWidget import IntegrationImgWidget
class PhaseInBatchController(object):
"""
PhaseInBatchController handles all the interaction between the phase controls and the plotted lines in the cake view.
"""
def __init__(self, batch_widget, dioptas_model):
"""
:param batch_widget: Reference to an IntegrationWidget
:param dioptas_model: reference to DioptasModel object
:type batch_widget: BatchWidget
:type dioptas_model: DioptasModel
"""
self.model = dioptas_model
self.phase_model = self.model.phase_model
self.batch_widget = batch_widget
self.batch_view_widget = batch_widget.stack_plot_widget.img_view # type: IntegrationImgWidget
self.connect()
def connect(self):
self.phase_model.phase_added.connect(self.add_phase_plot)
self.model.phase_model.phase_removed.connect(self.batch_view_widget.del_cake_phase)
self.phase_model.phase_changed.connect(self.update_phase_lines)
self.phase_model.phase_changed.connect(self.update_phase_color)
self.phase_model.phase_changed.connect(self.update_phase_visible)
self.phase_model.METHOD_NAME.connect(self.METHOD_NAME)
self.phase_model.reflection_deleted.connect(self.reflection_deleted)
self.model.enabled_phases_in_cake.connect(self.update_all_phases)
def get_phase_position_and_intensities(self, ind, clip=True):
"""
Obtains the positions and intensities for lines of a phase with an index ind within the batch view.
        No clipping is used for the first call that adds the CakePhasePlot to the ImgWidget. Subsequent calls use
        clipping, so only lines within the cake_tth range are returned. The visibility of each line is then estimated
        in the ImgWidget based on the lengths of the clipped and unclipped lists.
        :param ind: the index of the phase
        :param clip: whether the lists should be clipped. When clipping, lines whose positions fall outside the
            cake two-theta range are dropped; without clipping they are kept with a position of 0.
:return: line_positions, line_intensities
"""
if self.model.batch_model.binning is None:
cake_tth = self.model.calibration_model.tth
else:
start_x, stop_x = self._get_x_range()
cake_tth = self.model.batch_model.binning[start_x:stop_x]
reflections_tth = self.phase_model.get_phase_line_positions(ind, 'tth',
self.model.calibration_model.wavelength * 1e10)
reflections_intensities = [reflex[1] for reflex in self.phase_model.reflections[ind]]
cake_line_positions = []
cake_line_intensities = []
for ind, tth in enumerate(reflections_tth):
pos_ind = get_partial_index(cake_tth, tth)
if pos_ind is not None:
cake_line_positions.append(pos_ind + 0.5)
cake_line_intensities.append(reflections_intensities[ind])
elif clip is False:
cake_line_positions.append(0)
cake_line_intensities.append(reflections_intensities[ind])
return cake_line_positions, cake_line_intensities
def add_phase_plot(self):
cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(-1, False)
self.batch_view_widget.add_cake_phase(cake_line_positions, cake_line_intensities,
self.phase_model.phase_colors[-1])
def update_phase_lines(self, ind):
cake_line_positions, cake_line_intensities = self.get_phase_position_and_intensities(ind)
self.batch_view_widget.update_phase_intensities(ind, cake_line_positions, cake_line_intensities)
def update_all_phases(self):
for ind in range(len(self.phase_model.phases)):
self.update_phase_lines(ind)
def update_phase_color(self, ind):
self.batch_view_widget.set_cake_phase_color(ind, self.model.phase_model.phase_colors[ind])
def update_phase_visible(self, ind):
if self.phase_model.phase_visible[ind] and self.batch_widget.control_widget.phases_btn.isChecked():
self.batch_view_widget.show_cake_phase(ind)
else:
self.batch_view_widget.hide_cake_phase(ind)
def METHOD_NAME(self, ind):
self.batch_view_widget.phases[ind].add_line()
def reflection_deleted(self, phase_ind, reflection_ind):
self.batch_view_widget.phases[phase_ind].delete_line(reflection_ind)
def _get_x_range(self):
"""
Return bin-x range of the batch plot
"""
if self.model.batch_model.data is None:
return 0, 0
start_x = 0
stop_x = self.model.batch_model.data.shape[1]
if self.batch_widget.options_widget.bkg_cut_btn.isChecked():
bkg_roi = self.model.pattern_model.pattern.auto_background_subtraction_roi
if bkg_roi is not None:
binning = self.model.batch_model.binning
scale = (binning[-1] - binning[0]) / binning.shape[0]
start_x = (bkg_roi[0] - binning[0]) / scale
stop_x = (bkg_roi[1] - binning[0]) / scale
return int(start_x), int(stop_x) |
add custom teardown | import json
import logging
from contextlib import contextmanager
from typing import TYPE_CHECKING, Callable, Optional
import aws_cdk as cdk
from typing_extensions import Self
from localstack.testing.aws.util import is_aws_cloud
if TYPE_CHECKING:
from mypy_boto3_s3 import S3Client
from localstack.aws.api.cloudformation import Capability
from localstack.aws.connect import ServiceLevelClientFactory
LOG = logging.getLogger(__name__)
CDK_BOOTSTRAP_PARAM = "/cdk-bootstrap/hnb659fds/version"
WAITER_CONFIG_AWS = {"Delay": 10, "MaxAttempts": 1000}
WAITER_CONFIG_LS = {"Delay": 1, "MaxAttempts": 500}
def cleanup_s3_bucket(s3_client: "S3Client", bucket_name: str):
LOG.debug(f"Cleaning provisioned S3 Bucket {bucket_name}")
try:
objs = s3_client.list_objects_v2(Bucket=bucket_name)
objs_num = objs["KeyCount"]
if objs_num > 0:
LOG.debug(f"Deleting {objs_num} objects from {bucket_name}")
obj_keys = [{"Key": o["Key"]} for o in objs["Contents"]]
s3_client.delete_objects(Bucket=bucket_name, Delete={"Objects": obj_keys})
except Exception:
LOG.warning(
f"Failed to clean provisioned S3 Bucket {bucket_name}",
exc_info=LOG.isEnabledFor(logging.DEBUG),
)
class InfraProvisioner:
"""
TODO: explore adding support for updates during tests
TODO: explore asset handling
"""
cloudformation_stacks: dict[str, dict]
custom_cleanup_steps: list[Callable]
custom_setup_steps: list[Callable]
skipped_provisioning: bool = False
def __init__(self, aws_client: ServiceLevelClientFactory):
self.cloudformation_stacks = {}
self.custom_cleanup_steps = []
self.custom_setup_steps = []
self.aws_client = aws_client
@contextmanager
def provisioner(self, skip_teardown: bool = False) -> Self:
try:
self.provision()
yield self
finally:
if not skip_teardown:
self.teardown()
else:
LOG.info("Skipping teardown. Resources and stacks are not deleted.")
def provision(self):
if all(
self._is_stack_deployed(stack_name, stack)
for stack_name, stack in self.cloudformation_stacks.items()
):
# TODO it's currently all or nothing -> deploying one new stack will most likely fail
LOG.info("All stacks are already deployed. Skipping the provisioning.")
self.skipped_provisioning = True
return
self.run_manual_setup_tasks()
self.bootstrap_cdk()
for stack_name, stack in self.cloudformation_stacks.items():
self.aws_client.cloudformation.create_stack(
StackName=stack_name,
TemplateBody=stack["Template"],
Capabilities=[
Capability.CAPABILITY_AUTO_EXPAND,
Capability.CAPABILITY_IAM,
Capability.CAPABILITY_NAMED_IAM,
],
)
self.aws_client.cloudformation.get_waiter("stack_create_complete").wait(
StackName=stack_name,
WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
)
describe_stack = self.aws_client.cloudformation.describe_stacks(StackName=stack_name)
outputs = describe_stack["Stacks"][0].get("Outputs", {})
stack["Outputs"] = {o["OutputKey"]: o["OutputValue"] for o in outputs}
if stack["AutoCleanS3"]:
stack_resources = self.aws_client.cloudformation.describe_stack_resources(
StackName=stack_name
)["StackResources"]
s3_buckets = [
r["PhysicalResourceId"]
for r in stack_resources
if r["ResourceType"] == "AWS::S3::Bucket"
]
                for s3_bucket in s3_buckets:
                    # Bind the loop variable as a default argument so every cleanup
                    # step targets its own bucket instead of the last one iterated.
                    self.custom_cleanup_steps.append(
                        lambda bucket=s3_bucket: cleanup_s3_bucket(self.aws_client.s3, bucket)
                    )
def get_stack_outputs(self, stack_name: str):
return self.cloudformation_stacks[stack_name]["Outputs"]
def teardown(self):
for fn in self.custom_cleanup_steps:
fn()
for stack_name, stack in self.cloudformation_stacks.items():
self.aws_client.cloudformation.delete_stack(StackName=stack_name)
self.aws_client.cloudformation.get_waiter("stack_delete_complete").wait(
StackName=stack_name,
WaiterConfig=WAITER_CONFIG_AWS if is_aws_cloud() else WAITER_CONFIG_LS,
)
# TODO log-groups created by lambda are not automatically cleaned up by CDK
if not is_aws_cloud():
# TODO proper handling of ssm parameter
try:
self.aws_client.ssm.delete_parameter(Name=CDK_BOOTSTRAP_PARAM)
except Exception:
pass
def add_cdk_stack(self, cdk_stack: cdk.Stack, autoclean_buckets: Optional[bool] = True):
"""
1. check if synthesized templates exists
2. if no templates exists OR forced update enabled => synth cdk.App into CloudFormation template and save it
3. deploy templates / assets / etc.
4. register teardown
"""
template_json = cdk.assertions.Template.from_stack(cdk_stack).to_json()
template = json.dumps(template_json)
self.cloudformation_stacks[cdk_stack.stack_name] = {
"StackName": cdk_stack.stack_name,
"Template": template,
"AutoCleanS3": autoclean_buckets,
}
def add_cdk_app(self, cdk_app: cdk.App):
"""
!!! WORK IN PROGRESS !!!
1. check if synthesized templates exists
2. if no templates exists OR forced update enabled => synth cdk.App into CloudFormation template and save it
3. deploy templates / assets / etc.
4. register teardown
"""
# cloud_assembly = cdk_app.synth()
...
def bootstrap_cdk(self):
# TODO: add proper bootstrap template to deploy here if there's no parameter yet
try:
self.aws_client.ssm.get_parameter(Name=CDK_BOOTSTRAP_PARAM)
except self.aws_client.ssm.exceptions.ParameterNotFound:
self.aws_client.ssm.put_parameter(Name=CDK_BOOTSTRAP_PARAM, Type="String", Value="10")
def METHOD_NAME(self, cleanup_task: Callable):
self.custom_cleanup_steps.append(cleanup_task)
def add_custom_setup_provisioning_step(self, setup_task: Callable):
self.custom_setup_steps.append(setup_task)
def run_manual_setup_tasks(self):
for fn in self.custom_setup_steps:
fn()
def _is_stack_deployed(self, stack_name: str, stack: dict) -> bool:
try:
describe_stack = self.aws_client.cloudformation.describe_stacks(StackName=stack_name)
if outputs := describe_stack["Stacks"][0].get("Outputs"):
stack["Outputs"] = {o["OutputKey"]: o["OutputValue"] for o in outputs}
except Exception:
return False
# TODO should we try to run teardown first, if the status is not "CREATE_COMPLETE"?
return describe_stack["Stacks"][0]["StackStatus"] == "CREATE_COMPLETE" |
close event | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path as osp
import math
from functools import partial
from PyQt5.QtCore import QPoint
from PyQt5.QtWidgets import QDesktopWidget
from qtpy import QtCore, QtWidgets
from qtpy.QtWidgets import (
QWidget,
QLabel,
QPushButton,
QGridLayout,
QKeySequenceEdit,
QMessageBox, )
from qtpy.QtGui import QIcon
from qtpy import QtCore
from qtpy.QtCore import Qt
from util import save_configs
class RecordShortcutWidget(QKeySequenceEdit):
def __init__(self, finishCallback, location):
super().__init__()
self.finishCallback = finishCallback
        # Hide the window frame (frameless popup)
self.setWindowFlags(Qt.FramelessWindowHint)
self.move(location)
self.show()
self.editingFinished.connect(lambda: finishCallback(self.keySequence()))
def keyReleaseEvent(self, ev):
self.finishCallback(self.keySequence())
class ShortcutWidget(QWidget):
def __init__(self, actions, pjpath):
super().__init__()
self.tr = partial(QtCore.QCoreApplication.translate, "ShortcutWidget")
self.setWindowTitle(self.tr("编辑快捷键"))
self.setWindowIcon(QIcon(osp.join(pjpath, "resource/Shortcut.png")))
# self.setFixedSize(self.width(), self.height())
self.actions = actions
self.recorder = None
self.initUI()
def initUI(self):
grid = QGridLayout()
self.setLayout(grid)
actions = self.actions
for idx, action in enumerate(actions):
            # Two columns make the English labels hard to read, so lay out three per row
grid.addWidget(QLabel(action.iconText()[1:]), idx // 3, idx % 3 * 3)
shortcut = action.shortcut().toString()
if len(shortcut) == 0:
shortcut = self.tr("-")
button = QPushButton(shortcut)
button.setFixedWidth(150)
button.setFixedHeight(30)
button.clicked.connect(partial(self.recordShortcut, action))
grid.addWidget(
button,
idx // 3,
idx % 3 * 3 + 1, )
def refreshUi(self):
actions = self.actions
for idx, action in enumerate(actions):
shortcut = action.shortcut().toString()
if len(shortcut) == 0:
shortcut = self.tr("-")
self.layout().itemAtPosition(
idx // 3,
idx % 3 * 3 + 1, ).widget().setText(shortcut)
def recordShortcut(self, action):
        # If a previous shortcut-recording window is still open, close it first
if self.recorder is not None:
self.recorder.close()
rect = self.geometry()
x = rect.x()
y = rect.y() + rect.height()
self.recorder = RecordShortcutWidget(self.setShortcut, QPoint(x, y))
self.currentAction = action
def setShortcut(self, key):
self.recorder.close()
for a in self.actions:
if a.shortcut() == key:
key = key.toString()
msg = QMessageBox()
msg.setIcon(QMessageBox.Warning)
msg.setWindowTitle(key + " " + self.tr("快捷键冲突"))
msg.setText(key + " " + self.tr("快捷键已被") + " " + a.data(
) + " " + self.tr("使用,请设置其他快捷键或先修改") + " " + a.data() + " " +
self.tr("的快捷键"))
msg.setStandardButtons(QMessageBox.Ok)
msg.exec_()
return
key = "" if key.toString() == "Esc" else key # ESC不设置快捷键
self.currentAction.setShortcut(key)
self.refreshUi()
save_configs(None, None, self.actions)
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
qr.moveCenter(cp)
self.move(qr.topLeft())
    # Keep the shortcut recorder following this window when it moves
def moveEvent(self, event):
p = self.geometry()
x = p.x()
y = p.y() + p.height()
if self.recorder is not None:
self.recorder.move(x, y)
def METHOD_NAME(self, event):
        # Also close the shortcut recorder when this widget is closed
if self.recorder is not None:
self.recorder.close() |
test attempt update nonexistent user | from __future__ import annotations
import uuid
from random import randint
from test.plugins.v0_1_0.azure_ad.test_utils import ( # noqa: F401 # intentional for mocks
MockAzureADOrganization,
azure_ad_organization,
)
import pytest
from aiohttp import ClientResponseError
from iambic.plugins.v0_1_0.azure_ad.user.utils import (
create_user,
delete_user,
get_user,
list_users,
update_user_attributes,
)
@pytest.mark.asyncio
async def test_list_users_success(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test list_users with successful response
azure_ad_organization.request_data["users"] = {
"user_id_1": {
"id": "user_id_1",
"userPrincipalName": "[email protected]",
"displayName": "User 1",
"mailNickname": "user1",
},
"user_id_2": {
"id": "user_id_2",
"userPrincipalName": "[email protected]",
"mailNickname": "user2",
"displayName": "User 2",
},
}
users = await list_users(azure_ad_organization)
# Check if the returned list of users has the correct length
assert len(users) == 2
# Check if the returned users have the correct properties
assert users[0].user_id == "user_id_1"
assert users[0].username == "[email protected]"
assert users[1].user_id == "user_id_2"
assert users[1].username == "[email protected]"
@pytest.mark.asyncio
async def test_list_users_empty(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test list_users with empty response (no users found)
users = await list_users(azure_ad_organization)
# Check if the returned list of users is empty
assert len(users) == 0
@pytest.mark.asyncio
async def test_create_user_success(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test create user with successful response
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
user_data = azure_ad_organization.request_data["users"].get(user.user_id)
assert bool(user_data)
assert user_data["userPrincipalName"] == username
@pytest.mark.asyncio
async def test_get_user_by_id_success(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test get user by id with successful response
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
response = await get_user(azure_ad_organization, user_id=user.user_id)
assert user.username == response.username
@pytest.mark.asyncio
async def test_get_user_by_id_not_found(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test get non-existent user by id with client error
with pytest.raises(ClientResponseError):
await get_user(azure_ad_organization, user_id=str(uuid.uuid4()))
@pytest.mark.asyncio
async def test_get_user_by_username_success(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test get user by username with successful response
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
assert bool(azure_ad_organization.request_data["users"].get(user.user_id))
response = await get_user(azure_ad_organization, username=user.username)
assert user.user_id == response.user_id
@pytest.mark.asyncio
async def test_get_user_by_username_not_found(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
    # Test attempting to get a non-existent user by username
with pytest.raises(Exception):
await get_user(azure_ad_organization, username=str(uuid.uuid4()))
@pytest.mark.asyncio
async def test_delete_user_success(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test delete user
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
assert bool(azure_ad_organization.request_data["users"].get(user.user_id))
await delete_user(azure_ad_organization, user=user, log_params={})
assert not bool(azure_ad_organization.request_data["users"].get(user.user_id))
@pytest.mark.asyncio
async def test_attempt_delete_nonexistent_user(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
# Test attempting to delete a non-existent user
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
user.user_id = "nonexistent_user_id"
response = await delete_user(azure_ad_organization, user=user, log_params={})
assert len(response[0].exceptions_seen) > 0
@pytest.mark.asyncio
async def test_update_user(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
template_user = user.copy()
template_user.display_name = "new_display_name"
await update_user_attributes(
azure_ad_organization, template_user, user, log_params={}
)
user_data = azure_ad_organization.request_data["users"].get(user.user_id)
assert user_data["displayName"] == template_user.display_name
@pytest.mark.asyncio
async def METHOD_NAME(
azure_ad_organization: MockAzureADOrganization, # noqa: F811 # intentional for mocks
):
user_suffix = randint(0, 100000)
username = f"user{user_suffix}@example.com"
user = await create_user(
azure_ad_organization,
username=username,
mail_nickname=f"user{user_suffix}",
display_name=f"user{user_suffix}",
)
user.user_id = "nonexistent_user_id"
template_user = user.copy()
template_user.display_name = "new_display_name"
response = await update_user_attributes(
azure_ad_organization, template_user, user, log_params={}
)
assert len(response[0].exceptions_seen) > 0 |
die flat | # ---
# jupyter:
# jupytext:
# cell_metadata_filter: -all
# custom_cell_magics: kql
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.11.2
# kernelspec:
# display_name: base
# language: python
# name: python3
# ---
# %% [markdown]
# # Common mistakes
#
# ## 1. Creating cells without `cell` decorator
#
# The cell decorator names cells deterministically and uniquely based on the name of the function and its parameters.
#
# It also uses a caching mechanism that improves performance and guards against duplicated names.
#
# ### 1.1 naming cells manually
#
# Naming cells manually is susceptible to name collisions
#
# In GDS you can't have two cells with the same name.
#
# For example: this code will raise a `duplicated cell name ValueError`
#
# ```python
# import gdsfactory as gf
#
# c1 = gf.Component("wg")
# c1 << gf.components.straight(length=5)
#
#
# c2 = gf.Component("wg")
# c2 << gf.components.straight(length=50)
#
#
# c3 = gf.Component("waveguides")
# wg1 = c3 << c1
# wg2 = c3 << c2
# wg2.movey(10)
# c3
# ```
#
# **Solution**: Use the `gf.cell` decorator for automatically naming your components.
# %%
import gdsfactory as gf
from gdsfactory.generic_tech import get_generic_pdk
gf.config.rich_output()
PDK = get_generic_pdk()
PDK.activate()
@gf.cell
def wg(length: float = 3):
return gf.components.straight(length=length)
print(wg(length=5))
print(wg(length=50))
# %% [markdown]
# ### 1.2 Not naming components with a unique and deterministic name
#
# If you don't wrap the function with `cell`, you still get unique names thanks to a unique identifier (`uuid`).
#
# This name will be different and non-deterministic for each invocation of the script.
#
# However, it will be hard for you to know where that cell came from.
# %%
c1 = gf.Component()
c2 = gf.Component()
print(c1.name)
print(c2.name)
# %% [markdown]
# Notice how gdsfactory raises a warning when you save these `Unnamed` components
# %%
c1.write_gds()
# %% [markdown]
# ### 1.3 Intermediate Unnamed cells
#
# While creating a cell, you should not create intermediate cells, because they won't be cached and you can end up with duplicated cell names or name conflicts, where one of the two cells sharing a name silently replaces the other.
#
# %%
@gf.cell
def die_bad():
"""c1 is an intermediate Unnamed cell"""
c1 = gf.Component()
c1 << gf.components.straight(length=10)
return gf.components.die_bbox(c1, street_width=10)
c = die_bad(cache=False)
print(c.references)
c.plot()
# %% [markdown]
# **Solution1** Don't use intermediate cells
#
# %%
@gf.cell
def die_good():
c = gf.Component()
c << gf.components.straight(length=10)
c << gf.components.die_bbox_frame(c.bbox, street_width=10)
return c
c = die_good(cache=False)
print(c.references)
c.plot()
# %% [markdown]
# **Solution2** You can flatten the cell, but you will lose the memory savings from cell references. Solution1 is more elegant.
#
# %%
@gf.cell
def METHOD_NAME():
"""c will be an intermediate unnamed cell"""
c = gf.Component()
c << gf.components.straight(length=10)
c2 = gf.components.die_bbox(c, street_width=10)
c2 = c2.flatten()
return c2
c = METHOD_NAME(cache=False)
print(c.references)
c.plot()
# %%
import gdsfactory as gf
@gf.cell
def dangerous_intermediate_cells(width=0.5):
"""Example that will show the dangers of using intermediate cells."""
c = gf.Component("safe")
c2 = gf.Component(
"dangerous"
) # This should be forbidden as it will create duplicated cells
c2 << gf.components.hline(width=width)
c << c2
return c
@gf.cell
def using_dangerous_intermediate_cells():
"""Example on how things can go wrong.
Here we try to create to lines with different widths
they end up with two duplicated cells and a name collision on the intermediate cell
"""
c = gf.Component()
c << dangerous_intermediate_cells(width=0.5)
r3 = c << dangerous_intermediate_cells(width=2)
r3.movey(5)
return c
c = using_dangerous_intermediate_cells()
c.plot_klayout()
# %%
for component in c.get_dependencies(recursive=True):
if not component._locked:
print(
f"Component {component.name!r} was NOT properly locked. "
"You need to write it into a function that has the @cell decorator."
)
# %% |
convert dict | # © 2017 Akretion (http://www.akretion.com)
# Sébastien BEAU <[email protected]>
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from collections import OrderedDict
from odoo import fields, models
from odoo.tools import ormcache
def partition(line, accessor):
"""Partition a recordset according to an accessor (e.g. a lambda).
Returns a dictionary whose keys are the values obtained from accessor,
and values are the items that have this value.
Example: partition([{"name": "ax"}, {"name": "by"}], lambda x: "x" in x["name"])
=> {True: [{"name": "ax"}], False: [{"name": "by"}]}
"""
result = {}
for item in line:
key = accessor(item)
if key not in result:
result[key] = []
result[key].append(item)
return result
def update_dict(data, fields, options):
"""Contruct a tree of fields.
Example:
{
"name": True,
"resource": True,
}
Order of keys is important.
"""
field = fields[0]
if len(fields) == 1:
if field == ".id":
field = "id"
data[field] = (True, options)
else:
if field not in data:
data[field] = (False, OrderedDict())
update_dict(data[field][1], fields[1:], options)
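# Illustrative example: update_dict({}, ["partner_id", "name"], {}) produces
# {"partner_id": (False, {"name": (True, {})})} -- leaves carry (True, options),
# intermediate nodes carry (False, subtree).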
def METHOD_NAME(dict_parser):
"""Convert dict returned by update_dict to list consistent w/ Odoo API.
    The list is composed of field dicts (for leaves) or (field dict, sub-parser list) tuples.
"""
parser = []
for field, value in dict_parser.items():
if value[0] is True: # is a leaf
parser.append(field_dict(field, value[1]))
else:
parser.append((field_dict(field), METHOD_NAME(value[1])))
return parser
def field_dict(field, options=None):
"""Create a parser dict for the field field."""
result = {"name": field.split(":")[0]}
if len(field.split(":")) > 1:
result["target"] = field.split(":")[1]
for option in options or {}:
if options[option]:
result[option] = options[option]
return result
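# Illustrative examples of the helpers above:
#   field_dict("partner_id:partner") -> {"name": "partner_id", "target": "partner"}
#   METHOD_NAME({"partner_id": (False, {"name": (True, {})})})
#     -> [({"name": "partner_id"}, [{"name": "name"}])]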
class IrExports(models.Model):
_inherit = "ir.exports"
language_agnostic = fields.Boolean(
default=False,
help="If set, will set the lang to False when exporting lines without lang,"
" otherwise it uses the lang in the given context to export these fields",
)
global_resolver_id = fields.Many2one(
comodel_name="ir.exports.resolver",
string="Custom global resolver",
domain="[('type', '=', 'global')]",
help="If set, will apply the global resolver to the result",
)
@ormcache(
"self.language_agnostic",
"self.global_resolver_id.id",
"tuple(self.export_fields.mapped('write_date'))",
)
def get_json_parser(self):
"""Creates a parser from ir.exports record and return it.
The final parser can be used to "jsonify" records of ir.export's model.
"""
self.ensure_one()
parser = {}
lang_to_lines = partition(self.export_fields, lambda l: l.lang_id.code)
lang_parsers = {}
for lang in lang_to_lines:
dict_parser = OrderedDict()
for line in lang_to_lines[lang]:
names = line.name.split("/")
if line.target:
names = line.target.split("/")
function = line.instance_method_name
options = {"resolver": line.resolver_id, "function": function}
update_dict(dict_parser, names, options)
lang_parsers[lang] = METHOD_NAME(dict_parser)
if list(lang_parsers.keys()) == [False]:
parser["fields"] = lang_parsers[False]
else:
parser["langs"] = lang_parsers
if self.global_resolver_id:
parser["resolver"] = self.global_resolver_id
if self.language_agnostic:
parser["language_agnostic"] = self.language_agnostic
return parser |
test replace | # Copyright (c) 2017 LINE Corporation
# These sources are released under the terms of the MIT license: see LICENSE
from unittest import mock
import requests
from django.test import override_settings
from django.urls import reverse
from promgen import models, tests, views
TEST_SETTINGS = tests.Data("examples", "promgen.yml").yaml()
TEST_IMPORT = tests.Data("examples", "import.json").raw()
TEST_REPLACE = tests.Data("examples", "replace.json").raw()
class RouteTests(tests.PromgenTest):
def setUp(self):
self.user = self.force_login(username="demo")
@override_settings(PROMGEN=TEST_SETTINGS)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch("promgen.signals._trigger_write_config")
@mock.patch("promgen.tasks.reload_prometheus")
def test_import(self, mock_write, mock_reload):
self.add_user_permissions(
"promgen.change_rule", "promgen.change_site", "promgen.change_exporter"
)
response = self.client.post(reverse("import"), {"config": TEST_IMPORT})
self.assertRoute(response, views.Import, 302, "Redirect to imported object")
self.assertCount(models.Service, 3, "Import one service (Fixture has two services)")
self.assertCount(models.Project, 4, "Import two projects")
self.assertCount(models.Exporter, 2, "Import two exporters")
self.assertCount(models.Host, 3, "Import three hosts")
@override_settings(PROMGEN=TEST_SETTINGS)
@override_settings(CELERY_TASK_ALWAYS_EAGER=True)
@mock.patch("promgen.signals._trigger_write_config")
@mock.patch("promgen.tasks.reload_prometheus")
def METHOD_NAME(self, mock_write, mock_reload):
self.add_user_permissions(
"promgen.change_rule", "promgen.change_site", "promgen.change_exporter"
)
response = self.client.post(reverse("import"), {"config": TEST_IMPORT})
self.assertRoute(response, views.Import, 302, "Redirect to imported object")
response = self.client.post(reverse("import"), {"config": TEST_REPLACE})
self.assertRoute(response, views.Import, 302, "Redirect to imported object (2)")
self.assertCount(models.Service, 3, "Import one service (Fixture has two services)")
        self.assertCount(models.Project, 4, "Import two projects (Fixture has 2 projects)")
self.assertCount(models.Exporter, 2, "Import two exporters")
self.assertCount(
models.Farm, 4, "Original two farms and one new farm (fixture has one farm)"
)
self.assertCount(models.Host, 5, "Original 3 hosts and two new ones")
@mock.patch("requests.get")
def test_scrape(self, mock_get):
shard = models.Shard.objects.create(name="test_scrape_shard")
service = models.Service.objects.create(name="test_scrape_service")
farm = models.Farm.objects.create(name="test_scrape_farm")
farm.host_set.create(name="example.com")
project = models.Project.objects.create(
name="test_scrape_project", service=service, shard=shard, farm=farm
)
# Uses the scrape target as the key, and the POST body that should
# result in that URL
exporters = {
"http://example.com:8000/metrics": {
"target": "#exporterresult",
"job": "foo",
"port": 8000,
"scheme": "http",
},
"https://example.com:8000/foo": {
"target": "#exporterresult",
"job": "foo",
"port": 8000,
"path": "/foo",
"scheme": "https",
},
}
for url, body in exporters.items():
response = requests.Response()
response.url = url
response.status_code = 200
mock_get.return_value = response
# For each POST body, check to see that we generate and attempt to
# scrape the correct URL
response = self.client.post(reverse("exporter-scrape", kwargs={"pk": project.pk}), body)
self.assertRoute(response, views.ExporterScrape, 200)
self.assertEqual(mock_get.call_args[0][0], url)
def test_failed_permission(self):
# Test for redirect
for request in [{"viewname": "rule-new", "args": ("site", 1)}]:
response = self.client.get(reverse(**request))
self.assertRoute(response, views.AlertRuleRegister, 302)
self.assertTrue(response.url.startswith("/login"))
def test_other_routes(self):
self.add_user_permissions("promgen.add_rule", "promgen.change_site")
for request in [{"viewname": "rule-new", "args": ("site", 1)}]:
response = self.client.get(reverse(**request))
self.assertRoute(response, views.AlertRuleRegister, 200) |
test mandatory outvol | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
import pytest
from looseversion import LooseVersion
from nipype.testing.fixtures import create_files_in_directory
from nipype.interfaces import freesurfer
from nipype.interfaces.freesurfer import Info
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_robustregister(create_files_in_directory):
filelist, outdir = create_files_in_directory
reg = freesurfer.RobustRegister()
cwd = os.getcwd()
# make sure command gets called
assert reg.cmd == "mri_robust_register"
# test raising error with mandatory args absent
with pytest.raises(ValueError):
reg.run()
# .inputs based parameters setting
reg.inputs.source_file = filelist[0]
reg.inputs.target_file = filelist[1]
reg.inputs.auto_sens = True
assert reg.cmdline == (
"mri_robust_register --satit --lta "
"%s/%s_robustreg.lta --mov %s --dst %s"
% (cwd, filelist[0][:-4], filelist[0], filelist[1])
)
# constructor based parameter setting
reg2 = freesurfer.RobustRegister(
source_file=filelist[0],
target_file=filelist[1],
outlier_sens=3.0,
out_reg_file="foo.lta",
half_targ=True,
)
assert reg2.cmdline == (
"mri_robust_register --halfdst %s_halfway.nii --lta foo.lta "
"--sat 3.0000 --mov %s --dst %s"
% (os.path.join(outdir, filelist[1][:-4]), filelist[0], filelist[1])
)
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_fitmsparams(create_files_in_directory):
filelist, outdir = create_files_in_directory
fit = freesurfer.FitMSParams()
# make sure command gets called
assert fit.cmd == "mri_ms_fitparms"
# test raising error with mandatory args absent
with pytest.raises(ValueError):
fit.run()
# .inputs based parameters setting
fit.inputs.in_files = filelist
fit.inputs.out_dir = outdir
assert fit.cmdline == "mri_ms_fitparms {} {} {}".format(
filelist[0],
filelist[1],
outdir,
)
# constructor based parameter setting
fit2 = freesurfer.FitMSParams(
in_files=filelist, te_list=[1.5, 3.5], flip_list=[20, 30], out_dir=outdir
)
assert fit2.cmdline == (
"mri_ms_fitparms -te %.3f -fa %.1f %s -te %.3f -fa %.1f %s %s"
% (1.500, 20.0, filelist[0], 3.500, 30.0, filelist[1], outdir)
)
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_synthesizeflash(create_files_in_directory):
filelist, outdir = create_files_in_directory
syn = freesurfer.SynthesizeFLASH()
# make sure command gets called
assert syn.cmd == "mri_synthesize"
# test raising error with mandatory args absent
with pytest.raises(ValueError):
syn.run()
# .inputs based parameters setting
syn.inputs.t1_image = filelist[0]
syn.inputs.pd_image = filelist[1]
syn.inputs.flip_angle = 30
syn.inputs.te = 4.5
syn.inputs.tr = 20
assert syn.cmdline == (
"mri_synthesize 20.00 30.00 4.500 %s %s %s"
% (filelist[0], filelist[1], os.path.join(outdir, "synth-flash_30.mgz"))
)
# constructor based parameters setting
syn2 = freesurfer.SynthesizeFLASH(
t1_image=filelist[0], pd_image=filelist[1], flip_angle=20, te=5, tr=25
)
assert syn2.cmdline == (
"mri_synthesize 25.00 20.00 5.000 %s %s %s"
% (filelist[0], filelist[1], os.path.join(outdir, "synth-flash_20.mgz"))
)
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def METHOD_NAME(create_files_in_directory):
filelist, outdir = create_files_in_directory
mni = freesurfer.MNIBiasCorrection()
# make sure command gets called
assert mni.cmd == "mri_nu_correct.mni"
# test raising error with mandatory args absent
with pytest.raises(ValueError):
mni.cmdline
# test with minimal args
mni.inputs.in_file = filelist[0]
base, ext = os.path.splitext(os.path.basename(filelist[0]))
if ext == ".gz":
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
assert mni.cmdline == (
f"mri_nu_correct.mni --i {filelist[0]} --n 4 --o {base}_output{ext}"
)
# test with custom outfile
mni.inputs.out_file = "new_corrected_file.mgz"
assert mni.cmdline == (
"mri_nu_correct.mni --i %s --n 4 --o new_corrected_file.mgz" % (filelist[0])
)
# constructor based tests
mni2 = freesurfer.MNIBiasCorrection(
in_file=filelist[0], out_file="bias_corrected_output", iterations=2
)
assert mni2.cmdline == (
"mri_nu_correct.mni --i %s --n 2 --o bias_corrected_output" % filelist[0]
)
@pytest.mark.skipif(freesurfer.no_freesurfer(), reason="freesurfer is not installed")
def test_bbregister(create_files_in_directory):
filelist, outdir = create_files_in_directory
bbr = freesurfer.BBRegister()
# make sure command gets called
assert bbr.cmd == "bbregister"
# test raising error with mandatory args absent
with pytest.raises(ValueError):
bbr.cmdline
bbr.inputs.subject_id = "fsaverage"
bbr.inputs.source_file = filelist[0]
bbr.inputs.contrast_type = "t2"
# Check that 'init' is mandatory in FS < 6, but not in 6+
if Info.looseversion() < LooseVersion("6.0.0"):
with pytest.raises(ValueError):
bbr.cmdline
else:
bbr.cmdline
bbr.inputs.init = "fsl"
base, ext = os.path.splitext(os.path.basename(filelist[0]))
if ext == ".gz":
base, _ = os.path.splitext(base)
assert bbr.cmdline == (
"bbregister --t2 --init-fsl "
"--reg {base}_bbreg_fsaverage.dat "
"--mov {full} --s fsaverage".format(full=filelist[0], base=base)
)
def test_FSVersion():
"""Check that FSVersion is a string that can be compared with LooseVersion"""
assert isinstance(freesurfer.preprocess.FSVersion, str)
assert LooseVersion(freesurfer.preprocess.FSVersion) >= LooseVersion("0") |
test case | # !/usr/bin/env python3
import os
# This is the regression test base module used by all test cases.
# There are two different schemes that differ in the executable path.
# The first one calls the Python script through CTest, such as test_0d_regression_test.
# The other one calls the Python script directly, by hand.
# They use different relative paths and correspond to different classes.
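# Illustrative usage in a test script (case, body, and parameter names are hypothetical):
#   sphinxsys = SphinxsysRegressionTest("test_2d_dambreak", "WaterBody", "TotalMechanicalEnergy")
#   sphinxsys.run_case()
#   converged = sphinxsys.read_dat_file()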
class SphinxsysRegressionTestByCTest:
def __init__(self, casename, bodyname, parametername):
self.sphinxsys_exec_path = os.path.abspath(os.path.join(os.getcwd()))
self.sphinxsys_case_path = os.path.abspath(os.path.join(self.sphinxsys_exec_path))
self.sphinxsys_case_name = casename
self.sphinxsys_body_name = bodyname
self.sphinxsys_parameter_name = parametername
self.enter_sphinxsys_exec_folder = f"cd {self.sphinxsys_exec_path};"
self.enter_sphinxsys_case_folder = f"cd {self.sphinxsys_case_path};"
self.input_file_path = os.path.join(self.sphinxsys_exec_path, "input")
self.condition_file_path = os.path.join(self.input_file_path, f"{bodyname}_{parametername}_runtimes.dat")
def compile_case(self) -> None:
print('Start compiling test case....')
command = "make -j8"
os.system(self.enter_sphinxsys_case_folder)
os.system(command)
print('Compiling test case is finished...')
def run_particle_relaxation(self) -> None:
print('Start particle relaxation for the simulation...')
command = f".{os.sep}{self.sphinxsys_case_name} --relax=true"
os.system(self.enter_sphinxsys_exec_folder)
os.system(command)
print('Simulating case is finished...')
def run_case(self) -> None:
print('Start case simulation...')
print(self.enter_sphinxsys_exec_folder)
command = f".{os.sep}{self.sphinxsys_case_name} --regression=true"
os.system(self.enter_sphinxsys_exec_folder)
os.system(command)
print('Simulating case is finished...')
def run_case_with_reload(self) -> None:
print('Start case simulation with particle reload...')
print(self.enter_sphinxsys_exec_folder)
command = f".{os.sep}{self.sphinxsys_case_name} --reload=true --regression=true"
os.system(self.enter_sphinxsys_exec_folder)
os.system(command)
print('Simulating case is finished...')
def read_dat_file(self):
file = open(self.condition_file_path)
ifconverged = file.readline(4)
file.close()
return ifconverged
class SphinxsysRegressionTest:
def __init__(self, casename, bodyname, parametername):
self.sphinxsys_exec_path = os.path.abspath(os.path.join(os.getcwd(), ".."))
self.sphinxsys_case_path = os.path.abspath(os.path.join(self.sphinxsys_exec_path, ".."))
self.sphinxsys_src_path = os.path.join(self.sphinxsys_case_path, "src")
self.sphinxsys_rld_path = os.path.join(self.sphinxsys_src_path, "reload")
self.sphinxsys_case_name = casename
self.sphinxsys_body_name = bodyname
self.sphinxsys_parameter_name = parametername
self.enter_sphinxsys_exec_folder = f"cd {self.sphinxsys_exec_path};"
self.enter_sphinxsys_case_folder = f"cd {self.sphinxsys_case_path};"
self.input_file_path = os.path.join(self.sphinxsys_exec_path, "input")
self.condition_file_path = os.path.join(self.input_file_path, f"{bodyname}_{parametername}_runtimes.dat")
def compile_case(self) -> None:
print('Start compiling test case....')
command = "make -j8"
os.system(self.enter_sphinxsys_case_folder)
os.system(command)
print('Compiling test case is finished...')
def METHOD_NAME(self) -> None:
print('Start test case...')
command = "make test"
os.system(self.enter_sphinxsys_case_folder)
os.system(command)
print('Testing case is finished...')
def copy_reload(self) -> None:
print('Start copy the reload file...')
command = "cp -r reload bin"
os.system(self.enter_sphinxsys_case_folder)
os.system(command)
print('Copying the reload file is finished...')
def run_particle_relaxation(self) -> None:
print('Start particle relaxation for the simulation...')
command = f".{os.sep}{self.sphinxsys_case_name} --relax=true"
os.chdir(self.sphinxsys_exec_path)
os.system(command)
print('Simulating case is finished...')
def run_case(self) -> None:
print('Start case simulation...')
print(self.enter_sphinxsys_exec_folder)
command = f".{os.sep}{self.sphinxsys_case_name} --regression=true"
os.chdir(self.sphinxsys_exec_path)
os.system(command)
print('Simulating case is finished...')
def run_case_with_reload(self) -> None:
print('Start case simulation with particle reload...')
print(self.enter_sphinxsys_exec_folder)
command = f".{os.sep}{self.sphinxsys_case_name} --reload=true --regression=true"
os.chdir(self.sphinxsys_exec_path)
os.system(command)
print('Simulating case is finished...')
def read_dat_file(self):
file = open(self.condition_file_path)
ifconverged = file.readline(4)
file.close()
return ifconverged |
test boto wait for certificate validation | import pytest
from moto import settings as moto_settings
from moto.ec2 import utils as ec2_utils
from localstack.aws.accounts import get_aws_account_id
from localstack.testing.aws.util import is_aws_cloud
from localstack.testing.pytest import markers
from localstack.utils.aws import aws_stack
from localstack.utils.strings import short_uid
from localstack.utils.sync import retry
DIGICERT_ROOT_CERT = """
-----BEGIN CERTIFICATE-----
MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
6pZjamVFkpUBtA==
-----END CERTIFICATE-----
"""
class TestACM:
@markers.aws.unknown
def test_import_certificate(self, aws_client):
certs_before = aws_client.acm.list_certificates().get("CertificateSummaryList", [])
with pytest.raises(Exception) as exec_info:
aws_client.acm.import_certificate(Certificate=b"CERT123", PrivateKey=b"KEY123")
assert "PEM" in str(exec_info)
private_key = ec2_utils.random_key_pair()["material"]
result = None
try:
result = aws_client.acm.import_certificate(
Certificate=DIGICERT_ROOT_CERT, PrivateKey=private_key
)
assert "CertificateArn" in result
expected_arn = "arn:aws:acm:{0}:{1}:certificate".format(
aws_stack.get_region(),
get_aws_account_id(),
)
acm_cert_arn = result["CertificateArn"].split("/")[0]
assert expected_arn == acm_cert_arn
certs_after = aws_client.acm.list_certificates().get("CertificateSummaryList", [])
assert len(certs_before) + 1 == len(certs_after)
finally:
if result is not None:
aws_client.acm.delete_certificate(CertificateArn=result["CertificateArn"])
@markers.aws.unknown
def test_domain_validation(self, acm_request_certificate, aws_client):
certificate_arn = acm_request_certificate()["CertificateArn"]
result = aws_client.acm.describe_certificate(CertificateArn=certificate_arn)
options = result["Certificate"]["DomainValidationOptions"]
assert len(options) == 1
@markers.aws.unknown
def METHOD_NAME(
self, acm_request_certificate, aws_client, monkeypatch
):
monkeypatch.setattr(moto_settings, "ACM_VALIDATION_WAIT", 1)
certificate_arn = acm_request_certificate()["CertificateArn"]
waiter = aws_client.acm.get_waiter("certificate_validated")
waiter.wait(CertificateArn=certificate_arn, WaiterConfig={"Delay": 0.5, "MaxAttempts": 3})
@markers.aws.validated
@markers.snapshot.skip_snapshot_verify(paths=["$..Certificate.SignatureAlgorithm"])
def test_certificate_for_subdomain_wildcard(
self, acm_request_certificate, aws_client, snapshot, monkeypatch
):
snapshot.add_transformer(snapshot.transform.key_value("OID"))
snapshot.add_transformer(snapshot.transform.key_value("Serial"))
monkeypatch.setattr(moto_settings, "ACM_VALIDATION_WAIT", 2)
# request certificate for subdomain
domain_name = f"test-domain-{short_uid()}.localhost.localstack.cloud"
subdomain_pattern = f"*.{domain_name}"
create_response = acm_request_certificate(
ValidationMethod="DNS", DomainName=subdomain_pattern
)
cert_arn = create_response["CertificateArn"]
snapshot.add_transformer(snapshot.transform.regex(domain_name, "<domain-name>"))
cert_id = cert_arn.split("certificate/")[-1]
snapshot.add_transformer(snapshot.transform.regex(cert_id, "<cert-id>"))
snapshot.match("request-cert", create_response)
def _get_cert_with_records():
response = aws_client.acm.describe_certificate(CertificateArn=cert_arn)
assert response["Certificate"]["DomainValidationOptions"][0]["ResourceRecord"]
return response
# wait for cert with ResourceRecord CNAME entry
response = retry(_get_cert_with_records, sleep=1, retries=30)
dns_options = response["Certificate"]["DomainValidationOptions"][0]["ResourceRecord"]
snapshot.add_transformer(
snapshot.transform.regex(dns_options["Name"].split(".")[0], "<record-prefix>")
)
snapshot.add_transformer(snapshot.transform.regex(dns_options["Value"], "<record-value>"))
snapshot.match("describe-cert", response)
if is_aws_cloud():
# Wait until DNS entry has been added (needs to be done manually!)
# Note: When running parity tests against AWS, we need to add the CNAME record to our DNS
# server (currently with gandi.net), to enable validation of the certificate.
prompt = (
f"Please add the following CNAME entry to the LocalStack DNS server, then hit [ENTER] once "
f"the certificate has been validated in AWS: {dns_options['Name']} = {dns_options['Value']}"
)
input(prompt)
def _get_cert_issued():
response = aws_client.acm.describe_certificate(CertificateArn=cert_arn)
assert response["Certificate"]["Status"] == "ISSUED"
return response
# get cert again after validation
response = retry(_get_cert_issued, sleep=1, retries=30)
snapshot.match("describe-cert-2", response)
# also snapshot response of cert summaries via list_certificates
response = aws_client.acm.list_certificates()
summaries = response.get("CertificateSummaryList") or []
matching = [cert for cert in summaries if cert["CertificateArn"] == cert_arn]
snapshot.match("list-cert", matching) |
package info | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.build import check_min_cppstd
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
from conan.tools.files import get, copy, rmdir
from conan.tools.scm import Version
import os
required_conan_version = ">=1.53.0"
class SevenBitDIConan(ConanFile):
name = "7bitdi"
homepage = "https://github.com/7bitcoder/7bitDI"
description = "7bitDI is a simple C++ dependency injection library."
topics = ("cpp17", "dependency-injector", "injector", "header-only")
url = "https://github.com/conan-io/conan-center-index"
license = "MIT"
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"header_only": [True, False],
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"header_only": False,
"shared": False,
"fPIC": True,
}
@property
def _minimum_compilers_version(self):
return {
"Visual Studio": "14",
"msvc": "192",
"gcc": "6",
"clang": "6",
"apple-clang": "10",
}
def config_options(self):
if self.settings.os == "Windows":
self.options.rm_safe("fPIC")
def configure(self):
if self.options.get_safe("shared") or self.options.header_only:
self.options.rm_safe("fPIC")
if self.options.header_only:
self.options.rm_safe("shared")
def layout(self):
cmake_layout(self, src_folder="src")
def package_id(self):
if self.info.options.header_only:
self.info.clear()
def validate(self):
compiler = self.settings.compiler
compiler_name = str(compiler)
if compiler.get_safe("cppstd"):
check_min_cppstd(self, 17)
minimum_version = self._minimum_compilers_version.get(compiler_name, False)
if minimum_version and Version(compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"Requires compiler {compiler_name} minimum version: {minimum_version} with C++17 support."
)
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
if not self.options.header_only:
tc = CMakeToolchain(self)
tc.variables["_7BIT_DI_BUILD_EXAMPLES"] = False
tc.variables["_7BIT_DI_BUILD_TESTS"] = False
tc.variables["_7BIT_DI_BUILD_DOC"] = False
tc.variables["_7BIT_DI_BUILD_SINGLE_HEADER"] = False
tc.variables["_7BIT_DI_INSTALL"] = True
tc.variables["_7BIT_DI_LIBRARY_TYPE"] = self.getSevenBitDILibraryType()
tc.generate()
def getSevenBitDILibraryType(self):
if self.options.header_only:
return "HeaderOnly"
elif self.options.shared:
return "Shared"
else:
return "Static"
def build(self):
if not self.options.header_only:
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(
self,
"LICENSE",
dst=os.path.join(self.package_folder, "licenses"),
src=self.source_folder,
)
if self.options.header_only:
copy(
self,
src=os.path.join(self.source_folder, "Include"),
pattern="*.hpp",
dst=os.path.join(self.package_folder, "include"),
)
else:
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def METHOD_NAME(self):
self.cpp_info.set_property("cmake_file_name", "7bitDI")
self.cpp_info.set_property("cmake_target_name", "7bitDI::7bitDI")
if self.options.header_only:
self.cpp_info.libs = []
self.cpp_info.bindirs = []
else:
suffix = "d" if self.settings.build_type == "Debug" else ""
self.cpp_info.libs = ["7bitDI" + suffix]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.system_libs.append("m")
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "7bitDI"
self.cpp_info.names["cmake_find_package_multi"] = "7bitDI" |
test backup current hour | from django.test import TestCase
from mock import patch, call
from datetime import datetime, timedelta
from model_mommy import mommy
from backup.tasks import make_databases_backup
from backup.models import Snapshot, BackupGroup
from dbaas.tests.helpers import DatabaseHelper, InfraHelper, PlanHelper
from physical.models import Environment
FAKE_NOW = datetime(2020, 1, 1, 5, 10, 00)
class FakeDatetime(datetime):
@staticmethod
def now():
return FAKE_NOW
@patch('backup.tasks.make_instance_snapshot_backup')
@patch('backup.models.BackupGroup.save')
@patch('notification.models.TaskHistory.register')
@patch('backup.tasks.get_worker_name')
class TestMakeDatabasesBackup(TestCase):
def setUp(self):
self.backup_hour = 5
self.year = 2020
self.month = 1
self.day = 1
mommy.make(
'Configuration', name='backup_hour', value=str(self.backup_hour)
)
mommy.make(
'Configuration', name='parallel_backup', value=0
)
self.dev_env = mommy.make(
'Environment', name='dev', stage=Environment.DEV
)
mommy.make('Environment', name='prod', stage=Environment.PROD)
_, _, _, self.plan = PlanHelper.create()
self.infra = InfraHelper.create(
backup_hour=self.backup_hour,
plan__has_persistence=True,
environment=self.dev_env,
plan=self.plan
)
self.instance = mommy.make(
'Instance', databaseinfra=self.infra
)
self.database = DatabaseHelper.create(
databaseinfra=self.infra,
environment=self.dev_env
)
@patch('backup.tasks.datetime', FakeDatetime)
def METHOD_NAME(
self, get_worker_name_mock, task_register, save_backup_group,
make_instance_snapshot_backup
):
get_worker_name_mock.return_value = 'test'
group = BackupGroup()
save_backup_group.return_value = group
make_instance_snapshot_backup.return_value.status.return_value = (
Snapshot.SUCCESS
)
make_databases_backup()
make_instance_snapshot_backup.assert_called_with(
current_hour=self.backup_hour, instance=self.instance, error={},
group=group
)
@patch('backup.tasks.datetime', FakeDatetime)
def test_current_hour_without_pending_backup(
self, get_worker_name_mock, task_register, save_backup_group,
make_instance_snapshot_backup
):
infra_mock = InfraHelper.create(
name='backup_test',
backup_hour=self.backup_hour-1,
plan__has_persistence=True,
environment=self.dev_env,
plan=self.plan,
)
DatabaseHelper.create(
databaseinfra=infra_mock,
environment=self.dev_env
)
instance_mock = mommy.make(
'Instance', databaseinfra=infra_mock
)
get_worker_name_mock.return_value = 'test'
group = BackupGroup()
save_backup_group.return_value = group
snapshot = mommy.make(
'Snapshot', instance=instance_mock, group=group,
status=Snapshot.SUCCESS, end_at=FAKE_NOW - timedelta(hours=1)
)
make_instance_snapshot_backup.return_value = snapshot
make_databases_backup()
make_instance_snapshot_backup.assert_called_once_with(
current_hour=self.backup_hour, instance=self.instance, error={},
group=group
)
@patch('backup.tasks.datetime', FakeDatetime)
def test_current_hour_with_pending_backup(
self, get_worker_name_mock, task_register, save_backup_group,
make_instance_snapshot_backup
):
infra_mock = InfraHelper.create(
name='pending_backup_test',
backup_hour=self.backup_hour-1,
plan__has_persistence=True,
environment=self.dev_env,
plan=self.plan
)
DatabaseHelper.create(
databaseinfra=infra_mock, environment=self.dev_env
)
instance_mock = mommy.make(
'Instance', databaseinfra=infra_mock
)
get_worker_name_mock.return_value = 'test'
group = BackupGroup()
save_backup_group.return_value = group
make_instance_snapshot_backup.return_value.status.return_value = (
Snapshot.SUCCESS
)
make_databases_backup()
calls = [
call(
current_hour=self.backup_hour, instance=self.instance,
error={}, group=group
),
call(
current_hour=self.backup_hour, instance=instance_mock,
error={}, group=group
)
]
make_instance_snapshot_backup.assert_has_calls(calls, any_order=True)
def test_snapshot_with_warning(
self, get_worker_name_mock, task_register, save_backup_group,
make_instance_snapshot_backup
):
get_worker_name_mock.return_value = 'test'
group = BackupGroup()
save_backup_group.return_value = group
snapshot = mommy.make(
'Snapshot', instance=self.instance, group=group,
status=Snapshot.WARNING
)
make_instance_snapshot_backup.return_value = snapshot
make_databases_backup()
self.assertEqual(
snapshot.status, Snapshot.WARNING
)
def test_snapshot_with_error(
self, get_worker_name_mock, task_register, save_backup_group,
make_instance_snapshot_backup
):
get_worker_name_mock.return_value = 'test'
group = BackupGroup()
save_backup_group.return_value = group
snapshot = mommy.make(
'Snapshot', instance=self.instance, group=group,
status=Snapshot.ERROR
)
make_instance_snapshot_backup.return_value = snapshot
make_databases_backup()
self.assertEqual(
snapshot.status, Snapshot.ERROR
) |
make one | from Acquisition import aq_parent
from plone.base.interfaces import ILoginSchema
from plone.registry.interfaces import IRegistry
from Products.CMFCore.tests.base.dummy import DummyContent
from Products.CMFCore.tests.base.dummy import DummyFolder
from Products.CMFCore.tests.base.dummy import DummySite
from zope.component import getSiteManager
import unittest
class DummyFolder(DummyFolder):
def absolute_url(self):
return "/".join([aq_parent(self).absolute_url(), self.getId()])
class DummyLoginSettings:
allow_external_login_sites = [
"http://external1",
"http://external2/",
"http://external3/site",
"http://external4/site/",
]
class DummyRegistry(DummyContent):
def __getitem__(self, name, default=None):
if name == "plone.allow_external_login_sites":
return DummyLoginSettings().allow_external_login_sites
return default
def forInterface(self, iface, prefix=""):
if iface == ILoginSchema:
return DummyLoginSettings()
class TestURLTool(unittest.TestCase):
def setUp(self):
self.site = DummySite(id="foo")
self.site._setObject("foo", DummyFolder(id="foo"))
self.site.foo._setObject("doc1", DummyContent(id="doc1"))
mock_registry = DummyRegistry(id="portal_registry")
self.site.portal_registry = mock_registry
sm = getSiteManager()
sm.registerUtility(component=mock_registry, provided=IRegistry)
def METHOD_NAME(self, *args, **kw):
from Products.CMFPlone.URLTool import URLTool
url_tool = URLTool(*args, **kw)
return url_tool.__of__(self.site)
def test_isURLInPortal(self):
# First test what the absolute url of the site is, otherwise these
# tests look really weird. Apparently our domain is www.foobar.com.
self.assertEqual(self.site.absolute_url(), "http://www.foobar.com/bar/foo")
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP("http://www.foobar.com/bar/foo/folder"))
self.assertTrue(iURLiP("http://www.foobar.com/bar/foo"))
self.assertFalse(iURLiP("http://www.foobar.com/bar2/foo"))
self.assertTrue(iURLiP("https://www.foobar.com/bar/foo/folder"))
self.assertFalse(iURLiP("http://www.foobar.com:8080/bar/foo/folder"))
self.assertFalse(iURLiP("http://www.foobar.com/bar"))
self.assertTrue(iURLiP("//www.foobar.com/bar/foo"))
self.assertFalse(iURLiP("/images"))
self.assertTrue(iURLiP("/bar/foo/foo"))
def test_isURLInPortalRelative(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
# non-root relative urls will need a current context to be passed in
self.assertTrue(iURLiP("images/img1.jpg"))
self.assertTrue(iURLiP("./images/img1.jpg"))
# /bar/foo/something
self.assertTrue(iURLiP("../something", self.site.foo.doc1))
# /bar/afolder
self.assertFalse(iURLiP("../../afolder", self.site.foo.doc1))
# /afolder
self.assertFalse(iURLiP("../../../afolder", self.site.foo.doc1))
# /../afolder? How do we have more ../'s than there are parts in
# the URL?
self.assertFalse(iURLiP("../../../../afolder", self.site.foo.doc1))
# /bar/foo/afolder
self.assertTrue(iURLiP("../../foo/afolder", self.site.foo.doc1))
def test_isURLInPortalExternal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP("http://external1"))
self.assertTrue(iURLiP("http://external1/"))
self.assertTrue(iURLiP("http://external1/something"))
self.assertTrue(iURLiP("http://external2"))
self.assertTrue(iURLiP("http://external2/"))
self.assertTrue(iURLiP("http://external2/something"))
self.assertTrue(iURLiP("http://external3/site"))
self.assertTrue(iURLiP("http://external3/site/"))
self.assertTrue(iURLiP("http://external3/site/something"))
self.assertTrue(iURLiP("http://external4/site"))
self.assertTrue(iURLiP("http://external4/site/"))
self.assertTrue(iURLiP("http://external4/site/something"))
self.assertFalse(iURLiP("http://external3/other"))
self.assertFalse(iURLiP("http://external4/other"))
self.assertFalse(iURLiP("http://external5"))
self.assertFalse(iURLiP("http://external11"))
def test_script_tag_url_not_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP('<script>alert("hi");</script>'))
self.assertFalse(iURLiP('<sCript>alert("hi");</script>'))
self.assertFalse(iURLiP("%3Cscript%3Ealert(%22hi%22)%3B%3C%2Fscript%3E"))
self.assertFalse(iURLiP("%3CsCript%3Ealert(%22hi%22)%3B%3C%2Fscript%3E"))
def test_inline_url_not_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("javascript%3Aalert(3)"))
self.assertFalse(iURLiP("jaVascript%3Aalert(3)"))
self.assertFalse(iURLiP("javascript:alert(3)"))
self.assertFalse(iURLiP("jaVascript:alert(3)"))
def test_double_back_slash(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("\\\\www.example.com"))
def test_escape(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP(r"\/\/www.example.com"))
self.assertFalse(iURLiP(r"\%2F\%2Fwww.example.com"))
self.assertFalse(iURLiP(r"\%2f\%2fwww.example.com"))
self.assertFalse(iURLiP("%2F%2Fwww.example.com"))
self.assertFalse(iURLiP("%2f%2fwww.example.com"))
def test_regression_absolute_url_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertTrue(iURLiP(url_tool()))
self.assertTrue(iURLiP(url_tool() + "/shrubbery?knights=ni#ekki-ekki"))
def test_mailto_simple_not_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("mailto:[email protected]"))
def test_mailto_complex_not_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(
iURLiP(
"mailto:192.168.163.154:8080/Plone'"
""><html><svg onload=alert(document"
".domain)></html>"
)
)
def test_data_not_in_portal(self):
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(
iURLiP("data:text/html%3bbase64,PHNjcmlwdD5hbGVydCgnWFNTJyk8L3NjcmlwdD4K")
)
def test_double_slash(self):
# I wondered if this might be a problem after reading
# https://bugs.python.org/issue23505
# Apparently not, but let's test it.
url_tool = self.METHOD_NAME()
iURLiP = url_tool.isURLInPortal
self.assertFalse(iURLiP("//www.google.com"))
self.assertFalse(iURLiP("////www.google.com")) |
supports range index | """
AutoARIMA
---------
"""
from typing import Optional
from pmdarima import AutoARIMA as PmdAutoARIMA
from darts.logging import get_logger, raise_if
from darts.models.forecasting.forecasting_model import (
FutureCovariatesLocalForecastingModel,
)
from darts.timeseries import TimeSeries
logger = get_logger(__name__)
class AutoARIMA(FutureCovariatesLocalForecastingModel):
def __init__(
self, *autoarima_args, add_encoders: Optional[dict] = None, **autoarima_kwargs
):
"""Auto-ARIMA
This implementation is a thin wrapper around `pmdarima AutoARIMA model
<https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.AutoARIMA.html>`_,
which provides functionality similar to R's `auto.arima
<https://www.rdocumentation.org/packages/forecast/versions/7.3/topics/auto.arima>`_.
This model supports the same parameters as the pmdarima AutoARIMA model.
See `pmdarima documentation
<https://alkaline-ml.com/pmdarima/modules/generated/pmdarima.arima.AutoARIMA.html>`_
for an extensive documentation and a list of supported parameters.
.. note::
For a faster and probabilistic version of AutoARIMA, checkout
the :class:`StatsForecastAutoARIMA` model.
Parameters
----------
autoarima_args
Positional arguments for the pmdarima.AutoARIMA model
autoarima_kwargs
Keyword arguments for the pmdarima.AutoARIMA model
add_encoders
A large number of future covariates can be automatically generated with `add_encoders`.
This can be done by adding multiple pre-defined index encoders and/or custom user-made functions that
will be used as index encoders. Additionally, a transformer such as Darts' :class:`Scaler` can be added to
transform the generated covariates. This happens all under one hood and only needs to be specified at
model creation.
Read :meth:`SequentialEncoder <darts.dataprocessing.encoders.SequentialEncoder>` to find out more about
``add_encoders``. Default: ``None``. An example showing some of ``add_encoders`` features:
.. highlight:: python
.. code-block:: python
add_encoders={
'cyclic': {'future': ['month']},
'datetime_attribute': {'future': ['hour', 'dayofweek']},
'position': {'future': ['relative']},
'custom': {'future': [lambda idx: (idx.year - 1950) / 50]},
'transformer': Scaler()
}
..
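A minimal usage sketch (illustrative only): it assumes a univariate
``TimeSeries`` named ``series`` and that pmdarima is installed; the
keyword arguments shown are ordinary pmdarima options, not special defaults.
.. highlight:: python
.. code-block:: python
from darts.models import AutoARIMA
model = AutoARIMA(start_p=1, max_p=5, seasonal=False)
model.fit(series)
forecast = model.predict(n=6)
..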
"""
super().__init__(add_encoders=add_encoders)
self.model = PmdAutoARIMA(*autoarima_args, **autoarima_kwargs)
self.trend = self.model.trend
@property
def supports_multivariate(self) -> bool:
return False
def _fit(self, series: TimeSeries, future_covariates: Optional[TimeSeries] = None):
super()._fit(series, future_covariates)
self._assert_univariate(series)
series = self.training_series
self.model.fit(
series.values(), X=future_covariates.values() if future_covariates else None
)
return self
def _predict(
self,
n: int,
future_covariates: Optional[TimeSeries] = None,
num_samples: int = 1,
verbose: bool = False,
):
super()._predict(n, future_covariates, num_samples)
forecast = self.model.predict(
n_periods=n, X=future_covariates.values() if future_covariates else None
)
return self._build_forecast_series(forecast)
@property
def min_train_series_length(self) -> int:
return 10
@property
def METHOD_NAME(self) -> bool:
raise_if(
self.trend and self.trend != "c",
"'trend' is not None. Range indexing is not supported in that case.",
logger,
)
return True |
uuid | import os
import shutil
import yaml
from dataclasses import dataclass, field
from jinja2 import Template
from mage_ai.data_preparation.models.block import Block
from mage_ai.data_preparation.models.constants import (
BLOCK_LANGUAGE_TO_FILE_EXTENSION,
BlockColor,
BlockLanguage,
BlockType,
)
from mage_ai.data_preparation.models.file import File
from mage_ai.data_preparation.models.pipeline import Pipeline
from mage_ai.data_preparation.models.custom_templates.constants import (
DIRECTORY_FOR_BLOCK_TEMPLATES,
METADATA_FILENAME_WITH_EXTENSION,
)
from mage_ai.data_preparation.models.custom_templates.utils import custom_templates_directory
from mage_ai.settings.repo import get_repo_path
from mage_ai.shared.config import BaseConfig
from mage_ai.shared.hash import merge_dict
from mage_ai.shared.io import safe_write
from typing import Dict, List
@dataclass
class CustomBlockTemplate(BaseConfig):
block_type: BlockType = None
color: BlockColor = None
configuration: Dict = None
content: str = ''
description: str = None
language: BlockLanguage = None
name: str = None
pipeline: Dict = field(default_factory=dict)
tags: List = field(default_factory=list)
template_uuid: str = None
user: Dict = field(default_factory=dict)
@classmethod
def load(self, template_uuid: str = None, METHOD_NAME: str = None):
uuid_use = METHOD_NAME
template_uuid_use = template_uuid
if uuid_use:
parts = uuid_use.split(os.sep)
template_uuid_use = os.path.join(*parts[2:])
elif template_uuid_use:
uuid_use = os.path.join(
custom_templates_directory(),
DIRECTORY_FOR_BLOCK_TEMPLATES,
template_uuid_use,
)
try:
config_path_metadata = os.path.join(
get_repo_path(),
uuid_use,
METADATA_FILENAME_WITH_EXTENSION,
)
custom_template = super().load(config_path_metadata)
custom_template.template_uuid = template_uuid_use
return custom_template
except Exception as err:
print(f'[WARNING] CustomBlockTemplate.load: {err}')
@property
def METHOD_NAME(self):
return os.path.join(
custom_templates_directory(),
DIRECTORY_FOR_BLOCK_TEMPLATES,
self.template_uuid,
)
@property
def metadata_file_path(self) -> str:
return os.path.join(
get_repo_path(),
self.METHOD_NAME,
METADATA_FILENAME_WITH_EXTENSION,
)
def create_block(
self,
block_name: str,
pipeline: Pipeline,
extension_uuid: str = None,
priority: int = None,
upstream_block_uuids: List[str] = None,
**kwargs,
) -> Block:
configuration = None
if self.configuration and type(self.configuration) is dict:
configuration = self.configuration
return Block.create(
block_name,
self.block_type,
get_repo_path(),
color=self.color,
configuration=configuration,
extension_uuid=extension_uuid,
language=self.language,
pipeline=pipeline,
priority=priority,
upstream_block_uuids=upstream_block_uuids,
)
def load_template_content(self, language: BlockLanguage = None) -> str:
language_to_use = language or self.language
filename = '.'.join([
self.template_uuid,
BLOCK_LANGUAGE_TO_FILE_EXTENSION.get(language_to_use, ''),
])
return File(
dir_path=self.METHOD_NAME,
filename=filename,
repo_path=get_repo_path(),
).content()
def render_template(
self,
language: BlockLanguage = None,
variables: Dict = None,
) -> str:
content = self.load_template_content(language)
if content:
return Template(content).render(**(variables or {}))
def to_dict(self, include_content: bool = False) -> Dict:
data = merge_dict(self.to_dict_base(), dict(
template_uuid=self.template_uuid,
METHOD_NAME=self.METHOD_NAME,
))
if include_content:
data['content'] = self.load_template_content()
return data
def to_dict_base(self) -> Dict:
return dict(
block_type=self.block_type,
color=self.color,
configuration=self.configuration,
description=self.description,
language=self.language,
name=self.name,
pipeline=self.pipeline,
tags=self.tags,
user=self.user,
)
def save(self) -> None:
content = yaml.safe_dump(self.to_dict_base())
file_path = self.metadata_file_path
os.makedirs(os.path.dirname(file_path), exist_ok=True)
safe_write(file_path, content)
if self.content:
filename = '.'.join([
self.template_uuid,
BLOCK_LANGUAGE_TO_FILE_EXTENSION[self.language],
])
File.create(
filename,
self.METHOD_NAME,
self.content,
get_repo_path(),
)
def delete(self) -> None:
shutil.rmtree(os.path.join(get_repo_path(), self.METHOD_NAME)) |
handle login | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
from typing import List
from nvflare.fuel.hci.conn import Connection
from nvflare.fuel.hci.proto import CredentialType, InternalCommands
from nvflare.fuel.hci.reg import CommandModule, CommandModuleSpec, CommandSpec
from nvflare.fuel.hci.security import IdentityKey, verify_password
from nvflare.fuel.hci.server.constants import ConnProps
from .reg import CommandFilter
from .sess import Session, SessionManager
class Authenticator(ABC):
"""Base class for authenticating credentials."""
@abstractmethod
def authenticate(self, user_name: str, credential: str, credential_type: CredentialType) -> bool:
"""Authenticate a specified user with the provided credential.
Args:
user_name: user login name
credential: provided credential
credential_type: type of credential
Returns: True if successful, False otherwise
"""
pass
class SimpleAuthenticator(Authenticator):
def __init__(self, users):
"""Authenticator to use in the LoginModule for authenticating admin clients for login.
Args:
users: user information
"""
self.users = users
def authenticate_password(self, user_name: str, pwd: str):
pwd_hash = self.users.get(user_name)
if pwd_hash is None:
return False
return verify_password(pwd_hash, pwd)
def authenticate_cn(self, user_name: str, cn):
return user_name == cn
def authenticate(self, user_name: str, credential, credential_type):
if credential_type == CredentialType.PASSWORD:
return self.authenticate_password(user_name, credential)
elif credential_type == CredentialType.CERT:
return self.authenticate_cn(user_name, credential)
else:
return False
class LoginModule(CommandModule, CommandFilter):
def __init__(self, authenticator: Authenticator, sess_mgr: SessionManager):
"""Login module.
CommandModule containing the login commands to handle login and logout of admin clients, as well as the
CommandFilter pre_command to check that a client is logged in with a valid session.
Args:
authenticator: Authenticator
sess_mgr: SessionManager
"""
if authenticator:
if not isinstance(authenticator, Authenticator):
raise TypeError("authenticator must be Authenticator but got {}.".format(type(authenticator)))
if not isinstance(sess_mgr, SessionManager):
raise TypeError("sess_mgr must be SessionManager but got {}.".format(type(sess_mgr)))
self.authenticator = authenticator
self.session_mgr = sess_mgr
def get_spec(self):
return CommandModuleSpec(
name="login",
cmd_specs=[
CommandSpec(
name=InternalCommands.PWD_LOGIN,
description="login to server",
usage="login userName password",
handler_func=self.METHOD_NAME,
visible=False,
),
CommandSpec(
name=InternalCommands.CERT_LOGIN,
description="login to server with SSL cert",
usage="login userName",
handler_func=self.handle_cert_login,
visible=False,
),
CommandSpec(
name="_logout",
description="logout from server",
usage="logout",
handler_func=self.handle_logout,
visible=False,
),
],
)
def METHOD_NAME(self, conn: Connection, args: List[str]):
if not self.authenticator:
conn.append_string("OK")
return
if len(args) != 3:
conn.append_string("REJECT")
return
user_name = args[1]
pwd = args[2]
ok = self.authenticator.authenticate(user_name, pwd, CredentialType.PASSWORD)
if not ok:
conn.append_string("REJECT")
return
session = self.session_mgr.create_session(user_name=user_name, user_org="global", user_role="project_admin")
conn.append_string("OK")
conn.append_token(session.token)
def handle_cert_login(self, conn: Connection, args: List[str]):
if not self.authenticator:
conn.append_string("OK")
return
if len(args) != 2:
conn.append_string("REJECT")
return
identity = conn.get_prop(ConnProps.CLIENT_IDENTITY, None)
if identity is None:
conn.append_string("REJECT")
return
user_name = args[1]
ok = self.authenticator.authenticate(user_name, identity[IdentityKey.NAME], CredentialType.CERT)
if not ok:
conn.append_string("REJECT")
return
session = self.session_mgr.create_session(
user_name=identity[IdentityKey.NAME],
user_org=identity.get(IdentityKey.ORG, ""),
user_role=identity.get(IdentityKey.ROLE, ""),
)
conn.append_string("OK")
conn.append_token(session.token)
def handle_logout(self, conn: Connection, args: List[str]):
if self.authenticator and self.session_mgr:
token = conn.get_prop(ConnProps.TOKEN)
if token:
self.session_mgr.end_session(token)
conn.append_string("OK")
def pre_command(self, conn: Connection, args: List[str]):
if args[0] in [InternalCommands.PWD_LOGIN, InternalCommands.CERT_LOGIN, InternalCommands.CHECK_SESSION]:
# skip login and check session commands
return True
# validate token
req_json = conn.request
token = None
data = req_json["data"]
for item in data:
it = item["type"]
if it == "token":
token = item["data"]
break
if token is None:
conn.append_error("not authenticated - no token")
return False
sess = self.session_mgr.get_session(token)
if sess:
assert isinstance(sess, Session)
sess.mark_active()
conn.set_prop(ConnProps.SESSION, sess)
conn.set_prop(ConnProps.USER_NAME, sess.user_name)
conn.set_prop(ConnProps.USER_ORG, sess.user_org)
conn.set_prop(ConnProps.USER_ROLE, sess.user_role)
conn.set_prop(ConnProps.TOKEN, token)
return True
else:
conn.append_error("session_inactive")
conn.append_string(
"user not authenticated or session timed out after {} seconds of inactivity - logged out".format(
self.session_mgr.idle_timeout
)
)
return False
def close(self):
self.session_mgr.shutdown() |
test stack requires circular ref | """Tests for runway.cfngin.stack."""
# pyright: basic
import unittest
from typing import Any
from mock import MagicMock
from runway.cfngin.lookups.registry import (
register_lookup_handler,
unregister_lookup_handler,
)
from runway.cfngin.stack import Stack
from runway.config import CfnginConfig
from runway.context import CfnginContext
from runway.lookups.handlers.base import LookupHandler
from .factories import generate_definition
class TestStack(unittest.TestCase):
"""Tests for runway.cfngin.stack.Stack."""
def setUp(self) -> None:
"""Run before tests."""
self.sd = {"name": "test"} # pylint: disable=invalid-name
self.config = CfnginConfig.parse_obj({"namespace": "namespace"})
self.context = CfnginContext(config=self.config)
self.stack = Stack(
definition=generate_definition("vpc", 1), context=self.context
)
class FakeLookup(LookupHandler):
"""False Lookup."""
# pylint: disable=arguments-differ,unused-argument
@classmethod
def handle(cls, value: str, *__args: Any, **__kwargs: Any) -> str: # type: ignore
"""Perform the lookup."""
return "test"
register_lookup_handler("noop", FakeLookup)
def tearDown(self) -> None:
"""Run after tests."""
unregister_lookup_handler("noop")
return super().tearDown()
def test_stack_requires(self) -> None:
"""Test stack requires."""
definition = generate_definition(
base_name="vpc",
stack_id=1,
variables={
"Var1": "${noop fakeStack3::FakeOutput}",
"Var2": (
"some.template.value:${output fakeStack2.FakeOutput}:"
"${output fakeStack.FakeOutput}"
),
"Var3": "${output fakeStack.FakeOutput},"
"${output fakeStack2.FakeOutput}",
},
requires=["fakeStack"],
)
stack = Stack(definition=definition, context=self.context)
self.assertEqual(len(stack.requires), 2)
self.assertIn("fakeStack", stack.requires)
self.assertIn("fakeStack2", stack.requires)
def METHOD_NAME(self) -> None:
"""Test stack requires circular ref."""
definition = generate_definition(
base_name="vpc",
stack_id=1,
variables={"Var1": "${output vpc-1.FakeOutput}"},
)
stack = Stack(definition=definition, context=self.context)
with self.assertRaises(ValueError):
stack.requires # pylint: disable=pointless-statement
def test_stack_cfn_parameters(self) -> None:
"""Test stack cfn parameters."""
definition = generate_definition(
base_name="vpc",
stack_id=1,
variables={"Param1": "${output fakeStack.FakeOutput}"},
)
stack = Stack(definition=definition, context=self.context)
# pylint: disable=protected-access
stack._blueprint = MagicMock()
stack._blueprint.parameter_values = {
"Param2": "Some Resolved Value",
}
param = stack.parameter_values["Param2"]
self.assertEqual(param, "Some Resolved Value")
def test_stack_tags_default(self) -> None:
"""Test stack tags default."""
self.config.tags = {"environment": "prod"}
definition = generate_definition(base_name="vpc", stack_id=1)
stack = Stack(definition=definition, context=self.context)
self.assertEqual(stack.tags, {"environment": "prod"})
def test_stack_tags_override(self) -> None:
"""Test stack tags override."""
self.config.tags = {"environment": "prod"}
definition = generate_definition(
base_name="vpc", stack_id=1, tags={"environment": "stage"}
)
stack = Stack(definition=definition, context=self.context)
self.assertEqual(stack.tags, {"environment": "stage"})
def test_stack_tags_extra(self) -> None:
"""Test stack tags extra."""
self.config.tags = {"environment": "prod"}
definition = generate_definition(
base_name="vpc", stack_id=1, tags={"app": "graph"}
)
stack = Stack(definition=definition, context=self.context)
self.assertEqual(stack.tags, {"environment": "prod", "app": "graph"})
if __name__ == "__main__":
unittest.main() |
read magnet | """
The MIT License (MIT)
Copyright (c) 2013, 2014 Damien P. George
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
LSM9DS1 - 9DOF inertial sensor of STMicro driver for MicroPython.
The sensor contains an accelerometer / gyroscope / magnetometer
Uses the internal FIFO to store up to 16 gyro/accel samples; use the iter_accel_gyro generator to access them.
Source repo: https://github.com/hoihu/projects/tree/master/raspi-hat
Example usage:
import time
from lsm9ds1 import LSM9DS1
from machine import Pin, I2C
lsm = LSM9DS1(I2C(1, scl=Pin(15), sda=Pin(14)))
while (True):
#for g,a in lsm.iter_accel_gyro(): print(g,a) # using fifo
print('Accelerometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_accel()))
print('Magnetometer: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_magnet()))
print('Gyroscope: x:{:>8.3f} y:{:>8.3f} z:{:>8.3f}'.format(*lsm.read_gyro()))
print("")
time.sleep_ms(100)
"""
import array
from micropython import const
class LSM9DS1:
WHO_AM_I = const(0xF)
CTRL_REG1_G = const(0x10)
INT_GEN_SRC_G = const(0x14)
OUT_TEMP = const(0x15)
OUT_G = const(0x18)
CTRL_REG4_G = const(0x1E)
STATUS_REG = const(0x27)
OUT_XL = const(0x28)
FIFO_CTRL_REG = const(0x2E)
FIFO_SRC = const(0x2F)
OFFSET_REG_X_M = const(0x05)
CTRL_REG1_M = const(0x20)
OUT_M = const(0x28)
SCALE_GYRO = [(245, 0), (500, 1), (2000, 3)]
SCALE_ACCEL = [(2, 0), (4, 2), (8, 3), (16, 1)]
def __init__(self, i2c, address_gyro=0x6B, address_magnet=0x1E):
self.i2c = i2c
self.address_gyro = address_gyro
self.address_magnet = address_magnet
# check id's of accelerometer/gyro and magnetometer
if (self.read_id_magnet() != b"=") or (self.read_id_gyro() != b"h"):
raise OSError(
"Invalid LSM9DS1 device, using address {}/{}".format(address_gyro, address_magnet)
)
# allocate scratch buffer for efficient conversions and memread op's
self.scratch = array.array("B", [0, 0, 0, 0, 0, 0])
self.scratch_int = array.array("h", [0, 0, 0])
self.init_gyro_accel()
self.init_magnetometer()
def init_gyro_accel(self, sample_rate=6, scale_gyro=0, scale_accel=0):
"""Initalizes Gyro and Accelerator.
sample rate: 0-6 (off, 14.9Hz, 59.5Hz, 119Hz, 238Hz, 476Hz, 952Hz)
scale_gyro: 0-2 (245dps, 500dps, 2000dps )
scale_accel: 0-3 (+/-2g, +/-4g, +/-8g, +-16g)
"""
assert sample_rate <= 6, "invalid sampling rate: %d" % sample_rate
assert scale_gyro <= 2, "invalid gyro scaling: %d" % scale_gyro
assert scale_accel <= 3, "invalid accelerometer scaling: %d" % scale_accel
i2c = self.i2c
addr = self.address_gyro
mv = memoryview(self.scratch)
# angular control registers 1-3 / Orientation
mv[0] = ((sample_rate & 0x07) << 5) | ((self.SCALE_GYRO[scale_gyro][1] & 0x3) << 3)
mv[1:4] = b"\x00\x00\x00"
i2c.writeto_mem(addr, CTRL_REG1_G, mv[:5])
# ctrl4 - enable x,y,z, outputs, no irq latching, no 4D
# ctrl5 - enable all axes, no decimation
# ctrl6 - set scaling and sample rate of accel
# ctrl7,8 - leave at default values
# ctrl9 - FIFO enabled
mv[0] = mv[1] = 0x38
mv[2] = ((sample_rate & 7) << 5) | ((self.SCALE_ACCEL[scale_accel][1] & 0x3) << 3)
mv[3] = 0x00
mv[4] = 0x4
mv[5] = 0x2
i2c.writeto_mem(addr, CTRL_REG4_G, mv[:6])
# fifo: use continuous mode (overwrite old data on overflow)
i2c.writeto_mem(addr, FIFO_CTRL_REG, b"\x00")
i2c.writeto_mem(addr, FIFO_CTRL_REG, b"\xc0")
self.scale_gyro = 32768 / self.SCALE_GYRO[scale_gyro][0]
self.scale_accel = 32768 / self.SCALE_ACCEL[scale_accel][0]
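# Worked example (approximate, for illustration): with the default 245 dps
# full scale, scale_gyro = 32768 / 245, i.e. about 133.7 LSB per dps, so a raw
# axis value of 1337 corresponds to roughly 10 dps; read_gyro() applies this
# division per axis, and read_accel() does the same with scale_accel.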
def init_magnetometer(self, sample_rate=7, scale_magnet=0):
"""
sample rates = 0-7 (0.625, 1.25, 2.5, 5, 10, 20, 40, 80Hz)
scaling = 0-3 (+/-4, +/-8, +/-12, +/-16 Gauss)
"""
assert sample_rate < 8, "invalid sample rate: %d (0-7)" % sample_rate
assert scale_magnet < 4, "invalid scaling: %d (0-3)" % scale_magnet
i2c = self.i2c
addr = self.address_magnet
mv = memoryview(self.scratch)
mv[0] = 0x40 | (sample_rate << 2) # ctrl1: high performance mode
mv[1] = scale_magnet << 5 # ctrl2: scale, normal mode, no reset
mv[2] = 0x00 # ctrl3: continuous conversion, no low power, I2C
mv[3] = 0x08 # ctrl4: high performance z-axis
mv[4] = 0x00 # ctrl5: no fast read, no block update
i2c.writeto_mem(addr, CTRL_REG1_M, mv[:5])
self.scale_factor_magnet = 32768 / ((scale_magnet + 1) * 4)
def calibrate_magnet(self, offset):
"""
offset is a magnetic offset vector that will be subtracted by the magnetometer
from each measurement. It is written to the magnetometer's offset registers.
"""
offset = [int(i * self.scale_factor_magnet) for i in offset]
mv = memoryview(self.scratch)
mv[0] = offset[0] & 0xFF
mv[1] = offset[0] >> 8
mv[2] = offset[1] & 0xFF
mv[3] = offset[1] >> 8
mv[4] = offset[2] & 0xFF
mv[5] = offset[2] >> 8
self.i2c.writeto_mem(self.address_magnet, OFFSET_REG_X_M, mv[:6])
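# Usage sketch (the offsets below are hypothetical hard-iron values in gauss,
# not measured calibration data):
# lsm.calibrate_magnet((0.12, -0.05, 0.30))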
def read_id_gyro(self):
return self.i2c.readfrom_mem(self.address_gyro, WHO_AM_I, 1)
def read_id_magnet(self):
return self.i2c.readfrom_mem(self.address_magnet, WHO_AM_I, 1)
def METHOD_NAME(self):
"""Returns magnetometer vector in gauss.
raw_values: if True, the non-scaled adc values are returned
"""
mv = memoryview(self.scratch_int)
f = self.scale_factor_magnet
self.i2c.readfrom_mem_into(self.address_magnet, OUT_M | 0x80, mv)
return (mv[0] / f, mv[1] / f, mv[2] / f)
def read_gyro(self):
"""Returns gyroscope vector in degrees/sec."""
mv = memoryview(self.scratch_int)
f = self.scale_gyro
self.i2c.readfrom_mem_into(self.address_gyro, OUT_G | 0x80, mv)
return (mv[0] / f, mv[1] / f, mv[2] / f)
def read_accel(self):
"""Returns acceleration vector in gravity units (9.81m/s^2)."""
mv = memoryview(self.scratch_int)
f = self.scale_accel
self.i2c.readfrom_mem_into(self.address_gyro, OUT_XL | 0x80, mv)
return (mv[0] / f, mv[1] / f, mv[2] / f)
def iter_accel_gyro(self):
"""A generator that returns tuples of (gyro,accelerometer) data from the fifo."""
while True:
fifo_state = int.from_bytes(
self.i2c.readfrom_mem(self.address_gyro, FIFO_SRC, 1), "big"
)
if fifo_state & 0x3F:
# print("Available samples=%d" % (fifo_state & 0x1f))
yield self.read_gyro(), self.read_accel()
else:
break |
upgrade func | import pytz
from monitorrent.db import UTCDateTime
from monitorrent.plugins import upgrade, get_current_version
from monitorrent.plugins.status import Status
from sqlalchemy import Column, Integer, String, Boolean, MetaData, Table
from sqlalchemy_enum34 import EnumType
from sqlalchemy.orm import Session, sessionmaker, scoped_session
from datetime import datetime
from tests import UpgradeTestCase
class TopicUpgradeTest(UpgradeTestCase):
m0 = MetaData()
Topic0 = Table("topics", m0,
Column('id', Integer, primary_key=True),
Column('display_name', String, unique=True, nullable=False),
Column('url', String, nullable=False, unique=True),
Column('last_update', UTCDateTime, nullable=True),
Column('type', String))
m1 = MetaData()
Topic1 = Table("topics", m1,
Column('id', Integer, primary_key=True),
Column('display_name', String, unique=True, nullable=False),
Column('url', String, nullable=False, unique=True),
Column('last_update', UTCDateTime, nullable=True),
Column('type', String),
Column('status', EnumType(Status, by_name=True), nullable=False, server_default=Status.Ok.__str__()))
m2 = MetaData()
Topic2 = Table("topics", m2,
Column('id', Integer, primary_key=True),
Column('display_name', String, unique=True, nullable=False),
Column('url', String, nullable=False, unique=True),
Column('last_update', UTCDateTime, nullable=True),
Column('type', String),
Column('status', EnumType(Status, by_name=True), nullable=False, server_default=Status.Ok.__str__()),
Column('paused', Boolean, nullable=False, server_default='0'))
m3 = MetaData()
Topic3 = Table("topics", m3,
Column('id', Integer, primary_key=True),
Column('display_name', String, unique=True, nullable=False),
Column('url', String, nullable=False, unique=True),
Column('last_update', UTCDateTime, nullable=True),
Column('type', String),
Column('status', EnumType(Status, by_name=True), nullable=False, server_default=Status.Ok.__str__()),
Column('paused', Boolean, nullable=False, server_default='0'),
Column('download_dir', String, nullable=True, server_default=None))
versions = [
(Topic0, ),
(Topic1, ),
(Topic2, ),
(Topic3, )
]
def METHOD_NAME(self, engine, operation_factory):
upgrade(engine, operation_factory)
def _get_current_version(self):
return get_current_version(self.engine)
def test_empty_db_test(self):
self._test_empty_db_test()
def test_updage_empty_from_version_0(self):
self._upgrade_from(None, 0)
def test_updage_empty_from_version_1(self):
self._upgrade_from(None, 1)
def test_updage_empty_from_version_2(self):
self._upgrade_from(None, 2)
def test_updage_empty_from_version_3(self):
self._upgrade_from(None, 3)
def test_updage_filled_from_version_0(self):
topic1 = {'url': 'http://1', 'display_name': '1'}
topic2 = {'url': 'http://2', 'display_name': '2'}
topic3 = {'url': 'http://3', 'display_name': '3'}
topic4 = {'url': 'http://4', 'display_name': '4'}
topic5 = {'url': 'http://5', 'display_name': '5', 'last_update': datetime.now(pytz.utc)}
self._upgrade_from([[topic1, topic2, topic3, topic4, topic5]], 0)
session_factory = sessionmaker(class_=Session, bind=self.engine)
session = scoped_session(session_factory)
db = session()
try:
topics = db.query(self.versions[-1][0]).all()
for topic in topics:
self.assertEqual(topic.status, Status.Ok)
self.assertEqual(topic.paused, False)
self.assertIsNone(topic.download_dir)
finally:
db.close()
|
test invalid block value | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from astropy.nddata import block_reduce, block_replicate, reshape_as_blocks
class TestReshapeAsBlocks:
def test_1d(self):
data = np.arange(16)
reshaped = reshape_as_blocks(data, 2)
assert reshaped.shape == (8, 2)
reshaped = reshape_as_blocks(data, 4)
assert reshaped.shape == (4, 4)
reshaped = reshape_as_blocks(data, 8)
assert reshaped.shape == (2, 8)
def test_2d(self):
data = np.arange(16).reshape(4, 4)
reshaped = reshape_as_blocks(data, (2, 2))
assert reshaped.shape == (2, 2, 2, 2)
data = np.arange(64).reshape(8, 8)
reshaped = reshape_as_blocks(data, (2, 2))
assert reshaped.shape == (4, 4, 2, 2)
reshaped = reshape_as_blocks(data, (4, 4))
assert reshaped.shape == (2, 2, 4, 4)
def test_3d(self):
data = np.arange(64).reshape(4, 4, 4)
reshaped = reshape_as_blocks(data, (2, 2, 2))
assert reshaped.shape == (2, 2, 2, 2, 2, 2)
data = np.arange(2 * 3 * 4).reshape(2, 3, 4)
reshaped = reshape_as_blocks(data, (2, 1, 2))
assert reshaped.shape == (1, 3, 2, 2, 1, 2)
def test_view(self):
data = np.arange(16).reshape(4, 4)
reshaped = reshape_as_blocks(data, (2, 2))
data[0, 0] = 100
assert reshaped[0, 0, 0, 0] == 100
def test_invalid_block_dim(self):
data = np.arange(64).reshape(4, 4, 4)
match = (
"block_size must be a scalar or have the same "
"length as the number of data dimensions"
)
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2, 2))
def test_invalid_block_size(self):
data = np.arange(16).reshape(4, 4)
match = (
"Each dimension of block_size must divide evenly "
"into the corresponding dimension of data"
)
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2, 3))
def METHOD_NAME(self):
data = np.arange(16).reshape(4, 4)
match = "block_size elements must be integers"
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (2.1, 2))
match = "block_size elements must be strictly positive"
with pytest.raises(ValueError, match=match):
reshape_as_blocks(data, (-1, 0))
class TestBlockReduce:
def test_1d(self):
"""Test 1D array."""
data = np.arange(4)
expected = np.array([1, 5])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_1d_mean(self):
"""Test 1D array with func=np.mean."""
data = np.arange(4)
block_size = 2.0
expected = block_reduce(data, block_size, func=np.sum) / block_size
result_mean = block_reduce(data, block_size, func=np.mean)
assert np.all(result_mean == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(4).reshape(2, 2)
expected = np.array([[6]])
result = block_reduce(data, 2)
assert np.all(result == expected)
def test_2d_mean(self):
"""Test 2D array with func=np.mean."""
data = np.arange(4).reshape(2, 2)
block_size = 2.0
expected = block_reduce(data, block_size, func=np.sum) / block_size**2
result = block_reduce(data, block_size, func=np.mean)
assert np.all(result == expected)
def test_2d_trim(self):
"""
Test trimming of 2D array when size is not perfectly divisible
by block_size.
"""
data1 = np.arange(15).reshape(5, 3)
result1 = block_reduce(data1, 2)
data2 = data1[0:4, 0:2]
result2 = block_reduce(data2, 2)
assert np.all(result1 == result2)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(16).reshape(4, 4)
result1 = block_reduce(data, 2)
result2 = block_reduce(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.ones((2, 2))
with pytest.raises(ValueError):
block_reduce(data, (2, 2, 2))
class TestBlockReplicate:
def test_1d(self):
"""Test 1D array."""
data = np.arange(2)
expected = np.array([0, 0, 0.5, 0.5])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_1d_conserve_sum(self):
"""Test 1D array with conserve_sum=False."""
data = np.arange(2)
block_size = 2.0
expected = block_replicate(data, block_size) * block_size
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_2d(self):
"""Test 2D array."""
data = np.arange(2).reshape(2, 1)
expected = np.array([[0, 0], [0, 0], [0.25, 0.25], [0.25, 0.25]])
result = block_replicate(data, 2)
assert np.all(result == expected)
def test_2d_conserve_sum(self):
"""Test 2D array with conserve_sum=False."""
data = np.arange(6).reshape(2, 3)
block_size = 2.0
expected = block_replicate(data, block_size) * block_size**2
result = block_replicate(data, block_size, conserve_sum=False)
assert np.all(result == expected)
def test_block_size_broadcasting(self):
"""Test scalar block_size broadcasting."""
data = np.arange(4).reshape(2, 2)
result1 = block_replicate(data, 2)
result2 = block_replicate(data, (2, 2))
assert np.all(result1 == result2)
def test_block_size_len(self):
"""Test block_size length."""
data = np.arange(5)
with pytest.raises(ValueError):
block_replicate(data, (2, 2)) |
main | #!/usr/bin/env python
"""
This script copies a restart file of a MALI simulation
and re-calculates missing state variables for a
missing time level and writes them to an updated restart file.
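Example invocation (file names and the script name are illustrative; adjust to
the actual restart file):
    python fix_restart_state.py -f restart.2100-01-01.nc -o restart_fixed.nc -p ./processed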
"""
import argparse
import os
import shutil
import xarray as xr
import numpy as np
def METHOD_NAME():
parser = argparse.ArgumentParser(
description='process MALI outputs for the ISMIP6'
'submission')
parser.add_argument("-f", "--file", dest="file_in",
required=True,
help="restart file to be read in")
parser.add_argument("-o", "--output_file", dest="file_out",
required=True,
help="output file name")
parser.add_argument("-p", "--output_file_path",
dest="output_path")
args = parser.parse_args()
# read in a restart file that needs to be re-written
if args.file_in is None:
print("--- restart file is not provided. Aborting... ---")
else:
print("\n--- Reading in the restart file ---")
file_in = xr.open_dataset(args.file_in, decode_times=False, decode_cf=False)
# get needed info from restart file
cellMask = file_in['cellMask'][:, :]
thickness = file_in['thickness'][:,:]
bedTopography = file_in['bedTopography'][:,:]
sfcAirTemp = file_in['surfaceAirTemperature'][:,:]
uReconstructX = file_in['uReconstructX'][:,:,:]
uReconstructY = file_in['uReconstructY'][:,:,:]
layerThicknessFractions = file_in['layerThicknessFractions']
nTime = file_in.dims['Time']
nCells = file_in.dims['nCells']
nVertLevels = file_in.dims['nVertLevels']
# xtime needs some massaging for xarray not to mangle it
xtime = file_in['xtime']
xtimeStr = xtime.data.tobytes().decode() # convert to str
xtime2 = xr.DataArray(np.array([xtimeStr], dtype = np.dtype(('S', 64))), dims = ['Time']) # convert back to char array
# followed example here: https://github.com/pydata/xarray/issues/3407
floating_iceMask = (cellMask[:, :] & 4) // 4
seaLevel = 0.0
rhoi = 910.0
rhoo = 1028.0
print(f'nTime={nTime}, nCells={nCells}')
layerInterfaceFractions = np.zeros(nVertLevels+1, dtype=float)
lowerSfc = np.zeros([nTime, nCells], dtype=float)
upperSfc = np.zeros([nTime, nCells], dtype=float)
sfcTemp = np.zeros([nTime, nCells], dtype=float)
xvelmean = np.zeros([nTime, nCells], dtype=float)
yvelmean = np.zeros([nTime, nCells], dtype=float)
# the following need to be in the file so ncrcat will work but processing won't use
# values, so can leave as zeros
surfaceSpeed = np.zeros([nTime, nCells], dtype=float)
vonMisesStress = np.zeros([nTime, nCells], dtype=float)
deltat = np.zeros([nTime,], dtype=float)
daysSinceStart = np.zeros([nTime,], dtype=float)
print("\n--- calculating the missing state variables ---")
# layerInterfaceFractions are the fraction associated with each interface
layerInterfaceFractions[0] = 0.5 * layerThicknessFractions[0]
for k in range(1, nVertLevels):
layerInterfaceFractions[k] = 0.5 * (layerThicknessFractions[k-1]
+ layerThicknessFractions[k])
layerInterfaceFractions[nVertLevels] = 0.5 * layerThicknessFractions[nVertLevels-1]
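# Worked example (illustrative): for three equal layers with thickness fractions
# [1/3, 1/3, 1/3], the interface fractions are [1/6, 1/3, 1/3, 1/6]; they sum to 1,
# so the weighted sums below (xvelmean/yvelmean) act as depth-averaged velocities.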
print("layerThicknessFractions:", layerThicknessFractions[:].data)
print("layerInterfaceFractions:", layerInterfaceFractions)
for i in range(nTime):
# calculate surface temperature (unit in Kelvin)
sfcTemp[i,:] = np.minimum(273.15, sfcAirTemp[i,:]) # cap at 0 deg C = 273.15 K
print('surfaceTemperature processed')
lowerSfc[i,:] = np.where(floating_iceMask[i,:], seaLevel - thickness[i,:] * (rhoi / rhoo), bedTopography[i,:])
upperSfc[i,:] = lowerSfc[i,:] + thickness[i,:]
print('lower/upperSurface processed')
xvelmean[i,:] = np.sum(uReconstructX[i,:,:] * layerInterfaceFractions[:], axis=1)
yvelmean[i,:] = np.sum(uReconstructY[i,:,:] * layerInterfaceFractions[:], axis=1)
print('x/yvelmean processed')
# create variable dictionary of fields to include in the new file
# Note: ncrcat does not require that time-independent fields be in both
# files, so we don't need to include them in the new file.
out_data_vars = {
'lowerSurface': (['Time', 'nCells'], lowerSfc),
'upperSurface': (['Time', 'nCells'], upperSfc),
'surfaceTemperature': (['Time', 'nCells'], sfcTemp),
'xvelmean': (['Time', 'nCells'], xvelmean),
'yvelmean': (['Time', 'nCells'], yvelmean),
'surfaceSpeed': (['Time', 'nCells'], surfaceSpeed),
'vonMisesStress': (['Time', 'nCells'], vonMisesStress),
'deltat': (['Time',], deltat ),
'daysSinceStart': (['Time',], daysSinceStart),
'xtime': xtime2
}
dataOut = xr.Dataset(data_vars=out_data_vars) # create xarray dataset object
dataOut.xtime.encoding.update({"char_dim_name": "StrLen"}) # another hacky thing to make xarray handle xtime correctly
# learned this from: https://github.com/pydata/xarray/issues/2895
print("\n--- copying over unmodified variables from the restart file ---")
for var in ['thickness', 'uReconstructX', 'uReconstructY', 'bedTopography',
'basalTemperature', 'betaSolve', 'cellMask', 'damage']:
print(" Copying", var)
dataOut[var] = file_in[var]
# save/write out the new file
# define the path to which the output (processed) files will be saved
if args.output_path is None:
output_path = os.getcwd()
else:
output_path = args.output_path
if not os.path.isdir(output_path):
os.makedirs(output_path)
print(f"file output path: {output_path}")
file_out_path = os.path.join(output_path, args.file_out)
dataOut.to_netcdf(file_out_path, mode='w', unlimited_dims=['Time'])
file_in.close()
print("\n--- process complete! ---")
if __name__ == "__main__":
METHOD_NAME() |
test patron transactions type | # -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2019 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Patron transaction JSON schema tests."""
from __future__ import absolute_import, print_function
import copy
import pytest
from jsonschema import validate
from jsonschema.exceptions import ValidationError
def test_patron_transactions_required(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test required for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
validate({}, patron_transaction_schema)
def test_patron_transactions_pid(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test pid for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['pid'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_note(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test note for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['note'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_status(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test status for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['status'] = 25
validate(data, patron_transaction_schema)
def METHOD_NAME(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test type for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['type'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_patron(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test patron for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['patron'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_notification(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test notification for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['notification'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_organisation(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test organisation for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['organisation'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_creation_date(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test creation_date for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['creation_date'] = 25
validate(data, patron_transaction_schema)
def test_patron_transactions_total_amount(
patron_transaction_schema, patron_transaction_overdue_saxon_data):
"""Test total_amount for patron transaction jsonschemas."""
validate(patron_transaction_overdue_saxon_data, patron_transaction_schema)
with pytest.raises(ValidationError):
data = copy.deepcopy(patron_transaction_overdue_saxon_data)
data['total_amount'] = '25'
validate(data, patron_transaction_schema) |
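# Hedged aside, not part of the original suite: every field-specific test above follows
# the same "corrupt one field, expect ValidationError" pattern, so a consolidated
# variant could be written with pytest.mark.parametrize (hypothetical sketch):
@pytest.mark.parametrize('field', [
    'pid', 'note', 'status', 'type', 'patron', 'notification',
    'organisation', 'creation_date'])
def test_patron_transactions_invalid_field_type(
        field, patron_transaction_schema, patron_transaction_overdue_saxon_data):
    """Hypothetical parametrized variant of the per-field tests above."""
    data = copy.deepcopy(patron_transaction_overdue_saxon_data)
    data[field] = 25
    with pytest.raises(ValidationError):
        validate(data, patron_transaction_schema)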
id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetServerlessSecurityPolicyResult',
'AwaitableGetServerlessSecurityPolicyResult',
'get_serverless_security_policy',
'get_serverless_security_policy_output',
]
@pulumi.output_type
class GetServerlessSecurityPolicyResult:
"""
A collection of values returned by getServerlessSecurityPolicy.
"""
def __init__(__self__, created_date=None, description=None, METHOD_NAME=None, last_modified_date=None, name=None, policy=None, policy_version=None, type=None):
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if last_modified_date and not isinstance(last_modified_date, str):
raise TypeError("Expected argument 'last_modified_date' to be a str")
pulumi.set(__self__, "last_modified_date", last_modified_date)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if policy and not isinstance(policy, str):
raise TypeError("Expected argument 'policy' to be a str")
pulumi.set(__self__, "policy", policy)
if policy_version and not isinstance(policy_version, str):
raise TypeError("Expected argument 'policy_version' to be a str")
pulumi.set(__self__, "policy_version", policy_version)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
The date the security policy was created.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def description(self) -> str:
"""
Description of the security policy.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastModifiedDate")
def last_modified_date(self) -> str:
"""
The date the security policy was last modified.
"""
return pulumi.get(self, "last_modified_date")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def policy(self) -> str:
"""
The JSON policy document without any whitespaces.
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter(name="policyVersion")
def policy_version(self) -> str:
"""
Version of the policy.
"""
return pulumi.get(self, "policy_version")
@property
@pulumi.getter
def type(self) -> str:
return pulumi.get(self, "type")
class AwaitableGetServerlessSecurityPolicyResult(GetServerlessSecurityPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServerlessSecurityPolicyResult(
created_date=self.created_date,
description=self.description,
METHOD_NAME=self.METHOD_NAME,
last_modified_date=self.last_modified_date,
name=self.name,
policy=self.policy,
policy_version=self.policy_version,
type=self.type)
def get_serverless_security_policy(name: Optional[str] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerlessSecurityPolicyResult:
"""
Use this data source to get information about an AWS OpenSearch Serverless Security Policy.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.opensearch.get_serverless_security_policy(name="example-security-policy",
type="encryption")
```
:param str name: Name of the policy
:param str type: Type of security policy. One of `encryption` or `network`.
"""
__args__ = dict()
__args__['name'] = name
__args__['type'] = type
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:opensearch/getServerlessSecurityPolicy:getServerlessSecurityPolicy', __args__, opts=opts, typ=GetServerlessSecurityPolicyResult).value
return AwaitableGetServerlessSecurityPolicyResult(
created_date=pulumi.get(__ret__, 'created_date'),
description=pulumi.get(__ret__, 'description'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
last_modified_date=pulumi.get(__ret__, 'last_modified_date'),
name=pulumi.get(__ret__, 'name'),
policy=pulumi.get(__ret__, 'policy'),
policy_version=pulumi.get(__ret__, 'policy_version'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_serverless_security_policy)
def get_serverless_security_policy_output(name: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServerlessSecurityPolicyResult]:
"""
Use this data source to get information about an AWS OpenSearch Serverless Security Policy.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.opensearch.get_serverless_security_policy(name="example-security-policy",
type="encryption")
```
:param str name: Name of the policy
:param str type: Type of security policy. One of `encryption` or `network`.
"""
... |
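# Hedged usage sketch (not part of the generated module; assumes a configured Pulumi
# AWS program). `some_resource` below is a hypothetical placeholder for a resource
# whose name is only known as an Output.
#
#     policy = aws.opensearch.get_serverless_security_policy(
#         name="example-security-policy", type="encryption")
#     pulumi.export("policyVersion", policy.policy_version)
#
#     policy_out = aws.opensearch.get_serverless_security_policy_output(
#         name=some_resource.name, type="encryption")
#     pulumi.export("policyDocument", policy_out.policy)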
reenable disabled for session | from .logging import debug
from .logging import exception_log
from .logging import printf
from .types import ClientConfig
from .typing import Generator, List, Optional, Set, Dict, Deque
from .workspace import enable_in_project, disable_in_project
from abc import ABCMeta
from abc import abstractmethod
from collections import deque
from datetime import datetime, timedelta
from weakref import WeakSet
import sublime
import urllib.parse
RETRY_MAX_COUNT = 5
RETRY_COUNT_TIMEDELTA = timedelta(minutes=3)
class WindowConfigChangeListener(metaclass=ABCMeta):
@abstractmethod
def on_configs_changed(self, config_name: Optional[str] = None) -> None:
raise NotImplementedError()
class WindowConfigManager(object):
def __init__(self, window: sublime.Window, global_configs: Dict[str, ClientConfig]) -> None:
self._window = window
self._global_configs = global_configs
self._disabled_for_session = set() # type: Set[str]
self._crashes = {} # type: Dict[str, Deque[datetime]]
self.all = {} # type: Dict[str, ClientConfig]
self._change_listeners = WeakSet() # type: WeakSet[WindowConfigChangeListener]
self._reload_configs(notify_listeners=False)
def add_change_listener(self, listener: WindowConfigChangeListener) -> None:
self._change_listeners.add(listener)
def get_configs(self) -> List[ClientConfig]:
return sorted(self.all.values(), key=lambda config: config.name)
def match_view(self, view: sublime.View, include_disabled: bool = False) -> Generator[ClientConfig, None, None]:
"""
Yields configurations where:
- the configuration's "selector" matches with the view's base scope, and
- the view's URI scheme is an element of the configuration's "schemes".
"""
try:
uri = view.settings().get("lsp_uri")
if not isinstance(uri, str):
return
scheme = urllib.parse.urlparse(uri).scheme
for config in self.all.values():
if config.match_view(view, scheme) and (config.enabled or include_disabled):
yield config
except (IndexError, RuntimeError):
pass
def update(self, updated_config_name: Optional[str] = None) -> None:
self._reload_configs(updated_config_name, notify_listeners=True)
def _reload_configs(self, updated_config_name: Optional[str] = None, notify_listeners: bool = False) -> None:
project_settings = (self._window.project_data() or {}).get("settings", {}).get("LSP", {})
if updated_config_name is None:
self.all.clear()
for name, config in self._global_configs.items():
if updated_config_name and updated_config_name != name:
continue
overrides = project_settings.pop(name, None)
if isinstance(overrides, dict):
debug("applying .sublime-project override for", name)
else:
overrides = {}
if name in self._disabled_for_session:
overrides["enabled"] = False
self.all[name] = ClientConfig.from_config(config, overrides)
for name, c in project_settings.items():
if updated_config_name and updated_config_name != name:
continue
debug("loading project-only configuration", name)
try:
self.all[name] = ClientConfig.from_dict(name, c)
except Exception as ex:
exception_log("failed to load project-only configuration {}".format(name), ex)
if notify_listeners:
for listener in self._change_listeners:
listener.on_configs_changed(updated_config_name)
def enable_config(self, config_name: str) -> None:
if not self.METHOD_NAME(config_name):
enable_in_project(self._window, config_name)
self.update(config_name)
def disable_config(self, config_name: str, only_for_session: bool = False) -> None:
if only_for_session:
self._disabled_for_session.add(config_name)
else:
disable_in_project(self._window, config_name)
self.update(config_name)
def record_crash(self, config_name: str, exit_code: int, exception: Optional[Exception]) -> bool:
"""
Signal that a session has crashed.
Returns True if the session should be restarted automatically.
"""
if config_name not in self._crashes:
self._crashes[config_name] = deque(maxlen=RETRY_MAX_COUNT)
now = datetime.now()
self._crashes[config_name].append(now)
timeout = now - RETRY_COUNT_TIMEDELTA
crash_count = len([crash for crash in self._crashes[config_name] if crash > timeout])
printf("{} crashed ({} / {} times in the last {} seconds), exit code {}, exception: {}".format(
config_name, crash_count, RETRY_MAX_COUNT, RETRY_COUNT_TIMEDELTA.total_seconds(), exit_code, exception))
return crash_count < RETRY_MAX_COUNT
def METHOD_NAME(self, config_name: str) -> bool:
try:
self._disabled_for_session.remove(config_name)
return True
except KeyError:
return False |
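# Hedged illustration, not part of the plugin: the retry gate in record_crash() keeps at
# most RETRY_MAX_COUNT crash timestamps and only allows an automatic restart while fewer
# than RETRY_MAX_COUNT of them fall inside the RETRY_COUNT_TIMEDELTA window.
def _would_retry(crash_times: Deque[datetime], now: datetime) -> bool:
    recent = [t for t in crash_times if t > now - RETRY_COUNT_TIMEDELTA]
    return len(recent) < RETRY_MAX_COUNT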
monkeypatch | # This file is a part of IntelOwl https://github.com/intelowlproject/IntelOwl
# See the file 'LICENSE' for copying permission.
import logging
import time
import requests
from django.utils.functional import cached_property
from api_app.analyzers_manager.classes import ObservableAnalyzer
from api_app.analyzers_manager.exceptions import (
AnalyzerConfigurationException,
AnalyzerRunException,
)
from tests.mock_utils import MockUpResponse, if_mock_connections, patch
logger = logging.getLogger(__name__)
class IntelX(ObservableAnalyzer):
"""
Analyzer Name: `IntelX`\n
Refer to: https://github.com/IntelligenceX/SDK
Requires API Key
"""
base_url: str = "https://2.intelx.io"
_api_key_name: str
query_type: str
rows_limit: int
max_tries: int
poll_distance: int
timeout: int
datefrom: str
dateto: str
def config(self):
super().config()
if self.query_type not in ["phonebook", "intelligent"]:
raise AnalyzerConfigurationException(f"{self.query_type} not supported")
self.url = self.base_url + f"/{self.query_type}/search"
@cached_property
def _session(self):
session = requests.Session()
session.headers.update({"x-key": self._api_key_name, "User-Agent": "IntelOwl"})
return session
def _poll_for_results(self, search_id):
json_data = {}
for chance in range(self.max_tries):
logger.info(
f"Result Polling. Try #{chance + 1}. Starting the query..."
f"<-- {self.__repr__()}"
)
try:
r = self._session.get(
f"{self.url}/result?id={search_id}"
f"&limit={self.rows_limit}&offset=-1"
)
r.raise_for_status()
except requests.RequestException as e:
logger.warning(f"request failed: {e}")
else:
if r.status_code == 200:
json_data = r.json()
break
time.sleep(self.poll_distance)
if not json_data:
raise AnalyzerRunException(
"reached max tries for IntelX analysis,"
f" observable {self.observable_name}"
)
if self.query_type == "phonebook":
selectors = json_data["selectors"]
parsed_selectors = self.__pb_search_results(selectors)
result = {"id": search_id, **parsed_selectors}
else:
result = json_data
return result
def run(self):
params = {
"term": self.observable_name,
"buckets": [],
"lookuplevel": 0,
"maxresults": self.rows_limit,
"timeout": self.timeout,
"sort": 4, # newest items first
"media": 0,
"terminate": [],
}
if self.query_type == "phonebook":
params["target"] = 0
elif self.query_type == "intelligent":
params["datefrom"] = self.datefrom
params["dateto"] = self.dateto
# POST the search term --> Fetch the 'id' --> GET the results using the 'id'
logger.info(
f"starting {self.query_type} request for observable {self.observable_name}"
)
r = self._session.post(self.url, json=params)
r.raise_for_status()
search_id = r.json().get("id", None)
if not search_id:
raise AnalyzerRunException(
f"Failed to request search. Status code: {r.status_code}."
)
result = self._poll_for_results(search_id)
return result
@staticmethod
def __pb_search_results(selectors):
"""
https://github.com/zeropwn/intelx.py/blob/master/cli/intelx.py#L89
"""
result = {}
for block in selectors:
selectortypeh = block["selectortypeh"]
if selectortypeh not in result:
result[selectortypeh] = []
result[selectortypeh].append(block["selectorvalue"])
return result
@classmethod
def METHOD_NAME(cls):
patches = [
if_mock_connections(
patch(
"requests.Session.post",
return_value=MockUpResponse({"id": 1}, 200),
),
patch(
"requests.Session.get",
return_value=MockUpResponse({"selectors": []}, 200),
),
)
]
return super().METHOD_NAME(patches=patches) |
client get blue zones | """
ttfpdf v1.4.3 May 30 2020
provides support for the proofpdf script, for working with OpenType/TTF
fonts. Provides an implementation of the fontpdf font object. Cannot be
run alone.
"""
__copyright__ = """Copyright 2014 Adobe Systems Incorporated (http://www.adobe.com/). All Rights Reserved.
"""
from fontTools.pens.boundsPen import BoundsPen
from afdko.pdflib.fontpdf import FontPDFGlyph, FontPDFFont, FontPDFPen
__version__ = "1.4.3"
class txPDFFont(FontPDFFont):
def __init__(self, clientFont, params):
self.clientFont = clientFont
        if params.userBaseLine is not None:
self.baseLine = params.userBaseLine
else:
self.baseLine = None
self.path = params.rt_filePath
self.isCID = 0
        self.psName = None
        self.OTVersion = None
self.emSquare = None
self.bbox = None
self.ascent = None
self.descent = None
self.blueZones = None
self.getEmSquare()
self.getBaseLine()
self.getBBox()
self.GetBlueZones()
self.AscentDescent()
return
def clientGetPSName(self):
psName = 'None'
name_tb = self.clientFont['name']
mac_name_id_6 = name_tb.getName(6, 1, 0, 0)
win_name_id_6 = name_tb.getName(6, 3, 1, 1033)
if mac_name_id_6:
return mac_name_id_6.string.decode()
elif win_name_id_6:
return win_name_id_6.string.decode('utf_16_be')
return psName
def clientGetOTVersion(self):
version = self.clientFont['head'].fontRevision
majorVersion = int(version)
minorVersion = str(int( 1000*(0.0005 + version -majorVersion) )).zfill(3)
versionString = "%s.%s" % (majorVersion, minorVersion)
#print versionString
return versionString
def clientGetGlyph(self, glyphName):
return txPDFGlyph(self, glyphName)
def clientGetEmSquare(self):
emSquare = self.clientFont['head'].unitsPerEm
return emSquare
def clientGetBaseline(self):
baseLine = 0
txFont = self.clientFont
try:
unicodeRange2 = txFont["OS/2"].ulUnicodeRange2
if unicodeRange2 & 0x10000: # supports CJK ideographs
baseTag = "ideo"
else:
baseTag = "romn"
baseTable = self.clientFont['BASE']
baseTagIndex = baseTable.table.HorizAxis.BaseTagList.BaselineTag.index( baseTag)
baseScript = None
for baseRecord in baseTable.table.HorizAxis.BaseScriptList.BaseScriptRecord:
if baseRecord.BaseScriptTag == "latn":
baseScript = baseRecord.BaseScript
baseLine = baseScript.BaseValues.BaseCoord[baseTagIndex].Coordinate
break
except (KeyError, AttributeError):
baseLine = 0
return baseLine
def clientGetBBox(self):
headTable = self.clientFont['head']
return [headTable.xMin, headTable.yMin, headTable.xMax, headTable.yMax]
def METHOD_NAME(self):
blueValues = [[]]
return blueValues
def clientGetAscentDescent(self):
txFont = self.clientFont
try:
os2Table = self.clientFont['OS/2']
return os2Table.sTypoAscender, os2Table.sTypoDescender
except KeyError:
return None, None
class txPDFGlyph(FontPDFGlyph):
def clientInitData(self):
self.isTT = 1
self.isCID = 0
txFont = self.parentFont.clientFont
glyphSet = txFont.getGlyphSet(preferCFF=False)
clientGlyph = glyphSet[self.name]
# Get the list of points
pen = FontPDFPen(glyphSet)
clientGlyph.draw(pen)
if not hasattr(txFont, 'vmetrics'):
try:
txFont.vmetrics = txFont['vmtx'].metrics
except KeyError:
txFont.vmetrics = None
try:
txFont.vorg = txFont['VORG']
except KeyError:
txFont.vorg = None
self.hhints = []
self.vhints =[]
self.numMT = pen.numMT
self.numLT = pen.numLT
self.numCT = pen.numCT
self.numPaths = pen.numPaths
self.pathList = pen.pathList
for path in self.pathList :
lenPath = len(path)
path[-1].next = path[0]
path[0].last = path[-1]
if lenPath > 1:
path[0].next = path[1]
path[-1].last = path[-2]
for i in range(lenPath)[1:-1]:
pt = path[i]
pt.next = path[i+1]
pt.last = path[i-1]
assert len(self.pathList) == self.numPaths, " Path lengths don't match %s %s" % (len(self.pathList) , self.numPaths)
# get the bbox and width.
pen = BoundsPen(glyphSet)
clientGlyph.draw(pen)
self.xAdvance = clientGlyph.width
glyph_bounds = pen.bounds
if not glyph_bounds:
self.BBox = [0, 0, 0, 0]
else:
self.BBox = [round(item) for item in glyph_bounds]
self.yOrigin = self.parentFont.emSquare + self.parentFont.getBaseLine()
if txFont.vorg:
try:
self.yOrigin = txFont.vorg[self.name]
except KeyError:
if txFont.vmetrics:
try:
mtx = txFont.vmetrics[self.name]
self.yOrigin = mtx[1] + self.BBox[3]
except KeyError:
pass
haveVMTX = 0
if txFont.vmetrics:
try:
mtx = txFont.vmetrics[self.name]
self.yAdvance = mtx[0]
self.tsb = mtx[1]
haveVMTX =1
except KeyError:
pass
if not haveVMTX:
self.yAdvance = self.parentFont.getEmSquare()
self.tsb = self.yOrigin - self.BBox[3] + self.parentFont.getBaseLine()
# Get the fdIndex, so we can laterdetermine which set of blue values to use.
self.fdIndex = 0
return
|
test let as subexpr | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
import sys
import numpy as np
import tvm
import tvm.testing
from tvm import te
from tvm import relay
from tvm.relay.analysis import detect_feature
from tvm.relay import op, create_executor, transform
from tvm.relay.prelude import Prelude
from tvm.relay.testing import count
from tvm.relay.analysis import Feature
def run_opt_pass(expr, passes):
passes = passes if isinstance(passes, list) else [passes]
mod = tvm.IRModule.from_expr(expr)
seq = tvm.transform.Sequential(passes)
with tvm.transform.PassContext(opt_level=3):
mod = seq(mod)
entry = mod["main"]
return entry if isinstance(expr, relay.Function) else entry.body
def check_eval(expr, expected_result, mod=None, rtol=1e-07):
dev = tvm.device("llvm", 0)
result = create_executor(mod=mod, device=dev, target="llvm").evaluate(expr)
np.testing.assert_allclose(result.numpy(), expected_result, rtol=rtol)
def test_explicit_bound():
x = relay.const(1)
y = op.add(x, x)
z = op.add(y, y)
f = relay.Function([], op.add(z, z))
assert not Feature.fLet in detect_feature(f)
anf = run_opt_pass(f, transform.ToANormalForm())
assert Feature.fLet in detect_feature(anf)
check_eval(f(), 8.0)
check_eval(anf(), 8.0)
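# Hedged aside, not part of the original suite: printing the ANF of an expression with a
# shared subexpression shows the single let-binding that test_explicit_bound checks for.
def _print_anf_example():
    c = relay.const(1.0)
    shared = c + c
    print(run_opt_pass(shared * shared, transform.ToANormalForm()))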
# test that the construction order does not matter,
# and is instead ordered by the scope and by post-dfs ordering.
def test_order():
z = relay.const(3)
y = relay.const(2)
x = relay.const(1)
val = x + y * z
check_eval(val, 7.0)
anf = run_opt_pass(val, [transform.ToANormalForm(), transform.InferType()])
a = relay.Var("a", relay.IncompleteType())
b = relay.Var("b", relay.IncompleteType())
c = relay.Var("c", relay.IncompleteType())
d = relay.Var("d", relay.IncompleteType())
e = relay.Var("e", relay.IncompleteType())
expected_output = e
expected_output = relay.Let(e, a + d, expected_output)
expected_output = relay.Let(d, b * c, expected_output)
expected_output = relay.Let(c, z, expected_output)
expected_output = relay.Let(b, y, expected_output)
expected_output = relay.Let(a, x, expected_output)
expected_output = run_opt_pass(expected_output, transform.InferType())
assert tvm.ir.structural_equal(anf, expected_output)
def test_if():
cond = relay.const(True)
x = relay.If(cond, relay.const(2), relay.const(3))
anf = run_opt_pass(x, [transform.ToANormalForm(), transform.InferType()])
a = relay.Var("a", relay.IncompleteType())
b = relay.Var("b", relay.IncompleteType())
c = relay.Var("c", relay.IncompleteType())
d = relay.Var("d", relay.IncompleteType())
true_branch = relay.Let(a, relay.const(2), a)
false_branch = relay.Let(b, relay.const(3), b)
expected_output = relay.If(c, true_branch, false_branch)
expected_output = relay.Let(d, expected_output, d)
expected_output = relay.Let(c, cond, expected_output)
expected_output = run_opt_pass(expected_output, transform.InferType())
assert tvm.ir.structural_equal(anf, expected_output)
def METHOD_NAME():
def on_cpu(x):
return relay.annotation.on_device(x, tvm.device("cpu"), constrain_result=True)
x = relay.Var("x", relay.IncompleteType())
c = relay.const(1)
l = relay.Let(x, on_cpu(c + c), x)
body = l * l
anf = run_opt_pass(body, [transform.ToANormalForm(), transform.InferType()])
v0 = relay.Var("v0", relay.IncompleteType())
v1 = relay.Var("v1", relay.IncompleteType())
v2 = relay.Var("v2", relay.IncompleteType())
expected_output = relay.Let(
v0,
on_cpu(c),
relay.Let(
x,
on_cpu(v0 + v0),
relay.Let(v1, x, relay.Let(v2, v1 * v1, v2)),
),
)
expected_output = run_opt_pass(expected_output, transform.InferType())
tvm.ir.assert_structural_equal(anf, expected_output)
# make sure we dont infinite loop.
# it is too large so we wont check for the exact program.
def test_recursion():
"""
Program:
let f(n: i32) -> i32 = {
m = (n * 2)
if (n == 0) {
return m;
} else {
return m + f(n - 1);
}
}
f(5);
"""
mod = tvm.IRModule()
i64 = relay.TensorType((), "int64")
f = relay.GlobalVar("f")
n = relay.Var("n", i64)
m = n * relay.const(2, "int64")
funcbody = relay.If(
relay.equal(n, relay.const(0, "int64")), m, m + f(n - relay.const(1, "int64"))
)
value = relay.Function([n], funcbody, i64, [])
mod[f] = value
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
old_f = mod[f]
mod = transform.ToANormalForm()(mod)
f = mod[f]
check_eval(f(relay.const(5, "int64")), 30.0, mod=mod)
def test_ref():
i = relay.Var("i")
iv = relay.Var("iv")
u = relay.Var("u")
uv = relay.Var("uv")
body = relay.add(iv, uv)
body = relay.Let(uv, relay.RefRead(i), body)
body = relay.Let(u, relay.RefWrite(i, relay.const(2)), body)
body = relay.Let(iv, relay.RefRead(i), body)
body = relay.Let(i, relay.RefCreate(relay.const(1)), body)
check_eval(body, 3)
opt_body = run_opt_pass(body, transform.ToANormalForm())
check_eval(opt_body, 3)
def test_nat_add():
mod = tvm.IRModule()
p = Prelude(mod)
p.mod.import_from_std("nat.rly")
nat, z, s = p.mod.get_type("nat")
add = p.mod.get_global_var("nat_add")
dev = tvm.device("llvm", 0)
intrp = create_executor(mod=mod, device=dev, target="llvm")
# CAUTION: Following calls to intrp.evaluate(...) will re-prepare the prelude.
assert mod[add].checked_type == relay.FuncType([nat(), nat()], nat())
assert count(p, intrp.evaluate(add(s(z()), s(z())))) == 2
expr = add(s(z()), s(z()))
f = relay.GlobalVar("f")
mod[f] = relay.Function([], expr)
mod = transform.ToANormalForm()(mod)
expr = mod["f"]
assert count(p, intrp.evaluate(expr.body)) == 2
assert Feature.fLet in detect_feature(mod[add])
def test_let():
x = relay.Var("x")
y = relay.Var("y")
d = relay.const(4.0, "float32")
body = relay.Let(y, x, x + y)
body = relay.Let(x, d, body)
check_eval(body, 8)
opt_body = run_opt_pass(body, transform.ToANormalForm())
check_eval(opt_body, 8)
def test_function():
t = relay.TensorType((), "float32")
x = relay.Var("x", t)
f = relay.Function([x], x + x)
d = relay.const(4.0, "float32")
anf_f = run_opt_pass(f, transform.ToANormalForm())
assert isinstance(anf_f, relay.Function)
check_eval(f(d), 8)
check_eval(anf_f(d), 8)
def test_gradient_if():
x = relay.var("a", shape=(1, 16))
y = relay.var("y", shape=(1, 16))
cond = relay.var("cond", shape=(), dtype="uint1")
net = relay.If(cond, x, x)
net = relay.add(x, net)
net = relay.Function([cond, x, y], net)
mod = tvm.IRModule.from_expr(net)
mod = relay.transform.ToANormalForm()(mod)
mod = relay.transform.InferType()(mod)
mod["main"] = relay.transform.gradient(mod["main"], mode="higher_order")
mod = relay.transform.ToANormalForm()(mod)
if __name__ == "__main__":
tvm.testing.main() |
append bytes | # -*- coding: utf-8 -*-
"""
Module to manage SeedLinkConnection state.
Part of Python implementation of libslink of Chad Trabant and
JSeedLink of Anthony Lomax
:copyright:
The ObsPy Development Team ([email protected]) & Anthony Lomax
:license:
GNU Lesser General Public License, Version 3
(https://www.gnu.org/copyleft/lesser.html)
"""
from ..seedlinkexception import SeedLinkException
from ..slpacket import SLPacket
class SLState(object):
"""
Class to manage SeedLinkConnection state.
:var SL_DOWN: Connection state down.
:type SL_DOWN: int
:var SL_UP: Connection state up.
:type SL_UP: int
:var SL_DATA: Connection state data.
:type SL_DATA: int
:var state: Connection state.
:type state: int
:var NO_QUERY: INFO query state NO_QUERY.
:type NO_QUERY: int
:var INFO_QUERY: INFO query state INFO_QUERY.
:type INFO_QUERY: int
:var KEEP_ALIVE_QUERY: INFO query state KEEP_ALIVE_QUERY.
:type KEEP_ALIVE_QUERY: int
:var query_mode: INFO query state.
:type query_mode: int
:var BUFSIZE: Size of receiving buffer (default is 8192).
:type BUFSIZE: int
:var databuf: Data buffer for received packets.
:type databuf: bytearray
:var recptr: Receive pointer for databuf.
:type recptr: int
:var sendptr: Send pointer for databuf.
:type sendptr: int
:var expect_info: Flag to indicate if an INFO response is expected.
:type expect_info: bool
    :var netto_trig: Network timeout trigger.
:type netto_trig: int
:var netdly_trig: Network re-connect delay trigger.
:type netdly_trig: int
:var keepalive_trig: Send keepalive trigger.
:type keepalive_trig: int
:var previous_time: Time stamp of last state update.
:type previous_time: float
:var netto_time: Network timeout time stamp.
:type netto_time: float
:var netdly_time: Network re-connect delay time stamp.
:type netdly_time: float
:var keepalive_time: Keepalive time stamp.
:type keepalive_time: float
"""
SL_DOWN = 0
SL_UP = 1
SL_DATA = 2
NO_QUERY = 0
INFO_QUERY = 1
KEEP_ALIVE_QUERY = 2
BUFSIZE = 8192
def __init__(self):
self.state = SLState.SL_DOWN
self.query_mode = SLState.NO_QUERY
# AJL self.databuf = [str() for __idx0 in range(BUFSIZE)]
self.databuf = bytearray(SLState.BUFSIZE)
# AJL packed_buf = [str() for __idx0 in range(BUFSIZE)]
self.packed_buf = bytearray(SLState.BUFSIZE)
self.recptr = 0
self.sendptr = 0
self.expect_info = False
self.netto_trig = -1
self.netdly_trig = 0
self.keepalive_trig = -1
self.previous_time = 0.0
self.netto_time = 0.0
self.netdly_time = 0.0
self.keepalive_time = 0.0
def get_packet(self):
"""
Returns last received packet.
:return: last received packet if data buffer contains a full packet to
send.
:raise SeedLinkException: if there is not a packet ready to send.
See also: :meth:`packet_available`
"""
if not self.packet_available():
raise SeedLinkException("SLPacket not available to send")
return SLPacket(self.databuf, self.sendptr)
def packet_available(self):
"""
Check for full packet available to send.
:return: true if data buffer contains a full packet to send.
See also: :meth:`get_packet`
"""
return self.recptr - self.sendptr >= \
SLPacket.SLHEADSIZE + SLPacket.SLRECSIZE
def bytes_remaining(self):
"""
Return number of bytes remaining in receiving buffer.
:return: number of bytes remaining.
"""
return self.BUFSIZE - self.recptr
def is_error(self):
"""
Check for SeedLink ERROR packet.
:return: true if next send packet is a SeedLink ERROR packet
:raise SeedLinkException: if there are not enough bytes to determine
"""
if self.recptr - self.sendptr < len(SLPacket.ERRORSIGNATURE):
msg = "not enough bytes to determine packet type"
raise SeedLinkException(msg)
return self.databuf[self.sendptr: self.sendptr +
len(SLPacket.ERRORSIGNATURE)].lower() == \
SLPacket.ERRORSIGNATURE.lower() # @UndefinedVariable
def is_end(self):
"""
Check for SeedLink END packet.
:return: true if next send packet is a SeedLink END packet
:raise SeedLinkException: if there are not enough bytes to determine
"""
if self.recptr - self.sendptr < len(SLPacket.ENDSIGNATURE):
msg = "not enough bytes to determine packet type"
raise SeedLinkException(msg)
return self.databuf[self.sendptr: self.sendptr +
len(SLPacket.ENDSIGNATURE)].lower() == \
SLPacket.ENDSIGNATURE.lower() # @UndefinedVariable
def packet_is_info(self):
"""
Check for SeedLink INFO packet.
:return: true if next send packet is a SeedLink INFO packet
:raise SeedLinkException: if there are not enough bytes to determine
packet type
"""
if self.recptr - self.sendptr < len(SLPacket.INFOSIGNATURE):
msg = "not enough bytes to determine packet type"
raise SeedLinkException(msg)
return self.databuf[self.sendptr: self.sendptr +
len(SLPacket.INFOSIGNATURE)].lower() == \
SLPacket.INFOSIGNATURE.lower() # @UndefinedVariable
def increment_send_pointer(self):
"""
Increments the send pointer by size of one packet.
"""
self.sendptr += SLPacket.SLHEADSIZE + SLPacket.SLRECSIZE
def pack_data_buffer(self):
"""
Packs the buffer by removing all sent packets and shifting remaining
bytes to beginning of buffer.
"""
# AJL System.arraycopy(self.databuf, self.sendptr, self.packed_buf, 0,
# self.recptr - self.sendptr)
self.packed_buf[0:self.recptr - self.sendptr] = \
self.databuf[self.sendptr: self.recptr]
temp_buf = self.databuf
self.databuf = self.packed_buf
self.packed_buf = temp_buf
self.recptr -= self.sendptr
self.sendptr = 0
def METHOD_NAME(self, bytes_):
"""
Appends bytes to the receive buffer after the last received data.
"""
if self.bytes_remaining() < len(bytes_):
msg = "not enough bytes remaining in buffer to append new bytes"
raise SeedLinkException(msg)
self.databuf[self.recptr:self.recptr + len(bytes_)] = bytes_
self.recptr += len(bytes_) |
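# Hedged usage sketch, not part of the module: a receive loop would copy newly read
# bytes into databuf at recptr (which is what the masked append method above does),
# hand off any complete packets, then compact the buffer. `received` stands in for
# whatever the socket read returned.
def _drain_example(state, received):
    if state.bytes_remaining() >= len(received):
        state.databuf[state.recptr:state.recptr + len(received)] = received
        state.recptr += len(received)
    while state.packet_available():
        packet = state.get_packet()  # SLPacket starting at sendptr; caller would process it
        state.increment_send_pointer()
    state.pack_data_buffer()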
test formatted logger | # -*- coding: utf-8 -*-
# Copyright European Organization for Nuclear Research (CERN) since 2012
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
from re import match
import pytest
from rucio.common.exception import InvalidType
from rucio.common.utils import md5, adler32, parse_did_filter_from_string, Availability, retrying
from rucio.common.logging import formatted_logger
class TestUtils:
"""UTILS (COMMON): test utilisty functions"""
def test_utils_md5(self, file_factory):
"""(COMMON/UTILS): test calculating MD5 of a file"""
temp_file_1 = file_factory.file_generator(data='hello test\n')
ret = md5(temp_file_1)
assert isinstance(ret, str), "Object returned by utils.md5 is not a string"
assert match('[a-fA-F0-9]{32}', ret) is not None, "String returned by utils.md5 is not a md5 hex digest"
assert ret == '31d50dd6285b9ff9f8611d0762265d04', "Hex digest returned by utils.md5 is the MD5 checksum"
with pytest.raises(Exception, match='FATAL - could not get MD5 checksum of file no_file - \\[Errno 2\\] No such file or directory: \'no_file\''):
md5('no_file')
def test_utils_adler32(self, file_factory):
"""(COMMON/UTILS): test calculating Adler32 of a file"""
temp_file_1 = file_factory.file_generator(data='hello test\n')
ret = adler32(temp_file_1)
assert isinstance(ret, str)
assert match('[a-fA-F0-9]', ret) is not None
assert ret == '198d03ff'
with pytest.raises(Exception, match='FATAL - could not get Adler-32 checksum of file no_file: \\[Errno 2\\] No such file or directory: \'no_file\''):
adler32('no_file')
def test_parse_did_filter_string(self):
"""(COMMON/UTILS): test parsing of did filter string"""
test_cases = [{
'input': 'type=all,length=3,length>4,length>=6,length<=7, test=b, created_after=1900-01-01T00:00:00.000Z',
'expected_filter': {'length': 3, 'length.gt': 4, 'length.gte': 6, 'length.lte': 7, 'test': 'b', 'created_after': datetime.datetime.strptime('1900-01-01T00:00:00.000Z', '%Y-%m-%dT%H:%M:%S.%fZ')},
'expected_type': 'all'
}, {
'input': 'type=FILE',
'expected_filter': {},
'expected_type': 'file'
}, {
'input': '',
'expected_filter': {},
'expected_type': 'collection'
}]
for test_case in test_cases:
filters, type_ = parse_did_filter_from_string(test_case['input'])
assert test_case['expected_filter'] == filters
assert test_case['expected_type'] == type_
with pytest.raises(InvalidType):
input_ = 'type=g'
parse_did_filter_from_string(input_)
def test_availability_data_class(self):
availability = Availability()
assert availability.read is None
assert availability.write is None
assert availability.delete is None
availability = Availability(True, False, True)
assert availability.read
assert not availability.write
assert availability.delete
def test_availability_tuple_unpacking(self):
read, write, delete = Availability(True, False, True)
assert read
assert not write
assert delete
def test_availability_hash(self):
hash(Availability(True, True, True))
def test_availability_with_none(self):
assert Availability(write=False).integer == 5
def test_availability_from_integer_None(self):
assert Availability.from_integer(None) == Availability(None, None, None)
@pytest.mark.parametrize(
    "integer,tuple_values",
    [
        # (read, write, delete)
        (7, (None, None, None)),
        (6, (True, None, False)),
        (5, (None, False, None)),
    ],
)
def test_availability_convert_with_none(integer, tuple_values):
    """
    This tests the conversion to an integer with missing values. Missing values
    should be interpreted as `True`, since this is the default value.
    """
    assert integer == Availability(*tuple_values).integer
@pytest.mark.parametrize(
    "before,after",
    [
        # (read, write, delete)
        (7, (True, True, True)),
        (6, (True, True, False)),
        (5, (True, False, True)),
        (4, (True, False, False)),
        (3, (False, True, True)),
        (2, (False, True, False)),
        (1, (False, False, True)),
        (0, (False, False, False)),
    ],
)
def test_availability_translation(before, after):
    assert Availability.from_integer(before) == Availability(*after)
    assert tuple(Availability.from_integer(before)) == after
    assert Availability.from_integer(before).integer == before
    assert Availability(*after).integer == before
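# Hedged aside, not part of the original suite: the pairs above imply the integer form is
# a plain bit field with read -> 4, write -> 2 and delete -> 1.
def _illustrate_availability_bits():
    for read in (True, False):
        for write in (True, False):
            for delete in (True, False):
                assert Availability(read, write, delete).integer == \
                    (read << 2) | (write << 1) | int(delete)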
def METHOD_NAME():
result = None
def log_func(level, msg, *args, **kwargs):
nonlocal result
result = (level, msg)
new_log_func = formatted_logger(log_func, "a %s c")
new_log_func(logging.INFO, "b")
assert result == (logging.INFO, "a b c")
def test_retrying():
attempts = []
start_time = datetime.datetime.now()
@retrying(retry_on_exception=lambda _: True, wait_fixed=550, stop_max_attempt_number=3)
def always_retry():
attempts.append(True)
raise ValueError()
with pytest.raises(ValueError):
always_retry()
assert len(attempts) == 3
assert datetime.datetime.now() - start_time > datetime.timedelta(seconds=1)
attempts = []
@retrying(retry_on_exception=lambda e: isinstance(e, AttributeError), wait_fixed=1, stop_max_attempt_number=3)
def retry_on_attribute_error():
attempts.append(True)
raise ValueError()
with pytest.raises(ValueError):
retry_on_attribute_error()
assert len(attempts) == 1 |
get window | import unittest
with_alazar = True
def get_pulse():
from qupulse.pulses import TablePulseTemplate as TPT, SequencePulseTemplate as SPT, RepetitionPulseTemplate as RPT
ramp = TPT(identifier='ramp', channels={'out', 'trigger'})
ramp.add_entry(0, 'start', channel='out')
ramp.add_entry('duration', 'stop', 'linear', channel='out')
ramp.add_entry(0, 1, channel='trigger')
ramp.add_entry('duration', 1, 'hold', channel='trigger')
ramp.add_measurement_declaration('meas', 0, 'duration')
base = SPT([(ramp, dict(start='min', stop='max', duration='tau/3'), dict(meas='A')),
(ramp, dict(start='max', stop='max', duration='tau/3'), dict(meas='B')),
(ramp, dict(start='max', stop='min', duration='tau/3'), dict(meas='C'))], {'min', 'max', 'tau'})
repeated = RPT(base, 'n')
root = SPT([repeated, repeated, repeated], {'min', 'max', 'tau', 'n'})
return root
def get_alazar_config():
from atsaverage import alazar
from atsaverage.config import ScanlineConfiguration, CaptureClockConfiguration, EngineTriggerConfiguration,\
TRIGInputConfiguration, InputConfiguration
trig_level = int((5 + 0.4) / 10. * 255)
assert 0 <= trig_level < 256
config = ScanlineConfiguration()
config.triggerInputConfiguration = TRIGInputConfiguration(triggerRange=alazar.TriggerRangeID.etr_5V)
config.triggerConfiguration = EngineTriggerConfiguration(triggerOperation=alazar.TriggerOperation.J,
triggerEngine1=alazar.TriggerEngine.J,
triggerSource1=alazar.TriggerSource.external,
triggerSlope1=alazar.TriggerSlope.positive,
triggerLevel1=trig_level,
triggerEngine2=alazar.TriggerEngine.K,
triggerSource2=alazar.TriggerSource.disable,
triggerSlope2=alazar.TriggerSlope.positive,
triggerLevel2=trig_level)
config.captureClockConfiguration = CaptureClockConfiguration(source=alazar.CaptureClockType.internal_clock,
samplerate=alazar.SampleRateID.rate_100MSPS)
config.inputConfiguration = 4*[InputConfiguration(input_range=alazar.InputRangeID.range_1_V)]
config.totalRecordSize = 0
assert config.totalRecordSize == 0
return config
def get_operations():
from atsaverage.operations import Downsample
return [Downsample(identifier='DS_A', maskID='A'),
Downsample(identifier='DS_B', maskID='B'),
Downsample(identifier='DS_C', maskID='C'),
Downsample(identifier='DS_D', maskID='D')]
def METHOD_NAME(card):
from atsaverage.gui import ThreadedStatusWindow
window = ThreadedStatusWindow(card)
window.start()
return window
class TaborTests(unittest.TestCase):
@unittest.skip
def test_all(self):
from qupulse.hardware.awgs.tabor import TaborChannelPair, TaborAWGRepresentation
#import warnings
tawg = TaborAWGRepresentation(r'USB0::0x168C::0x2184::0000216488::INSTR')
tchannelpair = TaborChannelPair(tawg, (1, 2), 'TABOR_AB')
tawg.paranoia_level = 2
#warnings.simplefilter('error', Warning)
from qupulse.hardware.setup import HardwareSetup, PlaybackChannel, MarkerChannel
hardware_setup = HardwareSetup()
hardware_setup.set_channel('TABOR_A', PlaybackChannel(tchannelpair, 0))
hardware_setup.set_channel('TABOR_B', PlaybackChannel(tchannelpair, 1))
hardware_setup.set_channel('TABOR_A_MARKER', MarkerChannel(tchannelpair, 0))
hardware_setup.set_channel('TABOR_B_MARKER', MarkerChannel(tchannelpair, 1))
if with_alazar:
from qupulse.hardware.dacs.alazar import AlazarCard
import atsaverage.server
if not atsaverage.server.Server.default_instance.running:
atsaverage.server.Server.default_instance.start(key=b'guest')
import atsaverage.core
alazar = AlazarCard(atsaverage.core.getLocalCard(1, 1))
alazar.register_mask_for_channel('A', 0)
alazar.register_mask_for_channel('B', 0)
alazar.register_mask_for_channel('C', 0)
alazar.config = get_alazar_config()
alazar.register_operations('test', get_operations())
window = METHOD_NAME(atsaverage.core.getLocalCard(1, 1))
hardware_setup.register_dac(alazar)
repeated = get_pulse()
from qupulse.pulses.sequencing import Sequencer
sequencer = Sequencer()
sequencer.push(repeated,
parameters=dict(n=1000, min=-0.5, max=0.5, tau=192*3),
channel_mapping={'out': 'TABOR_A', 'trigger': 'TABOR_A_MARKER'},
window_mapping=dict(A='A', B='B', C='C'))
instruction_block = sequencer.build()
hardware_setup.register_program('test', instruction_block)
if with_alazar:
from atsaverage.masks import PeriodicMask
m = PeriodicMask()
m.identifier = 'D'
m.begin = 0
m.end = 1
m.period = 1
m.channel = 0
alazar._registered_programs['test'].masks.append(m)
hardware_setup.arm_program('test')
d = 1
|
get enthalpy | import numpy as np
import stdc.utils.params as p
import re
LENGTH_UNITS = ['M', 'CM', 'MM', 'MICROM', 'NM', 'A', 'A0']
TIME_UNITS = ['S', 'MIN', 'H', 'MICROS', 'NS']
MASS_UNITS = ['KG', 'T', 'DG', 'G', 'MG', 'MICROG', 'AMU']
TEMPERATURE_UNITS = ['K', 'C', 'F']
MOLE_UNITS = ['MOL', '#', 'KMOL']
ENERGY_UNITS = ['J', 'KJ', 'MJ', 'GJ', 'CAL', 'KCAL', 'MCAL', 'GCAL', 'HA', 'EV']
FREQUENCY_UNITS = ['1/S','HZ','KHZ','GHZ','MHZ']
# inertia tensor
# -----------------------------
def getInertiaTensor(aElMolWt,aXYZ):
# ===================================
# helper functions to populate
# inertia tensor
#
# diagonal elements
def getDiagMoments(a,b,m):
MolWt = sum(m)
sum1 = 0.0
sum2 = 0.0
sum3 = 0.0
for i,rows in enumerate(m):
sum1 = sum1 + m[i]*(a[i]*a[i]+b[i]*b[i])
sum2 = sum2 + m[i]*a[i]
sum3 = sum3 + m[i]*b[i]
sum2 = sum2*sum2 * 1.0/MolWt
sum3 = sum3*sum3 * 1.0/MolWt
Iaa = sum1 - sum2 - sum3
return Iaa
# off-diagonal elements
# -----------------------------
def getOffDiagMoments(a,b,m):
MolWt = sum(m)
sum1 = 0.0
sum2 = 0.0
sum3 = 0.0
for i,rows in enumerate(m):
sum1 = sum1 + m[i]*a[i]*b[i]
sum2 = sum2 + m[i]*a[i]
sum3 = sum3 + m[i]*b[i]
Iab = -sum1 + 1.0/MolWt*sum2*sum3
return Iab
# ===================================
# init inertia tensor
IT = np.empty([3,3])
    # get mass vector and X,Y,Z coordinates
m = aElMolWt
X = aXYZ[:,0]
Y = aXYZ[:,1]
Z = aXYZ[:,2]
# get diagonal and off-diagonal elements
Ixx = getDiagMoments(Y,Z,m)
Iyy = getDiagMoments(X,Z,m)
Izz = getDiagMoments(X,Y,m)
Ixy = getOffDiagMoments(X,Y,m)
Iyx = Ixy
Ixz = getOffDiagMoments(X,Z,m)
Izx = Ixz
Iyz = getOffDiagMoments(Y,Z,m)
Izy = Iyz
# put everything together
IT = [[Ixx,Ixy,Ixz],[Iyx,Iyy,Iyz],[Izx,Izy,Izz]]
return IT
# Get moments of Inertia
# of a molecule
#---------------------------------
def getMomentsOfInertia(aElMolWt,aXYZ,aGeomType):
InertiaMom = []
#construct the mass and XYZ np arrays
if len(aElMolWt)>0 and len(aXYZ)>0:
IT = getInertiaTensor(aElMolWt,aXYZ)
eigen,V = np.linalg.eig(IT)
InertiaMom = [eigen[0],eigen[1],eigen[2]] # in amu*A^2
return sorted(InertiaMom,reverse=True)
# Entropy of a species from NASA polynomials
#--------------------------
def getEntropy(alow,ahigh,Trange,T):
S = 0.0
if T>0.0:
Tmid = Trange[1]
Ta=[]
Ta.append(np.log(T)) #0
Ta.append(T) #1
Ta.append(T*T/2.0) #2
Ta.append(T*T*T/3.0) #3
Ta.append(T*T*T*T/4.0) #4
if T<=Tmid:
a = alow
else:
a = ahigh
for i in range(len(Ta)):
S = S + a[i]*Ta[i]
S = (S + a[6])*p.R
return S
# Internal Energy of a species from NASA polynomials
#--------------------------
def getInternalEnergy(alow,ahigh,Trange,T):
H = METHOD_NAME(alow,ahigh,Trange,T)
U = H - p.R*T
return U
# Heat Capacity Cv of a species from NASA polynomials
#--------------------------
def getHeatCapacityCv(alow,ahigh,Trange,T):
Cv = getHeatCapacityCp(alow,ahigh,Trange,T) - p.R
return Cv
# Heat Capacity Cp of a species from NASA polynomials
#--------------------------
def getHeatCapacityCp(alow,ahigh,Trange,T):
Cp = 0.0
Tmid = Trange[1]
Ta=[]
Ta.append(1.0) #0
Ta.append(T) #1
Ta.append(T*T) #2
Ta.append(T*T*T) #3
Ta.append(T*T*T*T) #4
if T<=Tmid:
a = alow
else:
a = ahigh
for i in range(len(Ta)):
Cp = Cp + a[i]*Ta[i]
Cp = Cp*p.R
return Cp
# Enthalpy of a species from NASA polynomials
#--------------------------
def METHOD_NAME(alow,ahigh,Trange,T):
H = 0.0
if T>0.0:
Tmid = Trange[1]
Ta=[]
Ta.append(1.0) #0
Ta.append(T/2.0) #1
Ta.append(T*T/3.0) #2
Ta.append(T*T*T/4.0) #3
Ta.append(T*T*T*T/5.0) #4
Ta.append(1.0/T) #5
if T<=Tmid:
a = alow
else:
a = ahigh
for i in range(len(Ta)):
H = H + a[i]*Ta[i]
H = H*p.R*T
return H
# Gibbs Energy of a species from NASA polynomials
#--------------------------
def getGibbsEnergy(alow,ahigh,Trange,T):
H = METHOD_NAME(alow,ahigh,Trange,T)
S = getEntropy(alow,ahigh,Trange,T)
G = H - T*S
return G
def chemFormulaToAtomsCounts(chemFormula):
atomCounts={}
atomCounts, chemFormula = _funcGroupsAtomsCounts(chemFormula, atomCounts)
atomCounts = _chemFormulaToAtomsCounts(chemFormula, atomCounts, multiplier=1.0)
return atomCounts
def _funcGroupsAtomsCounts(chemFormula, atomCounts):
funcGroupCounts = {}
    funcGroupRegex = r'(\(.*?\)\d*)'
funcGroupsMatch = re.findall(funcGroupRegex,chemFormula)
if funcGroupsMatch:
for funcGroup in sorted(funcGroupsMatch,reverse=True,key=len):
chemFormula = chemFormula.replace(funcGroup,'')
            countMatch = re.search(r'\)(\d+)$', funcGroup)
            if countMatch:
                count = countMatch.groups()[0]
                # drop only the trailing repeat count, not digits inside the group itself
                funcGroup = funcGroup[:countMatch.start(1)]
else:
count = '1'
funcGroup = funcGroup.replace(')','').replace('(','')
if funcGroup not in funcGroupCounts:
funcGroupCounts[funcGroup] = int(count)
else:
funcGroupCounts[funcGroup] += int(count)
for funcGroup, funcGroupCount in funcGroupCounts.items():
atomCounts = _chemFormulaToAtomsCounts(funcGroup, atomCounts, funcGroupCount)
return atomCounts, chemFormula
def _chemFormulaToAtomsCounts(chemFormula, atomCounts, multiplier):
Elements =[
'H', 'He',
'Li','Be','B','C','N','O','F','Ne',
'Na','Mg','Al', 'Si','P','S','Cl','Ar',
'K','Ca',
'Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn',
'Ga','Ge','As','Se','Br','Kr',
'Rb','Sr',
'Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd',
'In','Sn','Sb','Te','I','Xe',
'Cs','Ba',
'La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu',
'Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg',
'Tl','Pb','Bi','Po','At','Rn',
'Fr','Ra',
'Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr',
'Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn',
'Nh','Fl','Mc','Lv','Ts','Og']
    # match longer element symbols first so that e.g. 'Cl' is not consumed as 'C'
    atomCountsRegex = '(' + '|'.join(
        el + r'\d*' for el in sorted(Elements, key=len, reverse=True)) + ')'
atomCountMatch = re.findall(atomCountsRegex,chemFormula)
if atomCountMatch:
for atomCount in atomCountMatch:
            countMatch = re.search(r'(\d+)$', atomCount)
if countMatch:
count = int(countMatch.groups()[0])
atom = atomCount.replace(countMatch.groups()[0], '')
else:
count = 1
atom = atomCount
if atom not in atomCounts:
atomCounts[atom] = int(count*multiplier)
else:
atomCounts[atom] += int(count*multiplier)
    return atomCounts
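# Hedged worked examples, not part of the module: with the helpers above,
#   chemFormulaToAtomsCounts('C2H5OH')    -> {'C': 2, 'H': 6, 'O': 1}
#   chemFormulaToAtomsCounts('Al2(SO4)3') -> {'S': 3, 'O': 12, 'Al': 2}
# (functional groups in parentheses are expanded first, then merged with the backbone).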
test float type print | import sys
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, IS_MUSL
from numpy.core.tests._locales import CommaDecimalPointLocale
from io import StringIO
_REF = {np.inf: 'inf', -np.inf: '-inf', np.nan: 'nan'}
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_float_types(tp):
""" Check formatting.
This is only for the str function, and only for simple types.
The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(float(x)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 4:
assert_equal(str(tp(1e16)), str(float('1e16')),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '1e+16'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def test_nan_inf_float(tp):
""" Check formatting of nan & inf.
This is only for the str function, and only for simple types.
The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
for x in [np.inf, -np.inf, np.nan]:
assert_equal(str(tp(x)), _REF[x],
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_types(tp):
"""Check formatting of complex types.
This is only for the str function, and only for simple types.
The precision of np.float32 and np.longdouble aren't the same as the
python float precision.
"""
for x in [0, 1, -1, 1e20]:
assert_equal(str(tp(x)), str(complex(x)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x*1j)), str(complex(x*1j)),
err_msg='Failed str formatting for type %s' % tp)
assert_equal(str(tp(x + x*1j)), str(complex(x + x*1j)),
err_msg='Failed str formatting for type %s' % tp)
if tp(1e16).itemsize > 8:
assert_equal(str(tp(1e16)), str(complex(1e16)),
err_msg='Failed str formatting for type %s' % tp)
else:
ref = '(1e+16+0j)'
assert_equal(str(tp(1e16)), ref,
err_msg='Failed str formatting for type %s' % tp)
@pytest.mark.parametrize('dtype', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_inf_nan(dtype):
"""Check inf/nan formatting of complex types."""
TESTS = {
complex(np.inf, 0): "(inf+0j)",
complex(0, np.inf): "infj",
complex(-np.inf, 0): "(-inf+0j)",
complex(0, -np.inf): "-infj",
complex(np.inf, 1): "(inf+1j)",
complex(1, np.inf): "(1+infj)",
complex(-np.inf, 1): "(-inf+1j)",
complex(1, -np.inf): "(1-infj)",
complex(np.nan, 0): "(nan+0j)",
complex(0, np.nan): "nanj",
complex(-np.nan, 0): "(nan+0j)",
complex(0, -np.nan): "nanj",
complex(np.nan, 1): "(nan+1j)",
complex(1, np.nan): "(1+nanj)",
complex(-np.nan, 1): "(nan+1j)",
complex(1, -np.nan): "(1+nanj)",
}
for c, s in TESTS.items():
assert_equal(str(dtype(c)), s)
# print tests
def _test_redirected_print(x, tp, ref=None):
file = StringIO()
file_tp = StringIO()
stdout = sys.stdout
try:
sys.stdout = file_tp
print(tp(x))
sys.stdout = file
if ref:
print(ref)
else:
print(x)
finally:
sys.stdout = stdout
assert_equal(file.getvalue(), file_tp.getvalue(),
err_msg='print failed for type%s' % tp)
@pytest.mark.parametrize('tp', [np.float32, np.double, np.longdouble])
def METHOD_NAME(tp):
"""Check formatting when using print """
for x in [0, 1, -1, 1e20]:
_test_redirected_print(float(x), tp)
for x in [np.inf, -np.inf, np.nan]:
_test_redirected_print(float(x), tp, _REF[x])
if tp(1e16).itemsize > 4:
_test_redirected_print(float(1e16), tp)
else:
ref = '1e+16'
_test_redirected_print(float(1e16), tp, ref)
@pytest.mark.parametrize('tp', [np.complex64, np.cdouble, np.clongdouble])
def test_complex_type_print(tp):
"""Check formatting when using print """
# We do not create complex with inf/nan directly because the feature is
# missing in python < 2.6
for x in [0, 1, -1, 1e20]:
_test_redirected_print(complex(x), tp)
if tp(1e16).itemsize > 8:
_test_redirected_print(complex(1e16), tp)
else:
ref = '(1e+16+0j)'
_test_redirected_print(complex(1e16), tp, ref)
_test_redirected_print(complex(np.inf, 1), tp, '(inf+1j)')
_test_redirected_print(complex(-np.inf, 1), tp, '(-inf+1j)')
_test_redirected_print(complex(-np.nan, 1), tp, '(nan+1j)')
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
tests = [('{0}', True, np.bool_),
('{0}', False, np.bool_),
('{0:d}', 130, np.uint8),
('{0:d}', 50000, np.uint16),
('{0:d}', 3000000000, np.uint32),
('{0:d}', 15000000000000000000, np.uint64),
('{0:d}', -120, np.int8),
('{0:d}', -30000, np.int16),
('{0:d}', -2000000000, np.int32),
('{0:d}', -7000000000000000000, np.int64),
('{0:g}', 1.5, np.float16),
('{0:g}', 1.5, np.float32),
('{0:g}', 1.5, np.float64),
('{0:g}', 1.5, np.longdouble),
('{0:g}', 1.5+0.5j, np.complex64),
('{0:g}', 1.5+0.5j, np.complex128),
('{0:g}', 1.5+0.5j, np.clongdouble)]
for (fmat, val, valtype) in tests:
try:
assert_equal(fmat.format(val), fmat.format(valtype(val)),
"failed with val %s, type %s" % (val, valtype))
except ValueError as e:
assert_(False,
"format raised exception (fmt='%s', val=%s, type=%s, exc='%s')" %
(fmat, repr(val), repr(valtype), str(e)))
#
# Locale tests: scalar types formatting should be independent of the locale
#
class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
def test_locale_single(self):
assert_equal(str(np.float32(1.2)), str(float(1.2)))
def test_locale_double(self):
assert_equal(str(np.double(1.2)), str(float(1.2)))
@pytest.mark.skipif(IS_MUSL,
reason="test flaky on musllinux")
def test_locale_longdouble(self):
assert_equal(str(np.longdouble('1.2')), str(float(1.2))) |
main | """
Run all examples in a row.
This script should be executed from inside the haddock3/examples/ folder.
This script runs the `*-test.cfg` files. These test cases are not fetched
automatically. If you want to add/remove cases you need to edit this script.
The HADDOCK3 team has defined here only those test cases that are part of the
integration tests.
This script will delete existing run directories which names overlap with those
defined in the test configuration files.
If you see errors related to python import statements, make sure you have
the HADDOCK3 environment activated.
A breaking example means something is wrong in the HADDOCK3 core workflow.
You should work towards solving that problem or contact the HADDOCK3 team.
USAGE:
$ python run_examples-full.py -h
$ python run_examples-full.py # runs all examples regardless of errors
$ python run_examples-full.py -b # stops asap an error is found
"""
import argparse
import subprocess
import sys
from shutil import rmtree
try:
from haddock.libs.libio import working_directory
from haddock.gear.config_reader import read_config
except Exception:
print( # noqa: T001
"Haddock3 could not be imported. "
"Please activate the haddock3 python environment.",
file=sys.stderr,
)
sys.exit(1)
# edit this dictionary to add or remove examples.
# keys are the examples folder, and values are the configuration files
# spacings are anti-pythonic but facilitate reading :-)
examples = (
("docking-protein-DNA" , "docking-protein-DNA-full.cfg"), # noqa: E203, E501
("docking-protein-DNA" , "docking-protein-DNA-cltsel-full.cfg"), # noqa: E203, E501
("docking-protein-DNA" , "docking-protein-DNA-mdref-full.cfg"), # noqa: E203, E501
("docking-protein-homotrimer" , "docking-protein-homotrimer-full.cfg"), # noqa: E203, E501
("docking-protein-ligand" , "docking-protein-ligand-full.cfg"), # noqa: E203, E501
("docking-protein-ligand-shape", "docking-protein-ligand-shape-full.cfg"), # noqa: E203, E501
("docking-protein-peptide" , "docking-protein-peptide-full.cfg"), # noqa: E203, E501
("docking-protein-peptide" , "docking-protein-peptide-cltsel-full.cfg"), # noqa: E203, E501
("docking-protein-peptide" , "docking-protein-peptide-mdref-full.cfg"), # noqa: E203, E501
("docking-protein-protein" , "docking-protein-protein-full.cfg"), # noqa: E203, E501
("docking-protein-protein" , "docking-protein-protein-cltsel-full.cfg"), # noqa: E203, E501
("docking-protein-protein" , "docking-protein-protein-mdref-full.cfg"), # noqa: E203, E501
("docking-antibody-antigen" , "docking-antibody-antigen-CDR-accessible-full"), # noqa: E203, E501
("docking-antibody-antigen" , "docking-antibody-antigen-CDR-accessible-clt-full.cfg"), # noqa: E203, E501
("docking-antibody-antigen" , "docking-antibody-antigen-ranairCDR-full.cfg"), # noqa: E203, E501
("docking-antibody-antigen" , "docking-antibody-antigen-ranairCDR-clt-full.cfg"), # noqa: E203, E501
)
ap = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
ap.add_argument(
'-b',
'--break-on-errors',
action='store_true',
help=(
"Stop execution as soon an example gives an error. "
"If not given, runs all examples regardless of errors."
),
)
def load_args():
"""Load argparse arguments."""
return ap.parse_args()
def METHOD_NAME(examples, break_on_errors=True):
"""Run all the examples."""
for folder, file_ in examples:
print() # noqa: T001
print(f" {file_.upper()} ".center(80, "*")) # noqa: T001
print() # noqa: T001
with working_directory(folder):
params = read_config(file_)
rmtree(params["run_dir"], ignore_errors=True)
subprocess.run(
f"haddock3 {file_}",
shell=True,
check=break_on_errors,
stdout=sys.stdout,
stderr=sys.stderr,
)
return
if __name__ == "__main__":
cmd = load_args()
METHOD_NAME(examples, **vars(cmd)) |
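# Hedged sketch (an assumption, not the haddock3 implementation): the
# `working_directory` helper imported above behaves like a chdir-style context
# manager, which is all this script relies on. A minimal equivalent, under a
# different name so it cannot shadow the real import:
import contextlib
import os
@contextlib.contextmanager
def _working_directory_sketch(path):
    """Temporarily switch the current working directory to `path`."""
    previous = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(previous)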
test clear viewport | import pytest
import arcade
def create(ctx, width, height, components=4, layers=1, dtype='f1'):
layers = [ctx.texture((width, height), components=components, dtype=dtype) for _ in range(layers)]
return ctx.framebuffer(color_attachments=layers)
def test_properties(ctx):
"""Test framebuffers"""
fb = create(ctx, 10, 20, components=4)
assert fb.ctx == ctx
assert fb.width == 10
assert fb.height == 20
assert fb.size == (10, 20)
assert fb.samples == 0
assert fb.viewport == (0, 0, 10, 20)
assert fb.depth_attachment is None
assert fb.depth_mask is True
assert repr(fb).startswith('<Framebuffer')
def test_viewport(ctx):
"""Test viewport"""
fb = create(ctx, 10, 20, components=4)
fb.use()
fb.viewport = (1, 2, 3, 4)
assert fb.viewport == (1, 2, 3, 4)
with pytest.raises(ValueError):
fb.viewport = 0
with pytest.raises(ValueError):
fb.viewport = 0, 0, 0
with pytest.raises(ValueError):
fb.viewport = 0, 0, 0, 0, 0
def test_binding(ctx):
"""Ensure bind tracking works"""
ctx.window.use()
fb = create(ctx, 10, 20, components=4)
fb.use()
fb.use()
assert ctx.active_framebuffer == fb
ctx.window.use()
ctx.window.use() # Twice to trigger bind check
assert ctx.active_framebuffer == ctx.screen
def test_clear(ctx):
"""Clear framebuffer with different methods and ensure binding do not change"""
ctx.window.use()
fb = create(ctx, 10, 20, components=4)
fb.clear()
fb.clear(color=(0, 0, 0, 0), normalized=True)
fb.clear(color=(0, 0, 0), normalized=True)
fb.clear(color=arcade.csscolor.AZURE)
fb.clear(color=(0, 0, 0))
fb.clear(color=(0, 0, 0, 0))
assert ctx.active_framebuffer == ctx.screen
def METHOD_NAME(ctx):
fb = create(ctx, 4, 4, components=1)
fb.clear(color=(64, 64, 64, 64))
assert fb.read(components=1) == b'\x40' * 16
# Clear only the center pixels and verify that the rest is unchanged
fb.clear()
fb.clear(color=(255, 255, 255, 255), viewport=(1, 1, 2, 2))
expected = (
b'\x00\x00\x00\x00'
b'\x00\xff\xff\x00'
b'\x00\xff\xff\x00'
b'\x00\x00\x00\x00'
)
assert bytes(fb.read(components=1)) == expected
def test_clear_with_scissor(ctx):
fb = create(ctx, 4, 4, components=1)
fb.clear()
fb.scissor = 1, 1, 2, 2
fb.clear(color=(255, 255, 255, 255))
assert bytes(fb.read(components=1)) == b'\xff' * 16
def test_multi_attachment(ctx):
"""Create framebuffers with multiple layers"""
for i in range(ctx.info.MAX_COLOR_ATTACHMENTS):
fb = create(ctx, 10, 10, components=4, layers=i + 1)
assert len(fb.color_attachments) == i + 1
assert fb.glo.value > 0
def test_depth_mask(ctx):
fb = create(ctx, 10, 10)
fb.use()
assert fb.depth_mask is True
fb.depth_mask = False
assert fb.depth_mask is False
def test_incomplete(ctx):
"""Create empty framebuffer. This might be possible in the future?"""
with pytest.raises(ValueError):
ctx.framebuffer()
def test_varying_attachment_size(ctx):
"""Varying attachment sizes not supported for now"""
fb = create(ctx, 10, 20, components=4)
with pytest.raises(ValueError):
ctx.framebuffer(
color_attachments=[
ctx.texture((10, 10), components=4),
ctx.texture((10, 11), components=4)])
def test_read(ctx):
fb = create(ctx, 2, 2, components=4)
fb.clear(color=(255, 255, 0, 255))
data = fb.read(components=4)
assert len(data) == 16
assert isinstance(fb.read(), bytes)
# Read 3 components
data = fb.read(components=3)
assert len(data) == 12
assert data == b'\xff\xff\x00' * 4
# Read from f2 texture
fb = create(ctx, 2, 2, components=1, layers=1, dtype="f2")
data = fb.read(components=1, dtype="f2")
assert len(data) == 2 * 2 * 2
# Read from f4 texture
fb = create(ctx, 2, 2, components=1, layers=1, dtype="f4")
data = fb.read(components=1, dtype="f4")
assert len(data) == 2 * 2 * 4
# Read from i2 texture
fb = create(ctx, 2, 2, components=1, layers=1, dtype="i2")
data = fb.read(components=1, dtype="i2")
assert len(data) == 2 * 2 * 2
def test_resize(ctx):
tex = ctx.texture((100, 100), components=4)
fbo = ctx.framebuffer(color_attachments=[tex])
assert fbo.size == tex.size
tex.resize((200, 200))
assert tex.size == (200, 200)
fbo.resize()
assert fbo.size == tex.size
assert fbo.viewport == (0, 0, *fbo.size) |
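# Hedged helper, not part of the arcade test suite: the byte counts asserted in
# test_read above follow width * height * components * bytes-per-component,
# where the digit in the dtype string ('f1', 'f2', 'f4', 'i2') appears to encode
# the per-component size in bytes.
def _expected_read_size(width, height, components, dtype="f1"):
    bytes_per_component = int(dtype[1:])
    return width * height * components * bytes_per_component
assert _expected_read_size(2, 2, 4, "f1") == 16
assert _expected_read_size(2, 2, 3, "f1") == 12
assert _expected_read_size(2, 2, 1, "f2") == 8
assert _expected_read_size(2, 2, 1, "f4") == 16
assert _expected_read_size(2, 2, 1, "i2") == 8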
pause | """Support for audio input and output over digital buses
The `audiobusio` module contains classes to provide access to audio IO
over digital buses. These protocols are used to communicate audio to other
chips in the same circuit. It doesn't include audio interconnect protocols
such as S/PDIF.
All classes change hardware state and should be deinitialized when they
are no longer needed. To do so, either call :py:meth:`!deinit` or use a
context manager."""
from __future__ import annotations
import circuitpython_typing
import microcontroller
from circuitpython_typing import WriteableBuffer
class I2SOut:
"""Output an I2S audio signal"""
def __init__(
self,
bit_clock: microcontroller.Pin,
word_select: microcontroller.Pin,
data: microcontroller.Pin,
*,
left_justified: bool,
) -> None:
"""Create a I2SOut object associated with the given pins.
:param ~microcontroller.Pin bit_clock: The bit clock (or serial clock) pin
:param ~microcontroller.Pin word_select: The word select (or left/right clock) pin
:param ~microcontroller.Pin data: The data pin
:param bool left_justified: True when data bits are aligned with the word select clock. False
when they are shifted by one to match classic I2S protocol.
Simple 8ksps 440 Hz sine wave on `Metro M0 Express <https://www.adafruit.com/product/3505>`_
using `UDA1334 Breakout <https://www.adafruit.com/product/3678>`_::
import audiobusio
import audiocore
import board
import array
import time
import math
# Generate one period of sine wave.
length = 8000 // 440
sine_wave = array.array("H", [0] * length)
for i in range(length):
sine_wave[i] = int(math.sin(math.pi * 2 * i / length) * (2 ** 15) + 2 ** 15)
sine_wave = audiocore.RawSample(sine_wave, sample_rate=8000)
i2s = audiobusio.I2SOut(board.D1, board.D0, board.D9)
i2s.play(sine_wave, loop=True)
time.sleep(1)
i2s.stop()
Playing a wave file from flash::
import board
import audiocore
import audiobusio
import digitalio
f = open("cplay-5.1-16bit-16khz.wav", "rb")
wav = audiocore.WaveFile(f)
a = audiobusio.I2SOut(board.D1, board.D0, board.D9)
print("playing")
a.play(wav)
while a.playing:
pass
print("stopped")"""
...
def deinit(self) -> None:
"""Deinitialises the I2SOut and releases any hardware resources for reuse."""
...
def __enter__(self) -> I2SOut:
"""No-op used by Context Managers."""
...
def __exit__(self) -> None:
"""Automatically deinitializes the hardware when exiting a context. See
:ref:`lifetime-and-contextmanagers` for more info."""
...
def play(
self, sample: circuitpython_typing.AudioSample, *, loop: bool = False
) -> None:
"""Plays the sample once when loop=False and continuously when loop=True.
Does not block. Use `playing` to block.
Sample must be an `audiocore.WaveFile`, `audiocore.RawSample`, `audiomixer.Mixer` or `audiomp3.MP3Decoder`.
The sample itself should consist of 8 bit or 16 bit samples."""
...
def stop(self) -> None:
"""Stops playback."""
...
playing: bool
"""True when the audio sample is being output. (read-only)"""
def METHOD_NAME(self) -> None:
"""Stops playback temporarily while remembering the position. Use `resume` to resume playback."""
...
def resume(self) -> None:
"""Resumes sample playback after :py:func:`pause`."""
...
paused: bool
"""True when playback is paused. (read-only)"""
class PDMIn:
"""Record an input PDM audio stream"""
def __init__(
self,
clock_pin: microcontroller.Pin,
data_pin: microcontroller.Pin,
*,
sample_rate: int = 16000,
bit_depth: int = 8,
mono: bool = True,
oversample: int = 64,
startup_delay: float = 0.11,
) -> None:
"""Create a PDMIn object associated with the given pins. This allows you to
record audio signals from the given pins. Individual ports may put further
restrictions on the recording parameters. The overall sample rate is
determined by `sample_rate` x ``oversample``, and the total must be 1MHz or
higher, so `sample_rate` must be a minimum of 16000.
:param ~microcontroller.Pin clock_pin: The pin to output the clock to
:param ~microcontroller.Pin data_pin: The pin to read the data from
:param int sample_rate: Target sample_rate of the resulting samples. Check `sample_rate` for actual value.
Minimum sample_rate is about 16000 Hz.
:param int bit_depth: Final number of bits per sample. Must be divisible by 8
:param bool mono: True when capturing a single channel of audio, captures two channels otherwise
:param int oversample: Number of single bit samples to decimate into a final sample. Must be divisible by 8
:param float startup_delay: seconds to wait after starting microphone clock
to allow microphone to turn on. Most require only 0.01s; some require 0.1s. Longer is safer.
Must be in range 0.0-1.0 seconds.
**Limitations:** On SAMD and RP2040, supports only 8 or 16 bit mono input, with 64x oversampling.
On nRF52840, supports only 16 bit mono input at 16 kHz; oversampling is fixed at 64x. Not provided
on nRF52833 for space reasons. Not available on Espressif.
For example, to record 8-bit unsigned samples to a buffer::
import audiobusio
import board
# Prep a buffer to record into
b = bytearray(200)
with audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA, sample_rate=16000) as mic:
mic.record(b, len(b))
To record 16-bit unsigned samples to a buffer::
import audiobusio
import board
# Prep a buffer to record into.
b = array.array("H", [0] * 200)
with audiobusio.PDMIn(board.MICROPHONE_CLOCK, board.MICROPHONE_DATA, sample_rate=16000, bit_depth=16) as mic:
mic.record(b, len(b))
"""
...
def deinit(self) -> None:
"""Deinitialises the PDMIn and releases any hardware resources for reuse."""
...
def __enter__(self) -> PDMIn:
"""No-op used by Context Managers."""
...
def __exit__(self) -> None:
"""Automatically deinitializes the hardware when exiting a context."""
...
def record(self, destination: WriteableBuffer, destination_length: int) -> None:
"""Records destination_length bytes of samples to destination. This is
blocking.
An IOError may be raised when the destination is too slow to record the
audio at the given rate. For internal flash, writing all 1s to the file
before recording is recommended to speed up writes.
:return: The number of samples recorded. If this is less than ``destination_length``,
some samples were missed due to processing time."""
...
sample_rate: int
"""The actual sample_rate of the recording. This may not match the constructed
sample rate due to internal clock limitations.""" |
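# Hedged illustration, not part of the CircuitPython API above: the PDMIn
# docstring requires sample_rate * oversample to reach at least 1 MHz; with the
# defaults that is 16000 * 64 = 1_024_000 Hz, which is why 16 kHz is the stated
# minimum sample rate.
def _pdm_clock_ok(sample_rate: int = 16000, oversample: int = 64) -> bool:
    return sample_rate * oversample >= 1_000_000
assert _pdm_clock_ok(16000, 64)     # 1_024_000 Hz PDM clock is acceptable
assert not _pdm_clock_ok(8000, 64)  # 512_000 Hz PDM clock is too slow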
send message sync model to client | import logging
import numpy as np
from .message_define import MyMessage
from .utils import transform_tensor_to_list
from ....core.distributed.fedml_comm_manager import FedMLCommManager
from ....core.distributed.communication.message import Message
class FedNovaServerManager(FedMLCommManager):
def __init__(
self,
args,
aggregator,
comm=None,
rank=0,
size=0,
backend="MPI",
is_preprocessed=False,
preprocessed_client_lists=None,
):
super().__init__(args, comm, rank, size, backend)
self.args = args
self.aggregator = aggregator
self.round_num = args.comm_round
self.round_idx = 0
self.is_preprocessed = is_preprocessed
self.preprocessed_client_lists = preprocessed_client_lists
def run(self):
super().run()
def send_init_msg(self):
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
client_schedule = self.aggregator.generate_client_schedule(self.round_idx, client_indexes)
average_weight_dict = self.aggregator.get_average_weight(client_indexes)
global_model_params = self.aggregator.get_global_model_params()
for process_id in range(1, self.size):
self.send_message_init_config(
process_id, global_model_params,
average_weight_dict, client_schedule
)
def register_message_receive_handlers(self):
self.register_message_receive_handler(
MyMessage.MSG_TYPE_C2S_SEND_MODEL_TO_SERVER,
self.handle_message_receive_model_from_client,
)
def handle_message_receive_model_from_client(self, msg_params):
sender_id = msg_params.get(MyMessage.MSG_ARG_KEY_SENDER)
model_params = msg_params.get(MyMessage.MSG_ARG_KEY_MODEL_PARAMS)
# local_sample_number = msg_params.get(MyMessage.MSG_ARG_KEY_NUM_SAMPLES)
client_runtime_info = msg_params.get(MyMessage.MSG_ARG_KEY_CLIENT_RUNTIME_INFO)
self.aggregator.record_client_runtime(sender_id - 1, client_runtime_info)
# self.aggregator.add_local_trained_result(
# sender_id - 1, model_params, local_sample_number
# )
self.aggregator.add_local_trained_result(
sender_id - 1, model_params
)
b_all_received = self.aggregator.check_whether_all_receive()
logging.info("b_all_received = " + str(b_all_received))
if b_all_received:
global_model_params = self.aggregator.aggregate()
self.aggregator.test_on_server_for_all_clients(self.round_idx)
# start the next round
self.round_idx += 1
if self.round_idx == self.round_num:
# post_complete_message_to_sweep_process(self.args)
self.finish()
print("here")
return
if self.is_preprocessed:
if self.preprocessed_client_lists is None:
# sampling has already been done in data preprocessor
client_indexes = [self.round_idx] * self.args.client_num_per_round
else:
client_indexes = self.preprocessed_client_lists[self.round_idx]
else:
# sampling clients
client_indexes = self.aggregator.client_sampling(
self.round_idx,
self.args.client_num_in_total,
self.args.client_num_per_round,
)
client_schedule = self.aggregator.generate_client_schedule(self.round_idx, client_indexes)
average_weight_dict = self.aggregator.get_average_weight(client_indexes)
global_model_params = self.aggregator.get_global_model_params()
print("indexes of clients: " + str(client_indexes))
print("size = %d" % self.size)
for receiver_id in range(1, self.size):
self.METHOD_NAME(
receiver_id, global_model_params,
average_weight_dict, client_schedule
)
def send_message_init_config(self, receive_id, global_model_params,
average_weight_dict, client_schedule):
message = Message(
MyMessage.MSG_TYPE_S2C_INIT_CONFIG, self.get_sender_id(), receive_id
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
# message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
message.add_params(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS, average_weight_dict)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE, client_schedule)
self.send_message(message)
def METHOD_NAME(self, receive_id, global_model_params,
average_weight_dict, client_schedule):
logging.info("send_message_sync_model_to_client. receive_id = %d" % receive_id)
message = Message(
MyMessage.MSG_TYPE_S2C_SYNC_MODEL_TO_CLIENT,
self.get_sender_id(),
receive_id,
)
message.add_params(MyMessage.MSG_ARG_KEY_MODEL_PARAMS, global_model_params)
# message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_INDEX, str(client_index))
message.add_params(MyMessage.MSG_ARG_KEY_AVG_WEIGHTS, average_weight_dict)
message.add_params(MyMessage.MSG_ARG_KEY_CLIENT_SCHEDULE, client_schedule)
self.send_message(message) |
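# Hedged toy, not the real FedML aggregator: the round loop above gates
# aggregation on an "all clients received" check. A minimal stand-in for the two
# aggregator methods used here (names match the calls above, internals are an
# assumption) looks like this.
class _ToyAggregatorGate:
    def __init__(self, num_clients):
        self.num_clients = num_clients
        self.received = {}
    def add_local_trained_result(self, client_index, model_params):
        self.received[client_index] = model_params
    def check_whether_all_receive(self):
        if len(self.received) < self.num_clients:
            return False
        self.received.clear()  # assumed reset so the next round starts empty
        return True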
test correct num calls | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# -*- coding: utf-8 -*-
"""
Test the synthetic data.
"""
import pytest
import time
@pytest.fixture(scope="module", autouse=True)
def port(env):
"""${SYNTHETIC_DATA_DB_PORT}
Returns
-------
str
The ${SYNTHETIC_DATA_DB_PORT}.
"""
return env["SYNTHETIC_DATA_DB_PORT"]
def test_correct_dates(cursor):
"""Check that synthetic data container contains the correct dates of data."""
query = "SELECT DISTINCT(datetime::date) FROM events.calls"
cursor.execute(query)
results = set([str(x["datetime"]) for x in cursor.fetchall()])
expected = ["2016-01-01", "2016-01-02", "2016-01-03"]
assert results == set(expected)
def METHOD_NAME(cursor):
"""Checking that synthetic data container contains the correct number of calls for each day."""
query = """
SELECT datetime::date, COUNT(*) as count
FROM events.calls
GROUP BY datetime::date
"""
cursor.execute(query)
results = set([i["count"] for i in cursor.fetchall()])
assert results == set([4000])
def test_correct_cell_indexes(cursor):
"""Cells table should have five indexes - id, site_id, primary key and spatial ones on geom_point and geom_polygon."""
expected_indexes = [
{
"index_definition": "CREATE INDEX cells_id_idx ON infrastructure.cells USING btree (id)"
},
{
"index_definition": "CREATE INDEX cells_site_id_idx ON infrastructure.cells USING btree (site_id)"
},
{
"index_definition": "CREATE INDEX infrastructure_cells_geom_point_index ON infrastructure.cells USING gist (geom_point)"
},
{
"index_definition": "CREATE INDEX infrastructure_cells_geom_polygon_index ON infrastructure.cells USING gist (geom_polygon)"
},
{
"index_definition": "CREATE UNIQUE INDEX cells_id_version_key ON infrastructure.cells USING btree (id, version)"
},
{
"index_definition": "CREATE UNIQUE INDEX cells_pkey ON infrastructure.cells USING btree (cell_id)"
},
]
query = "select pg_get_indexdef(indexrelid) as index_definition from pg_index where indrelid = 'infrastructure.cells'::regclass ORDER BY index_definition;"
cursor.execute(query)
results = cursor.fetchall()
assert expected_indexes == results
def test_correct_site_indexes(cursor):
"""Sites table should have four indexes - id, primary key
and spatial ones on geom_point and geom_polygon.
"""
expected_indexes = [
{
"index_definition": "CREATE INDEX infrastructure_sites_geom_point_index ON infrastructure.sites USING gist (geom_point)"
},
{
"index_definition": "CREATE INDEX infrastructure_sites_geom_polygon_index ON infrastructure.sites USING gist (geom_polygon)"
},
{
"index_definition": "CREATE INDEX sites_id_idx ON infrastructure.sites USING btree (id)"
},
{
"index_definition": "CREATE UNIQUE INDEX sites_id_version_key ON infrastructure.sites USING btree (id, version)"
},
{
"index_definition": "CREATE UNIQUE INDEX sites_pkey ON infrastructure.sites USING btree (site_id)"
},
]
query = "select pg_get_indexdef(indexrelid) index_definition from pg_index where indrelid = 'infrastructure.sites'::regclass ORDER BY index_definition;"
cursor.execute(query)
results = cursor.fetchall()
assert expected_indexes == results
def test_correct_cells(cursor):
"""Checking that synthetic data container contains the correct number of cells."""
query = """SELECT COUNT(*) FROM infrastructure.cells"""
cursor.execute(query)
results = cursor.fetchall()[0]["count"]
assert results == 100
def test_cells_within_geoms(cursor):
"""Check synth cells are in correct location for nepal"""
query = """SELECT st_x(geom_point) as lon, st_y(geom_point) as lat FROM infrastructure.cells"""
cursor.execute(query)
res = cursor.fetchall()[0]
lon = res["lon"]
lat = res["lat"]
assert 80 <= lon <= 89
assert 26 <= lat <= 31
def test_calls_registered_in_available_tables(cursor):
"""Make sure calls tables registered correctly"""
query = """
select
*
from
available_tables
where
table_name = 'calls'
"""
cursor.execute(query)
res = cursor.fetchall()[0]
assert res["has_locations"]
assert res["has_subscribers"]
assert res["has_counterparts"] |
test capture cmd no capture fail | """
Tests for repo2docker/utils.py
"""
import os
import platform
import subprocess
import tempfile
import pytest
import traitlets
from repo2docker import utils
def test_capture_cmd_no_capture_success():
# This should succeed
for line in utils.execute_cmd(["/bin/bash", "-c", "echo test"]):
pass
def METHOD_NAME():
with pytest.raises(subprocess.CalledProcessError):
for line in utils.execute_cmd(["/bin/bash", "-c", "e "]):
pass
def test_capture_cmd_capture_success():
# This should succeed
for line in utils.execute_cmd(["/bin/bash", "-c", "echo test"], capture=True):
assert line == "test\n"
def test_capture_cmd_noeol_capture_success():
# This should succeed
lines = list(
utils.execute_cmd(["/bin/bash", "-c", "echo -en 'test\ntest'"], capture=True)
)
assert lines == ["test\n", "test"]
def test_capture_cmd_capture_fail():
with pytest.raises(subprocess.CalledProcessError):
for line in utils.execute_cmd(
["/bin/bash", "-c", "echo test; exit 1 "], capture=True
):
assert line == "test\n"
def test_chdir(tmpdir):
d = str(tmpdir.mkdir("cwd"))
cur_cwd = os.getcwd()
with utils.chdir(d):
assert os.getcwd() == d
assert os.getcwd() == cur_cwd
def test_byte_spec_validation():
bs = utils.ByteSpecification()
assert bs.validate(None, 1) == 1
assert bs.validate(None, 1.0) == 1.0
assert bs.validate(None, "1K") == 1024
assert bs.validate(None, "1M") == 1024 * 1024
assert bs.validate(None, "1G") == 1024 * 1024 * 1024
assert bs.validate(None, "1T") == 1024 * 1024 * 1024 * 1024
with pytest.raises(traitlets.TraitError):
bs.validate(None, "NK")
with pytest.raises(traitlets.TraitError):
bs.validate(None, "1m")
@pytest.mark.parametrize(
"input,expected",
[
(["8888:8888"], {"8888/tcp": "8888"}),
(["8888:4321"], {"4321/tcp": "8888"}),
(["8888:4321/udp"], {"4321/udp": "8888"}),
(["8888:4321/udp", "8888:4321/tcp"], {"4321/udp": "8888", "4321/tcp": "8888"}),
(["127.0.0.1:80:8000"], {"8000/tcp": ("127.0.0.1", "80")}),
(["8888:4321", "1234:12345"], {"4321/tcp": "8888", "12345/tcp": "1234"}),
],
)
def test_valid_port_mapping(input, expected):
actual = utils.validate_and_generate_port_mapping(input)
assert actual == expected
@pytest.mark.parametrize("port_spec", ["a8888:8888", "888:888/abc"])
def test_invalid_port_mapping(port_spec):
with pytest.raises(ValueError) as e:
utils.validate_and_generate_port_mapping([port_spec])
assert f'Port specification "{port_spec}"' in str(e.value)
def test_deep_get():
data = {"data": {"files": [1, 2, 3]}}
assert utils.deep_get(data, "data.files.0") == 1
assert utils.deep_get(data, "data.files.1") == 2
assert utils.deep_get(data, "data.files") == [1, 2, 3]
assert utils.deep_get(data, "data") == {"files": [1, 2, 3]}
def test_is_doi():
assert utils.is_doi("10.1234/jshd123") != None
assert utils.is_doi("10.1234/JSHD.8192") != None
assert utils.is_doi("doi.org/10.1234/jshd123") != None
assert utils.is_doi("http://doi.org/10.1234/jshd123") != None
assert utils.is_doi("https://doi.org/10.1234/jshd123") != None
assert utils.is_doi("http://dx.doi.org/10.1234/jshd123") != None
assert utils.is_doi("101234/jshd123") == None
assert utils.is_doi("https://mybinder.org") == None
def test_normalize_doi():
assert utils.normalize_doi("10.1234/jshd123") == "10.1234/jshd123"
assert utils.normalize_doi("10.1234/JSHD.8192") == "10.1234/JSHD.8192"
assert utils.normalize_doi("doi.org/10.1234/jshd123") == "10.1234/jshd123"
assert utils.normalize_doi("http://doi.org/10.1234/jshd123") == "10.1234/jshd123"
assert utils.normalize_doi("https://doi.org/10.1234/jshd123") == "10.1234/jshd123"
assert utils.normalize_doi("http://dx.doi.org/10.1234/jshd123") == "10.1234/jshd123"
def test_open_guess_encoding():
data = "Rică nu știa să zică râu, rățușcă, rămurică."
with tempfile.NamedTemporaryFile(mode="wb") as test_file:
test_file.write(str.encode(data, "utf-16"))
test_file.seek(0)
with utils.open_guess_encoding(test_file.name) as fd:
assert fd.read() == data
@pytest.mark.parametrize(
"req, is_local",
[
("-r requirements.txt", True),
("-e .", True),
("--editable=.", True),
(
"--editable=git+https://github.com/popgensims/stdpopsim.git#egg=stdpopsim-master",
False,
),
("file://subdir", True),
("file://./subdir", True),
("git://github.com/jupyterhub/repo2docker", False),
("git+https://github.com/jupyterhub/repo2docker", False),
("numpy", False),
("# -e .", False),
("--pre", False),
# pip ignores the package name and treats this like `--pre` on a line
# by itself
("--pre pandas", False),
# These are invalid lines as far as pip is concerned, check that our
# code is robust and continues running
("--unrecognized", False),
("-e", False),
],
)
def test_local_pip_requirement(req, is_local):
assert utils.is_local_pip_requirement(req) == is_local
@pytest.mark.parametrize(
"machine_name,expected",
[
("x86_64", "linux/amd64"),
("aarch64", "linux/arm64"),
("arm64", "linux/arm64"),
("other", "linux/amd64"),
],
)
def test_get_platform(monkeypatch, machine_name, expected):
monkeypatch.setattr(platform, "machine", lambda: machine_name)
assert utils.get_platform() == expected |
disable rule | #
# This file is part of Dragonfly.
# (c) Copyright 2018-2022 by Dane Finlay
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
GrammarWrapper class for the CMU Pocket Sphinx engine
============================================================================
"""
import logging
from dragonfly.engines.base import GrammarWrapperBase
#---------------------------------------------------------------------------
class GrammarWrapper(GrammarWrapperBase):
# Enable guessing at the type of a given result word.
_dictated_word_guesses_enabled = False
def __init__(self, grammar, engine, search_name):
"""
:type grammar: Grammar
:type engine: SphinxEngine
"""
GrammarWrapperBase.__init__(self, grammar, engine)
self.set_search = True
self._search_name = search_name
self.exclusive = False
# Compile the grammar into a JSGF grammar and set the language.
self._jsgf_grammar = engine.compiler.compile_grammar(grammar)
self._jsgf_grammar.language_name = engine.language
def _get_reference_name(self, name):
return self.engine.compiler.get_reference_name(name)
def enable_rule(self, name):
ref_name = self._get_reference_name(name)
jsgf_rule = self._jsgf_grammar.get_rule_from_name(ref_name)
# Only enable the rule and set the flag if the rule is disabled.
if not jsgf_rule.active:
jsgf_rule.enable()
self.set_search = True
def METHOD_NAME(self, name):
ref_name = self._get_reference_name(name)
jsgf_rule = self._jsgf_grammar.get_rule_from_name(ref_name)
# Only disable the rule and set the flag if the rule is enabled.
if jsgf_rule.active:
jsgf_rule.disable()
self.set_search = True
def update_list(self, lst):
# Recompile the list again.
grammar = self._jsgf_grammar
name = self._get_reference_name(lst.name)
old_rule = grammar.get_rule_from_name(name)
new_rule, unknown_words = self.engine.compiler.recompile_list(
lst, grammar
)
# Only replace the old rule if the list has changed.
if old_rule != new_rule:
grammar.remove_rule(old_rule, ignore_dependent=True)
grammar.add_rule(new_rule)
self.set_search = True
# Log a warning about unknown words if necessary.
if unknown_words:
logger = logging.getLogger("engine.compiler")
logger.warning("List '%s' used words not found in the "
"pronunciation dictionary: %s", name,
", ".join(sorted(unknown_words)))
def compile_jsgf(self):
return self._jsgf_grammar.compile_as_root_grammar()
@property
def search_name(self):
"""
The name of the Pocket Sphinx search that the engine should use to
process speech for the grammar.
:return: str
"""
return self._search_name
def set_dictated_word_guesses(self, value):
self._dictated_word_guesses_enabled = value
def _process_final_rule(self, state, words, results, dispatch_other,
rule, *args):
# Recognition successful! Set the results data.
results.type = args[0]
results.rule = rule
results.grammar = self.grammar
# Call the base class method.
GrammarWrapperBase._process_final_rule(self, state, words, results,
dispatch_other, rule, *args) |
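# Hedged sketch of how an engine might consume this wrapper; the decoder call
# below is hypothetical. The point of the `set_search` flag is that the JSGF
# grammar only needs to be recompiled and reloaded into Pocket Sphinx when a
# rule or list actually changed.
def _refresh_search_sketch(decoder, wrapper):
    if wrapper.set_search:
        jsgf_string = wrapper.compile_jsgf()
        # Hypothetical decoder API; the real wiring lives in the engine class.
        decoder.set_jsgf_string(wrapper.search_name, jsgf_string)
        wrapper.set_search = False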
test action and comma | # SPDX-License-Identifier: GPL-3.0
# Copyright (c) 2014-2023 William Edwards <[email protected]>, Benjamin Bean <[email protected]>
"""
yes, these results are wacky, but they are here for regression testing
"""
import unittest
from tuxemon.script.parser import (
parse_action_string,
parse_condition_string,
split_escaped,
)
class TestSplitEscaped(unittest.TestCase):
def test_one_word(self):
result = split_escaped("spam")
self.assertEqual(["spam"], result)
def test_trailing_space(self):
result = split_escaped("spam ")
self.assertEqual(["spam"], result)
def test_leading_space(self):
result = split_escaped(" spam")
self.assertEqual(["spam"], result)
def test_enclosed_space(self):
result = split_escaped(" spam ")
self.assertEqual(["spam"], result)
def test_space_around_arg(self):
result = split_escaped("spam , eggs ")
self.assertEqual(["spam", "eggs"], result)
def test_trailing_comma(self):
result = split_escaped("spam , eggs,")
self.assertEqual(["spam", "eggs", ""], result)
def test_double_comma(self):
result = split_escaped("spam , eggs ,, ")
self.assertEqual(["spam", "eggs", "", ""], result)
def test_empty(self):
result = split_escaped("")
self.assertEqual([""], result)
def test_only_comma(self):
result = split_escaped(",")
self.assertEqual(["", ""], result)
class TestParseActionString(unittest.TestCase):
def test_action_no_arg(self):
result = parse_action_string("spam")
self.assertEqual(("spam", []), result)
def test_action_and_arg(self):
result = parse_action_string("spam eggs")
self.assertEqual(("spam", ["eggs"]), result)
def test_action_and_args(self):
result = parse_action_string("spam eggs,parrot")
self.assertEqual(("spam", ["eggs", "parrot"]), result)
def METHOD_NAME(self):
result = parse_action_string("spam , ")
self.assertEqual(("spam", ["", ""]), result)
def test_action_arg_and_trailing_comma(self):
result = parse_action_string("spam eggs, ")
self.assertEqual(("spam", ["eggs", ""]), result)
def test_no_space_between_comma(self):
result = parse_action_string("spam,eggs")
self.assertEqual(("spam,eggs", []), result)
def test_enclosed_space(self):
result = parse_action_string(" spam ")
self.assertEqual(("", ["spam"]), result)
def test_double_comma(self):
result = parse_action_string("spam ,,")
self.assertEqual(("spam", ["", "", ""]), result)
def test_space_in_arg1(self):
result = parse_action_string("spam ex parrot")
self.assertEqual(("spam", ["ex parrot"]), result)
def test_space_in_arg2(self):
result = parse_action_string("spam eggs, ex parrot")
self.assertEqual(("spam", ["eggs", "ex parrot"]), result)
class TestParseConditionString(unittest.TestCase):
def test_no_type(self):
with self.assertRaises(ValueError):
parse_condition_string("spam")
def test_no_args(self):
result = parse_condition_string("spam eggs")
self.assertEqual(("spam", "eggs", []), result)
def test_enclosed_space(self):
result = parse_condition_string(" spam eggs ")
self.assertEqual(("", "spam", ["eggs"]), result)
def test_trailing_comma(self):
result = parse_condition_string("spam eggs, ")
self.assertEqual(("spam", "eggs,", [""]), result)
def test_with_args(self):
result = parse_condition_string("spam eggs, parrot")
self.assertEqual(("spam", "eggs,", ["parrot"]), result)
def test_with_args_trailing_comma(self):
result = parse_condition_string(" spam eggs parrot, cheese, ")
self.assertEqual(("", "spam", ["eggs parrot", "cheese", ""]), result)
def test_space_in_arg(self):
result = parse_condition_string("spam eggs ex parrot, cheese shop")
self.assertEqual(
("spam", "eggs", ["ex parrot", "cheese shop"]),
result,
) |
test fake quantize dense | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test function extraction"""
import tvm
from tvm import relay
def test_fake_quantize_conv():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
w = relay.var("w", shape=[16, 3, 5, 5], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.conv2d(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
kernel_size=[5, 5],
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.conv2d": 1}
def METHOD_NAME():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 1}
def test_fake_quantize_multiple_regions():
x = relay.var("x", shape=[128, 64], dtype="int8")
w = relay.var("w", shape=[256, 64], dtype="int8")
zero = relay.const(0)
op = relay.op.nn.dense(
relay.qnn.op.dequantize(x, relay.const(2.0), zero),
relay.qnn.op.dequantize(w, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
op = relay.qnn.op.dequantize(op, relay.const(2.0), relay.const(114))
op = relay.op.nn.relu(op)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
w2 = relay.var("w2", shape=[64, 256], dtype="int8")
op = relay.op.nn.dense(
relay.qnn.op.dequantize(op, relay.const(1.0), zero),
relay.qnn.op.dequantize(w2, relay.const(0.5), zero),
)
op = relay.qnn.op.quantize(op, relay.const(1.0), zero, out_dtype="int8")
# We expect to ignore this sigmoid op since it's just outside a fake
# quantized region
op = relay.op.sigmoid(op)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.dense": 2, "nn.relu": 1}
def test_fake_quantize_maxpool():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.nn.max_pool2d(x, [3, 3])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"nn.max_pool2d": 1}
def test_fake_quantize_transpose_reshape():
x = relay.var("x", shape=[1, 3, 224, 224], dtype="int8")
zero = relay.const(0)
x = relay.qnn.op.dequantize(x, relay.const(2.0), zero)
op = relay.op.transpose(x, [1, 0, 2, 3])
op = relay.op.reshape(op, [3, -1])
op = relay.qnn.op.quantize(op, relay.const(2.0), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"transpose": 1, "reshape": 1}
def test_fake_quantize_concat():
zero = relay.const(0)
inputs = []
for i in range(4):
inputs.append(
relay.qnn.op.dequantize(
relay.var("x%d" % i, shape=[1, 4], dtype="int8"), relay.const(i + 0.5), zero
)
)
concat = relay.op.concatenate(inputs, axis=1)
op = relay.qnn.op.quantize(concat, relay.const(3.5), zero)
mod = tvm.IRModule.from_expr(op)
fake_quantized_op_freqs = relay.analysis.list_fake_quantized_op_freqs(mod)
assert dict(fake_quantized_op_freqs) == {"concatenate": 1} |
bsz load v1 lcsr | import numpy as np
import json
import pkgutil
from flavio.classes import Parameter
from flavio.statistics.probability import MultivariateNormalDistribution
FFs = ["A0", "A1", "A12", "V", "T1", "T2", "T23"]
ai = ["a0", "a1", "a2"]
ff_a = [(ff, a) for ff in FFs for a in ai]
a_ff_string = [a + '_' + ff for ff in FFs for a in ai]
tex_a = {'a0': 'a_0', 'a1': 'a_1', 'a2': 'a_2', }
tex_ff = {'A0': 'A_0', 'A1': 'A_1', 'A12': r'A_{12}', 'V': 'V', 'T1': 'T_1', 'T2': 'T_2', 'T23': r'T_{23}', }
def get_ffpar(filename):
f = pkgutil.get_data('flavio.physics', filename)
data = json.loads(f.decode('utf-8'))
central = np.array([data['central'][ff].get(a, np.nan) for ff, a in ff_a])
unc = np.array([data['uncertainty'][ff].get(a, np.nan) for ff, a in ff_a])
corr = np.array([[data['correlation'][ff1 + ff2].get(a1 + a2, np.nan) for ff1, a1 in ff_a] for ff2, a2 in ff_a])
    # delete the parameters a0_A12 and a0_T2, which are instead fixed
# using the exact kinematical relations, cf. eq. (16) of arXiv:1503.05534
pos_a0_A12 = ff_a.index(('A12', 'a0'))
pos_a0_T2 = ff_a.index(('T2', 'a0'))
central = np.delete(central, [pos_a0_A12, pos_a0_T2])
unc = np.delete(unc, [pos_a0_A12, pos_a0_T2])
corr = np.delete(corr, [pos_a0_A12, pos_a0_T2], axis=0)
corr = np.delete(corr, [pos_a0_A12, pos_a0_T2], axis=1)
return [central, unc, corr]
def load_parameters(filename, process, constraints):
implementation_name = process + ' BSZ'
parameter_names = [implementation_name + ' ' + coeff_name for coeff_name in a_ff_string]
    # a0_A12 and a0_T2 are not treated as independent parameters!
parameter_names.remove(implementation_name + ' a0_A12')
parameter_names.remove(implementation_name + ' a0_T2')
for parameter_name in parameter_names:
try: # check if parameter object already exists
p = Parameter[parameter_name]
except KeyError: # if not, create a new one
p = Parameter(parameter_name)
# get LaTeX representation of coefficient and form factor names
_tex_a = tex_a[parameter_name.split(' ')[-1].split('_')[0]]
_tex_ff = tex_ff[parameter_name.split(' ')[-1].split('_')[-1]]
p.tex = r'$' + _tex_a + r'^{' + _tex_ff + r'}$'
p.description = r'BSZ form factor parametrization coefficient $' + _tex_a + r'$ of $' + _tex_ff + r'$'
else: # if parameter exists, remove existing constraints
constraints.remove_constraint(parameter_name)
[central, unc, corr] = get_ffpar(filename)
constraints.add_constraint(parameter_names,
MultivariateNormalDistribution(central_value=central, covariance=np.outer(unc, unc)*corr))
# Resonance masses used in arXiv:1503.05534
resonance_masses_bsz = {
'B->K*': {
'm0': 5.367,
'm1-': 5.415,
'm1+': 5.830,
},
'B->rho': {
'm0': 5.279,
'm1-': 5.324,
'm1+': 5.716,
},
'B->omega': {
'm0': 5.279,
'm1-': 5.324,
'm1+': 5.716,
},
'Bs->phi': {
'm0': 5.367,
'm1-': 5.415,
'm1+': 5.830,
},
'Bs->K*': {
'm0': 5.279,
'm1-': 5.324,
'm1+': 5.716,
},
}
# Resonance masses used in arXiv:1811.00983
resonance_masses_gkvd = {
'B->K*': {
'm0': 5.336,
'm1-': 5.412,
'm1+': 5.829,
},
'B->rho': {
'm0': 5.279,
'm1-': 5.325,
'm1+': 5.724,
},
'B->D*': {
'm0': 6.275,
'm1-': 6.330,
'm1+': 6.767,
},
}
def transition_filename(tr):
"""Get the part of the filename specifying the transition (e.g. BKstar)
from a transition string (e.g. B->K*)."""
return tr.replace('->', '').replace('*', 'star')
def bsz_load(version, fit, transitions, constraints):
"""Load the form factor parameters given in arXiv:1503.05534"""
for tr in transitions:
for m, v in resonance_masses_bsz[tr].items():
constraints.set_constraint('{} BCL {}'.format(tr, m), v)
filename = 'data/arXiv-1503-05534{}/{}_{}.json'.format(version, transition_filename(tr), fit)
load_parameters(filename, tr, constraints)
def METHOD_NAME(constraints):
bsz_load('v1', 'LCSR', ('B->K*', 'B->omega', 'B->rho', 'Bs->phi', 'Bs->K*'), constraints)
def bsz_load_v1_combined(constraints):
bsz_load('v1', 'LCSR-Lattice', ('B->K*', 'Bs->phi', 'Bs->K*'), constraints)
def bsz_load_v2_lcsr(constraints):
bsz_load('v2', 'LCSR', ('B->K*', 'B->omega', 'B->rho', 'Bs->phi', 'Bs->K*'), constraints)
def bsz_load_v2_combined(constraints):
bsz_load('v2', 'LCSR-Lattice', ('B->K*', 'Bs->phi', 'Bs->K*'), constraints)
def gkvd_load(version, fit, transitions, constraints):
"""Load the form factor parameters given in arXiv:1811.00983"""
for tr in transitions:
for m, v in resonance_masses_gkvd[tr].items():
constraints.set_constraint('{} BCL {}'.format(tr, m), v)
filename = 'data/arXiv-1811-00983{}/{}_{}.json'.format(version, transition_filename(tr), fit)
load_parameters(filename, tr, constraints) |
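# Hedged toy with made-up numbers, not flavio data: load_parameters above builds
# the covariance matrix from per-coefficient uncertainties and a correlation
# matrix as cov = outer(unc, unc) * corr, e.g. for two coefficients:
_toy_unc = np.array([0.03, 0.10])
_toy_corr = np.array([[1.0, 0.45], [0.45, 1.0]])
_toy_cov = np.outer(_toy_unc, _toy_unc) * _toy_corr
assert np.isclose(_toy_cov[0, 1], 0.03 * 0.10 * 0.45)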
context | """
Unit test for MicroBatchHandler class.
"""
import json
import random
import sys
from pathlib import Path
import pytest
from torchvision.models.resnet import ResNet18_Weights
from ts.torch_handler.image_classifier import ImageClassifier
from ts.torch_handler.unit_tests.test_utils.mock_context import MockContext
from ts.torch_handler.unit_tests.test_utils.model_dir import copy_files, download_model
REPO_DIR = Path(__file__).parents[3]
def read_image_bytes(filename):
with open(
filename,
"rb",
) as fin:
image_bytes = fin.read()
return image_bytes
@pytest.fixture(scope="module")
def kitten_image_bytes():
return read_image_bytes(
REPO_DIR.joinpath(
"examples/image_classifier/resnet_152_batch/images/kitten.jpg"
).as_posix()
)
@pytest.fixture(scope="module")
def dog_image_bytes():
return read_image_bytes(
REPO_DIR.joinpath(
"examples/image_classifier/resnet_152_batch/images/dog.jpg"
).as_posix()
)
@pytest.fixture(scope="module")
def model_name():
return "image_classifier"
@pytest.fixture(scope="module")
def model_dir(tmp_path_factory, model_name):
model_dir = tmp_path_factory.mktemp("image_classifier_model_dir")
src_dir = REPO_DIR.joinpath("examples/image_classifier/resnet_18/")
model_url = ResNet18_Weights.DEFAULT.url
download_model(model_url, model_dir)
files = {
"model.py": model_name + ".py",
"index_to_name.json": "index_to_name.json",
}
copy_files(src_dir, model_dir, files)
sys.path.append(model_dir.as_posix())
yield model_dir
sys.path.pop()
@pytest.fixture(scope="module")
def METHOD_NAME(model_dir, model_name):
micro_batching_params = {
"mb_size": 2,
"mb_parallelism": {
"preprocess": 1,
"inference": 2,
"postprocess": 3,
},
}
config_file = Path(model_dir).joinpath("micro_batching.json")
with open(config_file, "w") as f:
json.dump(micro_batching_params, f)
METHOD_NAME = MockContext(
model_name="mnist",
model_dir=model_dir.as_posix(),
model_file=model_name + ".py",
)
METHOD_NAME.model_yaml_config = micro_batching_params
yield METHOD_NAME
@pytest.fixture(scope="module", params=[1, 8])
def handler(METHOD_NAME, request):
handler = ImageClassifier()
from ts.handler_utils.micro_batching import MicroBatching
mb_handle = MicroBatching(handler, micro_batch_size=request.param)
handler.initialize(METHOD_NAME)
handler.handle = mb_handle
handler.handle.parallelism = METHOD_NAME.model_yaml_config["mb_parallelism"]
yield handler
mb_handle.shutdown()
@pytest.fixture(scope="module", params=[1, 16])
def mixed_batch(kitten_image_bytes, dog_image_bytes, request):
batch_size = request.param
labels = [
"tiger_cat" if random.random() > 0.5 else "golden_retriever"
for _ in range(batch_size)
]
test_data = []
for l in labels:
test_data.append(
{"data": kitten_image_bytes}
if l == "tiger_cat"
else {"data": dog_image_bytes}
)
return test_data, labels
def test_handle(METHOD_NAME, mixed_batch, handler):
test_data, labels = mixed_batch
results = handler.handle(test_data, METHOD_NAME)
assert len(results) == len(labels)
for l, r in zip(labels, results):
assert l in r
def test_handle_explain(METHOD_NAME, kitten_image_bytes, handler):
METHOD_NAME.explain = True
test_data = [{"data": kitten_image_bytes, "target": 0}] * 2
results = handler.handle(test_data, METHOD_NAME)
assert len(results) == 2
assert results[0]
def test_micro_batching_handler_threads(handler):
assert len(handler.handle.thread_groups["preprocess"]) == 1
assert len(handler.handle.thread_groups["inference"]) == 2
assert len(handler.handle.thread_groups["postprocess"]) == 3
def test_spin_up_down_threads(handler):
assert len(handler.handle.thread_groups["preprocess"]) == 1
assert len(handler.handle.thread_groups["inference"]) == 2
assert len(handler.handle.thread_groups["postprocess"]) == 3
new_parallelism = {
"preprocess": 2,
"inference": 3,
"postprocess": 4,
}
handler.handle.parallelism = new_parallelism
assert len(handler.handle.thread_groups["preprocess"]) == 2
assert len(handler.handle.thread_groups["inference"]) == 3
assert len(handler.handle.thread_groups["postprocess"]) == 4
new_parallelism = {
"preprocess": 1,
"inference": 2,
"postprocess": 3,
}
handler.handle.parallelism = new_parallelism
assert len(handler.handle.thread_groups["preprocess"]) == 1
assert len(handler.handle.thread_groups["inference"]) == 2
assert len(handler.handle.thread_groups["postprocess"]) == 3 |
session | """Provider implementation based on boto library for AWS-compatible clouds."""
import logging
import boto3
from botocore.client import Config
from cloudbridge.base import BaseCloudProvider
from cloudbridge.base.helpers import get_env
from .services import AWSComputeService
from .services import AWSDnsService
from .services import AWSNetworkingService
from .services import AWSSecurityService
from .services import AWSStorageService
log = logging.getLogger(__name__)
class AWSCloudProvider(BaseCloudProvider):
'''AWS cloud provider interface'''
PROVIDER_ID = 'aws'
def __init__(self, config):
super(AWSCloudProvider, self).__init__(config)
# Initialize cloud connection fields
# These are passed as-is to Boto
self._region_name = self._get_config_value('aws_region_name',
'us-east-1')
self._zone_name = self._get_config_value('aws_zone_name')
self.session_cfg = {
'aws_access_key_id': self._get_config_value(
'aws_access_key', get_env('AWS_ACCESS_KEY')),
'aws_secret_access_key': self._get_config_value(
'aws_secret_key', get_env('AWS_SECRET_KEY')),
'aws_session_token': self._get_config_value(
'aws_session_token', None)
}
self.ec2_cfg = {
'use_ssl': self._get_config_value('ec2_is_secure', True),
'verify': self._get_config_value('ec2_validate_certs', True),
'endpoint_url': self._get_config_value('ec2_endpoint_url'),
'config': Config(
retries={
'max_attempts': self._get_config_value('ec2_retries_value', 4),
'mode': 'standard'})
}
self.s3_cfg = {
'use_ssl': self._get_config_value('s3_is_secure', True),
'verify': self._get_config_value('s3_validate_certs', True),
'endpoint_url': self._get_config_value('s3_endpoint_url'),
'config': Config(
signature_version=self._get_config_value(
's3_signature_version', 's3v4'))
}
# service connections, lazily initialized
self._session = None
self._ec2_conn = None
self._vpc_conn = None
self._s3_conn = None
# Initialize provider services
self._compute = AWSComputeService(self)
self._networking = AWSNetworkingService(self)
self._security = AWSSecurityService(self)
self._storage = AWSStorageService(self)
self._dns = AWSDnsService(self)
@property
def METHOD_NAME(self):
'''Get a low-level session object or create one if needed'''
if not self._session:
if self.config.debug_mode:
boto3.set_stream_logger(level=log.DEBUG)
self._session = boto3.METHOD_NAME.Session(
region_name=self.region_name, **self.session_cfg)
return self._session
@property
def ec2_conn(self):
if not self._ec2_conn:
self._ec2_conn = self._connect_ec2()
return self._ec2_conn
@property
def s3_conn(self):
if not self._s3_conn:
self._s3_conn = self._connect_s3()
return self._s3_conn
@property
def compute(self):
return self._compute
@property
def networking(self):
return self._networking
@property
def security(self):
return self._security
@property
def storage(self):
return self._storage
@property
def dns(self):
return self._dns
def _connect_ec2(self):
"""
Get a boto ec2 connection object.
"""
return self._connect_ec2_region(region_name=self.region_name)
def _connect_ec2_region(self, region_name=None):
'''Get an EC2 resource object'''
return self.METHOD_NAME.resource(
'ec2', region_name=region_name, **self.ec2_cfg)
def _connect_s3(self):
'''Get an S3 resource object'''
return self.METHOD_NAME.resource(
's3', region_name=self.region_name, **self.s3_cfg) |
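# Hedged usage sketch, not part of the provider module: the configuration keys
# read above through _get_config_value can be supplied in a plain dict. The
# values below are placeholders, and the provider call stays commented out
# because it would reach out to AWS.
_example_config = {
    'aws_access_key': 'AKIA...placeholder',
    'aws_secret_key': 'placeholder',
    'aws_region_name': 'us-east-1',
    'ec2_validate_certs': True,
    's3_signature_version': 's3v4',
}
# provider = AWSCloudProvider(_example_config)
# provider.ec2_conn  # lazily builds the boto3 session and the EC2 resource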
include table | from functools import partial
import attr
from alembic.migration import MigrationContext
def get_migration_context(connection, table_names=None):
opts = {'compare_type': True}
if table_names:
opts['include_name'] = partial(include_name, table_names)
opts['include_object'] = partial(include_object, table_names)
return MigrationContext.configure(connection, opts=opts)
def include_name(tables_to_include, name, type_, parent_names):
"""Checks if the object should be included. This is called prior
to object reflection and is only called for existing database objects"""
return METHOD_NAME(tables_to_include, type_, name)
def include_object(tables_to_include, object, name, type_, reflected, compare_to):
"""Checks if the object should be included. This runs after reflection and will
also be called with new objects that are only in the metadata"""
return METHOD_NAME(tables_to_include, type_, name)
def METHOD_NAME(tables_to_include, type_, name):
if type_ == "table":
return name in tables_to_include
return True
def get_tables_to_rebuild(diffs):
return {diff.table_name for diff in diffs if diff.type in DiffTypes.TYPES_FOR_REBUILD}
def reformat_alembic_diffs(raw_diffs):
"""
See: http://alembic.readthedocs.io/en/latest/api/autogenerate.html
:param raw_diffs: from alembic
:return: list of ``SimpleDiff`` tuples
"""
diffs = []
def _simplify_diff(raw_diff):
type_ = raw_diff[0]
if type_ in DiffTypes.TABLE_TYPES:
diffs.append(
SimpleDiff(type_, raw_diff[1].name, None, raw_diff)
)
elif type_ in DiffTypes.CONSTRAINT_TYPES:
any_column = list(raw_diff[1].columns.values())[0]
table_name = any_column.table.name
diffs.append(
SimpleDiff(type_, table_name, raw_diff[1].name, raw_diff)
)
elif type_ in DiffTypes.MODIFY_TYPES:
diffs.append(
SimpleDiff(type_, raw_diff[2], raw_diff[3], raw_diff)
)
elif type_ == DiffTypes.ADD_COLUMN and raw_diff[3].nullable:
diffs.append(
SimpleDiff(DiffTypes.ADD_NULLABLE_COLUMN, raw_diff[2], raw_diff[3].name, raw_diff)
)
elif type_ in DiffTypes.COLUMN_TYPES:
diffs.append(
SimpleDiff(type_, raw_diff[2], raw_diff[3].name, raw_diff)
)
elif type_ in DiffTypes.INDEX_TYPES:
            diffs.append(SimpleDiff(type_, raw_diff[1].table.name, raw_diff[1].name, raw_diff))
else:
diffs.append(SimpleDiff(type_, None, None, None))
for diff in raw_diffs:
if isinstance(diff, list):
for d in diff:
_simplify_diff(d)
else:
_simplify_diff(diff)
return diffs
class DiffTypes(object):
ADD_TABLE = 'add_table'
REMOVE_TABLE = 'remove_table'
TABLE_TYPES = (ADD_TABLE, REMOVE_TABLE)
ADD_COLUMN = 'add_column'
REMOVE_COLUMN = 'remove_column'
COLUMN_TYPES = (ADD_COLUMN, REMOVE_COLUMN)
MODIFY_NULLABLE = 'modify_nullable'
MODIFY_TYPE = 'modify_type'
MODIFY_DEFAULT = 'modify_default'
MODIFY_TYPES = (MODIFY_TYPE, MODIFY_DEFAULT, MODIFY_NULLABLE)
ADD_CONSTRAINT = 'add_constraint'
REMOVE_CONSTRAINT = 'remove_constraint'
ADD_INDEX = 'add_index'
REMOVE_INDEX = 'remove_index'
INDEX_TYPES = (ADD_INDEX, REMOVE_INDEX)
ADD_NULLABLE_COLUMN = 'add_nullable_column'
MIGRATEABLE_TYPES = (ADD_NULLABLE_COLUMN,) + INDEX_TYPES
CONSTRAINT_TYPES = (ADD_CONSTRAINT, REMOVE_CONSTRAINT) + INDEX_TYPES
ALL = TABLE_TYPES + COLUMN_TYPES + MODIFY_TYPES + CONSTRAINT_TYPES
TYPES_FOR_REBUILD = TABLE_TYPES + COLUMN_TYPES + (MODIFY_TYPE, MODIFY_NULLABLE)
TYPES_FOR_MIGRATION = INDEX_TYPES + (ADD_NULLABLE_COLUMN,)
@attr.s(frozen=True)
class SimpleDiff(object):
type = attr.ib()
table_name = attr.ib()
item_name = attr.ib()
raw = attr.ib(cmp=False)
def to_dict(self):
return {
'type': self.type,
'item_name': self.item_name
}
@property
def column(self):
return self._item(3, DiffTypes.COLUMN_TYPES + (DiffTypes.ADD_NULLABLE_COLUMN,))
@property
def index(self):
return self._item(1, DiffTypes.INDEX_TYPES)
@property
def constraint(self):
return self._item(1, DiffTypes.CONSTRAINT_TYPES)
def _item(self, index, supported_types):
if self.type not in supported_types:
raise NotImplementedError
return self.raw[index] |
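# Hedged illustration with toy objects, not real alembic output: column diffs
# arrive as (type, schema, table_name, column) tuples, so a nullable add_column
# is reported as an ADD_NULLABLE_COLUMN SimpleDiff.
class _FakeColumn:
    def __init__(self, name, nullable):
        self.name = name
        self.nullable = nullable
_toy_raw = ('add_column', None, 'my_table', _FakeColumn('new_col', nullable=True))
_toy_diff = reformat_alembic_diffs([_toy_raw])[0]
assert _toy_diff.type == DiffTypes.ADD_NULLABLE_COLUMN
assert _toy_diff.table_name == 'my_table'
assert _toy_diff.item_name == 'new_col'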
recv raw | import asyncio
import concurrent
import hashlib
import struct
import base58
from configured_logger import logger
from messages import schema
from messages.crypto import PublicKey, Signature
from messages.network import (EdgeInfo, GenesisId, Handshake, PeerChainInfoV2,
PeerMessage, RoutedMessage, PeerIdOrHash)
from serializer import BinarySerializer
from nacl.signing import SigningKey
from typing import Optional
ED_PREFIX = "ed25519:"
class Connection:
def __init__(self, reader: asyncio.StreamReader,
writer: asyncio.StreamWriter):
self.reader = reader
self.writer = writer
self.is_closed = False
async def send(self, message):
raw_message = BinarySerializer(schema).serialize(message)
await self.send_raw(raw_message)
async def send_raw(self, raw_message):
length = struct.pack('I', len(raw_message))
self.writer.write(length)
self.writer.write(raw_message)
await self.writer.drain()
# returns None on timeout
async def recv(self, expected=None):
while True:
response_raw = await self.METHOD_NAME()
# Connection was closed on the other side
if response_raw is None:
return None
# TODO(CP-85): when removing borsh support, fix this to use protobufs,
# (or preferably reimplement the test in rust).
try:
response = BinarySerializer(schema).deserialize(
response_raw, PeerMessage)
except IndexError:
# unparsable message, ignore.
continue
if expected is None or response.enum == expected or (
callable(expected) and expected(response)):
return response
async def METHOD_NAME(self):
length = await self.reader.read(4)
if len(length) == 0:
self.is_closed = True
return None
else:
length = struct.unpack('I', length)[0]
response = b''
while len(response) < length:
response += await self.reader.read(length - len(response))
if len(response) < length:
logger.info(f"Downloading message {len(response)}/{length}")
return response
async def close(self):
self.writer.close()
await self.writer.wait_closed()
def do_send(self, message):
loop = asyncio.get_event_loop()
loop.create_task(self.send(message))
def do_send_raw(self, raw_message):
loop = asyncio.get_event_loop()
loop.create_task(self.send_raw(raw_message))
async def connect(addr) -> Connection:
reader, writer = await asyncio.open_connection(*addr)
conn = Connection(reader, writer)
return conn
def create_handshake(my_key_pair_nacl,
their_pk_serialized,
listen_port,
version=0):
"""
Create handshake message but with placeholders in:
- version
- genesis_id.chain_id
- genesis_id.hash
- edge_info.signature
"""
handshake = Handshake()
handshake.version = version
handshake.oldest_supported_version = version
handshake.peer_id = PublicKey()
handshake.target_peer_id = PublicKey()
handshake.listen_port = listen_port
handshake.chain_info = PeerChainInfoV2()
handshake.edge_info = EdgeInfo()
handshake.peer_id.keyType = 0
handshake.peer_id.data = bytes(my_key_pair_nacl.verify_key)
handshake.target_peer_id.keyType = 0
handshake.target_peer_id.data = base58.b58decode(
their_pk_serialized[len(ED_PREFIX):])
handshake.chain_info.genesis_id = GenesisId()
handshake.chain_info.height = 0
handshake.chain_info.tracked_shards = []
handshake.chain_info.archival = False
handshake.chain_info.genesis_id.chain_id = 'moo'
handshake.chain_info.genesis_id.hash = bytes([0] * 32)
handshake.edge_info.nonce = 1
handshake.edge_info.signature = Signature()
handshake.edge_info.signature.keyType = 0
handshake.edge_info.signature.data = bytes([0] * 64)
peer_message = PeerMessage()
peer_message.enum = 'Handshake'
peer_message.Handshake = handshake
return peer_message
def create_peer_request():
peer_message = PeerMessage()
peer_message.enum = 'PeersRequest'
peer_message.PeersRequest = ()
return peer_message
def sign_handshake(my_key_pair_nacl, handshake):
peer0 = handshake.peer_id
peer1 = handshake.target_peer_id
if peer1.data < peer0.data:
peer0, peer1 = peer1, peer0
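    # The partial edge is signed over the sha256 digest of: key-type byte plus
    # raw key bytes for each peer (ordered by key bytes), then the u64 nonce.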
arr = bytes(
bytearray([0]) + peer0.data + bytearray([0]) + peer1.data +
struct.pack('Q', handshake.edge_info.nonce))
handshake.edge_info.signature.data = my_key_pair_nacl.sign(
hashlib.sha256(arr).digest()).signature
async def run_handshake(conn: Connection,
                        target_public_key: str,
key_pair: SigningKey,
listen_port=12345):
handshake = create_handshake(key_pair, target_public_key, listen_port)
async def send_handshake():
sign_handshake(key_pair, handshake.Handshake)
await conn.send(handshake)
        # The peer might send us an unsolicited message before replying to
        # a successful handshake. This is because the node is multi-threaded and
# peers are added to PeerManager before the reply is sent. Since we
# don’t care about those messages, ignore them and wait for some kind of
# Handshake reply.
return await conn.recv(lambda msg: msg.enum.startswith('Handshake'))
response = await send_handshake()
if response.enum == 'HandshakeFailure' and response.HandshakeFailure[
1].enum == 'ProtocolVersionMismatch':
pvm = response.HandshakeFailure[1].ProtocolVersionMismatch.version
handshake.Handshake.version = pvm
response = await send_handshake()
if response.enum == 'HandshakeFailure' and response.HandshakeFailure[
1].enum == 'GenesisMismatch':
gm = response.HandshakeFailure[1].GenesisMismatch
handshake.Handshake.chain_info.genesis_id.chain_id = gm.chain_id
handshake.Handshake.chain_info.genesis_id.hash = gm.hash
response = await send_handshake()
assert response.enum == 'Handshake', response.enum if response.enum != 'HandshakeFailure' else response.HandshakeFailure[
1].enum
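# Illustrative sketch of how the helpers above compose into a session. The
# awaited 'PeersResponse' variant and the "ed25519:..." string form of the
# target key are assumptions for the example, not requirements of this module.
async def _example_session(addr, target_public_key_str):
    key_pair = SigningKey.generate()
    conn = await connect(addr)
    await run_handshake(conn, target_public_key_str, key_pair)
    conn.do_send(create_peer_request())
    return await conn.recv('PeersResponse')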
def create_and_sign_routed_peer_message(routed_msg_body, target_node,
my_key_pair_nacl):
routed_msg = RoutedMessage()
routed_msg.target = PeerIdOrHash()
routed_msg.target.enum = 'PeerId'
routed_msg.target.PeerId = PublicKey()
routed_msg.target.PeerId.keyType = 0
routed_msg.target.PeerId.data = base58.b58decode(
target_node.node_key.pk[len(ED_PREFIX):])
routed_msg.author = PublicKey()
routed_msg.author.keyType = 0
routed_msg.author.data = bytes(my_key_pair_nacl.verify_key)
routed_msg.ttl = 100
routed_msg.body = routed_msg_body
routed_msg.signature = Signature()
routed_msg.signature.keyType = 0
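    # The routed message is signed over the sha256 digest of the borsh-style
    # encoding of (target, author, body) assembled below.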
routed_msg_arr = bytes(
bytearray([0, 0]) + routed_msg.target.PeerId.data + bytearray([0]) +
routed_msg.author.data +
BinarySerializer(schema).serialize(routed_msg.body))
routed_msg_hash = hashlib.sha256(routed_msg_arr).digest()
routed_msg.signature.data = my_key_pair_nacl.sign(routed_msg_hash).signature
peer_message = PeerMessage()
peer_message.enum = 'Routed'
peer_message.Routed = routed_msg
return peer_message |
test scatter ellipsis value | """
Unit tests of Ellipsis (...) in __getitem__
"""
import numpy as np
import holoviews as hv
from holoviews.element.comparison import ComparisonTestCase
class TestEllipsisCharts(ComparisonTestCase):
def test_curve_ellipsis_slice_x(self):
sliced = hv.Curve([(i,2*i) for i in range(10)])[2:7,...]
self.assertEqual(sliced.range('x'), (2,6))
def test_curve_ellipsis_slice_y(self):
sliced = hv.Curve([(i,2*i) for i in range(10)])[..., 3:9]
self.assertEqual(sliced.range('y'), (4,8))
def test_points_ellipsis_slice_x(self):
sliced = hv.Points([(i,2*i) for i in range(10)])[2:7,...]
self.assertEqual(sliced.range('x'), (2,6))
def METHOD_NAME(self):
hv.Scatter(range(10))[...,'y']
def test_scatter_ellipsis_value_missing(self):
try:
hv.Scatter(range(10))[...,'Non-existent']
except Exception as e:
if str(e) != "'Non-existent' is not an available value dimension":
raise AssertionError("Incorrect exception raised.")
def test_points_ellipsis_slice_y(self):
sliced = hv.Points([(i,2*i) for i in range(10)])[..., 3:9]
self.assertEqual(sliced.range('y'), (4,8))
def test_histogram_ellipsis_slice_value(self):
frequencies, edges = np.histogram(range(20), 20)
sliced = hv.Histogram((frequencies, edges))[..., 'Frequency']
self.assertEqual(len(sliced.dimension_values(0)), 20)
def test_histogram_ellipsis_slice_range(self):
frequencies, edges = np.histogram(range(20), 20)
sliced = hv.Histogram((edges, frequencies))[0:5, ...]
self.assertEqual(len(sliced.dimension_values(0)), 5)
def test_histogram_ellipsis_slice_value_missing(self):
frequencies, edges = np.histogram(range(20), 20)
with self.assertRaises(IndexError):
hv.Histogram((frequencies, edges))[..., 'Non-existent']
class TestEllipsisTable(ComparisonTestCase):
def setUp(self):
keys = [('M',10), ('M',16), ('F',12)]
values = [(15, 0.8), (18, 0.6), (10, 0.8)]
        self.table = hv.Table(zip(keys, values),
                              kdims=['Gender', 'Age'],
                              vdims=['Weight', 'Height'])
super().setUp()
def test_table_ellipsis_slice_value_weight(self):
sliced = self.table[..., 'Weight']
assert sliced.vdims==['Weight']
def test_table_ellipsis_slice_value_height(self):
sliced = self.table[..., 'Height']
assert sliced.vdims==['Height']
def test_table_ellipsis_slice_key_gender(self):
sliced = self.table['M',...]
if not all(el=='M' for el in sliced.dimension_values('Gender')):
raise AssertionError("Table key slicing on 'Gender' failed.")
class TestEllipsisRaster(ComparisonTestCase):
def test_raster_ellipsis_slice_value(self):
data = np.random.rand(10,10)
sliced = hv.Raster(data)[...,'z']
self.assertEqual(sliced.data, data)
def test_raster_ellipsis_slice_value_missing(self):
data = np.random.rand(10,10)
try:
hv.Raster(data)[...,'Non-existent']
except Exception as e:
if "\'z\' is the only selectable value dimension" not in str(e):
raise AssertionError("Unexpected exception.")
def test_image_ellipsis_slice_value(self):
data = np.random.rand(10,10)
sliced = hv.Image(data)[...,'z']
self.assertEqual(sliced.data, data)
def test_image_ellipsis_slice_value_missing(self):
data = np.random.rand(10,10)
try:
hv.Image(data)[...,'Non-existent']
except Exception as e:
if str(e) != "'Non-existent' is not an available value dimension":
raise AssertionError("Unexpected exception.")
def test_rgb_ellipsis_slice_value(self):
data = np.random.rand(10,10,3)
sliced = hv.RGB(data)[:,:,'R']
        self.assertEqual(sliced.data, data[:,:,0])
def test_rgb_ellipsis_slice_value_missing(self):
rgb = hv.RGB(np.random.rand(10,10,3))
try:
rgb[...,'Non-existent']
except Exception as e:
if str(e) != repr("'Non-existent' is not an available value dimension"):
raise AssertionError("Incorrect exception raised.")
class TestEllipsisDeepIndexing(ComparisonTestCase):
def test_deep_ellipsis_curve_slicing_1(self):
hmap = hv.HoloMap({i:hv.Curve([(j,j) for j in range(10)])
for i in range(10)})
sliced = hmap[2:5,...]
self.assertEqual(sliced.keys(), [2, 3, 4])
def test_deep_ellipsis_curve_slicing_2(self):
hmap = hv.HoloMap({i:hv.Curve([(j,j) for j in range(10)])
for i in range(10)})
sliced = hmap[2:5,1:8,...]
self.assertEqual(sliced.last.range('x'), (1,7))
def test_deep_ellipsis_curve_slicing_3(self):
hmap = hv.HoloMap({i:hv.Curve([(j,2*j) for j in range(10)])
for i in range(10)})
sliced = hmap[...,2:5]
self.assertEqual(sliced.last.range('y'), (2, 4)) |
test no access | # pylint: disable=missing-docstring
from smtplib import SMTPException
from unittest import mock
import pytest
import ddt
from django.db import IntegrityError
from django.test import TestCase
from openedx.core.djangoapps.api_admin.models import ApiAccessConfig, ApiAccessRequest
from openedx.core.djangoapps.api_admin.models import log as model_log
from openedx.core.djangoapps.api_admin.tests.factories import ApiAccessRequestFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteFactory
from openedx.core.djangolib.testing.utils import skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
@ddt.ddt
@skip_unless_lms
class ApiAccessRequestTests(TestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.request = ApiAccessRequestFactory(user=self.user)
def test_default_status(self):
assert self.request.status == ApiAccessRequest.PENDING
assert not ApiAccessRequest.has_api_access(self.user)
def test_approve(self):
self.request.approve()
assert self.request.status == ApiAccessRequest.APPROVED
def test_deny(self):
self.request.deny()
assert self.request.status == ApiAccessRequest.DENIED
def test_nonexistent_request(self):
"""Test that users who have not requested API access do not get it."""
other_user = UserFactory()
assert not ApiAccessRequest.has_api_access(other_user)
@ddt.data(
(ApiAccessRequest.PENDING, False),
(ApiAccessRequest.DENIED, False),
(ApiAccessRequest.APPROVED, True),
)
@ddt.unpack
def test_has_access(self, status, should_have_access):
self.request.status = status
self.request.save()
assert ApiAccessRequest.has_api_access(self.user) == should_have_access
def test_unique_per_user(self):
with pytest.raises(IntegrityError):
ApiAccessRequestFactory(user=self.user)
def METHOD_NAME(self):
self.request.delete()
assert ApiAccessRequest.api_access_status(self.user) is None
def test_unicode(self):
request_unicode = str(self.request)
assert self.request.website in request_unicode
assert self.request.status in request_unicode
def test_retire_user_success(self):
retire_result = self.request.retire_user(self.user)
assert retire_result
assert self.request.company_address == ''
assert self.request.company_name == ''
assert self.request.website == ''
assert self.request.reason == ''
def test_retire_user_do_not_exist(self):
user2 = UserFactory()
retire_result = self.request.retire_user(user2)
assert not retire_result
class ApiAccessConfigTests(TestCase):
def test_unicode(self):
assert str(ApiAccessConfig(enabled=True)) == 'ApiAccessConfig [enabled=True]'
assert str(ApiAccessConfig(enabled=False)) == 'ApiAccessConfig [enabled=False]'
@skip_unless_lms
class ApiAccessRequestSignalTests(TestCase):
def setUp(self):
super().setUp()
self.user = UserFactory()
self.api_access_request = ApiAccessRequest(user=self.user, site=SiteFactory())
self.send_new_pending_email_function = 'openedx.core.djangoapps.api_admin.models._send_new_pending_email'
self.send_decision_email_function = 'openedx.core.djangoapps.api_admin.models._send_decision_email'
def test_save_signal_success_new_email(self):
""" Verify that initial save sends new email and no decision email. """
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.save()
mock_new_email.assert_called_once_with(self.api_access_request)
assert not mock_decision_email.called
def test_save_signal_success_decision_email(self):
""" Verify that updating request status sends decision email and no new email. """
self.api_access_request.save()
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.approve()
mock_decision_email.assert_called_once_with(self.api_access_request)
assert not mock_new_email.called
def test_save_signal_success_no_emails(self):
""" Verify that updating request status again sends no emails. """
self.api_access_request.save()
self.api_access_request.approve()
with mock.patch(self.send_new_pending_email_function) as mock_new_email:
with mock.patch(self.send_decision_email_function) as mock_decision_email:
self.api_access_request.deny()
assert not mock_decision_email.called
assert not mock_new_email.called
def test_save_signal_failure_email(self):
""" Verify that saving still functions even on email errors. """
assert self.api_access_request.id is None
mail_function = 'openedx.core.djangoapps.api_admin.models.send_mail'
with mock.patch(mail_function, side_effect=SMTPException):
with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
self.api_access_request.save()
# Verify that initial save logs email errors properly
mock_model_log_exception.assert_called_once_with(
'Error sending API user notification email for request [%s].', self.api_access_request.id
)
# Verify object saved
assert self.api_access_request.id is not None
with mock.patch(mail_function, side_effect=SMTPException):
with mock.patch.object(model_log, 'exception') as mock_model_log_exception:
self.api_access_request.approve()
# Verify that updating request status logs email errors properly
mock_model_log_exception.assert_called_once_with(
'Error sending API user notification email for request [%s].', self.api_access_request.id
)
# Verify object saved
assert self.api_access_request.status == ApiAccessRequest.APPROVED |
test docstring copy | # Test case for property
# more tests are in test_descr
import sys
import unittest
from test.test_support import run_unittest
class PropertyBase(Exception):
pass
class PropertyGet(PropertyBase):
pass
class PropertySet(PropertyBase):
pass
class PropertyDel(PropertyBase):
pass
class BaseClass(object):
def __init__(self):
self._spam = 5
@property
def spam(self):
"""BaseClass.getter"""
return self._spam
@spam.setter
def spam(self, value):
self._spam = value
@spam.deleter
def spam(self):
del self._spam
class SubClass(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""SubClass.getter"""
raise PropertyGet(self._spam)
@spam.setter
def spam(self, value):
raise PropertySet(self._spam)
@spam.deleter
def spam(self):
raise PropertyDel(self._spam)
class PropertyDocBase(object):
_spam = 1
def _get_spam(self):
return self._spam
spam = property(_get_spam, doc="spam spam spam")
class PropertyDocSub(PropertyDocBase):
@PropertyDocBase.spam.getter
def spam(self):
"""The decorator does not use this doc string"""
return self._spam
class PropertySubNewGetter(BaseClass):
@BaseClass.spam.getter
def spam(self):
"""new docstring"""
return 5
class PropertyNewGetter(object):
@property
def spam(self):
"""original docstring"""
return 1
@spam.getter
def spam(self):
"""new docstring"""
return 8
class PropertyTests(unittest.TestCase):
def test_property_decorator_baseclass(self):
# see #1620
base = BaseClass()
self.assertEqual(base.spam, 5)
self.assertEqual(base._spam, 5)
base.spam = 10
self.assertEqual(base.spam, 10)
self.assertEqual(base._spam, 10)
delattr(base, "spam")
self.assertTrue(not hasattr(base, "spam"))
self.assertTrue(not hasattr(base, "_spam"))
base.spam = 20
self.assertEqual(base.spam, 20)
self.assertEqual(base._spam, 20)
def test_property_decorator_subclass(self):
# see #1620
sub = SubClass()
self.assertRaises(PropertyGet, getattr, sub, "spam")
self.assertRaises(PropertySet, setattr, sub, "spam", None)
self.assertRaises(PropertyDel, delattr, sub, "spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_subclass_doc(self):
sub = SubClass()
self.assertEqual(sub.__class__.spam.__doc__, "SubClass.getter")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_decorator_baseclass_doc(self):
base = BaseClass()
self.assertEqual(base.__class__.spam.__doc__, "BaseClass.getter")
def test_property_decorator_doc(self):
base = PropertyDocBase()
sub = PropertyDocSub()
self.assertEqual(base.__class__.spam.__doc__, "spam spam spam")
self.assertEqual(sub.__class__.spam.__doc__, "spam spam spam")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_getter_doc_override(self):
newgettersub = PropertySubNewGetter()
self.assertEqual(newgettersub.spam, 5)
self.assertEqual(newgettersub.__class__.spam.__doc__, "new docstring")
newgetter = PropertyNewGetter()
self.assertEqual(newgetter.spam, 8)
self.assertEqual(newgetter.__class__.spam.__doc__, "new docstring")
# Issue 5890: subclasses of property do not preserve method __doc__ strings
class PropertySub(property):
"""This is a subclass of property"""
class PropertySubSlots(property):
"""This is a subclass of property that defines __slots__"""
__slots__ = ()
class PropertySubclassTests(unittest.TestCase):
def test_slots_docstring_copy_exception(self):
try:
class Foo(object):
@PropertySubSlots
def spam(self):
"""Trying to copy this docstring will raise an exception"""
return 1
#This raises a TypeError in Jython.
except (AttributeError, TypeError):
pass
else:
raise Exception("AttributeError not raised")
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def METHOD_NAME(self):
class Foo(object):
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return 1
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
    @unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_setter_copies_getter_docstring(self):
class Foo(object):
def __init__(self): self._spam = 1
@PropertySub
def spam(self):
"""spam wrapped in property subclass"""
return self._spam
@spam.setter
def spam(self, value):
"""this docstring is ignored"""
self._spam = value
foo = Foo()
self.assertEqual(foo.spam, 1)
foo.spam = 2
self.assertEqual(foo.spam, 2)
self.assertEqual(
Foo.spam.__doc__,
"spam wrapped in property subclass")
class FooSub(Foo):
@Foo.spam.setter
def spam(self, value):
"""another ignored docstring"""
self._spam = 'eggs'
foosub = FooSub()
self.assertEqual(foosub.spam, 1)
foosub.spam = 7
self.assertEqual(foosub.spam, 'eggs')
self.assertEqual(
FooSub.spam.__doc__,
"spam wrapped in property subclass")
    @unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_property_new_getter_new_docstring(self):
class Foo(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
@spam.getter
def spam(self):
"""a new docstring"""
return 2
self.assertEqual(Foo.spam.__doc__, "a new docstring")
class FooBase(object):
@PropertySub
def spam(self):
"""a docstring"""
return 1
class Foo2(FooBase):
@FooBase.spam.getter
def spam(self):
"""a new docstring"""
return 2
        self.assertEqual(Foo2.spam.__doc__, "a new docstring")
def test_main():
run_unittest(PropertyTests, PropertySubclassTests)
if __name__ == '__main__':
test_main() |
read |
from __future__ import print_function, division, absolute_import
import os
import numpy as np
from astropy.io import fits
from RMS.Formats.FFStruct import FFStruct
def METHOD_NAME(directory, filename, array=False, full_filename=False):
""" Read a FF structure from a FITS file.
Arguments:
directory: [str] Path to directory containing file
filename: [str] Name of FF*.fits file (either with FF and extension or without)
Keyword arguments:
array: [ndarray] True in order to populate structure's array element (default is False)
full_filename: [bool] True if full file name is given explicitly, a name which may differ from the
usual FF*.fits format. False by default.
Return:
[ff structure]
"""
# Make sure the file starts with "FF_"
if (filename.startswith('FF') and ('.fits' in filename)) or full_filename:
fid = open(os.path.join(directory, filename), "rb")
else:
fid = open(os.path.join(directory, "FF_" + filename + ".fits"), "rb")
# Init an empty FF structure
ff = FFStruct()
# Read in the FITS
hdulist = fits.open(fid)
# Read the header
head = hdulist[0].header
# Read in the data from the header
ff.nrows = head['NROWS']
ff.ncols = head['NCOLS']
ff.nbits = head['NBITS']
ff.nframes = head['NFRAMES']
ff.first = head['FIRST']
ff.camno = head['CAMNO']
ff.fps = head['FPS']
# Read in the image data
ff.maxpixel = hdulist[1].data
ff.maxframe = hdulist[2].data
ff.avepixel = hdulist[3].data
ff.stdpixel = hdulist[4].data
if array:
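        # Stack the four planes and reorder axes so ff.array has shape
        # (4, nrows, ncols): [maxpixel, maxframe, avepixel, stdpixel].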
ff.array = np.dstack([ff.maxpixel, ff.maxframe, ff.avepixel, ff.stdpixel])
ff.array = np.swapaxes(ff.array, 0, 1)
ff.array = np.swapaxes(ff.array, 0, 2)
    # Close the FITS file
hdulist.close()
return ff
def write(ff, directory, filename):
""" Write a FF structure to a FITS file in specified directory.
Arguments:
ff: [ff bin struct] FF bin file loaded in the FF structure
directory: [str] path to the directory where the file will be written
filename: [str] name of the file which will be written
Return:
None
"""
# Make sure the file starts with "FF"
if filename[:3] == "FF_":
file_path = os.path.join(directory, filename)
else:
file_path = os.path.join(directory, "FF_" + filename + ".fits")
# Create a new FITS file
# Create the header
head = fits.Header()
head['NROWS'] = ff.nrows
head['NCOLS'] = ff.ncols
head['NBITS'] = ff.nbits
head['NFRAMES'] = ff.nframes
head['FIRST'] = ff.first
head['CAMNO'] = ff.camno
head['FPS'] = ff.fps
# Deconstruct the 3D array into individual images
if ff.array is not None:
ff.maxpixel, ff.maxframe, ff.avepixel, ff.stdpixel = np.split(ff.array, 4, axis=0)
ff.maxpixel = ff.maxpixel[0]
ff.maxframe = ff.maxframe[0]
ff.avepixel = ff.avepixel[0]
ff.stdpixel = ff.stdpixel[0]
    # Create an image HDU for each of the four planes
maxpixel_hdu = fits.ImageHDU(ff.maxpixel, name='MAXPIXEL')
maxframe_hdu = fits.ImageHDU(ff.maxframe, name='MAXFRAME')
avepixel_hdu = fits.ImageHDU(ff.avepixel, name='AVEPIXEL')
stdpixel_hdu = fits.ImageHDU(ff.stdpixel, name='STDPIXEL')
# Create the primary part
prim = fits.PrimaryHDU(header=head)
    # Combine everything into a FITS HDU list
hdulist = fits.HDUList([prim, maxpixel_hdu, maxframe_hdu, avepixel_hdu, stdpixel_hdu])
# Save the FITS
hdulist.writeto(file_path, overwrite=True)
if __name__ == "__main__":
dir_path = '.'
file_name = 'FF_test.fits'
wid = 720
ht = 576
ff = FFStruct()
ff.ncols = wid
ff.nrows = ht
# ff.maxpixel = np.zeros((ht, wid), dtype=np.uint8)
# ff.avepixel = np.zeros((ht, wid), dtype=np.uint8) + 10
# ff.stdpixel = np.zeros((ht, wid), dtype=np.uint8) + 20
# ff.maxframe = np.zeros((ht, wid), dtype=np.uint8) + 30
maxpixel = np.zeros((ht, wid), dtype=np.uint8)
avepixel = np.zeros((ht, wid), dtype=np.uint8) + 10
stdpixel = np.zeros((ht, wid), dtype=np.uint8) + 20
maxframe = np.zeros((ht, wid), dtype=np.uint8) + 30
ff.array = np.stack([maxpixel, maxframe, avepixel, stdpixel], axis=0)
# Write the FF to FITS
write(ff, dir_path, file_name)
# Read the FITS
ff = METHOD_NAME(dir_path, file_name)
print(ff)
print(ff.maxpixel)
print(ff.maxframe)
print(ff.avepixel)
print(ff.stdpixel) |
test panel ols | from itertools import product
import pickle
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
from linearmodels.panel.data import PanelData
from linearmodels.panel.model import AmbiguityError, PanelOLS
from linearmodels.panel.utility import AbsorbingEffectError
from linearmodels.shared.hypotheses import WaldTestStatistic
from linearmodels.tests.panel._utility import datatypes, generate_data, lsdv
pytestmark = pytest.mark.filterwarnings(
"ignore::linearmodels.shared.exceptions.MissingValueWarning"
)
PERC_MISSING = [0, 0.02, 0.10, 0.33]
TYPES = datatypes
@pytest.fixture(
params=list(product(PERC_MISSING, TYPES)),
ids=list(
map(
lambda x: str(int(100 * x[0])) + "-" + str(x[1]),
product(PERC_MISSING, TYPES),
)
),
)
def data(request):
missing, datatype = request.param
rng = np.random.RandomState(12345)
return generate_data(missing, datatype, ntk=(131, 4, 3), rng=rng)
def METHOD_NAME(data):
PanelOLS(data.y, data.x).fit()
PanelOLS(data.y, data.x, entity_effects=True).fit()
PanelOLS(data.y, data.x, time_effects=True).fit()
def test_valid_weight_shape(data):
# Same size
n = np.prod(data.y.shape)
weights = 1 + np.random.random_sample(n)
mod = PanelOLS(data.y, data.x, weights=weights)
mod.fit()
w = mod.weights.values2d
missing = PanelData(data.y).isnull | PanelData(data.x).isnull
expected = weights[~missing.squeeze()][:, None]
expected = expected / expected.mean()
assert w == pytest.approx(expected)
# Per time
if isinstance(data.x, pd.DataFrame):
n = len(data.y.index.levels[1])
k = len(data.y.index.levels[0])
elif isinstance(data.x, np.ndarray):
n = data.y.shape[0]
k = data.y.shape[1]
else:
n = data.y.shape[1]
k = data.y.shape[2]
weights = 1 + np.random.random_sample(n)
mod = PanelOLS(data.y, data.x, weights=weights)
mod.fit()
w = mod.weights.values2d
expected = weights[:, None] @ np.ones((1, k))
expected = expected.T.ravel()
expected = expected[~missing.squeeze()][:, None]
expected = expected / expected.mean()
assert w == pytest.approx(expected)
# Per entity
if isinstance(data.x, pd.DataFrame):
n = len(data.y.index.levels[0])
k = len(data.y.index.levels[1])
elif isinstance(data.x, np.ndarray):
n = data.y.shape[1]
k = data.y.shape[0]
else:
n = data.y.shape[2]
k = data.y.shape[1]
weights = 1 + np.random.random_sample(n)
mod = PanelOLS(data.y, data.x, weights=weights)
mod.fit()
w = mod.weights.values2d
expected = np.ones((k, 1)) @ weights[None, :]
expected = expected.T.ravel()
expected = expected[~missing.squeeze()][:, None]
expected = expected / expected.mean()
assert w == pytest.approx(expected)
weights = 1 + np.random.random_sample(data.y.shape)
mod = PanelOLS(data.y, data.x, weights=weights)
mod.fit()
w = mod.weights.values2d
expected = weights.T.ravel()
expected = expected[~missing.squeeze()][:, None]
expected = expected / expected.mean()
assert w == pytest.approx(expected)
def test_weight_incorrect_shape(data):
weights = np.ones(np.prod(data.y.shape) - 1)
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=weights)
weights = np.ones((data.y.shape[0], data.y.shape[1] - 1))
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=weights)
def test_invalid_weight_values(data):
w = PanelData(data.w)
w.dataframe.iloc[::13, :] = 0.0
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=w)
w = PanelData(data.w)
w.dataframe.iloc[::13, :] = -0.0
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=w)
w = PanelData(data.w)
w.dataframe.iloc[::29, :] = -1.0
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=w)
def test_panel_lsdv(data):
mod = PanelOLS(data.y, data.x, entity_effects=True)
y, x = mod.dependent.dataframe, mod.exog.dataframe
res = mod.fit()
expected = lsdv(y, x, has_const=False, entity=True)
assert_allclose(res.params.squeeze(), expected)
mod = PanelOLS(data.y, data.x, time_effects=True)
res = mod.fit()
expected = lsdv(y, x, has_const=False, time=True)
assert_allclose(res.params.squeeze(), expected)
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
res = mod.fit()
expected = lsdv(y, x, has_const=False, entity=True, time=True)
assert_allclose(res.params.squeeze(), expected, rtol=1e-4)
other = y.copy()
other.iloc[:, :] = 0
other = other.astype(np.int64)
skip = other.shape[0] // 3
for i in range(skip):
other.iloc[i::skip] = i
mod = PanelOLS(y, x, other_effects=other)
res = mod.fit()
expected = lsdv(y, x, has_const=False, general=other.iloc[:, 0].values)
assert_allclose(res.params.squeeze(), expected, rtol=1e-4)
def test_incorrect_weight_shape(data):
w = data.w
if isinstance(w, pd.DataFrame):
entities = w.index.levels[0][:4]
w = w.loc[pd.IndexSlice[entities[0] : entities[-1]], :]
elif isinstance(w, np.ndarray):
w = w[:3]
w = w[None, :, :]
else: # xarray
return
with pytest.raises(ValueError):
PanelOLS(data.y, data.x, weights=w)
def test_weight_ambiguity(data):
if isinstance(data.x, pd.DataFrame):
t = len(data.y.index.levels[1])
entities = data.x.index.levels[0]
index_slice = pd.IndexSlice[entities[0] : entities[t - 1]]
x = data.x.loc[index_slice, :]
else:
t = data.x.shape[1]
x = data.x[:, :, :t]
y = data.y
weights = 1 + np.random.random_sample(t)
with pytest.raises(AmbiguityError):
PanelOLS(y, x, weights=weights)
@pytest.mark.parametrize("intercept", [True, False])
def test_absorbing_effect(data, intercept):
x = data.x.copy()
if isinstance(data.x, pd.DataFrame):
nentity = len(x.index.levels[0])
ntime = len(x.index.levels[1])
temp = data.x.iloc[:, 0].copy()
temp.loc[:] = 1.0
temp.iloc[: (ntime * (nentity // 2))] = 0
if intercept:
x["Intercept"] = 1.0
x["absorbed"] = temp
else:
intercept_vals = np.ones((1, x.shape[1], x.shape[2]))
absorbed = np.ones((1, x.shape[1], x.shape[2]))
absorbed[:, :, : x.shape[2] // 2] = 0
if intercept:
extra = [x, intercept_vals, absorbed]
else:
extra = [x, absorbed]
x = np.concatenate(extra, 0)
with pytest.raises(AbsorbingEffectError) as exc_info:
mod = PanelOLS(data.y, x, entity_effects=True)
mod.fit()
var_names = mod.exog.vars
assert var_names[3] in str(exc_info.value)
assert (" " * (2 - intercept) + var_names[-1]) in str(exc_info.value)
@pytest.mark.filterwarnings("ignore::DeprecationWarning")
def test_all_missing(data):
y = PanelData(data.y)
x = PanelData(data.x)
missing = y.isnull | x.isnull
y.drop(missing)
x.drop(missing)
import warnings
with warnings.catch_warnings(record=True) as w:
PanelOLS(y.dataframe, x.dataframe).fit()
assert len(w) == 0
def test_pickle(data):
mod = PanelOLS(data.y, data.x, entity_effects=True, time_effects=True)
remod = pickle.loads(pickle.dumps(mod))
res = mod.fit()
reres = remod.fit()
rereres = pickle.loads(pickle.dumps(res))
assert_allclose(res.params, reres.params)
assert_allclose(res.params, rereres.params)
assert_allclose(res.cov, reres.cov)
assert_allclose(res.cov, rereres.cov)
assert isinstance(res.f_statistic_robust, WaldTestStatistic)
assert isinstance(reres.f_statistic_robust, WaldTestStatistic)
    assert isinstance(rereres.f_statistic_robust, WaldTestStatistic)
collect gaussian | # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import paddle
import numpy as np
import math
from paddle.framework import ParamAttr
from paddle.nn import Layer
from paddle.nn.initializer import Constant
from paddle.utils import unique_name
from paddle.quantization.factory import QuanterFactory
from .lsq_func import LsqFunc, round
from .base_fake_quanter import BaseFakeQuanterLayer
class WeightLSQplusQuanter(QuanterFactory):
r"""
Weight quantizer. More details can be found in
https://arxiv.org/pdf/1902.08153.pdf and https://arxiv.org/pdf/2004.09576.pdf.
Args:
        per_channel(bool): Whether to quantize channel-wise or layer-wise, where True selects channel-wise quantization (one scale per output channel) and False selects layer-wise quantization.
        batch_init(int): Number of initial batches used to collect a Gaussian approximation of the weight distribution in each layer.
        quant_linear(bool): Whether the weight comes from a Linear layer.
dtype(str): Trainable data type.
name(str): The name of the layer.
reduce_type(str): The reduce type which is needed when parallel training.
Examples:
.. code-block:: python
from paddle.quantization import QuantConfig
from paddle.quantization.quanters import ActLSQplusQuanter, WeightLSQplusQuanter
weight_quanter = WeightLSQplusQuanter()
act_quanter = ActLSQplusQuanter()
q_config = QuantConfig(activation=act_quanter, weight=weight_quanter)
"""
def __init__(self,
quant_bits=8,
sign=True,
symmetric=True,
per_channel=False,
batch_init=20,
quant_linear=False,
channel_num=None,
reduce_type=None,
dtype='float32',
name=None):
super(WeightLSQplusQuanter, self).__init__(
quant_bits=quant_bits,
sign=sign,
symmetric=symmetric,
per_channel=per_channel,
batch_init=batch_init,
quant_linear=quant_linear,
channel_num=channel_num,
reduce_type=reduce_type,
dtype=dtype,
name=name)
def _get_class(self):
return WeightLSQplusQuanterLayer
class WeightLSQplusQuanterLayer(BaseFakeQuanterLayer):
def __init__(self,
layer,
quant_bits=8,
sign=True,
symmetric=True,
per_channel=False,
all_postive=False,
batch_init=20,
quant_linear=False,
channel_num=None,
reduce_type=None,
dtype='float32',
name=None):
super(WeightLSQplusQuanterLayer, self).__init__()
self._per_channel = per_channel
self._quant_linear = quant_linear
self._batch_init = batch_init
self._name = name
self._quant_axis = 1 if quant_linear else 0
self._collect_axis = 0 if quant_linear else 1
self._reduce_type = reduce_type
self.div = 2**self._quant_bits - 1
self.qmin, self.qmax = self.qmin_qmax
self._current_batch_id = 0
self._init_state = 0
scale_prefix = ("{}.scale".format(name)
if name else 'quant_dequant.scale')
self._scale_name = unique_name.generate(scale_prefix)
s_attr = ParamAttr(
name=self._scale_name, initializer=Constant(1.0), trainable=True)
channel_num = layer.weight.shape[
self._quant_axis] if self._per_channel else 1
self._scale = self.create_parameter(
shape=[channel_num], attr=s_attr, dtype=dtype)
self._scale.stop_gradient = False
def init_params(self, weight):
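        # LSQ-style initialization (per the LSQ/LSQ+ papers): gradient scale
        # g = 1/sqrt(N * qmax) with N the number of weight elements, and an
        # initial step size of max(|mean - 3*std|, |mean + 3*std|) / (2**bits - 1),
        # computed per channel or per layer.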
self.g = paddle.to_tensor(1.0 / math.sqrt(weight.numel() * self.qmax))
if self._per_channel:
weight_tmp = weight.detach().reshape((weight.shape[0], -1))
mean = paddle.mean(weight_tmp, axis=self._collect_axis)
std = paddle.std(weight_tmp, axis=self._collect_axis)
s = paddle.max(
paddle.stack(
[paddle.abs(mean - 3 * std),
paddle.abs(mean + 3 * std)]),
axis=0, )
self._scale.set_value(s / self.div)
else:
mean = paddle.mean(weight.detach())
std = paddle.std(weight.detach())
self._scale.set_value(
max([paddle.abs(mean - 3 * std),
paddle.abs(mean + 3 * std)]) / self.div)
self._init_state += 1
def METHOD_NAME(self, weight):
if self._per_channel:
weight_tmp = weight.detach().reshape((weight.shape[0], -1))
mean = paddle.mean(weight_tmp, axis=self._collect_axis)
std = paddle.std(weight_tmp, axis=self._collect_axis)
s = paddle.max(
paddle.stack(
[paddle.abs(mean - 3 * std),
paddle.abs(mean + 3 * std)]),
axis=0, )
            self._scale.set_value(self._scale * 0.9 + 0.1 * s / self.div)
else:
mean = paddle.mean(weight.detach())
std = paddle.std(weight.detach())
self._scale.set_value(self._scale * 0.9 + 0.1 * max(
[paddle.abs(mean - 3 * std),
paddle.abs(mean + 3 * std)]) / self.div)
self._init_state += 1
def forward(self, weight):
if self._reduce_type == "max":
paddle.distributed.all_reduce(
self._scale, op=paddle.distributed.ReduceOp.MAX)
if self._init_state == 0:
self.init_params(weight)
elif self._init_state < self._batch_init:
self.METHOD_NAME(weight)
weight.stop_gradient = False
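        # Apply the LSQ fake-quantization autograd function with the learnable
        # scale, gradient scale g and the [qmin, qmax] range (per-channel along
        # quant_axis when enabled).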
w_q = LsqFunc.apply(
weight,
self._scale,
self.g,
self.qmin,
self.qmax,
self._per_channel,
self._quant_axis, )
return w_q
def bit_length(self):
""" Return the bit length of quantized data.
"""
return self._quant_bits
def quant_axis(self):
""" Return quantization axis.
"""
return self._quant_axis
def scales(self):
""" Return output scales.
"""
return self._scale
def zero_points(self):
""" Return output zero points.
"""
if self._zero_point is None:
if self._symmetric:
if self._sign:
self._zero_point = 0
else:
self._zero_point = (self.qmax + self.qmin) / 2
else:
self._zero_point = self.qmin - round(self.qmin / self._scale)
self._zero_point = paddle.clip(self._zero_point, self.qmin,
self.qmax)
return self._zero_point |