label (string, lengths 1–61) | code (string, lengths 4k–8k) |
---|---|
build schema on 200 201 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"network perimeter profile create",
)
class Create(AAZCommand):
"""Creates or updates a network profile.
:example: Create NSP Profile
az network perimeter profile create -n MyProfile --perimeter-name MyPerimeter -g MyResourceGroup
"""
_aaz_info = {
"version": "2021-02-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.network/networksecurityperimeters/{}/profiles/{}", "2021-02-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.perimeter_name = AAZStrArg(
options=["--perimeter-name"],
help="The name of the network security perimeter.",
required=True,
)
_args_schema.profile_name = AAZStrArg(
options=["-n", "--name", "--profile-name"],
help="The name of the NSP profile.",
required=True,
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
# define Arg Group "Parameters"
_args_schema = cls._args_schema
_args_schema.location = AAZResourceLocationArg(
arg_group="Parameters",
help="Resource location.",
fmt=AAZResourceLocationArgFormat(
resource_group_arg="resource_group",
),
)
_args_schema.tags = AAZDictArg(
options=["--tags"],
arg_group="Parameters",
help="Resource tags.",
)
tags = cls._args_schema.tags
tags.Element = AAZStrArg()
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.NspProfilesCreateOrUpdate(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class NspProfilesCreateOrUpdate(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200, 201]:
return self.on_200_201(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityPerimeters/{networkSecurityPerimeterName}/profiles/{profileName}",
**self.url_parameters
)
@property
def method(self):
return "PUT"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"networkSecurityPerimeterName", self.ctx.args.perimeter_name,
required=True,
),
**self.serialize_url_param(
"profileName", self.ctx.args.profile_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-02-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("location", AAZStrType, ".location")
_builder.set_prop("name", AAZStrType, ".profile_name")
_builder.set_prop("tags", AAZDictType, ".tags")
tags = _builder.get(".tags")
if tags is not None:
tags.set_elements(AAZStrType, ".")
return self.serialize_content(_content_value)
def on_200_201(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self.METHOD_NAME
)
_schema_on_200_201 = None
@classmethod
def METHOD_NAME(cls):
if cls._schema_on_200_201 is not None:
return cls._schema_on_200_201
cls._schema_on_200_201 = AAZObjectType()
_schema_on_200_201 = cls._schema_on_200_201
_schema_on_200_201.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200_201.location = AAZStrType()
_schema_on_200_201.name = AAZStrType()
_schema_on_200_201.properties = AAZObjectType()
_schema_on_200_201.tags = AAZDictType()
_schema_on_200_201.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200_201.properties
properties.access_rules_version = AAZStrType(
serialized_name="accessRulesVersion",
flags={"read_only": True},
)
properties.diagnostic_settings_version = AAZStrType(
serialized_name="diagnosticSettingsVersion",
flags={"read_only": True},
)
tags = cls._schema_on_200_201.tags
tags.Element = AAZStrType()
return cls._schema_on_200_201
class _CreateHelper:
"""Helper class for Create"""
__all__ = ["Create"] |
test rows created event type | import pytest
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.webhooks.registries import webhook_event_type_registry
from baserow.contrib.database.ws.rows.signals import before_rows_update
@pytest.mark.django_db()
def METHOD_NAME(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
field = data_fixture.create_text_field(table=table, primary=True, name="Test 1")
model = table.get_model()
row = model.objects.create()
webhook = data_fixture.create_table_webhook(
table=table,
request_method="POST",
url="http://localhost",
use_user_field_names=False,
)
payload = webhook_event_type_registry.get("rows.created").get_payload(
event_id="1", webhook=webhook, model=model, table=table, rows=[row]
)
assert payload == {
"table_id": table.id,
"event_id": "1",
"event_type": "rows.created",
"items": [
{
"id": 1,
"order": "1.00000000000000000000",
f"field_{field.id}": None,
}
],
}
webhook.use_user_field_names = True
webhook.save()
payload = webhook_event_type_registry.get("rows.created").get_payload(
event_id="1", webhook=webhook, model=model, table=table, rows=[row]
)
assert payload == {
"table_id": table.id,
"event_id": "1",
"event_type": "rows.created",
"items": [
{
"id": 1,
"order": "1.00000000000000000000",
"Test 1": None,
}
],
}
@pytest.mark.django_db()
def test_rows_updated_event_type(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
table_2 = data_fixture.create_database_table(database=table.database)
text_field = data_fixture.create_text_field(
table=table, primary=True, name="Test 1"
)
table_2_primary_field = data_fixture.create_text_field(
table=table_2, name="Primary Field", primary=True
)
link_row_field = FieldHandler().create_field(
user=user,
table=table,
type_name="link_row",
name="Link",
link_row_table=table_2,
)
lookup_model = table_2.get_model()
i1 = lookup_model.objects.create(
**{f"field_{table_2_primary_field.id}": "Lookup 1"}
)
grid = data_fixture.create_grid_view(table=table)
data_fixture.create_grid_view_field_option(grid, link_row_field, hidden=False)
model = table.get_model()
row = model.objects.create(**{f"field_{text_field.id}": "Old Test value"})
getattr(row, f"field_{link_row_field.id}").add(i1.id)
before_return = {
before_rows_update: before_rows_update(
rows=[row],
model=model,
table=table,
sender=None,
user=None,
updated_field_ids=None,
)
}
row = RowHandler().update_row_by_id(
user=user,
table=table,
row_id=row.id,
values={f"field_{text_field.id}": "New Test value"},
)
row.refresh_from_db()
webhook = data_fixture.create_table_webhook(
table=table,
request_method="POST",
url="http://localhost",
use_user_field_names=False,
)
payload = webhook_event_type_registry.get("rows.updated").get_payload(
event_id="1",
webhook=webhook,
model=model,
table=table,
rows=[row],
before_return=before_return,
)
assert payload == {
"table_id": table.id,
"event_id": "1",
"event_type": "rows.updated",
"items": [
{
"id": 1,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "New Test value",
f"field_{link_row_field.id}": [{"id": 1, "value": "Lookup 1"}],
}
],
"old_items": [
{
"id": 1,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Old Test value",
f"field_{link_row_field.id}": [{"id": 1, "value": "Lookup 1"}],
}
],
}
webhook.use_user_field_names = True
webhook.save()
payload = webhook_event_type_registry.get("rows.updated").get_payload(
event_id="1",
webhook=webhook,
model=model,
table=table,
rows=[row],
before_return=before_return,
)
assert payload == {
"table_id": table.id,
"event_id": "1",
"event_type": "rows.updated",
"items": [
{
"id": 1,
"order": "1.00000000000000000000",
f"{text_field.name}": "New Test value",
f"{link_row_field.name}": [{"id": 1, "value": "Lookup 1"}],
}
],
"old_items": [
{
"id": 1,
"order": "1.00000000000000000000",
f"{text_field.name}": "Old Test value",
f"{link_row_field.name}": [{"id": 1, "value": "Lookup 1"}],
}
],
}
@pytest.mark.django_db()
def test_rows_deleted_event_type(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
data_fixture.create_text_field(table=table, primary=True, name="Test 1")
model = table.get_model()
row = model.objects.create()
webhook = data_fixture.create_table_webhook(
table=table,
request_method="POST",
url="http://localhost",
use_user_field_names=False,
)
payload = webhook_event_type_registry.get("rows.deleted").get_payload(
event_id="1",
webhook=webhook,
model=model,
table=table,
rows=[row],
)
assert payload == {
"table_id": table.id,
"event_id": "1",
"event_type": "rows.deleted",
"row_ids": [row.id],
} |
from data | import warnings
from django.utils.functional import cached_property
from .utils import OrderBy, OrderByTuple, segment
class TableData:
"""
Base class for table data containers.
"""
def __init__(self, data):
self.data = data
def __getitem__(self, key):
"""
Slicing returns a new `.TableData` instance, indexing returns a single record.
"""
return self.data[key]
def __iter__(self):
"""
for ... in ... default to using this. There's a bug in Django 1.3
with indexing into QuerySets, so this side-steps that problem (as well
as just being a better way to iterate).
"""
return iter(self.data)
def set_table(self, table):
"""
`Table.__init__` calls this method to inject an instance of itself into the
`TableData` instance.
A good place to do additional checks to ensure the Table and the TableData
instance will work together properly.
"""
self.table = table
@property
def model(self):
return getattr(self.data, "model", None)
@property
def ordering(self):
return None
@property
def verbose_name(self):
return "item"
@property
def verbose_name_plural(self):
return "items"
@staticmethod
def METHOD_NAME(data):
# allow explicit child classes of TableData to be passed to Table()
if isinstance(data, TableData):
return data
if TableQuerysetData.validate(data):
return TableQuerysetData(data)
elif TableListData.validate(data):
return TableListData(list(data))
raise ValueError(
"data must be QuerySet-like (have count() and order_by()) or support"
f" list(data) -- {type(data).__name__} has neither"
)
class TableListData(TableData):
"""
Table data container for a list of dicts, for example::
[
{'name': 'John', 'age': 20},
{'name': 'Brian', 'age': 25}
]
.. note::
Other structures might have worked in the past, but are not explicitly
supported or tested.
"""
@staticmethod
def validate(data):
"""
Validates `data` for use in this container
"""
return hasattr(data, "__iter__") or (
hasattr(data, "__len__") and hasattr(data, "__getitem__")
)
def __len__(self):
return len(self.data)
@property
def verbose_name(self):
return getattr(self.data, "verbose_name", super().verbose_name)
@property
def verbose_name_plural(self):
return getattr(self.data, "verbose_name_plural", super().verbose_name_plural)
def order_by(self, aliases):
"""
Order the data based on order by aliases (prefixed column names) in the
table.
Arguments:
aliases (`~.utils.OrderByTuple`): optionally prefixed names of
columns ('-' indicates descending order) in order of
significance with regard to data ordering.
"""
accessors = []
for alias in aliases:
bound_column = self.table.columns[OrderBy(alias).bare]
# bound_column.order_by reflects the current ordering applied to
# the table. As such we need to check the current ordering on the
# column and use the opposite if it doesn't match the alias prefix.
if alias[0] != bound_column.order_by_alias[0]:
accessors += bound_column.order_by.opposite
else:
accessors += bound_column.order_by
self.data.sort(key=OrderByTuple(accessors).key)
class TableQuerysetData(TableData):
"""
Table data container for a queryset.
"""
@staticmethod
def validate(data):
"""
Validates `data` for use in this container
"""
return (
hasattr(data, "count")
and callable(data.count)
and hasattr(data, "order_by")
and callable(data.order_by)
)
def __len__(self):
"""Cached data length"""
if not hasattr(self, "_length") or self._length is None:
if hasattr(self.table, "paginator"):
# for paginated tables, use QuerySet.count() as we are interested in total number of records.
self._length = self.data.count()
else:
# for non-paginated tables, use the length of the QuerySet
self._length = len(self.data)
return self._length
def set_table(self, table):
super().set_table(table)
if self.model and getattr(table._meta, "model", None) and self.model != table._meta.model:
warnings.warn(
f"Table data is of type {self.model} but {table._meta.model} is specified in Table.Meta.model"
)
@property
def ordering(self):
"""
Returns the list of order by aliases that are enforcing ordering on the
data.
If the data is unordered, an empty sequence is returned. If the
ordering can not be determined, `None` is returned.
This works by inspecting the actual underlying data. As such it's only
supported for querysets.
"""
aliases = {}
for bound_column in self.table.columns:
aliases[bound_column.order_by_alias] = bound_column.order_by
try:
return next(segment(self.data.query.order_by, aliases))
except StopIteration:
pass
def order_by(self, aliases):
"""
Order the data based on order by aliases (prefixed column names) in the
table.
Arguments:
aliases (`~.utils.OrderByTuple`): optionally prefixed names of
columns ('-' indicates descending order) in order of
significance with regard to data ordering.
"""
modified_any = False
accessors = []
for alias in aliases:
bound_column = self.table.columns[OrderBy(alias).bare]
# bound_column.order_by reflects the current ordering applied to
# the table. As such we need to check the current ordering on the
# column and use the opposite if it doesn't match the alias prefix.
if alias[0] != bound_column.order_by_alias[0]:
accessors += bound_column.order_by.opposite
else:
accessors += bound_column.order_by
if bound_column:
queryset, modified = bound_column.order(self.data, alias[0] == "-")
if modified:
self.data = queryset
modified_any = True
# custom ordering
if modified_any:
return
# Traditional ordering
if accessors:
order_by_accessors = (a.for_queryset() for a in accessors)
self.data = self.data.order_by(*order_by_accessors)
@cached_property
def verbose_name(self):
"""
The full (singular) name for the data.
Model's `~django.db.Model.Meta.verbose_name` is honored.
"""
return self.data.model._meta.verbose_name
@cached_property
def verbose_name_plural(self):
"""
The full (plural) name for the data.
Model's `~django.db.Model.Meta.verbose_name` is honored.
"""
return self.data.model._meta.verbose_name_plural |
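
As a usage sketch of the dispatch implemented by the masked static factory above (named `from_data` in the upstream django-tables2 package, which is an assumption here): a plain iterable becomes a `TableListData`, a QuerySet-like object with callable `count()`/`order_by()` becomes a `TableQuerysetData`, and anything else raises `ValueError`.

```python
# Illustrative only; assumes django-tables2 is installed and that the masked
# factory corresponds to upstream's TableData.from_data.
from django_tables2.data import TableData, TableListData

rows = [{"name": "John", "age": 20}, {"name": "Brian", "age": 25}]
container = TableData.from_data(rows)   # iterable without count()/order_by()
assert isinstance(container, TableListData)
assert len(container) == 2

# A Django QuerySet (callable count() and order_by()) would instead be wrapped
# in TableQuerysetData; an object supporting neither raises ValueError.
```
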
exec scan | # -*- coding: utf-8 -*-
# SPDX-FileCopyrightText: 2014-2023 Greenbone AG
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=unused-argument
# pylint: disable=disallowed-name
import time
from unittest.mock import Mock
from xml.etree import ElementTree as et
from ospd.ospd import OSPDaemon
def assert_called_once(mock: Mock):
if hasattr(mock, 'assert_called_once'):
return mock.assert_called_once()
if not mock.call_count == 1:
# pylint: disable=protected-access
msg = (
f"Expected '{mock._mock_name or 'mock'}' to have "
f"been called once. Called {mock.call_count} "
f"times.{mock._calls_repr()}"
)
raise AssertionError(msg)
def assert_called(mock: Mock):
"""assert that the mock was called at least once"""
if mock.call_count == 0:
# pylint: disable=protected-access
msg = f"Expected '{mock._mock_name or 'mock'}' to have been called."
raise AssertionError(msg)
class FakePsutil:
def __init__(self, available=None):
self.available = available
class FakeStream:
def __init__(self, return_value=True):
self.response = b''
self.return_value = return_value
def write(self, data):
self.response = self.response + data
return self.return_value
def get_response(self):
return et.fromstring(self.response)
class FakeDataManager:
def __init__(self):
pass
def dict(self):
return dict()
def __enter__(self):
pass
def __exit__(self, foo=None, bar=None, bar1=None, foo1=None):
pass
class DummyXML:
@staticmethod
def get_custom_vt_as_xml_str(vt_id, custom):
return '<custom><mytest>static test</mytest></custom>'
@staticmethod
def get_params_vt_as_xml_str(vt_id, vt_params):
return (
'<params><param id="abc" type="string">'
'<name>ABC</name><description>Test ABC</description>'
'<default>yes</default></param>'
'<param id="def" type="string">'
'<name>DEF</name><description>Test DEF</description>'
'<default>no</default></param></params>'
)
@staticmethod
def get_refs_vt_as_xml_str(vt_id, vt_refs):
response = (
'<refs><ref type="cve" id="CVE-2010-4480"/>'
'<ref type="url" id="http://example.com"/></refs>'
)
return response
@staticmethod
def get_dependencies_vt_as_xml_str(vt_id, vt_dependencies):
response = (
'<dependencies>'
'<dependency vt_id="1.3.6.1.4.1.25623.1.0.50282" />'
'<dependency vt_id="1.3.6.1.4.1.25623.1.0.50283" />'
'</dependencies>'
)
return response
@staticmethod
def get_severities_vt_as_xml_str(vt_id, severities):
response = (
'<severities><severity cvss_base="5.0" cvss_'
'type="cvss_base_v2">AV:N/AC:L/Au:N/C:N/I:N/'
'A:P</severity></severities>'
)
return response
@staticmethod
def get_detection_vt_as_xml_str(
vt_id, detection=None, qod_type=None, qod=None
):
response = '<detection qod_type="package">some detection</detection>'
return response
@staticmethod
def get_summary_vt_as_xml_str(vt_id, summary):
response = '<summary>Some summary</summary>'
return response
@staticmethod
def get_affected_vt_as_xml_str(vt_id, affected):
response = '<affected>Some affected</affected>'
return response
@staticmethod
def get_impact_vt_as_xml_str(vt_id, impact):
response = '<impact>Some impact</impact>'
return response
@staticmethod
def get_insight_vt_as_xml_str(vt_id, insight):
response = '<insight>Some insight</insight>'
return response
@staticmethod
def get_solution_vt_as_xml_str(
vt_id, solution, solution_type=None, solution_method=None
):
response = '<solution>Some solution</solution>'
return response
@staticmethod
def get_creation_time_vt_as_xml_str(
vt_id, vt_creation_time
): # pylint: disable=arguments-differ
response = f'<creation_time>{vt_creation_time}</creation_time>'
return response
@staticmethod
def get_modification_time_vt_as_xml_str(
vt_id, vt_modification_time
): # pylint: disable=arguments-differ
response = (
f'<modification_time>{vt_modification_time}</modification_time>'
)
return response
class DummyWrapper(OSPDaemon):
def __init__(self, results, checkresult=True):
super().__init__()
self.checkresult = checkresult
self.results = results
self.initialized = True
self.scan_collection.data_manager = FakeDataManager()
self.scan_collection.file_storage_dir = '/tmp'
self.scan_collection.scan_collection_lock = FakeDataManager()
def check(self):
return self.checkresult
def METHOD_NAME(self, scan_id):
time.sleep(0.01)
for res in self.results:
if res.result_type == 'log':
self.add_scan_log(
scan_id,
res.host,
res.hostname,
res.name,
res.value,
res.port,
)
if res.result_type == 'error':
self.add_scan_error(
scan_id,
res.host,
res.hostname,
res.name,
res.value,
res.port,
)
elif res.result_type == 'host-detail':
self.add_scan_host_detail(
scan_id,
res.host,
res.hostname,
res.name,
res.value,
)
elif res.result_type == 'alarm':
self.add_scan_alarm(
scan_id,
res.host,
res.hostname,
res.name,
res.value,
res.port,
res.test_id,
res.severity,
res.qod,
)
else:
raise ValueError(res.result_type) |
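
A small sketch of how the fakes above are typically exercised: `FakeStream` simply buffers whatever XML the daemon writes and hands it back as a parsed element. The response string below is an arbitrary example, not a real OSP message.

```python
# Minimal illustration of FakeStream (assumes the classes above are in scope).
stream = FakeStream()
stream.write(b'<get_version_response status="200">')
stream.write(b'</get_version_response>')
root = stream.get_response()          # parsed with xml.etree.ElementTree
assert root.tag == 'get_version_response'
assert root.get('status') == '200'
```
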
test popen initializer | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test PopenPoolExecutor."""
import pytest
import os
import psutil
import time
from tvm.contrib.popen_pool import PopenWorker, PopenPoolExecutor
from tvm.testing import (
identity_after,
terminate_self,
initializer,
after_initializer,
register_ffi,
call_py_ffi,
call_cpp_ffi,
call_cpp_py_ffi,
fast_summation,
slow_summation,
timeout_job,
)
def test_popen_worker():
proc = PopenWorker()
with pytest.raises(TimeoutError):
proc.send(identity_after, [1, 100], timeout=0.01)
proc.recv()
with pytest.raises(ChildProcessError):
proc.send(terminate_self)
proc.recv()
proc.send(identity_after, [2, 0])
assert proc.recv() == 2
proc.send(identity_after, [4, 0.0001])
assert proc.recv() == 4
def test_popen_worker_reuses():
proc = PopenWorker(maximum_uses=None)
proc.send(os.getpid)
initial_pid = proc.recv()
proc.send(os.getpid)
assert proc.recv() == initial_pid
def test_popen_worker_recycles():
proc = PopenWorker(maximum_uses=2)
proc.send(os.getpid)
initial_pid = proc.recv()
assert psutil.pid_exists(initial_pid)
proc.send(os.getpid)
assert proc.recv() == initial_pid
assert psutil.pid_exists(initial_pid)
proc.send(os.getpid)
assert proc.recv() != initial_pid
assert not psutil.pid_exists(initial_pid)
def test_popen_pool_executor():
import tvm
pool = PopenPoolExecutor(max_workers=2, timeout=0.01)
value1 = pool.submit(identity_after, 1, 100)
value2 = pool.submit(terminate_self)
value3 = pool.submit(identity_after, 3, 0)
value4 = pool.submit(tvm.runtime.String, "xyz")
with pytest.raises(TimeoutError):
value1.result()
with pytest.raises(ChildProcessError):
value2.result()
assert value3.result() == 3
value = value4.result()
assert isinstance(value, tvm.runtime.String)
assert value == "xyz"
pool = PopenPoolExecutor(max_workers=4, timeout=None)
values = pool.map_with_error_catching(lambda x: x, range(100))
for idx, val in enumerate(values):
assert val.value == idx
def METHOD_NAME():
initargs = [1, 2, 3]
proc = PopenWorker(initializer=initializer, initargs=initargs)
proc.send(after_initializer)
test_global_state_1, test_global_state_2, test_global_state_3 = proc.recv()
assert test_global_state_1 == initargs[0]
assert test_global_state_2 == initargs[1]
assert test_global_state_3 == initargs[2]
def test_popen_worker_recycles_with_initializer():
initargs = [1, 2, 3]
proc = PopenWorker(initializer=initializer, initargs=initargs, maximum_uses=3)
proc.send(os.getpid)
initial_pid = proc.recv()
proc.send(after_initializer)
assert list(proc.recv()) == initargs
proc.send(os.getpid)
assert proc.recv() == initial_pid
# The process should be recycled with this send.
proc.send(os.getpid)
assert proc.recv() != initial_pid
# But the initializer should've run this time as well.
proc.send(after_initializer)
assert list(proc.recv()) == initargs
def test_popen_ffi():
proc = PopenWorker(register_ffi)
# call python function via ffi
initargs = [0]
proc.send(call_py_ffi, initargs)
assert proc.recv() == initargs[0]
# call cpp function via ffi
initargs = [1]
proc.send(call_cpp_ffi, initargs)
assert proc.recv() == initargs[0]
# call python function from cpp function via ffi
initargs = [2]
proc.send(call_cpp_py_ffi, initargs)
assert proc.recv() == initargs[0]
def test_popen_pool_executor_timeout():
timeout = 0.5
pool = PopenPoolExecutor(timeout=timeout)
f1 = pool.submit(timeout_job, timeout)
while not f1.done():
pass
try:
res = f1.result()
except Exception as ex:
assert isinstance(ex, TimeoutError)
def test_popen_pool_executor_recycles():
pool = PopenPoolExecutor(max_workers=1, timeout=None, maximum_process_uses=2)
initial_pid = pool.submit(os.getpid).result()
assert initial_pid == pool.submit(os.getpid).result()
assert initial_pid != pool.submit(os.getpid).result()
if __name__ == "__main__":
test_popen_worker()
test_popen_worker_recycles()
test_popen_pool_executor()
METHOD_NAME()
test_popen_worker_recycles_with_initializer()
test_popen_ffi()
test_popen_pool_executor_timeout()
test_popen_pool_executor_recycles() |
start | #!/usr/bin/env python
# This program is dedicated to the public domain under the CC0 license.
# pylint: disable=import-error,unused-argument
"""
Simple example of a bot that uses a custom webhook setup and handles custom updates.
For the custom webhook setup, the libraries `flask`, `asgiref` and `uvicorn` are used. Please
install them as `pip install flask[async]~=2.3.2 uvicorn~=0.23.2 asgiref~=3.7.2`.
Note that any other `asyncio` based web server framework can be used for a custom webhook setup
just as well.
Usage:
Set bot Token, URL, admin CHAT_ID and PORT after the imports.
You may also need to change the `host` value in the uvicorn configuration to match your setup.
Press Ctrl-C on the command line or send a signal to the process to stop the bot.
"""
import asyncio
import html
import logging
from dataclasses import dataclass
from http import HTTPStatus
import uvicorn
from asgiref.wsgi import WsgiToAsgi
from flask import Flask, Response, abort, make_response, request
from telegram import Update
from telegram.constants import ParseMode
from telegram.ext import (
Application,
CallbackContext,
CommandHandler,
ContextTypes,
ExtBot,
TypeHandler,
)
# Enable logging
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", level=logging.INFO
)
# set higher logging level for httpx to avoid all GET and POST requests being logged
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
# Define configuration constants
URL = "https://domain.tld"
ADMIN_CHAT_ID = 123456
PORT = 8000
TOKEN = "123:ABC" # nosec B105
@dataclass
class WebhookUpdate:
"""Simple dataclass to wrap a custom update type"""
user_id: int
payload: str
class CustomContext(CallbackContext[ExtBot, dict, dict, dict]):
"""
Custom CallbackContext class that makes `user_data` available for updates of type
`WebhookUpdate`.
"""
@classmethod
def from_update(
cls,
update: object,
application: "Application",
) -> "CustomContext":
if isinstance(update, WebhookUpdate):
return cls(application=application, user_id=update.user_id)
return super().from_update(update, application)
async def METHOD_NAME(update: Update, context: CustomContext) -> None:
"""Display a message with instructions on how to use this bot."""
payload_url = html.escape(f"{URL}/submitpayload?user_id=<your user id>&payload=<payload>")
text = (
f"To check if the bot is still running, call <code>{URL}/healthcheck</code>.\n\n"
f"To post a custom update, call <code>{payload_url}</code>."
)
await update.message.reply_html(text=text)
async def webhook_update(update: WebhookUpdate, context: CustomContext) -> None:
"""Handle custom updates."""
chat_member = await context.bot.get_chat_member(chat_id=update.user_id, user_id=update.user_id)
payloads = context.user_data.setdefault("payloads", [])
payloads.append(update.payload)
combined_payloads = "</code>\n• <code>".join(payloads)
text = (
f"The user {chat_member.user.mention_html()} has sent a new payload. "
f"So far they have sent the following payloads: \n\n• <code>{combined_payloads}</code>"
)
await context.bot.send_message(chat_id=ADMIN_CHAT_ID, text=text, parse_mode=ParseMode.HTML)
async def main() -> None:
"""Set up PTB application and a web application for handling the incoming requests."""
context_types = ContextTypes(context=CustomContext)
# Here we set updater to None because we want our custom webhook server to handle the updates
# and hence we don't need an Updater instance
application = (
Application.builder().token(TOKEN).updater(None).context_types(context_types).build()
)
# register handlers
application.add_handler(CommandHandler("start", METHOD_NAME))
application.add_handler(TypeHandler(type=WebhookUpdate, callback=webhook_update))
# Pass webhook settings to telegram
await application.bot.set_webhook(url=f"{URL}/telegram", allowed_updates=Update.ALL_TYPES)
# Set up webserver
flask_app = Flask(__name__)
@flask_app.post("/telegram") # type: ignore[misc]
async def telegram() -> Response:
"""Handle incoming Telegram updates by putting them into the `update_queue`"""
await application.update_queue.put(Update.de_json(data=request.json, bot=application.bot))
return Response(status=HTTPStatus.OK)
@flask_app.route("/submitpayload", methods=["GET", "POST"]) # type: ignore[misc]
async def custom_updates() -> Response:
"""
Handle incoming webhook updates by also putting them into the `update_queue` if
the required parameters were passed correctly.
"""
try:
user_id = int(request.args["user_id"])
payload = request.args["payload"]
except KeyError:
abort(
HTTPStatus.BAD_REQUEST,
"Please pass both `user_id` and `payload` as query parameters.",
)
except ValueError:
abort(HTTPStatus.BAD_REQUEST, "The `user_id` must be an integer!")
await application.update_queue.put(WebhookUpdate(user_id=user_id, payload=payload))
return Response(status=HTTPStatus.OK)
@flask_app.get("/healthcheck") # type: ignore[misc]
async def health() -> Response:
"""For the health endpoint, reply with a simple plain text message."""
response = make_response("The bot is still running fine :)", HTTPStatus.OK)
response.mimetype = "text/plain"
return response
webserver = uvicorn.Server(
config=uvicorn.Config(
app=WsgiToAsgi(flask_app),
port=PORT,
use_colors=False,
host="127.0.0.1",
)
)
# Run application and webserver together
async with application:
await application.METHOD_NAME()
await webserver.serve()
await application.stop()
if __name__ == "__main__":
asyncio.run(main()) |
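
To exercise the custom endpoints defined above from the outside, a client-side sketch like the following could be run while the bot is up; the `requests` library, local address, and payload values are assumptions matching the `PORT` constant and routes in the example.

```python
# Hypothetical client for the Flask routes defined above (run separately while
# the bot is running). Values are placeholders.
import requests

print(requests.get("http://127.0.0.1:8000/healthcheck").text)
# -> "The bot is still running fine :)"

# Enqueues a WebhookUpdate(user_id=123456, payload="hello") via custom_updates().
requests.post(
    "http://127.0.0.1:8000/submitpayload",
    params={"user_id": 123456, "payload": "hello"},
)
```
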
mobile net v2 x0 75 | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
from paddle import ParamAttr
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.nn import Conv2D, BatchNorm, Linear, Dropout
from paddle.nn import AdaptiveAvgPool2D, MaxPool2D, AvgPool2D
from paddleseg.cvlibs import manager
from paddleseg import utils
__all__ = [
"MobileNetV2_x0_25",
"MobileNetV2_x0_5",
"MobileNetV2_x0_75",
"MobileNetV2_x1_0",
"MobileNetV2_x1_5",
"MobileNetV2_x2_0",
]
class MobileNetV2(nn.Layer):
"""
The MobileNetV2 implementation based on PaddlePaddle.
The original article refers to
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, Liang-Chieh Chen
"MobileNetV2: Inverted Residuals and Linear Bottlenecks"
(https://arxiv.org/abs/1801.04381).
Args:
scale (float, optional): The scale of channel. Default: 1.0
in_channels (int, optional): The channels of input image. Default: 3.
pretrained (str, optional): The path or url of pretrained model. Default: None
"""
def __init__(self, scale=1.0, in_channels=3, pretrained=None):
super().__init__()
self.scale = scale
self.pretrained = pretrained
prefix_name = ""
bottleneck_params_list = [
(1, 16, 1, 1),
(6, 24, 2, 2), # x4
(6, 32, 3, 2), # x8
(6, 64, 4, 2),
(6, 96, 3, 1), # x16
(6, 160, 3, 2),
(6, 320, 1, 1), # x32
]
self.out_index = [1, 2, 4, 6]
self.conv1 = ConvBNLayer(
num_channels=in_channels,
num_filters=int(32 * scale),
filter_size=3,
stride=2,
padding=1,
name=prefix_name + "conv1_1")
self.block_list = []
i = 1
in_c = int(32 * scale)
for layer_setting in bottleneck_params_list:
t, c, n, s = layer_setting
i += 1
block = self.add_sublayer(
prefix_name + "conv" + str(i),
sublayer=InvresiBlocks(
in_c=in_c,
t=t,
c=int(c * scale),
n=n,
s=s,
name=prefix_name + "conv" + str(i)))
self.block_list.append(block)
in_c = int(c * scale)
out_channels = [
bottleneck_params_list[idx][1] for idx in self.out_index
]
self.feat_channels = [int(c * scale) for c in out_channels]
self.init_weight()
def forward(self, inputs):
feat_list = []
y = self.conv1(inputs, if_act=True)
for idx, block in enumerate(self.block_list):
y = block(y)
if idx in self.out_index:
feat_list.append(y)
return feat_list
def init_weight(self):
if self.pretrained is not None:
utils.load_entire_model(self, self.pretrained)
class ConvBNLayer(nn.Layer):
def __init__(self,
num_channels,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
name=None,
use_cudnn=True):
super(ConvBNLayer, self).__init__()
self._conv = Conv2D(
in_channels=num_channels,
out_channels=num_filters,
kernel_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
weight_attr=ParamAttr(name=name + "_weights"),
bias_attr=False)
self._batch_norm = BatchNorm(
num_filters,
param_attr=ParamAttr(name=name + "_bn_scale"),
bias_attr=ParamAttr(name=name + "_bn_offset"),
moving_mean_name=name + "_bn_mean",
moving_variance_name=name + "_bn_variance")
def forward(self, inputs, if_act=True):
y = self._conv(inputs)
y = self._batch_norm(y)
if if_act:
y = F.relu6(y)
return y
class InvertedResidualUnit(nn.Layer):
def __init__(self, num_channels, num_in_filter, num_filters, stride,
filter_size, padding, expansion_factor, name):
super(InvertedResidualUnit, self).__init__()
num_expfilter = int(round(num_in_filter * expansion_factor))
self._expand_conv = ConvBNLayer(
num_channels=num_channels,
num_filters=num_expfilter,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
name=name + "_expand")
self._bottleneck_conv = ConvBNLayer(
num_channels=num_expfilter,
num_filters=num_expfilter,
filter_size=filter_size,
stride=stride,
padding=padding,
num_groups=num_expfilter,
use_cudnn=False,
name=name + "_dwise")
self._linear_conv = ConvBNLayer(
num_channels=num_expfilter,
num_filters=num_filters,
filter_size=1,
stride=1,
padding=0,
num_groups=1,
name=name + "_linear")
def forward(self, inputs, ifshortcut):
y = self._expand_conv(inputs, if_act=True)
y = self._bottleneck_conv(y, if_act=True)
y = self._linear_conv(y, if_act=False)
if ifshortcut:
y = paddle.add(inputs, y)
return y
class InvresiBlocks(nn.Layer):
def __init__(self, in_c, t, c, n, s, name):
super(InvresiBlocks, self).__init__()
self._first_block = InvertedResidualUnit(
num_channels=in_c,
num_in_filter=in_c,
num_filters=c,
stride=s,
filter_size=3,
padding=1,
expansion_factor=t,
name=name + "_1")
self._block_list = []
for i in range(1, n):
block = self.add_sublayer(
name + "_" + str(i + 1),
sublayer=InvertedResidualUnit(
num_channels=c,
num_in_filter=c,
num_filters=c,
stride=1,
filter_size=3,
padding=1,
expansion_factor=t,
name=name + "_" + str(i + 1)))
self._block_list.append(block)
def forward(self, inputs):
y = self._first_block(inputs, ifshortcut=False)
for block in self._block_list:
y = block(y, ifshortcut=True)
return y
@manager.BACKBONES.add_component
def MobileNetV2_x0_25(**kwargs):
model = MobileNetV2(scale=0.25, **kwargs)
return model
@manager.BACKBONES.add_component
def MobileNetV2_x0_5(**kwargs):
model = MobileNetV2(scale=0.5, **kwargs)
return model
@manager.BACKBONES.add_component
def METHOD_NAME(**kwargs):
model = MobileNetV2(scale=0.75, **kwargs)
return model
@manager.BACKBONES.add_component
def MobileNetV2_x1_0(**kwargs):
model = MobileNetV2(scale=1.0, **kwargs)
return model
@manager.BACKBONES.add_component
def MobileNetV2_x1_5(**kwargs):
model = MobileNetV2(scale=1.5, **kwargs)
return model
@manager.BACKBONES.add_component
def MobileNetV2_x2_0(**kwargs):
model = MobileNetV2(scale=2.0, **kwargs)
return model |
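
As a quick sanity-check sketch (assuming PaddlePaddle and PaddleSeg are installed and the definitions above are in scope), the backbone returns the four multi-scale feature maps selected by `out_index`:

```python
# Illustrative forward pass; channel counts follow feat_channels above.
import paddle

backbone = MobileNetV2(scale=1.0, in_channels=3)
x = paddle.rand([1, 3, 224, 224])
feats = backbone(x)
print(len(feats))                       # 4 feature maps (strides 4, 8, 16, 32)
print([f.shape[1] for f in feats])      # [24, 32, 96, 320] == backbone.feat_channels
```
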
has add permission | from django.contrib import admin
from django.db.models import Prefetch
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.admin import (
AMOModelAdmin,
DateRangeFilter,
MultipleRelatedListFilter,
)
from olympia.translations.utils import truncate_text
from olympia.zadmin.admin import related_single_content_link
from .models import DeniedRatingWord, Rating
class RatingTypeFilter(admin.SimpleListFilter):
# Human-readable title which will be displayed in the
# right admin sidebar just above the filter options.
title = 'Type'
# Parameter for the filter that will be used in the URL query.
parameter_name = 'type'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return (
('rating', 'User Rating'),
('reply', 'Developer/Admin Reply'),
)
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() == 'rating':
return queryset.filter(reply_to__isnull=True)
elif self.value() == 'reply':
return queryset.filter(reply_to__isnull=False)
return queryset
class AddonFilterForIPSearch(MultipleRelatedListFilter):
"""Filter for `addon`, only available on IP search as it's expensive."""
title = 'By Addon'
parameter_name = 'addon'
def lookups(self, request, model_admin):
# This filter needs to find all addon ids from the main queryset, which
# causes it to be expensive, so only do that if we're doing an IP
# search. If the IP search is cancelled or changed though, the user
# might end up with an add-on filter still active that no longer
# matches, so we support filter values that are already present in the
# querystring, allowing the user to remove them.
lookups = {
int(addon_id): '??'
for addon_id in self._used_parameters.get(self.parameter_name, [])
}
if (
search_term := model_admin.get_search_query(request)
) and model_admin.ip_addresses_and_networks_from_query(search_term):
qs, search_use_distinct = model_admin.get_search_results(
request, model_admin.get_queryset(request), search_term
)
lookups_from_queryset = dict(
qs.values_list('addon', 'addon__guid').distinct().order_by('addon_id')
)
lookups.update(lookups_from_queryset)
return [
(addon_id, f'{addon_id}: {addon_guid}')
for addon_id, addon_guid in lookups.items()
]
def queryset(self, request, queryset):
value = self.value()
if value is None:
return queryset
return queryset.filter(addon__in=value)
class RatingAdmin(AMOModelAdmin):
raw_id_fields = (
'addon',
'version',
'user',
'reply_to',
)
readonly_fields = (
'addon',
'addon_link',
'version',
'user',
'reply_to',
'known_ip_adresses',
'body',
'rating',
'deleted',
'user_link',
)
fields = (
'addon_link',
'version',
'body',
'rating',
'known_ip_adresses',
'user_link',
'deleted',
)
list_display = (
'id',
'addon_link',
'created',
'user_link',
'known_ip_adresses',
'rating',
'is_reply',
'flag',
'deleted',
'truncated_body',
)
list_filter = (
'deleted',
RatingTypeFilter,
'rating',
('created', DateRangeFilter),
AddonFilterForIPSearch,
)
actions = ('delete_selected',)
list_select_related = ('user',) # For addon/reply_to see get_queryset()
search_fields = ('body',)
extra_list_display_for_ip_searches = ()
search_by_ip_actions = (
amo.LOG.ADD_RATING.id,
amo.LOG.EDIT_RATING.id,
)
search_by_ip_activity_accessor = 'ratinglog__activity_log'
search_by_ip_activity_reverse_accessor = 'activity_log__ratinglog__rating'
def get_search_id_field(self, request):
# Numeric searches are by add-on for ratings (the rating id rarely
# matters, it's more important to be able to search by add-on id).
return 'addon'
def get_queryset(self, request):
base_qs = Rating.unfiltered.all()
return base_qs.prefetch_related(
Prefetch('addon', queryset=Addon.unfiltered.all().only_translations()),
Prefetch('reply_to', queryset=base_qs),
)
def METHOD_NAME(self, request):
return False
def truncated_body(self, obj):
return truncate_text(obj.body, 140)[0] if obj.body else ''
def is_reply(self, obj):
return bool(obj.reply_to)
is_reply.boolean = True
is_reply.admin_order_field = 'reply_to'
def addon_link(self, obj):
return related_single_content_link(obj, 'addon')
addon_link.short_description = 'Add-on'
def user_link(self, obj):
return related_single_content_link(obj, 'user')
user_link.short_description = 'User'
class DeniedRatingWordAdmin(AMOModelAdmin):
readonly_fields = (
'created',
'modified',
)
fields = (
'created',
'modified',
'word',
'moderation',
)
list_display = (
'created',
'word',
'moderation',
)
actions = ('delete_selected',)
admin.site.register(Rating, RatingAdmin)
admin.site.register(DeniedRatingWord, DeniedRatingWordAdmin) |
stub update distribution | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Stub functions that are used by the Amazon CloudFront unit tests.
When tests are run against an actual AWS account, the stubber class does not
set up stubs and passes all calls through to the Boto3 client.
"""
import datetime
from test_tools.example_stubber import ExampleStubber
class CloudFrontStubber(ExampleStubber):
"""
A class that implements stub functions used by CloudFront unit tests.
The stubbed functions expect certain parameters to be passed to them as
part of the tests, and raise errors if the parameters are not as expected.
"""
def __init__(self, client, use_stubs=True):
"""
Initializes the object with a specific client and configures it for
stubbing or AWS passthrough.
:param client: A Boto3 CloudFront client.
:param use_stubs: When True, use stubs to intercept requests. Otherwise,
pass requests through to AWS.
"""
super().__init__(client, use_stubs)
def stub_list_distributions(self, distribs, error_code=None):
expected_params = {}
response = {
'DistributionList': {
'Marker': 'marker',
'MaxItems': 100,
'IsTruncated': False,
'Quantity': len(distribs),
'Items': [{
'ARN': f'arn:aws:cloudfront::123456789012:distribution/{index}',
'Status': 'Deployed',
'LastModifiedTime': datetime.datetime.now(),
'Aliases': {'Quantity': 0},
'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
'CacheBehaviors': {'Quantity': 0},
'CustomErrorResponses': {'Quantity': 0},
'Comment': 'Testing!',
'PriceClass': 'PriceClass_All',
'Enabled': True,
'Restrictions': {'GeoRestriction': {'Quantity': 0, 'RestrictionType': ''}},
'WebACLId': '',
'HttpVersion': 'http2',
'IsIPV6Enabled': True,
'DomainName': distrib['name'],
'Id': distrib['id'],
'ViewerCertificate': {
'CertificateSource': distrib['cert_source'],
'Certificate': distrib['cert']
},
'Staging': False,
} for index, distrib in enumerate(distribs)]
}
}
self._stub_bifurcator(
'list_distributions', expected_params, response, error_code=error_code)
def stub_get_distribution_config(self, distrib_id, comment, etag, error_code=None):
expected_params = {'Id': distrib_id}
response = {
'DistributionConfig': {
'CallerReference': 'test',
'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
'Enabled': True,
'Comment': comment
},
'ETag': etag}
self._stub_bifurcator(
'get_distribution_config', expected_params, response, error_code=error_code)
def METHOD_NAME(self, distrib_id, comment, etag, error_code=None):
expected_params = {
'Id': distrib_id,
'DistributionConfig': {
'CallerReference': 'test',
'Origins': {'Quantity': 0, 'Items': [{'Id': 'test-id', 'DomainName': 'test'}]},
'DefaultCacheBehavior': {'TargetOriginId': '', 'ViewerProtocolPolicy': ''},
'Enabled': True,
'Comment': comment
},
'IfMatch': etag}
response = {}
self._stub_bifurcator(
'update_distribution', expected_params, response, error_code=error_code) |
openurl rds | import random
from urllib.parse import urlparse
from visidata import VisiData, vd, Sheet, options, anytype, asyncthread, ColumnItem
__all__ = ['openurl_postgres', 'openurl_postgresql', 'openurl_rds', 'PgTable', 'PgTablesSheet']
vd.option('postgres_schema', 'public', 'The desired schema for the Postgres database')
def codeToType(type_code, colname):
psycopg2 = vd.importExternal('psycopg2', 'psycopg2-binary')
try:
tname = psycopg2._psycopg.string_types[type_code].name
if 'INTEGER' in tname:
return int
if 'STRING' in tname:
return str
except KeyError:
vd.status('unknown postgres type_code %s for %s' % (type_code, colname))
return anytype
@VisiData.api
def METHOD_NAME(vd, url, filetype=None):
boto3 = vd.importExternal('boto3')
psycopg2 = vd.importExternal('psycopg2', 'psycopg2-binary')
rds = boto3.client('rds')
url = urlparse(url.given)
_, region, dbname = url.path.split('/')
token = rds.generate_db_auth_token(url.hostname, url.port, url.username, region)
conn = psycopg2.connect(
user=url.username,
dbname=dbname,
host=url.hostname,
port=url.port,
password=token)
return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
@VisiData.api
def openurl_postgres(vd, url, filetype=None):
psycopg2 = vd.importExternal('psycopg2', 'psycopg2-binary')
url = urlparse(url.given)
dbname = url.path[1:]
conn = psycopg2.connect(
user=url.username,
dbname=dbname,
host=url.hostname,
port=url.port,
password=url.password)
return PgTablesSheet(dbname+"_tables", sql=SQL(conn))
VisiData.openurl_postgresql=VisiData.openurl_postgres
class SQL:
def __init__(self, conn):
self.conn = conn
def cur(self, qstr):
import string
randomname = ''.join(random.choice(string.ascii_uppercase) for _ in range(6))
cur = self.conn.cursor(randomname)
cur.execute(qstr)
return cur
@asyncthread
def query_async(self, qstr, callback=None):
with self.cur(qstr) as cur:
callback(cur)
cur.close()
@VisiData.api
def postgresGetColumns(vd, cur):
for i, coldesc in enumerate(cur.description):
yield ColumnItem(coldesc.name, i, type=codeToType(coldesc.type_code, coldesc.name))
# rowdef: (table_name, ncols)
class PgTablesSheet(Sheet):
rowtype = 'tables'
def reload(self):
schema = options.postgres_schema
qstr = f'''
SELECT relname table_name, column_count.ncols, reltuples::bigint est_nrows
FROM pg_class, pg_namespace, (
SELECT table_name, COUNT(column_name) AS ncols FROM information_schema.COLUMNS WHERE table_schema = '{schema}' GROUP BY table_name
) AS column_count
WHERE pg_class.relnamespace = pg_namespace.oid AND pg_namespace.nspname = '{schema}' AND column_count.table_name = relname;
'''
with self.sql.cur(qstr) as cur:
self.nrowsPerTable = {}
self.rows = []
# try to get first row to make cur.description available
r = cur.fetchone()
if r:
self.addRow(r)
self.columns = []
for c in vd.postgresGetColumns(cur):
self.addColumn(c)
self.setKeys(self.columns[0:1]) # table_name is the key
for r in cur:
self.addRow(r)
def openRow(self, row):
return PgTable(self.name+"."+row[0], source=row[0], sql=self.sql)
# rowdef: tuple of values as returned by fetchone()
class PgTable(Sheet):
@asyncthread
def reload(self):
if self.options.postgres_schema:
source = f"{self.options.postgres_schema}.{self.source}"
else:
source = self.source
with self.sql.cur(f"SELECT * FROM {source}") as cur:
self.rows = []
r = cur.fetchone()
if r:
self.addRow(r)
self.columns = []
for c in vd.postgresGetColumns(cur):
self.addColumn(c)
for r in cur:
self.addRow(r) |
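
The two openers above differ mainly in how the connection URL is unpacked; here is a small sketch of the expected URL shapes, with all values as placeholders:

```python
# Illustrative only: URL shapes expected by openurl_postgres and the RDS opener.
from urllib.parse import urlparse

# postgres://user:password@host:port/dbname
pg = urlparse("postgres://user:secret@localhost:5432/mydb")
print(pg.username, pg.hostname, pg.port, pg.path[1:])   # user localhost 5432 mydb

# rds://user@host:port/region/dbname  (the password is an IAM auth token)
rds = urlparse("rds://dbuser@example.rds.amazonaws.com:5432/us-east-1/mydb")
_, region, dbname = rds.path.split("/")
print(rds.username, rds.hostname, rds.port, region, dbname)
```
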
resource apply sparse | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adadelta for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend_config
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.optimizers.Adadelta')
class Adadelta(optimizer_v2.OptimizerV2):
r"""Optimizer that implements the Adadelta algorithm.
Adadelta optimization is a stochastic gradient descent method that is based on
adaptive learning rate per dimension to address two drawbacks:
1) the continual decay of learning rates throughout training
2) the need for a manually selected global learning rate
Two accumulation steps are required:
1) the accumulation of gradients squared,
2) the accumulation of updates squared.
Initialization:
$$E[g^2]_0 := 0 \text{(Initialize gradient 2nd order moment vector)}$$
$$E[\Delta x^2]_0 := 0 \text{(Initialize 2nd order variable update)}$$
$$t := t + 1$$
$$E[g^2]_t := \rho * E[g^2]_{t-1} + (1 - \rho) * g^2$$
$$\Delta x_t = -RMS[\Delta x]_{t-1} * g_t / RMS[g]_t$$
$$E[\Delta x^2]_t := \rho * E[\Delta x^2]_{t-1} + (1 - \rho) * \Delta x_t^2$$
$$x_t := x_{t-1} + \Delta x_{t}$$
References
See [M. D. Zeiler](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
"""
def __init__(self,
learning_rate=0.001,
rho=0.95,
epsilon=1e-7,
name='Adadelta',
**kwargs):
"""Construct a new Adadelta optimizer.
Adadelta is a more robust extension of Adagrad that adapts learning rates
based on a moving window of gradient updates, instead of accumulating all
past gradients. This way, Adadelta continues learning even when many updates
have been done. Compared to Adagrad, in the original version of Adadelta you
don't have to set an initial learning rate. In this version, initial
learning rate can be set, as in most other Keras optimizers.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
epsilon: A `Tensor` or a floating point value. A constant epsilon used
to better conditioning the grad update.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
**kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`,
`decay`}. `clipnorm` is clip gradients by norm; `clipvalue` is clip
gradients by value, `decay` is included for backward compatibility to
allow time inverse decay of learning rate. `lr` is included for backward
compatibility, recommended to use `learning_rate` instead.
@compatibility(eager)
When eager execution is enabled, `learning_rate`, `rho`, and `epsilon` can
each be a callable that takes no arguments and returns the actual value to
use. This can be useful for changing these values across different
invocations of optimizer functions.
@end_compatibility
"""
super(Adadelta, self).__init__(name, **kwargs)
self._set_hyper('learning_rate', kwargs.get('lr', learning_rate))
self._set_hyper('decay', self._initial_decay)
self._set_hyper('rho', rho)
self.epsilon = epsilon or backend_config.epsilon()
def _create_slots(self, var_list):
# Separate for-loops to respect the ordering of slot variables from v1.
for v in var_list:
self.add_slot(v, 'accum_grad')
for v in var_list:
self.add_slot(v, 'accum_var')
def _prepare_local(self, var_device, var_dtype, apply_state):
super(Adadelta, self)._prepare_local(var_device, var_dtype, apply_state)
apply_state[(var_device, var_dtype)].update(dict(
epsilon=ops.convert_to_tensor(self.epsilon, var_dtype),
rho=array_ops.identity(self._get_hyper('rho', var_dtype))
))
def set_weights(self, weights):
params = self.weights
# Override set_weights for backward compatibility of Keras V1 optimizer
# since it does not include iteration at head of the weight list. Set
# iteration to 0.
if len(params) == len(weights) + 1:
weights = [np.array(0)] + weights
super(Adadelta, self).set_weights(weights)
def _resource_apply_dense(self, grad, var, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
coefficients['lr_t'],
coefficients['rho'],
coefficients['epsilon'],
grad,
use_locking=self._use_locking)
def METHOD_NAME(self, grad, var, indices, apply_state=None):
var_device, var_dtype = var.device, var.dtype.base_dtype
coefficients = ((apply_state or {}).get((var_device, var_dtype))
or self._fallback_apply_state(var_device, var_dtype))
accum_grad = self.get_slot(var, 'accum_grad')
accum_var = self.get_slot(var, 'accum_var')
return training_ops.resource_sparse_apply_adadelta(
var.handle,
accum_grad.handle,
accum_var.handle,
coefficients['lr_t'],
coefficients['rho'],
coefficients['epsilon'],
grad,
indices,
use_locking=self._use_locking)
def get_config(self):
config = super(Adadelta, self).get_config()
config.update({
'learning_rate': self._serialize_hyperparameter('learning_rate'),
'decay': self._serialize_hyperparameter('decay'),
'rho': self._serialize_hyperparameter('rho'),
'epsilon': self.epsilon,
})
return config |
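
For intuition, here is a minimal NumPy sketch of the update rule documented in the class docstring; it is illustrative only, since the actual optimizer delegates to `training_ops.resource_apply_adadelta`.

```python
# One Adadelta step per the documented recurrences; lr=1.0 matches the paper.
import numpy as np

def adadelta_step(x, g, acc_grad, acc_update, lr=1.0, rho=0.95, eps=1e-7):
    acc_grad = rho * acc_grad + (1 - rho) * g ** 2              # E[g^2]_t
    delta = -np.sqrt(acc_update + eps) / np.sqrt(acc_grad + eps) * g
    acc_update = rho * acc_update + (1 - rho) * delta ** 2      # E[dx^2]_t
    return x + lr * delta, acc_grad, acc_update

x, acc_g, acc_u = np.array([1.0]), np.zeros(1), np.zeros(1)
x, acc_g, acc_u = adadelta_step(x, np.array([0.5]), acc_g, acc_u)
print(x)
```
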
test pow send transaction | #!/usr/bin/env python3
# encoding: utf-8
import unittest as ut
from SEEDBlockchain import Wallet
from web3 import Web3
from seedemu import *
class MultipleChainsTestCase(ut.TestCase):
@classmethod
def setUpClass(cls) -> None:
cls.wallet1 = Wallet(chain_id=1337)
cls.wallet2 = Wallet(chain_id=1338)
for name in ['Alice', 'Bob', 'Charlie', 'David', 'Eve']:
cls.wallet1.createAccount(name)
cls.wallet2.createAccount(name)
url_1 = 'http://10.152.0.71:8540'
url_2 = 'http://10.160.0.71:8545'
cls.wallet1.connectToBlockchain(url_1)
cls.wallet2.connectToBlockchain(url_2, isPOA=True)
def test_pow_chain_connection(self):
self.assertTrue(self.wallet1._web3.isConnected())
def test_poa_chain_connection(self):
self.assertTrue(self.wallet2._web3.isConnected())
def test_pow_chain_id(self):
self.assertEqual(self.wallet1._web3.eth.chain_id, 1337)
def test_poa_chain_id(self):
self.assertEqual(self.wallet2._web3.eth.chain_id, 1338)
def METHOD_NAME(self):
recipient = self.wallet1.getAccountAddressByName('Bob')
txhash = self.wallet1.sendTransaction(recipient, 0.1, sender_name='David', wait=True, verbose=False)
self.assertEqual(self.wallet1.getTransactionReceipt(txhash)["status"], 1)
def test_poa_send_transaction(self):
recipient = self.wallet2.getAccountAddressByName('Alice')
txhash = self.wallet2.sendTransaction(recipient, 0.1, sender_name='Eve', wait=True, verbose=False)
self.assertEqual(self.wallet2.getTransactionReceipt(txhash)["status"], 1)
def test_pow_chain_consensus(self):
config = dict(self.wallet1._web3.geth.admin.nodeInfo().protocols.eth.config)
self.assertTrue("ethash" in config.keys())
def test_poa_chain_consensus(self):
config = dict(self.wallet2._web3.geth.admin.nodeInfo().protocols.eth.config)
self.assertTrue("clique" in config.keys())
def test_pow_peer_counts(self):
peer_counts = len(self.wallet1._web3.geth.admin.peers())
self.assertEqual(peer_counts, 3)
def test_poa_peer_counts(self):
peer_counts = len(self.wallet2._web3.geth.admin.peers())
self.assertEqual(peer_counts, 2)
def test_import_account(self):
self.assertEqual(self.wallet1._web3.eth.getBalance(Web3.toChecksumAddress("9f189536def35811e1a759860672fe49a4f89e94")), 10)
def test_pow_emulator_account(self):
accounts = []
for i in range(1,5):
accounts.extend(EthAccount.createEmulatorAccountsFromMnemonic(i, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=32*EthUnit.ETHER.value, total=1, password="admin"))
for account in accounts:
self.assertTrue(self.wallet1._web3.eth.getBalance(account.address) >= 32*EthUnit.ETHER.value)
def test_poa_emulator_account(self):
accounts = []
for i in range(5,9):
accounts.extend(EthAccount.createEmulatorAccountsFromMnemonic(i, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=32*EthUnit.ETHER.value, total=1, password="admin"))
for account in accounts:
self.assertTrue(self.wallet2._web3.eth.getBalance(account.address) >= 32*EthUnit.ETHER.value)
def test_pow_create_account(self):
account = EthAccount.createEmulatorAccountFromMnemonic(3, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=20*EthUnit.ETHER.value, index=1, password="admin")
self.assertTrue(self.wallet1._web3.eth.getBalance(account.address) >= 20*EthUnit.ETHER.value)
def test_pow_create_accounts(self):
accounts = []
for index in range(1, 4):
accounts.append(EthAccount.createEmulatorAccountFromMnemonic(7, mnemonic="great awesome fun seed security lab protect system network prevent attack future", balance=30*EthUnit.ETHER.value, index=index, password="admin"))
for account in accounts:
self.assertTrue(self.wallet2._web3.eth.getBalance(account.address) >= 30*EthUnit.ETHER.value)
if __name__ == "__main__":
test_suite = ut.TestLoader().loadTestsFromTestCase(MultipleChainsTestCase)
res = ut.TextTestRunner(verbosity=2).run(test_suite)
num, errs, fails = res.testsRun, len(res.errors), len(res.failures)
print("score: %d of %d (%d errors, %d failures)" % (num - (errs+fails), num, errs, fails)) |
notifications | # notifications.py - notification endpoints
from mastodon.versions import _DICT_VERSION_NOTIFICATION
from mastodon.errors import MastodonIllegalArgumentError
from mastodon.utility import api_version
from mastodon.internals import Mastodon as Internals
from mastodon.types import Notification, IdType, PaginatableList, Account
from typing import Union, Optional, List
class Mastodon(Internals):
###
# Reading data: Notifications
###
@api_version("1.0.0", "3.5.0", _DICT_VERSION_NOTIFICATION)
def METHOD_NAME(self, id: Optional[Union[Notification, IdType]] = None, account_id: Optional[Union[Account, IdType]] = None, max_id: Optional[Union[Notification, IdType]] = None,
min_id: Optional[Union[Notification, IdType]] = None, since_id: Optional[Union[Notification, IdType]] = None, limit: Optional[int] = None,
exclude_types: Optional[List[str]] = None, types: Optional[List[str]] = None, mentions_only: Optional[bool] = None) -> PaginatableList[Notification]:
"""
Fetch notifications (mentions, favourites, reblogs, follows) for the logged-in
user. Pass `account_id` to get only notifications originating from the given account.
There are different types of notifications:
* `follow` - A user followed the logged in user
* `follow_request` - A user has requested to follow the logged in user (for locked accounts)
* `favourite` - A user favourited a post by the logged in user
* `reblog` - A user reblogged a post by the logged in user
* `mention` - A user mentioned the logged in user
* `poll` - A poll the logged in user created or voted in has ended
* `update` - A status the logged in user has reblogged (and only those, as of 4.0.0) has been edited
        * `status` - A user that the logged in user has enabled `notify` for (see :ref:`account_follow() <account_follow()>`) has posted a status
* `admin.sign_up` - For accounts with appropriate permissions (TODO: document which those are when adding the permission API): A new user has signed up
* `admin.report` - For accounts with appropriate permissions (TODO: document which those are when adding the permission API): A new report has been received
        Parameters `exclude_types` and `types` are arrays of these types, specifying them will in- or exclude the
        types of notifications given. It is legal to give both parameters at the same time, the result will then
be the intersection of the results of both filters. Specifying `mentions_only` is a deprecated way to set
`exclude_types` to all but mentions.
Can be passed an `id` to fetch a single notification.
Returns a list of :ref:`notification dicts <notification dicts>`.
"""
if mentions_only is not None:
if exclude_types is None and types is None:
if mentions_only:
if self.verify_minimum_version("3.5.0", cached=True):
types = ["mention"]
else:
exclude_types = ["follow", "favourite", "reblog", "poll", "follow_request"]
else:
raise MastodonIllegalArgumentError('Cannot specify exclude_types/types when mentions_only is present')
del mentions_only
if max_id is not None:
max_id = self.__unpack_id(max_id)
if min_id is not None:
min_id = self.__unpack_id(min_id)
if since_id is not None:
since_id = self.__unpack_id(since_id)
if account_id is not None:
account_id = self.__unpack_id(account_id)
if id is None:
params = self.__generate_params(locals(), ['id'])
return self.__api_request('GET', '/api/v1/notifications', params)
else:
id = self.__unpack_id(id)
return self.__api_request('GET', f"/api/v1/notifications/{id}")
###
# Writing data: Notifications
###
@api_version("1.0.0", "1.0.0", "1.0.0")
def notifications_clear(self):
"""
Clear out a user's notifications
"""
self.__api_request('POST', '/api/v1/notifications/clear')
@api_version("1.3.0", "2.9.2", "2.9.2")
def notifications_dismiss(self, id: Union[Notification, IdType]):
"""
Deletes a single notification
"""
id = self.__unpack_id(id)
if self.verify_minimum_version("2.9.2", cached=True):
self.__api_request('POST', f'/api/v1/notifications/{id}/dismiss')
else:
params = self.__generate_params(locals())
self.__api_request('POST', '/api/v1/notifications/dismiss', params) |
embedding init code | import sys
import types
from _typeshed import Incomplete, ReadableBuffer, WriteableBuffer
from collections.abc import Callable, Hashable
from typing import Any, TypeVar, overload
from typing_extensions import Literal, TypeAlias
import _cffi_backend
from setuptools._distutils.extension import Extension
_T = TypeVar("_T")
basestring: TypeAlias = str # noqa: Y042
class FFI:
CData: TypeAlias = _cffi_backend._CDataBase
CType: TypeAlias = _cffi_backend.CType
buffer: TypeAlias = _cffi_backend.buffer # noqa: Y042
BVoidP: CType
BCharA: CType
NULL: CType
errno: int
def __init__(self, backend: types.ModuleType | None = None) -> None: ...
def cdef(self, csource: str, override: bool = False, packed: bool = False, pack: int | None = None) -> None: ...
def embedding_api(self, csource: str, packed: bool = False, pack: bool | int | None = None) -> None: ...
if sys.platform == "win32":
def dlopen(self, name: str, flags: int = ...) -> _cffi_backend.Lib: ...
else:
def dlopen(self, name: str | None, flags: int = ...) -> _cffi_backend.Lib: ...
def dlclose(self, lib: _cffi_backend.Lib) -> None: ...
def typeof(self, cdecl: str | CData | types.BuiltinFunctionType | types.FunctionType) -> CType: ...
def sizeof(self, cdecl: str | CData) -> int: ...
def alignof(self, cdecl: str | CData) -> int: ...
def offsetof(self, cdecl: str | CData, *fields_or_indexes: str | int) -> int: ...
def new(self, cdecl: str | CType, init: Incomplete | None = None) -> CData: ...
def new_allocator(
self,
alloc: Callable[[int], CData] | None = None,
free: Callable[[CData], Any] | None = None,
should_clear_after_alloc: bool = True,
) -> _cffi_backend._Allocator: ...
def cast(self, cdecl: str | CType, source: CData | int) -> CData: ...
def string(self, cdata: CData, maxlen: int = -1) -> bytes | str: ...
def unpack(self, cdata: CData, length: int) -> bytes | str | list[Any]: ...
@overload
def from_buffer(self, cdecl: ReadableBuffer, require_writable: Literal[False] = False) -> CData: ...
@overload
def from_buffer(self, cdecl: WriteableBuffer, require_writable: Literal[True]) -> CData: ...
@overload
def from_buffer(
self, cdecl: str | CType, python_buffer: ReadableBuffer, require_writable: Literal[False] = False
) -> CData: ...
@overload
def from_buffer(self, cdecl: str | CType, python_buffer: WriteableBuffer, require_writable: Literal[True]) -> CData: ...
def memmove(self, dest: CData | WriteableBuffer, src: CData | ReadableBuffer, n: int) -> None: ...
@overload
def callback(
self,
cdecl: str | CType,
python_callable: None = None,
error: Any = None,
onerror: Callable[[Exception, Any, Any], None] | None = None,
) -> Callable[[Callable[..., _T]], Callable[..., _T]]: ...
@overload
def callback(
self,
cdecl: str | CType,
python_callable: Callable[..., _T],
error: Any = None,
onerror: Callable[[Exception, Any, Any], None] | None = None,
) -> Callable[..., _T]: ...
def getctype(self, cdecl: str | CType, replace_with: str = "") -> str: ...
@overload
def gc(self, cdata: CData, destructor: Callable[[CData], Any], size: int = 0) -> CData: ...
@overload
def gc(self, cdata: CData, destructor: None, size: int = 0) -> None: ...
def verify(self, source: str = "", tmpdir: str | None = None, **kwargs: Any) -> _cffi_backend.Lib: ...
# Technically exists on all OSs, but crashes on all but Windows. So we hide it in stubs
if sys.platform == "win32":
def getwinerror(self, code: int = -1) -> tuple[int, str] | None: ...
def addressof(self, cdata: CData, *fields_or_indexes: str | int) -> CData: ...
def include(self, ffi_to_include: FFI) -> None: ...
def new_handle(self, x: Any) -> CData: ...
def from_handle(self, x: CData) -> Any: ...
def release(self, x: CData) -> None: ...
def set_unicode(self, enabled_flag: bool) -> None: ...
def set_source(self, module_name: str, source: str, source_extension: str = ".c", **kwds: Any) -> None: ...
def set_source_pkgconfig(
self, module_name: str, pkgconfig_libs: list[str], source: str, source_extension: str = ".c", **kwds: Any
) -> None: ...
def distutils_extension(self, tmpdir: str = "build", verbose: bool = True) -> Extension: ...
def emit_c_code(self, filename: str) -> None: ...
def emit_python_code(self, filename: str) -> None: ...
def compile(self, tmpdir: str = ".", verbose: int = 0, target: str | None = None, debug: bool | None = None) -> str: ...
def init_once(self, func: Callable[[], Any], tag: Hashable) -> Any: ...
def METHOD_NAME(self, pysource: str) -> None: ...
def def_extern(self, *args: Any, **kwds: Any) -> None: ...
def list_types(self) -> tuple[list[str], list[str], list[str]]: ... |
repr | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import re
from PyQt5 import QtWidgets
import peacock
import mooseutils
from .plugins.ExodusPlugin import ExodusPlugin
class ExodusPluginManager(QtWidgets.QWidget, peacock.base.PluginManager):
def __init__(self, plugins=[]):
super(ExodusPluginManager, self).__init__(plugins=plugins, plugin_base=ExodusPlugin)
self.setSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
self.MainLayout = QtWidgets.QHBoxLayout(self)
self.LeftLayout = QtWidgets.QVBoxLayout()
self.RightLayout = QtWidgets.QVBoxLayout()
self.MainLayout.addLayout(self.LeftLayout)
self.MainLayout.addLayout(self.RightLayout)
self.setup()
self.LeftLayout.addStretch(1)
        # Set the width of the left-side widgets so that the VTK window gets the space
self.fixLayoutWidth('LeftLayout')
def METHOD_NAME(self):
"""
Build the python script.
"""
# Compile output from the plugins
output = dict()
for plugin in self._plugins.values():
for key, value in plugin.METHOD_NAME().items():
if key in output:
output[key] += value
else:
output[key] = value
# Add colorbar to window
if 'colorbar' in output:
output['window'][0] = 'window = chigger.RenderWindow(result, cbar)'
# Make import unique
mooseutils.unique_list(output['imports'], output['imports'])
# Apply the filters, if they exist
if 'filters' in output:
filters = []
for match in re.findall(r'^(\w+)\s*=', '\n'.join(output['filters']), flags=re.MULTILINE):
filters.append(match)
output['result'] += ['result.setOptions(filters=[{}])'.format(', '.join(filters))]
# Build the script
string = ''
for key in ['imports', 'camera', 'reader', 'filters', 'result', 'colorbar', 'window']:
if key in output:
string += '\n{}\n'.format('\n'.join(output.pop(key)))
# Error if keys exist, this means data is missing from the script
if output:
raise mooseutils.MooseException('The output data was not completely written, the following keys remain: {}'.format(str(output.keys())))
return string
def addToMainMenu(self, menubar):
exodus_menu = menubar.addMenu("&Results")
for plugin in self._all_plugins:
plugin.addToMenu(exodus_menu)
def main(size=None):
"""
Run window widget alone
"""
from peacock.ExodusViewer.ExodusPluginManager import ExodusPluginManager
from .plugins.VTKWindowPlugin import VTKWindowPlugin
from .plugins.FilePlugin import FilePlugin
from .plugins.BlockPlugin import BlockPlugin
from .plugins.GoldDiffPlugin import GoldDiffPlugin
from .plugins.ColorbarPlugin import ColorbarPlugin
from .plugins.MeshPlugin import MeshPlugin
from .plugins.BackgroundPlugin import BackgroundPlugin
from .plugins.ClipPlugin import ClipPlugin
from .plugins.ContourPlugin import ContourPlugin
from .plugins.OutputPlugin import OutputPlugin
from .plugins.CameraPlugin import CameraPlugin
from .plugins.MediaControlPlugin import MediaControlPlugin
plugins = [lambda: VTKWindowPlugin(size=size),
FilePlugin,
BlockPlugin,
MediaControlPlugin,
GoldDiffPlugin,
ColorbarPlugin,
MeshPlugin,
ClipPlugin,
ContourPlugin,
CameraPlugin,
BackgroundPlugin,
OutputPlugin]
widget = ExodusPluginManager(plugins=plugins)
main_window = QtWidgets.QMainWindow()
main_window.setCentralWidget(widget)
menubar = main_window.menuBar()
menubar.setNativeMenuBar(False)
widget.addToMainMenu(menubar)
main_window.show()
return widget, main_window
if __name__ == '__main__':
import sys
app = QtWidgets.QApplication(sys.argv)
from peacock.utils import Testing
filenames = Testing.get_chigger_input_list('mug_blocks_out.e', 'displace.e', 'vector_out.e', 'mesh_only.e')
widget, main_window = main()
widget.FilePlugin.onSetFilenames(filenames)
sys.exit(app.exec_()) |
reset | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2006-2021 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Babbage B.V., Bill Dengler,
# Julien Cochuyt
import re
from .commands import (
# Commands that are used in this file.
LangChangeCommand,
EndUtteranceCommand,
)
from .types import (
SpeechSequence,
logBadSequenceTypes,
GeneratorWithReturn,
)
from typing import (
Optional,
Generator,
Callable,
)
def _yieldIfNonEmpty(seq: SpeechSequence):
"""Helper method to yield the sequence if it is not None or empty."""
if seq:
yield seq
class SpeechWithoutPauses:
_pendingSpeechSequence: SpeechSequence
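	# Group 1 captures everything up to and including the last sentence-ending
	# '.', '!' or '?' (optionally followed by a closing quote/bracket and
	# whitespace); group 2 captures the trailing, still incomplete phrase.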
re_last_pause = re.compile(
r"^(.*(?<=[^\s.!?])[.!?][\"'”’)]?(?:\s+|$))(.*$)",
re.DOTALL | re.UNICODE
)
def __init__(
self,
speakFunc: Callable[[SpeechSequence], None]
):
"""
:param speakFunc: Function used by L{speakWithoutPauses} to speak. This will likely be speech.speak.
"""
self.speak = speakFunc
self.METHOD_NAME()
def METHOD_NAME(self):
self._pendingSpeechSequence = []
def speakWithoutPauses(
self,
speechSequence: Optional[SpeechSequence],
detectBreaks: bool = True
) -> bool:
"""
Speaks the speech sequences given over multiple calls,
only sending to the synth at acceptable phrase or sentence boundaries,
or when given None for the speech sequence.
@return: C{True} if something was actually spoken,
C{False} if only buffering occurred.
"""
speech = GeneratorWithReturn(self.getSpeechWithoutPauses(
speechSequence,
detectBreaks
))
for seq in speech:
self.speak(seq)
return speech.returnValue
def getSpeechWithoutPauses( # noqa: C901
self,
speechSequence: Optional[SpeechSequence],
detectBreaks: bool = True
) -> Generator[SpeechSequence, None, bool]:
"""
Generate speech sequences over multiple calls,
only returning a speech sequence at acceptable phrase or sentence boundaries,
or when given None for the speech sequence.
@return: The speech sequence that can be spoken without pauses. The 'return' for this generator function,
is a bool which indicates whether this sequence should be considered valid speech. Use
L{GeneratorWithReturn} to retain the return value. A generator is used because the previous
implementation had several calls to speech, this approach replicates that.
"""
if speechSequence is not None:
logBadSequenceTypes(speechSequence)
# Break on all explicit break commands
if detectBreaks and speechSequence:
speech = GeneratorWithReturn(self._detectBreaksAndGetSpeech(speechSequence))
yield from speech
return speech.returnValue # Don't fall through to flush / normal speech
if speechSequence is None: # Requesting flush
pending = self._flushPendingSpeech()
yield from _yieldIfNonEmpty(pending)
return bool(pending) # Don't fall through to handle normal speech
# Handling normal speech
speech = self._getSpeech(speechSequence)
yield from _yieldIfNonEmpty(speech)
return bool(speech)
def _detectBreaksAndGetSpeech(
self,
speechSequence: SpeechSequence
) -> Generator[SpeechSequence, None, bool]:
lastStartIndex = 0
sequenceLen = len(speechSequence)
gotValidSpeech = False
for index, item in enumerate(speechSequence):
if isinstance(item, EndUtteranceCommand):
if index > 0 and lastStartIndex < index:
subSequence = speechSequence[lastStartIndex:index]
yield from _yieldIfNonEmpty(
self._getSpeech(subSequence)
)
yield from _yieldIfNonEmpty(
self._flushPendingSpeech()
)
gotValidSpeech = True
lastStartIndex = index + 1
if lastStartIndex < sequenceLen:
subSequence = speechSequence[lastStartIndex:]
seq = self._getSpeech(subSequence)
gotValidSpeech = bool(seq)
yield from _yieldIfNonEmpty(seq)
return gotValidSpeech
def _flushPendingSpeech(self) -> SpeechSequence:
"""
@return: may be empty sequence
"""
# Place the last incomplete phrase in to finalSpeechSequence to be spoken now
pending = self._pendingSpeechSequence
self._pendingSpeechSequence = []
return pending
def _getSpeech(
self,
speechSequence: SpeechSequence
) -> SpeechSequence:
"""
@return: May be an empty sequence
"""
finalSpeechSequence: SpeechSequence = [] # To be spoken now
		pendingSpeechSequence: SpeechSequence = []  # To be saved off for speaking later
# Scan the given speech and place all completed phrases in finalSpeechSequence to be spoken,
# And place the final incomplete phrase in pendingSpeechSequence
for index in range(len(speechSequence) - 1, -1, -1):
item = speechSequence[index]
if isinstance(item, str):
m = self.re_last_pause.match(item)
if m:
before, after = m.groups()
if after:
pendingSpeechSequence.append(after)
if before:
finalSpeechSequence.extend(self._flushPendingSpeech())
finalSpeechSequence.extend(speechSequence[0:index])
finalSpeechSequence.append(before)
# Apply the last language change to the pending sequence.
# This will need to be done for any other speech change commands introduced in future.
for changeIndex in range(index - 1, -1, -1):
change = speechSequence[changeIndex]
if not isinstance(change, LangChangeCommand):
continue
pendingSpeechSequence.append(change)
break
break
else:
pendingSpeechSequence.append(item)
else:
pendingSpeechSequence.append(item)
if pendingSpeechSequence:
pendingSpeechSequence.reverse()
self._pendingSpeechSequence.extend(pendingSpeechSequence)
return finalSpeechSequence |
init hyper parameters | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import numpy as np
#from net import DIENLayer, StaticDIENLayer
from net import StaticDIENLayer
class StaticModel():
def __init__(self, config):
self.cost = None
self.infer_target_var = None
self.config = config
self.METHOD_NAME()
def METHOD_NAME(self):
self.is_distributed = False
self.distributed_embedding = False
if self.config.get("hyper_parameters.distributed_embedding", 0) == 1:
self.distributed_embedding = True
self.item_emb_size = self.config.get("hyper_parameters.item_emb_size",
64)
self.cat_emb_size = self.config.get("hyper_parameters.cat_emb_size",
64)
self.act = self.config.get("hyper_parameters.act", "sigmoid")
self.is_sparse = self.config.get("hyper_parameters.is_sparse", False)
self.use_DataLoader = self.config.get(
"hyper_parameters.use_DataLoader", False)
self.item_count = self.config.get("hyper_parameters.item_count", 63001)
self.cat_count = self.config.get("hyper_parameters.cat_count", 801)
self.learning_rate_base_lr = self.config.get(
"hyper_parameters.optimizer.learning_rate_base_lr")
def create_feeds(self, is_infer=False):
seq_len = -1
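        # A value of -1 keeps the history-sequence axis dynamic, so batches with
        # different history lengths can be fed into the static graph.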
self.data_var = []
hist_item_seq = paddle.static.data(
name="hist_item_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(hist_item_seq)
hist_cat_seq = paddle.static.data(
name="hist_cat_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(hist_cat_seq)
target_item = paddle.static.data(
name="target_item", shape=[None], dtype="int64")
self.data_var.append(target_item)
target_cat = paddle.static.data(
name="target_cat", shape=[None], dtype="int64")
self.data_var.append(target_cat)
label = paddle.static.data(
name="label", shape=[-1, 1], dtype="float32")
self.data_var.append(label)
mask = paddle.static.data(
name="mask", shape=[None, seq_len, 1], dtype="float32")
self.data_var.append(mask)
target_item_seq = paddle.static.data(
name="target_item_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(target_item_seq)
target_cat_seq = paddle.static.data(
name="target_cat_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(target_cat_seq)
neg_hist_item_seq = paddle.static.data(
name="neg_hist_item_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(neg_hist_item_seq)
neg_hist_cat_seq = paddle.static.data(
name="neg_hist_cat_seq", shape=[None, seq_len], dtype="int64")
self.data_var.append(neg_hist_cat_seq)
train_inputs = [hist_item_seq] + [hist_cat_seq] + [target_item] + [
target_cat
] + [label] + [mask] + [target_item_seq] + [target_cat_seq] + [
neg_hist_item_seq
] + [neg_hist_cat_seq]
return train_inputs
def net(self, inputs, is_infer=False):
self.hist_item_seq = inputs[0]
self.hist_cat_seq = inputs[1]
self.target_item = inputs[2]
self.target_cat = inputs[3]
self.label = inputs[4].reshape([-1, 1])
self.mask = inputs[5]
self.target_item_seq = inputs[6]
self.target_cat_seq = inputs[7]
self.neg_hist_item_seq = inputs[8] # neg item sampling for aux loss
self.neg_hist_cat_seq = inputs[9] # neg cat sampling for aux loss
dien_model = StaticDIENLayer(
self.item_emb_size, self.cat_emb_size, self.act, self.is_sparse,
self.use_DataLoader, self.item_count, self.cat_count)
logit, aux_loss = dien_model.forward(
self.hist_item_seq, self.hist_cat_seq, self.target_item,
self.target_cat, self.label, self.mask, self.target_item_seq,
self.target_cat_seq, self.neg_hist_item_seq, self.neg_hist_cat_seq)
avg_loss = paddle.nn.functional.binary_cross_entropy_with_logits(
logit, self.label, reduction='mean')
self._cost = aux_loss + avg_loss
self.predict = paddle.nn.functional.sigmoid(logit)
predict_2d = paddle.concat([1 - self.predict, self.predict], 1)
label_int = paddle.cast(self.label, 'int64')
auc, batch_auc, _ = paddle.static.auc(input=predict_2d,
label=label_int,
slide_steps=0)
if is_infer:
fetch_dict = {'auc': auc}
return fetch_dict
fetch_dict = {'auc': auc, 'cost': self._cost}
return fetch_dict
def create_optimizer(self, strategy=None):
# optimizer = paddle.optimizer.Adam(learning_rate=self.learning_rate)
# if strategy != None:
# import paddle.distributed.fleet as fleet
# optimizer = fleet.distributed_optimizer(optimizer, strategy)
# optimizer.minimize(self._cost)
boundaries = [410000]
values = [self.learning_rate_base_lr, 0.2]
optimizer = paddle.optimizer.SGD(
learning_rate=paddle.optimizer.lr.PiecewiseDecay(
boundaries=boundaries, values=values))
if strategy != None:
import paddle.distributed.fleet as fleet
optimizer = fleet.distributed_optimizer(optimizer, strategy)
optimizer.minimize(self._cost)
return optimizer
def infer_net(self, input):
return self.net(input, is_infer=True) |
find dep path | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""
This module is designed to construct and install the speechSpyGlobalPlugin, speechSpySynthDriver, and
NVDA config before NVDA is started by the system tests.
"""
from os.path import join as _pJoin
from .getLib import _getLib
import sys
from typing import Optional
# Imported for type information
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.OperatingSystem import OperatingSystem as _OpSysLib
from robot.libraries.Process import Process as _Process
builtIn: BuiltIn = BuiltIn()
opSys: _OpSysLib = _getLib('OperatingSystem')
process: _Process = _getLib('Process')
def METHOD_NAME(depFileName, searchPaths):
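	"""Locate a python dependency (a module file or a package directory) on the given search paths."""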
import os
for path in searchPaths:
filePath = _pJoin(path, f"{depFileName}.py")
if os.path.isfile(filePath):
return filePath
elif os.path.isfile(_pJoin(path, depFileName, "__init__.py")):
return _pJoin(path, depFileName)
raise AssertionError("Unable to find required system test spy dependency: {}".format(depFileName))
def _installSystemTestSpyToScratchPad(repoRoot: str, scratchPadDir: str):
""" Assembles the required files for the system test spy.
Most notably this includes:
- speechSpyGlobalPlugin - The actual remote Robot library used to get information out of NVDA
- speechSpySynthDriver - A synth driver that captures and caches speech to provide to speechSpyGlobalPlugin
"""
# The globalPlugin will modify the python path to include to this sub dir
spyPackageLibsDir = _pJoin(scratchPadDir, "globalPlugins", "speechSpyGlobalPlugin", "libs")
opSys.create_directory(spyPackageLibsDir)
# copy in required dependencies for global plugin
_copyPythonLibs(
pythonImports=[ # relative to the python path
r"robotremoteserver",
],
libsDest=spyPackageLibsDir
)
try:
opSys.directory_should_exist(_pJoin(spyPackageLibsDir, "xmlrpc"))
except AssertionError:
# installed copies of NVDA <= 2020.4 don't copy this over
_copyPythonLibs(
pythonImports=[ # relative to the python path
"xmlrpc",
],
libsDest=spyPackageLibsDir
)
# install the global plugin
# Despite duplication, specify full paths for clarity.
opSys.copy_file(
_pJoin(repoRoot, "tests", "system", "libraries", "SystemTestSpy", "speechSpyGlobalPlugin.py"),
_pJoin(scratchPadDir, "globalPlugins", "speechSpyGlobalPlugin", "__init__.py")
)
opSys.copy_file(
_pJoin(repoRoot, "tests", "system", "libraries", "SystemTestSpy", "blockUntilConditionMet.py"),
_pJoin(scratchPadDir, "globalPlugins", "speechSpyGlobalPlugin")
)
# install the test spy speech synth
opSys.copy_file(
_pJoin(repoRoot, "tests", "system", "libraries", "SystemTestSpy", "speechSpySynthDriver.py"),
_pJoin(scratchPadDir, "synthDrivers", "speechSpySynthDriver.py")
)
def _copyPythonLibs(pythonImports, libsDest):
import os
searchPaths = sys.path
for lib in pythonImports:
libSource = METHOD_NAME(lib, searchPaths)
if os.path.isdir(libSource):
opSys.copy_directory(libSource, libsDest)
elif os.path.isfile(libSource):
opSys.copy_file(libSource, libsDest)
def setupProfile(
repoRoot: str,
settingsFileName: str,
stagingDir: str,
gesturesFileName: Optional[str] = None,
):
builtIn.log("Copying files into NVDA profile", level='DEBUG')
opSys.copy_file(
# Despite duplication, specify full paths for clarity.
_pJoin(repoRoot, "tests", "system", "nvdaSettingsFiles", settingsFileName),
_pJoin(stagingDir, "nvdaProfile", "nvda.ini")
)
if gesturesFileName is not None:
opSys.copy_file(
# Despite duplication, specify full paths for clarity.
_pJoin(repoRoot, "tests", "system", "nvdaSettingsFiles", gesturesFileName),
_pJoin(stagingDir, "nvdaProfile", "gestures.ini")
)
# create a package to use as the globalPlugin
_installSystemTestSpyToScratchPad(
repoRoot,
_pJoin(stagingDir, "nvdaProfile", "scratchpad")
)
def teardownProfile(stagingDir: str):
""" Cleans up the profile directory
@todo: this could have an option to preserve the profile for debugging purposes.
@param stagingDir: Where the profile was constructed
"""
builtIn.log("Cleaning up NVDA profile", level='DEBUG')
opSys.remove_directory(
_pJoin(stagingDir, "nvdaProfile"),
recursive=True
) |
hybridsim template | # -*- coding: utf-8 -*-
from sst_unittest import *
from sst_unittest_support import *
import os
import threading
################################################################################
# Code to support a single instance module initialize, must be called setUp method
module_init = 0
module_sema = threading.Semaphore()
def initializeTestModule_SingleInstance(class_inst):
global module_init
global module_sema
module_sema.acquire()
if module_init != 1:
try:
# Put your single instance Init Code Here
pass
except:
pass
module_init = 1
module_sema.release()
################################################################################
################################################################################
################################################################################
class testcase_memHierarchy_hybridsim(SSTTestCase):
def initializeClass(self, testName):
super(type(self), self).initializeClass(testName)
# Put test based setup code here. it is called before testing starts
# NOTE: This method is called once for every test
def setUp(self):
super(type(self), self).setUp()
initializeTestModule_SingleInstance(self)
# Put test based setup code here. it is called once before every test
def tearDown(self):
# Put test based teardown code here. it is called once after every test
super(type(self), self).tearDown()
#####
@skip_on_sstsimulator_conf_empty_str("HYBRIDSIM", "LIBDIR", "HYBRIDSIM is not included as part of this build")
def test_hybridsim_hybridsim(self):
self.METHOD_NAME("hybridsim")
#####
def METHOD_NAME(self, testcase, testtimeout=120):
# Get the path to the test files
test_path = self.get_testsuite_dir()
outdir = self.get_test_output_run_dir()
tmpdir = self.get_test_output_tmp_dir()
# Set the Path of the HybridSim Lib into the Env so that the SDL file
# can pull it
lib_dir = sstsimulator_conf_get_value_str("HYBRIDSIM", "LIBDIR", "LIBDIR_UNDEFINED")
os.environ['SST_HYBRIDSIM_LIB_DIR'] = lib_dir
# Set the various file paths
testDataFileName=("test_{0}".format(testcase))
sdlfile = "{0}/{1}.py".format(test_path, testDataFileName)
reffile = "{0}/refFiles/{1}.out".format(test_path, testDataFileName)
outfile = "{0}/{1}.out".format(outdir, testDataFileName)
errfile = "{0}/{1}.err".format(outdir, testDataFileName)
mpioutfiles = "{0}/{1}.testfile".format(outdir, testDataFileName)
log_debug("testcase = {0}".format(testcase))
log_debug("sdl file = {0}".format(sdlfile))
log_debug("ref file = {0}".format(reffile))
log_debug("out file = {0}".format(outfile))
log_debug("err file = {0}".format(errfile))
# Run SST in the tests directory
self.run_sst(sdlfile, outfile, errfile, set_cwd=test_path,
mpi_out_files=mpioutfiles, timeout_sec=testtimeout)
testing_remove_component_warning_from_file(outfile)
# NOTE: THE PASS / FAIL EVALUATIONS ARE PORTED FROM THE SQE BAMBOO
# BASED testSuite_XXX.sh THESE SHOULD BE RE-EVALUATED BY THE
# DEVELOPER AGAINST THE LATEST VERSION OF SST TO SEE IF THE
# TESTS & RESULT FILES ARE STILL VALID
# Perform the tests
        # This test uses DRAMSim2, which dumps data to the error output, so we
        # cannot test for an empty errfile.
if os_test_file(errfile, "-s"):
log_testing_note("hybridsim test {0} has a Non-Empty Error File {1}".format(testDataFileName, errfile))
cmp_result = testing_compare_sorted_diff(testcase, outfile, reffile)
if not cmp_result:
diffdata = testing_get_diff_data(testcase)
log_failure(diffdata)
self.assertTrue(cmp_result, "Diffed compared Output file {0} does not match Reference File {1}".format(outfile, reffile))
|
pmts submitted by date | from collections import defaultdict
from datetime import date, datetime, timedelta
from typing import Dict, Iterator
from django.db.models import Q
from django.utils.functional import cached_property
from django.utils.translation import gettext as _
from dateutil.rrule import DAILY, FR, MO, SA, TH, TU, WE, rrule
from corehq.apps.reports.datatables import DataTablesColumn, DataTablesHeader
from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.form_processor.models import XFormInstance
from custom.abt.reports.filters_2020 import (
LevelFourFilter,
LevelOneFilter,
LevelThreeFilter,
LevelTwoFilter,
)
from custom.abt.reports.fixture_utils import get_locations
INDICATORS_FORM_XMLNS = 'http://openrosa.org/formdesigner/00CEB41B-2967-4370-9EA3-BFD9BD7AF785'
class LatePmt2020Report(GenericTabularReport, CustomProjectReport, DatespanMixin):
report_title = "Late PMT"
slug = 'late_pmt_2020'
name = "Late PMT"
languages = (
'en',
'fra',
'por'
)
fields = [
DatespanFilter,
LevelOneFilter,
LevelTwoFilter,
LevelThreeFilter,
LevelFourFilter,
]
@property
def report_config(self):
return {
'domain': self.domain,
'startdate': self.startdate,
'enddate': self.enddate,
'level_1': self.request.GET.get('level_1', ''),
'level_2': self.request.GET.get('level_2', ''),
'level_3': self.request.GET.get('level_3', ''),
'level_4': self.request.GET.get('level_4', ''),
}
@property
def startdate(self):
return self.request.datespan.startdate
@property
def enddate(self):
return self.request.datespan.end_of_end_day
@property
def headers(self):
return DataTablesHeader(
DataTablesColumn(_("Missing Report Date")),
DataTablesColumn(_("Name")),
DataTablesColumn(_("Country")),
DataTablesColumn(_("Level 1")),
DataTablesColumn(_("Level 2")),
DataTablesColumn(_("Level 3")),
DataTablesColumn(_("Level 4")),
DataTablesColumn(_("Submission Status")),
)
@cached_property
    def METHOD_NAME(self) -> Dict[date, set]:
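        """Map each report date to the set of location ids that submitted a PMT form on that date."""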
pmts_submitted = defaultdict(set)
forms = iter_forms_by_xmlns_received_on(
self.domain, INDICATORS_FORM_XMLNS,
midnight_starting(self.startdate),
midnight_ending(self.enddate),
)
for form in forms:
location_id = form.form_data['location_operation_site']
pmts_submitted[form.received_on.date()].add(location_id)
return pmts_submitted
@property
def rows(self):
def _to_report_format(date_, location, error_msg):
return [
date_.strftime("%Y-%m-%d"),
location.name,
location.country,
location.level_1,
location.level_2,
location.level_3,
location.level_4,
error_msg
]
error_msg = _('Incorrect or no PMT data submitted')
dates = rrule(
DAILY,
dtstart=self.startdate,
until=self.enddate,
byweekday=(MO, TU, WE, TH, FR, SA)
)
rows = []
for date_ in dates:
for location in get_locations(self.domain, self.report_config):
pmt_submitted = location.id in self.METHOD_NAME[date_.date()]
if not pmt_submitted:
rows.append(_to_report_format(date_, location, error_msg))
return rows
def iter_forms_by_xmlns_received_on(
domain: str,
xmlns: str,
start_datetime: datetime,
end_datetime: datetime,
) -> Iterator[XFormInstance]:
"""
Iterates form submissions of a given ``xmlns`` from
``start_datetime`` (incl) to ``end_datetime`` (excl).
"""
# ``start_datetime`` is inclusive and ``end_datetime`` is
# exclusive so that a form submitted at midnight will be
# returned for the day that is starting, not the day that is
# ending. That seems to be intuitive.
from corehq.sql_db.util import paginate_query_across_partitioned_databases
q_expr = (
Q(domain=domain)
& Q(state=XFormInstance.NORMAL)
& Q(xmlns=xmlns)
& Q(received_on__gte=start_datetime, received_on__lt=end_datetime)
)
return paginate_query_across_partitioned_databases(
XFormInstance, q_expr, load_source='forms_by_xmlns_received_on'
)
def midnight_starting(
date_: date
) -> datetime:
"""
Returns the start of the day
>>> jan_1 = date(2000, 1, 1)
>>> new_year = midnight_starting(jan_1)
>>> new_year.isoformat()
'2000-01-01T00:00:00'
"""
return datetime(date_.year, date_.month, date_.day)
def midnight_ending(
date_: date
) -> datetime:
"""
Returns the end of the day
>>> dec_31 = date(1999, 12, 31)
>>> party_like_its = midnight_ending(dec_31)
>>> party_like_its.isoformat()
'2000-01-01T00:00:00'
"""
return midnight_starting(date_ + timedelta(days=1)) |
get project arg | class ArgumentsTo(object):
KEY = ''
def __init__(self, args):
self.args = args
def build(self):
raise NotImplementedError
@property
def database_name(self):
return self.args['database'].name
def get_database_arg(self):
return "Database: {}".format(self.database_name)
def get_environment_arg(self):
return "Environment: {}".format(self.args['environment'])
def get_plan_arg(self):
return "Plan: {}".format(self.args['plan'])
def METHOD_NAME(self):
return "Project: {}".format(self.args['project'])
def get_user_arg(self):
return "User: {}".format(self.args['user'])
def get_clone_arg(self):
return "Clone: {}".format(self.args['clone_name'])
class ArgumentsToCreateDatabase(ArgumentsTo):
KEY = 'notification.tasks.create_database'
def build(self):
return [
self.get_database_arg(),
self.get_environment_arg(),
self.METHOD_NAME(),
self.get_plan_arg(),
]
@property
def database_name(self):
return self.args['name']
class ArgumentsToResizeDatabase(ArgumentsTo):
KEY = 'notification.tasks.resize_database'
def build(self):
return [
self.get_database_arg(),
"New VM Offering: {}".format(self.args['offering']),
]
class ArgumentsToUpgradeDatabase(ArgumentsTo):
KEY = 'notification.tasks.upgrade_database'
def build(self):
return [
self.get_database_arg(),
"Target plan: {}".format(
self.args['database'].databaseinfra.plan.engine_equivalent_plan
),
]
class ArgumentsToUpgradeDatabasePatch(ArgumentsTo):
KEY = 'notification.tasks.upgrade_database_patch'
def build(self):
return [
self.get_database_arg(),
"New patch: {}".format(self.args['patch']),
]
class ArgumentsToReinstallVM(ArgumentsTo):
KEY = 'notification.tasks.reinstall_vm'
def build(self):
return [
self.get_database_arg(),
"Instance: {}".format(
self.args['instance']
),
]
class ArgumentsToDiskResize(ArgumentsTo):
KEY = 'notification.tasks.database_disk_resize'
def build(self):
return [
self.get_database_arg(),
"New Disk Offering: {}".format(self.args['disk_offering']),
]
class ArgumentsToRestoreSnapshot(ArgumentsTo):
KEY = 'backup.tasks.restore_snapshot'
def build(self):
return [
self.get_database_arg(),
"Description: Restoring to an older version. It will finish soon.",
]
class ArgumentsToDestroyDatabase(ArgumentsTo):
KEY = 'notification.tasks.destroy_database'
def build(self):
return [
self.get_database_arg(),
self.get_user_arg(),
]
class ArgumentsToCloneDatabase(ArgumentsTo):
KEY = 'notification.tasks.clone_database'
def build(self):
return [
self.get_database_arg(),
self.get_clone_arg(),
self.get_environment_arg(),
self.get_plan_arg(),
]
@property
def database_name(self):
return self.args['origin_database'].name
class ArgumentsToAnalyzeDatabases(ArgumentsTo):
KEY = 'dbaas_services.analyzing.tasks.analyze.analyze_databases'
def build(self):
return [
"Description: Analyzing all databases",
]
class ArgumentsToUpgradeMongo24To30(ArgumentsTo):
KEY = 'notification.tasks.upgrade_mongodb_24_to_30'
def build(self):
return [
self.get_database_arg(),
]
class ArgumentsToUnbindAddress(ArgumentsTo):
KEY = 'dbaas_aclapi.tasks.unbind_address_on_database'
def build(self):
return [
"Removing Binds For: {}".format(self.args['database_bind']),
self.get_database_arg(),
]
@property
def database_name(self):
return self.args['database_bind'].database.name
class ArgumentsToBindAddress(ArgumentsTo):
KEY = 'dbaas_aclapi.tasks.bind_address_on_database'
def build(self):
return [
"Creating Binds For: {}".format(self.args['database_bind']),
self.get_database_arg(),
]
@property
def database_name(self):
return self.args['database_bind'].database.name
class ArgumentsToRemoveReadOnlyInstance(ArgumentsTo):
KEY = 'notification.tasks.remove_readonly_instance'
def build(self):
return [
"Removing read only instance from {}".format(self.get_database_arg()),
"Instance: {}".format(self.args['instance'])
] |
pytest pycollect makeitem | try:
# installed by bootstrap.py
import sqla_plugin_base as plugin_base
except ImportError:
# assume we're a package, use traditional import
from . import plugin_base
import pytest
import argparse
import inspect
import collections
import os
try:
import xdist # noqa
has_xdist = True
except ImportError:
has_xdist = False
def pytest_addoption(parser):
group = parser.getgroup("sqlalchemy")
def make_option(name, **kw):
callback_ = kw.pop("callback", None)
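        # argparse has no optparse-style "callback" keyword, so emulate it with
        # a custom Action whose __call__ invokes the supplied callable.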
if callback_:
class CallableAction(argparse.Action):
def __call__(self, parser, namespace,
values, option_string=None):
callback_(option_string, values, parser)
kw["action"] = CallableAction
group.addoption(name, **kw)
plugin_base.setup_options(make_option)
plugin_base.read_config()
def pytest_configure(config):
if hasattr(config, "slaveinput"):
plugin_base.restore_important_follower_config(config.slaveinput)
plugin_base.configure_follower(
config.slaveinput["follower_ident"]
)
if config.option.write_idents:
with open(config.option.write_idents, "a") as file_:
file_.write(config.slaveinput["follower_ident"] + "\n")
else:
if config.option.write_idents and \
os.path.exists(config.option.write_idents):
os.remove(config.option.write_idents)
plugin_base.pre_begin(config.option)
plugin_base.set_coverage_flag(bool(getattr(config.option,
"cov_source", False)))
plugin_base.set_skip_test(pytest.skip.Exception)
def pytest_sessionstart(session):
plugin_base.post_begin()
def pytest_sessionfinish(session):
plugin_base.final_process_cleanup()
if has_xdist:
import uuid
def pytest_configure_node(node):
# the master for each node fills slaveinput dictionary
# which pytest-xdist will transfer to the subprocess
plugin_base.memoize_important_follower_config(node.slaveinput)
node.slaveinput["follower_ident"] = "test_%s" % uuid.uuid4().hex[0:12]
from sqlalchemy.testing import provision
provision.create_follower_db(node.slaveinput["follower_ident"])
def pytest_testnodedown(node, error):
from sqlalchemy.testing import provision
provision.drop_follower_db(node.slaveinput["follower_ident"])
def pytest_collection_modifyitems(session, config, items):
# look for all those classes that specify __backend__ and
# expand them out into per-database test cases.
# this is much easier to do within pytest_pycollect_makeitem, however
# pytest is iterating through cls.__dict__ as makeitem is
# called which causes a "dictionary changed size" error on py3k.
# I'd submit a pullreq for them to turn it into a list first, but
# it's to suit the rather odd use case here which is that we are adding
# new classes to a module on the fly.
rebuilt_items = collections.defaultdict(list)
items[:] = [
item for item in
items if isinstance(item.parent, pytest.Instance)
and not item.parent.parent.name.startswith("_")]
test_classes = set(item.parent for item in items)
for test_class in test_classes:
for sub_cls in plugin_base.generate_sub_tests(
test_class.cls, test_class.parent.module):
if sub_cls is not test_class.cls:
list_ = rebuilt_items[test_class.cls]
for inst in pytest.Class(
sub_cls.__name__,
parent=test_class.parent.parent).collect():
list_.extend(inst.collect())
newitems = []
for item in items:
if item.parent.cls in rebuilt_items:
newitems.extend(rebuilt_items[item.parent.cls])
rebuilt_items[item.parent.cls][:] = []
else:
newitems.append(item)
# seems like the functions attached to a test class aren't sorted already?
# is that true and why's that? (when using unittest, they're sorted)
items[:] = sorted(newitems, key=lambda item: (
item.parent.parent.parent.name,
item.parent.parent.name,
item.name
))
def METHOD_NAME(collector, name, obj):
if inspect.isclass(obj) and plugin_base.want_class(obj):
return pytest.Class(name, parent=collector)
elif inspect.isfunction(obj) and \
isinstance(collector, pytest.Instance) and \
plugin_base.want_method(collector.cls, obj):
return pytest.Function(name, parent=collector)
else:
return []
_current_class = None
def pytest_runtest_setup(item):
# here we seem to get called only based on what we collected
# in pytest_collection_modifyitems. So to do class-based stuff
# we have to tear that out.
global _current_class
if not isinstance(item, pytest.Function):
return
# ... so we're doing a little dance here to figure it out...
if _current_class is None:
class_setup(item.parent.parent)
_current_class = item.parent.parent
# this is needed for the class-level, to ensure that the
# teardown runs after the class is completed with its own
# class-level teardown...
def finalize():
global _current_class
class_teardown(item.parent.parent)
_current_class = None
item.parent.parent.addfinalizer(finalize)
test_setup(item)
def pytest_runtest_teardown(item):
# ...but this works better as the hook here rather than
# using a finalizer, as the finalizer seems to get in the way
# of the test reporting failures correctly (you get a bunch of
# py.test assertion stuff instead)
test_teardown(item)
def test_setup(item):
plugin_base.before_test(item, item.parent.module.__name__,
item.parent.cls, item.name)
def test_teardown(item):
plugin_base.after_test(item)
def class_setup(item):
plugin_base.start_test_class(item.cls)
def class_teardown(item):
plugin_base.stop_test_class(item.cls) |
real extract | import re
from .common import InfoExtractor
from ..utils import (
int_or_none,
unified_strdate,
determine_ext,
join_nonempty,
update_url_query,
)
class DisneyIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr|channel\.de)|(?:starwars|marvelkids)\.com))/(?:(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})|(?:[^/]+/)?(?P<display_id>[^/?#]+))'''
_TESTS = [{
# Disney.EmbedVideo
'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977',
'info_dict': {
'id': '545ed1857afee5a0ec239977',
'ext': 'mp4',
'title': 'Moana - Trailer',
'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7',
'upload_date': '20170112',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
# Grill.burger
'url': 'http://www.starwars.com/video/rogue-one-a-star-wars-story-intro-featurette',
'info_dict': {
'id': '5454e9f4e9804a552e3524c8',
'ext': 'mp4',
'title': '"Intro" Featurette: Rogue One: A Star Wars Story',
'upload_date': '20170104',
'description': 'Go behind-the-scenes of Rogue One: A Star Wars Story in this featurette with Director Gareth Edwards and the cast of the film.',
},
'params': {
# m3u8 download
'skip_download': True,
}
}, {
'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2',
'only_matching': True,
}, {
'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114',
'only_matching': True,
}, {
'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d',
'only_matching': True,
}, {
'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/embed/522900d2ced3c565e4cc0677',
'only_matching': True,
}, {
'url': 'http://spiderman.marvelkids.com/videos/contest-of-champions-part-four-clip-1',
'only_matching': True,
}, {
'url': 'http://disneyjunior.en.disneyme.com/dj/watch-my-friends-tigger-and-pooh-promo',
'only_matching': True,
}, {
'url': 'http://disneychannel.de/sehen/soy-luna-folge-118-5518518987ba27f3cc729268',
'only_matching': True,
}, {
'url': 'http://disneyjunior.disney.com/galactech-the-galactech-grab-galactech-an-admiral-rescue',
'only_matching': True,
}]
def METHOD_NAME(self, url):
domain, video_id, display_id = self._match_valid_url(url).groups()
if not video_id:
webpage = self._download_webpage(url, display_id)
grill = re.sub(r'"\s*\+\s*"', '', self._search_regex(
r'Grill\.burger\s*=\s*({.+})\s*:',
webpage, 'grill data'))
page_data = next(s for s in self._parse_json(grill, display_id)['stack'] if s.get('type') == 'video')
video_data = page_data['data'][0]
else:
webpage = self._download_webpage(
'http://%s/embed/%s' % (domain, video_id), video_id)
page_data = self._parse_json(self._search_regex(
r'Disney\.EmbedVideo\s*=\s*({.+});',
webpage, 'embed data'), video_id)
video_data = page_data['video']
for external in video_data.get('externals', []):
if external.get('source') == 'vevo':
return self.url_result('vevo:' + external['data_id'], 'Vevo')
video_id = video_data['id']
title = video_data['title']
formats = []
for flavor in video_data.get('flavors', []):
flavor_format = flavor.get('format')
flavor_url = flavor.get('url')
if not flavor_url or not re.match(r'https?://', flavor_url) or flavor_format == 'mp4_access':
continue
tbr = int_or_none(flavor.get('bitrate'))
if tbr == 99999:
# wrong ks(Kaltura Signature) causes 404 Error
flavor_url = update_url_query(flavor_url, {'ks': ''})
m3u8_formats = self._extract_m3u8_formats(
flavor_url, video_id, 'mp4',
m3u8_id=flavor_format, fatal=False)
for f in m3u8_formats:
# Apple FairPlay
if '/fpshls/' in f['url']:
continue
formats.append(f)
continue
ext = determine_ext(flavor_url)
if flavor_format == 'applehttp' or ext == 'm3u8':
ext = 'mp4'
width = int_or_none(flavor.get('width'))
height = int_or_none(flavor.get('height'))
formats.append({
'format_id': join_nonempty(flavor_format, tbr),
'url': flavor_url,
'width': width,
'height': height,
'tbr': tbr,
'ext': ext,
'vcodec': 'none' if (width == 0 and height == 0) else None,
})
if not formats and video_data.get('expired'):
self.raise_no_formats(
'%s said: %s' % (self.IE_NAME, page_data['translations']['video_expired']),
expected=True)
subtitles = {}
for caption in video_data.get('captions', []):
caption_url = caption.get('url')
caption_format = caption.get('format')
if not caption_url or caption_format.startswith('unknown'):
continue
subtitles.setdefault(caption.get('language', 'en'), []).append({
'url': caption_url,
'ext': {
'webvtt': 'vtt',
}.get(caption_format, caption_format),
})
return {
'id': video_id,
'title': title,
'description': video_data.get('description') or video_data.get('short_desc'),
'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'),
'duration': int_or_none(video_data.get('duration_sec')),
'upload_date': unified_strdate(video_data.get('publish_date')),
'formats': formats,
'subtitles': subtitles,
} |
find copyright | #!/usr/bin/env python3
# Copyright 2021-2023 The Mumble Developers. All rights reserved.
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file at the root of the
# Mumble source tree or at <https://www.mumble.info/LICENSE>.
import argparse
import os
import platform
import subprocess
import datetime
from string import Template
import re
def cmd(args):
shell = platform.system() == 'Windows'
p = subprocess.Popen(args, shell=shell, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception('cmd(): {0} failed with status {1}: {2}'.format(args, p.returncode, stderr))
return stdout.decode('utf-8')
licenseHeaderTemplate = Template(
"""${commentChar} Copyright ${copyright} The Mumble Developers. All rights reserved.
${commentChar} Use of this source code is governed by a BSD-style license
${commentChar} that can be found in the LICENSE file at the root of the
${commentChar} Mumble source tree or at <https://www.mumble.info/LICENSE>.""")
blackListFiles = []
class InvalidFileError(Exception):
pass
class LicenseNotFoundError(Exception):
pass
def generateHeaderForFile(filePath):
if filePath in blackListFiles:
raise InvalidFileError("File is blacklisted: \"" + filePath + "\"")
fileName = os.path.basename(filePath)
extension = os.path.splitext(fileName)[-1]
commentChar = None
if extension in [".cpp", ".c", ".h", ".hpp", ".cs", ".ice", ".proto", ".mm", ".vs", ".m", ".ps", ".hlsl", ".fx"]:
commentChar = "//"
elif extension in [".bash", ".sh", ".py", ".yaml", ".yml", ".ps1", ".pl", ".cmake", ".rb", ".init"] or fileName == "CMakeLists.txt":
commentChar = "#"
elif extension in [".bat", ".cmd"]:
commentChar = "::"
if commentChar is None:
raise InvalidFileError("Can't handle extension: \"" + extension + "\"")
# Get file's creation date
creationDateStr = cmd(["git", "log", "--diff-filter=A", "--format=%ci", "-1", "--", filePath]).strip()
if not creationDateStr:
raise InvalidFileError("File not in git index: \"" + filePath + "\"")
# We only need the day, month and year
creationDate = datetime.datetime.fromisoformat(creationDateStr.split()[0])
copyright = None
if creationDate.year == datetime.datetime.today().year:
copyright = str(creationDate.year)
else:
copyright = str(creationDate.year) + "-" + str(datetime.datetime.today().year)
return licenseHeaderTemplate.substitute(commentChar=commentChar, copyright=copyright)
def METHOD_NAME(content, pattern):
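    """Return the (start, end) span of the license header in content, or raise LicenseNotFoundError."""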
match = re.search(pattern, content)
if match:
return (match.start(), match.end())
else:
raise LicenseNotFoundError()
def exclude(path, excludes):
for current in excludes:
if path.startswith(current):
return True
return False
def main():
parser = argparse.ArgumentParser(description="Updates (and adds) license headers of the Mumble source files")
parser.add_argument("-a", "--add-new", action="store_true", default=False, help="Also add license headers to files that did not contain one before")
args = parser.parse_args()
scriptDir = os.path.dirname(os.path.realpath(__file__))
rootDir = os.path.realpath(os.path.join(scriptDir, ".."))
excludePaths = ["build", "docs", "3rdparty", "3rdPartylLicenses", "themes", ".git", "man"]
for i in range(len(excludePaths)):
excludePaths[i] = os.path.join(rootDir, excludePaths[i])
for i in range(len(blackListFiles)):
blackListFiles[i] = os.path.join(rootDir, blackListFiles[i])
# Convert the license template into a pattern that matches the license declaration inside the source files
licensePattern = re.compile(licenseHeaderTemplate.substitute(copyright="\d+(-\d+)?", commentChar=".+?").strip().replace("\n", "\\r?\\n").replace(" ",
"\\s*"), re.IGNORECASE)
modifiedFiles = 0
for root, dirs, files in os.walk(rootDir, topdown = True):
for currentFile in files:
fullPath = os.path.join(root, currentFile)
if exclude(fullPath, excludePaths):
continue
try:
generatedCopyright = generateHeaderForFile(fullPath)
fileHandle = open(fullPath, "r")
fileContent = fileHandle.read()
try:
start,end = METHOD_NAME(fileContent, licensePattern)
except LicenseNotFoundError:
# no license yet
if not args.add_new:
print("No license header found for", fullPath)
continue
else:
print("Adding license header to", fullPath)
if not fileContent.startswith("#!"):
# File does not start with shebang -> write license to the top
start = 0
end = 0
generatedCopyright += "\n\n"
else:
# File starts with shebang -> append license after the first line (and add
# an extra blank line)
start = fileContent.find("\n") + 1
assert start > 0
end = start
generatedCopyright = "\n" + generatedCopyright + "\n\n"
modifiedContent = fileContent[ : start] + generatedCopyright + fileContent[end : ]
if modifiedContent != fileContent:
fileHandle.close()
# Open for writing
fileHandle = open(fullPath, "w")
modifiedFiles += 1
fileHandle.write(modifiedContent)
except InvalidFileError:
continue
print("Modified {} files".format(modifiedFiles))
if __name__ == "__main__":
main() |
add arguments | # ------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
# ------------------------------------------------------------------------------
# Copyright (C) 2017 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ------------------------------------------------------------------------------
from django.core.management.base import CommandError, BaseCommand
from django.db import transaction
from eoxserver.resources.coverages import models
from eoxserver.resources.coverages.management.commands import (
CommandOutputMixIn, SubParserMixIn
)
class Command(CommandOutputMixIn, SubParserMixIn, BaseCommand):
""" Command to manage grids. This command uses sub-commands for the
specific tasks: create, delete
"""
def METHOD_NAME(self, parser):
create_parser = self.add_subparser(parser, 'create')
delete_parser = self.add_subparser(parser, 'delete')
for parser in [create_parser, delete_parser]:
parser.add_argument(
'name', nargs=1, help='The grid name'
)
create_parser.add_argument(
'coordinate_reference_system', nargs=1,
help=(
'The definition of the coordinate reference system. Either '
                'an integer (the EPSG code), or the URL, WKT or XML definition.'
)
)
create_parser.add_argument(
'--name', '--axis-name', '-n', dest='axis_names', default=[],
action='append',
help=(
'The name of one axis. Must be passed at least once and up to '
'four times.'
)
)
create_parser.add_argument(
'--type', '--axis-type', '-t', dest='axis_types', default=[],
action='append',
choices=[choice[1] for choice in models.Grid.AXIS_TYPES],
help=(
'The type of one axis. Must be passed at least once and up to '
'four times.'
)
)
create_parser.add_argument(
'--offset', '--axis-offset', '-o', dest='axis_offsets', default=[],
action='append',
help=(
'The offset for one axis. Must be passed at least once and up '
'to four times.'
)
)
@transaction.atomic
def handle(self, subcommand, name, *args, **kwargs):
""" Dispatch sub-commands: create, delete.
"""
name = name[0]
if subcommand == "create":
self.handle_create(name, *args, **kwargs)
elif subcommand == "delete":
self.handle_delete(name, *args, **kwargs)
def handle_create(self, name, coordinate_reference_system, **kwargs):
""" Handle the creation of a new product
"""
axis_names = kwargs['axis_names']
axis_types = kwargs['axis_types']
axis_offsets = kwargs['axis_offsets']
if not axis_names:
raise CommandError('Must supply at least one axis definition.')
if len(axis_types) != len(axis_names):
raise CommandError(
'Invalid number of axis-types supplied. Expected %d, got %d.'
% (len(axis_names), len(axis_types))
)
if len(axis_offsets) != len(axis_names):
raise CommandError(
'Invalid number of axis-offsets supplied. Expected %d, got %d.'
% (len(axis_names), len(axis_offsets))
)
if len(axis_names) > 4:
raise CommandError('Currently only at most four axes are supported.')
type_name_to_id = dict(
(name, id_) for id_, name in models.Grid.AXIS_TYPES
)
iterator = enumerate(zip(axis_names, axis_types, axis_offsets), start=1)
definition = {
'name': name,
'coordinate_reference_system': coordinate_reference_system[0]
}
for i, (name, type_, offset) in iterator:
definition['axis_%d_name' % i] = name
definition['axis_%d_type' % i] = type_name_to_id[type_]
definition['axis_%d_offset' % i] = offset
models.Grid.objects.create(**definition)
def handle_delete(self, name, **kwargs):
""" Handle the deregistration a product
"""
try:
models.Grid.objects.get(name=name).delete()
except models.Grid.DoesNotExist:
raise CommandError('No such Grid %r' % name) |
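# Hedged usage sketch (assumed invocation, not taken from the EOxServer docs):
# with this command registered under a hypothetical name "grid", creating and
# deleting a two-axis grid could look roughly like the following; the grid
# name, EPSG code and offsets are made up, and each --axis-type must be one of
# the labels defined by models.Grid.AXIS_TYPES.
#
#   python manage.py grid create mygrid 4326 \
#       --axis-name x --axis-type <axis-type> --axis-offset 0.1 \
#       --axis-name y --axis-type <axis-type> --axis-offset -0.1
#   python manage.py grid delete mygrid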
doc to changes | from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from casexml.apps.phone.models import SyncLogSQL
from corehq.sql_db.util import handle_connection_failure
from dimagi.utils.parsing import string_to_utc_datetime
from pillowtop.checkpoints.manager import KafkaPillowCheckpoint
from pillowtop.feed.interface import Change
from pillowtop.pillow.interface import ConstructedPillow
from pillowtop.processors.interface import PillowProcessor
from pillowtop.reindexer.reindexer import Reindexer, ReindexerFactory
from corehq.apps.change_feed import topics
from corehq.apps.change_feed.consumer.feed import (
KafkaChangeFeed,
KafkaCheckpointEventHandler,
)
from corehq.apps.receiverwrapper.util import get_version_from_build_id
from corehq.apps.users.models import (
CommCareUser,
CouchUser,
DeviceAppMeta,
UserReportingMetadataStaging,
WebUser,
)
from corehq.apps.users.util import (
update_device_meta,
update_last_sync,
update_latest_builds,
)
from corehq.util.doc_processor.couch import CouchDocumentProvider
from corehq.util.doc_processor.interface import (
BaseDocProcessor,
DocumentProcessorController,
)
from settings import SYNCLOGS_SQL_DB_ALIAS
SYNCLOG_SQL_USER_SYNC_GROUP_ID = "synclog_sql_user_sync"
def _synclog_pillow_dbs():
return {SYNCLOGS_SQL_DB_ALIAS, DEFAULT_DB_ALIAS}
def get_user_sync_history_pillow(
pillow_id='UpdateUserSyncHistoryPillow', num_processes=1, process_num=0, **kwargs):
"""Synclog pillow
Processors:
- :py:func:`corehq.pillows.synclog.UserSyncHistoryProcessor`
"""
change_feed = KafkaChangeFeed(
topics=[topics.SYNCLOG_SQL], client_id=SYNCLOG_SQL_USER_SYNC_GROUP_ID,
num_processes=num_processes, process_num=process_num)
checkpoint = KafkaPillowCheckpoint(pillow_id, [topics.SYNCLOG_SQL])
return ConstructedPillow(
name=pillow_id,
checkpoint=checkpoint,
change_feed=change_feed,
processor=UserSyncHistoryProcessor(),
change_processed_event_handler=KafkaCheckpointEventHandler(
checkpoint=checkpoint, checkpoint_frequency=100, change_feed=change_feed
),
)
class UserSyncHistoryProcessor(PillowProcessor):
"""Updates the user document with reporting metadata when a user syncs
    Note that when USER_REPORTING_METADATA_BATCH_ENABLED is True, this is written to a postgres table.
Entries in that table are then batched and processed separately.
Reads from:
- CouchDB (user)
- SynclogSQL table
Writes to:
- CouchDB (user) (when batch processing disabled) (default)
- UserReportingMetadataStaging (SQL) (when batch processing enabled)
"""
@handle_connection_failure(get_db_aliases=_synclog_pillow_dbs)
def process_change(self, change):
synclog = change.get_document()
if not synclog:
return
user_id = synclog.get('user_id')
domain = synclog.get('domain')
if not user_id or not domain:
return
try:
sync_date = string_to_utc_datetime(synclog.get('date'))
except (ValueError, AttributeError):
return
build_id = synclog.get('build_id')
device_id = synclog.get('device_id')
app_id = synclog.get('app_id')
# WebApps syncs do not provide the app_id.
# For those syncs we go ahead and mark the last synclog synchronously.
if app_id and settings.USER_REPORTING_METADATA_BATCH_ENABLED:
UserReportingMetadataStaging.add_sync(domain, user_id, app_id, build_id, sync_date, device_id)
else:
user = CouchUser.get_by_user_id(user_id)
if not user:
return
device_app_meta = None
if device_id and app_id:
device_app_meta = DeviceAppMeta(app_id=app_id, build_id=build_id, last_sync=sync_date)
mark_last_synclog(domain, user, app_id, build_id, sync_date, sync_date, device_id, device_app_meta)
def mark_last_synclog(domain, user, app_id, build_id, sync_date, latest_build_date, device_id,
device_app_meta, commcare_version=None, build_profile_id=None, fcm_token=None,
fcm_token_timestamp=None, save_user=True):
version = None
if build_id:
version = get_version_from_build_id(domain, build_id)
local_save = False
if sync_date:
# sync_date could be null if this is called from a heartbeat request
local_save |= update_last_sync(user, app_id, sync_date, version)
if version:
local_save |= update_latest_builds(user, app_id, latest_build_date, version,
build_profile_id=build_profile_id)
if device_id:
local_save |= update_device_meta(user, device_id, commcare_version=commcare_version,
device_app_meta=device_app_meta, fcm_token=fcm_token,
fcm_token_timestamp=fcm_token_timestamp, save=False)
if local_save and save_user:
user.save(fire_signals=False)
return local_save
class UserSyncHistoryReindexerDocProcessor(BaseDocProcessor):
def __init__(self, pillow_processor):
self.pillow_processor = pillow_processor
def process_doc(self, doc):
synclog_changes = self.METHOD_NAME(doc)
for change in synclog_changes:
try:
self.pillow_processor.process_change(change)
except Exception:
return False
return True
def handle_skip(self, doc):
print('Unable to process user {}'.format(
doc['_id'],
))
return True
def METHOD_NAME(self, doc):
# creates a change object for the last 10 synclogs
# of the given user, for the synclog pillow to process.
        # this means we won't have to iterate through all synclogs
# when reindexing.
synclogs = SyncLogSQL.objects.filter(user_id=doc['_id']).order_by('date')[:10]
changes = [Change(
id=res.doc['_id'],
sequence_id=None,
document=res.doc
) for res in synclogs]
return changes
class UserSyncHistoryReindexer(Reindexer):
def __init__(self, doc_provider, chunk_size=1000, reset=False):
self.reset = reset
self.doc_provider = doc_provider
self.chunk_size = chunk_size
self.doc_processor = UserSyncHistoryReindexerDocProcessor(UserSyncHistoryProcessor())
def reindex(self):
processor = DocumentProcessorController(
self.doc_provider,
self.doc_processor,
reset=self.reset,
chunk_size=self.chunk_size,
)
processor.run()
class UpdateUserSyncHistoryReindexerFactory(ReindexerFactory):
slug = 'user-sync-history'
arg_contributors = [
ReindexerFactory.resumable_reindexer_args,
]
def build(self):
iteration_key = "UpdateUserSyncHistoryPillow_reindexer"
doc_provider = CouchDocumentProvider(iteration_key, doc_type_tuples=[
CommCareUser,
WebUser
])
return UserSyncHistoryReindexer(doc_provider, **self.options) |
exp decay | #!/usr/bin/env python
import numpy as np
from colander import SchemaNode
from gnome.persist.extend_colander import NumpyArray
from gnome.persist.base_schema import ObjTypeSchema
from gnome.array_types import gat
from gnome.utilities.time_utils import date_to_sec, sec_to_datetime
from gnome.exceptions import ReferencedObjectNotSet
from gnome.movers.movers import Process, ProcessSchema
class WeathererSchema(ProcessSchema):
pass
class Weatherer(Process):
'''
Base Weathering agent. This is almost exactly like the base Mover
in the way that it acts upon the model. It contains the same API
    as the mover as well. It is not serializable since it is only a partial
    implementation.
'''
_schema = WeathererSchema # nothing new added so use this schema
def __init__(self, **kwargs):
'''
Base weatherer class; defines the API for all weatherers
Passes optional arguments to base (Process) class via super. See base
class for optional arguments: `gnome.movers.mover.Process`
adds 'mass_components', 'mass' to array_types since all weatherers
need these.
'''
super(Weatherer, self).__init__(**kwargs)
# arrays that all weatherers will update - use this to ask
self.array_types.update({'mass_components': gat('mass_components'),
'fate_status': gat('fate_status'),
'mass': gat('mass'),
'init_mass': gat('init_mass')})
def __repr__(self):
return ('{0.__class__.__module__}.{0.__class__.__name__}('
'active_range={0.active_range!r}, '
'on={0.on}, '
'active={0.active})'
.format(self))
def initialize_data(self, sc, num_released):
'''
Let weatherers have a way to customize the initialization of
data arrays. Currently, only some weatherers use this to customize
initialization of data arrays. If movers also move towards this
implementation, then move to 'Process' base class.
'''
pass
def prepare_for_model_run(self, sc):
"""
Override for weatherers so they can initialize correct 'mass_balance'
key and set initial value to 0.0
"""
if self.on:
# almost all weatherers require wind, water, waves so raise
# exception here if none is found
for attr in ('wind', 'water', 'waves'):
if hasattr(self, attr) and getattr(self, attr) is None:
msg = (attr + " object not defined for " +
self.__class__.__name__)
raise ReferencedObjectNotSet(msg)
def weather_elements(self, sc, time_step, model_time):
'''
Run the equivalent of get_move for weathering processes. It modifies
the SpillContainer's data arrays; most weatherers update
'mass_components' and 'mass'
Some objects do not implement this since they update arrays like 'area'
in model_step_is_done()
'''
pass
def _halflife(self, M_0, factors, time):
'Assumes our factors are half-life values'
half = np.float64(0.5)
total_mass = M_0 * (half ** (time / factors))
return total_mass
def METHOD_NAME(self, M_0, lambda_, time):
'''
        Exponential decay: x(t) = M_0 * exp(lambda_ * time)
        The `lambda_` should be negative in order for the mass to decay over time
'''
mass_remain = M_0 * np.exp(lambda_ * time)
return mass_remain
def get_wind_speed(self, points, model_time, min_val = 0,
coord_sys='r', fill_value=1.0):
'''
Wrapper for the weatherers so they can get wind speeds
'''
if hasattr(self.wind,'ice_concentration'):
retval = self.wind.at(points, model_time, min_val, coord_sys=coord_sys).reshape(-1)
else:
retval = self.wind.at(points, model_time, coord_sys=coord_sys).reshape(-1)
retval[retval < min_val] = min_val
if isinstance(retval, np.ma.MaskedArray):
return retval.filled(fill_value)
else:
return retval
def check_time(self, wind, model_time):
'''
        Should have an option to extrapolate, but for now we extrapolate by default
TODO, FIXME: This function does not appear to be used by anything.
Removing it does not break any of the unit tests.
If it is not used, it should probably go away.
'''
new_model_time = model_time
if wind is not None:
if model_time is not None:
timeval = date_to_sec(model_time)
start_time = wind.get_start_time()
end_time = wind.get_end_time()
if end_time == start_time:
return model_time
if timeval < start_time:
new_model_time = sec_to_datetime(start_time)
if timeval > end_time:
new_model_time = sec_to_datetime(end_time)
else:
return model_time
return new_model_time
class HalfLifeWeathererSchema(WeathererSchema):
half_lives = SchemaNode(
NumpyArray(), save=True, update=True
)
class HalfLifeWeatherer(Weatherer):
'''
Give half-life for all components and decay accordingly
'''
_schema = HalfLifeWeathererSchema
def __init__(self, half_lives=(15.*60, ), **kwargs):
'''
The half_lives are a property of HalfLifeWeatherer. If the
len(half_lives) != gnome.array_types.mass_components.shape[0]
        then only as many half-life values as there are mass components are
        used.
        The default number of mass components is 5; it is possible to change
        this default, but not easily.
        HalfLifeWeatherer is currently used mostly for testing, so this will
        change if it becomes more widely used and there is a need for the user
        to change the default number of mass components.
half_lives could be constants or could be something more complex like
a function of time (not implemented yet). Not storing 'half_lives' in
data_arrays since they are neither time-varying nor varying per LE.
'''
super(HalfLifeWeatherer, self).__init__(**kwargs)
self.half_lives = half_lives
@property
def half_lives(self):
return self._half_lives
@half_lives.setter
def half_lives(self, half_lives):
self._half_lives = np.asarray(half_lives, dtype=np.float64)
def weather_elements(self, sc, time_step, model_time):
'''
weather elements over time_step
'''
if not self.active:
return
if sc.num_released == 0:
return
for _, data in sc.itersubstancedata(self.array_types):
hl = self._halflife(data['mass_components'],
self.half_lives, time_step)
data['mass_components'][:] = hl
data['mass'][:] = data['mass_components'].sum(1)
sc.update_from_fatedataview() |
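# Hedged numeric sketch (not part of py_gnome): it only illustrates the two
# decay formulas implemented above, using made-up masses and a 15-minute
# half-life so it can run without a model or SpillContainer.
def _demo_decay_formulas():
    M_0 = np.array([100.0, 50.0])
    half_life = 15. * 60  # seconds
    # After exactly one half-life the mass should be halved.
    assert np.allclose(M_0 * 0.5 ** (half_life / half_life), M_0 / 2)
    # The exponential form is equivalent when lambda_ = -ln(2) / half_life.
    lambda_ = -np.log(2) / half_life
    assert np.allclose(M_0 * np.exp(lambda_ * half_life), M_0 / 2)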
get 2d sincos pos embed from grid | # --------------------------------------------------------
# Position embedding utils
# --------------------------------------------------------
from typing import Tuple
import numpy as np
import torch
# --------------------------------------------------------
# 2D sine-cosine position embedding
# References:
# Transformer: https://github.com/tensorflow/models/blob/master/official/nlp/transformer/model_utils.py
# MoCo v3: https://github.com/facebookresearch/moco-v3
# --------------------------------------------------------
def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False):
"""
grid_size: int of the grid height and width
return:
pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token)
"""
grid_h = np.arange(grid_size, dtype=np.float32)
grid_w = np.arange(grid_size, dtype=np.float32)
grid = np.meshgrid(grid_w, grid_h) # here w goes first
grid = np.stack(grid, axis=0)
grid = grid.reshape([2, 1, grid_size, grid_size])
pos_embed = METHOD_NAME(embed_dim, grid)
if cls_token:
pos_embed = np.concatenate([np.zeros([1, embed_dim]), pos_embed], axis=0)
return pos_embed
def METHOD_NAME(embed_dim, grid):
assert embed_dim % 2 == 0
# use half of dimensions to encode grid_h
emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2)
emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2)
emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D)
return emb
def get_1d_sincos_pos_embed_from_grid(embed_dim, pos):
"""
embed_dim: output dimension for each position
pos: a list of positions to be encoded: size (M,)
out: (M, D)
"""
assert embed_dim % 2 == 0
    omega = np.arange(embed_dim // 2, dtype=np.float64)
omega /= embed_dim / 2.0
omega = 1.0 / 10000 ** omega # (D/2,)
pos = pos.reshape(-1) # (M,)
out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product
emb_sin = np.sin(out) # (M, D/2)
emb_cos = np.cos(out) # (M, D/2)
emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D)
return emb
# --------------------------------------------------------
# Interpolate position embeddings for high-resolution
# References:
# DeiT: https://github.com/facebookresearch/deit
# --------------------------------------------------------
def interpolate_pos_embed(model, checkpoint_model, pos_embed_key):
if pos_embed_key in checkpoint_model:
pos_embed_checkpoint = checkpoint_model[pos_embed_key]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.num_patches
if pos_embed_key.startswith("decoder"):
num_extra_tokens = model.decoder_pos_embed.shape[-2] - num_patches
else:
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print(
"Position interpolate from %dx%d to %dx%d"
% (orig_size, orig_size, new_size, new_size)
)
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode="bicubic",
align_corners=False,
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model[pos_embed_key] = new_pos_embed
def interpolate_pos_embed_online(
pos_embed, orig_size: Tuple[int], new_size: Tuple[int], num_extra_tokens: int
):
extra_tokens = pos_embed[:, :num_extra_tokens]
pos_tokens = pos_embed[:, num_extra_tokens:]
embedding_size = pos_tokens.shape[-1]
pos_tokens = pos_tokens.reshape(
-1, orig_size[0], orig_size[1], embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=new_size, mode="bicubic", align_corners=False,
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
return new_pos_embed |
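# --------------------------------------------------------
# Hedged usage sketch (not part of the original utilities): it only checks the
# shape of the 2D sine-cosine embedding for a hypothetical 14x14 patch grid
# with a 768-dim embedding, with and without a class token.
# --------------------------------------------------------
def _demo_pos_embed_shapes():
    pos = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=False)
    assert pos.shape == (14 * 14, 768)
    pos_cls = get_2d_sincos_pos_embed(embed_dim=768, grid_size=14, cls_token=True)
    assert pos_cls.shape == (1 + 14 * 14, 768)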
test sir regularized numdiff | import numpy as np
import pandas as pd
import pytest
from statsmodels.regression.dimred import (
SlicedInverseReg, SAVE, PHD, CORE)
from numpy.testing import (assert_equal, assert_allclose)
from statsmodels.tools.numdiff import approx_fprime
def test_poisson():
np.random.seed(43242)
# Generate a non-orthogonal design matrix
xmat = np.random.normal(size=(500, 5))
xmat[:, 1] = 0.5*xmat[:, 0] + np.sqrt(1 - 0.5**2) * xmat[:, 1]
xmat[:, 3] = 0.5*xmat[:, 2] + np.sqrt(1 - 0.5**2) * xmat[:, 3]
b = np.r_[0, 1, -1, 0, 0.5]
lpr = np.dot(xmat, b)
ev = np.exp(lpr)
y = np.random.poisson(ev)
for method in range(6):
if method == 0:
model = SlicedInverseReg(y, xmat)
rslt = model.fit()
elif method == 1:
model = SAVE(y, xmat)
rslt = model.fit(slice_n=100)
elif method == 2:
model = SAVE(y, xmat, bc=True)
rslt = model.fit(slice_n=100)
elif method == 3:
df = pd.DataFrame({"y": y,
"x0": xmat[:, 0],
"x1": xmat[:, 1],
"x2": xmat[:, 2],
"x3": xmat[:, 3],
"x4": xmat[:, 4]})
model = SlicedInverseReg.from_formula(
"y ~ 0 + x0 + x1 + x2 + x3 + x4", data=df)
rslt = model.fit()
elif method == 4:
model = PHD(y, xmat)
rslt = model.fit()
elif method == 5:
model = PHD(y, xmat)
rslt = model.fit(resid=True)
# Check for concentration in one direction (this is
# a single index model)
assert_equal(np.abs(rslt.eigs[0] / rslt.eigs[1]) > 5, True)
# Check that the estimated direction aligns with the true
# direction
params = np.asarray(rslt.params)
q = np.dot(params[:, 0], b)
q /= np.sqrt(np.sum(params[:, 0]**2))
q /= np.sqrt(np.sum(b**2))
assert_equal(np.abs(q) > 0.95, True)
def METHOD_NAME():
# Use numeric gradients to check the analytic gradient
    # for the regularized SIR objective function.
np.random.seed(93482)
n = 1000
p = 10
xmat = np.random.normal(size=(n, p))
y1 = np.dot(xmat, np.linspace(-1, 1, p))
y2 = xmat.sum(1)
y = y2 / (1 + y1**2) + np.random.normal(size=n)
model = SlicedInverseReg(y, xmat)
_ = model.fit()
# Second difference penalty matrix.
fmat = np.zeros((p-2, p))
for i in range(p-2):
fmat[i, i:i+3] = [1, -2, 1]
with pytest.warns(UserWarning, match="SIR.fit_regularized did not"):
_ = model.fit_regularized(2, 3*fmat)
# Compare the gradients to the numerical derivatives
for _ in range(5):
pa = np.random.normal(size=(p, 2))
pa, _, _ = np.linalg.svd(pa, 0)
gn = approx_fprime(pa.ravel(), model._regularized_objective, 1e-7)
gr = model._regularized_grad(pa.ravel())
assert_allclose(gn, gr, atol=1e-5, rtol=1e-4)
def test_sir_regularized_1d():
# Compare regularized SIR to traditional SIR, in a setting where the
# regularization is compatible with the true parameters (i.e. there
# is no regularization bias).
np.random.seed(93482)
n = 1000
p = 10
xmat = np.random.normal(size=(n, p))
y = np.dot(xmat[:, 0:4], np.r_[1, 1, -1, -1]) + np.random.normal(size=n)
model = SlicedInverseReg(y, xmat)
rslt = model.fit()
    # The penalty drives p[0] ~ p[1] and p[2] ~ p[3]
fmat = np.zeros((2, p))
fmat[0, 0:2] = [1, -1]
fmat[1, 2:4] = [1, -1]
rslt2 = model.fit_regularized(1, 3*fmat)
pa0 = np.zeros(p)
pa0[0:4] = [1, 1, -1, -1]
pa1 = rslt.params[:, 0]
pa2 = rslt2.params[:, 0:2]
# Compare two 1d subspaces
def sim(x, y):
x = x / np.sqrt(np.sum(x * x))
y = y / np.sqrt(np.sum(y * y))
return 1 - np.abs(np.dot(x, y))
    # Regularized SIR should be closer to the truth than traditional SIR
assert_equal(sim(pa0, pa1) > sim(pa0, pa2), True)
# Regularized SIR should be close to the truth
assert_equal(sim(pa0, pa2) < 1e-3, True)
# Regularized SIR should have a smaller penalty value than traditional SIR
assert_equal(np.sum(np.dot(fmat, pa1)**2) > np.sum(np.dot(fmat, pa2)**2),
True)
def test_sir_regularized_2d():
# Compare regularized SIR to traditional SIR when there is no penalty.
# The two procedures should agree exactly.
np.random.seed(93482)
n = 1000
p = 10
xmat = np.random.normal(size=(n, p))
y1 = np.dot(xmat[:, 0:4], np.r_[1, 1, -1, -1])
y2 = np.dot(xmat[:, 4:8], np.r_[1, 1, -1, -1])
y = y1 + np.arctan(y2) + np.random.normal(size=n)
model = SlicedInverseReg(y, xmat)
rslt1 = model.fit()
fmat = np.zeros((1, p))
for d in 1, 2, 3, 4:
if d < 3:
rslt2 = model.fit_regularized(d, fmat)
else:
with pytest.warns(UserWarning, match="SIR.fit_regularized did"):
rslt2 = model.fit_regularized(d, fmat)
pa1 = rslt1.params[:, 0:d]
pa1, _, _ = np.linalg.svd(pa1, 0)
pa2 = rslt2.params
_, s, _ = np.linalg.svd(np.dot(pa1.T, pa2))
assert_allclose(np.sum(s), d, atol=1e-1, rtol=1e-1)
def test_covreduce():
np.random.seed(34324)
p = 4
endog = []
exog = []
for k in range(3):
c = np.eye(p)
x = np.random.normal(size=(2, 2))
# The differences between the covariance matrices
# are all in the first 2 rows/columns.
c[0:2, 0:2] = np.dot(x.T, x)
cr = np.linalg.cholesky(c)
m = 1000*k + 50*k
x = np.random.normal(size=(m, p))
x = np.dot(x, cr.T)
exog.append(x)
endog.append(k * np.ones(m))
endog = np.concatenate(endog)
exog = np.concatenate(exog, axis=0)
for dim in 1, 2, 3:
cr = CORE(endog, exog, dim)
pt = np.random.normal(size=(p, dim))
pt, _, _ = np.linalg.svd(pt, 0)
gn = approx_fprime(pt.ravel(), cr.loglike, 1e-7)
g = cr.score(pt.ravel())
assert_allclose(g, gn, 1e-5, 1e-5)
rslt = cr.fit()
proj = rslt.params
assert_equal(proj.shape[0], p)
assert_equal(proj.shape[1], dim)
assert_allclose(np.dot(proj.T, proj), np.eye(dim), 1e-8, 1e-8)
if dim == 2:
# Here we know the approximate truth
projt = np.zeros((p, 2))
projt[0:2, 0:2] = np.eye(2)
assert_allclose(np.trace(np.dot(proj.T, projt)), 2,
rtol=1e-3, atol=1e-3) |
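def _demo_numdiff_gradient_check():
    # Hedged mini-check (not part of the statsmodels test suite): it shows the
    # approx_fprime pattern used above on a toy quadratic whose gradient is
    # known exactly, independent of any dimension-reduction model.
    def objective(z):
        return np.sum(z ** 2)
    z0 = np.array([1.0, -2.0, 0.5])
    gn = approx_fprime(z0, objective, 1e-7)
    # Analytic gradient of sum(z**2) is 2*z.
    assert_allclose(gn, 2 * z0, rtol=1e-4, atol=1e-4)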
update | from typing import Callable, Optional, Union
from unittest.mock import patch
import pytest
import torch
import torchvision
from ignite.metrics.gan.utils import _BaseInceptionMetric, InceptionModel
class DummyInceptionMetric(_BaseInceptionMetric):
def __init__(
self,
num_features: Optional[int] = None,
feature_extractor: Optional[torch.nn.Module] = None,
output_transform: Callable = lambda x: x,
device: Union[str, torch.device] = torch.device("cpu"),
) -> None:
super(DummyInceptionMetric, self).__init__(
num_features=num_features,
feature_extractor=feature_extractor,
output_transform=output_transform,
device=device,
)
def reset(self):
pass
def compute(self):
pass
def METHOD_NAME(self, output):
self._extract_features(output)
def test_dummy_metric():
with pytest.raises(ValueError, match=r"Argument num_features must be greater to zero, got:"):
DummyInceptionMetric(num_features=-1, feature_extractor=torch.nn.Identity()).METHOD_NAME(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"feature_extractor output must be a tensor of dim 2, got: 1"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).METHOD_NAME(torch.rand(3))
with pytest.raises(ValueError, match=r"Batch size should be greater than one, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).METHOD_NAME(torch.rand(0, 0))
with pytest.raises(ValueError, match=r"num_features returned by feature_extractor should be 1000, got: 0"):
DummyInceptionMetric(num_features=1000, feature_extractor=torch.nn.Identity()).METHOD_NAME(torch.rand(2, 0))
with pytest.raises(ValueError, match=r"Argument num_features must be provided, if feature_extractor is specified."):
DummyInceptionMetric(feature_extractor=torch.nn.Identity())
with pytest.raises(TypeError, match=r"Argument feature_extractor must be of type torch.nn.Module, got"):
DummyInceptionMetric(num_features=1000, feature_extractor=lambda x: x)
assert isinstance(DummyInceptionMetric(num_features=10)._feature_extractor, torch.nn.Identity)
def test_inception_extractor_wrong_inputs():
with pytest.raises(ValueError, match=r"Inputs should be a tensor of dim 4"):
InceptionModel(return_features=True)(torch.rand(2))
with pytest.raises(ValueError, match=r"Inputs should be a tensor with 3 channels"):
InceptionModel(return_features=True)(torch.rand(2, 2, 2, 0))
def test_inception_model_probability():
x = torch.rand(2, 3, 299, 299)
y = InceptionModel(return_features=False)(x)
assert pytest.approx(torch.sum(y[0]).item()) == 1.0
assert pytest.approx(torch.sum(y[1]).item()) == 1.0
assert torch.all(0 <= y)
@pytest.fixture()
def mock_no_torchvision():
with patch.dict("sys.modules", {"torchvision": None}):
yield torchvision
def test_no_torchvision(mock_no_torchvision):
with pytest.raises(ModuleNotFoundError, match=r"This module requires torchvision to be installed."):
InceptionModel(return_features=True)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="Skip if no GPU")
def test_device_mismatch_cuda():
images = torch.rand(10, 3, 299, 299)
result = InceptionModel(return_features=False, device="cuda")(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 1000])
result = InceptionModel(return_features=False)(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 1000])
images = torch.rand(10, 5)
result = DummyInceptionMetric(num_features=5, device="cuda")._extract_features(images)
assert result.is_cuda
assert result.shape == torch.Size([10, 5])
result = DummyInceptionMetric(num_features=5)._extract_features(images.cuda())
assert not result.is_cuda
assert result.shape == torch.Size([10, 5]) |
test push set | import pytest
import time
from mastodon.Mastodon import MastodonNotFoundError
import mastodon.compat
if not mastodon.compat.IMPL_HAS_CRYPTO or not mastodon.compat.IMPL_HAS_ECE:
pytest.skip("webpush dependencies missing, skipping webpush tests", allow_module_level=True)
def test_decrypt(api):
priv = {
'auth': b'\xe7y\x0fp\xb9\x92\xe0\xa0\xc5\xd5~Qr\xd7\xaa\x16',
'privkey': 86377660131518823691452242868806126400423244879788569019894692873283793307753
}
# Yes, I am aware that there is an access token in there. It's not valid anymore.
encryption_header = "salt=O14vjCdbxxhRUTkrsp98vw"
crypto_key_header = "dh=BKz_CMlF6kVURVImPDsz3bNbTv-9QTGtXpE4Fd3wQGF44fVTj32-APndoccYdjXY2U-mdTen1PDm_pHacpEmD0M;p256ecdsa=BDSAB3e_l5Qp4X50UYSSrKiZFZWDAgHlWIDhgBjXJuUzb0HrpqoCdFhMCh4o2xYHTqpvyTJ3SfFtrILLiXXWT5k"
data = b'\x10\\b<\xddi\xacd\x86\xc8J1\xb6}5\x01K\x85;\xd2\xd4WzN\xab\x0b|3D\xe9_YPcsG\x9fh\xae\xfe\xbb:z&\xc4\x8ek\x89\xde\xa2\xdbF\xdc\xdd[p<h\x9e\x95\x8d\xd4\xf0\xd0\xc1\x89\t\x01\xebuV\xb1\xa4Fp\xe3\xbf\x91g\x93\xbe \xe5\xd4\xee\xe2\xb0FaB\x8a\xd0\x00b\xe4Q\x83\xd5\xd9\x83\x9a\x1d\xd5j\xdb"\xc5\xb0\xf5W\xa72r4r]aLs\xa8\x8c\x1a\x19h\xfeX)_t\xd4p\xc9\xd2d\x1b?\x19\xc8X(\x02\xd5\x18\xe4\x93\xe2\xda\x01\xb4b\xe4\xd0F\x08`\x13;>\xc4\x89\xbc\xc3\x8e\xb8\x9bJ~\xc4}]\xdb\xdc\xf1wY\x16g\xf8\x91N\xee\xfd\x92\x1e\xcd\xd2~\xf2\x06\x89\xcd\xa5\xcd\x97\xb7{\xc5\xe1\xe4\xb0\x9f7\xc6\x8a5\xda\xbbm\xce\xc5\x8d\x93`&\x0e\xa9\x83\xa2|p;\xa4\x8b)\xc8\x07\rb!a\x82\xf5E\x92\x00Y{\xd4\x94\xf8\xf0\r\xb5c\x86\xfb\xd0*\xbb\xa1!\x14\xd5\x11\xc8\xafI\xb3j\xca7\xc4\x9c\xe0\x9c0\x12\xc0\xd1\x8a{\xcd\xc4~\\\xc2\x99\xf0d)\x03E\x91;m\xbe\xdb\x86\xef\xd7\xa7>\xd1a\xf1\x83!\xaeB\xaa\xf0\xda\x1b;\x86\xd8;]\x9e\xe3\xfa*!\x07,\t\xbd\xe7\xfc\xa7\xa8\xba[\xcf\x89e\xac\'\xdb\x88g\xd9\\\xe4C\x08Lb\xb6CAT\xcc!\xa4\xce\x92t3\x1c1\x01'
decrypted = api.push_subscription_decrypt_push(
data,
priv,
encryption_header,
crypto_key_header
)
assert decrypted
assert decrypted.title == 'You were mentioned by fake halcy'
@pytest.mark.vcr(match_on=['path'])
def METHOD_NAME(api):
priv, pub = api.push_subscription_generate_keys()
sub = api.push_subscription_set("example.com", pub, follow_events=True, favourite_events=True, reblog_events=True, mention_events=True, poll_events=True, follow_request_events=True, status_events=True, policy='none')
assert sub == api.push_subscription()
assert sub.endpoint == "https://example.com"
    # The invalid policy value below must cause the call to raise.
    did_not_raise = False
    try:
        sub = api.push_subscription_set("example.com", pub, follow_events=True, favourite_events=True, reblog_events=True, mention_events=True, poll_events=True, follow_request_events=True, status_events=True, policy='not a valid value')
        did_not_raise = True
    except:
        pass
    assert not did_not_raise
@pytest.mark.vcr(match_on=['path'])
def test_push_update(api):
priv, pub = api.push_subscription_generate_keys()
sub = api.push_subscription_set("example.com", pub,follow_events=False,
favourite_events=False, reblog_events=False,
mention_events=False)
sub2 = api.push_subscription_update(follow_events=True, favourite_events=True,
reblog_events=True, mention_events=True)
time.sleep(1)
assert sub2 == api.push_subscription()
sub3 = api.push_subscription_update(follow_events=False, favourite_events=False,
reblog_events=False, mention_events=False)
time.sleep(1)
assert sub3 == api.push_subscription()
print(sub3)
print(api.push_subscription())
assert sub3.alerts.follow is False
assert sub3.alerts.favourite is False
assert sub3.alerts.reblog is False
assert sub3.alerts.mention is False
assert sub2.alerts.follow is True
assert sub2.alerts.favourite is True
assert sub2.alerts.reblog is True
assert sub2.alerts.mention is True
@pytest.mark.vcr(match_on=['path'])
def test_push_delete(api):
priv, pub = api.push_subscription_generate_keys()
sub = api.push_subscription_set("example.com", pub)
assert sub
api.push_subscription_delete()
with pytest.raises(MastodonNotFoundError):
api.push_subscription() |
regenerate search table | from itertools import chain
import inflection
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy.orm import aliased
from ereuse_devicehub.db import db
from ereuse_devicehub.resources import search
from ereuse_devicehub.resources.action.models import Action, ActionWithMultipleDevices, \
ActionWithOneDevice
from ereuse_devicehub.resources.agent.models import Organization
from ereuse_devicehub.resources.device.models import Component, Computer, Device
from ereuse_devicehub.resources.tag.model import Tag
class DeviceSearch(db.Model):
"""Temporary table that stores full-text device documents.
    It provides methods that run automatically to keep the documents up to date.
"""
device_id = db.Column(db.BigInteger,
db.ForeignKey(Device.id, ondelete='CASCADE'),
primary_key=True)
device = db.relationship(Device, primaryjoin=Device.id == device_id)
properties = db.Column(TSVECTOR, nullable=False)
tags = db.Column(TSVECTOR)
devicehub_ids = db.Column(TSVECTOR)
__table_args__ = (
        # todo to add concurrency this should be committed separately
# see https://docs.sqlalchemy.org/en/latest/dialects/postgresql.html#indexes-with-concurrently
db.Index('properties gist', properties, postgresql_using='gist'),
db.Index('tags gist', tags, postgresql_using='gist'),
db.Index('devicehub_ids gist', devicehub_ids, postgresql_using='gist'),
{
'prefixes': ['UNLOGGED']
            # Only for temporary (UNLOGGED) tables; can cause the table to be emptied when Postgres restarts
}
)
@classmethod
def update_modified_devices(cls, session: db.Session):
"""Updates the documents of the devices that are part of a
modified action, or tag in the passed-in session.
This method is registered as a SQLAlchemy listener in the
Devicehub class.
"""
devices_to_update = set()
for model in chain(session.new, session.dirty):
if isinstance(model, Action):
if isinstance(model, ActionWithMultipleDevices):
devices_to_update |= model.devices
elif isinstance(model, ActionWithOneDevice):
devices_to_update.add(model.device)
if model.parent:
devices_to_update.add(model.parent)
devices_to_update |= model.components
elif isinstance(model, Tag) and model.device:
devices_to_update.add(model.device)
# this flush is controversial:
# see https://groups.google.com/forum/#!topic/sqlalchemy/hBzfypgPfYo
# todo probably should replace it with what the solution says
session.flush()
for device in (d for d in devices_to_update if not isinstance(d, Component)):
cls.set_device_tokens(session, device)
@classmethod
def set_all_devices_tokens_if_empty(cls, session: db.Session):
"""Generates the search docs if the table is empty.
        This can happen if Postgres shut down unexpectedly, as
        it deletes unlogged tables such as ours.
"""
if not DeviceSearch.query.first():
cls.METHOD_NAME(session)
@classmethod
def METHOD_NAME(cls, session: db.Session):
"""Deletes and re-computes all the search table."""
DeviceSearch.query.delete()
for device in Device.query:
if not isinstance(device, Component):
cls.set_device_tokens(session, device)
@classmethod
def set_device_tokens(cls, session: db.Session, device: Device):
"""(Re)Generates the device search tokens."""
assert not isinstance(device, Component)
tokens = [
(str(device.id), search.Weight.A),
(inflection.humanize(device.type), search.Weight.B),
(Device.model, search.Weight.B),
(Device.manufacturer, search.Weight.C),
(Device.serial_number, search.Weight.A)
]
if device.manufacturer:
# todo this has to be done using a dictionary
manufacturer = device.manufacturer.lower()
if 'asus' in manufacturer:
tokens.append(('asus', search.Weight.B))
if 'hewlett' in manufacturer or 'hp' in manufacturer or 'h.p' in manufacturer:
tokens.append(('hp', search.Weight.B))
tokens.append(('h.p', search.Weight.C))
tokens.append(('hewlett', search.Weight.C))
tokens.append(('packard', search.Weight.C))
if isinstance(device, Computer):
# Aggregate the values of all the components of pc
Comp = aliased(Component)
tokens.extend((
(db.func.string_agg(db.cast(Comp.id, db.TEXT), ' '), search.Weight.D),
(db.func.string_agg(Comp.model, ' '), search.Weight.C),
(db.func.string_agg(Comp.manufacturer, ' '), search.Weight.D),
(db.func.string_agg(Comp.serial_number, ' '), search.Weight.B),
(db.func.string_agg(Comp.type, ' '), search.Weight.B),
('Computer', search.Weight.C),
('PC', search.Weight.C),
))
properties = session \
.query(search.Search.vectorize(*tokens)) \
.filter(Device.id == device.id)
if isinstance(device, Computer):
# Join to components
properties = properties \
.outerjoin(Comp, Computer.components) \
.group_by(Device.id)
tags = session.query(
search.Search.vectorize(
(db.func.string_agg(Tag.id, ' '), search.Weight.A),
(db.func.string_agg(Tag.secondary, ' '), search.Weight.A),
(db.func.string_agg(Organization.name, ' '), search.Weight.B)
)
).filter(Tag.device_id == device.id).join(Tag.org)
devicehub_ids = session.query(
search.Search.vectorize(
(db.func.string_agg(Device.devicehub_id, ' '), search.Weight.A),
)
).filter(Device.devicehub_id == device.devicehub_id)
# Note that commit flushes later
# todo see how to get rid of the one_or_none() by embedding those as subqueries
# I don't like this but I want the 'on_conflict_on_update' thingie
device_document = dict(properties=properties.one_or_none(), tags=tags.one_or_none(), devicehub_ids=devicehub_ids.one_or_none())
insert = postgresql.insert(DeviceSearch.__table__) \
.values(device_id=device.id, **device_document) \
.on_conflict_do_update(constraint='device_search_pkey', set_=device_document)
session.execute(insert) |
outputs | # encoding: utf-8
"""IO capturing utilities."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function, absolute_import
import sys
from IPython.utils.py3compat import PY3
if PY3:
from io import StringIO
else:
from StringIO import StringIO
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class RichOutput(object):
def __init__(self, data=None, metadata=None, transient=None, update=False):
self.data = data or {}
self.metadata = metadata or {}
self.transient = transient or {}
self.update = update
def display(self):
from IPython.display import publish_display_data
publish_display_data(data=self.data, metadata=self.metadata,
transient=self.transient, update=self.update)
def _repr_mime_(self, mime):
if mime not in self.data:
return
data = self.data[mime]
if mime in self.metadata:
return data, self.metadata[mime]
else:
return data
def _repr_html_(self):
return self._repr_mime_("text/html")
def _repr_latex_(self):
return self._repr_mime_("text/latex")
def _repr_json_(self):
return self._repr_mime_("application/json")
def _repr_javascript_(self):
return self._repr_mime_("application/javascript")
def _repr_png_(self):
return self._repr_mime_("image/png")
def _repr_jpeg_(self):
return self._repr_mime_("image/jpeg")
def _repr_svg_(self):
return self._repr_mime_("image/svg+xml")
class CapturedIO(object):
"""Simple object for containing captured stdout/err and rich display StringIO objects
Each instance `c` has three attributes:
- ``c.stdout`` : standard output as a string
- ``c.stderr`` : standard error as a string
- ``c.outputs``: a list of rich display outputs
Additionally, there's a ``c.show()`` method which will print all of the
above in the same order, and can be invoked simply via ``c()``.
"""
def __init__(self, stdout, stderr, METHOD_NAME=None):
self._stdout = stdout
self._stderr = stderr
if METHOD_NAME is None:
METHOD_NAME = []
self._outputs = METHOD_NAME
def __str__(self):
return self.stdout
@property
def stdout(self):
"Captured standard output"
if not self._stdout:
return ''
return self._stdout.getvalue()
@property
def stderr(self):
"Captured standard error"
if not self._stderr:
return ''
return self._stderr.getvalue()
@property
def METHOD_NAME(self):
"""A list of the captured rich display outputs, if any.
If you have a CapturedIO object ``c``, these can be displayed in IPython
using::
from IPython.display import display
for o in c.outputs:
display(o)
"""
return [ RichOutput(**kargs) for kargs in self._outputs ]
def show(self):
"""write my output to sys.stdout/err as appropriate"""
sys.stdout.write(self.stdout)
sys.stderr.write(self.stderr)
sys.stdout.flush()
sys.stderr.flush()
for kargs in self._outputs:
RichOutput(**kargs).display()
__call__ = show
class capture_output(object):
"""context manager for capturing stdout/err"""
stdout = True
stderr = True
display = True
def __init__(self, stdout=True, stderr=True, display=True):
self.stdout = stdout
self.stderr = stderr
self.display = display
self.shell = None
def __enter__(self):
from IPython.core.getipython import get_ipython
from IPython.core.displaypub import CapturingDisplayPublisher
from IPython.core.displayhook import CapturingDisplayHook
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
if self.display:
self.shell = get_ipython()
if self.shell is None:
self.save_display_pub = None
self.display = False
stdout = stderr = METHOD_NAME = None
if self.stdout:
stdout = sys.stdout = StringIO()
if self.stderr:
stderr = sys.stderr = StringIO()
if self.display:
self.save_display_pub = self.shell.display_pub
self.shell.display_pub = CapturingDisplayPublisher()
METHOD_NAME = self.shell.display_pub.METHOD_NAME
self.save_display_hook = sys.displayhook
sys.displayhook = CapturingDisplayHook(shell=self.shell,
METHOD_NAME=METHOD_NAME)
return CapturedIO(stdout, stderr, METHOD_NAME)
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
if self.display and self.shell:
self.shell.display_pub = self.save_display_pub
sys.displayhook = self.save_display_hook
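# Hedged usage sketch (not part of this module): outside an IPython session
# get_ipython() returns None, so only stdout/stderr are captured and the rich
# display outputs list stays empty.
def _demo_capture_output():
    with capture_output() as captured:
        print("hello")
    assert captured.stdout == "hello\n"
    assert captured.METHOD_NAME == []
    captured.show()  # replays the captured text on the real stdout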
|
is complete | #!/usr/bin/python
##################
# HDFDataSource.py
#
# Copyright David Baddeley, 2009
# [email protected]
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################
#from PYME.ParallelTasks.relativeFiles import getFullFilename
#import tables
from .BaseDataSource import XYZTCDataSource
#import httplib
#import urllib
#import requests
#import cPickle as pickle
import time
import json
#import pandas as pd
import numpy as np
SHAPE_LIFESPAN = 5
from PYME.IO import clusterIO
from PYME.IO import PZFFormat
from PYME.IO import MetaDataHandler
class DataSource(XYZTCDataSource):
moduleName = 'ClusterPZFDataSource'
def __init__(self, url, queue=None):
self.seriesName = url
#print url
self.clusterfilter = url.split('://')[1].split('/')[0]
#print self.clusterfilter
self.sequenceName = url.split('://%s/' % self.clusterfilter)[1]
#print self.sequenceName
self.lastShapeTime = 0
mdfn = '/'.join([self.sequenceName, 'metadata.json'])
#print mdfn
self.mdh = MetaDataHandler.NestedClassMDHandler()
self.mdh.update(json.loads(clusterIO.get_file(mdfn, self.clusterfilter)))
self.fshape = None#(self.mdh['Camera.ROIWidth'],self.mdh['Camera.ROIHeight'])
self._getNumFrames()
dimorder= self.mdh.get('DimOrder', 'XYZTC')
size_z = self.mdh.get('SizeZ', -1)
size_c = self.mdh.get('SizeC', 1)
size_t = self.mdh.get('SizeT', 1)
# if the series is complete when we start, we don't need to update the number of slices
self._complete = clusterIO.exists(self.eventFileName, self.clusterfilter)
XYZTCDataSource.__init__(self, dimorder, size_z=size_z, size_t=size_t, size_c=size_c)
def _getNumFrames(self):
frameNames = [f for f in clusterIO.listdir(self.sequenceName, self.clusterfilter) if f.endswith('.pzf')]
self.numFrames = len(frameNames)
self.lastShapeTime = time.time()
def getSlice(self, ind):
frameName = '%s/frame%05d.pzf' % (self.sequenceName, ind)
sl = PZFFormat.loads(clusterIO.get_file(frameName, self.clusterfilter))[0]
#print sl.shape, sl.dtype
return sl.squeeze()
def getSliceShape(self):
if self.fshape is None:
self.fshape = self.getSlice(0).shape
return self.fshape
def getNumSlices(self):
if not self._complete:
t = time.time()
if (t-self.lastShapeTime) > SHAPE_LIFESPAN:
self._getNumFrames()
return self.numFrames
@property
def eventFileName(self):
return self.sequenceName + '/events.json'
def getEvents(self):
from PYME.IO import events
import json
try:
ev = json.loads(clusterIO.get_file(self.eventFileName, self.clusterfilter, timeout=10))
return events.EventLogger.list_to_array(ev)
except (IOError, ValueError):
#our series might not have any events
return []
def getMetadata(self):
return self.mdh
@property
def METHOD_NAME(self):
if not self._complete:
# if cached property is false, check to see if anything has changed
#TODO - add check to see if we have an updated number of frames
self._complete = clusterIO.exists(self.eventFileName, self.clusterfilter)
return self._complete
|
test area multiple lats and lons | # Copyright Iris contributors
#
# This file is part of Iris and is released under the LGPL license.
# See COPYING and COPYING.LESSER in the root of the repository for full
# licensing details.
"""Unit tests for the `iris.analysis.cartography._quadrant_area` function"""
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests # isort:skip
import cf_units
import numpy as np
from iris.analysis.cartography import (
DEFAULT_SPHERICAL_EARTH_RADIUS,
_quadrant_area,
)
class TestExampleCases(tests.IrisTest):
def _radian_bounds(self, coord_list, dtype):
bound_deg = np.array(coord_list, dtype=dtype)
bound_deg = np.atleast_2d(bound_deg)
degrees = cf_units.Unit("degrees")
radians = cf_units.Unit("radians")
return degrees.convert(bound_deg, radians)
def _as_bounded_coords(self, lats, lons, dtype=np.float64):
return (
self._radian_bounds(lats, dtype=dtype),
self._radian_bounds(lons, dtype=dtype),
)
def test_area_in_north(self):
lats, lons = self._as_bounded_coords([0, 10], [0, 10])
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, [[1228800593851.443115234375]])
def test_area_in_far_north(self):
lats, lons = self._as_bounded_coords([70, 80], [0, 10])
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, [[319251845980.7646484375]])
def test_area_in_far_south(self):
lats, lons = self._as_bounded_coords([-80, -70], [0, 10])
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, [[319251845980.763671875]])
def test_area_in_north_with_reversed_lats(self):
lats, lons = self._as_bounded_coords([10, 0], [0, 10])
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, [[1228800593851.443115234375]])
def test_area_multiple_lats(self):
lats, lons = self._as_bounded_coords(
[[-80, -70], [0, 10], [70, 80]], [0, 10]
)
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(
area,
[
[319251845980.763671875],
[1228800593851.443115234375],
[319251845980.7646484375],
],
)
def METHOD_NAME(self):
lats, lons = self._as_bounded_coords(
[[-80, -70], [0, 10], [70, 80]], [[0, 10], [10, 30]]
)
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(
area,
[
[3.19251846e11, 6.38503692e11],
[1.22880059e12, 2.45760119e12],
[3.19251846e11, 6.38503692e11],
],
)
def test_symmetric_64_bit(self):
lats, lons = self._as_bounded_coords(
[[-90, -89.375], [89.375, 90]], [0, 10], dtype=np.float64
)
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, area[::-1])
def test_symmetric_32_bit(self):
lats, lons = self._as_bounded_coords(
[[-90, -89.375], [89.375, 90]], [0, 10], dtype=np.float32
)
area = _quadrant_area(lats, lons, DEFAULT_SPHERICAL_EARTH_RADIUS)
self.assertArrayAllClose(area, area[::-1])
class TestErrorHandling(tests.IrisTest):
def test_lat_bounds_1d_error(self):
self._assert_error_on_malformed_bounds([0, 10], [[0, 10]])
def test_lon_bounds_1d_error(self):
self._assert_error_on_malformed_bounds([[0, 10]], [0, 10])
def test_too_many_lat_bounds_error(self):
self._assert_error_on_malformed_bounds([[0, 10, 20]], [[0, 10]])
def test_too_many_lon_bounds_error(self):
self._assert_error_on_malformed_bounds([[0, 10]], [[0, 10, 20]])
def _assert_error_on_malformed_bounds(self, lat_bnds, lon_bnds):
with self.assertRaisesRegex(
ValueError, r"Bounds must be \[n,2\] array"
):
_quadrant_area(np.array(lat_bnds), np.array(lon_bnds), 1.0)
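def _manual_quadrant_area_check():
    # Hedged cross-check (not part of the Iris test suite): the area of a
    # spherical quadrant is R**2 * (lon2 - lon1) * (sin(lat2) - sin(lat1));
    # this recomputes the 0-10 degree case from test_area_in_north above.
    lat1, lat2 = np.deg2rad([0.0, 10.0])
    dlon = np.deg2rad(10.0)
    area = DEFAULT_SPHERICAL_EARTH_RADIUS ** 2 * dlon * (np.sin(lat2) - np.sin(lat1))
    np.testing.assert_allclose(area, 1228800593851.443, rtol=1e-6)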
if __name__ == "__main__":
tests.main() |
test preloop | """
Console module tests
"""
import contextlib
import io
import os
import tempfile
import unittest
from txtai.console import Console
from txtai.embeddings import Embeddings
APPLICATION = """
path: %s
workflow:
test:
tasks:
- task: console
"""
class TestConsole(unittest.TestCase):
"""
Console tests.
"""
@classmethod
def setUpClass(cls):
"""
Initialize test data.
"""
cls.data = [
"US tops 5 million confirmed virus cases",
"Canada's last fully intact ice shelf has suddenly collapsed, forming a Manhattan-sized iceberg",
"Beijing mobilises invasion craft along coast as Taiwan tensions escalate",
"The National Park Service warns against sacrificing slower friends in a bear attack",
"Maine man wins $1M from $25 lottery ticket",
"Make huge profits without work, earn up to $100,000 a day",
]
# Create embeddings model, backed by sentence-transformers & transformers
cls.embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2", "content": True})
# Create an index for the list of text
cls.embeddings.index([(uid, text, None) for uid, text in enumerate(cls.data)])
# Create app paths
cls.apppath = os.path.join(tempfile.gettempdir(), "console.yml")
cls.embedpath = os.path.join(tempfile.gettempdir(), "embeddings.console")
# Create app.yml
with open(cls.apppath, "w", encoding="utf-8") as out:
out.write(APPLICATION % cls.embedpath)
# Save index as uncompressed and compressed
cls.embeddings.save(cls.embedpath)
cls.embeddings.save(f"{cls.embedpath}.tar.gz")
# Create console
cls.console = Console(cls.embedpath)
def testApplication(self):
"""
Test application
"""
self.assertNotIn("Traceback", self.command(f".load {self.apppath}"))
self.assertIn("1", self.command(".limit 1"))
self.assertIn("Maine man wins", self.command("feel good story"))
def testConfig(self):
"""
Test .config command
"""
self.assertIn("tasks", self.command(".config"))
def testEmbeddings(self):
"""
Test embeddings index
"""
self.assertNotIn("Traceback", self.command(f".load {self.embedpath}.tar.gz"))
self.assertNotIn("Traceback", self.command(f".load {self.embedpath}"))
self.assertIn("1", self.command(".limit 1"))
self.assertIn("Maine man wins", self.command("feel good story"))
def testEmbeddingsNoDatabase(self):
"""
Test embeddings with no database/content
"""
console = Console()
# Create embeddings model, backed by sentence-transformers & transformers
embeddings = Embeddings({"path": "sentence-transformers/nli-mpnet-base-v2"})
# Create an index for the list of text
embeddings.index([(uid, text, None) for uid, text in enumerate(self.data)])
# Set embeddings on console
console.app = embeddings
self.assertIn("4", self.command("feel good story", console))
def testEmpty(self):
"""
Test empty console instance
"""
console = Console()
self.assertIn("AttributeError", self.command("search", console))
def testHighlight(self):
"""
Test .highlight command
"""
self.assertIn("highlight", self.command(".highlight"))
self.assertIn("wins", self.command("feel good story"))
self.assertIn("Taiwan", self.command("asia"))
def METHOD_NAME(self):
"""
Test preloop
"""
self.assertIn("txtai console", self.preloop())
def testWorkflow(self):
"""
Test .workflow command
"""
self.command(f".load {self.apppath}")
self.assertIn("echo", self.command(".workflow test echo"))
def command(self, command, console=None):
"""
Runs a console command.
Args:
command: command to run
console: console instance, defaults to self.console
Returns:
command output
"""
# Run info
output = io.StringIO()
with contextlib.redirect_stdout(output):
if not console:
console = self.console
console.onecmd(command)
return output.getvalue()
def preloop(self):
"""
Runs console.preloop and redirects stdout.
Returns:
preloop output
"""
# Run info
output = io.StringIO()
with contextlib.redirect_stdout(output):
self.console.preloop()
return output.getvalue() |
match resource sub properties | """
Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
"""
import regex as re
from cfnlint.helpers import RESOURCE_SPECS
from cfnlint.rules import CloudFormationLintRule, RuleMatch
class AllowedPattern(CloudFormationLintRule):
"""Check if properties have a valid value"""
id = "E3031"
shortdesc = "Check if property values adhere to a specific pattern"
description = "Check if properties have a valid value in case of a pattern (Regular Expression)"
source_url = "https://github.com/awslabs/cfn-python-lint/blob/main/docs/cfn-resource-specification.md#allowedpattern"
tags = ["resources", "property", "allowed pattern", "regex"]
def __init__(self):
"""Init"""
super().__init__()
self.config_definition = {
"exceptions": {
"default": [],
"type": "list",
"itemtype": "string",
}
}
self.configure()
def initialize(self, cfn):
"""Initialize the rule"""
for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
"ResourceTypes"
):
self.resource_property_types.append(resource_type_spec)
for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get(
"PropertyTypes"
):
self.resource_sub_property_types.append(property_type_spec)
def check_value(self, value, path, property_name, **kwargs):
"""Check Value"""
matches = []
# Get the Allowed Pattern Regex
value_pattern_regex = kwargs.get("value_specs", {}).get(
"AllowedPatternRegex", {}
)
# Get the "Human Readable" version for the error message. Optional, if not specified,
# the RegEx itself is used.
value_pattern = kwargs.get("value_specs", {}).get(
"AllowedPattern", value_pattern_regex
)
if isinstance(value, (int, float)):
value = str(value)
if isinstance(value, str):
if value_pattern_regex:
regex = re.compile(value_pattern_regex, re.ASCII)
# Ignore values with dynamic references. Simple check to prevent false-positives
# See: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/dynamic-references.html
if "{{resolve:" not in value:
if not regex.match(value):
for exception in self.config.get("exceptions"):
exception_regex = re.compile(exception)
if exception_regex.match(value):
return matches
full_path = "/".join(str(x) for x in path)
message = "{} contains invalid characters (Pattern: {}) at {}"
matches.append(
RuleMatch(
path,
message.format(property_name, value_pattern, full_path),
)
)
return matches
def check(self, cfn, properties, value_specs, property_specs, path):
"""Check itself"""
matches = []
for p_value, p_path in properties.items_safe(path[:]):
for prop in p_value:
if prop in value_specs:
value = value_specs.get(prop).get("Value", {})
if value:
value_type = value.get("ValueType", "")
property_type = (
property_specs.get("Properties").get(prop).get("Type")
)
value_specs = (
RESOURCE_SPECS.get(cfn.regions[0])
.get("ValueTypes")
.get(value_type, {})
)
if value_specs == "CACHED":
value_specs = (
RESOURCE_SPECS.get("us-east-1")
.get("ValueTypes")
.get(value_type, {})
)
matches.extend(
cfn.check_value(
p_value,
prop,
p_path,
check_value=self.check_value,
value_specs=value_specs,
cfn=cfn,
property_type=property_type,
property_name=prop,
)
)
return matches
def METHOD_NAME(self, properties, property_type, path, cfn):
"""Match for sub properties"""
matches = []
specs = (
RESOURCE_SPECS.get(cfn.regions[0])
.get("PropertyTypes")
.get(property_type, {})
.get("Properties", {})
)
property_specs = (
RESOURCE_SPECS.get(cfn.regions[0]).get("PropertyTypes").get(property_type)
)
matches.extend(self.check(cfn, properties, specs, property_specs, path))
return matches
def match_resource_properties(self, properties, resource_type, path, cfn):
"""Check CloudFormation Properties"""
matches = []
specs = (
RESOURCE_SPECS.get(cfn.regions[0])
.get("ResourceTypes")
.get(resource_type, {})
.get("Properties", {})
)
resource_specs = (
RESOURCE_SPECS.get(cfn.regions[0]).get("ResourceTypes").get(resource_type)
)
matches.extend(self.check(cfn, properties, specs, resource_specs, path))
return matches |
data tree1 | """
merge_data_tree
===============
Autogenerated DPF operator classes.
"""
from warnings import warn
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs
from ansys.dpf.core.operators.specification import PinSpecification, Specification
class merge_data_tree(Operator):
"""Merges a list of data trees. Attributes names shouldn't be shared
accross data tree instances.
Parameters
----------
data_tree1 : DataTree
Either a vector of data trees or data trees
from pin 0 to ... to merge.
data_tree2 : DataTree
Either a vector of data trees or data trees
from pin 0 to ... to merge.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.utility.merge_data_tree()
>>> # Make input connections
>>> my_data_tree1 = dpf.DataTree()
>>> op.inputs.data_tree1.connect(my_data_tree1)
>>> my_data_tree2 = dpf.DataTree()
>>> op.inputs.data_tree2.connect(my_data_tree2)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.utility.merge_data_tree(
... data_tree1=my_data_tree1,
... data_tree2=my_data_tree2,
... )
>>> # Get output data
>>> result_any = op.outputs.any()
"""
def __init__(self, METHOD_NAME=None, data_tree2=None, config=None, server=None):
super().__init__(name="merge::data_tree", config=config, server=server)
self._inputs = InputsMergeDataTree(self)
self._outputs = OutputsMergeDataTree(self)
if METHOD_NAME is not None:
self.inputs.METHOD_NAME.connect(METHOD_NAME)
if data_tree2 is not None:
self.inputs.data_tree2.connect(data_tree2)
@staticmethod
def _spec():
description = """Merges a list of data trees. Attributes names shouldn't be shared
accross data tree instances."""
spec = Specification(
description=description,
map_input_pin_spec={
0: PinSpecification(
name="data_tree",
type_names=["data_tree"],
optional=False,
document="""Either a vector of data trees or data trees
from pin 0 to ... to merge.""",
),
1: PinSpecification(
name="data_tree",
type_names=["data_tree"],
optional=False,
document="""Either a vector of data trees or data trees
from pin 0 to ... to merge.""",
),
},
map_output_pin_spec={
0: PinSpecification(
name="any",
type_names=["any"],
optional=False,
document="""""",
),
},
)
return spec
@staticmethod
def default_config(server=None):
"""Returns the default config of the operator.
        This config can then be changed to the user's needs and used to
        instantiate the operator. The Configuration allows customizing
        how the operator processes the operation.
Parameters
----------
server : server.DPFServer, optional
Server with channel connected to the remote or local instance. When
``None``, attempts to use the global server.
"""
return Operator.default_config(name="merge::data_tree", server=server)
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsMergeDataTree
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluating it
Returns
--------
outputs : OutputsMergeDataTree
"""
return super().outputs
class InputsMergeDataTree(_Inputs):
"""Intermediate class used to connect user inputs to
merge_data_tree operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_data_tree()
>>> my_data_tree1 = dpf.DataTree()
>>> op.inputs.data_tree1.connect(my_data_tree1)
>>> my_data_tree2 = dpf.DataTree()
>>> op.inputs.data_tree2.connect(my_data_tree2)
"""
def __init__(self, op: Operator):
super().__init__(merge_data_tree._spec().inputs, op)
self._data_tree1 = Input(merge_data_tree._spec().input_pin(0), 0, op, 0)
self._inputs.append(self._data_tree1)
self._data_tree2 = Input(merge_data_tree._spec().input_pin(1), 1, op, 1)
self._inputs.append(self._data_tree2)
@property
def METHOD_NAME(self):
"""Allows to connect data_tree1 input to the operator.
Either a vector of data trees or data trees
from pin 0 to ... to merge.
Parameters
----------
my_data_tree1 : DataTree
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_data_tree()
>>> op.inputs.data_tree1.connect(my_data_tree1)
>>> # or
>>> op.inputs.data_tree1(my_data_tree1)
"""
return self._data_tree1
@property
def data_tree2(self):
"""Allows to connect data_tree2 input to the operator.
Either a vector of data trees or data trees
from pin 0 to ... to merge.
Parameters
----------
my_data_tree2 : DataTree
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_data_tree()
>>> op.inputs.data_tree2.connect(my_data_tree2)
>>> # or
>>> op.inputs.data_tree2(my_data_tree2)
"""
return self._data_tree2
class OutputsMergeDataTree(_Outputs):
"""Intermediate class used to get outputs from
merge_data_tree operator.
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_data_tree()
>>> # Connect inputs : op.inputs. ...
>>> result_any = op.outputs.any()
"""
def __init__(self, op: Operator):
super().__init__(merge_data_tree._spec().outputs, op)
self._any = Output(merge_data_tree._spec().output_pin(0), 0, op)
self._outputs.append(self._any)
@property
def any(self):
"""Allows to get any output of the operator
Returns
----------
my_any : Any
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.utility.merge_data_tree()
>>> # Connect inputs : op.inputs. ...
>>> result_any = op.outputs.any()
""" # noqa: E501
return self._any |
test periodic value is parameterized | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import sympy
import cirq
def test_periodic_value_equality():
eq = cirq.testing.EqualsTester()
eq.add_equality_group(
cirq.PeriodicValue(1, 2),
cirq.PeriodicValue(1, 2),
cirq.PeriodicValue(3, 2),
cirq.PeriodicValue(3, 2),
cirq.PeriodicValue(5, 2),
cirq.PeriodicValue(-1, 2),
)
eq.add_equality_group(cirq.PeriodicValue(1.5, 2.0), cirq.PeriodicValue(1.5, 2.0))
eq.add_equality_group(cirq.PeriodicValue(0, 2))
eq.add_equality_group(cirq.PeriodicValue(1, 3))
eq.add_equality_group(cirq.PeriodicValue(2, 4))
def test_periodic_value_approx_eq_basic():
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.0), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.0), atol=0.1)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.2), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.0, 2.2), atol=0.1)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.2), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(1.0, 2.0), cirq.PeriodicValue(1.2, 2.2), atol=0.1)
def test_periodic_value_approx_eq_normalized():
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 3.0), cirq.PeriodicValue(4.1, 3.0), atol=0.2)
assert cirq.approx_eq(cirq.PeriodicValue(1.0, 3.0), cirq.PeriodicValue(-2.1, 3.0), atol=0.2)
def test_periodic_value_approx_eq_boundary():
assert cirq.approx_eq(cirq.PeriodicValue(0.0, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.2)
assert cirq.approx_eq(cirq.PeriodicValue(0.1, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.3)
assert cirq.approx_eq(cirq.PeriodicValue(1.9, 2.0), cirq.PeriodicValue(0.1, 2.0), atol=0.3)
assert not cirq.approx_eq(cirq.PeriodicValue(0.1, 2.0), cirq.PeriodicValue(1.9, 2.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(0, 1.0), cirq.PeriodicValue(0.5, 1.0), atol=0.6)
assert not cirq.approx_eq(cirq.PeriodicValue(0, 1.0), cirq.PeriodicValue(0.5, 1.0), atol=0.1)
assert cirq.approx_eq(cirq.PeriodicValue(0.4, 1.0), cirq.PeriodicValue(0.6, 1.0), atol=0.3)
def test_periodic_value_types_mismatch():
assert not cirq.approx_eq(cirq.PeriodicValue(0.0, 2.0), 0.0, atol=0.2)
assert not cirq.approx_eq(0.0, cirq.PeriodicValue(0.0, 2.0), atol=0.2)
@pytest.mark.parametrize(
'value, is_parameterized, parameter_names',
[
(cirq.PeriodicValue(1.0, 3.0), False, set()),
(cirq.PeriodicValue(0.0, sympy.Symbol('p')), True, {'p'}),
(cirq.PeriodicValue(sympy.Symbol('v'), 3.0), True, {'v'}),
(cirq.PeriodicValue(sympy.Symbol('v'), sympy.Symbol('p')), True, {'p', 'v'}),
],
)
@pytest.mark.parametrize('resolve_fn', [cirq.resolve_parameters, cirq.resolve_parameters_once])
def METHOD_NAME(value, is_parameterized, parameter_names, resolve_fn):
assert cirq.is_parameterized(value) == is_parameterized
assert cirq.parameter_names(value) == parameter_names
resolved = resolve_fn(value, {p: 1 for p in parameter_names})
assert not cirq.is_parameterized(resolved)
@pytest.mark.parametrize(
'val',
[
cirq.PeriodicValue(0.4, 1.0),
cirq.PeriodicValue(0.0, 2.0),
cirq.PeriodicValue(1.0, 3),
cirq.PeriodicValue(-2.1, 3.0),
cirq.PeriodicValue(sympy.Symbol('v'), sympy.Symbol('p')),
cirq.PeriodicValue(2.0, sympy.Symbol('p')),
cirq.PeriodicValue(sympy.Symbol('v'), 3),
],
)
def test_periodic_value_repr(val):
cirq.testing.assert_equivalent_repr(val) |
reraise | # -*- coding: utf-8 -*-
"""
jinja2._compat
~~~~~~~~~~~~~~
Some py2/py3 compatibility support based on a stripped down
version of six so we don't have to depend on a specific version
of it.
:copyright: Copyright 2013 by the Jinja team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
PY2 = sys.version_info[0] == 2
PYPY = hasattr(sys, 'pypy_translation_info')
_identity = lambda x: x
if not PY2:
unichr = chr
range_type = range
text_type = str
string_types = (str,)
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
import pickle
from io import BytesIO, StringIO
NativeStringIO = StringIO
def METHOD_NAME(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
ifilter = filter
imap = map
izip = zip
intern = sys.intern
implements_iterator = _identity
implements_to_string = _identity
encode_filename = _identity
get_next = lambda x: x.__next__
else:
unichr = unichr
text_type = unicode
range_type = xrange
string_types = (str, unicode)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
import cPickle as pickle
from cStringIO import StringIO as BytesIO, StringIO
NativeStringIO = BytesIO
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
from itertools import imap, izip, ifilter
intern = intern
def implements_iterator(cls):
cls.next = cls.__next__
del cls.__next__
return cls
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
get_next = lambda x: x.next
def encode_filename(filename):
if isinstance(filename, unicode):
return filename.encode('utf-8')
return filename
try:
next = next
except NameError:
def next(it):
return it.next()
def with_metaclass(meta, *bases):
# This requires a bit of explanation: the basic idea is to make a
    # dummy metaclass for one level of class instantiation that replaces
# itself with the actual metaclass. Because of internal type checks
# we also need to make sure that we downgrade the custom metaclass
    # for one level to something closer to type (that's why __call__ and
    # __init__ come back from type etc.).
#
# This has the advantage over six.with_metaclass in that it does not
# introduce dummy classes into the final MRO.
class metaclass(meta):
__call__ = type.__call__
__init__ = type.__init__
def __new__(cls, name, this_bases, d):
if this_bases is None:
return type.__new__(cls, name, (), d)
return meta(name, bases, d)
return metaclass('temporary_class', None, {})
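# Illustrative use of with_metaclass (the classes below are hypothetical), sketching how
# a class can declare a custom metaclass in a way that works on both Python 2 and 3:
#
#   class Meta(type):
#       pass
#
#   class Widget(with_metaclass(Meta, object)):
#       pass
#
# type(Widget) is then Meta, and no temporary helper class appears in Widget.__mro__.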
try:
from collections import Mapping as mapping_types
except ImportError:
import UserDict
mapping_types = (UserDict.UserDict, UserDict.DictMixin, dict)
# Common types. These also exist in the special `types` module, which, however,
# is not available in IronPython out of the box. Doing it this way also avoids
# dealing with implementation-specific details here.
class _C(object):
def method(self): pass
def _func():
yield None
function_type = type(_func)
generator_type = type(_func())
method_type = type(_C().method)
code_type = type(_C.method.__code__)
try:
raise TypeError()
except TypeError:
_tb = sys.exc_info()[2]
traceback_type = type(_tb)
frame_type = type(_tb.tb_frame)
try:
from urllib.parse import quote_from_bytes as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from thread import allocate_lock
except ImportError:
try:
from threading import Lock as allocate_lock
except ImportError:
from dummy_thread import allocate_lock |
product | ''' BLS Multi-Signatures
| From: "Dan Boneh, Manu Drijvers, Gregory Neven. BLS Multi-Signatures With Public-Key Aggregation".
| Available from: https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html
* type: signature (identity-based)
* setting: bilinear groups (asymmetric)
:Authors: Lovesh Harchandani
:Date: 5/2018
'''
from functools import reduce
from charm.toolbox.pairinggroup import PairingGroup, ZR, G1, G2, pair
from charm.core.engine.util import objectToBytes
debug = False
class BLSAggregation:
def __init__(self, groupObj):
global group
group = groupObj
def keygen(self, g, secparam=None):
x = group.random()
g_x = g ** x
pk = {'g^x': g_x, 'g': g, 'identity': str(g_x), 'secparam': secparam}
sk = {'x': x}
return pk, sk
def sign(self, x, message):
M = self.dump(message)
if debug:
print("Message => '%s'" % M)
return group.hash(M, G1) ** x
def verify(self, pk, sig, message):
M = self.dump(message)
h = group.hash(M, G1)
return pair(pk['g'], sig) == pair(h, pk['g^x'])
def aggregate_sigs_vulnerable(self, signatures):
"""
        This method of aggregation is vulnerable to a rogue public key attack
"""
return self.METHOD_NAME(signatures)
def verify_aggregate_sig_vulnerable(self, message, aggregate_sig, public_keys):
        # This method of verification is vulnerable to a rogue public key attack
g = self.check_and_return_same_generator_in_public_keys(public_keys)
M = self.dump(message)
h = group.hash(M, G1)
combined_pk = self.METHOD_NAME([pk['g^x'] for pk in public_keys])
return pair(g, aggregate_sig) == pair(combined_pk, h)
def aggregate_sigs_safe(self, pubkey_signatures):
        # This method of aggregation is resistant to a rogue public key attack
sigs = []
all_pubkeys = [i[0] for i in pubkey_signatures]
for pk, sig in pubkey_signatures:
e = sig ** self.hash_keys(pk, all_pubkeys)
sigs.append(e)
return self.METHOD_NAME(sigs)
def verify_aggregate_sig_safe(self, message, aggregate_sig, public_keys):
        # This method of verification is resistant to a rogue public key attack
g = self.check_and_return_same_generator_in_public_keys(public_keys)
aggregated_pk = self.aggregate_pub_key(public_keys)
M = self.dump(message)
h = group.hash(M, G1)
return pair(g, aggregate_sig) == pair(aggregated_pk, h)
@staticmethod
def METHOD_NAME(seq):
return reduce(lambda x, y: x * y, seq)
@staticmethod
def dump(obj):
return objectToBytes(obj, group)
@staticmethod
def check_and_return_same_generator_in_public_keys(public_keys):
gs = {pk['g'] for pk in public_keys}
assert len(gs) == 1, 'All public keys should have same generator'
return next(iter(gs))
@staticmethod
def hash_keys(pk, all_pks):
acc = BLSAggregation.dump(pk['g^x'])
for p in all_pks:
acc += BLSAggregation.dump(p['g^x'])
return group.hash(acc, ZR)
@staticmethod
def aggregate_pub_key(pks):
r = []
for pk in pks:
h = BLSAggregation.hash_keys(pk, pks)
r.append(pk['g^x'] ** h)
return BLSAggregation.METHOD_NAME(r)
def vulnerable():
groupObj = PairingGroup('MNT224')
m = {'a': "hello world!!!", 'b': "test message"}
bls = BLSAggregation(groupObj)
g = group.random(G2)
pk1, sk1 = bls.keygen(g)
pk2, sk2 = bls.keygen(g)
pk3, sk3 = bls.keygen(g)
sig1 = bls.sign(sk1['x'], m)
sig2 = bls.sign(sk2['x'], m)
sig3 = bls.sign(sk3['x'], m)
if debug:
print("Message: '%s'" % m)
print("Signature1: '%s'" % sig1)
print("Signature2: '%s'" % sig2)
print("Signature3: '%s'" % sig3)
assert bls.verify(pk1, sig1, m), 'Failure!!!'
assert bls.verify(pk2, sig2, m), 'Failure!!!'
assert bls.verify(pk3, sig3, m), 'Failure!!!'
if debug:
print('VERIFICATION SUCCESS!!!')
aggregate_sig = bls.aggregate_sigs_vulnerable([sig1, sig2, sig3])
if debug:
print("Aggregate signature: '%s'" % aggregate_sig)
assert bls.verify_aggregate_sig_vulnerable(m, aggregate_sig, [pk1, pk2, pk3]), \
'Failure!!!'
if debug:
print('AGGREGATION VERIFICATION SUCCESS!!!')
assert not bls.verify_aggregate_sig_vulnerable(m, aggregate_sig, [pk1, pk2])
if debug:
print('AGGREGATION VERIFICATION SUCCESS AGAIN!!!')
def demo_rogue_public_key_attack():
# Attack mentioned here https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html
groupObj = PairingGroup('MNT224')
m = {'a': "hello world!!!", 'b': "test message"}
bls = BLSAggregation(groupObj)
g = group.random(G2)
pk0, sk0 = bls.keygen(g)
pk1, sk1 = bls.keygen(g)
    # Construct the attacker's public key (pk2) as `g^beta * (pk0*pk1)^-1`,
    # i.e. g^beta times the inverse of the product of the honest public keys
    # that the attacker wants to forge the multi-sig over
pk_inverse = 1 / (BLSAggregation.METHOD_NAME([pk0['g^x'], pk1['g^x']]))
beta = group.random()
pk2, _ = bls.keygen(g)
pk2['g^x'] = (g ** beta) * pk_inverse
M = BLSAggregation.dump(m)
h = group.hash(M, G1)
fake_aggregate_sig = h ** beta
assert bls.verify_aggregate_sig_vulnerable(m, fake_aggregate_sig, [pk0, pk1, pk2]), \
'Failure!!!'
if debug:
print('ROGUE PUBLIC KEY ATTACK SUCCESS!!!')
def safe():
groupObj = PairingGroup('MNT224')
m = {'a': "hello world!!!", 'b': "test message"}
bls = BLSAggregation(groupObj)
g = group.random(G2)
pk1, sk1 = bls.keygen(g)
pk2, sk2 = bls.keygen(g)
pk3, sk3 = bls.keygen(g)
sig1 = bls.sign(sk1['x'], m)
sig2 = bls.sign(sk2['x'], m)
sig3 = bls.sign(sk3['x'], m)
if debug:
print("Message: '%s'" % m)
print("Signature1: '%s'" % sig1)
print("Signature2: '%s'" % sig2)
print("Signature3: '%s'" % sig3)
assert bls.verify(pk1, sig1, m), 'Failure!!!'
assert bls.verify(pk2, sig2, m), 'Failure!!!'
assert bls.verify(pk3, sig3, m), 'Failure!!!'
if debug:
print('VERIFICATION SUCCESS!!!')
aggregate_sig = bls.aggregate_sigs_safe([(pk1, sig1), (pk2, sig2),
(pk3, sig3)])
if debug:
print("Aggregate signature: '%s'" % aggregate_sig)
assert bls.verify_aggregate_sig_safe(m, aggregate_sig, [pk1, pk2, pk3]), \
'Failure!!!'
if debug:
print('NEW AGGREGATION VERIFICATION SUCCESS!!!')
assert not bls.verify_aggregate_sig_safe(m, aggregate_sig, [pk1, pk2])
if debug:
print('NEW AGGREGATION VERIFICATION SUCCESS AGAIN!!!')
def defend_rogue_public_key_attack():
# Defence mentioned here https://crypto.stanford.edu/~dabo/pubs/papers/BLSmultisig.html
groupObj = PairingGroup('MNT224')
m = {'a': "hello world!!!", 'b': "test message"}
bls = BLSAggregation(groupObj)
g = group.random(G2)
pk0, sk0 = bls.keygen(g)
pk1, sk1 = bls.keygen(g)
    # Construct the attacker's public key (pk2) as `g^beta * (pk0*pk1)^-1`,
    # i.e. g^beta times the inverse of the product of the honest public keys
    # that the attacker wants to forge the multi-sig over
pk_inverse = 1 / (BLSAggregation.METHOD_NAME([pk0['g^x'], pk1['g^x']]))
beta = group.random()
pk2, _ = bls.keygen(g)
pk2['g^x'] = (g ** beta) * pk_inverse
M = BLSAggregation.dump(m)
h = group.hash(M, G1)
fake_aggregate_sig = h ** beta
assert not bls.verify_aggregate_sig_safe(m, fake_aggregate_sig, [pk0, pk1, pk2]), \
'Failure!!!'
if debug:
print('ROGUE PUBLIC KEY ATTACK DEFENDED!!!')
if __name__ == "__main__":
debug = True
vulnerable()
demo_rogue_public_key_attack()
safe()
defend_rogue_public_key_attack() |
test shapes invalid bidx | """Tests for ``$ rio shapes``."""
import json
import re
import numpy as np
import pytest
from shapely.geometry import shape
import rasterio
from rasterio.rio.main import main_group
DEFAULT_SHAPE = (10, 10)
def bbox(*args):
return ' '.join([str(x) for x in args])
def test_shapes(runner, pixelated_image_file):
with pytest.warns(None):
result = runner.invoke(main_group, ['shapes', '--collection', pixelated_image_file])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 4
assert np.allclose(
json.loads(result.output)['features'][0]['geometry']['coordinates'],
[[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]])
def METHOD_NAME(runner, pixelated_image_file):
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--bidx', 4])
assert result.exit_code == 1
# Underlying exception message trapped by shapes
def test_shapes_sequence(runner, pixelated_image_file):
"""
--sequence option should produce 4 features in series rather than
inside a feature collection.
"""
with pytest.warns(None):
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--sequence'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 0
assert result.output.count('"Feature"') == 4
assert result.output.count('\n') == 4
def test_shapes_sequence_rs(runner, pixelated_image_file):
""" --rs option should use the feature separator character. """
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--sequence', '--rs'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 0
assert result.output.count('"Feature"') == 4
assert result.output.count(u'\u001e') == 4
def test_shapes_with_nodata(runner, pixelated_image, pixelated_image_file):
"""
An area of nodata should also be represented with a shape when using
--with-nodata option
"""
pixelated_image[0:2, 8:10] = 255
with rasterio.open(pixelated_image_file, 'r+') as out:
out.write(pixelated_image, indexes=1)
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--with-nodata'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 5
def test_shapes_indent(runner, pixelated_image_file):
"""
--indent option should produce lots of newlines and contiguous spaces
"""
with pytest.warns(None):
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--indent', 2])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 4
assert result.output.count("\n") > 100
assert result.output.count(" ") > 100
def test_shapes_compact(runner, pixelated_image_file):
with pytest.warns(None):
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--compact'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 4
assert result.output.count(', ') == 0
assert result.output.count(': ') == 0
def test_shapes_sampling(runner, pixelated_image_file):
""" --sampling option should remove the single pixel features """
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--sampling', 2])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 2
def test_shapes_precision(runner, pixelated_image_file):
""" Output numbers should have no more than 1 decimal place """
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--precision', 1])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 4
assert re.search(r'\s\d*\.\d{2,}', result.output) is None
def test_shapes_mask(runner, pixelated_image, pixelated_image_file):
""" --mask should extract the nodata area of the image """
pixelated_image[0:5, 0:10] = 255
pixelated_image[0:10, 0:3] = 255
pixelated_image[8:10, 8:10] = 255
with rasterio.open(pixelated_image_file, 'r+') as out:
out.write(pixelated_image, indexes=1)
with pytest.warns(None):
result = runner.invoke(
main_group, ['shapes', '--collection', pixelated_image_file, '--mask'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 1
assert shape(json.loads(result.output)["features"][0]["geometry"]).area == 31.0
def test_shapes_mask_sampling(runner, pixelated_image, pixelated_image_file):
"""using --sampling with the mask should snap coordinates to the nearest
factor of 5
"""
pixelated_image[0:5, 0:10] = 255
pixelated_image[0:10, 0:3] = 255
pixelated_image[8:10, 8:10] = 255
with rasterio.open(pixelated_image_file, 'r+') as out:
out.write(pixelated_image, indexes=1)
with pytest.warns(None):
result = runner.invoke(
main_group,
['shapes', '--collection', pixelated_image_file, '--mask', '--sampling', 5])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 1
assert shape(json.loads(result.output)["features"][0]["geometry"]).area == 25.0
def test_shapes_band1_as_mask(runner, pixelated_image, pixelated_image_file):
"""
When using --as-mask option, pixel value should not matter, only depends
on pixels being contiguous.
"""
pixelated_image[2:3, 2:3] = 4
with rasterio.open(pixelated_image_file, 'r+') as out:
out.write(pixelated_image, indexes=1)
with pytest.warns(None):
result = runner.invoke(
main_group,
['shapes', '--collection', pixelated_image_file, '--band', '--bidx', '1', '--as-mask'])
assert result.exit_code == 0
assert result.output.count('"FeatureCollection"') == 1
assert result.output.count('"Feature"') == 3
assert shape(json.loads(result.output)["features"][0]["geometry"]).area == 1.0 |
test find violations | # Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for LogSinkScanner."""
from builtins import range
import json
import unittest
import unittest.mock as mock
from tests.scanner.test_data import fake_log_sink_scanner_data as flsd
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.scanner.scanners import log_sink_scanner
def _mock_gcp_resource_iter(_, resource_type):
"""Creates a list of GCP resource mocks retrieved by the scanner."""
resources = []
if resource_type == 'sink':
for resource in flsd.LOG_SINK_RESOURCES:
sink = mock.MagicMock()
# For testing the scanner, only look at the sink name.
sink.data = json.dumps({'name': resource['sink_name']})
sink.parent_type_name = resource['parent']
sink.parent = mock.MagicMock()
resources.append(sink)
else:
for full_name in flsd.GCP_RESOURCES[resource_type]:
parent = mock.MagicMock()
name = full_name.split('/')[-2]
parent.name = name
parent.type_name = resource_type + '/' + name
parent.full_name = full_name
resources.append(parent)
return resources
class LogSinkScannerTest(ForsetiTestCase):
@mock.patch(
'google.cloud.forseti.scanner.scanners.log_sink_scanner.'
'log_sink_rules_engine',
autospec=True)
def setUp(self, mock_rules_engine):
self.scanner = log_sink_scanner.LogSinkScanner(
{}, {}, mock.MagicMock(), '', '', '')
def test_retrieve(self):
"""Tests _retrieve gets all log sinks and parent resources."""
mock_data_access = mock.MagicMock()
mock_data_access.scanner_iter.side_effect = _mock_gcp_resource_iter
mock_service_config = mock.MagicMock()
mock_service_config.model_manager = mock.MagicMock()
mock_service_config.model_manager.get.return_value = (
mock.MagicMock(), mock_data_access)
self.scanner.service_config = mock_service_config
log_sink_data = self.scanner._retrieve()
expected_parents = [
'organization/234/',
'organization/234/billing_account/ABCD-1234/',
'organization/234/folder/56/',
'organization/234/project/proj-1/',
'organization/234/folder/56/project/proj-2/',
'organization/234/project/proj-3/',
]
expected_log_sinks = [
['org_sink_1', 'org_sink_2'],
['billing_sink'],
['folder_sink'],
[],
['p2_sink_1', 'p2_sink_2'],
['p3_sink'],
]
self.assertEqual(len(expected_log_sinks), len(log_sink_data))
for i in range(len(expected_log_sinks)):
actual_parent, actual_log_sink_configs = log_sink_data[i]
self.assertEqual(expected_parents[i], actual_parent.full_name)
actual_log_sinks = [sink.id for sink in actual_log_sink_configs]
self.assertEqual(expected_log_sinks[i], actual_log_sinks)
def METHOD_NAME(self):
"""Tests _find_violations passes log sink configs to the rule engine."""
log_sink_data = [
('resource-1', 'resource-1-log-sinks'),
('resource-2', 'resource-2-log-sinks'),
('resource-3', 'resource-3-log-sinks')
]
self.scanner.rules_engine.find_violations.side_effect = [
['viol-1', 'viol-2'], [], ['viol-3']]
violations = self.scanner._find_violations(log_sink_data)
self.scanner.rules_engine.find_violations.assert_has_calls(
[mock.call(parent, data) for parent, data in log_sink_data])
self.assertEqual(['viol-1', 'viol-2', 'viol-3'], violations)
@mock.patch.object(
log_sink_scanner.LogSinkScanner,
'_output_results_to_db', autospec=True)
def test_output_results(self, mock_output_results_to_db):
"""Tests _output_results() flattens results & writes them to db."""
self.scanner._output_results(flsd.LOG_SINK_VIOLATIONS)
mock_output_results_to_db.assert_called_once_with(
self.scanner, flsd.FLATTENED_LOG_SINK_VIOLATIONS)
if __name__ == '__main__':
unittest.main() |
check sub dim positive |
from collections import defaultdict
from sage.all import cached_function, psi, RR, Integer, prod
from psycopg2.sql import SQL, Identifier
from .verification import TableChecker, overall
@cached_function
def kbarbar(weight):
# The weight part of the analytic conductor
return psi(RR(weight)/2).exp() / (2*RR.pi())
def analytic_conductor(level, weight):
return level * kbarbar(weight)**2
def check_analytic_conductor(level, weight, analytic_conductor_stored, verbose=False, threshold=1e-12):
success = (abs(analytic_conductor(level, weight) - analytic_conductor_stored)/analytic_conductor(level, weight)) < threshold
if not success and verbose:
print("Analytic conductor failure", analytic_conductor(level, weight), analytic_conductor_stored)
return success
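# Minimal usage sketch (numbers are arbitrary): a stored value passes when it agrees
# with the recomputed analytic conductor up to the relative threshold, and fails when
# it is perturbed beyond it.
#
#   ac = analytic_conductor(11, 2)
#   assert check_analytic_conductor(11, 2, ac)
#   assert not check_analytic_conductor(11, 2, ac * 1.001)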
@cached_function
def level_attributes(level):
# returns level_radical, level_primes, level_is_prime, level_is_prime_power, level_is_squarefree, level_is_square
fact = Integer(level).factor()
level_primes = [elt[0] for elt in fact]
level_radical = prod(level_primes)
level_is_prime_power = len(fact) == 1
level_is_prime = level_is_prime_power and level_radical == level
level_is_square = all( elt[1] % 2 == 0 for elt in fact)
level_is_squarefree = all( elt[1] == 1 for elt in fact)
return [level_radical, level_primes, level_is_prime, level_is_prime_power, level_is_squarefree, level_is_square]
class MfChecker(TableChecker):
def _box_query(self, box, extras={}, drop=[]):
"""
INPUT:
- ``box`` -- a dictionary, a row in mf_boxes
- ``extras`` -- extra conditions to set on the returned query
(e.g. dim <= 20, which would be {'dim':{'$lte':20}})
"""
query = defaultdict(dict)
for bcol, col in [('N','level'), ('k', 'weight'), ('o', 'char_order'), ('Nk2', 'Nk2'), ('D', 'dim')]:
for mm, code in [('min', '$gte'), ('max', '$lte')]:
constraint = box.get(bcol + mm)
if constraint is not None:
query[col][code] = constraint
for col, D in extras.items():
for code, val in D.items():
query[col][code] = val
for col in drop:
query.pop(col, None)
return query
def _check_level(self, rec, verbose=False):
"""
check level_* attributes (radical,primes,is_prime,...)
"""
attributes = ['level_radical', 'level_primes', 'level_is_prime',
'level_is_prime_power', 'level_is_squarefree',
'level_is_square']
stored = [rec[attr] for attr in attributes]
computed = level_attributes(rec['level'])
success = stored == computed
if not success and verbose:
for attr, a, b in zip(attributes, stored, computed):
if a != b:
print(attr, a, b)
return success
hecke_orbit_code = []
@overall
def check_hecke_orbit_code(self):
"""
        check that hecke_orbit_code matches the encoding N + (k<<24) + ((i-1)<<36) + ((x-1)<<52)
"""
if self.hecke_orbit_code:
# test enabled
assert len(self.hecke_orbit_code) == 2
hoc_column = self.hecke_orbit_code[0]
if len(self.hecke_orbit_code[1]) == 4:
N_column, k_column, i_column, x_column = self.hecke_orbit_code[1]
else:
assert len(self.hecke_orbit_code[1]) == 3
x_column = None
N_column, k_column, i_column = self.hecke_orbit_code[1]
# N + (k<<24) + ((i-1)<<36) + ((x-1)<<52)
if x_column is None:
return self._run_query(SQL("{0} != {1}::bigint + ({2}::integer::bit(64)<<24)::bigint + (({3}-1)::integer::bit(64)<<36)::bigint").format(*tuple(map(Identifier, [hoc_column, N_column, k_column, i_column]))))
else:
return self._run_query(SQL("{0} != {1}::bigint + ({2}::integer::bit(64)<<24)::bigint + (({3}-1)::integer::bit(64)<<36)::bigint + (({4}-1)::integer::bit(64)<<52)::bigint").format(*tuple(map(Identifier, [hoc_column, N_column, k_column, i_column, x_column]))))
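    # Pure-Python sketch of the bit packing verified above (illustrative only; the
    # meaning of N, k, i, x follows the column names passed via hecke_orbit_code,
    # assumed to be level, weight, character orbit index and Hecke orbit index,
    # with the latter two 1-based):
    #
    #   def encode_hecke_orbit_code(N, k, i, x):
    #       return N + (k << 24) + ((i - 1) << 36) + ((x - 1) << 52)
    #
    #   assert encode_hecke_orbit_code(11, 2, 1, 1) == 11 + (2 << 24)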
class SubspacesChecker(MfChecker):
@overall
def check_sub_mul_positive(self):
"""
        check that sub_mult is positive
"""
return self._run_query(SQL("{0} <= 0").format(Identifier('sub_mult')))
@overall
def METHOD_NAME(self):
"""
        check that sub_dim is positive
"""
return self._run_query(SQL("{0} <= 0").format(Identifier('sub_dim')))
@overall
def check_level_divides(self):
"""
check that sub_level divides level
"""
return self.check_divisible('level', 'sub_level')
class TracesChecker(MfChecker):
uniqueness_constraints = [['hecke_orbit_code', 'n']]
label_col = 'hecke_orbit_code'
@overall
def check_total_count(self):
"""
        check that the total number of rows is 1000 times the number of rows in the base table
"""
return self.check_count(1000 * self.base_table.count(self.base_constraint)) |
test entity class id | ######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Unit tests for Database editor's ``graphics_items`` module.
"""
import unittest
from unittest import mock
from PySide6.QtCore import QPointF
from PySide6.QtWidgets import QApplication
from spinetoolbox.spine_db_editor.graphics_items import RelationshipItem
from spinetoolbox.spine_db_editor.widgets.spine_db_editor import SpineDBEditor
from ..mock_helpers import TestSpineDBManager
class TestRelationshipItem(unittest.TestCase):
_db_mngr = None
@classmethod
def setUpClass(cls):
# SpineDBEditor takes long to construct hence we make only one of them for the entire suite.
if not QApplication.instance():
QApplication()
def setUp(self):
with mock.patch("spinetoolbox.spine_db_editor.widgets.spine_db_editor.SpineDBEditor.restore_ui"), mock.patch(
"spinetoolbox.spine_db_editor.widgets.spine_db_editor.SpineDBEditor.show"
):
mock_settings = mock.Mock()
mock_settings.value.side_effect = lambda *args, **kwargs: 0
self._db_mngr = TestSpineDBManager(mock_settings, None)
logger = mock.MagicMock()
self._db_map = self._db_mngr.get_db_map("sqlite://", logger, codename="database", create=True)
self._spine_db_editor = SpineDBEditor(self._db_mngr, {"sqlite://": "database"})
self._spine_db_editor.pivot_table_model = mock.MagicMock()
self._db_mngr.add_object_classes({self._db_map: [{"name": "oc", "id": 1}]})
self._db_mngr.add_objects({self._db_map: [{"name": "o", "class_id": 1, "id": 1}]})
self._db_mngr.add_relationship_classes(
{self._db_map: [{"name": "rc", "id": 2, "object_class_id_list": [1], "object_class_name_list": "oc"}]}
)
self._db_mngr.add_relationships(
{
self._db_map: [
{
"name": "r",
"id": 2,
"class_id": 2,
"class_name": "rc",
"object_id_list": [1],
"object_name_list": ["o"],
}
]
}
)
with mock.patch.object(RelationshipItem, "refresh_icon"):
self._item = RelationshipItem(self._spine_db_editor, 0.0, 0.0, 0, ((self._db_map, 2),))
@classmethod
def tearDownClass(cls):
QApplication.removePostedEvents(None) # Clean up unfinished fetcher signals
def tearDown(self):
with mock.patch(
"spinetoolbox.spine_db_editor.widgets.spine_db_editor.SpineDBEditor.save_window_state"
) as mock_save_w_s, mock.patch("spinetoolbox.spine_db_manager.QMessageBox"):
self._spine_db_editor.close()
mock_save_w_s.assert_called_once()
self._db_mngr.close_all_sessions()
self._db_mngr.clean_up()
self._spine_db_editor.deleteLater()
self._spine_db_editor = None
def test_entity_type(self):
self.assertEqual(self._item.entity_type, "relationship")
def test_entity_name(self):
self.assertEqual(self._item.entity_name, "r")
def test_entity_class_type(self):
self.assertEqual(self._item.entity_class_type, "relationship_class")
def METHOD_NAME(self):
self.assertEqual(self._item.entity_class_id(self._db_map), 2)
def test_entity_class_name(self):
self.assertEqual(self._item.entity_class_name, "rc")
def test_db_map(self):
self.assertIs(self._item.first_db_map, self._db_map)
def test_entity_id(self):
self.assertEqual(self._item.entity_id(self._db_map), 2)
def test_first_db_map(self):
self.assertIs(self._item.first_db_map, self._db_map)
def test_display_data(self):
self.assertEqual(self._item.display_data, "r")
def test_display_database(self):
self.assertEqual(self._item.display_database, "database")
def test_db_maps(self):
self.assertEqual(self._item.db_maps, [self._db_map])
def test_db_map_data(self):
self.assertEqual(
self._item.db_map_data(self._db_map),
{
'name': 'r',
'id': 2,
'class_id': 2,
'class_name': 'rc',
'object_id_list': (1,),
'object_name_list': ['o'],
'object_class_id_list': (1,),
'commit_id': 2,
},
)
def test_db_map_id_equals_entity_id(self):
self.assertEqual(self._item.db_map_id(self._db_map), self._item.entity_id(self._db_map))
def test_add_arc_item(self):
arc = mock.MagicMock()
self._item.add_arc_item(arc)
self.assertEqual(self._item.arc_items, [arc])
arc.update_line.assert_called_once()
def test_apply_zoom(self):
self._item.apply_zoom(0.5)
self.assertEqual(self._item.scale(), 0.5)
self._item.apply_zoom(1.5)
self.assertEqual(self._item.scale(), 1.0)
def test_apply_rotation(self):
arc = mock.MagicMock()
self._item.add_arc_item(arc)
rotation_center = QPointF(100.0, 0.0)
self._item.apply_rotation(-90.0, rotation_center)
self.assertEqual(self._item.pos(), QPointF(100.0, -100.0))
arc.update_line.assert_has_calls([])
if __name__ == "__main__":
unittest.main() |
get measurement | # coding=utf-8
from collections import OrderedDict
import copy
from flask_babel import lazy_gettext
from mycodo.inputs.base_input import AbstractInput
# Measurements
measurements_dict = OrderedDict()
for each_channel in range(8):
measurements_dict[each_channel] = {
'measurement': 'electrical_potential',
'unit': 'V'
}
# Input information
INPUT_INFORMATION = {
'input_name_unique': 'ADS1256',
'input_manufacturer': 'Texas Instruments',
'input_name': 'ADS1256',
'input_library': 'wiringpi, kizniche/PiPyADC-py3',
'measurements_name': 'Voltage (Waveshare, Analog-to-Digital Converter)',
'measurements_dict': measurements_dict,
'measurements_rescale': True,
'scale_from_min': 0.0,
'scale_from_max': 5.0,
'options_enabled': [
'measurements_select',
'adc_gain',
'adc_sample_speed',
'period',
'pre_output'
],
'options_disabled': ['interface'],
'dependencies_module': [
('pip-pypi', 'wiringpi', 'wiringpi'),
('pip-pypi', 'pipyadc_py3', 'git+https://github.com/kizniche/PiPyADC-py3.git') # PiPyADC ported to Python3
],
'interfaces': ['UART'],
'adc_gain': [
(1, '1'),
(2, '2'),
(4, '4'),
(8, '8'),
(16, '16'),
(32, '32'),
(64, '64')
],
'adc_sample_speed': [
('30000', '30,000'),
('15000', '15,000'),
('7500', '7,500'),
('3750', '3,750'),
('2000', '2,000'),
('1000', '1,000'),
('500', '500'),
('100', '100'),
('60', '60'),
('50', '50'),
('30', '30'),
('25', '25'),
('15', '15'),
('10', '10'),
('5', '5'),
('2d5', '2.5')
],
'custom_options': [
{
'id': 'adc_calibration',
'type': 'select',
'default_value': '',
'options_select': [
('', 'No Calibration'),
('SELFOCAL', 'Self Offset'),
('SELFGCAL', 'Self Gain'),
('SELFCAL', 'Self Offset + Self Gain'),
('SYSOCAL', 'System Offset'),
('SYSGCAL', 'System Gain')
],
'name': lazy_gettext('Calibration'),
'phrase': lazy_gettext('Set the calibration method to perform during Input activation')
}
],
}
class InputModule(AbstractInput):
"""ADC Read."""
def __init__(self, input_dev, testing=False):
super().__init__(input_dev, testing=testing, name=__name__)
self.sensor = None
self.CH_SEQUENCE = None
self.adc_gain = None
self.adc_sample_speed = None
self.adc_calibration = None
if not testing:
self.setup_custom_options(
INPUT_INFORMATION['custom_options'], input_dev)
self.try_initialize()
def initialize(self):
import glob
from pipyadc_py3 import ADS1256
from pipyadc_py3.ADS1256_definitions import POS_AIN0
from pipyadc_py3.ADS1256_definitions import POS_AIN1
from pipyadc_py3.ADS1256_definitions import POS_AIN2
from pipyadc_py3.ADS1256_definitions import POS_AIN3
from pipyadc_py3.ADS1256_definitions import POS_AIN4
from pipyadc_py3.ADS1256_definitions import POS_AIN5
from pipyadc_py3.ADS1256_definitions import POS_AIN6
from pipyadc_py3.ADS1256_definitions import POS_AIN7
from pipyadc_py3.ADS1256_definitions import NEG_AINCOM
# Input pin for the potentiometer on the Waveshare Precision ADC board
POTI = POS_AIN0 | NEG_AINCOM
# Light dependant resistor
LDR = POS_AIN1 | NEG_AINCOM
# The other external input screw terminals of the Waveshare board
EXT2, EXT3, EXT4 = POS_AIN2 | NEG_AINCOM, POS_AIN3 | NEG_AINCOM, POS_AIN4 | NEG_AINCOM
EXT5, EXT6, EXT7 = POS_AIN5 | NEG_AINCOM, POS_AIN6 | NEG_AINCOM, POS_AIN7 | NEG_AINCOM
channels = {
0: POTI,
1: LDR,
2: EXT2,
3: EXT3,
4: EXT4,
5: EXT5,
6: EXT6,
7: EXT7
}
# Generate the channel sequence for enabled channels
self.CH_SEQUENCE = []
for channel in self.channels_measurement:
if self.is_enabled(channel):
self.CH_SEQUENCE.append(channels[channel])
self.CH_SEQUENCE = tuple(self.CH_SEQUENCE)
self.adc_gain = self.input_dev.adc_gain
self.adc_sample_speed = self.input_dev.adc_sample_speed
if glob.glob('/dev/spi*'):
self.sensor = ADS1256()
# Perform selected calibration
if self.adc_calibration == 'SELFOCAL':
self.sensor.cal_self_offset()
elif self.adc_calibration == 'SELFGCAL':
self.sensor.cal_self_gain()
elif self.adc_calibration == 'SELFCAL':
self.sensor.cal_self()
elif self.adc_calibration == 'SYSOCAL':
self.sensor.cal_system_offset()
elif self.adc_calibration == 'SYSGCAL':
self.sensor.cal_system_gain()
else:
            raise Exception(
                "SPI device /dev/spi* not found. Ensure SPI is enabled and the device is recognized/set up by Linux.")
def METHOD_NAME(self):
if not self.sensor:
self.logger.error("Error 101: Device not set up. See https://kizniche.github.io/Mycodo/Error-Codes#error-101 for more info.")
return
self.return_dict = copy.deepcopy(measurements_dict)
# 2 attempts to get valid measurement
voltages_list = []
for _ in range(2):
raw_channels = self.sensor.read_sequence(self.CH_SEQUENCE)
voltages_list = [i * self.sensor.v_per_digit for i in raw_channels]
if 0 not in voltages_list:
break
if not voltages_list or 0 in voltages_list:
self.logger.error("ADC returned measurement of 0 (indicating something is wrong).")
return
index = 0
for channel in self.channels_measurement:
if self.is_enabled(channel):
self.value_set(channel, voltages_list[index])
index += 1
return self.return_dict |
get description | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""Outline Explorer Plugin."""
# Third party imports
from qtpy.QtCore import Qt, Slot
# Local imports
from spyder.api.plugin_registration.decorators import (
on_plugin_available, on_plugin_teardown)
from spyder.api.translations import _
from spyder.api.plugins import SpyderDockablePlugin, Plugins
from spyder.plugins.outlineexplorer.main_widget import OutlineExplorerWidget
class OutlineExplorer(SpyderDockablePlugin):
NAME = 'outline_explorer'
CONF_SECTION = 'outline_explorer'
REQUIRES = [Plugins.Completions, Plugins.Editor]
OPTIONAL = []
CONF_FILE = False
WIDGET_CLASS = OutlineExplorerWidget
# ---- SpyderDockablePlugin API
# -------------------------------------------------------------------------
@staticmethod
def get_name() -> str:
"""Return widget title."""
return _('Outline Explorer')
@staticmethod
def METHOD_NAME() -> str:
"""Return the description of the outline explorer widget."""
return _("Explore functions, classes and methods in open files. Note "
"that if you disable the 'Completion and linting' plugin, "
"this one won't work.")
@classmethod
def get_icon(cls):
"""Return the outline explorer icon."""
return cls.create_icon('outline_explorer')
def on_initialize(self):
if self.main:
self.main.restore_scrollbar_position.connect(
self._restore_scrollbar_position)
self.sig_mainwindow_state_changed.connect(
self._on_mainwindow_state_changed)
@on_plugin_available(plugin=Plugins.Completions)
def on_completions_available(self):
completions = self.get_plugin(Plugins.Completions)
completions.sig_language_completions_available.connect(
self.start_symbol_services)
completions.sig_stop_completions.connect(
self.stop_symbol_services)
@on_plugin_available(plugin=Plugins.Editor)
def on_editor_available(self):
editor = self.get_plugin(Plugins.Editor)
editor.sig_open_files_finished.connect(
self.update_all_editors)
@on_plugin_teardown(plugin=Plugins.Completions)
def on_completions_teardown(self):
completions = self.get_plugin(Plugins.Completions)
completions.sig_language_completions_available.disconnect(
self.start_symbol_services)
completions.sig_stop_completions.disconnect(
self.stop_symbol_services)
@on_plugin_teardown(plugin=Plugins.Editor)
def on_editor_teardown(self):
editor = self.get_plugin(Plugins.Editor)
editor.sig_open_files_finished.disconnect(
self.update_all_editors)
# ----- Private API
# -------------------------------------------------------------------------
@Slot(object)
def _on_mainwindow_state_changed(self, window_state):
"""Actions to take when the main window has changed its state."""
if window_state == Qt.WindowMinimized:
# There's no need to update the treewidget when the plugin is
# minimized.
self.get_widget().change_tree_visibility(False)
else:
self.get_widget().change_tree_visibility(True)
def _restore_scrollbar_position(self):
"""Restoring scrollbar position after main window is visible"""
scrollbar_pos = self.get_conf('scrollbar_position', None)
explorer = self.get_widget()
if scrollbar_pos is not None:
explorer.treewidget.set_scrollbar_position(scrollbar_pos)
# ----- Public API
# -------------------------------------------------------------------------
@Slot(dict, str)
def start_symbol_services(self, capabilities, language):
"""Enable LSP symbols functionality."""
explorer = self.get_widget()
symbol_provider = capabilities.get('documentSymbolProvider', False)
if symbol_provider:
explorer.start_symbol_services(language)
def stop_symbol_services(self, language):
"""Disable LSP symbols functionality."""
explorer = self.get_widget()
explorer.stop_symbol_services(language)
def update_all_editors(self):
"""Update all editors with an associated LSP server."""
explorer = self.get_widget()
explorer.update_all_editors()
def get_supported_languages(self):
"""List of languages with symbols support."""
return self.get_widget().get_supported_languages() |
get connection string | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import tempfile
import shutil
from azure.cli.testsdk.preparers import AbstractPreparer
class StorageScenarioMixin:
profile = None
def get_current_profile(self):
if not self.profile:
self.profile = self.cmd('cloud show --query profile -otsv').output
return self.profile
def get_account_key(self, group, name):
if self.get_current_profile() == '2017-03-09-profile':
template = 'storage account keys list -n {} -g {} --query "key1" -otsv'
else:
template = 'storage account keys list -n {} -g {} --query "[0].value" -otsv'
return self.cmd(template.format(name, group)).output
def METHOD_NAME(self, group, name):
return self.cmd('storage account show-connection-string -n {} -g {} '
'--query connectionString -otsv'.format(name, group)).output.strip()
def get_account_id(self, group, name):
return self.cmd('storage account show -n {} -g {} --query id -otsv'.format(name, group)).output.strip()
def get_account_info(self, group, name):
"""Returns the storage account name and key in a tuple"""
return name, self.get_account_key(group, name)
def oauth_cmd(self, cmd, *args, **kwargs):
if args:
cmd = cmd.format(*args)
args = ()
return self.cmd(cmd + ' --auth-mode login', *args, **kwargs)
def file_oauth_cmd(self, cmd, *args, **kwargs):
if args:
cmd = cmd.format(*args)
args = ()
return self.cmd(cmd + ' --auth-mode login --backup-intent', *args, **kwargs)
def storage_cmd(self, cmd, account_info, *args):
cmd = cmd.format(*args)
cmd = '{} --account-name {} --account-key {}'.format(cmd, *account_info)
return self.cmd(cmd)
def storage_cmd_negative(self, cmd, account_info, *args):
cmd = cmd.format(*args)
cmd = '{} --account-name {} --account-key {}'.format(cmd, *account_info)
return self.cmd(cmd, expect_failure=True)
def create_container(self, account_info, prefix='cont', length=24):
container_name = self.create_random_name(prefix=prefix, length=length)
self.storage_cmd('storage container create -n {}', account_info, container_name)
return container_name
def create_share(self, account_info, prefix='share', length=24):
share_name = self.create_random_name(prefix=prefix, length=length)
self.storage_cmd('storage share create -n {}', account_info, share_name)
return share_name
def create_file_system(self, account_info, prefix='filesystem', length=24):
filesystem_name = self.create_random_name(prefix=prefix, length=length)
self.storage_cmd('storage fs create -n {}', account_info, filesystem_name)
return filesystem_name
class StorageTestFilesPreparer(AbstractPreparer):
def __init__(self, parameter_name='test_dir'):
super(StorageTestFilesPreparer, self).__init__(name_prefix='test', name_len=24)
self.parameter_name = parameter_name
def create_resource(self, name, **kwargs):
temp_dir = os.path.join(tempfile.gettempdir(), self.random_name)
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
with open(os.path.join(temp_dir, 'readme'), 'w') as f:
f.write('This directory contains test files generated by Azure CLI storage command '
'module tests.')
for folder_name in ['apple', 'butter', 'butter/charlie', 'duff/edward']:
for file_index in range(10):
file_path = os.path.join(temp_dir, folder_name, 'file_%s' % file_index)
if not os.path.exists(os.path.dirname(file_path)):
os.makedirs(os.path.dirname(file_path))
with open(file_path, 'w') as f:
f.write('Azure CLI storage command module test sample file. origin:'
' %s' % file_path)
setattr(self, '_temp_dir', temp_dir)
return {self.parameter_name: temp_dir}
def remove_resource(self, name, **kwargs):
temp_dir = self.get_temp_dir()
if temp_dir:
shutil.rmtree(temp_dir, ignore_errors=True)
def get_temp_dir(self):
return getattr(self, '_temp_dir', None) |
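# Sketch of how a scenario test might use the mixin above. The preparers and base class
# are the usual azure.cli.testsdk helpers; the test body and resource names are
# illustrative, not part of this module.
#
#   from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer
#
#   class StorageBlobScenarioTest(StorageScenarioMixin, ScenarioTest):
#       @ResourceGroupPreparer()
#       @StorageAccountPreparer()
#       def test_list_blobs(self, resource_group, storage_account):
#           account_info = self.get_account_info(resource_group, storage_account)
#           container = self.create_container(account_info)
#           self.storage_cmd('storage blob list -c {}', account_info, container)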
main | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Uniformize streamlines' endpoints according to a defined axis.
Useful for tractometry or models creation.
The --auto option will automatically calculate the main orientation.
If the input bundle is poorly defined, it is possible heuristic will be wrong.
The default is to flip each streamline so their first point's coordinate in the
defined axis is smaller than their last point (--swap does the opposite).
The --target option will use the barycenter of the target mask to define the
axis. The target mask can be a binary mask or an atlas. If an atlas is
used, labels are expected in the form of --target atlas.nii.gz 2 3 5:7.
"""
import argparse
import logging
from dipy.io.streamline import save_tractogram
import nibabel as nib
from scilpy.image.labels import get_data_as_labels
from scilpy.io.image import merge_labels_into_mask
from scilpy.io.streamlines import load_tractogram_with_reference
from scilpy.io.utils import (add_overwrite_arg,
add_reference_arg,
add_verbose_arg,
assert_outputs_exist,
assert_inputs_exist)
from scilpy.segment.streamlines import filter_grid_roi
from scilpy.utils.streamlines import (uniformize_bundle_sft,
uniformize_bundle_sft_using_mask)
def _build_arg_parser():
p = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
p.add_argument('in_bundle',
help='Input path of the tractography file.')
p.add_argument('out_bundle',
help='Output path of the uniformized file.')
method = p.add_mutually_exclusive_group(required=True)
method.add_argument('--axis', choices=['x', 'y', 'z'],
help='Match endpoints of the streamlines along this axis.'
'\nSUGGESTION: Commissural = x, Association = y, '
'Projection = z')
method.add_argument('--auto', action='store_true',
help='Match endpoints of the streamlines along an '
'automatically determined axis.')
method.add_argument('--centroid', metavar='FILE',
                        help='Match endpoints of the streamlines to a reference '
                             'centroid bundle (tractogram file).')
method.add_argument('--target_roi', nargs='+',
help='Provide a target ROI and the labels to use.\n'
'Align heads to be closest to the mask barycenter.\n'
'If no labels are provided, all labels will be used.')
p.add_argument('--swap', action='store_true',
help='Swap head <-> tail convention. '
'Can be useful when the reference is not in RAS.')
add_reference_arg(p)
add_verbose_arg(p)
add_overwrite_arg(p)
return p
def METHOD_NAME():
parser = _build_arg_parser()
args = parser.parse_args()
if args.verbose:
logging.getLogger().setLevel(logging.INFO)
assert_inputs_exist(parser, args.in_bundle)
assert_outputs_exist(parser, args, args.out_bundle)
sft = load_tractogram_with_reference(parser, args, args.in_bundle)
if args.auto:
uniformize_bundle_sft(sft, None, swap=args.swap)
if args.centroid:
centroid_sft = load_tractogram_with_reference(parser, args,
args.centroid)
uniformize_bundle_sft(sft, args.axis, swap=args.swap,
ref_bundle=centroid_sft)
if args.target_roi:
img = nib.load(args.target_roi[0])
atlas = get_data_as_labels(img)
if len(args.target_roi) == 1:
mask = atlas > 0
else:
mask = merge_labels_into_mask(atlas, " ".join(args.target_roi[1:]))
# Uncomment if the user wants to filter the streamlines
# sft, _ = filter_grid_roi(sft, mask, 'either_end', False)
uniformize_bundle_sft_using_mask(sft, mask, swap=args.swap)
if args.axis:
uniformize_bundle_sft(sft, args.axis, swap=args.swap)
save_tractogram(sft, args.out_bundle)
if __name__ == "__main__":
METHOD_NAME() |
test communication | #!/usr/bin/env python
import unittest
import os
import subprocess
import time
class TestNetworkEndpointConfig(unittest.TestCase):
def checkDNSDistExitCode(self, configTemplate, expectedCode, clientMode=False, verboseMode=False):
conffile = 'configs/dnsdist_TestNetworkEndpointConfig.conf'
with open(conffile, 'w') as conf:
conf.write("-- Autogenerated by dnsdisttests.py\n")
conf.write(configTemplate)
dnsdistcmd = [os.environ['DNSDISTBIN'], '-C', conffile, '--check-config']
if clientMode:
dnsdistcmd.append('-c')
if verboseMode:
dnsdistcmd.append('-v')
output = None
returnCode = None
try:
output = subprocess.check_output(dnsdistcmd, stderr=subprocess.STDOUT, close_fds=True)
returnCode = 0
except subprocess.CalledProcessError as exc:
output = exc.output
returnCode = exc.returncode
print(output)
self.assertEqual(returnCode, expectedCode)
def testNonExistingEndpoint(self):
"""
NetworkBindings: Non existing endpoint
"""
configTemplate = """
newServer{address="127.0.0.1:53"}
local endpoint = newNetworkEndpoint('/this/path/does/not/exist')
if endpoint == nil then
os.exit(1)
end
if endpoint:isValid() then
os.exit(2)
end
if endpoint:send('test') then
os.exit(3)
end
os.exit(0)
"""
self.checkDNSDistExitCode(configTemplate, 0)
def testClientMode(self):
"""
NetworkBindings: Client mode
"""
configTemplate = """
newServer{address="127.0.0.1:53"}
local endpoint = newNetworkEndpoint('/this/path/does/not/exist')
if endpoint == nil then
os.exit(1)
end
if endpoint:isValid() then
os.exit(2)
end
if endpoint:send('test') then
os.exit(3)
end
local listener = newNetworkListener()
if listener == nil then
os.exit(4)
end
local endpointId = 1
local function callback(_, _, _)
end
if listener:addUnixListeningEndpoint('/path', 1, callback) then
os.exit(5)
end
listener:start()
os.exit(0)
"""
self.checkDNSDistExitCode(configTemplate, 0, clientMode=True)
def testGetResolvers(self):
"""
NetworkBindings: getResolvers
"""
configTemplate = """
newServer{address="127.0.0.1:53"}
local resolvers = getResolvers('resolv.conf.sample')
if #resolvers ~= 2 then
os.exit(1)
end
if resolvers[1] ~= '9.9.9.9' then
os.exit(2)
end
if resolvers[2] ~= '2620:fe::fe' then
os.exit(2)
end
os.exit(0)
"""
self.checkDNSDistExitCode(configTemplate, 0, clientMode=True)
def METHOD_NAME(self):
"""
NetworkBindings: Communication
"""
configTemplate = """
newServer{address="127.0.0.1:53"}
local listener = newNetworkListener()
if listener == nil then
os.exit(1)
end
local endpointId = 1
local function callback(id, dgram, _)
-- this function will never get called because we are holding the Lua lock
end
if not listener:addUnixListeningEndpoint('/tmp/dnsdist.network-bindings.test', 1, callback) then
os.exit(4)
end
--listener:start()
local endpoint = newNetworkEndpoint('/tmp/dnsdist.network-bindings.test')
if endpoint == nil then
os.exit(5)
end
if not endpoint:isValid() then
os.exit(6)
end
if not endpoint:send('test') then
os.exit(7)
end
os.exit(0)
"""
self.checkDNSDistExitCode(configTemplate, 0) |
mymethod | ##############################################################################
#
# Copyright (c) 2004, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Unit tests for the registerClass directive.
"""
from AccessControl.class_init import InitializeClass
from AccessControl.SecurityInfo import ClassSecurityInfo
from OFS.SimpleItem import SimpleItem
from zope.interface import Interface
from zope.interface import implementer
class ISimpleContent(Interface):
pass
@implementer(ISimpleContent)
class SimpleContent(SimpleItem):
meta_type = 'SimpleContent'
security = ClassSecurityInfo()
def __init__(self, id, title):
self.id = id
self.title = title
@security.public
def METHOD_NAME(self):
return "Hello world"
@security.public
def direct(self):
"""Should be able to traverse directly to this as there is no view.
"""
return "Direct traversal worked"
InitializeClass(SimpleContent)
def test_registerClass():
"""
Testing registerClass
>>> from zope.component.testing import setUp, tearDown
>>> setUp()
>>> import Products
>>> import Zope2.App
>>> from Zope2.App import zcml
>>> from persistent.interfaces import IPersistent
Use the five:registerClass directive::
>>> configure_zcml = '''
... <configure
... xmlns="http://namespaces.zope.org/zope"
... xmlns:five="http://namespaces.zope.org/five"
... i18n_domain="foo">
... <permission id="foo.add" title="Add Foo"/>
... <five:registerClass
... class="OFS.tests.test_registerclass.SimpleContent"
... meta_type="Foo Type"
... permission="foo.add"
... addview="addfoo.html"
... global="false"
... />
... </configure>'''
>>> zcml.load_config('meta.zcml', Zope2.App)
>>> zcml.load_string(configure_zcml)
Make sure that the class attributes are set correctly::
>>> from OFS.tests.test_registerclass import SimpleContent
>>> SimpleContent.meta_type
'Foo Type'
And the meta_type is registered correctly::
>>> for info in Products.meta_types:
... if info['name'] == 'Foo Type':
... break
>>> info['product']
'OFS'
>>> info['permission']
'Add Foo'
>>> from OFS.tests.test_registerclass import ISimpleContent
>>> ISimpleContent in info['interfaces']
True
>>> IPersistent in info['interfaces']
True
>>> info['visibility'] is None
True
>>> info['instance'] is SimpleContent
True
>>> info['action']
'+/addfoo.html'
>>> info['container_filter'] is None
True
Now reset everything and see what happens without optional parameters::
>>> tearDown()
>>> setUp()
Use the five:registerClass directive again::
>>> configure_zcml = '''
... <configure
... xmlns="http://namespaces.zope.org/zope"
... xmlns:five="http://namespaces.zope.org/five"
... i18n_domain="bar">
... <permission id="bar.add" title="Add Bar"/>
... <five:registerClass
... class="OFS.tests.test_registerclass.SimpleContent"
... meta_type="Bar Type"
... permission="bar.add"
... />
... </configure>'''
>>> import Zope2.App
>>> zcml.load_config('meta.zcml', Zope2.App)
>>> zcml.load_string(configure_zcml)
Make sure that the class attributes are set correctly::
>>> SimpleContent.meta_type
'Bar Type'
And the meta_type is registered correctly::
>>> for info in Products.meta_types:
... if info['name'] == 'Bar Type':
... break
>>> info['product']
'OFS'
>>> info['permission']
'Add Bar'
>>> ISimpleContent in info['interfaces']
True
>>> IPersistent in info['interfaces']
True
>>> info['visibility']
'Global'
>>> info['instance'] is SimpleContent
True
>>> info['action']
''
>>> info['container_filter'] is None
True
Clean up:
>>> tearDown()
>>> SimpleContent.meta_type
'simple item'
>>> [info for info in Products.meta_types if info['name'] == 'Bar Type']
[]
"""
def test_suite():
from Testing.ZopeTestCase import ZopeDocTestSuite
return ZopeDocTestSuite() |
rewrite extra info | """Plugin that loads yaml resources for data packs and resource packs."""
__all__ = [
"LoadYamlOptions",
"YamlPackLoader",
"load_yaml",
]
import logging
from dataclasses import InitVar, dataclass, field
from typing import Any, Dict, List, Optional, Type, TypeVar
import yaml
from beet import (
Context,
DataPack,
ExtraContainer,
File,
FileOrigin,
Pack,
PluginOptions,
ResourcePack,
configurable,
)
logger = logging.getLogger(__name__)
PackType = TypeVar("PackType", bound=Pack[Any])
PackFile = File[Any, Any]
class LoadYamlOptions(PluginOptions):
resource_pack: List[str] = []
data_pack: List[str] = []
def beet_default(ctx: Context):
ctx.require(load_yaml)
@configurable(validator=LoadYamlOptions)
def load_yaml(ctx: Context, opts: LoadYamlOptions):
"""Plugin that loads yaml resources for data packs and resource packs."""
yaml_pack_loader = ctx.inject(YamlPackLoader)
for pattern in opts.resource_pack:
for path in ctx.directory.glob(pattern):
yaml_pack_loader.load_resource_pack(ctx.directory / path)
for pattern in opts.data_pack:
for path in ctx.directory.glob(pattern):
yaml_pack_loader.load_data_pack(ctx.directory / path)
@dataclass
class YamlPackLoader:
"""Loader for data packs and resource packs using yaml files."""
ctx: InitVar[Optional[Context]] = None
assets: ResourcePack = field(default_factory=ResourcePack)
data: DataPack = field(default_factory=DataPack)
def __post_init__(self, ctx: Optional[Context]):
logger.warning("Deprecated in favor of beet.contrib.auto_yaml.")
if ctx:
self.assets = ctx.assets
self.data = ctx.data
def load_resource_pack(self, origin: FileOrigin):
extended_pack = self.create_extended_pack(ResourcePack)
extended_pack.load(origin)
self.merge_extended_pack(self.assets, extended_pack)
def load_data_pack(self, origin: FileOrigin):
extended_pack = self.create_extended_pack(DataPack)
extended_pack.load(origin)
self.merge_extended_pack(self.data, extended_pack)
def create_extended_pack(self, pack_type: Type[PackType]) -> PackType:
class ExtendedNamespace(pack_type.namespace_type): # type: ignore
@classmethod
def get_extra_info(cls) -> Dict[str, Type[PackFile]]:
return self.METHOD_NAME(super().get_extra_info()) # type: ignore
ExtendedNamespace.field_map = pack_type.namespace_type.field_map
ExtendedNamespace.scope_map = {
(scope, yaml_extension): key
for yaml_extension in [".yml", ".yaml"]
for (scope, extension), key in pack_type.namespace_type.scope_map.items()
if extension == ".json"
}
class ExtendedPack(pack_type): # type: ignore
@classmethod
def get_extra_info(cls) -> Dict[str, Type[PackFile]]:
return self.METHOD_NAME(super().get_extra_info()) # type: ignore
ExtendedPack.namespace_type = ExtendedNamespace
return ExtendedPack() # type: ignore
def METHOD_NAME(
self,
original: Dict[str, Type[PackFile]],
) -> Dict[str, Type[PackFile]]:
return {
filename[:-5] + yaml_extension: file_type
for yaml_extension in [".yml", ".yaml"]
for filename, file_type in original.items()
if filename.endswith(".json")
}
def merge_extended_pack(self, destination: PackType, extended_pack: PackType):
for _, yaml_file in extended_pack.list_files():
yaml_file.ensure_deserialized(yaml.safe_load)
self.rename_extra_files(extended_pack.extra)
for namespace in extended_pack.values():
self.rename_extra_files(namespace.extra)
destination.merge(extended_pack)
def rename_extra_files(self, extra: ExtraContainer):
renamed = {
path.replace(".yml", ".json").replace(".yaml", ".json"): item
for path, item in extra.items()
if path.endswith((".yml", ".yaml"))
}
extra.clear()
extra.update(renamed) |
compute file hash | # Copyright (c) Yugabyte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
import os
import logging
import time
from typing import Optional, List, Dict, Tuple, Any
from yugabyte.file_util import compute_file_sha256
class TimestampSaver:
"""
A facility for recording and restoring file modification timestamps in case the files have not
changed, as determined using their checksums, to avoid unnecessary rebuilds.
Usage:
    with TimestampSaver(root_dir, file_suffix='.h') as timestamp_saver:
# Do something that may modify files in root_dir.
"""
file_paths: List[str]
# Maps file path to a hash, file size, and st_mtime_ns (modification timestamp in nanoseconds)
# obtained from os.stat.
path_to_hash_and_stat: Dict[str, Tuple[str, int, int]]
root_dir: str
file_suffix: Optional[str]
timestamp_save_overhead_sec: float
def __init__(
self,
root_dir: str,
file_suffix: Optional[str]) -> None:
self.file_paths = []
self.path_to_hash_and_stat = {}
self.root_dir = root_dir
self.file_suffix = file_suffix
def __enter__(self) -> 'TimestampSaver':
start_time_sec = time.time()
self.add_files_recursively(self.root_dir, self.file_suffix)
self.record_timestamps()
self.timestamp_save_overhead_sec = time.time() - start_time_sec
return self
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
self.restore_timestamps()
def add_file(self, file_path: str) -> None:
self.file_paths.append(file_path)
def add_files_recursively(self, dir_path: str, file_suffix: Optional[str]) -> None:
for root, _, files in os.walk(dir_path):
for file_name in files:
if file_suffix is None or file_name.endswith(file_suffix):
file_path = os.path.join(root, file_name)
if not os.path.islink(file_path) and os.path.exists(file_path):
self.add_file(file_path)
def METHOD_NAME(self, file_path: str) -> str:
return compute_file_sha256(file_path)
def record_timestamps(self) -> None:
for file_path in self.file_paths:
stat_result = os.stat(file_path)
self.path_to_hash_and_stat[file_path] = (
self.METHOD_NAME(file_path),
stat_result.st_size,
stat_result.st_mtime_ns,
)
def restore_timestamps(self) -> None:
num_restored = 0
start_time_sec = time.time()
for file_path, hash_and_stat in self.path_to_hash_and_stat.items():
old_hash, old_file_size, old_st_mtime_ns = hash_and_stat
if os.path.exists(file_path):
new_stat = os.stat(file_path)
# Check size first before checking the hash.
if (new_stat.st_size == old_file_size and
self.METHOD_NAME(file_path) == old_hash and
new_stat.st_mtime_ns != old_st_mtime_ns):
num_restored += 1
# We rely on checking that file_path is not a symlink.
os.utime(file_path, ns=(new_stat.st_atime_ns, old_st_mtime_ns))
check_restore_time_sec = time.time() - start_time_sec
overhead_sec = check_restore_time_sec + self.timestamp_save_overhead_sec
if num_restored > 0 or overhead_sec > 0.05:
logging.info(
"Saved timestamps of %d files in directory %s in %.3f sec and then checked/restored"
" them in %.3f sec (total overhead: %.3f sec)",
len(self.file_paths), self.root_dir, self.timestamp_save_overhead_sec,
check_restore_time_sec, overhead_sec) |
run | import copy
from functools import cached_property
import d20
import draconic
import math
from utils.dice import RerollableStringifier
from . import Effect
from .. import utils
from ..results import RollResult
class Roll(Effect):
def __init__(
self,
dice: str,
name: str,
higher: dict = None,
cantripScale: bool = None,
hidden: bool = False,
displayName: str = None,
fixedValue: bool = None,
**kwargs,
):
super().__init__("roll", **kwargs)
self.dice = dice
self.name = name
self.higher = higher
self.cantripScale = cantripScale
self.hidden = hidden
self.displayName = displayName
self.fixedValue = fixedValue
def to_dict(self):
out = super().to_dict()
out.update({"dice": self.dice, "name": self.name, "hidden": self.hidden})
if self.higher is not None:
out["higher"] = self.higher
if self.cantripScale is not None:
out["cantripScale"] = self.cantripScale
if self.displayName is not None:
out["displayName"] = self.displayName
if self.fixedValue is not None:
out["fixedValue"] = self.fixedValue
return out
def METHOD_NAME(self, autoctx):
super().METHOD_NAME(autoctx)
dice_ast = copy.copy(d20.parse(autoctx.parse_annostr(self.dice)))
dice_ast = utils.upcast_scaled_dice(self, autoctx, dice_ast)
if not (self.fixedValue or self.hidden):
d = autoctx.args.join("d", "+", ephem=True)
# add on combatant damage effects (#224)
effect_d = autoctx.caster_active_effects(
mapper=lambda effect: effect.effects.damage_bonus, reducer="+".join
)
if effect_d:
if d:
d = f"{d}+{effect_d}"
else:
d = effect_d
if d:
d_ast = d20.parse(d)
dice_ast.roll = d20.ast.BinOp(dice_ast.roll, "+", d_ast.roll)
if not self.hidden:
maxdmg = autoctx.args.last("max", None, bool, ephem=True)
mi = autoctx.args.last("mi", None, int)
# -mi # (#527)
if mi:
dice_ast = d20.utils.tree_map(utils.mi_mapper(mi), dice_ast)
if maxdmg:
dice_ast = d20.utils.tree_map(utils.max_mapper, dice_ast)
rolled = d20.roll(dice_ast)
if not self.hidden:
name_out = self.displayName
if not name_out:
name_out = self.name.title()
autoctx.meta_queue(f"**{name_out}**: {rolled.result}")
simplified_expr = copy.deepcopy(rolled.expr)
d20.utils.simplify_expr(simplified_expr)
simplified_metavar = RollEffectMetaVar(simplified_expr)
autoctx.metavars[self.name] = simplified_metavar
autoctx.metavars["lastRoll"] = rolled.total # #1335
return RollResult(result=rolled.total, roll=rolled, simplified_metavar=simplified_metavar, hidden=self.hidden)
def build_str(self, caster, evaluator):
super().build_str(caster, evaluator)
try:
evaluator.builtins[self.name] = evaluator.transformed_str(self.dice)
except draconic.DraconicException:
evaluator.builtins[self.name] = self.dice
evaluator.builtins["lastRoll"] = 0
return ""
class RollEffectMetaVar:
"""
Proxy type for the rerollable string generated in Roll effects. This is its own class to allow checking if a
metavar was generated as the result of a Roll.
"""
def __init__(self, simplified_expr: d20.Expression):
self._expr = simplified_expr
# cached props
@cached_property
def _str(self):
return RerollableStringifier().stringify(self._expr.roll)
@cached_property
def _total(self):
return self._expr.total
# magic methods
def __str__(self):
return self._str
def __int__(self):
return int(self._total)
def __float__(self):
return float(self._total)
def __bool__(self):
return bool(self._total)
def __eq__(self, other):
return self._total == other
def __lt__(self, other):
return self._total < other
def __le__(self, other):
return self._total <= other
def __ne__(self, other):
return self._total != other
def __gt__(self, other):
return self._total > other
def __ge__(self, other):
return self._total >= other
def __floor__(self):
return math.floor(self._total)
def __ceil__(self):
return math.ceil(self._total)
def __add__(self, other):
return self._lbin_op(other, "+")
def __sub__(self, other):
return self._lbin_op(other, "-")
def __mul__(self, other):
return self._lbin_op(other, "*")
def __floordiv__(self, other):
return self._lbin_op(other, "//")
def __truediv__(self, other):
return self._lbin_op(other, "/")
def __mod__(self, other):
return self._lbin_op(other, "%")
def __radd__(self, other):
return self._rbin_op(other, "+")
def __rsub__(self, other):
return self._rbin_op(other, "-")
def __rmul__(self, other):
return self._rbin_op(other, "*")
def __rfloordiv__(self, other):
return self._rbin_op(other, "//")
def __rtruediv__(self, other):
return self._rbin_op(other, "/")
def __rmod__(self, other):
return self._rbin_op(other, "%")
def _lbin_op(self, other, op):
if isinstance(other, (int, float)):
return RollEffectMetaVar(d20.Expression(d20.BinOp(self._expr, op, d20.Literal(other)), self._expr.comment))
elif isinstance(other, RollEffectMetaVar):
return RollEffectMetaVar(d20.Expression(d20.BinOp(self._expr, op, other._expr), self._expr.comment))
raise NotImplementedError
def _rbin_op(self, other, op):
if isinstance(other, (int, float)):
return RollEffectMetaVar(d20.Expression(d20.BinOp(d20.Literal(other), op, self._expr), self._expr.comment))
elif isinstance(other, RollEffectMetaVar):
return RollEffectMetaVar(d20.Expression(d20.BinOp(other._expr, op, self._expr), self._expr.comment))
raise NotImplementedError
def __pos__(self):
return RollEffectMetaVar(d20.Expression(d20.UnOp("+", self._expr), self._expr.comment))
def __neg__(self):
return RollEffectMetaVar(d20.Expression(d20.UnOp("-", self._expr), self._expr.comment)) |
read header | from __future__ import absolute_import, division, print_function
import copy
import re
from iotbx.detectors.detectorbase import DetectorImageBase
from iotbx.detectors import ImageException
from iotbx.detectors.eiger import vendortype_from_size
class EigerCBFImage(DetectorImageBase):
def __init__(self,filename):
DetectorImageBase.__init__(self,filename)
self.vendortype = "Eiger"
self.vendor_specific_null_value = -1
mandatory_keys = ['PIXEL_SIZE_UNITS', 'DISTANCE', 'PHI', 'WAVELENGTH', 'SIZE1',
'SIZE2', 'TWOTHETA', 'DISTANCE_UNITS', 'OSC_RANGE',
'BEAM_CENTER_X', 'BEAM_CENTER_Y',
'CCD_IMAGE_SATURATION', 'OSC_START', 'DETECTOR_SN', 'PIXEL_SIZE',
'AXIS']
def fileLength(self):
raise ImageException("file length not computed for miniCBF")
def getEndian(self):
raise ImageException("endian-ness not computed for miniCBF")
def endian_swap_required(self):
return False
def read(self,algorithm="buffer_based"):
self.METHOD_NAME()
if self.linearintdata != None and\
self.linearintdata.size()==self.size1*self.size2:
#data has already been read
return
if self.bin==2:
raise ImageException("2-by-2 binning not supported for miniCBF")
try:
from cbflib_adaptbx import cbf_binary_adaptor # optional package
self.adaptor = cbf_binary_adaptor(self.filename)
# assert algorithm in ["cbflib","cbflib_optimized","buffer_based"]
data = self.adaptor.uncompress_implementation( algorithm
).uncompress_data(self.size1,self.size2)
self.bin_safe_set_data( data )
except Exception as e:
raise ImageException(
"unable to read miniCBF data; contact authors; error=\"%s\"" % \
str(e).strip())
def METHOD_NAME(self,maxlength=12288): # usually 1024 is OK; require 12288 for ID19
if not self.parameters:
with open(self.filename,"rb") as fh:
rawdata = fh.read(maxlength)
# The tag _array_data.header_convention "SLS_1.0" could be with/without quotes "..."
# SLS_match = re.findall(b'_array_data.header_convention[ "]*SLS', rawdata)
# PILATUS_match = re.findall(b'_array_data.header_convention[ "]*PILATUS', rawdata)
#assert len(SLS_match) + len(PILATUS_match)>=1
# read SLS header
headeropen = rawdata.index(b"_array_data.header_contents")
headerclose = rawdata.index(b"_array_data.data")
self.header = rawdata[headeropen+1:headerclose].decode("latin-1")
self.headerlines = [x.strip() for x in self.header.split("#")]
character_filter = re.compile(r"[\r\n,\(\);]")
self.headerlines = [character_filter.sub("", x) for x in self.headerlines]
self.parameters={'CCD_IMAGE_SATURATION':65535}
for tag,search,idx,datatype in [
('CCD_IMAGE_SATURATION','Count_cutoff',1,int),
('DETECTOR_SN','Detector:',-1,str),
('PIXEL_SIZE','Pixel_size',1,float),
('PIXEL_SIZE_UNITS','Pixel_size',2,str),
('OSC_START','Start_angle',1,float),
('DISTANCE','Detector_distance',1,float),
('DISTANCE_UNITS','Detector_distance',2,str),
('WAVELENGTH',r'Wavelength',1,float),
('BEAM_CENTER_X',r'Beam_xy',1,float),
('BEAM_CENTER_Y',r'Beam_xy',2,float),
('OSC_RANGE','Angle_increment',1,float),
('TWOTHETA','Detector_2theta',1,float),
('AXIS','Oscillation_axis',1,str),
('PHI','Phi',1,float),
('OMEGA','OMEGA',1,float),
('DATE','DATE',1,str),
]:
for line in self.headerlines:
if line.find(search)==0:
if idx==-1:
tokens=line.split(" ")
self.parameters[tag] = " ".join(tokens[1:len(tokens)])
break
self.parameters[tag] = datatype(line.split(" ")[idx])
break
#unit fixes
self.parameters['DISTANCE']*={
'mm':1,'m':1000}[self.parameters['DISTANCE_UNITS']]
self.parameters['PIXEL_SIZE']*={
'mm':1,'m':1000}[self.parameters['PIXEL_SIZE_UNITS']]
self.parameters['BEAM_CENTER_X']*=self.parameters['PIXEL_SIZE']
self.parameters['BEAM_CENTER_Y']*=self.parameters['PIXEL_SIZE']
# x,y beam center swap; do not know why
swp = copy.copy(self.parameters['BEAM_CENTER_X'])
self.parameters['BEAM_CENTER_X']=copy.copy(self.parameters['BEAM_CENTER_Y'])
self.parameters['BEAM_CENTER_Y']=copy.copy(swp)
# read array size
header_lines = []
found_array_data_data = False
for record in rawdata.decode("latin-1").splitlines():
if "_array_data.data" in record:
found_array_data_data = True
elif not found_array_data_data:
continue
elif len(record.strip()) == 0:
# http://sourceforge.net/apps/trac/cbflib/wiki/ARRAY_DATA%20Category
# In an imgCIF file, the encoded binary data begins after
# the empty line terminating the header.
break
header_lines.append(record)
self.header = "\n".join(header_lines)
self.headerlines = [x.strip() for x in self.header.split("\n")]
self.headerlines = [character_filter.sub("", x) for x in self.headerlines]
for tag,search,idx,datatype in [
('SIZE1','X-Binary-Size-Second-Dimension',-1,int),
('SIZE2','X-Binary-Size-Fastest-Dimension',-1,int),
]:
for line in self.headerlines:
if line.find(search)==0:
self.parameters[tag] = datatype(line.split(" ")[idx])
break
self.vendortype = vendortype_from_size.get((self.size2,self.size1),self.vendortype)
if __name__=='__main__':
import sys
i = sys.argv[1]
a = EigerCBFImage(i)
a.read()
print(a)
print(a.parameters)
print(a.rawdata, len(a.rawdata), a.size1*a.size2) |
test named class raises | import pytest
import pyccl as ccl
import functools
def all_subclasses(cls):
"""Get all subclasses of ``cls``. NOTE: Used in ``conftest.py``."""
return set(cls.__subclasses__()).union(
[s for c in cls.__subclasses__() for s in all_subclasses(c)])
def test_method_control_raises():
# All method control subclasses must contain a default implementation.
with pytest.raises(ValueError):
class MyMethodControl(ccl._core.schema._CustomMethod, method="name"):
pass
def test_repr_control():
# Test custom repr controls.
cosmo = ccl.CosmologyVanillaLCDM()
ccl.CustomRepr.disable()
assert repr(cosmo) == object.__repr__(cosmo)
ccl.CustomRepr.enable()
assert repr(cosmo) != object.__repr__(cosmo)
def test_eq_control():
# Test custom eq controls.
cosmo = [ccl.CosmologyVanillaLCDM() for _ in range(2)]
assert id(cosmo[0]) != id(cosmo[1])
ccl.CustomEq.disable()
assert cosmo[0] != cosmo[1]
ccl.CustomEq.enable()
assert cosmo[0] == cosmo[1]
def check_eq_repr_hash(self, other, *, equal=True):
# Helper to ensure `__eq__`, `__repr__`, `__hash__` are consistent.
if equal:
return (self == other
and repr(self) == repr(other)
and hash(self) == hash(other))
return (self != other
and repr(self) != repr(other)
and hash(self) != hash(other))
def test_CCLObject_immutability():
# These tests check the behavior of immutable objects, i.e. instances
# of classes where `Funlock` or `unlock_instance` is not used.
# test `CCLObject` lock
obj = ccl.CCLObject()
obj._object_lock.unlock()
assert "locked=False" in repr(obj._object_lock)
obj._object_lock.lock()
assert "locked=True" in repr(obj._object_lock)
# `update_parameters` not implemented.
cosmo = ccl.CosmologyVanillaLCDM()
# with pytest.raises(AttributeError): # TODO: Uncomment for CCLv3.
# cosmo.my_attr = "hello_world"
with pytest.raises(NotImplementedError):
cosmo.update_parameters(A_SPLINE_NA=120)
# `update_parameters` implemented.
prof = ccl.halos.HaloProfilePressureGNFW(mass_def="200c", mass_bias=0.5)
# with pytest.raises(AttributeError): # TODO: Uncomment for CCLv3.
# prof.mass_bias = 0.7
assert prof.mass_bias == 0.5
prof.update_parameters(mass_bias=0.7)
assert prof.mass_bias == 0.7
def test_CCLObject_default_behavior():
# Test that if `__repr__` is not defined the fall back is safe.
MyType = type("MyType", (ccl.CCLObject,), {"test": 0})
instances = [MyType() for _ in range(2)]
assert check_eq_repr_hash(*instances, equal=False)
# Test that all subclasses of ``CCLAutoRepr`` use Python's default
# ``repr`` if no ``__repr_attrs__`` has been defined.
instances = [ccl.CCLAutoRepr() for _ in range(2)]
assert check_eq_repr_hash(*instances, equal=False)
MyType = type("MyType", (ccl.CCLAutoRepr,), {"test": 0})
instances = [MyType() for _ in range(2)]
assert instances[0] != instances[1]
def METHOD_NAME():
# Test that an error is raised if `create_instance` gets the wrong type.
with pytest.raises(AttributeError):
ccl.halos.MassDef.create_instance(1)
def test_ccl_parameters_abstract():
# Test that the Parameters base class is abstract and cannot instantiate
# if `instance` or `factory` are not specified.
with pytest.raises(TypeError):
ccl.CCLParameters()
with pytest.raises(ValueError):
class MyPars(ccl.CCLParameters):
pass
# +==========================================================================+
# | The following functions are used by `conftest.py` to check correct setup.|
# +==========================================================================+
def init_decorator(func):
"""Check that all attributes listed in ``__repr_attrs__`` are defined in
the constructor of all subclasses of ``CCLAutoRepr``.
NOTE: Used in ``conftest.py``.
"""
def in_mro(self):
"""Determine if `__repr_attrs__` is defined somewhere in the MRO."""
# NOTE: This helper function makes sure that an AttributeError is not
# raised when `super().__init__` is called inside another `__init__`.
mro = self.__class__.__mro__[1:] # MRO excluding this class
for cls in mro:
if hasattr(cls, "__repr_attrs__"):
return True
return False
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
if not hasattr(self, "__repr_attrs__"):
# If `__repr_attrs__` is not specified, use local repr or inherit.
return
flag = [attr for attr in self.__repr_attrs__
if not (hasattr(self, attr) or in_mro(self))]
if flag:
# NOTE: Set the attributes before calling `super`.
raise AttributeError(f"{self.__class__.__name__}: attribute(s) "
f"{flag} not set in __init__.")
return wrapper
def test_unlock_instance_errors():
# Test that unlock_instance gives the correct errors.
# 1. Developer error
with pytest.raises(NameError):
@ccl.unlock_instance(name="hello")
def func1(item, pk, a0=0, *, a1=None, a2):
return
# 2. User error
@ccl.unlock_instance(name="pk")
def func2(item, pk, a0=0, *, a1=None, a2):
return
with pytest.raises(TypeError):
func2()
# 3. Doesn't do anything if instance is not CCLObject.
with ccl.UnlockInstance(True, mutate=False):
pass |
backlight | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Original code found at:
# https://gist.github.com/DenisFromHR/cc863375a6e19dce359d
"""
Compiled, mashed and generally mutilated 2014-2015 by Denis Pleic
Made available under GNU GENERAL PUBLIC LICENSE
# Modified Python I2C library for Raspberry Pi
# as found on http://www.recantha.co.uk/blog/?p=4849
# Joined existing 'i2c_lib.py' and 'lcddriver.py' into a single library
# added bits and pieces from various sources
# By DenisFromHR (Denis Pleic)
# 2015-02-10, ver 0.1
"""
# i2c bus (0 -- original Pi, 1 -- Rev 2 Pi)
I2CBUS = 1
# LCD Address
ADDRESS = 0x27
import smbus
from time import sleep
class i2c_device:
def __init__(self, addr, port=I2CBUS):
self.addr = addr
self.bus = smbus.SMBus(port)
# Write a single command
def write_cmd(self, cmd):
self.bus.write_byte(self.addr, cmd)
sleep(0.0001)
# Write a command and argument
def write_cmd_arg(self, cmd, data):
self.bus.write_byte_data(self.addr, cmd, data)
sleep(0.0001)
# Write a block of data
def write_block_data(self, cmd, data):
self.bus.write_block_data(self.addr, cmd, data)
sleep(0.0001)
# Read a single byte
def read(self):
return self.bus.read_byte(self.addr)
# Read
def read_data(self, cmd):
return self.bus.read_byte_data(self.addr, cmd)
# Read a block of data
def read_block_data(self, cmd):
return self.bus.read_block_data(self.addr, cmd)
# commands
LCD_CLEARDISPLAY = 0x01
LCD_RETURNHOME = 0x02
LCD_ENTRYMODESET = 0x04
LCD_DISPLAYCONTROL = 0x08
LCD_CURSORSHIFT = 0x10
LCD_FUNCTIONSET = 0x20
LCD_SETCGRAMADDR = 0x40
LCD_SETDDRAMADDR = 0x80
# flags for display entry mode
LCD_ENTRYRIGHT = 0x00
LCD_ENTRYLEFT = 0x02
LCD_ENTRYSHIFTINCREMENT = 0x01
LCD_ENTRYSHIFTDECREMENT = 0x00
# flags for display on/off control
LCD_DISPLAYON = 0x04
LCD_DISPLAYOFF = 0x00
LCD_CURSORON = 0x02
LCD_CURSOROFF = 0x00
LCD_BLINKON = 0x01
LCD_BLINKOFF = 0x00
# flags for display/cursor shift
LCD_DISPLAYMOVE = 0x08
LCD_CURSORMOVE = 0x00
LCD_MOVERIGHT = 0x04
LCD_MOVELEFT = 0x00
# flags for function set
LCD_8BITMODE = 0x10
LCD_4BITMODE = 0x00
LCD_2LINE = 0x08
LCD_1LINE = 0x00
LCD_5x10DOTS = 0x04
LCD_5x8DOTS = 0x00
# flags for backlight control
LCD_BACKLIGHT = 0x08
LCD_NOBACKLIGHT = 0x00
En = 0b00000100 # Enable bit
Rw = 0b00000010 # Read/Write bit
Rs = 0b00000001 # Register select bit
class lcd:
#initializes objects and lcd
def __init__(self):
self.lcd_device = i2c_device(ADDRESS)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x03)
self.lcd_write(0x02)
self.lcd_write(LCD_FUNCTIONSET | LCD_2LINE | LCD_5x8DOTS | LCD_4BITMODE)
self.lcd_write(LCD_DISPLAYCONTROL | LCD_DISPLAYON)
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_ENTRYMODESET | LCD_ENTRYLEFT)
sleep(0.2)
# clocks EN to latch command
def lcd_strobe(self, data):
self.lcd_device.write_cmd(data | En | LCD_BACKLIGHT)
sleep(.0005)
self.lcd_device.write_cmd(((data & ~En) | LCD_BACKLIGHT))
sleep(.0001)
def lcd_write_four_bits(self, data):
self.lcd_device.write_cmd(data | LCD_BACKLIGHT)
self.lcd_strobe(data)
# write a command to lcd
def lcd_write(self, cmd, mode=0):
self.lcd_write_four_bits(mode | (cmd & 0xF0))
self.lcd_write_four_bits(mode | ((cmd << 4) & 0xF0))
   # write a character to lcd (or character rom) 0x09: backlight | RS=DR
# works!
def lcd_write_char(self, charvalue, mode=1):
self.lcd_write_four_bits(mode | (charvalue & 0xF0))
self.lcd_write_four_bits(mode | ((charvalue << 4) & 0xF0))
# put string function with optional char positioning
def lcd_display_string(self, string, line=1, pos=0):
if line == 1:
pos_new = pos
elif line == 2:
pos_new = 0x40 + pos
elif line == 3:
pos_new = 0x14 + pos
elif line == 4:
pos_new = 0x54 + pos
self.lcd_write(0x80 + pos_new)
for char in string:
self.lcd_write(ord(char), Rs)
# clear lcd and set to home
def lcd_clear(self):
self.lcd_write(LCD_CLEARDISPLAY)
self.lcd_write(LCD_RETURNHOME)
   # define backlight on/off (on = lcd.backlight(1); off = lcd.backlight(0))
def METHOD_NAME(self, state): # for state, 1 = on, 0 = off
if state == 1:
self.lcd_device.write_cmd(LCD_BACKLIGHT)
elif state == 0:
self.lcd_device.write_cmd(LCD_NOBACKLIGHT)
# add custom characters (0 - 7)
def lcd_load_custom_chars(self, fontdata):
self.lcd_write(0x40);
for char in fontdata:
for line in char:
self.lcd_write_char(line)
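# A minimal usage sketch (illustrative; assumes an HD44780-style character LCD
# on an I2C backpack at the ADDRESS/I2CBUS configured above):
#   display = lcd()
#   display.lcd_display_string("Hello world", line=1)
#   display.lcd_display_string("Second line", line=2, pos=2)
#   display.METHOD_NAME(0)  # switch the backlight off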
|
tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVolumeGroupResult',
'AwaitableGetVolumeGroupResult',
'get_volume_group',
'get_volume_group_output',
]
@pulumi.output_type
class GetVolumeGroupResult:
"""
Response for Volume Group request.
"""
def __init__(__self__, encryption=None, id=None, name=None, network_acls=None, protocol_type=None, provisioning_state=None, system_data=None, METHOD_NAME=None, type=None):
if encryption and not isinstance(encryption, str):
raise TypeError("Expected argument 'encryption' to be a str")
pulumi.set(__self__, "encryption", encryption)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_acls and not isinstance(network_acls, dict):
raise TypeError("Expected argument 'network_acls' to be a dict")
pulumi.set(__self__, "network_acls", network_acls)
if protocol_type and not isinstance(protocol_type, str):
raise TypeError("Expected argument 'protocol_type' to be a str")
pulumi.set(__self__, "protocol_type", protocol_type)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def encryption(self) -> Optional[str]:
"""
Type of encryption
"""
return pulumi.get(self, "encryption")
@property
@pulumi.getter
def id(self) -> str:
"""
Azure resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkAcls")
def network_acls(self) -> Optional['outputs.NetworkRuleSetResponse']:
"""
A collection of rules governing the accessibility from specific network locations.
"""
return pulumi.get(self, "network_acls")
@property
@pulumi.getter(name="protocolType")
def protocol_type(self) -> Optional[str]:
"""
Type of storage target
"""
return pulumi.get(self, "protocol_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
State of the operation on the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Resource metadata required by ARM RPC
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetVolumeGroupResult(GetVolumeGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVolumeGroupResult(
encryption=self.encryption,
id=self.id,
name=self.name,
network_acls=self.network_acls,
protocol_type=self.protocol_type,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_volume_group(elastic_san_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
volume_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeGroupResult:
"""
Get an VolumeGroups.
:param str elastic_san_name: The name of the ElasticSan.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str volume_group_name: The name of the VolumeGroup.
"""
__args__ = dict()
__args__['elasticSanName'] = elastic_san_name
__args__['resourceGroupName'] = resource_group_name
__args__['volumeGroupName'] = volume_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:elasticsan/v20211120preview:getVolumeGroup', __args__, opts=opts, typ=GetVolumeGroupResult).value
return AwaitableGetVolumeGroupResult(
encryption=pulumi.get(__ret__, 'encryption'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
network_acls=pulumi.get(__ret__, 'network_acls'),
protocol_type=pulumi.get(__ret__, 'protocol_type'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_volume_group)
def get_volume_group_output(elastic_san_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
volume_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVolumeGroupResult]:
"""
Get an VolumeGroups.
:param str elastic_san_name: The name of the ElasticSan.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str volume_group_name: The name of the VolumeGroup.
"""
... |
exists iov | '''
Helper Script for StripO2O
@author: Huilin Qu
'''
import os
import subprocess
import logging
import json
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import sqlite3
def kill_subproc_noexcept(p):
'''Kill a subprocess without throwing OSError.
Used for cleaning up subprocesses when the main script crashes.'''
try:
p.terminate()
except OSError:
pass
def configLogger(logfile,loglevel=logging.INFO):
'''Setting up logging to both file and console.
@see: https://docs.python.org/2/howto/logging-cookbook.html
'''
# set up logging to file
logging.basicConfig(level=loglevel,
format='[%(asctime)s] %(levelname)s: %(message)s',
filename=logfile,
filemode='w')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(loglevel)
# set a format which is simpler for console use
formatter = logging.Formatter('[%(levelname)s] %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
def insert_to_file(template, target, replace_dict):
'''Update the template file based on the replace_dict, and write to the target.'''
logging.debug('Creating "%s" from template "%s" using dictionary:'%(target, template))
logging.debug(replace_dict)
with open(template, 'r') as input_file:
config=input_file.read()
with open(target, 'w') as output_file:
for key, value in replace_dict.items():
config = config.replace(key, value)
output_file.write(config)
return config
def create_metadata(metadataFilename, inputTag, destTags, destDb, since, userText):
'''Create metadata file for the conditionsUpload service.
@see: uploadConditions.runWizard()
@see: https://twiki.cern.ch/twiki/bin/view/CMS/DropBox
Keyword arguments:
metadataFilename -- output metadata filename
inputTag -- input tag name
destTags -- a list of destination tags
destDb -- [destinationDatabase] in metadata
since -- [since] in metadata
userText -- [userText] in metadata
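    For illustration, a call such as (tag names below are made up):
        create_metadata('mytag.txt', 'Tag_o2o', ['Tag_prompt'],
                        'oracle://cms_orcon_prod/cms_conditions', 300000, 'test upload')
    produces a JSON file of the form:
        {"destinationDatabase": "oracle://cms_orcon_prod/cms_conditions",
         "destinationTags": {"Tag_prompt": {}},
         "inputTag": "Tag_o2o", "since": 300000, "userText": "test upload"}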
'''
if isinstance(destTags, str):
destTags = [destTags]
if since:
since = int(since) # convert to int if since is not None (input since can be a str)
destinationTags = {}
for destinationTag in destTags:
destinationTags[destinationTag] = {}
metadata = {
'destinationDatabase': destDb,
'destinationTags': destinationTags,
'inputTag': inputTag,
'since': since,
'userText': userText,
}
logging.info('Writing metadata in %s', metadataFilename)
logging.debug(metadata)
    with open(metadataFilename, 'w') as metadataFile:
metadataFile.write(json.dumps(metadata, sort_keys=True, indent=4))
def upload_payload(dbFile, inputTag, destTags, destDb, since, userText):
'''Upload payload using conditionUploader. '''
if isinstance(destTags, str):
destTags = [destTags]
metadataFilename = dbFile.replace('.db', '.txt')
create_metadata(metadataFilename, inputTag, destTags, destDb, since, userText)
logging.info('Uploading tag [%s] from %s to [%s] in %s:' % (inputTag, dbFile, ','.join(destTags), destDb))
command = "uploadConditions.py %s" % dbFile
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = pipe.communicate()[0]
logging.info(out)
logging.info('@@@Upload return code = %d@@@' % pipe.returncode)
if pipe.returncode != 0:
raise RuntimeError('Upload FAILED!')
def copy_payload(dbFile, inputTag, destTags, destDb, since, userText):
'''Upload payload using conddb copy.'''
if isinstance(destTags, str):
destTags = [destTags]
if destDb.lower() == 'oracle://cms_orcon_prod/cms_conditions':
copyDestDb = 'onlineorapro'
elif destDb.lower() == 'oracle://cms_orcoff_prep/cms_conditions':
copyDestDb = 'oradev'
else:
copyDestDb = destDb
success = 0
def copy(dest):
command = 'conddb --force --yes --db {db} copy {inputTag} {destTag} --destdb {destDb} --synchronize --note "{note}"'.format(
db=dbFile, inputTag=inputTag, destTag=dest, destDb=copyDestDb, note=userText)
logging.info('Copy tag [%s] from %s to [%s] in %s:' % (inputTag, dbFile, dest, destDb))
logging.debug(command)
pipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = pipe.communicate()[0]
logging.info(out)
return pipe.returncode
for dest in destTags:
returncode = copy(dest)
if returncode == 0: success += 1
logging.info('@@@Upload return code = %d@@@' % (success - len(destTags)))
if success != len(destTags):
raise RuntimeError('Upload FAILED!')
def send_mail(subject, message, send_to, send_from, text_attachments=[]):
'''Send an email. [send_to] needs to be a list.'''
msg = MIMEMultipart()
msg['Subject'] = subject
msg['From'] = send_from
msg['To'] = ','.join(send_to)
msg.attach(MIMEText(message))
for fn in text_attachments:
with open(fn, 'rb') as txtfile:
attachment = MIMEText(txtfile.read())
attachment.add_header('Content-Disposition', 'attachment', filename=os.path.basename(fn))
msg.attach(attachment)
s = smtplib.SMTP('localhost')
s.sendmail(send_from, send_to, msg.as_string())
s.quit()
def METHOD_NAME(dbFile, tag):
'''Check if there exists any IOV for a specific tag in the given sqlite file.'''
dataConnection = sqlite3.connect(dbFile)
dataCursor = dataConnection.cursor()
dataCursor.execute('select SINCE from IOV where TAG_NAME=:tag_name', {'tag_name' : tag})
return len(dataCursor.fetchall()) > 0 |
header parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"maintenance applyupdate list",
)
class List(AAZCommand):
"""Get Configuration records within a subscription
:example: ApplyUpdates_List
az maintenance applyupdate list
"""
_aaz_info = {
"version": "2023-04-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/providers/microsoft.maintenance/applyupdates", "2023-04-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ApplyUpdatesList(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance.value, client_flatten=True)
return result
class ApplyUpdatesList(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/providers/Microsoft.Maintenance/applyUpdates",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-04-01",
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.value = AAZListType()
value = cls._schema_on_200.value
value.Element = AAZObjectType()
_element = cls._schema_on_200.value.Element
_element.id = AAZStrType(
flags={"read_only": True},
)
_element.name = AAZStrType(
flags={"read_only": True},
)
_element.properties = AAZObjectType(
flags={"client_flatten": True},
)
_element.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_element.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.value.Element.properties
properties.last_update_time = AAZStrType(
serialized_name="lastUpdateTime",
)
properties.resource_id = AAZStrType(
serialized_name="resourceId",
)
properties.status = AAZStrType()
system_data = cls._schema_on_200.value.Element.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"] |
test project notequals other project is true | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the Project resource."""
import unittest.mock as mock
import unittest
from tests.unittest_utils import ForsetiTestCase
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import ProjectLifecycleState
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.common.gcp_type.resource import ResourceType
class ProjectTest(ForsetiTestCase):
def setUp(self):
self.org = Organization('1234567890', display_name='My org name')
self.folder = Folder('55555', display_name='My folder', parent=self.org)
self.project1 = Project('project-1',
11111,
display_name='Project 1')
self.project2 = Project('project-2',
22222,
display_name='Project 2',
parent=self.org)
self.project3 = Project('project-3',
33333,
display_name='Project 3',
parent=self.folder)
def test_create_project_getters_are_correct(self):
"""Test Project getters returns correct values."""
my_project_id = 'my-projectid-1'
my_project_number = 1234567890
my_project_name = 'My project name'
project = Project(my_project_id, project_number=my_project_number,
display_name=my_project_name,
lifecycle_state=ProjectLifecycleState.ACTIVE)
self.assertEqual(my_project_id, project.id)
self.assertEqual(my_project_number, project.project_number)
self.assertEqual(
Project.RESOURCE_NAME_FMT % my_project_id, project.name)
self.assertEqual(my_project_name, project.display_name)
self.assertEqual(ResourceType.PROJECT, project.type)
self.assertEqual(None, project.parent)
self.assertEqual(ProjectLifecycleState.ACTIVE,
project.lifecycle_state)
def test_project_equals_other_project_is_true(self):
"""Test that Project == another Project."""
id_1 = 'my-project-1'
number_1 = 1234567890
name_1 = 'My project 1'
project1 = Project(id_1, number_1, display_name=name_1)
id_2 = 'my-project-1'
number_2 = 1234567890
name_2 = 'My project 1'
project2 = Project(id_2, number_2, display_name=name_2)
self.assertTrue(project1 == project2)
def METHOD_NAME(self):
"""Test that a Project does not equal a Project of different id."""
id_1 = 'my-project-1'
number_1 = 1234567890
name_1 = 'My project 1'
project1 = Project(id_1, number_1, display_name=name_1)
id_2 = 'my-project-2'
number_2 = 1234567891
name_2 = 'My project 2'
project2 = Project(id_2, number_2, display_name=name_2)
self.assertTrue(project1 != project2)
def test_project_notequals_org_is_true(self):
"""Test that a Project != Organization."""
id_1 = 'my-project-1'
number_1 = 1234567890
name_1 = 'My project 1'
project = Project(id_1, number_1, display_name=name_1)
id_2 = '1234567890'
name_2 = 'My org 1'
org = Organization(id_2, display_name=name_2)
self.assertTrue(project != org)
if __name__ == '__main__':
unittest.main() |
type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetManagedEnvironmentsStorageResult',
'AwaitableGetManagedEnvironmentsStorageResult',
'get_managed_environments_storage',
'get_managed_environments_storage_output',
]
@pulumi.output_type
class GetManagedEnvironmentsStorageResult:
"""
Storage resource for managedEnvironment.
"""
def __init__(__self__, id=None, name=None, properties=None, system_data=None, METHOD_NAME=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", METHOD_NAME)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.ManagedEnvironmentStorageResponseProperties':
"""
Storage properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetManagedEnvironmentsStorageResult(GetManagedEnvironmentsStorageResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagedEnvironmentsStorageResult(
id=self.id,
name=self.name,
properties=self.properties,
system_data=self.system_data,
METHOD_NAME=self.METHOD_NAME)
def get_managed_environments_storage(environment_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
storage_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedEnvironmentsStorageResult:
"""
Get storage for a managedEnvironment.
:param str environment_name: Name of the Environment.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str storage_name: Name of the storage.
"""
__args__ = dict()
__args__['environmentName'] = environment_name
__args__['resourceGroupName'] = resource_group_name
__args__['storageName'] = storage_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:app/v20230502preview:getManagedEnvironmentsStorage', __args__, opts=opts, typ=GetManagedEnvironmentsStorageResult).value
return AwaitableGetManagedEnvironmentsStorageResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
METHOD_NAME=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_managed_environments_storage)
def get_managed_environments_storage_output(environment_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetManagedEnvironmentsStorageResult]:
"""
Get storage for a managedEnvironment.
:param str environment_name: Name of the Environment.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str storage_name: Name of the storage.
"""
... |
broadcast shape | """
A collection of helper functions for inferring or sanitizing shapes, axes and slices before major computation
"""
import itertools
import numpy as np
import torch
from typing import Tuple, Union
def METHOD_NAME(shape_a: Tuple[int, ...], shape_b: Tuple[int, ...]) -> Tuple[int, ...]:
"""
Infers, if possible, the broadcast output shape of two operands a and b. Inspired by stackoverflow post:
https://stackoverflow.com/questions/24743753/test-if-an-array-is-broadcastable-to-a-shape
Parameters
----------
shape_a : Tuple[int,...]
Shape of first operand
shape_b : Tuple[int,...]
Shape of second operand
Raises
-------
ValueError
If the two shapes cannot be broadcast.
Examples
--------
>>> import heat as ht
>>> ht.core.stride_tricks.broadcast_shape((5,4),(4,))
(5, 4)
>>> ht.core.stride_tricks.broadcast_shape((1,100,1),(10,1,5))
(10, 100, 5)
>>> ht.core.stride_tricks.broadcast_shape((8,1,6,1),(7,1,5,))
(8, 7, 6, 5)
>>> ht.core.stride_tricks.broadcast_shape((2,1),(8,4,3))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "heat/core/stride_tricks.py", line 42, in broadcast_shape
"operands could not be broadcast, input shapes {} {}".format(shape_a, shape_b)
ValueError: operands could not be broadcast, input shapes (2, 1) (8, 4, 3)
"""
try:
resulting_shape = torch.broadcast_shapes(shape_a, shape_b)
except AttributeError: # torch < 1.8
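# Fallback for older torch without torch.broadcast_shapes: emulate NumPy-style
# broadcasting by hand. Shapes are aligned from their trailing dimensions
# (hence the [::-1] reversals), the shorter one is implicitly padded with 1s by
# zip_longest, and two dimensions are compatible when they are equal or one of
# them is 1 (0-sized dimensions are handled explicitly).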
it = itertools.zip_longest(shape_a[::-1], shape_b[::-1], fillvalue=1)
resulting_shape = max(len(shape_a), len(shape_b)) * [None]
for i, (a, b) in enumerate(it):
if a == 0 and b == 1 or b == 0 and a == 1:
resulting_shape[i] = 0
elif a == 1 or b == 1 or a == b:
resulting_shape[i] = max(a, b)
else:
raise ValueError(
f"operands could not be broadcast, input shapes {shape_a} {shape_b}"
)
return tuple(resulting_shape[::-1])
except TypeError:
raise TypeError(f"operand 1 must be tuple of ints, not {type(shape_a)}")
except NameError:
raise TypeError(f"operands must be tuples of ints, not {shape_a} and {shape_b}")
except RuntimeError:
raise ValueError(f"operands could not be broadcast, input shapes {shape_a} {shape_b}")
return tuple(resulting_shape)
def sanitize_axis(
shape: Tuple[int, ...], axis: Union[int, None, Tuple[int, ...]]
) -> Union[int, None, Tuple[int, ...]]:
"""
Checks conformity of an axis with respect to a given shape. The axis will be converted to its positive equivalent
and is checked to be within bounds
Parameters
----------
shape : Tuple[int, ...]
Shape of an array
axis : ints or Tuple[int, ...] or None
The axis to be sanitized
Raises
-------
ValueError
if the axis cannot be sanitized, i.e. out of bounds.
TypeError
if the axis is not integral.
Examples
-------
>>> import heat as ht
>>> ht.core.stride_tricks.sanitize_axis((5,4,4),1)
1
>>> ht.core.stride_tricks.sanitize_axis((5,4,4),-1)
2
>>> ht.core.stride_tricks.sanitize_axis((5, 4), (1,))
(1,)
>>> ht.core.stride_tricks.sanitize_axis((5, 4), 1.0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "heat/heat/core/stride_tricks.py", line 99, in sanitize_axis
raise TypeError("axis must be None or int or tuple, but was {}".format(type(axis)))
TypeError: axis must be None or int or tuple, but was <class 'float'>
"""
# scalars are handled like unsplit matrices
if len(shape) == 0:
axis = None
if axis is not None and not isinstance(axis, int) and not isinstance(axis, tuple):
raise TypeError(f"axis must be None or int or tuple, but was {type(axis)}")
if isinstance(axis, tuple):
axis = tuple(dim + len(shape) if dim < 0 else dim for dim in axis)
for dim in axis:
if dim < 0 or dim >= len(shape):
raise ValueError(f"axis {axis} is out of bounds for shape {shape}")
return axis
if axis is None or 0 <= axis < len(shape):
return axis
elif axis < 0:
axis += len(shape)
if axis < 0 or axis >= len(shape):
raise ValueError(f"axis {axis} is out of bounds for shape {shape}")
return axis
def sanitize_shape(shape: Union[int, Tuple[int, ...]], lval: int = 0) -> Tuple[int, ...]:
"""
Verifies and normalizes the given shape.
Parameters
----------
shape : int or Tuple[int,...]
Shape of an array.
lval : int
Lowest legal value
Raises
-------
ValueError
If the shape contains illegal values, e.g. negative numbers.
TypeError
If the given shape is neither an int nor a sequence of ints.
Examples
--------
>>> import heat as ht
>>> ht.core.stride_tricks.sanitize_shape(3)
(3,)
>>> ht.core.stride_tricks.sanitize_shape([1, 2, 3])
(1, 2, 3)
>>> ht.core.stride_tricks.sanitize_shape(1.0)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "heat/heat/core/stride_tricks.py", line 159, in sanitize_shape
raise TypeError("expected sequence object with length >= 0 or a single integer")
TypeError: expected sequence object with length >= 0 or a single integer
"""
shape = tuple(shape) if hasattr(shape, "__iter__") else (shape,)
for dimension in shape:
if issubclass(type(dimension), np.integer):
dimension = int(dimension)
if not isinstance(dimension, int):
raise TypeError("expected sequence object with length >= 0 or a single integer")
if dimension < lval:
raise ValueError("negative dimensions are not allowed")
return shape
def sanitize_slice(sl: slice, max_dim: int) -> slice:
"""
Remove None-types from a slice
Parameters
----------
sl : slice
slice to adjust
max_dim : int
maximum index for the given slice
Raises
------
TypeError
if sl is not a slice.
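Examples
--------
>>> import heat as ht
>>> ht.core.stride_tricks.sanitize_slice(slice(None, None, None), 10)
slice(0, 10, 1)
>>> ht.core.stride_tricks.sanitize_slice(slice(-3, None, 2), 10)
slice(7, 10, 2)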
"""
if not isinstance(sl, slice):
raise TypeError("This function is only for slices!")
new_sl = [None] * 3
new_sl[0] = 0 if sl.start is None else sl.start
if new_sl[0] < 0:
new_sl[0] += max_dim
new_sl[1] = max_dim if sl.stop is None else sl.stop
if new_sl[1] < 0:
new_sl[1] += max_dim
new_sl[2] = 1 if sl.step is None else sl.step
return slice(new_sl[0], new_sl[1], new_sl[2]) |
raise xeq1 | # Tasks for testing
import time
import sys, shutil
import typing as ty
from pathlib import Path
import functools
import operator
import subprocess as sp
import pytest
from fileformats.generic import File
from ..core import Workflow
from ..submitter import Submitter
from ... import mark
need_docker = pytest.mark.skipif(
shutil.which("docker") is None or sp.call(["docker", "info"]),
reason="no docker within the container",
)
no_win = pytest.mark.skipif(
sys.platform.startswith("win"),
reason="docker command not adjusted for windows docker",
)
need_slurm = pytest.mark.skipif(
not (bool(shutil.which("sbatch")) and bool(shutil.which("sacct"))),
reason="slurm not available",
)
need_sge = pytest.mark.skipif(
not (bool(shutil.which("qsub")) and bool(shutil.which("qacct"))),
reason="sge not available",
)
def result_no_submitter(shell_task, plugin=None):
"""helper function to return result when running without submitter"""
return shell_task()
def result_submitter(shell_task, plugin):
"""helper function to return result when running with submitter
with specific plugin
"""
with Submitter(plugin=plugin) as sub:
shell_task(submitter=sub)
return shell_task.result()
dot_check = sp.run(["which", "dot"], stdout=sp.PIPE, stderr=sp.PIPE)
if dot_check.stdout:
DOT_FLAG = True
else:
DOT_FLAG = False
@mark.task
def op_4var(a, b, c, d) -> str:
return f"{a} {b} {c} {d}"
@mark.task
def fun_addtwo(a: int) -> int:
import time
time.sleep(1)
if a == 3:
time.sleep(2)
return a + 2
@mark.task
def fun_addtwo_notype(a):
import time
time.sleep(1)
if a == 3:
time.sleep(2)
return a + 2
@mark.task
def fun_addtwo_with_threadcount(a: int, sgeThreads: int = 1) -> int:
import time
time.sleep(1)
if a == 3:
time.sleep(2)
return a + 2
@mark.task
def fun_addvar(
a: ty.Union[int, float], b: ty.Union[int, float]
) -> ty.Union[int, float]:
return a + b
@mark.task
def fun_addvar_notype(a, b):
return a + b
@mark.task
@mark.annotate({"return": {"sum": float, "sub": float}})
def fun_addsubvar(a: float, b: float):
return a + b, a - b
@mark.task
def fun_addvar_none(a: int, b: ty.Optional[int]) -> int:
if b is None:
return a
else:
return a + b
@mark.task
def fun_addvar_default(a: int, b: int = 1) -> int:
return a + b
@mark.task
def fun_addvar_default_notype(a, b=1):
return a + b
@mark.task
def fun_addvar3(a: int, b: int, c: int) -> int:
return a + b + c
@mark.task
def fun_addvar4(a: int, b: int, c: int, d: int) -> int:
return a + b + c + d
@mark.task
def moment(lst: ty.List[float], n: float) -> float:
return sum([i**n for i in lst]) / len(lst)
@mark.task
def fun_div(a: ty.Union[int, float], b: ty.Union[int, float]) -> float:
return a / b
@mark.task
def multiply(x: int, y: int) -> int:
return x * y
@mark.task
def multiply_list(x: list, y: int) -> list:
return x * y
@mark.task
def multiply_mixed(x: list, y: int) -> list:
return x * y
@mark.task
def add2(x: int) -> int:
if x == 1 or x == 12:
time.sleep(1)
return x + 2
@mark.task
def METHOD_NAME(x: int) -> int:
if x == 1:
raise Exception("x is 1, so i'm raising an exception!")
return x
@mark.task
@mark.annotate({"return": {"out_add": float, "out_sub": float}})
def add2_sub2_res(res):
"""function that takes entire output as an input"""
return res["out"] + 2, res["out"] - 2
@mark.task
@mark.annotate({"return": {"out_add": ty.List[float], "out_sub": ty.List[float]}})
def add2_sub2_res_list(res):
"""function that takes entire output as an input"""
return [r["out"] + 2 for r in res], [r["out"] - 2 for r in res]
@mark.task
def power(a: int, b: int) -> int:
return a**b
@mark.task
def identity(x):
return x
@mark.task
def identity_2flds(
x1, x2
) -> ty.NamedTuple("Output", [("out1", ty.Any), ("out2", ty.Any)]):
return x1, x2
@mark.task
def ten(x) -> int:
return 10
@mark.task
def add2_wait(x: int) -> int:
time.sleep(2)
return x + 2
@mark.task
def list_output(x: int) -> ty.List[int]:
return [x, 2 * x, 3 * x]
@mark.task
def list_sum(x: ty.Sequence[ty.Union[int, float]]) -> ty.Union[int, float]:
return sum(x)
@mark.task
def fun_dict(d: dict) -> str:
kv_list = [f"{k}:{v}" for (k, v) in d.items()]
return "_".join(kv_list)
@mark.task
def fun_write_file(filename: Path, text="hello") -> File:
with open(filename, "w") as f:
f.write(text)
return File(filename)
@mark.task
def fun_write_file_list(
filename_list: ty.List[ty.Union[str, File, Path]], text="hi"
) -> ty.List[File]:
for ii, filename in enumerate(filename_list):
with open(filename, "w") as f:
f.write(f"from file {ii}: {text}")
filename_list = [Path(filename).absolute() for filename in filename_list]
return filename_list
@mark.task
def fun_write_file_list2dict(
filename_list: ty.List[ty.Union[str, File, Path]], text="hi"
) -> ty.Dict[str, ty.Union[File, int]]:
filename_dict = {}
for ii, filename in enumerate(filename_list):
with open(filename, "w") as f:
f.write(f"from file {ii}: {text}")
filename_dict[f"file_{ii}"] = Path(filename).absolute()
# adding an additional field with int
filename_dict["random_int"] = 20
return filename_dict
@mark.task
def fun_file(filename: File):
with open(filename) as f:
txt = f.read()
return txt
@mark.task
def fun_file_list(filename_list: ty.List[File]):
txt_list = []
for filename in filename_list:
with open(filename) as f:
txt_list.append(f.read())
return " ".join(txt_list)
def gen_basic_wf(name="basic-wf"):
"""
Generates `Workflow` of two tasks
Task Input
----------
x : int (5)
Task Output
-----------
out : int (9)
"""
wf = Workflow(name=name, input_spec=["x"])
wf.inputs.x = 5
wf.add(fun_addtwo(name="task1", a=wf.lzin.x, b=0))
wf.add(fun_addvar(name="task2", a=wf.task1.lzout.out, b=2))
wf.set_output([("out", wf.task2.lzout.out)])
return wf
def gen_basic_wf_with_threadcount(name="basic-wf-with-threadcount"):
"""
Generates `Workflow` of two tasks
Task Input
----------
x : int (5)
Task Output
-----------
out : int (9)
"""
wf = Workflow(name=name, input_spec=["x"])
wf.inputs.x = 5
wf.add(fun_addtwo_with_threadcount(name="task1", a=wf.lzin.x, sgeThreads=4))
wf.add(fun_addvar(name="task2", a=wf.task1.lzout.out, b=2))
wf.set_output([("out", wf.task2.lzout.out)])
return wf
def gen_basic_wf_with_threadcount_concurrent(name="basic-wf-with-threadcount"):
"""
Generates `Workflow` of two tasks
Task Input
----------
x : int (5)
Task Output
-----------
out : int (9)
"""
wf = Workflow(name=name, input_spec=["x"])
wf.inputs.x = 5
wf.add(fun_addtwo_with_threadcount(name="task1_1", a=wf.lzin.x, sgeThreads=4))
wf.add(fun_addtwo_with_threadcount(name="task1_2", a=wf.lzin.x, sgeThreads=2))
wf.add(fun_addvar(name="task2", a=wf.task1_1.lzout.out, b=2))
wf.set_output([("out1", wf.task2.lzout.out), ("out2", wf.task1_2.lzout.out)])
return wf
@mark.task
@mark.annotate({"return": {"sum": int, "products": ty.List[int]}})
def list_mult_sum(scalar: int, in_list: ty.List[int]) -> ty.Tuple[int, ty.List[int]]:
products = [scalar * x for x in in_list]
return functools.reduce(operator.add, products, 0), products
@mark.task
@mark.annotate({"return": {"x": str, "y": int, "z": float}})
def foo(a: str, b: int, c: float) -> ty.Tuple[str, int, float]:
return a, b, c |
get n grouped pixels above threshold | import numpy as np
from scipy.ndimage.filters import median_filter
from itertools import groupby
from operator import itemgetter
from banzai.stages import Stage
from banzai.utils import qc
from banzai.utils.stats import robust_standard_deviation
from banzai.logs import get_logger
logger = get_logger()
class PatternNoiseDetector(Stage):
# Signal to Noise threshold to raise an alert
SNR_THRESHOLD = 10.0
# The fraction of grouped SNR pixels that need to be above the threshold to raise an alert
MIN_FRACTION_PIXELS_ABOVE_THRESHOLD = 0.01
# The minimum number of adjacent pixels to form a group
MIN_ADJACENT_PIXELS = 3
def __init__(self, runtime_context):
super(PatternNoiseDetector, self).__init__(runtime_context)
def do_stage(self, image):
pattern_noise_is_bad, fraction_pixels_above_threshold = self.check_for_pattern_noise(image.data)
logging_tags = {'snr_threshold': self.SNR_THRESHOLD,
'min_fraction_pixels_above_threshold': self.MIN_FRACTION_PIXELS_ABOVE_THRESHOLD,
'min_adjacent_pixels': self.MIN_ADJACENT_PIXELS,
'fraction_pixels_above_threshold': fraction_pixels_above_threshold}
if pattern_noise_is_bad:
logger.error('Image found to have pattern noise.', image=image, extra_tags=logging_tags)
else:
logger.info('No pattern noise found.', image=image, extra_tags=logging_tags)
qc_results = {'pattern_noise.failed': pattern_noise_is_bad,
'pattern_noise.snr_threshold': self.SNR_THRESHOLD,
'pattern_noise.min_fraction_pixels_above_threshold': self.MIN_FRACTION_PIXELS_ABOVE_THRESHOLD,
'pattern_noise.min_adjacent_pixels': self.MIN_ADJACENT_PIXELS,
'pattern_noise.fraction_pixels_above_threshold': fraction_pixels_above_threshold}
qc.save_qc_results(self.runtime_context, qc_results, image)
return image
def check_for_pattern_noise(self, data):
"""
Test for pattern noise in an image
Parameters
----------
data : numpy array
Image data to test for pattern noise
Returns
-------
is_bad : bool
Returns true if the image has pattern noise
"""
trimmed_data = trim_image_edges(data)
power_2d = get_2d_power_band(trimmed_data)
snr = compute_snr(power_2d)
fraction_pixels_above_threshold = self.METHOD_NAME(snr) / float(len(snr))
has_pattern_noise = fraction_pixels_above_threshold > self.MIN_FRACTION_PIXELS_ABOVE_THRESHOLD
return has_pattern_noise, fraction_pixels_above_threshold
def METHOD_NAME(self, snr):
"""
Compute the number of grouped pixels above the alert threshold
Parameters
----------
snr : numpy array
The 1D SNR
Returns
-------
n_grouped_pixels_above_threshold : numpy array
The number of SNR pixels with values above SNR_THRESHOLD
that are in groups of at least MIN_ADJACENT_PIXELS
"""
idx_above_thresh = np.where(snr > self.SNR_THRESHOLD)[0]
consecutive_group_lengths = np.array([len(list(map(itemgetter(1), g))) for k, g in
groupby(enumerate(idx_above_thresh), key=lambda x: x[0]-x[1])])
pixel_groups = consecutive_group_lengths >= self.MIN_ADJACENT_PIXELS
n_grouped_pixels_above_threshold = sum(consecutive_group_lengths[pixel_groups])
return n_grouped_pixels_above_threshold
def trim_image_edges(data, fractional_edge_width=0.025):
"""
Clip image edges to avoid edge effects in Fourier transform
Parameters
----------
data : numpy array
The data to be trimmed
fractional_edge_width : float
Fraction of mean(nx, ny) to trim from each edge
Returns
-------
trimmed_data : numpy array
Trimmed data array
"""
ntrim = int(round(np.mean(data.shape) * fractional_edge_width))
return data[ntrim:-ntrim, ntrim:-ntrim]
def get_2d_power_band(data, fractional_band_width=0.25, fractional_inner_edge_to_discard=0.025):
"""
Extract the central region of the 2D Fourier transform
Parameters
----------
data : numpy array
The data for computing the Fourier Transform
fractional_band_width : float
Vertical band width as a fraction of ny
fractional_inner_edge_to_discard : float
Amount of inner area (i.e. where large-scale power is detected) to discard as a fraction of nx
Returns
-------
power_2d : numpy array
Central band of 2d Fourier transform
"""
# Get full 2D power
full_power_2d = abs(np.fft.rfft2(data))
# Extract horizontal band, as corners of 2D FFT can vary significantly between images
ny, nx = full_power_2d.shape
y1 = int(ny * (0.5 - fractional_band_width/2))
y2 = int(ny * (0.5 + fractional_band_width/2))
x1 = int(nx * fractional_inner_edge_to_discard)
return full_power_2d[y1:y2, x1:]
def compute_snr(power_2d, fractional_window_size=0.05):
"""
Compute the 1D signal-to-noise ratio of the collapsed power spectrum band
Parameters
----------
power_2d : numpy array
The 2D Fourier transform of the data
fractional_window_size : float
Median filter window size as a fraction of the 1D power array
Returns
-------
snr : numpy array
The 1D SNR
"""
power = np.median(power_2d, axis=0)
p2p_scatter = abs(power[1:] - power[:-1])
power = power[1:] # Throw away DC term
# Median filter
window_size = get_odd_integer(fractional_window_size * len(power))
continuum = median_filter(power, size=window_size)
pixel_to_pixel_scatter = median_filter(p2p_scatter, size=window_size)
snr = (power - continuum) / pixel_to_pixel_scatter
# Also divide out the global scatter for any residual structure that was not removed with the median filter
global_scatter = robust_standard_deviation(snr)
snr /= global_scatter
return snr
def get_odd_integer(x):
"""
Return the ceiling odd integer given a float
Parameters
----------
x : float
The number to be converted to the closest odd integer
Returns
-------
y : int
Odd integer of x
"""
return int(round(round(x) / 2) * 2) + 1 |
misc | """
ebuild tree manifest/digest support
"""
__all__ = ("parse_manifest", "Manifest")
import errno
import operator
import os
from snakeoil.chksum import get_handler
from snakeoil.mappings import ImmutableDict
from .. import gpg
from ..fs.livefs import iter_scan
from ..package import errors
from . import cpv
def _write_manifest(handle, chf, filename, chksums):
"""Convenient, internal method for writing manifests"""
size = chksums.pop("size")
handle.write("%s %s %i" % (chf.upper(), filename, size))
for chf in sorted(chksums):
handle.write(" %s %s" % (chf.upper(), get_handler(chf).long2str(chksums[chf])))
handle.write("\n")
def convert_chksums(iterable):
for chf, sum in iterable:
chf = chf.lower()
if chf == "size":
# explicit size entries are stupid, format has implicit size
continue
else:
yield chf, int(sum, 16)
def parse_manifest(source, ignore_gpg=True):
types = {"DIST": {}, "AUX": {}, "EBUILD": {}, "MISC": {}}
# manifest v2 format: (see glep 44 for exact rules)
# TYPE filename size (CHF sum)+
# example 'type' entry, all one line
# MISC metadata.xml 219 RMD160 613195ece366b33606e71ff1753be048f2507841 SHA1 d162fb909241ef50b95a3539bdfcde95429bdf81 SHA256 cbd3a20e5c89a48a842f7132fe705bf39959f02c1025052efce8aad8a8baa8dc
# manifest v1 format is
# CHF sum filename size
# note that we do _not_ support manifest1
chf_types = {"size"}
f = None
try:
if isinstance(source, str):
i = f = open(source, "r", 32768)
else:
i = f = source.text_fileobj()
if ignore_gpg:
i = gpg.skip_signatures(f)
for data in i:
line = data.split()
if not line:
continue
d = types.get(line[0])
if d is None:
raise errors.ParseChksumError(
source, f"unknown manifest type: {line[0]}: {line!r}"
)
if len(line) % 2 != 1:
raise errors.ParseChksumError(
source,
"manifest 2 entry doesn't have right "
f"number of tokens, {len(line)}: {line!r}",
)
chf_types.update(line[3::2])
# pair up consecutive (CHF, value) tokens:
# [chf1, sum1, chf2, sum2, ...] becomes [(chf1, sum1), (chf2, sum2), ...]
i = iter(line[3:])
d[line[1]] = [("size", int(line[2]))] + list(convert_chksums(zip(i, i)))
except (IndexError, ValueError):
raise errors.ParseChksumError(source, "invalid data format")
finally:
if f is not None and f.close:
f.close()
for t, d in types.items():
types[t] = ImmutableDict((k, dict(v)) for k, v in d.items())
# ordering annoyingly matters. bad api.
return [types[x] for x in ("DIST", "AUX", "EBUILD", "MISC")]
class Manifest:
def __init__(self, path, enforce_gpg=False, thin=False, allow_missing=False):
self.path = path
self.thin = thin
self.allow_missing = allow_missing
self._gpg = enforce_gpg
self._sourced = False
def _pull_manifest(self):
if self._sourced:
return
try:
data = parse_manifest(self.path, ignore_gpg=self._gpg)
except EnvironmentError as e:
if not (self.thin or self.allow_missing) or e.errno != errno.ENOENT:
raise errors.ParseChksumError(self.path, e) from e
data = {}, {}, {}, {}
except errors.ChksumError as e:
# recreate cpv from manifest path
catpn = os.sep.join(self.path.split(os.sep)[-3:-1])
pkg = cpv.UnversionedCPV(catpn)
raise errors.MetadataException(pkg, "manifest", str(e))
self._dist, self._aux, self._ebuild, self._misc = data
self._sourced = True
def update(self, fetchables, chfs=None):
"""Update the related Manifest file.
:param fetchables: fetchables of the package
"""
if self.thin and not fetchables:
# Manifest files aren't necessary with thin manifests and no distfiles
return
_key_sort = operator.itemgetter(0)
excludes = frozenset(["CVS", ".svn", "Manifest"])
aux, ebuild, METHOD_NAME = {}, {}, {}
if not self.thin:
filesdir = "/files/"
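# Classify every regular file in the package directory: entries below
# files/ become AUX entries, top-level *.ebuild files become EBUILD entries,
# and any other top-level file is recorded as MISC.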
for obj in iter_scan(
"/", offset=os.path.dirname(self.path), chksum_types=chfs
):
if not obj.is_reg:
continue
pathname = obj.location
if excludes.intersection(pathname.split("/")):
continue
if pathname.startswith(filesdir):
d = aux
pathname = pathname[len(filesdir) :]
elif obj.dirname == "/":
pathname = pathname[1:]
if obj.location[-7:] == ".ebuild":
d = ebuild
else:
d = METHOD_NAME
else:
raise Exception(
"Unexpected directory found in %r; %r"
% (self.path, obj.dirname)
)
d[pathname] = dict(obj.chksums)
handle = open(self.path, "w")
# write it in alphabetical order; aux gets flushed now.
for path, chksums in sorted(aux.items(), key=_key_sort):
_write_manifest(handle, "AUX", path, chksums)
# next dist...
for fetchable in sorted(fetchables, key=operator.attrgetter("filename")):
_write_manifest(
handle,
"DIST",
os.path.basename(fetchable.filename),
dict(fetchable.chksums),
)
# then ebuild and misc
for mtype, inst in (("EBUILD", ebuild), ("MISC", METHOD_NAME)):
for path, chksum in sorted(inst.items(), key=_key_sort):
_write_manifest(handle, mtype, path, chksum)
@property
def aux_files(self):
self._pull_manifest()
return self._aux
@property
def distfiles(self):
self._pull_manifest()
return self._dist
@property
def ebuilds(self):
self._pull_manifest()
return self._ebuild
@property
def METHOD_NAME(self):
self._pull_manifest()
return self._misc |
test length | # ----------------------------------------------------------------------------
# MODES: serial
# CLASSES: nightly
#
# Test Case: correlation.py
#
# Tests: Tests database correlations and automatic correlation.
#
# Programmer: Brad Whitlock
# Date: Mon Mar 22 08:28:25 PDT 2004
#
# Modifications:
# Brad Whitlock, Fri Apr 1 09:48:23 PDT 2005
# I renamed some baselines so I could add correlation definition tests.
#
# ----------------------------------------------------------------------------
def GetTruncatedWindowInformationString():
# Get the window information and convert it to a string.
s = str(GetWindowInformation())
# Only use the first 5 or so lines from the string.
lines = s.split("\n")
s = ""
for i in range(5):
if(i < len(lines)):
s = s + lines[i]
s = s + "\n"
return s
#
# Look at the first few lines of the string representation of the
# WindowInformation to see the list of time sliders, etc.
#
def TestWindowInformation(testname):
TestText(testname, GetTruncatedWindowInformationString())
def METHOD_NAME(testname):
tsLength = TimeSliderGetNStates()
testString = "%s has %d states\n" % (GetActiveTimeSlider(), tsLength)
testString = testString + GetTruncatedWindowInformationString()
TestText(testname, testString)
def ComputeStates(nStates, stateIncrement):
lastState = nStates - 1
states = list(range(0, nStates, stateIncrement))
if(states[-1] != lastState):
states = states + [lastState]
return states
def GetTestName(sectionIndex, testIndex):
return "correlation_%d_%02d" % (sectionIndex, testIndex)
def TestTimeSlider(sectionIndex, testIndex):
Test(GetTestName(sectionIndex, testIndex))
METHOD_NAME(GetTestName(sectionIndex, testIndex + 1))
return testIndex + 2
#
# Tests that the database correlations look a certain way.
#
def TestCorrelation(name, sectionIndex, testIndex):
names = GetDatabaseCorrelationNames()
s = ""
if name in names:
c = GetDatabaseCorrelation(name)
s = s + str(c) + "\n"
TestText(GetTestName(sectionIndex, testIndex), s)
return testIndex + 1
#
# Tests the time slider length and the correlation list.
#
def TestLengthAndCorrelationList(testname):
tsLength = TimeSliderGetNStates()
s = "%s has %d states\n" % (GetActiveTimeSlider(), tsLength)
s = s + GetTruncatedWindowInformationString() + "\n\n"
names = GetDatabaseCorrelationNames()
for name in names:
c = GetDatabaseCorrelation(name)
s = s + str(c) + "\n"
TestText(testname, s)
#
# The databases that we'll use for most tests.
#
dbs = (data_path("pdb_test_data/dbA00.pdb"),
data_path("pdb_test_data/dbB00.pdb"),
data_path("pdb_test_data/dbC00.pdb"))
#
# Open each database and create a plot.
#
for db in dbs:
OpenDatabase(db)
AddPlot("FilledBoundary", "material(mesh)")
DrawPlots()
#
# Correlation Types
#
IndexForIndexCorrelation = 0
StretchedIndexCorrelation = 1
TimeCorrelation = 2
CycleCorrelation = 3
correlationTypes = (IndexForIndexCorrelation, StretchedIndexCorrelation,\
TimeCorrelation, CycleCorrelation)
correlationNames = ("ABC_Index", "ABC_Stretch", "ABC_Time", "ABC_Cycle")
correlationTitles = ("Padded index correlation", "Stretched correlation",\
"Time correlation", "Cycle correlation")
#
# Create correlations between the 3 databases
#
sectionIndex = 0
testIndex = 0
for i in range(len(correlationTypes)):
TestSection(correlationTitles[i])
CreateDatabaseCorrelation(correlationNames[i], dbs, correlationTypes[i])
testIndex = TestCorrelation(correlationNames[i], sectionIndex, 0)
states = ComputeStates(TimeSliderGetNStates(), 5)
for state in states:
SetTimeSliderState(state)
testIndex = TestTimeSlider(sectionIndex, testIndex)
sectionIndex = sectionIndex + 1
#
# Make sure that a new window has the same plots and correlations.
#
TestSection("Creating new window")
sectionIndex = 4
testIndex = 0
alteredCorrelation = correlationNames[0]
SetActiveTimeSlider(alteredCorrelation)
SetTimeSliderState(15)
testIndex = TestTimeSlider(sectionIndex, testIndex)
CloneWindow()
SetActiveWindow(2)
InvertBackgroundColor()
DrawPlots()
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetActiveWindow(1)
#
# Now that multiple windows use the same correlation, alter the correlation
# so it only uses 2 databases and make sure that both windows change when
# we change the time slider.
#
TestSection("Altering correlation")
sectionIndex = 5
testIndex = 0
testIndex = TestCorrelation(alteredCorrelation, sectionIndex, testIndex)
AlterDatabaseCorrelation(alteredCorrelation, (dbs[0], dbs[1]), StretchedIndexCorrelation)
testIndex = TestCorrelation(alteredCorrelation, sectionIndex, testIndex)
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetActiveWindow(2)
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetActiveWindow(1)
SetTimeSliderState(19)
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetActiveWindow(2)
SetTimeSliderState(19)
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetActiveWindow(1)
#
# Test deleting correlations
#
TestSection("Deleting correlations")
sectionIndex = 6
testIndex = 0
for n in correlationNames:
DeleteDatabaseCorrelation(n)
TestLengthAndCorrelationList(GetTestName(sectionIndex, testIndex))
#
# Test automatically correlating by setting the correlation options such
# that we automatically create a StretchedIndex correlation when adding
# plots, etc.
#
TestSection("Automatic correlations")
sectionIndex = 7
SetActiveWindow(1)
DeleteAllPlots()
TestLengthAndCorrelationList(GetTestName(sectionIndex, 0))
testIndex = 1
SetDatabaseCorrelationOptions(StretchedIndexCorrelation, 0)
OpenDatabase(dbs[0])
AddPlot("FilledBoundary", "material(mesh)")
OpenDatabase(dbs[1])
AddPlot("FilledBoundary", "material(mesh)")
DrawPlots()
# At this point, the viewer should have automatically created a new correlation
# and it should be the active time slider.
testIndex = TestCorrelation(GetActiveTimeSlider(), sectionIndex, testIndex)
testIndex = TestTimeSlider(sectionIndex, testIndex)
# note: py3 div creates float
SetTimeSliderState(int(TimeSliderGetNStates() / 2))
testIndex = TestTimeSlider(sectionIndex, testIndex)
SetTimeSliderState(TimeSliderGetNStates() - 1)
testIndex = TestTimeSlider(sectionIndex, testIndex)
# Now that we've verified the correlation, add another database to it by
# Creating a plot from yet another database.
OpenDatabase(dbs[2])
AddPlot("FilledBoundary", "material(mesh)")
DrawPlots()
testIndex = TestCorrelation(GetActiveTimeSlider(), sectionIndex, testIndex)
SetTimeSliderState(int(TimeSliderGetNStates() * 0.8))
testIndex = TestTimeSlider(sectionIndex, testIndex)
Exit() |
kind | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetTIDataConnectorResult',
'AwaitableGetTIDataConnectorResult',
'get_ti_data_connector',
'get_ti_data_connector_output',
]
@pulumi.output_type
class GetTIDataConnectorResult:
"""
Represents threat intelligence data connector.
"""
def __init__(__self__, data_types=None, etag=None, id=None, METHOD_NAME=None, name=None, system_data=None, tenant_id=None, tip_lookback_period=None, type=None):
if data_types and not isinstance(data_types, dict):
raise TypeError("Expected argument 'data_types' to be a dict")
pulumi.set(__self__, "data_types", data_types)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if tip_lookback_period and not isinstance(tip_lookback_period, str):
raise TypeError("Expected argument 'tip_lookback_period' to be a str")
pulumi.set(__self__, "tip_lookback_period", tip_lookback_period)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="dataTypes")
def data_types(self) -> 'outputs.TIDataConnectorDataTypesResponse':
"""
The available data types for the connector.
"""
return pulumi.get(self, "data_types")
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Etag of the azure resource
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The kind of the data connector
Expected value is 'ThreatIntelligence'.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> str:
"""
The tenant id to connect to, and get the data from.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter(name="tipLookbackPeriod")
def tip_lookback_period(self) -> Optional[str]:
"""
The lookback period for the feed to be imported.
"""
return pulumi.get(self, "tip_lookback_period")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetTIDataConnectorResult(GetTIDataConnectorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetTIDataConnectorResult(
data_types=self.data_types,
etag=self.etag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
system_data=self.system_data,
tenant_id=self.tenant_id,
tip_lookback_period=self.tip_lookback_period,
type=self.type)
def get_ti_data_connector(data_connector_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetTIDataConnectorResult:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
__args__ = dict()
__args__['dataConnectorId'] = data_connector_id
__args__['resourceGroupName'] = resource_group_name
__args__['workspaceName'] = workspace_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:securityinsights/v20230601preview:getTIDataConnector', __args__, opts=opts, typ=GetTIDataConnectorResult).value
return AwaitableGetTIDataConnectorResult(
data_types=pulumi.get(__ret__, 'data_types'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
system_data=pulumi.get(__ret__, 'system_data'),
tenant_id=pulumi.get(__ret__, 'tenant_id'),
tip_lookback_period=pulumi.get(__ret__, 'tip_lookback_period'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_ti_data_connector)
def get_ti_data_connector_output(data_connector_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetTIDataConnectorResult]:
"""
Gets a data connector.
:param str data_connector_id: Connector ID
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str workspace_name: The name of the workspace.
"""
... |
read unsigned byte | """
Copyright (C) 2014 David Boddie <[email protected]>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import struct, time
# Find the number of centiseconds between 1900 and 1970.
between_epochs = ((365 * 70) + 17) * 24 * 360000L
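# (365 * 70) + 17 is the number of days between 1900 and 1970 (17 leap days;
# 1900 itself is not a leap year), and 24 * 360000 is the number of
# centiseconds per day (24 h * 3600 s * 100 cs).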
class DiskError(Exception):
pass
class Utilities:
# Little endian reading
def _read_signed_word(self, s):
return struct.unpack("<i", s)[0]
def _read_unsigned_word(self, s):
return struct.unpack("<I", s)[0]
def _read_signed_byte(self, s):
return struct.unpack("<b", s)[0]
def METHOD_NAME(self, s):
return struct.unpack("<B", s)[0]
def _read_unsigned_half_word(self, s):
return struct.unpack("<H", s)[0]
def _read_signed_half_word(self, s):
return struct.unpack("<h", s)[0]
def _read(self, offset, length = 1):
self.file.seek(offset, 0)
return self.file.read(length)
def _write_unsigned_word(self, v):
return struct.pack("<I", v)
def _write_unsigned_half_word(self, v):
return struct.pack("<H", v)
def _write_unsigned_byte(self, v):
return struct.pack("<B", v)
def _write(self, offset, data):
self.file.seek(offset, 0)
self.file.write(data)
def _str2num(self, s):
i = 0
n = 0
while i < len(s):
n = n | (ord(s[i]) << (i*8))
i = i + 1
return n
def _num2str(self, size, n):
i = 0
s = ""
while i < size:
s += chr(n & 0xff)
n = n >> 8
i += 1
return s
def _binary(self, size, n):
new = ""
while (n != 0) & (size > 0):
if (n & 1)==1:
new = "1" + new
else:
new = "0" + new
n = n >> 1
size = size - 1
if size > 0:
new = ("0"*size) + new
return new
def _safe(self, s, with_space = 0):
new = ""
if with_space == 1:
lower = 31
else:
lower = 32
for c in s:
if ord(c) >= 128:
i = ord(c) ^ 128
c = chr(i)
if ord(c) <= lower:
break
new = new + c
return new
def _pad(self, s, length, ch):
s = s[:length]
if len(s) < length:
s += (length - len(s)) * ch
return s
class Directory:
"""directory = Directory(name, address)
The directory created contains name and files attributes containing the
directory name and the objects it contains.
"""
def __init__(self, name, files):
self.name = name
self.files = files
def __repr__(self):
return '<%s instance, "%s", at %x>' % (self.__class__, self.name, id(self))
class File:
"""file = File(name, data, load_address, execution_address, length)
"""
def __init__(self, name, data, load_address, execution_address, length,
locked = False, disk_address = 0):
self.name = name
self.data = data
self.load_address = load_address
self.execution_address = execution_address
self.length = length
self.locked = locked
self.disk_address = disk_address
def __repr__(self):
return '<%s instance, "%s", at %x>' % (self.__class__, self.name, id(self))
def has_filetype(self):
"""Returns True if the file's meta-data contains filetype information."""
return self.load_address & 0xfff00000 == 0xfff00000
def filetype(self):
"""Returns the meta-data containing the filetype information.
Note that a filetype can be obtained for all files, though it may not
necessarily be valid. Use has_filetype() to determine whether the file
is likely to have a valid filetype."""
return "%03x" % ((self.load_address >> 8) & 0xfff)
def time_stamp(self):
"""Returns the time stamp for the file as a tuple of values containing
the local time, or an empty tuple if the file does not have a time stamp."""
# RISC OS time is given as a five byte block containing the
# number of centiseconds since 1900 (presumably 1st January 1900).
# Convert the time to the time elapsed since the Epoch (assuming
# 1970 for this value).
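# The five byte timestamp is reassembled here: execution_address holds the low
# four bytes and the low byte of load_address supplies the fifth (most
# significant) byte; "<IBxxx" pads the value to eight bytes so it can be read
# back as a little-endian 64-bit integer.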
date_num = struct.unpack("<Q",
struct.pack("<IBxxx", self.execution_address, self.load_address & 0xff))[0]
centiseconds = date_num - between_epochs
# Convert this to a value in seconds and return a time tuple.
try:
return time.localtime(centiseconds / 100.0)
except ValueError:
return ()
|
test custom pytcti finalize | #!/usr/bin/python3 -u
# SPDX-License-Identifier: BSD-2
import unittest
from tpm2_pytss import *
from .TSS2_BaseTest import TSS2_EsapiTest
class MyTCTI(PyTCTI):
def __init__(self, subtcti, magic=None):
self._tcti = subtcti
self._is_finalized = False
self._error = None
if magic is not None:
super().__init__(magic=magic)
else:
super().__init__()
@property
def is_finalized(self):
return self._is_finalized
def do_transmit(self, command):
self._tcti.transmit(command)
def do_receive(self, timeout):
return self._tcti.receive()
def do_cancel(self):
self._tcti.cancel()
def do_get_poll_handles(self):
return self._tcti.get_poll_handles()
def do_set_locality(self, locality):
self._tcti.set_locality(locality)
def do_make_sticky(self, handle, is_sticky):
if self._tcti is not None:
self._tcti.make_sticky(handle, is_sticky)
if self._error is not None:
raise self._error
def do_finalize(self):
self._is_finalized = True
if self._error is not None:
raise self._error
class TestTCTI(TSS2_EsapiTest):
def test_init(self):
self.assertEqual(self.tcti.version, 2)
self.assertGreater(int.from_bytes(self.tcti.magic, "big"), 0)
v1ctx = ffi.cast("TSS2_TCTI_CONTEXT_COMMON_V1 *", self.tcti._ctx)
v1ctx.version = 1
tcti = TCTI(self.tcti._ctx)
self.assertEqual(tcti.version, 1)
self.assertEqual(tcti._v2, None)
def test_transmit_receive(self):
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
resp = self.tcti.receive()
self.assertEqual(resp, b"\x80\x01\x00\x00\x00\n\x00\x00\x01\x00")
def test_finalize(self):
tcti = TCTI(self.tcti._ctx)
tcti.finalize()
def test_cancel(self):
if getattr(self.tcti, "name", "") == "swtpm":
self.skipTest("cancel not supported by swtpm")
startup = b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00"
self.tcti.transmit(startup)
self.tcti.cancel()
def test_get_poll_handles(self):
tcti_name = getattr(self.tcti, "name", "")
try:
self.tcti.get_poll_handles()
except TSS2_Exception as e:
if e.rc != lib.TSS2_TCTI_RC_NOT_IMPLEMENTED:
raise e
else:
self.skipTest(f"get_poll_handles not supported by {tcti_name}")
def test_set_locality(self):
self.tcti.set_locality(TPMA_LOCALITY.TWO)
def test_make_sticky(self):
tcti_name = getattr(self.tcti, "name", "")
if tcti_name in ("swtpm", "mssim"):
self.skipTest(f"make_sticky not supported by {tcti_name}")
self.tcti.make_sticky(0, 0)
# use a separate TCTI wrapper (same pattern as test_init) so that clearing
# _v2 does not affect the shared self.tcti
tcti = TCTI(self.tcti._ctx)
tcti._v2 = None
with self.assertRaises(RuntimeError) as e:
tcti.make_sticky(0, 0)
self.assertEqual(str(e.exception), "unsupported by TCTI API version")
def test_tctildr(self):
self.assertIsInstance(self.tcti.name, str)
self.assertIsInstance(self.tcti.conf, str)
with self.assertRaises(TypeError):
TCTILdr(name=None, conf=1234)
with self.assertRaises(TypeError):
TCTILdr(name=1234, conf=None)
def test_custom_pytcti_esapi(self):
t = MyTCTI(self.tcti)
e = ESAPI(t)
e.get_random(4)
e.startup(TPM2_SU.CLEAR)
def test_custom_pytcti_C_wrapper_transmit_receive(self):
t = MyTCTI(self.tcti)
# Go through the C API directly and call transmit and recv
t.transmit(b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00")
resp = t.receive(-1)
self.assertEqual(resp, b"\x80\x01\x00\x00\x00\n\x00\x00\x01\x00")
def test_custom_pytcti_cancel(self):
if getattr(self.tcti, "name", "") == "swtpm":
self.skipTest("cancel not supported by swtpm")
t = MyTCTI(self.tcti)
t.transmit(b"\x80\x01\x00\x00\x00\x0C\x00\x00\x01\x44\x00\x00")
t.cancel()
def METHOD_NAME(self):
t = MyTCTI(self.tcti)
t.finalize()
self.assertTrue(t.is_finalized)
def test_custom_pytcti_get_poll_handles(self):
tcti_name = getattr(self.tcti, "name", "")
t = MyTCTI(self.tcti)
try:
handles = t.get_poll_handles()
for h in handles:
self.assertTrue(isinstance(h, PollData))
except TSS2_Exception as e:
if e.rc != lib.TSS2_TCTI_RC_NOT_IMPLEMENTED:
raise e
else:
self.skipTest(f"get_poll_handles not supported by {tcti_name}")
def test_custom_pytcti_set_locality(self):
t = MyTCTI(self.tcti)
t.set_locality(TPMA_LOCALITY.TWO)
def test_custom_pytcti_make_sticky(self):
t = MyTCTI(None)
t._error = None
t.make_sticky(0, 0)
t.make_sticky(0, 1)
t.make_sticky(0, False)
# Test that throwing an exception shows the originating exception
t._error = RuntimeError("Bills Error")
with self.assertRaises(RuntimeError, msg="Bills Error"):
t.make_sticky(5, True)
t._v2 = None
with self.assertRaises(TSS2_Exception):
t.make_sticky(0, 0)
def test_custom_pytcti_version(self):
t = MyTCTI(None)
self.assertEqual(t.version, 2)
def test_custom_pytcti_magic(self):
t = MyTCTI(None)
magic = b"PYTCTI\x00\x00"
self.assertEqual(t.magic, magic)
# max magic len
magic = b"THISISIT"
t = MyTCTI(None, magic)
self.assertEqual(t.magic, magic)
# small magic len
magic = b"COOL"
t = MyTCTI(None, magic)
self.assertEqual(t.magic, magic)
# min magic
magic = b""
t = MyTCTI(None, magic)
self.assertEqual(t.magic, magic)
with self.assertRaises(ValueError):
MyTCTI(None, b"THISISTOOBIG")
def test_custom_pytcti_ctx_manager_finalize(self):
with MyTCTI(self.tcti) as t:
e = ESAPI(t)
r = e.get_random(4)
self.assertEqual(len(r), 4)
e.startup(TPM2_SU.CLEAR)
self.assertTrue(t.is_finalized)
def test_custom_pytcti_finalize_error(self):
t = MyTCTI(self.tcti)
t._error = RuntimeError("Bills Error 2")
with self.assertRaises(RuntimeError, msg="Bills Error 2"):
t.finalize()
def test_is_available(self):
self.assertTrue(TCTILdr.is_available())
self.assertFalse(TCTILdr.is_available("this-tcti-doesnt-exist"))
if __name__ == "__main__":
unittest.main() |
join started threads | import threading
import time
from contextlib import contextmanager
from queue import Queue
import pytest
from .. import _thread_cache
from .._thread_cache import ThreadCache, start_thread_soon
from .tutil import gc_collect_harder, slow
def test_thread_cache_basics():
q = Queue()
def fn():
raise RuntimeError("hi")
def deliver(outcome):
q.put(outcome)
start_thread_soon(fn, deliver)
outcome = q.get()
with pytest.raises(RuntimeError, match="hi"):
outcome.unwrap()
def test_thread_cache_deref():
res = [False]
class del_me:
def __call__(self):
return 42
def __del__(self):
res[0] = True
q = Queue()
def deliver(outcome):
q.put(outcome)
start_thread_soon(del_me(), deliver)
outcome = q.get()
assert outcome.unwrap() == 42
gc_collect_harder()
assert res[0]
@slow
def test_spawning_new_thread_from_deliver_reuses_starting_thread():
# We know that no-one else is using the thread cache, so if we keep
# submitting new jobs the instant the previous one is finished, we should
# keep getting the same thread over and over. This tests both that the
# thread cache is LIFO, and that threads can be assigned new work *before*
# deliver exits.
# Make sure there are a few threads running, so if we weren't LIFO then we
# could grab the wrong one.
q = Queue()
COUNT = 5
for _ in range(COUNT):
start_thread_soon(lambda: time.sleep(1), lambda result: q.put(result))
for _ in range(COUNT):
q.get().unwrap()
seen_threads = set()
done = threading.Event()
def deliver(n, _):
print(n)
seen_threads.add(threading.current_thread())
if n == 0:
done.set()
else:
start_thread_soon(lambda: None, lambda _: deliver(n - 1, _))
start_thread_soon(lambda: None, lambda _: deliver(5, _))
done.wait()
assert len(seen_threads) == 1
@slow
def test_idle_threads_exit(monkeypatch):
# Temporarily set the idle timeout to something tiny, to speed up the
# test. (But non-zero, so that the worker loop will at least yield the
# CPU.)
monkeypatch.setattr(_thread_cache, "IDLE_TIMEOUT", 0.0001)
q = Queue()
start_thread_soon(lambda: None, lambda _: q.put(threading.current_thread()))
seen_thread = q.get()
# Since the idle timeout is 0, after sleeping for 1 second, the thread
# should have exited
time.sleep(1)
assert not seen_thread.is_alive()
@contextmanager
def METHOD_NAME():
before = frozenset(threading.enumerate())
try:
yield
finally:
for thread in threading.enumerate():
if thread not in before:
thread.join(timeout=1.0)
assert not thread.is_alive()
def test_race_between_idle_exit_and_job_assignment(monkeypatch):
# This is a lock where the first few times you try to acquire it with a
# timeout, it waits until the lock is available and then pretends to time
# out. Using this in our thread cache implementation causes the following
# sequence:
#
# 1. start_thread_soon grabs the worker thread, assigns it a job, and
# releases its lock.
# 2. The worker thread wakes up (because the lock has been released), but
# the JankyLock lies to it and tells it that the lock timed out. So the
# worker thread tries to exit.
# 3. The worker thread checks for the race between exiting and being
# assigned a job, and discovers that it *is* in the process of being
# assigned a job, so it loops around and tries to acquire the lock
# again.
# 4. Eventually the JankyLock admits that the lock is available, and
# everything proceeds as normal.
class JankyLock:
def __init__(self):
self._lock = threading.Lock()
self._counter = 3
def acquire(self, timeout=-1):
got_it = self._lock.acquire(timeout=timeout)
if timeout == -1:
return True
elif got_it:
if self._counter > 0:
self._counter -= 1
self._lock.release()
return False
return True
else:
return False
def release(self):
self._lock.release()
monkeypatch.setattr(_thread_cache, "Lock", JankyLock)
with METHOD_NAME():
tc = ThreadCache()
done = threading.Event()
tc.start_thread_soon(lambda: None, lambda _: done.set())
done.wait()
# Let's kill the thread we started, so it doesn't hang around until the
# test suite finishes. Doesn't really do any harm, but it can be confusing
# to see it in debug output.
monkeypatch.setattr(_thread_cache, "IDLE_TIMEOUT", 0.0001)
tc.start_thread_soon(lambda: None, lambda _: None)
def test_raise_in_deliver(capfd):
seen_threads = set()
def track_threads():
seen_threads.add(threading.current_thread())
def deliver(_):
done.set()
raise RuntimeError("don't do this")
done = threading.Event()
start_thread_soon(track_threads, deliver)
done.wait()
done = threading.Event()
start_thread_soon(track_threads, lambda _: done.set())
done.wait()
assert len(seen_threads) == 1
err = capfd.readouterr().err
assert "don't do this" in err
assert "delivering result" in err |
sample dims 3patch | import functools
import operator
import os.path
import random
import sys
import cv2
import numpy as np
from .extract_patches import extract_patches
from .patterns import patterns_2patch, patterns_3patch, patterns_4patch, patterns_5patch
from .chroma_blur import chroma_blur
# Data paths
label_file = '/p/lscratchh/brainusr/ILSVRC2012/labels/train.txt'
data_dir = '/p/lscratchh/brainusr/ILSVRC2012/original/train'
# Read label files
samples = []
with open(label_file) as f:
for line in f:
line = line.split(' ')
samples.append((line[0], int(line[1])))
# Get sample function
def get_sample_2patch(index):
return get_sample(index, 2)
def get_sample_3patch(index):
return get_sample(index, 3)
def get_sample_4patch(index):
return get_sample(index, 4)
def get_sample_5patch(index):
return get_sample(index, 5)
def get_sample(index, num_patches):
"""Generate data sample.
Extract patches and apply preprocessing tricks.
"""
# Read image from file
file_name, _ = samples[index]
file_name = os.path.join(data_dir, file_name)
img = cv2.imdecode(np.fromfile(file_name, dtype=np.uint8),
cv2.IMREAD_COLOR)
# Crop to get square image
size = min(img.shape[0], img.shape[1])
y = (img.shape[0] - size) // 2
x = (img.shape[1] - size) // 2
img = img[y:y+size, x:x+size, :]
# Extract patches
patterns = None
if num_patches == 2:
patterns = patterns_2patch
if num_patches == 3:
patterns = patterns_3patch
if num_patches == 4:
patterns = patterns_4patch
if num_patches == 5:
patterns = patterns_5patch
patches, label = extract_patches(img, patterns)
# Randomly rotate patches
rotate_type = random.randint(0, 3)
for i, patch in enumerate(patches):
patch = np.rot90(patch, rotate_type, axes=(0,1))
patches[i] = patch
label = label + rotate_type * len(patterns)
# Convert patch to float32
for i, patch in enumerate(patches):
if patch.dtype == np.uint8:
patches[i] = patch.astype(np.float32) / 255
# Chroma blur
for i, patch in enumerate(patches):
patches[i] = chroma_blur(patch)
# Transform to CHW format and normalize
for i, patch in enumerate(patches):
patch = np.transpose(patch, axes=(2, 0, 1))
means = np.array([0.406, 0.456, 0.485]).reshape((3,1,1))
stdevs = np.array([0.225, 0.224, 0.229]).reshape((3,1,1))
patch -= means
patch /= stdevs
patches[i] = patch
# Random aperture
for i, patch in enumerate(patches):
if i == 0:
continue
size = random.randint(64, 96)
y = random.randint(0, 96-size)
x = random.randint(0, 96-size)
new_patch = np.zeros((3, 96, 96), dtype=np.float32)
new_patch[:, y:y+size, x:x+size] = patch[:, y:y+size, x:x+size]
patches[i] = new_patch
# Construct one-hot label vector
label_vec = np.zeros(num_labels(num_patches), dtype=np.float32)
label_vec[label] = 1
# Return flattened data tensors
flat_data = []
for patch in patches:
flat_data.append(patch.reshape(-1))
flat_data.append(label_vec)
return np.concatenate(flat_data)
# Get sample dims functions
patch_dims = (3, 96, 96)
def num_labels(num_patches):
num_patterns = 0
if num_patches == 2:
num_patterns = len(patterns_2patch)
if num_patches == 3:
num_patterns = len(patterns_3patch)
if num_patches == 4:
num_patterns = len(patterns_4patch)
if num_patches == 5:
num_patterns = len(patterns_5patch)
return 4 * num_patterns
def sample_dims(num_patches):
patch_size = functools.reduce(operator.mul, patch_dims)
return (num_patches*patch_size + num_labels(num_patches),)
def sample_dims_2patch():
return sample_dims(2)
def METHOD_NAME():
return sample_dims(3)
def sample_dims_4patch():
return sample_dims(4)
def sample_dims_5patch():
return sample_dims(5)
# Get num samples function
def num_samples():
return len(samples) |
decrease volume | # pylint: disable=C0111,R0903
"""Displays volume and mute status and controls for PulseAudio devices. Use wheel up and down to change volume, left click mutes, right click opens pavucontrol.
Please prefer this module over the "pulseaudio" module, which will eventually be deprecated.
Aliases: pulseout (for outputs, such as headsets, speakers), pulsein (for microphones)
NOTE: Do **not** use this module directly, but rather use either pulseout or pulsein!
NOTE2: For the parameter names below, please also use pulseout or pulsein, instead of pulsectl
Parameters:
* pulsectl.autostart: If set to 'true' (default is 'false'), automatically starts the pulsectl daemon if it is not running
* pulsectl.percent_change: How much to change volume by when scrolling on the module (default is 2%)
* pulsectl.limit: Upper limit for setting the volume (default is 0%, which means 'no limit')
* pulsectl.showbars: 'true' for showing volume bars, requires --markup=pango;
'false' for not showing volume bars (default)
* pulsectl.showdevicename: If set to 'true' (default is 'false'), the currently selected default device is shown.
Per default, the sink/source name returned by "pactl list sinks short" is used as display name.
As this name is usually not particularly nice (e.g "alsa_output.usb-Logitech_Logitech_USB_Headset-00.analog-stereo"),
its possible to map the name to more a user friendly name.
e.g to map "alsa_output.usb-Logitech_Logitech_USB_Headset-00.analog-stereo" to the name "Headset", add the following
bumblebee-status config entry: pulsectl.alsa_output.usb-Logitech_Logitech_USB_Headset-00.analog-stereo=Headset
Furthermore its possible to specify individual (unicode) icons for all sinks/sources. e.g in order to use the icon 🎧 for the
"alsa_output.usb-Logitech_Logitech_USB_Headset-00.analog-stereo" sink, add the following bumblebee-status config entry:
pulsectl.icon.alsa_output.usb-Logitech_Logitech_USB_Headset-00.analog-stereo=🎧
* By default a left mouse button click mutes/unmutes the device. If you want to open a dropdown menu to change the current
default device add the following config entry to your bumblebee-status config: pulsectl.left-click=select_default_device_popup
Requires the following Python module:
* pulsectl
"""
import pulsectl
import logging
import functools
import core.module
import core.widget
import core.input
import core.event
import util.cli
import util.graph
import util.format
try:
import util.popup
except ImportError as e:
logging.warning("Couldn't import util.popup: %s. Popups won't work!", e)
class Module(core.module.Module):
def __init__(self, config, theme, type):
super().__init__(config, theme, core.widget.Widget(self.display))
self.background = True
self.__type = type
self.__volume = 0
self.__devicename = "n/a"
self.__muted = False
self.__showbars = util.format.asbool(self.parameter("showbars", False))
self.__show_device_name = util.format.asbool(
self.parameter("showdevicename", False)
)
self.__change = util.format.asint(
self.parameter("percent_change", "2%").strip("%"), 0, 100
)
self.__limit = util.format.asint(self.parameter("limit", "0%").strip("%"), 0)
events = [
{
"type": "mute",
"action": self.toggle_mute,
"button": core.input.LEFT_MOUSE
},
{
"type": "volume",
"action": self.increase_volume,
"button": core.input.WHEEL_UP,
},
{
"type": "volume",
"action": self.METHOD_NAME,
"button": core.input.WHEEL_DOWN,
},
]
for event in events:
core.input.register(self, button=event["button"], cmd=event["action"])
if util.format.asbool(self.parameter("autostart", False)):
util.cli.execute("pulseaudio --start", ignore_errors=True)
self.process(None)
def display(self, _):
res = f"{int(self.__volume*100)}%"
if self.__showbars:
res = f"{res} {util.graph.hbar(self.__volume*100)}"
if self.__show_device_name:
friendly_name = self.parameter(self.__devicename, self.__devicename)
icon = self.parameter("icon." + self.__devicename, "")
res = (
icon + " " + friendly_name + " | " + res
if icon != ""
else friendly_name + " | " + res
)
return res
def toggle_mute(self, _):
with pulsectl.Pulse(self.id + "vol") as pulse:
dev = self.get_device(pulse)
if not dev:
return
pulse.mute(dev, not self.__muted)
def change_volume(self, amount):
with pulsectl.Pulse(self.id + "vol") as pulse:
dev = self.get_device(pulse)
if not dev:
return
vol = dev.volume
vol.value_flat += amount
if self.__limit > 0 and vol.value_flat > self.__limit/100:
vol.value_flat = self.__limit/100
pulse.volume_set(dev, vol)
def increase_volume(self, _):
self.change_volume(self.__change/100.0)
def METHOD_NAME(self, _):
self.change_volume(-self.__change/100.0)
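# Informal note (example values assumed, not part of the module): with percent_change=2 and
# limit=100, a wheel-up event adds 0.02 to the flat volume and change_volume clamps the result
# at 1.0 (limit/100); a wheel-down event subtracts 0.02.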
def get_device(self, pulse):
devs = pulse.sink_list() if self.__type == "sink" else pulse.source_list()
default = pulse.server_info().default_sink_name if self.__type == "sink" else pulse.server_info().default_source_name
for dev in devs:
if dev.name == default:
return dev
if len(devs) == 0:
return None
return devs[0] # fallback
def process(self, _):
with pulsectl.Pulse(self.id + "proc") as pulse:
dev = self.get_device(pulse)
if not dev:
self.__volume = 0
self.__devicename = "n/a"
else:
self.__volume = dev.volume.value_flat
self.__muted = dev.mute
self.__devicename = dev.name
core.event.trigger("update", [self.id], redraw_only=True)
core.event.trigger("draw")
def update(self):
with pulsectl.Pulse(self.id) as pulse:
pulse.event_mask_set(self.__type)
pulse.event_callback_set(self.process)
pulse.event_listen()
def select_default_device_popup(self, widget):
with pulsectl.Pulse(self.id) as pulse:
devs = pulse.sink_list() if self.__type == "sink" else pulse.source_list()
menu = util.popup.menu(self.__config)
for dev in devs:
menu.add_menuitem(
dev.description,
callback=functools.partial(self.__on_default_changed, dev),
)
menu.show(widget)
def __on_default_changed(self, dev):
with pulsectl.Pulse(self.id) as pulse:
pulse.default_set(dev)
def state(self, _):
if self.__muted:
return ["warning", "muted"]
return ["unmuted"]
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 |
store settings | #
# ABOUT
# Artisan Sampling Dialog
# LICENSE
# This program or module is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 2 of the License, or
# version 3 of the License, or (at your option) any later version. It is
# provided for educational purposes and is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
# AUTHOR
# Marko Luther, 2023
from typing import Optional, TYPE_CHECKING
from artisanlib.dialogs import ArtisanDialog
from artisanlib.widgets import MyQDoubleSpinBox
try:
from PyQt6.QtCore import Qt, pyqtSlot, QSettings # @UnusedImport @Reimport @UnresolvedImport
from PyQt6.QtWidgets import (QMessageBox, QApplication, QHBoxLayout, QVBoxLayout, QCheckBox, QGridLayout, # @UnusedImport @Reimport @UnresolvedImport
QDialogButtonBox, QLayout) # @UnusedImport @Reimport @UnresolvedImport
except ImportError:
from PyQt5.QtCore import Qt, pyqtSlot, QSettings # type: ignore # @UnusedImport @Reimport @UnresolvedImport
from PyQt5.QtWidgets import (QMessageBox, QApplication, QHBoxLayout, QVBoxLayout, QCheckBox, QGridLayout, # type: ignore # @UnusedImport @Reimport @UnresolvedImport
QDialogButtonBox, QLayout) # type: ignore # @UnusedImport @Reimport @UnresolvedImport
if TYPE_CHECKING:
from PyQt6.QtWidgets import QPushButton # pylint: disable=unused-import
class SamplingDlg(ArtisanDialog):
def __init__(self, parent, aw) -> None:
super().__init__(parent, aw)
self.setWindowTitle(QApplication.translate('Message','Sampling'))
self.setModal(True)
self.keepOnFlag = QCheckBox(QApplication.translate('Label','Keep ON'))
self.keepOnFlag.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.keepOnFlag.setChecked(bool(self.aw.qmc.flagKeepON))
self.openCompletedFlag = QCheckBox(QApplication.translate('Label','Open Completed Roast in Viewer'))
self.openCompletedFlag.setFocusPolicy(Qt.FocusPolicy.NoFocus)
self.openCompletedFlag.setChecked(bool(self.aw.qmc.flagOpenCompleted))
self.interval = MyQDoubleSpinBox()
self.interval.setSingleStep(1)
self.interval.setValue(self.aw.qmc.delay/1000.)
self.interval.setRange(self.aw.qmc.min_delay/1000.,40.)
self.interval.setDecimals(2)
self.interval.setAlignment(Qt.AlignmentFlag.AlignRight)
self.interval.setSuffix('s')
intervalLayout = QHBoxLayout()
intervalLayout.addStretch()
intervalLayout.addWidget(self.interval)
intervalLayout.addStretch()
# connect the ArtisanDialog standard OK/Cancel buttons
self.dialogbuttons.accepted.connect(self.ok)
self.dialogbuttons.rejected.connect(self.close)
flagGrid = QGridLayout()
flagGrid.addWidget(self.keepOnFlag,0,0)
flagGrid.addWidget(self.openCompletedFlag,1,0)
flagLayout = QHBoxLayout()
flagLayout.addStretch()
flagLayout.addLayout(flagGrid)
flagLayout.addStretch()
buttonsLayout = QHBoxLayout()
buttonsLayout.addStretch()
buttonsLayout.addWidget(self.dialogbuttons)
#incorporate layouts
layout = QVBoxLayout()
layout.addLayout(intervalLayout)
layout.addLayout(flagLayout)
layout.addStretch()
layout.addLayout(buttonsLayout)
self.setLayout(layout)
ok_button: Optional['QPushButton'] = self.dialogbuttons.button(QDialogButtonBox.StandardButton.Ok)
if ok_button is not None:
ok_button.setFocus()
settings = QSettings()
if settings.contains('SamplingPosition'):
self.move(settings.value('SamplingPosition'))
layout.setSizeConstraint(QLayout.SizeConstraint.SetFixedSize)
#window close box
def closeEvent(self,_):
self.close()
#cancel button
@pyqtSlot()
def close(self):
self.METHOD_NAME()
self.reject()
def METHOD_NAME(self):
#save window position (only; not size!)
settings = QSettings()
settings.setValue('SamplingPosition',self.frameGeometry().topLeft())
#ok button
@pyqtSlot()
def ok(self):
self.aw.qmc.flagKeepON = bool(self.keepOnFlag.isChecked())
self.aw.qmc.flagOpenCompleted = bool(self.openCompletedFlag.isChecked())
self.aw.setSamplingRate(int(self.interval.value()*1000.))
if self.aw.qmc.delay < self.aw.qmc.default_delay:
QMessageBox.warning(self.aw,
QApplication.translate('Message', 'Warning', None),
QApplication.translate('Message', 'A tight sampling interval might lead to instability on some machines. We suggest a minimum of 1s.'))
self.METHOD_NAME()
# self.aw.closeEventSettings()
self.accept() |
read input | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
import os
import os.path
import re
import sys
import unittest
from ExtensionClass import Base
from Products.PageTemplates.engine import Program
from zope.component import provideUtility
from zope.pagetemplate.interfaces import IPageTemplateEngine
from zope.pagetemplate.pagetemplate import PageTemplateEngine
# Dummy TestCase to use the assertions outside the actual tests.
TEST_CASE = unittest.TestCase('__init__')
class Bruce(Base):
__allow_access_to_unprotected_subobjects__ = 1
isDocTemp = 0
def __str__(self):
return 'bruce'
def __int__(self):
return 42
def __float__(self):
return 42.0
def keys(self):
return ['bruce'] * 7
def values(self):
return [self] * 7
def items(self):
return [('bruce', self)] * 7
def __len__(self):
return 7
def __getitem__(self, index):
if isinstance(index, int) and (index < 0 or index > 6):
raise IndexError(index)
return self
def __getattr__(self, name):
if name[:1] == '_':
raise AttributeError(name)
return self
bruce = Bruce()
class arg(Base):
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, nn, aa):
self.num, self.arg = nn, aa
def __str__(self):
return str(self.arg)
class argv(Base):
__allow_access_to_unprotected_subobjects__ = 1
def __init__(self, argv=sys.argv[1:]):
args = self.args = []
for aa in argv:
args.append(arg(len(args) + 1, aa))
def items(self):
return [('spam%d' % a.num, a) for a in self.args]
def values(self):
return self.args
def getPhysicalRoot(self):
return self
def check_html(s1, s2):
if not isinstance(s2, bytes) and isinstance(s1, bytes):
# convert to common type
s1 = s1.decode("utf-8") # our encoding
s1 = normalize_html(s1)
s2 = normalize_html(s2)
TEST_CASE.assertEqual(s1, s2)
def check_xml(s1, s2):
s1 = normalize_xml(s1)
s2 = normalize_xml(s2)
TEST_CASE.assertEqual(s1, s2, "XML Output Changed")
def normalize_html(s):
s = re.sub(r"[ \t]+", " ", s)
s = re.sub(r"/>", ">", s)
return s
def normalize_xml(s):
s = re.sub(r"\s+", " ", s)
s = re.sub(r"(?s)\s+<", "<", s)
s = re.sub(r"(?s)>\s+", ">", s)
return s
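# Illustrative note (not part of the original helpers): normalize_xml("<a>\n  <b/>\n</a>")
# collapses all inter-tag whitespace, yielding "<a><b/></a>", so comparisons in check_xml
# ignore indentation-only differences.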
HERE = os.path.dirname(__file__)
input_dir = os.path.join(HERE, 'input')
output_dir = os.path.join(HERE, 'output')
def _open(filename, mode):
# Define explicit encoding for windows platform
return open(filename, mode, encoding='utf-8')
def METHOD_NAME(filename):
filename = os.path.join(input_dir, filename)
with _open(filename, 'r') as fd:
data = fd.read()
return data
def read_output(filename):
filename = os.path.join(output_dir, filename)
with _open(filename, 'r') as fd:
data = fd.read()
return data
def exists_output(filename):
filename = os.path.join(output_dir, filename)
return os.path.exists(filename)
def useChameleonEngine():
# Force the use of the new chameleon rendering engine (the new default).
# Its use depends on a utility registration that is queried in
zope.pagetemplate.pagetemplate.PageTemplate's _cook method. Unfortunately
# the fallback is the old Zope engine if there is no registration, so we
# force one here for use by unit tests.
provideUtility(Program, IPageTemplateEngine)
def useOldZopeEngine():
# BBB Force the use of the old Zope page template engine, which is needed
# for some tests that test features only supported by it.
provideUtility(PageTemplateEngine, IPageTemplateEngine) |
post with refs and tags | import json
from pathlib import Path
from typing import Any, Dict, Sequence, cast, Tuple
import pytest
import pytest_asyncio
from aleph_message.models import AggregateContent, PostContent
from configmanager import Config
from sqlalchemy import insert
from aleph.chains.chain_service import ChainService
from aleph.db.accessors.aggregates import refresh_aggregate
from aleph.db.models import (
MessageDb,
ChainTxDb,
AggregateElementDb,
message_confirmations,
)
from aleph.db.models.posts import PostDb
from aleph.handlers.message_handler import MessageHandler
from aleph.jobs.process_pending_messages import PendingMessageProcessor
from aleph.storage import StorageService
from aleph.toolkit.timestamp import timestamp_to_datetime
from aleph.types.db_session import DbSessionFactory
import datetime as dt
from in_memory_storage_engine import InMemoryStorageEngine
# TODO: remove the raw parameter, it's just to avoid larger refactorings
async def _load_fixtures(
session_factory: DbSessionFactory, filename: str, raw: bool = True
) -> Sequence[Dict[str, Any]]:
fixtures_dir = Path(__file__).parent / "fixtures"
fixtures_file = fixtures_dir / filename
with fixtures_file.open() as f:
messages_json = json.load(f)
messages = []
tx_hashes = set()
with session_factory() as session:
for message_dict in messages_json:
message_db = MessageDb.from_message_dict(message_dict)
messages.append(message_db)
session.add(message_db)
for confirmation in message_dict.get("confirmations", []):
if (tx_hash := confirmation["hash"]) not in tx_hashes:
chain_tx_db = ChainTxDb.from_dict(confirmation)
tx_hashes.add(tx_hash)
session.add(chain_tx_db)
session.flush()
session.execute(
insert(message_confirmations).values(
item_hash=message_db.item_hash, tx_hash=tx_hash
)
)
session.commit()
return messages_json if raw else messages
@pytest_asyncio.fixture
async def fixture_messages(
session_factory: DbSessionFactory,
) -> Sequence[Dict[str, Any]]:
return await _load_fixtures(session_factory, "fixture_messages.json")
def make_aggregate_element(message: MessageDb) -> AggregateElementDb:
content = cast(AggregateContent, message.parsed_content)
aggregate_element = AggregateElementDb(
key=content.key,
owner=content.address,
content=content.content,
item_hash=message.item_hash,
creation_datetime=timestamp_to_datetime(content.time),
)
return aggregate_element
@pytest_asyncio.fixture
async def fixture_aggregate_messages(
session_factory: DbSessionFactory,
) -> Sequence[MessageDb]:
messages = await _load_fixtures(
session_factory, "fixture_aggregates.json", raw=False
)
aggregate_keys = set()
with session_factory() as session:
for message in messages:
aggregate_element = make_aggregate_element(message) # type: ignore
session.add(aggregate_element)
aggregate_keys.add((aggregate_element.owner, aggregate_element.key))
session.commit()
for owner, key in aggregate_keys:
refresh_aggregate(session=session, owner=owner, key=key)
session.commit()
return messages # type: ignore
def make_post_db(message: MessageDb) -> PostDb:
content = cast(PostContent, message.parsed_content)
return PostDb(
item_hash=message.item_hash,
owner=content.address,
type=content.type,
ref=content.ref,
amends=content.ref if content.type == "amend" else None,
channel=message.channel,
content=content.content,
creation_datetime=timestamp_to_datetime(content.time),
)
@pytest_asyncio.fixture
async def fixture_posts(
session_factory: DbSessionFactory,
) -> Sequence[PostDb]:
messages = await _load_fixtures(session_factory, "fixture_posts.json", raw=False)
posts = [make_post_db(message) for message in messages] # type: ignore
with session_factory() as session:
session.add_all(posts)
session.commit()
return posts
@pytest.fixture
def METHOD_NAME() -> Tuple[MessageDb, PostDb]:
message = MessageDb(
item_hash="1234",
sender="0xdeadbeef",
type="POST",
chain="ETH",
signature=None,
item_type="storage",
item_content=None,
content={"content": {"tags": ["original", "mainnet"], "swap": "this"}},
time=dt.datetime(2023, 5, 1, tzinfo=dt.timezone.utc),
channel=None,
size=254,
)
post = PostDb(
item_hash=message.item_hash,
owner=message.sender,
type=None,
ref="custom-ref",
amends=None,
channel=None,
content=message.content["content"],
creation_datetime=message.time,
latest_amend=None,
)
return message, post
@pytest.fixture
def amended_post_with_refs_and_tags(METHOD_NAME: Tuple[MessageDb, PostDb]):
original_message, original_post = METHOD_NAME
amend_message = MessageDb(
item_hash="5678",
sender="0xdeadbeef",
type="POST",
chain="ETH",
signature=None,
item_type="storage",
item_content=None,
content={"content": {"tags": ["amend", "mainnet"], "don't": "swap"}},
time=dt.datetime(2023, 5, 2, tzinfo=dt.timezone.utc),
channel=None,
size=277,
)
amend_post = PostDb(
item_hash=amend_message.item_hash,
owner=original_message.sender,
type="amend",
ref=original_message.item_hash,
amends=original_message.item_hash,
channel=None,
content=amend_message.content["content"],
creation_datetime=amend_message.time,
latest_amend=None,
)
return amend_message, amend_post
@pytest.fixture
def message_processor(mocker, mock_config: Config, session_factory: DbSessionFactory):
storage_engine = InMemoryStorageEngine(files={})
storage_service = StorageService(
storage_engine=storage_engine,
ipfs_service=mocker.AsyncMock(),
node_cache=mocker.AsyncMock(),
)
chain_service = ChainService(
session_factory=session_factory, storage_service=storage_service
)
message_handler = MessageHandler(
session_factory=session_factory,
chain_service=chain_service,
storage_service=storage_service,
config=mock_config,
)
message_processor = PendingMessageProcessor(
session_factory=session_factory,
message_handler=message_handler,
max_retries=0,
mq_message_exchange=mocker.AsyncMock(),
mq_conn=mocker.AsyncMock(),
)
return message_processor |
configure step | ##
# Copyright 2015-2023 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for building and installing NEMO, implemented as an easyblock
@author: Oriol Mula-Valls (IC3)
"""
import os
import shutil
from easybuild.framework.easyblock import EasyBlock
from easybuild.framework.easyconfig import CUSTOM, MANDATORY
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import write_file
from easybuild.tools.modules import get_software_root
from easybuild.tools.run import run_cmd
class EB_NEMO(EasyBlock):
"""Support for building/installing NEMO."""
def __init__(self, *args, **kwargs):
"""Initialisation of custom class variables for NEMO."""
super(EB_NEMO, self).__init__(*args, **kwargs)
self.conf_name = 'EB_NEMO_CONFIG'
self.conf_arch_file = 'NEMOGCM/ARCH/arch-eb.fcm'
@staticmethod
def extra_options():
"""Custom easyconfig parameters for NEMO."""
extra_vars = {
'with_components': [None, "List of components to include (e.g. TOP_SRC)", MANDATORY],
'add_keys': [None, "Add compilation keys", CUSTOM],
'del_keys': [None, "Delete compilation keys", CUSTOM]
}
return EasyBlock.extra_options(extra_vars)
def METHOD_NAME(self):
"""Custom configuration procedure for NEMO."""
netcdf_fortran_root = get_software_root('netCDF-Fortran')
if not netcdf_fortran_root:
raise EasyBuildError("netCDF-Fortran is not available, but is a required dependency")
cfg = '\n'.join([
"%%NCDF_INC -I%s/include" % netcdf_fortran_root,
"%%NCDF_LIB -L%s/lib -lnetcdff" % netcdf_fortran_root,
"%%FC %s" % os.getenv('F90'),
"%FCFLAGS -r8 -O3 -traceback",
"%FFLAGS %FCFLAGS",
"%LD %FC",
"%LDFLAGS ",
"%FPPFLAGS -P -C",
"%AR ar",
"%ARFLAGS rs",
"%MK make",
"%USER_INC %NCDF_INC",
"%USER_LIB %NCDF_LIB"
])
write_file(self.conf_arch_file, cfg)
cmd = "./makenemo -n %s -d '%s' -j0 -m eb" % (self.conf_name, ' '.join(self.cfg['with_components']))
if self.cfg['add_keys'] is not None:
cmd += " add_key '%s'" % ' '.join(self.cfg['add_keys'])
if self.cfg['del_keys'] is not None:
cmd += " del_key '%s'" % ' '.join(self.cfg['del_keys'])
try:
dst = 'NEMOGCM/CONFIG'
os.chdir(dst)
self.log.debug("Changed to directory %s", dst)
except OSError as err:
raise EasyBuildError("Failed to change to directory %s: %s", dst, err)
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def build_step(self):
"""Custom build procedure for NEMO."""
cmd = "./makenemo -n %s -m eb" % self.conf_name
run_cmd(cmd, log_all=True, simple=True, log_ok=True)
def install_step(self):
"""Custom install procedure for NEMO."""
binpath = os.path.join(self.cfg['start_dir'], 'NEMOGCM', 'CONFIG', self.conf_name, 'BLD/bin')
try:
shutil.copytree(binpath, os.path.join(self.installdir, 'bin'))
except OSError as err:
raise EasyBuildError("Copying %s to installation dir failed: %s", binpath, err)
def sanity_check_step(self):
"""Custom sanity check for NEMO."""
custom_paths = {
'files': ['bin/nemo.exe'],
'dirs': [],
}
super(EB_NEMO, self).sanity_check_step(custom_paths=custom_paths) |
get actions | # encoding: utf-8
from collections import defaultdict
import ckan.plugins as p
import ckan.tests.legacy.mock_plugin as mock_plugin
class MapperPlugin(p.SingletonPlugin):
p.implements(p.IMapper, inherit=True)
def __init__(self, *args, **kw):
self.calls = []
def _get_instance_name(self, instance):
return getattr(instance, "name", None)
def before_insert(self, mapper, conn, instance):
self.calls.append(("before_insert", self._get_instance_name(instance)))
def after_insert(self, mapper, conn, instance):
self.calls.append(("after_insert", self._get_instance_name(instance)))
def before_delete(self, mapper, conn, instance):
self.calls.append(("before_delete", self._get_instance_name(instance)))
def after_delete(self, mapper, conn, instance):
self.calls.append(("after_delete", self._get_instance_name(instance)))
class MapperPlugin2(MapperPlugin):
p.implements(p.IMapper)
class SessionPlugin(p.SingletonPlugin):
p.implements(p.ISession, inherit=True)
def __init__(self, *args, **kw):
self.added = []
self.deleted = []
def before_insert(self, mapper, conn, instance):
self.added.append(instance)
def before_delete(self, mapper, conn, instance):
self.deleted.append(instance)
class RoutesPlugin(p.SingletonPlugin):
p.implements(p.IRoutes, inherit=True)
def __init__(self, *args, **kw):
self.calls_made = []
def before_map(self, map):
self.calls_made.append("before_map")
return map
def after_map(self, map):
self.calls_made.append("after_map")
return map
class PluginObserverPlugin(mock_plugin.MockSingletonPlugin):
p.implements(p.IPluginObserver)
class ActionPlugin(p.SingletonPlugin):
p.implements(p.IActions)
def METHOD_NAME(self):
return {"status_show": lambda context, data_dict: {}}
class AuthPlugin(p.SingletonPlugin):
p.implements(p.IAuthFunctions)
def get_auth_functions(self):
return {"package_list": lambda context, data_dict: {}}
class MockGroupControllerPlugin(p.SingletonPlugin):
p.implements(p.IGroupController)
def __init__(self, *args, **kw):
self.calls = defaultdict(int)
def read(self, entity):
self.calls["read"] += 1
def create(self, entity):
self.calls["create"] += 1
def edit(self, entity):
self.calls["edit"] += 1
def delete(self, entity):
self.calls["delete"] += 1
def before_view(self, data_dict):
self.calls["before_view"] += 1
return data_dict
class MockPackageControllerPlugin(p.SingletonPlugin):
p.implements(p.IPackageController)
def __init__(self, *args, **kw):
self.calls = defaultdict(int)
def read(self, entity):
self.calls["read"] += 1
def create(self, entity):
self.calls["create"] += 1
def edit(self, entity):
self.calls["edit"] += 1
def delete(self, entity):
self.calls["delete"] += 1
def before_search(self, search_params):
self.calls["before_search"] += 1
return search_params
def after_search(self, search_results, search_params):
self.calls["after_search"] += 1
return search_results
def before_index(self, data_dict):
self.calls["before_index"] += 1
return data_dict
def before_view(self, data_dict):
self.calls["before_view"] += 1
return data_dict
def after_create(self, context, data_dict):
self.calls["after_create"] += 1
self.id_in_dict = "id" in data_dict
return data_dict
def after_update(self, context, data_dict):
self.calls["after_update"] += 1
return data_dict
def after_delete(self, context, data_dict):
self.calls["after_delete"] += 1
return data_dict
def after_show(self, context, data_dict):
self.calls["after_show"] += 1
return data_dict
def update_facet_titles(self, facet_titles):
return facet_titles
class MockResourcePreviewExtension(mock_plugin.MockSingletonPlugin):
p.implements(p.IResourcePreview)
def __init__(self, *args, **kw):
self.calls = defaultdict(int)
def setup_template_variables(self, context, data_dict):
self.calls["setup_template_variables"] += 1
def can_preview(self, data_dict):
assert isinstance(data_dict["resource"], dict)
assert isinstance(data_dict["package"], dict)
assert "on_same_domain" in data_dict["resource"]
self.calls["can_preview"] += 1
return data_dict["resource"]["format"].lower() == "mock"
def preview_template(self, context, data_dict):
assert isinstance(data_dict["resource"], dict)
assert isinstance(data_dict["package"], dict)
self.calls["preview_templates"] += 1
return "tests/mock_resource_preview_template.html"
class JsonMockResourcePreviewExtension(mock_plugin.MockSingletonPlugin):
p.implements(p.IResourcePreview)
def __init__(self, *args, **kw):
self.calls = defaultdict(int)
def setup_template_variables(self, context, data_dict):
self.calls["setup_template_variables"] += 1
def can_preview(self, data_dict):
self.calls["can_preview"] += 1
return data_dict["resource"]["format"].lower() == "json"
def preview_template(self, context, data_dict):
self.calls["preview_templates"] += 1
return "tests/mock_json_resource_preview_template.html"
# importing this file loads all these extensions by default
# so clean up the extensions
# p.plugins_update() |
get field | import logging
import aiohttp
import urllib
import http.client
import asab
#
L = logging.getLogger(__name__)
#
class InfluxDBTarget(asab.Configurable):
"""
InfluxDB 2.0 API parameters:
url - [required] url string of your influxDB
bucket - [required] the destination bucket for writes
org - [required] the parameter value specifies the destination organization for writes
orgid - [optional] the parameter value specifies the ID of the destination organization for writes
NOTE: If both orgID and org are specified, org takes precedence
token - [required] API token to authenticate to the InfluxDB
Example:
[asab:metrics:influxdb]
url=http://localhost:8086
bucket=test
org=test
orgid=test
token=your_token
InfluxDB <1.8 API parameters:
url - [required] url string of your influxDB
username - [required] name of influxDB user
password - [required] password of influxDB user
Example:
[asab:metrics:influxdb]
url=http://localhost:8086
username=test
password=testtest
db=test
"""
ConfigDefaults = {
'url': 'http://localhost:8086/',
'db': 'mydb',
'username': '',
'password': '',
'proactor': True, # Use ProactorService to send metrics on thread
}
def __init__(self, svc, config_section_name, config=None):
super().__init__(config_section_name=config_section_name, config=config)
self.Headers = {}
self.BaseURL = self.Config.get('url').rstrip('/')
self.WriteRequest = '/write?db={}'.format(self.Config.get('db'))
username = self.Config.get('username')
if username is not None and len(username) > 0:
self.WriteRequest += '&u={}'.format(urllib.parse.quote(username, safe=''))
password = self.Config.get('password')
if password is not None and len(password) > 0:
self.WriteRequest += '&p={}'.format(urllib.parse.quote(password, safe=''))
# If org is specified we are building the write request for the InfluxDB 2.0 API
org = self.Config.get('org')
if org is not None:
self.WriteRequest = '/api/v2/write?org={}'.format(org)
bucket = self.Config.get('bucket')
if bucket is not None:
self.WriteRequest += '&bucket={}'.format(bucket)
orgid = self.Config.get('orgid')
if orgid is not None:
self.WriteRequest += '&orgID={}'.format(orgid)
token = self.Config.get('token')
if token is not None:
self.Headers = {'Authorization': 'Token {}'.format(token)}
self.WriteURL = "{}{}".format(self.BaseURL, self.WriteRequest)
# Proactor service is used for alternative delivery of the metrics into InfluxDB.
# It is handy when the main loop can become very busy.
if self.Config.getboolean('proactor'):
try:
from ..proactor import Module
svc.App.add_module(Module)
self.ProactorService = svc.App.get_service('asab.ProactorService')
except KeyError:
self.ProactorService = None
else:
self.ProactorService = None
async def process(self, m_tree, now):
rb = influxdb_format(m_tree, now)
if self.ProactorService is not None:
await self.ProactorService.execute(self._worker_upload, m_tree, rb)
else:
try:
async with aiohttp.ClientSession(headers=self.Headers) as session:
async with session.post(self.WriteURL, data=rb) as resp:
response = await resp.text()
if resp.status != 204:
L.warning("Error when sending metrics to Influx: {}\n{}".format(resp.status, response))
except aiohttp.client_exceptions.ClientConnectorError:
L.error("Failed to connect to InfluxDB at {}".format(self.BaseURL))
def _worker_upload(self, m_tree, rb):
if self.BaseURL.startswith("https://"):
conn = http.client.HTTPSConnection(self.BaseURL.replace("https://", ""))
else:
conn = http.client.HTTPConnection(self.BaseURL.replace("http://", ""))
try:
conn.request("POST", self.WriteRequest, rb, self.Headers)
except ConnectionRefusedError:
L.error("Failed to connect to InfluxDB at {}".format(self.BaseURL))
return
response = conn.getresponse()
if response.status != 204:
L.warning("Error when sending metrics to Influx: {}\n{}".format(
response.status, response.read().decode("utf-8"))
)
def METHOD_NAME(fk, fv):
if isinstance(fv, bool):
field = "{}={}".format(fk, 't' if fv else 'f')
elif isinstance(fv, int):
field = "{}={}i".format(fk, fv)
elif isinstance(fv, float):
field = "{}={}".format(fk, fv)
elif isinstance(fv, str):
# Escapes the Field Values and Field Keys if the value is a string
field = '{}="{}"'.format(fk.replace(" ", r"\ ").replace(",", r"\,").replace("=", r"\="), fv.replace("\\", "\\\\").replace('"', "\\\""))
else:
raise RuntimeError("Unknown/invalid type of the metrics field: {} {}".format(type(fv), fk))
return field
def combine_tags_and_field(tags, values, timestamp):
# First escape tags and values
tags = escape_tags(tags)
values = escape_values(values)
# Then combine the tags and then values
tags_string = ",".join(["{}={}".format(tk, tv) for tk, tv in tags.items()])
field_set = ",".join([METHOD_NAME(value_name, value) for value_name, value in values.items()])
return tags_string + " " + field_set + " " + str(int(timestamp * 1e9))
def build_metric_line(tags, values, timestamp, upperbound=None):
if upperbound is not None:
tags["le"] = upperbound
return combine_tags_and_field(tags, values, timestamp)
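# Rough example (hypothetical values, not from the original module): with
# tags={"host": "web 1"}, values={"count": 3} and timestamp=1.0, build_metric_line
# returns the line-protocol fragment
#   host=web\ 1 count=3i 1000000000
# i.e. escaped tags, the formatted field set, and a nanosecond timestamp.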
def get_timestamp(field, now):
if "measured_at" in field:
timestamp = field["measured_at"]
else:
timestamp = now
return timestamp
def metric_to_influxdb(metric_record, now):
name = escape_name(metric_record.get("name"))
fieldset = metric_record.get("fieldset")
metric_type = metric_record.get("type")
values_lines = []
if metric_type in ["Histogram", "HistogramWithDynamicTags"]:
for field in fieldset:
# SKIP empty fields
if all([bucket == {} for bucket in field.get("values").get("buckets").values()]):
continue
timestamp = get_timestamp(field, now)
for upperbound, bucket in field.get("values").get("buckets").items():
upperbound = str(upperbound)
if bucket == {}:
continue
values_lines.append(build_metric_line(field.get("tags").copy(), bucket, timestamp, upperbound))
values_lines.append(build_metric_line(field.get("tags").copy(), {"sum": field.get("values").get("sum")}, timestamp))
values_lines.append(build_metric_line(field.get("tags").copy(), {"count": field.get("values").get("count")}, timestamp))
else:
for field in fieldset:
# SKIP empty fields
if not field.get("values") or field.get("values") == {}:
continue
timestamp = get_timestamp(field, now)
values_lines.append(build_metric_line(field.get("tags"), (field.get("values")), timestamp))
return ["{},{}\n".format(name, line) for line in values_lines]
def escape_name(name: str):
return name.replace(" ", "\\ ").replace(",", "\\,")
def escape_tags(tags: dict):
"""
Escapes special characters in inputted tags to comply with InfluxDB's rules
https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/#special-characters
"""
clean: dict = {}
for k, v in tags.items():
if v is None:
v = "unknown"
clean[k.replace(" ", "\\ ").replace(",", "\\,").replace("=", "\\=")] = v.replace(" ", "\\ ").replace(",", "\\,").replace("=", "\\=")
return clean
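# Illustrative example (assumption): escape_tags({"app name": "my app,v=1"}) returns
# {"app\ name": "my\ app\,v\=1"}, i.e. spaces, commas and equals signs in tag keys and
# values are backslash-escaped as required by the line protocol; None values become "unknown".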
def escape_values(values: dict):
"""
Escapes special characters in inputted values to comply with InfluxDB's rules
https://docs.influxdata.com/influxdb/v2.3/reference/syntax/line-protocol/#special-characters
"""
clean: dict = {}
for k, v in values.items():
# Escapes the Field Keys
clean[k.replace(" ", r"\ ").replace(",", r"\,").replace("=", r"\=")] = v
return clean
def influxdb_format(m_tree, now):
rb = []
for metric_record in m_tree:
influx_records = metric_to_influxdb(metric_record, now)
rb.extend(influx_records)
return ''.join(rb) |
lookup storage | # This file is part of Checkbox.
#
# Copyright 2012-2014 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <[email protected]>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
:mod:`plainbox.impl.commands.session` -- run sub-command
========================================================
"""
from base64 import b64encode
from logging import getLogger
from shutil import copyfileobj
from shutil import make_archive
import io
import itertools
import os
import sys
from plainbox.i18n import gettext as _
from plainbox.impl.applogic import get_all_exporter_names
from plainbox.impl.exporter import ByteStringStreamTranslator
from plainbox.impl.session import SessionManager
from plainbox.impl.session import SessionPeekHelper
from plainbox.impl.session import SessionResumeError
from plainbox.impl.session.storage import WellKnownDirsHelper
logger = getLogger("plainbox.commands.session")
class SessionInvocation:
"""
Invocation of the 'plainbox session' command.
:ivar ns:
The argparse namespace obtained from SessionCommand
"""
def __init__(self, ns, provider_loader):
self.ns = ns
self.provider_loader = provider_loader
def run(self):
cmd = getattr(self.ns, 'session_cmd', self.ns.default_session_cmd)
if cmd == 'list':
self.list_sessions()
elif cmd == 'remove':
self.remove_session()
elif cmd == 'show':
self.show_session()
elif cmd == 'archive':
self.archive_session()
elif cmd == 'export':
self.export_session()
def list_sessions(self):
storage = None
for storage in WellKnownDirsHelper.get_storage_list():
if self.ns.only_ids:
print(storage.id)
continue
data = storage.load_checkpoint()
if len(data) > 0:
metadata = SessionPeekHelper().peek(data)
print(_("session {0} app:{1}, flags:{2!r}, title:{3!r}")
.format(storage.id, metadata.app_id,
sorted(metadata.flags), metadata.title))
else:
print(_("session {0} (not saved yet)").format(storage.id))
if not self.ns.only_ids and storage is None:
print(_("There are no stored sessions"))
def remove_session(self):
for session_id in self.ns.session_id_list:
storage = self.METHOD_NAME(session_id)
if storage is None:
print(_("No such session"), session_id)
else:
storage.remove()
print(_("Session removed"), session_id)
def show_session(self):
for session_id in self.ns.session_id_list:
storage = self.METHOD_NAME(session_id)
if storage is None:
print(_("No such session"), session_id)
else:
print("[{}]".format(session_id))
print(_("location:"), storage.location)
data = storage.load_checkpoint()
if len(data) == 0:
continue
metadata = SessionPeekHelper().peek(data)
print(_("application ID: {0!r}").format(metadata.app_id))
print(_("application-specific blob: {0}").format(
b64encode(metadata.app_blob).decode('ASCII')
if metadata.app_blob is not None else None))
print(_("session title: {0!r}").format(metadata.title))
print(_("session flags: {0!r}").format(sorted(metadata.flags)))
print(_("current job ID: {0!r}").format(
metadata.running_job_name))
print(_("data size: {0}").format(len(data)))
if self.ns.resume:
print(_("Resuming session {0} ...").format(storage.id))
try:
self.resume_session(storage)
except SessionResumeError as exc:
print(_("Failed to resume session:"), exc)
else:
print(_("session resumed successfully"))
def resume_session(self, storage):
return SessionManager.load_session(
self._get_all_units(), storage, flags=self.ns.flag)
def archive_session(self):
session_id = self.ns.session_id
storage = self.METHOD_NAME(session_id)
if storage is None:
print(_("No such session: {0}").format(self.ns.session_id))
else:
print(_("Archiving session..."))
archive = make_archive(
self.ns.archive, 'gztar',
os.path.dirname(storage.location),
os.path.basename(storage.location))
print(_("Created archive: {0}").format(archive))
def export_session(self):
if self.ns.output_format == _('?'):
self._print_output_format_list()
return 0
elif self.ns.output_options == _('?'):
self._print_output_option_list()
return 0
storage = self.METHOD_NAME(self.ns.session_id)
if storage is None:
print(_("No such session: {0}").format(self.ns.session_id))
else:
print(_("Exporting session..."))
manager = SessionManager.load_session(
self._get_all_units(), storage, flags=self.ns.flag)
exporter = self._create_exporter(manager)
# Get a stream with exported session data.
exported_stream = io.BytesIO()
exporter.dump_from_session_manager(manager, exported_stream)
exported_stream.seek(0) # Need to rewind the file, puagh
# Write the stream to file if requested
if self.ns.output_file is sys.stdout:
# This requires a bit more finesse, as exporters output bytes
# and stdout needs a string.
translating_stream = ByteStringStreamTranslator(
self.ns.output_file, "utf-8")
copyfileobj(exported_stream, translating_stream)
else:
print(_("Saving results to {}").format(
self.ns.output_file.name))
copyfileobj(exported_stream, self.ns.output_file)
if self.ns.output_file is not sys.stdout:
self.ns.output_file.close()
def _get_all_units(self):
return list(
itertools.chain(*[p.unit_list for p in self.provider_loader()]))
def _print_output_format_list(self):
print(_("Available output formats: {}").format(
', '.join(get_all_exporter_names())))
def _print_output_option_list(self):
print(_("Each format may support a different set of options"))
with SessionManager.get_throwaway_manager() as manager:
for name, exporter in manager.exporter_map.items():
print("{}: {}".format(
name, ", ".join(exporter.exporter_cls.supported_option_list)))
def _create_exporter(self, manager):
if self.ns.output_options:
option_list = self.ns.output_options.split(',')
else:
option_list = None
return manager.create_exporter(self.ns.output_format, option_list)
def METHOD_NAME(self, session_id):
for storage in WellKnownDirsHelper.get_storage_list():
if storage.id == session_id:
return storage |
do train | # encoding=utf8
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, field
from functools import partial
from typing import Optional
import numpy as np
import paddle
from paddle.metric import Accuracy
from utils import load_ds_xnli
from paddlenlp.data import Pad, Stack
from paddlenlp.trainer import PdArgumentParser, Trainer, TrainingArguments, set_seed
from paddlenlp.transformers import (
ChineseBertForSequenceClassification,
ChineseBertTokenizer,
)
@dataclass
class ModelArguments:
max_seq_length: Optional[int] = field(
default=512,
metadata={
"help": (
"The maximum total input sequence length after tokenization. "
"Sequences longer than this will be truncated, sequences shorter will be padded."
)
},
)
@dataclass
class DataArguments:
data_path: Optional[str] = field(
default="./data",
metadata={"help": "The path of datasets to be loaded."},
)
def convert_example(example, tokenizer, max_length=512, is_test=False):
label_map = {"contradictory": 0, "contradiction": 0, "entailment": 2, "neutral": 1}
first, second, third = example["sentence1"], example["sentence2"], example["label"]
encoded_inputs = tokenizer(first, second, max_length=max_length)
input_ids = encoded_inputs["input_ids"]
pinyin_ids = encoded_inputs["pinyin_ids"]
label = np.array([label_map[third]], dtype="int64")
assert len(input_ids) <= max_length
return input_ids, pinyin_ids, label
@dataclass
class DataCollator:
tokenizer: ChineseBertTokenizer
def __call__(self, features):
input_ids = []
pinyin_ids = []
labels = []
batch = {}
for feature in features:
input_idx, pinyin_idx, label = feature
input_ids.append(input_idx)
pinyin_ids.append(pinyin_idx)
labels.append(label)
input_ids = (Pad(axis=0, pad_val=self.tokenizer.pad_token_id)(input_ids),) # input_ids
pinyin_ids = (Pad(axis=0, pad_val=0)(pinyin_ids),) # pinyin_ids
labels = (Stack()(labels),) # labels
batch["input_ids"] = input_ids[0]
batch["pinyin_ids"] = pinyin_ids[0]
batch["labels"] = labels[0]
return batch
def compute_metrics(eval_preds):
labels = paddle.to_tensor(eval_preds.label_ids, dtype="int64")
preds = paddle.to_tensor(eval_preds.predictions)
preds = paddle.nn.functional.softmax(preds, axis=-1)
labels = paddle.argmax(labels, axis=-1)
metric = Accuracy()
correct = metric.compute(preds, labels)
metric.update(correct)
acc = metric.accumulate()
return {"accuracy": acc}
def METHOD_NAME():
parser = PdArgumentParser((ModelArguments, DataArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
paddle.set_device(training_args.device)
if paddle.distributed.get_world_size() > 1:
paddle.distributed.init_parallel_env()
set_seed(training_args.seed)
data_dir = data_args.data_path
train_path = os.path.join(data_dir, "train.tsv")
dev_path = os.path.join(data_dir, "dev.tsv")
test_path = os.path.join(data_dir, "test.tsv")
train_ds, dev_ds, test_ds = load_ds_xnli(datafiles=[train_path, dev_path, test_path])
model = ChineseBertForSequenceClassification.from_pretrained("ChineseBERT-large", num_classes=3)
tokenizer = ChineseBertTokenizer.from_pretrained("ChineseBERT-large")
print(" | load pretrained model state sucessfully.")
# Process the data into a data format that the model can read in.
trans_func = partial(convert_example, tokenizer=tokenizer, max_length=model_args.max_seq_length)
train_ds = train_ds.map(trans_func, lazy=False)
dev_ds = dev_ds.map(trans_func, lazy=False)
test_ds = test_ds.map(trans_func, lazy=False)
# Batch the data: pad text sequences of different lengths to the maximum length in the batch,
# and stack the labels together
batchify_fn = DataCollator(tokenizer)
criterion = paddle.nn.loss.CrossEntropyLoss()
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_ds if training_args.METHOD_NAME else None,
eval_dataset=dev_ds if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=batchify_fn,
criterion=criterion,
compute_metrics=compute_metrics,
)
if training_args.METHOD_NAME:
train_results = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
metrics = train_results.metrics
trainer.save_model()
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
if training_args.do_eval:
eval_metrics = trainer.evaluate()
trainer.log_metrics("eval", eval_metrics)
if __name__ == "__main__":
METHOD_NAME() |
streams | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import base64
import codecs
import hashlib
import hmac
import urllib.parse
from enum import Enum
from functools import wraps
from typing import Any, List, Mapping, Tuple
import pendulum
import requests
from airbyte_cdk import AirbyteLogger
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.METHOD_NAME import Stream
from airbyte_cdk.sources.METHOD_NAME.http.auth import HttpAuthenticator
from pendulum.parsing.exceptions import ParserError
from .METHOD_NAME import Addresses, CustomersCart, OrderItems, OrderPayments, Orders, OrderStatuses, Products
class AuthMethod(Enum):
CENTRAL_API_ROUTER = 1
SINGLE_STORE_ACCESS_TOKEN = 2
class CustomHeaderAuthenticator(HttpAuthenticator):
def __init__(self, access_token, store_name):
self.auth_method = AuthMethod.SINGLE_STORE_ACCESS_TOKEN
self._store_name = store_name
self._access_token = access_token
def get_auth_header(self) -> Mapping[str, Any]:
return {"X-AC-Auth-Token": self._access_token}
def url_base(self) -> str:
return f"https://{self._store_name}/api/v1/"
def extra_params(self, stream, params):
return {}
class CentralAPIHeaderAuthenticator(HttpAuthenticator):
def __init__(self, user_name, user_secret, site_id):
self.auth_method = AuthMethod.CENTRAL_API_ROUTER
self.user_name = user_name
self.user_secret = user_secret
self.site_id = site_id
def get_auth_header(self) -> Mapping[str, Any]:
"""
This method is not implemented here because the Central API Router
needs to build the header for each request based
on path + parameters (next token, pagination, page size)
To solve this the logic was moved to `request_headers` in CartStream class.
"""
return {}
def url_base(self) -> str:
return "https://public.americommerce.com/api/v1/"
def extra_params(self, stream, params):
return self.generate_auth_signature(stream, params)
def generate_auth_signature(self, stream, params) -> Mapping[str, Any]:
"""
How to build signature:
1. build a string concatenated with:
request method (uppercase) & request path and query & provisioning user name
example: GET&/api/v1/customers&myUser
2. Generate HMACSHA256 hash using this string as the input, and the provisioning user secret as the key
3. Base64 this hash to be used as the final value in the header
"""
path_with_params = f"/api/v1/{stream.path()}?{urllib.parse.urlencode(params)}"
msg = codecs.encode(f"GET&{path_with_params}&{self.user_name}")
key = codecs.encode(self.user_secret)
dig = hmac.new(key=key, msg=msg, digestmod=hashlib.sha256).digest()
auth_signature = base64.b64encode(dig).decode()
return {"X-AC-PUB-Site-ID": self.site_id, "X-AC-PUB-User": self.user_name, "X-AC-PUB-Auth-Signature": auth_signature}
class SourceCart(AbstractSource):
def validate_config_values(func):
"""Check input config values for check_connection and stream functions. It will raise an exception if there is an parsing error"""
@wraps(func)
def decorator(self_, *args, **kwargs):
for arg in args:
if isinstance(arg, Mapping):
try:
# parse date strings with the pendulum library. It will raise ParserError if the format is invalid.
pendulum.parse(arg["start_date"])
# try to check an end_date value. It can be used for different CI tests
end_date = arg.get("end_date")
if end_date:
pendulum.parse(end_date)
except ParserError as e:
raise Exception(f"{str(e)}. Example: 2021-01-01T00:00:00Z")
break
return func(self_, *args, **kwargs)
return decorator
def get_auth(self, config):
credentials = config.get("credentials", {})
auth_method = credentials.get("auth_type")
if auth_method == AuthMethod.CENTRAL_API_ROUTER.name:
authenticator = CentralAPIHeaderAuthenticator(
user_name=credentials["user_name"], user_secret=credentials["user_secret"], site_id=credentials["site_id"]
)
elif auth_method == AuthMethod.SINGLE_STORE_ACCESS_TOKEN.name:
authenticator = CustomHeaderAuthenticator(access_token=credentials["access_token"], store_name=credentials["store_name"])
else:
raise NotImplementedError(f"Authentication method: {auth_method} not implemented.")
return authenticator
@validate_config_values
def check_connection(self, logger: AirbyteLogger, config: Mapping[str, Any]) -> Tuple[bool, Any]:
try:
authenticator = self.get_auth(config)
stream = Products(authenticator=authenticator, start_date=config["start_date"])
records = stream.read_records(sync_mode=SyncMode.full_refresh)
next(records)
return True, None
except Exception as e:
if isinstance(e, requests.exceptions.HTTPError) and e.response.status_code == 401:
return False, f"Please check your access token. Error: {repr(e)}"
if isinstance(e, requests.exceptions.ConnectionError):
err_message = f"Please check your `store_name` or internet connection. Error: {repr(e)}"
return False, err_message
return False, repr(e)
@validate_config_values
def METHOD_NAME(self, config: Mapping[str, Any]) -> List[Stream]:
authenticator = self.get_auth(config)
args = {
"authenticator": authenticator,
"start_date": config["start_date"],
"end_date": config.get("end_date"),
}
return [
CustomersCart(**args),
Orders(**args),
OrderPayments(**args),
OrderStatuses(**args),
OrderItems(**args),
Products(**args),
Addresses(**args),
] |
test pick analysis via api 1 | import numpy as np
import pytest
from libertem.analysis.raw import PickFrameAnalysis
from libertem.io.dataset.memory import MemoryDataSet
from utils import _mk_random
def test_pick_analysis(lt_ctx):
"""
the other tests cover the pick job, this one uses the analysis
"""
data = _mk_random(size=(16, 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 5, "y": 5})
result = lt_ctx.run(analysis)
assert result.intensity.raw_data.shape == (16, 16)
assert np.allclose(result.intensity.raw_data, data[5, 5])
assert np.allclose(result.intensity_lin.raw_data, data[5, 5])
def test_pick_from_3d_ds(lt_ctx):
data = _mk_random(size=(16 * 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 5})
result = lt_ctx.run(analysis)
assert result.intensity.raw_data.shape == (16, 16)
assert np.allclose(result.intensity.raw_data, data[5])
assert np.allclose(result.intensity_lin.raw_data, data[5])
def METHOD_NAME(lt_ctx):
data = _mk_random(size=(16, 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = lt_ctx.create_pick_analysis(dataset=dataset, x=8, y=7)
result = lt_ctx.run(analysis)
assert result.intensity.raw_data.shape == (16, 16)
assert np.allclose(result.intensity.raw_data, data[7, 8])
assert np.allclose(result.intensity_lin.raw_data, data[7, 8])
def test_pick_analysis_via_api_2_3d_ds(lt_ctx):
data = _mk_random(size=(16 * 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = lt_ctx.create_pick_analysis(dataset=dataset, x=8)
result = lt_ctx.run(analysis)
assert result.intensity.raw_data.shape == (16, 16)
assert np.allclose(result.intensity.raw_data, data[8])
assert np.allclose(result.intensity_lin.raw_data, data[8])
def test_pick_analysis_via_api_3_3d_ds_fail_1(lt_ctx):
data = _mk_random(size=(16 * 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8, "z": 11})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
def test_pick_analysis_via_api_3_3d_ds_fail_2(lt_ctx):
data = _mk_random(size=(16, 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8, "z": 11})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
def test_pick_analysis_via_api_3_3d_ds_fail_3(lt_ctx):
data = _mk_random(size=(16, 16, 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
def test_pick_analysis_via_api_3_3d_ds_fail_4(lt_ctx):
data = _mk_random(size=(16, 16, 16, 16, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8, "z": 11})
with pytest.raises(ValueError):
lt_ctx.run(analysis)
def test_pick_analysis_via_api_3_3d_ds_fail_5(lt_ctx):
data = _mk_random(size=(16, 256, 16, 16))
dataset = MemoryDataSet(
data=data,
tileshape=(1, 16, 16),
num_partitions=2,
sig_dims=2
)
analysis = PickFrameAnalysis(dataset=dataset, parameters={"x": 7, "y": 8, "z": 11})
with pytest.raises(ValueError):
lt_ctx.run(analysis) |
has object permission | from ipaddress import IPv4Address, IPv4Network
from rest_framework import permissions
class IsActiveUser(permissions.BasePermission):
"""
Allows access only to activated users.
"""
def has_permission(self, request, view):
# Authenticated users can have is_active = None (pending activation). Those are not considered active.
return request.user and request.user.is_active
class IsAPIToken(permissions.BasePermission):
"""
Allows access only with API token (.mfa is None).
"""
message = "API token required."
code = "api_token_required"
def has_permission(self, request, view):
return request.auth.mfa is None
class IsLoginToken(permissions.BasePermission):
"""
Allows access only with login token (.mfa is not None).
DRF permission negation is flawed, so ~IsAPIToken does not give the correct behavior:
https://github.com/encode/django-rest-framework/issues/6598#issuecomment-484824743
"""
message = "Login token required."
code = "login_token_required"
def has_permission(self, request, view):
return request.auth.mfa is not None
class MFARequiredIfEnabled(permissions.BasePermission):
"""
Allows access only to when
- the token is a human token that has passed MFA, or
- the token is a human token that has not passed MFA, but the user has not enabled MFA at all.
"""
message = "Multi-factor authentication required."
code = "mfa_required"
def has_permission(self, request, view):
return request.auth.mfa or (
request.auth.mfa is False and not request.user.mfa_enabled
)
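# Informal summary (not in the original source): API tokens (mfa is None) are denied here,
# login tokens that passed MFA are allowed, and login tokens that did not pass MFA are allowed
# only if the user has not enabled MFA at all.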
class IsOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of an object to view or edit it.
"""
def has_object_permission(self, request, view, obj):
return obj.owner == request.user
class IsDomainOwner(permissions.BasePermission):
"""
Custom permission to only allow owners of a domain to view or edit an object owned by that domain.
"""
def has_object_permission(self, request, view, obj):
return obj.domain.owner == request.user
class TokenNoDomainPolicy(permissions.BasePermission):
"""
Permission to check whether a token is unrestricted by any domain policy.
"""
def has_permission(self, request, view):
return request.auth.get_policy(domain=None) is None
class TokenDomainPolicyBasePermission(permissions.BasePermission):
"""
Base permission to check whether a token authorizes specific actions on a domain.
"""
perm_field = None
def METHOD_NAME(self, request, view, obj):
policy = request.auth.get_policy(domain=obj)
# If the token has no domain policy, there are no restrictions
if policy is None:
return True
# Otherwise, return the requested permission
return getattr(policy, self.perm_field)
class TokenHasDomainBasePermission(TokenDomainPolicyBasePermission):
"""
Base permission for checking a token's domain policy, for the view domain.
"""
def has_permission(self, request, view):
return self.METHOD_NAME(request, view, view.domain)
class TokenHasDomainDynDNSPermission(TokenHasDomainBasePermission):
"""
Custom permission to check whether a token authorizes using the dynDNS interface for the view domain.
"""
perm_field = "perm_dyndns"
class TokenHasDomainRRsetsPermission(TokenHasDomainBasePermission):
"""
Custom permission to check whether a token authorizes accessing RRsets for the view domain.
"""
perm_field = "perm_rrsets"
class AuthTokenCorrespondsToViewToken(permissions.BasePermission):
"""
Permission to check whether the view kwargs's token_id corresponds to the current token.
"""
def has_permission(self, request, view):
return view.kwargs["token_id"] == request.auth.pk
class IsVPNClient(permissions.BasePermission):
"""
Permission that requires that the user is accessing using an IP from the VPN net.
"""
message = "Inadmissible client IP."
def has_permission(self, request, view):
ip = IPv4Address(request.META.get("REMOTE_ADDR"))
return ip in IPv4Network("10.8.0.0/24")
class HasManageTokensPermission(permissions.BasePermission):
"""
Permission to check whether a token has "manage tokens" permission.
"""
def has_permission(self, request, view):
return request.auth.perm_manage_tokens
class WithinDomainLimit(permissions.BasePermission):
"""
Permission that requires that the user still has domain limit quota available.
"""
message = (
"Domain limit exceeded. Please contact support to create additional domains."
)
def has_permission(self, request, view):
return (
request.user.limit_domains is None
or request.user.domains.count() < request.user.limit_domains
) |
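# --- Editor's usage sketch (not part of the original module). The view class,
# its name, and the assumption that it exposes a `domain` attribute and uses
# the project's token authentication are hypothetical; the sketch only shows
# how the permission classes above compose with DRF's `&` operator. ---
from rest_framework import generics

class ExampleRRsetListView(generics.ListAPIView):
    """Hypothetical view: only active users that have passed MFA (if enabled)
    and whose token policy grants RRset access for the view's domain get in."""
    permission_classes = [
        IsActiveUser & MFARequiredIfEnabled & TokenHasDomainRRsetsPermission
    ]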
decompile | from fontTools.misc import sstruct
from fontTools.misc.textTools import bytesjoin, safeEval, readHex
from . import DefaultTable
import sys
import array
GPKGFormat = """
> # big endian
version: H
flags: H
numGMAPs: H
numGlyplets: H
"""
# psFontName is a byte string which follows the record above. This is zero padded
# to the beginning of the records array. The recordsOffset is 32-bit aligned.
class table_G_P_K_G_(DefaultTable.DefaultTable):
def METHOD_NAME(self, data, ttFont):
dummy, newData = sstruct.unpack2(GPKGFormat, data, self)
GMAPoffsets = array.array("I")
endPos = (self.numGMAPs + 1) * 4
GMAPoffsets.frombytes(newData[:endPos])
if sys.byteorder != "big":
GMAPoffsets.byteswap()
self.GMAPs = []
for i in range(self.numGMAPs):
start = GMAPoffsets[i]
end = GMAPoffsets[i + 1]
self.GMAPs.append(data[start:end])
pos = endPos
endPos = pos + (self.numGlyplets + 1) * 4
glyphletOffsets = array.array("I")
glyphletOffsets.frombytes(newData[pos:endPos])
if sys.byteorder != "big":
glyphletOffsets.byteswap()
self.glyphlets = []
for i in range(self.numGlyplets):
start = glyphletOffsets[i]
end = glyphletOffsets[i + 1]
self.glyphlets.append(data[start:end])
def compile(self, ttFont):
self.numGMAPs = len(self.GMAPs)
self.numGlyplets = len(self.glyphlets)
GMAPoffsets = [0] * (self.numGMAPs + 1)
glyphletOffsets = [0] * (self.numGlyplets + 1)
dataList = [sstruct.pack(GPKGFormat, self)]
pos = len(dataList[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
GMAPoffsets[0] = pos
for i in range(1, self.numGMAPs + 1):
pos += len(self.GMAPs[i - 1])
GMAPoffsets[i] = pos
gmapArray = array.array("I", GMAPoffsets)
if sys.byteorder != "big":
gmapArray.byteswap()
dataList.append(gmapArray.tobytes())
glyphletOffsets[0] = pos
for i in range(1, self.numGlyplets + 1):
pos += len(self.glyphlets[i - 1])
glyphletOffsets[i] = pos
glyphletArray = array.array("I", glyphletOffsets)
if sys.byteorder != "big":
glyphletArray.byteswap()
dataList.append(glyphletArray.tobytes())
dataList += self.GMAPs
dataList += self.glyphlets
data = bytesjoin(dataList)
return data
def toXML(self, writer, ttFont):
writer.comment("Most of this table will be recalculated by the compiler")
writer.newline()
formatstring, names, fixes = sstruct.getformat(GPKGFormat)
for name in names:
value = getattr(self, name)
writer.simpletag(name, value=value)
writer.newline()
writer.begintag("GMAPs")
writer.newline()
for gmapData in self.GMAPs:
writer.begintag("hexdata")
writer.newline()
writer.dumphex(gmapData)
writer.endtag("hexdata")
writer.newline()
writer.endtag("GMAPs")
writer.newline()
writer.begintag("glyphlets")
writer.newline()
for glyphletData in self.glyphlets:
writer.begintag("hexdata")
writer.newline()
writer.dumphex(glyphletData)
writer.endtag("hexdata")
writer.newline()
writer.endtag("glyphlets")
writer.newline()
def fromXML(self, name, attrs, content, ttFont):
if name == "GMAPs":
if not hasattr(self, "GMAPs"):
self.GMAPs = []
for element in content:
if isinstance(element, str):
continue
itemName, itemAttrs, itemContent = element
if itemName == "hexdata":
self.GMAPs.append(readHex(itemContent))
elif name == "glyphlets":
if not hasattr(self, "glyphlets"):
self.glyphlets = []
for element in content:
if isinstance(element, str):
continue
itemName, itemAttrs, itemContent = element
if itemName == "hexdata":
self.glyphlets.append(readHex(itemContent))
else:
setattr(self, name, safeEval(attrs["value"])) |
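def _example_gpkg_roundtrip():
    """Editor's sketch (not part of the original table module): compile a
    synthetic GPKG table and decompile the resulting bytes again. Neither
    code path touches the ttFont argument, so None is passed here."""
    table = table_G_P_K_G_("GPKG")
    table.version, table.flags = 0, 0
    table.GMAPs = [b"GMAP-0", b"GMAP-1"]
    table.glyphlets = [b"glyphlet-0"]
    data = table.compile(None)
    parsed = table_G_P_K_G_("GPKG")
    parsed.METHOD_NAME(data, None)
    assert parsed.GMAPs == table.GMAPs and parsed.glyphlets == table.glyphlets
    return parsed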
magnitude | """
Set of util functions for the section similarity project.
"""
import copy
import numpy as np
import json
import scipy.fftpack
import pylab as plt
def resample_mx(X, incolpos, outcolpos):
"""
Y = resample_mx(X, incolpos, outcolpos)
X is taken as a set of columns, each starting at 'time'
colpos, and continuing until the start of the next column.
Y is a similar matrix, with time boundaries defined by
outcolpos. Each column of Y is a duration-weighted average of
the overlapping columns of X.
2010-04-14 Dan Ellis [email protected] based on samplemx/beatavg
-> python: TBM, 2011-11-05, TESTED
"""
noutcols = len(outcolpos)
Y = np.zeros((X.shape[0], noutcols))
# assign 'end times' to final columns
if outcolpos.max() > incolpos.max():
incolpos = np.concatenate([incolpos,[outcolpos.max()]])
X = np.concatenate([X, X[:,-1].reshape(X.shape[0],1)], axis=1)
outcolpos = np.concatenate([outcolpos, [outcolpos[-1]]])
    # durations (default weights) of input columns
incoldurs = np.concatenate([np.diff(incolpos), [1]])
for c in range(noutcols):
firstincol = np.where(incolpos <= outcolpos[c])[0][-1]
firstincolnext = np.where(incolpos < outcolpos[c+1])[0][-1]
lastincol = max(firstincol,firstincolnext)
# default weights
wts = copy.deepcopy(incoldurs[firstincol:lastincol+1])
# now fix up by partial overlap at ends
if len(wts) > 1:
wts[0] = wts[0] - (outcolpos[c] - incolpos[firstincol])
wts[-1] = wts[-1] - (incolpos[lastincol+1] - outcolpos[c+1])
wts = wts * 1. / float(sum(wts))
Y[:,c] = np.dot(X[:,firstincol:lastincol+1], wts)
# done
return Y
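def _example_resample_mx():
    """Editor's sketch (synthetic data, not part of the original module):
    resample a 2 x 3 feature matrix whose columns start at t = 0, 1, 3 onto
    output columns starting at t = 0, 2, 4. Each output column is a
    duration-weighted average; the expected result is
    [[1.5, 3.0, 4.0], [0.5, 2.0, 3.0]]."""
    X = np.array([[1.0, 2.0, 4.0],
                  [0.0, 1.0, 3.0]])
    return resample_mx(X, np.array([0.0, 1.0, 3.0]), np.array([0.0, 2.0, 4.0]))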
def METHOD_NAME(X):
"""Magnitude of a complex matrix."""
r = np.real(X)
i = np.imag(X)
    return np.sqrt(r * r + i * i)
def json_to_bounds(segments_json):
"""Extracts the boundaries from a json file and puts them into
an np array."""
f = open(segments_json)
segments = json.load(f)["segments"]
bounds = []
for segment in segments:
bounds.append(segment["start"])
bounds.append(bounds[-1] + segments[-1]["duration"]) # Add last boundary
f.close()
return np.asarray(bounds)
def json_bounds_to_bounds(bounds_json):
"""Extracts the boundaries from a bounds json file and puts them into
an np array."""
f = open(bounds_json)
segments = json.load(f)["bounds"]
bounds = []
for segment in segments:
bounds.append(segment["start"])
f.close()
return np.asarray(bounds)
def json_to_labels(segments_json):
"""Extracts the labels from a json file and puts them into
an np array."""
f = open(segments_json)
segments = json.load(f)["segments"]
labels = []
str_labels = []
for segment in segments:
if not segment["label"] in str_labels:
str_labels.append(segment["label"])
labels.append(len(str_labels)-1)
else:
label_idx = np.where(np.asarray(str_labels) == segment["label"])[0][0]
labels.append(label_idx)
f.close()
return np.asarray(labels)
def json_to_beats(beats_json_file):
"""Extracts the beats from the beats_json_file and puts them into
an np array."""
f = open(beats_json_file, "r")
beats_json = json.load(f)
beats = []
for beat in beats_json["beats"]:
beats.append(beat["start"])
f.close()
return np.asarray(beats)
def analyze_results(file):
f = open(file, "r")
lines = f.readlines()
F = []
for line in lines:
F.append(float(line.split("\t")[0]))
f.close()
#print np.mean(F)
def compute_ffmc2d(X):
"""Computes the 2D-Fourier Magnitude Coefficients."""
# 2d-fft
fft2 = scipy.fftpack.fft2(X)
# Magnitude
fft2m = METHOD_NAME(fft2)
# FFTshift and flatten
fftshift = scipy.fftpack.fftshift(fft2m).flatten()
#cmap = plt.cm.get_cmap('hot')
#plt.imshow(np.log1p(scipy.fftpack.fftshift(fft2m)).T, interpolation="nearest",
# aspect="auto", cmap=cmap)
#plt.show()
# Take out redundant components
return fftshift[:fftshift.shape[0] // 2 + 1] |
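def _example_ffmc2d():
    """Editor's sketch (synthetic data, not part of the original module):
    2D-FMC of a random 8 x 8 patch. Only the non-redundant half of the
    shifted magnitude spectrum is kept, so the result has
    8 * 8 // 2 + 1 = 33 coefficients."""
    patch = np.random.rand(8, 8)
    return compute_ffmc2d(patch)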
test meta inheritance | import pytest
from iommi.declarative.dispatch import dispatch
from iommi.declarative.namespace import Namespace
from iommi.declarative.with_meta import with_meta
def test_empty():
@with_meta
class Test:
def __init__(self, foo):
assert foo == 'bar'
Test('bar')
def test_constructor():
@with_meta
class Test:
class Meta:
foo = 'bar'
def __init__(self, foo):
assert foo == 'bar'
# noinspection PyArgumentList
Test()
def test_override():
@with_meta
class Test:
class Meta:
foo = 'bar'
def __init__(self, foo):
assert foo == 'baz'
Test(foo='baz')
def test_inheritance():
@with_meta
class Test:
class Meta:
foo = 'bar'
@with_meta
class TestSubclass(Test):
def __init__(self, foo):
assert foo == 'bar'
# noinspection PyArgumentList
TestSubclass()
def test_inheritance_base():
@with_meta
class Test:
def __init__(self, foo):
assert 'bar' == foo
class TestSubclass(Test):
class Meta:
foo = 'bar'
# noinspection PyArgumentList
TestSubclass()
def test_inheritance_with_override():
@with_meta
class Test:
class Meta:
foo = 'bar'
@with_meta
class TestSubclass(Test):
class Meta:
foo = 'baz'
def __init__(self, foo):
assert foo == 'baz'
# noinspection PyArgumentList
TestSubclass()
def test_pos_arg_override():
@with_meta
class Test:
class Meta:
foo = 'foo'
bar = 'bar'
def __init__(self, apa, foo, gapa, **kwargs):
assert apa == 'apa'
assert foo == 'foo'
assert gapa == 'gapa'
assert 'bar' in kwargs
# noinspection PyArgumentList
Test('apa', gapa='gapa')
def test_args_get_by_pos():
@with_meta
class Test:
class Meta:
foo = 'foo'
def __init__(self, foo):
assert foo == 'foo'
# noinspection PyArgumentList
Test()
def test_args_get_by_name():
@with_meta
class Test:
class Meta:
foo = 'foo'
def __init__(self, foo=None):
assert foo == 'foo'
Test()
def test_args_override_by_pos():
@with_meta
class Test:
class Meta:
foo = 'foo'
def __init__(self, foo):
assert foo == 'bar'
Test('bar')
def test_args_override_by_name():
@with_meta
class Test:
class Meta:
foo = 'foo'
def __init__(self, foo):
self.foo = foo
t = Test(foo='bar')
assert t.foo == 'bar'
def test_too_many_args_check():
@with_meta
class Test:
class Meta:
foo = 'foo'
# noinspection PyUnusedLocal
def __init__(self, foo):
pass
with pytest.raises(TypeError) as e:
# noinspection PyArgumentList
Test('foo', 'bar')
assert 'Too many positional arguments' == str(e.value)
# noinspection PyArgumentEqualDefault
def test_add_init_kwargs():
@with_meta(add_init_kwargs=True)
class Test:
class Meta:
foo = 'bar'
_bar = 'baz'
def __init__(self, foo):
assert 'bar' == foo
# noinspection PyArgumentList
Test()
def test_not_add_init_kwargs():
@with_meta(add_init_kwargs=False)
class Test:
class Meta:
foo = 'bar'
def __init__(self):
assert self.get_meta().foo == 'bar'
Test()
def test_namespaciness():
@with_meta(add_init_kwargs=False)
class Foo:
class Meta:
foo = {'bar': 17}
class Bar(Foo):
class Meta:
foo = {'baz': 42}
assert Bar().get_meta() == Namespace(
foo__bar=17,
foo__baz=42,
)
def test_namespaciness_override():
@with_meta()
class Foo:
class Meta:
foo = {'bar': 17}
@dispatch
def __init__(self, **kwargs):
self.kwargs = kwargs
assert Foo(foo__baz=42).kwargs == Namespace(
foo__bar=17,
foo__baz=42,
)
def test_semantics_after_none_from_meta():
@with_meta
class MyForm:
class Meta:
actions = None
@dispatch
def __init__(self, **kwargs):
self.kwargs = kwargs
form = MyForm(actions__magic__display_name="A magic button")
assert form.kwargs == Namespace(actions__magic__display_name="A magic button")
def test_none_semantics_over_meta():
@with_meta
class MyForm:
class Meta:
actions__magic__display_name = "A magic button"
@dispatch
def __init__(self, **kwargs):
self.kwargs = kwargs
form = MyForm(actions=None)
assert form.kwargs == Namespace(actions=None)
def test_dispatch_semantics_after_none_from_meta():
@with_meta
class MyForm:
class Meta:
actions = None
@dispatch(
actions__magic__display_name="A magic button",
)
def __init__(self, **kwargs):
self.kwargs = kwargs
form = MyForm()
assert form.kwargs == Namespace(actions=None)
def test_dispatch_none_semantics_after_meta():
@with_meta
class MyForm:
class Meta:
actions__magic__display_name = "A magic button"
@dispatch(
actions=None,
)
def __init__(self, **kwargs):
self.kwargs = kwargs
form = MyForm()
assert form.kwargs == Namespace(actions__magic__display_name="A magic button")
def test_dispatch_none_semantics_after_superclass_meta():
@with_meta
class MyForm:
class Meta:
actions__magic__display_name = "A magic button"
def __init__(self, **kwargs):
super().__init__(**kwargs)
class SubForm(MyForm):
@dispatch(
actions=None,
)
def __init__(self, **kwargs):
self.kwargs = kwargs
form = SubForm()
assert form.kwargs == Namespace(actions=None)
def test_dispatch_semantics_after_none_superclass_meta():
@with_meta
class MyForm:
class Meta:
actions = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
class SubForm(MyForm):
@dispatch(
actions__magic__display_name="A magic button",
)
def __init__(self, **kwargs):
self.kwargs = kwargs
form = SubForm()
assert form.kwargs == Namespace(actions__magic__display_name="A magic button")
def test_meta_staticmethod():
@with_meta
class Foo:
class Meta:
@staticmethod
def foo(bar):
return bar
def __init__(self, **_):
pass
assert Foo().get_meta().foo(17) == 17
def METHOD_NAME():
class TestMetaMixin:
foo = 'bar'
@with_meta
class Test:
class Meta(TestMetaMixin):
pass
def __init__(self, foo):
assert foo == 'bar'
# noinspection PyArgumentList
Test()
def test_meta_inheritance_with_override():
class TestMetaMixin:
foo = 'bar'
@with_meta
class Test:
class Meta(TestMetaMixin):
foo = 'baz'
def __init__(self, foo):
assert foo == 'baz'
# noinspection PyArgumentList
Test() |
send payment | # Copyright 2017 Akretion
# @author Raphaël Valyi <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
import logging
from odoo import fields, models
from ..constants.br_cobranca import DICT_BRCOBRANCA_CURRENCY, get_brcobranca_bank
_logger = logging.getLogger(__name__)
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
    # Technical field used to look up the account.move.line for
    # reconciliation; for the settlement line it is filled with the
    # Nosso Número, in the other cases with the Document Number field.
    # TODO: Is there a way to do this without this field? Or an existing
    # field that could be reused instead of creating a new one?
cnab_returned_ref = fields.Char(string="CNAB Returned Reference", copy=False)
# see the list of brcobranca boleto fields:
# https://github.com/kivanio/brcobranca/blob/master/lib/
# brcobranca/boleto/base.rb
# and test a here:
# https://github.com/kivanio/brcobranca/blob/master/spec/
# brcobranca/boleto/itau_spec.rb
def METHOD_NAME(self):
# super(AccountMoveLine, self).send_payment()
wrapped_boleto_list = []
for move_line in self:
bank_account_id = move_line.payment_mode_id.fixed_journal_id.bank_account_id
bank_name_brcobranca = get_brcobranca_bank(
bank_account_id, move_line.payment_mode_id.payment_method_code
)
boleto_cnab_api_data = {
"bank": bank_name_brcobranca[0],
"valor": str("%.2f" % move_line.debit),
"cedente": move_line.company_id.partner_id.legal_name,
"cedente_endereco": (move_line.company_id.partner_id.street_name or "")
+ " "
+ (move_line.company_id.partner_id.street_number or "")
+ ", "
+ (move_line.company_id.partner_id.district or "")
+ ", "
+ (move_line.company_id.partner_id.city_id.name or "")
+ " - "
+ (move_line.company_id.partner_id.state_id.code or "")
+ " "
+ ("CEP:" + move_line.company_id.partner_id.zip or ""),
"documento_cedente": move_line.company_id.cnpj_cpf,
"sacado": move_line.partner_id.legal_name,
"sacado_documento": move_line.partner_id.cnpj_cpf,
"agencia": bank_account_id.bra_number,
"conta_corrente": bank_account_id.acc_number,
"convenio": move_line.payment_mode_id.code_convetion,
"carteira": str(move_line.payment_mode_id.boleto_wallet),
"nosso_numero": int(
"".join(i for i in move_line.own_number if i.isdigit())
),
"documento_numero": move_line.document_number,
"data_vencimento": move_line.date_maturity.strftime("%Y/%m/%d"),
"data_documento": move_line.move_id.invoice_date.strftime("%Y/%m/%d"),
"especie": move_line.payment_mode_id.boleto_species,
"moeda": DICT_BRCOBRANCA_CURRENCY["R$"],
"aceite": move_line.payment_mode_id.boleto_accept,
"sacado_endereco": (move_line.partner_id.street_name or "")
+ " "
+ (move_line.partner_id.street_number or "")
+ ", "
+ (move_line.partner_id.district or "")
+ ", "
+ (move_line.partner_id.city_id.name or "")
+ " - "
+ (move_line.partner_id.state_id.code or "")
+ " "
+ ("CEP:" + move_line.partner_id.zip or ""),
"data_processamento": move_line.move_id.invoice_date.strftime(
"%Y/%m/%d"
),
"instrucao1": move_line.payment_mode_id.instructions or "",
}
            # Interest instruction
if move_line.payment_mode_id.boleto_interest_perc > 0.0:
valor_juros = move_line.currency_id.round(
move_line.debit
* ((move_line.payment_mode_id.boleto_interest_perc / 100) / 30),
)
instrucao_juros = (
"APÓS VENCIMENTO COBRAR PERCENTUAL"
+ " DE %s %% AO MÊS ( R$ %s AO DIA )"
% (
(
"%.2f" % move_line.payment_mode_id.boleto_interest_perc
).replace(".", ","),
("%.2f" % valor_juros).replace(".", ","),
)
)
boleto_cnab_api_data.update(
{
"instrucao3": instrucao_juros,
}
)
            # Late-payment fine instruction
if move_line.payment_mode_id.boleto_fee_perc > 0.0:
valor_multa = move_line.currency_id.round(
move_line.debit * (move_line.payment_mode_id.boleto_fee_perc / 100),
)
instrucao_multa = (
"APÓS VENCIMENTO COBRAR MULTA"
+ " DE %s %% ( R$ %s )"
% (
("%.2f" % move_line.payment_mode_id.boleto_fee_perc).replace(
".", ","
),
("%.2f" % valor_multa).replace(".", ","),
)
)
boleto_cnab_api_data.update(
{
"instrucao4": instrucao_multa,
}
)
            # Discount instruction
if move_line.boleto_discount_perc > 0.0:
valor_desconto = move_line.currency_id.round(
move_line.debit * (move_line.boleto_discount_perc / 100),
)
instrucao_desconto_vencimento = (
"CONCEDER DESCONTO DE" + " %s %% "
"ATÉ O VENCIMENTO EM %s ( R$ %s )"
% (
("%.2f" % move_line.boleto_discount_perc).replace(".", ","),
move_line.date_maturity.strftime("%d/%m/%Y"),
("%.2f" % valor_desconto).replace(".", ","),
)
)
boleto_cnab_api_data.update(
{
"instrucao5": instrucao_desconto_vencimento,
}
)
bank_account = move_line.payment_mode_id.fixed_journal_id.bank_account_id
if bank_account_id.bank_id.code_bc in ("021", "004"):
boleto_cnab_api_data.update(
{
"digito_conta_corrente": bank_account.acc_number_dig,
}
)
# Fields used in Sicredi and Sicoob Banks
if bank_account_id.bank_id.code_bc in ("748", "756"):
boleto_cnab_api_data.update(
{
"byte_idt": move_line.payment_mode_id.boleto_byte_idt,
"posto": move_line.payment_mode_id.boleto_post,
}
)
            # Field used by Unicred
if bank_account_id.bank_id.code_bc == "136":
boleto_cnab_api_data.update(
{
"conta_corrente_dv": bank_account.acc_number_dig,
}
)
wrapped_boleto_list.append(boleto_cnab_api_data)
return wrapped_boleto_list |
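    # --- Editor's note (illustrative only, not part of the original module).
    # METHOD_NAME returns one dict per move line, ready to be posted to the
    # brcobranca boleto API. All values below are placeholders, not real
    # bank data:
    #
    #   {
    #       "bank": "itau",
    #       "valor": "150.00",
    #       "cedente": "My Company Ltda",
    #       "documento_cedente": "12.345.678/0001-90",
    #       "sacado": "Customer Name",
    #       "agencia": "1234",
    #       "conta_corrente": "56789",
    #       "carteira": "109",
    #       "nosso_numero": 12345,
    #       "documento_numero": "INV/2023/0001",
    #       "data_vencimento": "2023/12/31",
    #       "instrucao1": "...",
    #   }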
create upstream mcp | import datetime
from datahub.emitter.mce_builder import (
make_data_platform_urn,
make_dataset_urn,
make_data_job_urn_with_flow,
make_data_flow_urn,
)
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter
from datahub.metadata.com.linkedin.pegasus2avro.dataset import UpstreamLineage
from datahub.metadata.schema_classes import (
AuditStampClass,
ChangeTypeClass,
DatasetLineageTypeClass,
DatasetPropertiesClass,
DataFlowInfoClass,
DataJobInputOutputClass,
DataJobInfoClass,
EdgeClass,
MySqlDDLClass,
SchemaFieldClass,
SchemaMetadataClass,
UpstreamClass,
)
from typing import List
from tests.setup.lineage.constants import (
DATASET_ENTITY_TYPE,
DATA_JOB_ENTITY_TYPE,
DATA_FLOW_ENTITY_TYPE,
DATA_FLOW_INFO_ASPECT_NAME,
DATA_JOB_INFO_ASPECT_NAME,
DATA_JOB_INPUT_OUTPUT_ASPECT_NAME,
)
from tests.setup.lineage.helper_classes import (
Dataset,
Pipeline,
)
def create_node(dataset: Dataset) -> List[MetadataChangeProposalWrapper]:
mcps: List[MetadataChangeProposalWrapper] = []
dataset_urn = make_dataset_urn(platform=dataset.platform, name=dataset.id)
data_platform_urn = make_data_platform_urn(dataset.platform)
print(dataset)
print(dataset_urn)
dataset_properties = DatasetPropertiesClass(
name=dataset.id.split(".")[-1],
)
mcps.append(
MetadataChangeProposalWrapper(
entityType=DATASET_ENTITY_TYPE,
entityUrn=dataset_urn,
changeType=ChangeTypeClass.UPSERT,
aspectName="datasetProperties",
aspect=dataset_properties,
)
)
dataset_schema = SchemaMetadataClass(
schemaName="schema",
platform=data_platform_urn,
version=0,
hash="",
platformSchema=MySqlDDLClass(tableSchema=""),
fields=[
SchemaFieldClass(fieldPath=f.name, type=f.type, nativeDataType=str(f.type))
for f in dataset.schema_metadata
]
if dataset.schema_metadata
else [],
)
mcps.append(
MetadataChangeProposalWrapper(
entityType=DATASET_ENTITY_TYPE,
entityUrn=dataset_urn,
changeType=ChangeTypeClass.UPSERT,
aspectName="schemaMetadata",
aspect=dataset_schema,
)
)
return mcps
def create_edge(
source_urn: str,
destination_urn: str,
created_timestamp_millis: int,
updated_timestamp_millis: int,
) -> EdgeClass:
created_audit_stamp: AuditStampClass = AuditStampClass(
time=created_timestamp_millis, actor="urn:li:corpuser:unknown"
)
updated_audit_stamp: AuditStampClass = AuditStampClass(
time=updated_timestamp_millis, actor="urn:li:corpuser:unknown"
)
return EdgeClass(
sourceUrn=source_urn,
destinationUrn=destination_urn,
created=created_audit_stamp,
lastModified=updated_audit_stamp,
)
def create_nodes_and_edges(
airflow_dag: Pipeline,
) -> List[MetadataChangeProposalWrapper]:
mcps = []
data_flow_urn = make_data_flow_urn(
orchestrator=airflow_dag.platform, flow_id=airflow_dag.name
)
data_flow_info = DataFlowInfoClass(name=airflow_dag.name)
mcps.append(
MetadataChangeProposalWrapper(
entityType=DATA_FLOW_ENTITY_TYPE,
changeType=ChangeTypeClass.UPSERT,
entityUrn=data_flow_urn,
aspectName=DATA_FLOW_INFO_ASPECT_NAME,
aspect=data_flow_info,
)
)
for task in airflow_dag.tasks:
data_job_urn = make_data_job_urn_with_flow(
flow_urn=data_flow_urn, job_id=task.name
)
data_job_info = DataJobInfoClass(
name=task.name,
type="SnapshotETL",
flowUrn=data_flow_urn,
)
mcps.append(
MetadataChangeProposalWrapper(
entityType=DATA_JOB_ENTITY_TYPE,
changeType=ChangeTypeClass.UPSERT,
entityUrn=data_job_urn,
aspectName=DATA_JOB_INFO_ASPECT_NAME,
aspect=data_job_info,
)
)
data_job_io = DataJobInputOutputClass(
inputDatasets=[],
outputDatasets=[],
inputDatasetEdges=task.upstream_edges,
outputDatasetEdges=task.downstream_edges,
)
mcps.append(
MetadataChangeProposalWrapper(
entityType=DATA_JOB_ENTITY_TYPE,
changeType=ChangeTypeClass.UPSERT,
entityUrn=data_job_urn,
aspectName=DATA_JOB_INPUT_OUTPUT_ASPECT_NAME,
aspect=data_job_io,
)
)
return mcps
def create_upstream_edge(
upstream_entity_urn: str,
created_timestamp_millis: int,
updated_timestamp_millis: int,
):
created_audit_stamp: AuditStampClass = AuditStampClass(
time=created_timestamp_millis, actor="urn:li:corpuser:unknown"
)
updated_audit_stamp: AuditStampClass = AuditStampClass(
time=updated_timestamp_millis, actor="urn:li:corpuser:unknown"
)
upstream: UpstreamClass = UpstreamClass(
dataset=upstream_entity_urn,
type=DatasetLineageTypeClass.TRANSFORMED,
auditStamp=updated_audit_stamp,
created=created_audit_stamp,
)
return upstream
def METHOD_NAME(
entity_type: str,
entity_urn: str,
upstreams: List[UpstreamClass],
timestamp_millis: int,
run_id: str = "",
) -> MetadataChangeProposalWrapper:
print(f"Creating upstreamLineage aspect for {entity_urn}")
timestamp_millis: int = int(datetime.datetime.now().timestamp() * 1000)
mcp = MetadataChangeProposalWrapper(
entityType=entity_type,
entityUrn=entity_urn,
changeType=ChangeTypeClass.UPSERT,
aspectName="upstreamLineage",
aspect=UpstreamLineage(upstreams=upstreams),
systemMetadata={
"lastObserved": timestamp_millis,
"runId": run_id,
},
)
return mcp
def emit_mcps(
emitter: DatahubRestEmitter, mcps: List[MetadataChangeProposalWrapper]
) -> None:
for mcp in mcps:
emitter.emit_mcp(mcp) |
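def _example_emit_upstream_lineage(gms_server="http://localhost:8080"):
    """Editor's sketch (not part of the original helpers): link two
    hypothetical hive datasets and push the resulting upstreamLineage aspect
    to a DataHub instance assumed to be reachable at gms_server."""
    now_millis = int(datetime.datetime.now().timestamp() * 1000)
    upstream = create_upstream_edge(
        make_dataset_urn(platform="hive", name="db.upstream_table"),
        created_timestamp_millis=now_millis,
        updated_timestamp_millis=now_millis,
    )
    lineage_mcp = METHOD_NAME(
        entity_type=DATASET_ENTITY_TYPE,
        entity_urn=make_dataset_urn(platform="hive", name="db.downstream_table"),
        upstreams=[upstream],
        timestamp_millis=now_millis,
    )
    emit_mcps(DatahubRestEmitter(gms_server=gms_server), [lineage_mcp])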
test inference no head | # Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTester with Roberta->RobertaPreLayerNorm
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_attention_mask=True,
use_token_type_ids=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=37,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
initializer_range=0.02,
num_choices=4,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_token_type_ids = use_token_type_ids
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.initializer_range = initializer_range
self.num_choices = num_choices
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
token_type_ids = None
if self.use_token_type_ids:
token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
config = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
is_decoder=False,
initializer_range=self.initializer_range,
)
return config, input_ids, token_type_ids, attention_mask
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def prepare_config_and_inputs_for_decoder(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, token_type_ids, attention_mask = config_and_inputs
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
test_head_masking = True
all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def setUp(self):
self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
def test_model_from_pretrained(self):
for model_class_name in self.all_model_classes:
model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
outputs = model(np.ones((1, 1)))
self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
@slow
def test_inference_masked_lm(self):
model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
output = model(input_ids)[0]
expected_shape = [1, 11, 50265]
self.assertEqual(list(output.shape), expected_shape)
# compare the actual values for a slice.
EXPECTED_SLICE = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
)
self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4))
@slow
def METHOD_NAME(self):
model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
output = model(input_ids)[0]
# compare the actual values for a slice.
EXPECTED_SLICE = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
)
self.assertTrue(np.allclose(output[:, :3, :3], EXPECTED_SLICE, atol=1e-4)) |
is url scheme registered | import os
import pathlib
import subprocess
import sys
from PySide6.QtCore import QSettings
from angrmanagement.utils.env import app_path
class AngrUrlScheme:
URL_SCHEME = "angr"
WIN_REG_PATH = "HKEY_CURRENT_USER\\Software\\Classes\\{}"
"""
    Registers and handles URL schemes with the operating system.
"""
def register_url_scheme(self):
if sys.platform.startswith("win"):
self._register_url_scheme_windows()
elif sys.platform.startswith("linux"):
self._register_url_scheme_linux()
else:
raise NotImplementedError("We currently do not support registering angr URL scheme on %s." % sys.platform)
def unregister_url_scheme(self):
if sys.platform.startswith("win"):
self._unregister_url_scheme_windows()
elif sys.platform.startswith("linux"):
self._unregister_url_scheme_linux()
else:
raise NotImplementedError("We currently do not support unregistering angr URL scheme on %s." % sys.platform)
def METHOD_NAME(self):
if sys.platform.startswith("win"):
return self._is_url_scheme_registered_windows()
elif sys.platform.startswith("linux"):
return self._is_url_scheme_registered_linux()
else:
return False, None
def is_url_scheme_supported(self):
return sys.platform.startswith("win") or sys.platform.startswith("linux")
#
# Utils
#
@staticmethod
def _angr_desktop_path():
home_dir = os.path.expanduser("~")
p = os.path.join(home_dir, ".local", "share", "applications", "angr.desktop")
return p
#
# Windows
#
def _register_url_scheme_windows(self):
app_path_ = app_path(pythonw=True)
reg_path = self.WIN_REG_PATH.format(self.URL_SCHEME)
reg = QSettings(reg_path, QSettings.NativeFormat)
reg.setValue("Default", "angr management")
reg.setValue("URL Protocol", "")
# reg.beginGroup("DefaultIcon")
# reg.setValue("Default", TODO)
# reg.endGroup()
reg.beginGroup("shell")
reg.beginGroup("open")
reg.beginGroup("command")
reg.setValue("Default", app_path_ + ' -u "%1"')
reg.endGroup()
reg.endGroup()
reg.endGroup()
def _unregister_url_scheme_windows(self):
reg_path = self.WIN_REG_PATH.format(self.URL_SCHEME)
reg = QSettings(reg_path, QSettings.NativeFormat)
reg.remove("")
def _is_url_scheme_registered_windows(self):
reg_path = self.WIN_REG_PATH.format(self.URL_SCHEME)
reg = QSettings(reg_path, QSettings.NativeFormat)
if reg.contains("Default"):
reg.beginGroup("shell")
reg.beginGroup("open")
reg.beginGroup("command")
if reg.contains("Default"):
return True, reg.value("Default")
return False, None
#
# Linux
#
def _register_url_scheme_linux(self):
cmd_0 = ["xdg-mime", "default", "angr.desktop", f"x-scheme-handler/{self.URL_SCHEME}"]
# test if xdg-mime is available
retcode = subprocess.call(["xdg-mime"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if retcode != 1:
raise FileNotFoundError("xdg-mime is not installed.")
retcode = subprocess.call(["xdg-mime", "--help"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if retcode != 0:
raise FileNotFoundError("xdg-mime is not installed.")
# extract angr.desktop
angr_desktop = """[Desktop Entry]
Comment=angr management
Exec={app_path} -u %U
Hidden=true
Name=angr management
Terminal=false
MimeType=x-scheme-handler/{url_scheme};
Type=Application
"""
angr_desktop_path = self._angr_desktop_path()
angr_desktop_base = os.path.dirname(angr_desktop_path)
pathlib.Path(angr_desktop_base).mkdir(parents=True, exist_ok=True)
with open(angr_desktop_path, "w") as f:
f.write(angr_desktop.format(app_path=app_path(), url_scheme=self.URL_SCHEME))
# register the scheme
retcode = subprocess.call(cmd_0)
if retcode != 0:
raise ValueError('Failed to setup the URL scheme. Command "%s" failed.' % " ".join(cmd_0))
def _unregister_url_scheme_linux(self):
angr_desktop_path = self._angr_desktop_path()
if os.path.isfile(angr_desktop_path):
os.unlink(angr_desktop_path)
def _is_url_scheme_registered_linux(self):
# angr.desktop
angr_desktop_path = self._angr_desktop_path()
if not os.path.isfile(angr_desktop_path):
return False, None
# is xdg-mime available
retcode = subprocess.call(["xdg-mime"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if retcode != 1:
return False, None
# xdg-mine query
proc = subprocess.Popen(
["xdg-mime", "query", "default", f"x-scheme-handler/{self.URL_SCHEME}"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, _ = proc.communicate()
if not stdout:
return False, None
# Load Exec=
with open(angr_desktop_path) as f:
data = f.read()
lines = data.split("\n")
cmdline = None
for line in lines:
if line.startswith("Exec="):
cmdline = line[5:]
break
if cmdline is None:
return False, None
return True, cmdline |
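def _example_ensure_scheme_registered():
    """Editor's sketch (not part of the original module): on supported
    platforms, register the angr:// URL scheme if it is not registered yet
    and return the configured handler command line (or None)."""
    scheme = AngrUrlScheme()
    if not scheme.is_url_scheme_supported():
        return None
    registered, handler = scheme.METHOD_NAME()
    if not registered:
        scheme.register_url_scheme()
        registered, handler = scheme.METHOD_NAME()
    return handler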
set status led | #!/usr/bin/env python
########################################################################
# DellEMC S5212F
#
# Module contains an implementation of SONiC Platform Base API and
# provides the PSUs' information which are available in the platform
#
########################################################################
try:
from sonic_platform_base.psu_base import PsuBase
from sonic_platform.ipmihelper import IpmiSensor, IpmiFru
from sonic_platform.fan import Fan
except ImportError as e:
raise ImportError(str(e) + "- required module not found")
switch_sku = {
"0K6MG9":('AC', 'exhaust'),
"0GKK8W":('AC', 'intake'),
"0VK93C":('AC', 'exhaust'),
"05JHDM":('AC', 'intake'),
"0D72R7":('AC', 'exhaust'),
"02PC9F":('AC', 'exhaust'),
"0JM5DX":('AC', 'intake'),
"0TPDP8":('AC', 'exhaust'),
"0WND1V":('AC', 'exhaust'),
"05672M":('DC', 'intake'),
"0CJV4K":('DC', 'intake'),
"0X41RN":('AC', 'exhaust'),
"0Y3N82":('AC', 'intake'),
"0W4CMG":('DC', 'exhaust'),
"04T94Y":('DC', 'intake')
}
class Psu(PsuBase):
"""DellEMC Platform-specific PSU class"""
# { PSU-ID: { Sensor-Name: Sensor-ID } }
SENSOR_MAPPING = { 1: { "State": 0x31, "Current": 0x39,
"Power": 0x37, "Voltage": 0x38 },
2: { "State": 0x32, "Current": 0x3F,
"Power": 0x3D, "Voltage": 0x3E } }
# ( PSU-ID: FRU-ID }
FRU_MAPPING = { 1: 0, 2: 0 }
def __init__(self, psu_index):
PsuBase.__init__(self)
# PSU is 1-based in DellEMC platforms
self.index = psu_index + 1
self.state_sensor = IpmiSensor(self.SENSOR_MAPPING[self.index]["State"],
is_discrete=True)
self.voltage_sensor = IpmiSensor(self.SENSOR_MAPPING[self.index]["Voltage"])
self.current_sensor = IpmiSensor(self.SENSOR_MAPPING[self.index]["Current"])
self.power_sensor = IpmiSensor(self.SENSOR_MAPPING[self.index]["Power"])
self.fru = IpmiFru(self.FRU_MAPPING[self.index])
self._fan_list.append(Fan(fan_index=self.index, psu_fan=True,
dependency=self))
def get_name(self):
"""
Retrieves the name of the device
Returns:
string: The name of the device
"""
return "PSU{}".format(self.index)
def get_presence(self):
"""
Retrieves the presence of the Power Supply Unit (PSU)
Returns:
bool: True if PSU is present, False if not
"""
presence = False
is_valid, state = self.state_sensor.get_reading()
if is_valid:
if (state & 0b1) == 1:
presence = True
return presence
def get_temperature(self):
"""
Retrieves current temperature reading from thermal
Returns:
            A float number of current temperature in Celsius up to
            nearest thousandth of one degree Celsius, e.g. 30.125
"""
return 0.0
def get_model(self):
"""
Retrieves the part number of the PSU
Returns:
string: Part number of PSU
"""
return self.fru.get_board_part_number()
def get_serial(self):
"""
Retrieves the serial number of the PSU
Returns:
string: Serial number of PSU
"""
return self.fru.get_board_serial()
def get_revision(self):
"""
        Retrieves the hardware revision of the device
Returns:
String: revision value of device
"""
serial = self.fru.get_board_serial()
if serial != "NA" and len(serial) == 23:
return serial[-3:]
else:
return "NA"
def is_replaceable(self):
"""
Indicate whether this PSU is replaceable.
Returns:
bool: True if it is replaceable.
"""
return False
def get_status(self):
"""
Retrieves the operational status of the PSU
Returns:
bool: True if PSU is operating properly, False if not
"""
status = False
is_valid, state = self.state_sensor.get_reading()
if is_valid:
if (state == 0x01):
status = True
return status
def get_voltage(self):
"""
Retrieves current PSU voltage output
Returns:
A float number, the output voltage in volts,
e.g. 12.1
"""
return 0.0
def get_current(self):
"""
Retrieves present electric current supplied by PSU
Returns:
A float number, electric current in amperes,
e.g. 15.4
"""
return 0.0
def get_power(self):
"""
Retrieves current energy supplied by PSU
Returns:
A float number, the power in watts,
e.g. 302.6
"""
return 0.0
def get_input_voltage(self):
"""
Retrieves current PSU voltage input
Returns:
A float number, the input voltage in volts,
e.g. 12.1
"""
return 0.0
def get_input_current(self):
"""
Retrieves present electric current supplied to PSU
Returns:
A float number, electric current in amperes,
e.g. 15.4
"""
return 0.0
def get_input_power(self):
"""
Retrieves current energy supplied to PSU
Returns:
A float number, the power in watts,
e.g. 302.6
"""
return 0.0
def get_powergood_status(self):
"""
Retrieves the powergood status of PSU
Returns:
A boolean, True if PSU has stablized its output voltages and
passed all its internal self-tests, False if not.
"""
status = False
is_valid, state = self.state_sensor.get_reading()
if is_valid:
if (state == 0x01):
status = True
return status
def get_mfr_id(self):
"""
        Retrieves the manufacturer ID of the PSU
        Returns:
            A string, the manufacturer ID.
"""
return self.fru.get_board_mfr_id()
def get_type(self):
"""
        Retrieves the power type of the PSU
        Returns:
A string, PSU power type
"""
board_product = self.fru.get_board_product()
        if board_product is not None:
            info = board_product.split(',')
            if 'AC' in info:
                return 'AC'
            if 'DC' in info:
                return 'DC'
return None
def get_position_in_parent(self):
"""
Retrieves 1-based relative physical position in parent device.
Returns:
integer: The 1-based relative physical position in parent
device or -1 if cannot determine the position
"""
return self.index
def get_voltage_low_threshold(self):
"""
Retrieves the low threshold PSU voltage output
Returns:
A float number, the low threshold output voltage in volts,
e.g. 12.1
"""
return 0.0
def get_voltage_high_threshold(self):
"""
Returns PSU high threshold in Volts
"""
return 0.0
def get_maximum_supplied_power(self):
"""
Retrieves the maximum supplied power by PSU
Returns:
A float number, the maximum power output in Watts.
e.g. 1200.1
"""
return float(750)
def METHOD_NAME(self, color):
"""
Sets the state of the PSU status LED
Args:
color: A string representing the color with which to set the PSU status LED
Note: Only support green and off
Returns:
bool: True if status LED state is set successfully, False if not
"""
# Hardware not supported
return False |
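def _example_psu_summary():
    """Editor's sketch (not part of the original platform module): collect a
    small status summary for both PSUs of this platform. The constructor is
    0-based, while get_name() reports the 1-based identifier."""
    return [
        {
            "name": psu.get_name(),
            "present": psu.get_presence(),
            "ok": psu.get_powergood_status(),
            "serial": psu.get_serial(),
        }
        for psu in (Psu(0), Psu(1))
    ]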
plot3d | #Copyright (c) 2008 Erik Tollerud ([email protected])
import numpy as np
class Pca:
"""
A basic class for Principal Component Analysis (PCA).
p is the number of dimensions, while N is the number of data points
"""
_colors=('r','g','b','c','y','m','k') #defaults
def __calc(self):
A = self.A
M=A-np.mean(A,axis=0)
N=M/np.std(M,axis=0)
self.M = M
self.N = N
self._eig = None
def __init__(self,data,names=None):
"""
p X N matrix input
"""
A = np.array(data).T
n,p = A.shape
self.n,self.p = n,p
if p > n:
from warnings import warn
warn('p > n - intentional?', RuntimeWarning)
self.A = A
self._origA=A.copy()
self.__calc()
self._colors= np.tile(self._colors,int((p-1)/len(self._colors))+1)[:p]
if names is not None and len(names) != p:
raise ValueError('names must match data dimension')
self.names = None if names is None else tuple([str(x) for x in names])
def getCovarianceMatrix(self):
"""
returns the covariance matrix for the dataset
"""
return np.cov(self.N.T)
def getEigensystem(self):
"""
returns a tuple of (eigenvalues,eigenvectors) for the data set.
"""
if self._eig is None:
res = np.linalg.eig(self.getCovarianceMatrix())
sorti=np.argsort(res[0])[::-1]
res=(res[0][sorti],res[1][:,sorti])
self._eig=res
return self._eig
def getEigenvalues(self):
return self.getEigensystem()[0]
def getEigenvectors(self):
return self.getEigensystem()[1]
    def getEnergies(self):
        """
        "energies" are just the normalized eigenvalues
        """
v=self.getEigenvalues()
return v/np.sum(v)
def plot2d(self,ix=0,iy=1,clf=True):
"""
        Generates a 2-dimensional plot of the data set and principal components
using matplotlib.
ix specifies which p-dimension to put on the x-axis of the plot
and iy specifies which to put on the y-axis (0-indexed)
"""
import matplotlib.pyplot as plt
x,y=self.N[:,ix],self.N[:,iy]
if clf:
plt.clf()
plt.scatter(x,y)
vals,evs=self.getEigensystem()
#evx,evy=evs[:,ix],evs[:,iy]
xl,xu=plt.xlim()
yl,yu=plt.ylim()
dx,dy=(xu-xl),(yu-yl)
for val,vec,c in zip(vals,evs.T,self._colors):
plt.arrow(0,0,val*vec[ix],val*vec[iy],head_width=0.05*(dx*dy/4)**0.5,fc=c,ec=c)
#plt.arrow(0,0,vals[ix]*evs[ix,ix],vals[ix]*evs[iy,ix],head_width=0.05*(dx*dy/4)**0.5,fc='g',ec='g')
#plt.arrow(0,0,vals[iy]*evs[ix,iy],vals[iy]*evs[iy,iy],head_width=0.05*(dx*dy/4)**0.5,fc='r',ec='r')
if self.names is not None:
plt.xlabel('$'+self.names[ix]+'/\\sigma$')
plt.ylabel('$'+self.names[iy]+'/\\sigma$')
def METHOD_NAME(self,ix=0,iy=1,iz=2,clf=True):
"""
        Generates a 3-dimensional plot of the data set and principal components
using mayavi.
ix, iy, and iz specify which of the input p-dimensions to place on each of
the x,y,z axes, respectively (0-indexed).
"""
import enthought.mayavi.mlab as M
if clf:
M.clf()
z3=np.zeros(3)
v=(self.getEigenvectors()*self.getEigenvalues())
M.quiver3d(z3,z3,z3,v[ix],v[iy],v[iz],scale_factor=5)
M.points3d(self.N[:,ix],self.N[:,iy],self.N[:,iz],scale_factor=0.3)
if self.names:
M.axes(xlabel=self.names[ix]+'/sigma',ylabel=self.names[iy]+'/sigma',zlabel=self.names[iz]+'/sigma')
else:
M.axes()
def sigclip(self,sigs):
"""
clips out all data points that are more than a certain number
of standard deviations from the mean.
sigs can be either a single value or a length-p sequence that
specifies the number of standard deviations along each of the
p dimensions.
"""
if np.isscalar(sigs):
sigs=sigs*np.ones(self.N.shape[1])
sigs = sigs*np.std(self.N,axis=1)
n = self.N.shape[0]
m = np.all(np.abs(self.N) < sigs,axis=1)
self.A=self.A[m]
self.__calc()
return n-sum(m)
def reset(self):
self.A = self._origA.copy()
self.__calc()
def project(self,vals=None,enthresh=None,nPCs=None,cumen=None):
"""
projects the normalized values onto the components
enthresh, nPCs, and cumen determine how many PCs to use
if vals is None, the normalized data vectors are the values to project.
        Otherwise, it should be convertible to a p x N array
returns n,p(>threshold) dimension array
"""
nonnones = sum([e is not None for e in (enthresh, nPCs, cumen)])
if nonnones == 0:
m = slice(None)
elif nonnones > 1:
raise ValueError("cannot specify more than one threshold")
else:
if enthresh is not None:
                m = self.getEnergies() > enthresh
elif nPCs is not None:
m = slice(None,nPCs)
elif cumen is not None:
                m = np.cumsum(self.getEnergies()) < cumen
else:
raise RuntimeError('Should be unreachable')
if vals is None:
vals = self.N.T
else:
vals = np.array(vals,copy=False)
if self.N.T.shape[0] != vals.shape[0]:
raise ValueError("shape for vals does not match")
proj = np.matrix(self.getEigenvectors()).T*vals
return proj[m].T
def deproject(self,A,normed=True):
"""
input is an n X q array, where q <= p
output is p X n
"""
A=np.atleast_2d(A)
n,q = A.shape
p = self.A.shape[1]
if q > p :
raise ValueError("q > p")
evinv=np.linalg.inv(np.matrix(self.getEigenvectors()).T)
zs = np.zeros((n,p))
zs[:,:q]=A
proj = evinv*zs.T
if normed:
return np.array(proj.T).T
else:
mns=np.mean(self.A,axis=0)
sds=np.std(self.M,axis=0)
return (np.array(proj.T)*sds+mns).T
def subtractPC(self,pc,vals=None):
"""
        pc can be a scalar or any sequence of pc indices
if vals is None, the source data is self.A, else whatever is in vals
(which must be p x m)
"""
if vals is None:
vals = self.A
else:
vals = vals.T
if vals.shape[1]!= self.A.shape[1]:
raise ValueError("vals do not have the correct number of components")
pcs=self.project()
zpcs=np.zeros_like(pcs)
zpcs[:,pc]=pcs[:,pc]
upc=self.deproject(zpcs,False)
A = vals.T-upc
B = A.T*np.std(self.M,axis=0)
return B+np.mean(self.A,axis=0) |
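def _example_pca():
    """Editor's sketch (synthetic data, not part of the original module):
    build a 3-dimensional, 200-point data set with correlated dimensions and
    project it onto its two strongest principal components."""
    rng = np.random.RandomState(0)
    data = rng.randn(3, 200)            # p x N, as the constructor expects
    data[1] += 0.8 * data[0]            # correlate dimension 1 with dimension 0
    pca = Pca(data, names=['x', 'y', 'z'])
    return pca.project(nPCs=2)          # 200 x 2 matrix of PC scores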
pick hue | # Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
import colorsys
import random
__all__ = ['RandomColor', 'rand_color']
COLORMAP = {
'blue': {
'hue_range': [179, 257],
'lower_bounds': [[20, 100], [30, 86], [40, 80], [50, 74], [60, 60],
[70, 52], [80, 44], [90, 39], [100, 35]]
},
'green': {
'hue_range': [63, 178],
'lower_bounds': [[30, 100], [40, 90], [50, 85], [60, 81], [70, 74],
[80, 64], [90, 50], [100, 40]]
},
'monochrome': {
'hue_range': [0, 0],
'lower_bounds': [[0, 0], [100, 0]]
},
'orange': {
'hue_range': [19, 46],
'lower_bounds': [[20, 100], [30, 93], [40, 88], [50, 86], [60, 85],
[70, 70], [100, 70]]
},
'pink': {
'hue_range': [283, 334],
'lower_bounds': [[20, 100], [30, 90], [40, 86], [60, 84], [80, 80],
[90, 75], [100, 73]]
},
'purple': {
'hue_range': [258, 282],
'lower_bounds': [[20, 100], [30, 87], [40, 79], [50, 70], [60, 65],
[70, 59], [80, 52], [90, 45], [100, 42]]
},
'red': {
'hue_range': [-26, 18],
'lower_bounds': [[20, 100], [30, 92], [40, 89], [50, 85], [60, 78],
[70, 70], [80, 60], [90, 55], [100, 50]]
},
'yellow': {
'hue_range': [47, 62],
'lower_bounds': [[25, 100], [40, 94], [50, 89], [60, 86], [70, 84],
[80, 82], [90, 80], [100, 75]]
}
}
class RandomColor(object):
def __init__(self, seed=None):
self.colormap = COLORMAP
self.random = random.Random(seed)
for color_name, color_attrs in self.colormap.items():
lower_bounds = color_attrs['lower_bounds']
s_min = lower_bounds[0][0]
s_max = lower_bounds[len(lower_bounds) - 1][0]
b_min = lower_bounds[len(lower_bounds) - 1][1]
b_max = lower_bounds[0][1]
self.colormap[color_name]['saturation_range'] = [s_min, s_max]
self.colormap[color_name]['brightness_range'] = [b_min, b_max]
def generate(self, hue=None, luminosity=None, count=1, format_='hex'):
colors = []
for _ in range(count):
# First we pick a hue (H)
H = self.METHOD_NAME(hue)
# Then use H to determine saturation (S)
S = self.pick_saturation(H, hue, luminosity)
# Then use S and H to determine brightness (B).
B = self.pick_brightness(H, S, luminosity)
# Then we return the HSB color in the desired format
colors.append(self.set_format([H, S, B], format_))
return colors
def METHOD_NAME(self, hue):
hue_range = self.get_hue_range(hue)
hue = self.random_within(hue_range)
        # Instead of storing red as two separate ranges,
# we group them, using negative numbers
if (hue < 0):
hue += 360
return hue
def pick_saturation(self, hue, hue_name, luminosity):
if luminosity == 'random':
return self.random_within([0, 100])
if hue_name == 'monochrome':
return 0
saturation_range = self.get_saturation_range(hue)
s_min = saturation_range[0]
s_max = saturation_range[1]
if luminosity == 'bright':
s_min = 55
elif luminosity == 'dark':
s_min = s_max - 10
elif luminosity == 'light':
s_max = 55
return self.random_within([s_min, s_max])
def pick_brightness(self, H, S, luminosity):
b_min = self.get_minimum_brightness(H, S)
b_max = 100
if luminosity == 'dark':
b_max = b_min + 20
elif luminosity == 'light':
b_min = (b_max + b_min) / 2
elif luminosity == 'random':
b_min = 0
b_max = 100
return self.random_within([b_min, b_max])
def set_format(self, hsv, format_):
if 'hsv' in format_:
color = hsv
elif 'rgb' in format_:
color = self.hsv_to_rgb(hsv)
elif 'hex' in format_:
r, g, b = self.hsv_to_rgb(hsv)
return '#%02x%02x%02x' % (r, g, b)
else:
return 'unrecognized format'
if 'Array' in format_ or format_ == 'hex':
return color
else:
prefix = format_[:3]
color_values = [str(x) for x in color]
return '%s(%s)' % (prefix, ', '.join(color_values))
def get_minimum_brightness(self, H, S):
lower_bounds = self.get_color_info(H)['lower_bounds']
for i in range(len(lower_bounds) - 1):
s1 = lower_bounds[i][0]
v1 = lower_bounds[i][1]
s2 = lower_bounds[i + 1][0]
v2 = lower_bounds[i + 1][1]
if s1 <= S <= s2:
m = (v2 - v1) / (s2 - s1)
b = v1 - m * s1
return m * S + b
return 0
def get_hue_range(self, color_input):
if color_input and color_input.isdigit():
number = int(color_input)
if 0 < number < 360:
return [number, number]
elif color_input and color_input in self.colormap:
color = self.colormap[color_input]
if 'hue_range' in color:
return color['hue_range']
else:
return [0, 360]
def get_saturation_range(self, hue):
return self.get_color_info(hue)['saturation_range']
def get_color_info(self, hue):
# Maps red colors to make picking hue easier
if 334 <= hue <= 360:
hue -= 360
for color_name, color in self.colormap.items():
if color['hue_range'] and color['hue_range'][0] <= hue <= color[
'hue_range'][1]:
return self.colormap[color_name]
# this should probably raise an exception
return 'Color not found'
def random_within(self, r):
return self.random.randint(int(r[0]), int(r[1]))
@classmethod
def hsv_to_rgb(cls, hsv):
h, s, v = hsv
h = 1 if h == 0 else h
h = 359 if h == 360 else h
h = float(h) / 360
s = float(s) / 100
v = float(v) / 100
rgb = colorsys.hsv_to_rgb(h, s, v)
return [int(c * 255) for c in rgb]
def rand_color():
generator = RandomColor()
hue = random.choice(list(COLORMAP.keys()))
color = generator.generate(hue=hue, count=1, format_='rgb')[0]
color = color[color.find('(') + 1:color.find(')')]
color = tuple([int(u) for u in color.split(',')])
return color |
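def _example_palette(count=5):
    """Editor's sketch (not part of the original module): a reproducible
    palette of light green hex colours."""
    generator = RandomColor(seed=42)
    return generator.generate(hue='green', luminosity='light', count=count,
                              format_='hex')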
internal fit | """
This file is part of the PSL software.
Copyright 2011-2015 University of Maryland
Copyright 2013-2023 The Regents of the University of California
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
import mmap
import os
import struct
import numpy
FLOAT_SIZE_BYTES = 4
INT_SIZE_BYTES = 4
class DeepModel(abc.ABC):
def __init__(self):
self._shared_file = None
self._shared_buffer = None
self._value_count = None
self._data = None
"""
Higher-level methods that are passed nicely-formatted data for implementing classes to extend.
"""
def internal_init_model(self, application, options = {}):
raise NotImplementedError("internal_init_model")
def METHOD_NAME(self, data, gradients, options = {}):
raise NotImplementedError("internal_fit")
def internal_predict(self, data, options = {}):
raise NotImplementedError("internal_predict")
def internal_eval(self, data, options = {}):
raise NotImplementedError("internal_eval")
def internal_save(self, options = {}):
raise NotImplementedError("internal_save")
"""
Low-level methods that take care of moving around data.
"""
def init_weight(self, shared_memory_path, application, options = {}):
raise NotImplementedError("init_weight")
def fit_weight(self, options = {}):
raise NotImplementedError("fit_weight")
def predict_weight(self, options = {}):
raise NotImplementedError("predict_weight")
def predict_weight_learn(self, options = {}):
raise NotImplementedError("predict_weight")
def eval_weight(self, options = {}):
raise NotImplementedError("eval_weight")
def init_predicate(self, shared_memory_path, application, options = {}):
self._shared_file = open(shared_memory_path, 'rb+')
self._shared_buffer = mmap.mmap(self._shared_file.fileno(), 0)
self._value_count = int(options['class-size'])
self._data = []
entity_argument_length = len(options['entity-argument-indexes'].split(","))
with open(os.path.join(options['relative-dir'], options['entity-data-map-path']), 'r') as file:
for row in file:
parts = row.split("\t")
data = parts[entity_argument_length:]
self._data.append([float(value) for value in data])
self._data = numpy.array(self._data)
return self.internal_init_model(application, options = options)
def fit_predicate(self, options = {}):
self._shared_buffer.seek(0)
count = self._read_int()
entity_indexes = self._read_values('>i4', count)
gradients = self._read_values('>f4', count * self._value_count).reshape((count, self._value_count))
data = numpy.array([self._data[index] for index in entity_indexes])
return self.METHOD_NAME(data, gradients, options = options)
def predict_predicate(self, options = {}):
self._predict_predicate(False, options = options)
def predict_predicate_learn(self, options = {}):
self._predict_predicate(True, options = options)
def _predict_predicate(self, learn, options = {}):
options['learn'] = learn
self._shared_buffer.seek(0)
count = self._read_int()
entity_indexes = self._read_values('>i4', count)
data = numpy.array([self._data[index] for index in entity_indexes])
predictions, response = self.internal_predict(data, options=options)
self._shared_buffer.seek(0)
self._write_int(int(options['class-size']) * len(predictions))
predictions = numpy.array(predictions, dtype = '>f4', copy = False)
self._shared_buffer.write(predictions.tobytes(order = 'C'))
return response
def eval_predicate(self, options = {}):
self._shared_buffer.seek(0)
count = self._read_int()
entity_indexes = self._read_values('>i4', count)
data = numpy.array([self._data[index] for index in entity_indexes])
return self.internal_eval(data, options=options)
def save(self, options = {}):
return self.internal_save(options=options)
"""
Helper methods.
"""
def close(self):
if self._shared_buffer is not None:
self._shared_buffer.close()
self._shared_buffer = None
if self._shared_file is not None:
self._shared_file.close()
self._shared_file = None
self._value_count = None
self._data = None
def _read_values(self, value_type, count, byte_size = INT_SIZE_BYTES):
values_buffer = self._shared_buffer.read(count * byte_size)
values_buffer = numpy.frombuffer(values_buffer, dtype = value_type, count = count)
return values_buffer
def _read_int(self):
return struct.unpack('>i', self._shared_buffer.read(INT_SIZE_BYTES))[0]
def _write_int(self, value):
        self._shared_buffer.write(struct.pack('>i', value))
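class UniformDeepModel(DeepModel):
    """Editor's sketch (not part of the original module): a minimal concrete
    model that ignores gradients and always predicts a uniform distribution
    over the configured number of classes; handy for exercising the
    shared-memory protocol implemented above."""
    def internal_init_model(self, application, options = {}):
        return {}
    def METHOD_NAME(self, data, gradients, options = {}):
        return {}
    def internal_predict(self, data, options = {}):
        predictions = numpy.full((len(data), self._value_count), 1.0 / self._value_count)
        return predictions, {}
    def internal_eval(self, data, options = {}):
        return 0.0
    def internal_save(self, options = {}):
        return {}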
user delete | # coding=utf-8
import logging
from .rest_client import AtlassianRestAPI
log = logging.getLogger(__name__)
class Crowd(AtlassianRestAPI):
"""Crowd API wrapper.
    Important to note that you will have to use application credentials,
    not user credentials, in order to access Crowd APIs"""
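    # Illustrative usage (hypothetical URL and application credentials, not end-user credentials):
    #   crowd = Crowd(url="https://crowd.example.com", username="my-app", password="app-secret")
    #   crowd.user("jdoe")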
def __init__(
self,
url,
username,
password,
timeout=60,
api_root="rest",
api_version="latest",
):
super(Crowd, self).__init__(url, username, password, timeout, api_root, api_version)
def _crowd_api_url(self, api, resource):
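        # e.g. with the default api_root="rest" and api_version="latest":
        #   _crowd_api_url("usermanagement", "user") -> "/rest/usermanagement/latest/user"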
return "/{api_root}/{api}/{version}/{resource}".format(
api_root=self.api_root,
api=api,
version=self.api_version,
resource=resource,
)
def _user_change_status(self, username, active):
"""
Change user status.
:param username: str - username
:param active: bool - True/False
:return:
"""
user = self.user(username)
user_object = {
"name": username,
"active": active,
"display-name": user.get("display-name"),
"first-name": user.get("first-name"),
"last-name": user.get("last-name"),
"email": user.get("email"),
}
params = {"username": username}
return self.put(
self._crowd_api_url("usermanagement", "user"),
params=params,
data=user_object,
)
def user(self, username):
"""
Get user information
:param username:
:return:
"""
params = {"username": username}
return self.get(self._crowd_api_url("usermanagement", "user"), params=params)
def user_activate(self, username):
"""
Activate user
:param username: str - username
"""
return self._user_change_status(username, True)
def user_create(
self,
username,
active,
first_name,
last_name,
display_name,
email,
password,
):
"""
        Create a new user
        :param username: string: username
        :param active: bool:
        :param first_name: string:
        :param last_name: string:
        :param display_name: string:
        :param email: string:
        :param password: string:
        :return:
"""
user_object = {
"name": username,
"password": {"value": password},
"active": active,
"first-name": first_name,
"last-name": last_name,
"display-name": display_name,
"email": email,
}
return self.post(self._crowd_api_url("usermanagement", "user"), data=user_object)
def user_deactivate(self, username):
"""
        Deactivate user
        :param username: str - username
        :return:
"""
return self._user_change_status(username, False)
def METHOD_NAME(self, username):
"""
Delete user
:param username: str - username
:return:
"""
params = {"username": username}
return self.delete(self._crowd_api_url("usermanagement", "user"), params=params)
def group_add_user(self, username, groupname):
"""
Add user to group
:return:
"""
data = {"name": groupname}
params = {"username": username}
return self.post(
self._crowd_api_url("usermanagement", "user/group/direct"),
params=params,
json=data,
)
def group_nested_members(self, group):
"""
Get nested members of group
:param group:
:return:
"""
params = {"groupname": group}
return self.get(self._crowd_api_url("group", "nested"), params=params)
def health_check(self):
"""
Get health status
https://confluence.atlassian.com/jirakb/how-to-retrieve-health-check-results-using-rest-api-867195158.html
:return:
"""
# check as Troubleshooting & Support Tools Plugin
response = self.get("rest/troubleshooting/1.0/check/")
if not response:
# check as support tools
response = self.get("rest/supportHealthCheck/1.0/check/")
return response
def get_plugins_info(self):
"""
Provide plugins info
        :return: a json of installed plugins
"""
url = "rest/plugins/1.0/"
return self.get(url, headers=self.no_check_headers, trailing=True)
def get_plugin_info(self, plugin_key):
"""
        Provide info for a single plugin
        :return: a json of the installed plugin
"""
url = "rest/plugins/1.0/{plugin_key}-key".format(plugin_key=plugin_key)
return self.get(url, headers=self.no_check_headers, trailing=True)
def get_plugin_license_info(self, plugin_key):
"""
        Provide plugin license info
        :return: a json of the plugin license info
"""
url = "rest/plugins/1.0/{plugin_key}-key/license".format(plugin_key=plugin_key)
return self.get(url, headers=self.no_check_headers, trailing=True)
def upload_plugin(self, plugin_path):
"""
        Upload a plugin from the given path, e.g. useful for auto deploy
:param plugin_path:
:return:
"""
files = {"plugin": open(plugin_path, "rb")}
upm_token = self.request(
method="GET",
path="rest/plugins/1.0/",
headers=self.no_check_headers,
trailing=True,
).headers["upm-token"]
url = "rest/plugins/1.0/?token={upm_token}".format(upm_token=upm_token)
return self.post(url, files=files, headers=self.no_check_headers)
def delete_plugin(self, plugin_key):
"""
Delete plugin
:param plugin_key:
:return:
"""
url = "rest/plugins/1.0/{}-key".format(plugin_key)
return self.delete(url)
def check_plugin_manager_status(self):
url = "rest/plugins/latest/safe-mode"
return self.request(method="GET", path=url, headers=self.safe_mode_headers)
def update_plugin_license(self, plugin_key, raw_license):
"""
Update license for plugin
:param plugin_key:
:param raw_license:
:return:
"""
app_headers = {
"X-Atlassian-Token": "nocheck",
"Content-Type": "application/vnd.atl.plugins+json",
}
url = "/plugins/1.0/{plugin_key}/license".format(plugin_key=plugin_key)
data = {"rawLicense": raw_license}
return self.put(url, data=data, headers=app_headers) |
is activation checkpoint available | import copy
import re
from typing import Callable
import pytest
import torch
import torchvision.models as tm
from torch.fx import GraphModule
import colossalai
from colossalai.core import global_context as gpc
from colossalai.fx import ColoTracer
from colossalai.fx._compatibility import is_compatible_with_meta
from colossalai.fx.graph_module import ColoGraphModule
# from colossalai.fx.passes.algorithms import chen_greedy, solver_rotor
from colossalai.fx.passes.meta_info_prop import MetaInfoProp
from colossalai.testing import rerun_if_address_is_in_use, spawn
if is_compatible_with_meta():
from colossalai.fx.profiler.tensor import MetaTensor
try:
from colossalai.fx.codegen import ActivationCheckpointCodeGen
with_codegen = True
except ImportError:
# fall back to older pytorch version
from colossalai.fx.codegen import python_code_with_activation_checkpoint
with_codegen = False
# SOLVERS = [chen_greedy, solver_rotor]
SOLVERS = []
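# NOTE: with the solver imports commented out and SOLVERS left empty, the solver loops
# in the _run_ckpt_solver* helpers below are effectively skipped.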
def METHOD_NAME(gm: GraphModule):
    for n in gm.graph.nodes:
        if getattr(n, 'activation_checkpoint', None) is not None:
            return True
    return False
def _is_all_gradient_close(m: torch.nn.Module, gm: GraphModule):
for m_p, gm_p in zip(m.parameters(), gm.parameters()):
if not torch.allclose(m_p.grad, gm_p.grad):
return False
return True
def _is_graph_linearized(gm: GraphModule):
code = gm.code
# find patterns like r' return output_1, output_2', which is not expected on a linearized graph
pattern = re.compile(r' return [a-zA-Z0-9_]+(, [a-zA-Z0-9_]+)+')
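    # e.g. a generated line such as "    return relu_1, add_2" matches and marks the graph as not linearized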
if pattern.findall(code):
return False
else:
return True
def check_backward_consistency(m: torch.nn.Module, gm: GraphModule, solver: Callable[[GraphModule], GraphModule],
model_cls: Callable[[], torch.nn.Module]):
criterion = torch.nn.MSELoss()
m.cuda()
data = torch.rand(2, 3, 32, 32).cuda()
label = torch.rand(2, 5).cuda()
loss = criterion(m(data), label)
loss.backward()
loss = criterion(gm(data), label)
loss.backward()
assert _is_all_gradient_close(m, gm), f'Solver {solver} did not work correctly in backward pass on {model_cls}'
def _run_ckpt_solver(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
MODEL_LIST = [tm.densenet121]
torch.backends.cudnn.deterministic = True
tracer = ColoTracer(trace_act_ckpt=False)
data = torch.rand(8, 3, 224, 224, device='meta')
for solver in SOLVERS:
for model_cls in MODEL_LIST:
m = model_cls(num_classes=5)
graph = tracer.trace(root=m)
gm = ColoGraphModule(copy.deepcopy(m), graph, m.__class__.__name__)
MetaInfoProp(gm.cuda()).run(MetaTensor(data).cuda())
codegen = ActivationCheckpointCodeGen()
gm.graph.set_codegen(codegen)
if solver == solver_rotor:
gm = solver(gm, data, mem_limit=500 * 1024 * 1024, mem_slots=500)
else:
gm = solver(gm)
assert _is_graph_linearized(gm), f"Solver {solver} did not solve {model_cls} in a linearized manner."
assert METHOD_NAME(
gm), f"Solver {solver} did not annotate {model_cls} with any activation checkpoints"
check_backward_consistency(m, gm, solver, model_cls)
gpc.destroy()
@pytest.mark.skip("TODO(super-dainiu): refactor all tests.")
@pytest.mark.skipif(not with_codegen, reason='torch version is lower than 1.12.0')
@rerun_if_address_is_in_use()
def test_ckpt_solver():
spawn(_run_ckpt_solver, 1)
def _run_ckpt_solver_torch11(rank, world_size, port):
colossalai.launch(config={}, rank=rank, world_size=world_size, host='localhost', port=port, backend='nccl')
MODEL_LIST = [tm.densenet121]
torch.backends.cudnn.deterministic = True
tracer = ColoTracer(trace_act_ckpt=False)
data = torch.rand(8, 3, 32, 32, device='meta')
for solver in SOLVERS:
for model_cls in MODEL_LIST:
m = model_cls(num_classes=5)
graph = tracer.trace(root=m)
gm = ColoGraphModule(copy.deepcopy(m), graph, m.__class__.__name__)
MetaInfoProp(gm).run(data)
gm.graph._python_code = python_code_with_activation_checkpoint.__get__(graph)
if solver == solver_rotor:
gm = solver(gm, data, mem_limit=500 * 1024 * 1024, mem_slots=500, force_python=True)
else:
gm = solver(gm)
assert _is_graph_linearized(gm), f"Solver {solver} did not solve {model_cls} in a linearized manner."
assert METHOD_NAME(
gm), f"Solver {solver} did not annotate {model_cls} with any activation checkpoints"
check_backward_consistency(m, gm, solver, model_cls)
gpc.destroy()
@pytest.mark.skipif(with_codegen, reason='torch version is equal to or higher than 1.12.0')
@pytest.mark.skip(reason="currently torch11 ColoGraphModule is not done")
@rerun_if_address_is_in_use()
def test_ckpt_solver_torch11():
spawn(_run_ckpt_solver_torch11, 1)
if __name__ == '__main__':
_run_ckpt_solver(rank=0)
test_ckpt_solver()
test_ckpt_solver_torch11() |
test function secrets in variables | from unittest import mock
from moto.core import DEFAULT_ACCOUNT_ID
from prowler.providers.aws.services.awslambda.awslambda_service import Function
AWS_REGION = "us-east-1"
class Test_awslambda_function_no_secrets_in_variables:
def test_no_functions(self):
lambda_client = mock.MagicMock
lambda_client.functions = {}
with mock.patch(
"prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
new=lambda_client,
):
# Test Check
from prowler.providers.aws.services.awslambda.awslambda_function_no_secrets_in_variables.awslambda_function_no_secrets_in_variables import (
awslambda_function_no_secrets_in_variables,
)
check = awslambda_function_no_secrets_in_variables()
result = check.execute()
assert len(result) == 0
def test_function_no_variables(self):
lambda_client = mock.MagicMock
function_name = "test-lambda"
function_runtime = "nodejs4.3"
function_arn = (
f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
)
lambda_client.functions = {
"function_name": Function(
name=function_name,
arn=function_arn,
region=AWS_REGION,
runtime=function_runtime,
)
}
with mock.patch(
"prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
new=lambda_client,
):
# Test Check
from prowler.providers.aws.services.awslambda.awslambda_function_no_secrets_in_variables.awslambda_function_no_secrets_in_variables import (
awslambda_function_no_secrets_in_variables,
)
check = awslambda_function_no_secrets_in_variables()
result = check.execute()
assert len(result) == 1
assert result[0].region == AWS_REGION
assert result[0].resource_id == function_name
assert result[0].resource_arn == function_arn
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"No secrets found in Lambda function {function_name} variables."
)
assert result[0].resource_tags == []
def METHOD_NAME(self):
lambda_client = mock.MagicMock
function_name = "test-lambda"
function_runtime = "nodejs4.3"
function_arn = (
f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
)
lambda_client.functions = {
"function_name": Function(
name=function_name,
arn=function_arn,
region=AWS_REGION,
runtime=function_runtime,
environment={"db_password": "test-password"},
)
}
with mock.patch(
"prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
new=lambda_client,
):
# Test Check
from prowler.providers.aws.services.awslambda.awslambda_function_no_secrets_in_variables.awslambda_function_no_secrets_in_variables import (
awslambda_function_no_secrets_in_variables,
)
check = awslambda_function_no_secrets_in_variables()
result = check.execute()
assert len(result) == 1
assert result[0].region == AWS_REGION
assert result[0].resource_id == function_name
assert result[0].resource_arn == function_arn
assert result[0].status == "FAIL"
assert (
result[0].status_extended
== f"Potential secret found in Lambda function {function_name} variables -> Secret Keyword in variable db_password."
)
assert result[0].resource_tags == []
def test_function_no_secrets_in_variables(self):
lambda_client = mock.MagicMock
function_name = "test-lambda"
function_runtime = "nodejs4.3"
function_arn = (
f"arn:aws:lambda:{AWS_REGION}:{DEFAULT_ACCOUNT_ID}:function/{function_name}"
)
lambda_client.functions = {
"function_name": Function(
name=function_name,
arn=function_arn,
region=AWS_REGION,
runtime=function_runtime,
environment={"db_username": "test-user"},
)
}
with mock.patch(
"prowler.providers.aws.services.awslambda.awslambda_service.Lambda",
new=lambda_client,
):
# Test Check
from prowler.providers.aws.services.awslambda.awslambda_function_no_secrets_in_variables.awslambda_function_no_secrets_in_variables import (
awslambda_function_no_secrets_in_variables,
)
check = awslambda_function_no_secrets_in_variables()
result = check.execute()
assert len(result) == 1
assert result[0].region == AWS_REGION
assert result[0].resource_id == function_name
assert result[0].resource_arn == function_arn
assert result[0].status == "PASS"
assert (
result[0].status_extended
== f"No secrets found in Lambda function {function_name} variables."
)
assert result[0].resource_tags == [] |
absent | """
Infoblox range record management.
Functions accept api_opts:
    api_verifyssl: verify SSL [defaults to True or pillar value]
    api_url: server to connect to [defaults to pillar value]
    api_username: [defaults to pillar value]
    api_password: [defaults to pillar value]
"""
def present(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
"""
Ensure range record is present.
infoblox_range.present:
start_addr: '129.97.150.160',
end_addr: '129.97.150.170',
Verbose state example:
.. code-block:: yaml
infoblox_range.present:
data: {
'always_update_dns': False,
'authority': False,
'comment': 'range of IP addresses used for salt.. was used for ghost images deployment',
'ddns_generate_hostname': True,
'deny_all_clients': False,
'deny_bootp': False,
'disable': False,
'email_list': [],
'enable_ddns': False,
'enable_dhcp_thresholds': False,
'enable_email_warnings': False,
'enable_ifmap_publishing': False,
'enable_snmp_warnings': False,
'end_addr': '129.97.150.169',
'exclude': [],
'extattrs': {},
'fingerprint_filter_rules': [],
'high_water_mark': 95,
'high_water_mark_reset': 85,
'ignore_dhcp_option_list_request': False,
'lease_scavenge_time': -1,
'logic_filter_rules': [],
'low_water_mark': 0,
'low_water_mark_reset': 10,
'mac_filter_rules': [],
'member': {'_struct': 'dhcpmember',
'ipv4addr': '129.97.128.9',
'name': 'cn-dhcp-mc.example.ca'},
'ms_options': [],
'nac_filter_rules': [],
'name': 'ghost-range',
'network': '129.97.150.0/24',
'network_view': 'default',
'option_filter_rules': [],
'options': [{'name': 'dhcp-lease-time',
'num': 51,
'use_option': False,
'value': '43200',
'vendor_class': 'DHCP'}],
'recycle_leases': True,
'relay_agent_filter_rules': [],
'server_association_type': 'MEMBER',
'start_addr': '129.97.150.160',
'update_dns_on_lease_renewal': False,
'use_authority': False,
'use_bootfile': False,
'use_bootserver': False,
'use_ddns_domainname': False,
'use_ddns_generate_hostname': True,
'use_deny_bootp': False,
'use_email_list': False,
'use_enable_ddns': False,
'use_enable_dhcp_thresholds': False,
'use_enable_ifmap_publishing': False,
'use_ignore_dhcp_option_list_request': False,
'use_known_clients': False,
'use_lease_scavenge_time': False,
'use_nextserver': False,
'use_options': False,
'use_recycle_leases': False,
'use_unknown_clients': False,
'use_update_dns_on_lease_renewal': False
}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not data:
data = {}
if "name" not in data:
data.update({"name": name})
if "start_addr" not in data:
data.update({"start_addr": start_addr})
if "end_addr" not in data:
data.update({"end_addr": end_addr})
obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=data["start_addr"], end_addr=None, **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=None, end_addr=data["end_addr"], **api_opts
)
if obj:
diff = __salt__["infoblox.diff_objects"](data, obj)
if not diff:
ret["result"] = True
ret["comment"] = "supplied fields in correct state"
return ret
if diff:
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to update record"
return ret
new_obj = __salt__["infoblox.update_object"](
obj["_ref"], data=data, **api_opts
)
ret["result"] = True
ret["comment"] = "record fields updated"
ret["changes"] = {"diff": diff}
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to create record {}".format(name)
return ret
new_obj_ref = __salt__["infoblox.create_ipv4_range"](data, **api_opts)
new_obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
ret["result"] = True
ret["comment"] = "record created"
ret["changes"] = {"old": "None", "new": {"_ref": new_obj_ref, "data": new_obj}}
return ret
def METHOD_NAME(name=None, start_addr=None, end_addr=None, data=None, **api_opts):
"""
Ensure the range is removed
Supplying the end of the range is optional.
State example:
.. code-block:: yaml
infoblox_range.absent:
- name: 'vlan10'
infoblox_range.absent:
- name:
- start_addr: 127.0.1.20
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not data:
data = {}
if "name" not in data:
data.update({"name": name})
if "start_addr" not in data:
data.update({"start_addr": start_addr})
if "end_addr" not in data:
data.update({"end_addr": end_addr})
obj = __salt__["infoblox.get_ipv4_range"](
data["start_addr"], data["end_addr"], **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=data["start_addr"], end_addr=None, **api_opts
)
if obj is None:
obj = __salt__["infoblox.get_ipv4_range"](
start_addr=None, end_addr=data["end_addr"], **api_opts
)
if not obj:
ret["result"] = True
ret["comment"] = "already deleted"
return ret
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "would attempt to delete range"
return ret
if __salt__["infoblox.delete_object"](objref=obj["_ref"]):
ret["result"] = True
ret["changes"] = {
"old": "Found {} - {}".format(start_addr, end_addr),
"new": "Removed",
}
return ret |
network receive | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from typing import Callable, Dict
from django.utils.translation import ugettext_lazy as _
from rest_framework.decorators import action
from rest_framework.response import Response
from backend.bcs_web.viewsets import SystemViewSet
from backend.components import bcs_monitor as prom
from backend.container_service.clusters.base.utils import get_cluster_nodes
from backend.container_service.observability.metric import constants
from backend.container_service.observability.metric.serializers import BaseMetricSLZ, FetchMetricOverviewSLZ
from backend.utils.error_codes import error_codes
from backend.utils.url_slug import IPV4_REGEX
class NodeMetricViewSet(SystemViewSet):
lookup_field = 'node_ip'
    # Restrict the lookup value to IPv4 addresses
lookup_value_regex = IPV4_REGEX
@action(methods=['POST'], url_path='overview', detail=True)
def overview(self, request, project_id, cluster_id, node_ip):
"""节点指标总览"""
params = self.params_validate(FetchMetricOverviewSLZ)
        # container_count and pod_count are included by default
response_data = {'container_count': '0', 'pod_count': '0'}
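        # The final payload (values here are illustrative) holds string counts plus one entry
        # per requested dimension, e.g. {"container_count": "12", "pod_count": "8", "cpu_usage": ...}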
container_pod_count = prom.get_container_pod_count(cluster_id, node_ip, bk_biz_id=request.project.cc_app_id)
for count in container_pod_count.get('result') or []:
for k, v in count['metric'].items():
if k == 'metric_name' and count['value']:
response_data[v] = count['value'][1]
        # Use all dimensions by default; if specified, only use the given dimensions
dimensions = params.get('dimensions') or [dim for dim in constants.MetricDimension]
for dimension in dimensions:
if dimension not in constants.NODE_DIMENSIONS_FUNC:
raise error_codes.APIError(_("节点指标维度 {} 不合法").format(dimension))
dimension_func = constants.NODE_DIMENSIONS_FUNC[dimension]
response_data[dimension] = dimension_func(cluster_id, node_ip, bk_biz_id=request.project.cc_app_id)
return Response(response_data)
@action(methods=['GET'], url_path='info', detail=True)
def info(self, request, project_id, cluster_id, node_ip):
"""节点基础指标信息"""
node_list = get_cluster_nodes(request.user.token.access_token, project_id, cluster_id)
node_ip_list = [node["inner_ip"] for node in node_list]
if node_ip not in node_ip_list:
            raise error_codes.ValidateError(_('IP {} is invalid or does not belong to the current cluster').format(node_ip))
response_data = {'provider': prom.PROVIDER}
for info in prom.get_node_info(cluster_id, node_ip, bk_biz_id=request.project.cc_app_id).get('result') or []:
for k, v in info['metric'].items():
if k in constants.NODE_UNAME_METRIC:
response_data[k] = v
elif k == 'metric_name' and v in constants.NODE_USAGE_METRIC:
response_data[v] = info['value'][1] if info['value'] else '0'
return Response(response_data)
@action(methods=['GET'], url_path='cpu_usage', detail=True)
def cpu_usage(self, request, project_id, cluster_id, node_ip):
"""节点 CPU 使用率"""
response_data = self._common_query_handler(prom.get_node_cpu_usage_range, cluster_id, node_ip)
return Response(response_data)
@action(methods=['GET'], url_path='memory_usage', detail=True)
def memory_usage(self, request, project_id, cluster_id, node_ip):
"""节点 内存 使用率"""
response_data = self._common_query_handler(prom.get_node_memory_usage_range, cluster_id, node_ip)
return Response(response_data)
@action(methods=['GET'], url_path='network_receive', detail=True)
def METHOD_NAME(self, request, project_id, cluster_id, node_ip):
"""节点 网络入流量"""
response_data = self._common_query_handler(prom.get_node_network_receive, cluster_id, node_ip)
return Response(response_data)
@action(methods=['GET'], url_path='network_transmit', detail=True)
def network_transmit(self, request, project_id, cluster_id, node_ip):
"""节点 网络出流量"""
response_data = self._common_query_handler(prom.get_node_network_transmit, cluster_id, node_ip)
return Response(response_data)
@action(methods=['GET'], url_path='diskio_usage', detail=True)
def diskio_usage(self, request, project_id, cluster_id, node_ip):
"""磁盘 IO 使用情况"""
response_data = self._common_query_handler(prom.get_node_diskio_usage_range, cluster_id, node_ip)
return Response(response_data)
def _common_query_handler(self, query_metric_func: Callable, cluster_id: str, node_ip) -> Dict:
"""
        Common logic for querying node metrics
        :param query_metric_func: metric query function
        :param cluster_id: cluster ID
        :param node_ip: node IP
        :return: metric query result
"""
params = self.params_validate(BaseMetricSLZ)
return query_metric_func(
cluster_id, node_ip, params['start_at'], params['end_at'], bk_biz_id=self.request.project.cc_app_id
) |