label | code
---|---|
run test | #!/usr/bin/env python3
# Copyright (c) 2015-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the preciousblock RPC."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
def unidirectional_node_sync_via_rpc(node_src, node_dest):
blocks_to_copy = []
blockhash = node_src.getbestblockhash()
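# walk back from the source node's tip until we reach a block the destination node already knows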
while True:
try:
assert len(node_dest.getblock(blockhash, False)) > 0
break
except Exception:
blocks_to_copy.append(blockhash)
blockhash = node_src.getblockheader(blockhash, True)['previousblockhash']
blocks_to_copy.reverse()
for blockhash in blocks_to_copy:
blockdata = node_src.getblock(blockhash, False)
assert node_dest.submitblock(blockdata) in (None, 'inconclusive')
def node_sync_via_rpc(nodes):
for node_src in nodes:
for node_dest in nodes:
if node_src is node_dest:
continue
unidirectional_node_sync_via_rpc(node_src, node_dest)
class PreciousTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.supports_cli = False
def setup_network(self):
self.setup_nodes()
def METHOD_NAME(self):
self.log.info("Ensure submitblock can in principle reorg to a competing chain")
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
assert_equal(self.nodes[0].getblockcount(), 1)
hashZ = self.generate(self.nodes[1], 2, sync_fun=self.no_op)[-1]
assert_equal(self.nodes[1].getblockcount(), 2)
node_sync_via_rpc(self.nodes[0:3])
assert_equal(self.nodes[0].getbestblockhash(), hashZ)
self.log.info("Mine blocks A-B-C on Node 0")
hashC = self.generate(self.nodes[0], 3, sync_fun=self.no_op)[-1]
assert_equal(self.nodes[0].getblockcount(), 5)
self.log.info("Mine competing blocks E-F-G on Node 1")
hashG = self.generate(self.nodes[1], 3, sync_fun=self.no_op)[-1]
assert_equal(self.nodes[1].getblockcount(), 5)
assert hashC != hashG
self.log.info("Connect nodes and check no reorg occurs")
# Submit competing blocks via RPC so any reorg should occur before we proceed (no way to wait on inaction for p2p sync)
node_sync_via_rpc(self.nodes[0:2])
self.connect_nodes(0, 1)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block C again")
self.nodes[0].preciousblock(hashC)
assert_equal(self.nodes[0].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block C")
self.nodes[1].preciousblock(hashC)
self.sync_blocks(self.nodes[0:2]) # wait because node 1 may not have downloaded hashC
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Make Node1 prefer block G again")
self.nodes[1].preciousblock(hashG)
assert_equal(self.nodes[1].getbestblockhash(), hashG)
self.log.info("Make Node0 prefer block G again")
self.nodes[0].preciousblock(hashG)
assert_equal(self.nodes[0].getbestblockhash(), hashG)
self.log.info("Make Node1 prefer block C again")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashC)
self.log.info("Mine another block (E-F-G-)H on Node 0 and reorg Node 1")
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
assert_equal(self.nodes[0].getblockcount(), 6)
self.sync_blocks(self.nodes[0:2])
hashH = self.nodes[0].getbestblockhash()
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Node1 should not be able to prefer block C anymore")
self.nodes[1].preciousblock(hashC)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
self.log.info("Mine competing blocks I-J-K-L on Node 2")
self.generate(self.nodes[2], 4, sync_fun=self.no_op)
assert_equal(self.nodes[2].getblockcount(), 6)
hashL = self.nodes[2].getbestblockhash()
self.log.info("Connect nodes and check no reorg occurs")
node_sync_via_rpc(self.nodes[1:3])
self.connect_nodes(1, 2)
self.connect_nodes(0, 2)
assert_equal(self.nodes[0].getbestblockhash(), hashH)
assert_equal(self.nodes[1].getbestblockhash(), hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashL)
self.log.info("Make Node1 prefer block L")
self.nodes[1].preciousblock(hashL)
assert_equal(self.nodes[1].getbestblockhash(), hashL)
self.log.info("Make Node2 prefer block H")
self.nodes[2].preciousblock(hashH)
assert_equal(self.nodes[2].getbestblockhash(), hashH)
if __name__ == '__main__':
PreciousTest().main() |
test build rule book from gcs works | # Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the BucketsRulesEngine."""
import json
import unittest
import unittest.mock as mock
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
import yaml
from google.cloud.forseti.common.gcp_type import bucket_access_controls
from google.cloud.forseti.common.util import file_loader
from google.cloud.forseti.scanner.audit import base_rules_engine as bre
from google.cloud.forseti.scanner.audit import buckets_rules_engine as bre
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
# TODO: Define more tests
class BucketsRulesEngineTest(ForsetiTestCase):
"""Tests for the BucketsRulesEngine."""
def setUp(self):
"""Set up."""
self.rule_index = 0
self.bre = bre
self.bre.LOGGER = mock.MagicMock()
def test_build_rule_book_from_local_yaml_file_works(self):
"""Test that a RuleBook is built correctly with a yaml file."""
rules_local_path = get_datafile_path(__file__,
'buckets_test_rules_1.yaml')
rules_engine = bre.BucketsRulesEngine(rules_file_path=rules_local_path)
rules_engine.build_rule_book()
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
@mock.patch.object(file_loader,
'_read_file_from_gcs', autospec=True)
def METHOD_NAME(self, mock_load_rules_from_gcs):
"""Test that a RuleBook is built correctly with a mocked gcs file.
Setup:
* Create a mocked GCS object from a test yaml file.
* Get the yaml file content.
Expected results:
There are 2 resources that have rules in the rule book.
"""
bucket_name = 'bucket-name'
rules_path = 'input/buckets_test_rules_1.yaml'
full_rules_path = 'gs://{}/{}'.format(bucket_name, rules_path)
rules_engine = bre.BucketsRulesEngine(rules_file_path=full_rules_path)
# Read in the rules file
file_content = None
with open(get_datafile_path(__file__, 'buckets_test_rules_1.yaml'),
'r') as rules_local_file:
try:
file_content = yaml.safe_load(rules_local_file)
except yaml.YAMLError:
raise
mock_load_rules_from_gcs.return_value = file_content
rules_engine.build_rule_book()
self.assertEqual(2, len(rules_engine.rule_book.resource_rules_map))
def test_build_rule_book_no_resource_type_fails(self):
"""Test that a rule without a resource cannot be created."""
rules_local_path = get_datafile_path(__file__,
'buckets_test_rules_2.yaml')
rules_engine = bre.BucketsRulesEngine(rules_file_path=rules_local_path)
with self.assertRaises(InvalidRulesSchemaError):
rules_engine.build_rule_book()
def test_find_violation_for_publicly_exposed_acls(self):
rules_local_path = get_datafile_path(__file__,
'buckets_test_rules_1.yaml')
rules_engine = bre.BucketsRulesEngine(rules_file_path=rules_local_path)
rules_engine.build_rule_book()
rules_map = rules_engine.rule_book.resource_rules_map
all_users_rule = rules_map[0]
all_authenticated_users_rule = rules_map[1]
# Everything is allowed.
acl_dict = json.loads(
BUCKET_ACL_TEMPLATE.format(entity='project-owners-123456'))
acl = bucket_access_controls.BucketAccessControls.from_dict(
'test-project', 'fake_inventory_data', acl_dict)
violation = all_users_rule.find_violations(acl)
self.assertEqual(0, len(list(violation)))
# Exposed to everyone in the world.
acl_dict = json.loads(
BUCKET_ACL_TEMPLATE.format(entity='allUsers'))
acl = bucket_access_controls.BucketAccessControls.from_dict(
'test-project', 'fake_inventory_data', acl_dict)
violation = all_users_rule.find_violations(acl)
self.assertEqual(1, len(list(violation)))
# Exposed to all google-authenticated users in the world.
acl_dict = json.loads(
BUCKET_ACL_TEMPLATE.format(entity='allAuthenticatedUsers'))
acl = bucket_access_controls.BucketAccessControls.from_dict(
'test-project', 'fake_inventory_data', acl_dict)
violation = all_authenticated_users_rule.find_violations(acl)
self.assertEqual(1, len(list(violation)))
BUCKET_ACL_TEMPLATE = """
{{
"kind": "storage#bucketAccessControl",
"id": "test-bucket/{entity}",
"selfLink": "https://www.googleapis.com/storage/v1/b/test-bucket/acl/{entity}",
"bucket": "test-bucket",
"entity": "{entity}",
"role": "OWNER",
"projectTeam": {{
"projectNumber": "123456",
"team": "owners"
}},
"etag": "CAE="
}}
"""
if __name__ == '__main__':
unittest.main() |
value | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApiOperationPolicyResult',
'AwaitableGetApiOperationPolicyResult',
'get_api_operation_policy',
'get_api_operation_policy_output',
]
@pulumi.output_type
class GetApiOperationPolicyResult:
"""
Policy Contract details.
"""
def __init__(__self__, format=None, id=None, name=None, type=None, METHOD_NAME=None):
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", METHOD_NAME)
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value")
class AwaitableGetApiOperationPolicyResult(GetApiOperationPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiOperationPolicyResult(
format=self.format,
id=self.id,
name=self.name,
type=self.type,
METHOD_NAME=self.METHOD_NAME)
def get_api_operation_policy(api_id: Optional[str] = None,
format: Optional[str] = None,
operation_id: Optional[str] = None,
policy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiOperationPolicyResult:
"""
Get the policy configuration at the API Operation level.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['format'] = format
__args__['operationId'] = operation_id
__args__['policyId'] = policy_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20220901preview:getApiOperationPolicy', __args__, opts=opts, typ=GetApiOperationPolicyResult).METHOD_NAME
return AwaitableGetApiOperationPolicyResult(
format=pulumi.get(__ret__, 'format'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
type=pulumi.get(__ret__, 'type'),
METHOD_NAME=pulumi.get(__ret__, 'value'))
@_utilities.lift_output_func(get_api_operation_policy)
def get_api_operation_policy_output(api_id: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Optional[str]]] = None,
operation_id: Optional[pulumi.Input[str]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiOperationPolicyResult]:
"""
Get the policy configuration at the API Operation level.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str operation_id: Operation identifier within an API. Must be unique in the current API Management service instance.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str service_name: The name of the API Management service.
"""
... |
delete schedule group | """Handles incoming scheduler requests, invokes methods, returns responses."""
import json
from typing import Any
from urllib.parse import unquote
from moto.core.common_types import TYPE_RESPONSE
from moto.core.responses import BaseResponse
from .models import scheduler_backends, EventBridgeSchedulerBackend
class EventBridgeSchedulerResponse(BaseResponse):
"""Handler for EventBridgeScheduler requests and responses."""
def __init__(self) -> None:
super().__init__(service_name="scheduler")
@property
def scheduler_backend(self) -> EventBridgeSchedulerBackend:
"""Return backend instance specific for this region."""
return scheduler_backends[self.current_account][self.region]
def create_schedule(self) -> str:
description = self._get_param("Description")
end_date = self._get_param("EndDate")
flexible_time_window = self._get_param("FlexibleTimeWindow")
group_name = self._get_param("GroupName")
kms_key_arn = self._get_param("KmsKeyArn")
name = self.uri.split("/")[-1]
schedule_expression = self._get_param("ScheduleExpression")
schedule_expression_timezone = self._get_param("ScheduleExpressionTimezone")
start_date = self._get_param("StartDate")
state = self._get_param("State")
target = self._get_param("Target")
schedule = self.scheduler_backend.create_schedule(
description=description,
end_date=end_date,
flexible_time_window=flexible_time_window,
group_name=group_name,
kms_key_arn=kms_key_arn,
name=name,
schedule_expression=schedule_expression,
schedule_expression_timezone=schedule_expression_timezone,
start_date=start_date,
state=state,
target=target,
)
return json.dumps(dict(ScheduleArn=schedule.arn))
def get_schedule(self) -> str:
group_name = self._get_param("groupName")
full_url = self.uri.split("?")[0]
name = full_url.split("/")[-1]
schedule = self.scheduler_backend.get_schedule(group_name, name)
return json.dumps(schedule.to_dict())
def delete_schedule(self) -> str:
group_name = self._get_param("groupName")
name = self.uri.split("?")[0].split("/")[-1]
self.scheduler_backend.delete_schedule(group_name, name)
return "{}"
def update_schedule(self) -> str:
group_name = self._get_param("GroupName")
name = self.uri.split("?")[0].split("/")[-1]
description = self._get_param("Description")
end_date = self._get_param("EndDate")
flexible_time_window = self._get_param("FlexibleTimeWindow")
kms_key_arn = self._get_param("KmsKeyArn")
schedule_expression = self._get_param("ScheduleExpression")
schedule_expression_timezone = self._get_param("ScheduleExpressionTimezone")
start_date = self._get_param("StartDate")
state = self._get_param("State")
target = self._get_param("Target")
schedule = self.scheduler_backend.update_schedule(
description=description,
end_date=end_date,
flexible_time_window=flexible_time_window,
group_name=group_name,
kms_key_arn=kms_key_arn,
name=name,
schedule_expression=schedule_expression,
schedule_expression_timezone=schedule_expression_timezone,
start_date=start_date,
state=state,
target=target,
)
return json.dumps(dict(ScheduleArn=schedule.arn))
def list_schedules(self) -> str:
group_names = self.querystring.get("ScheduleGroup")
state = self._get_param("State")
schedules = self.scheduler_backend.list_schedules(group_names, state)
return json.dumps({"Schedules": [sch.to_dict(short=True) for sch in schedules]})
def create_schedule_group(self) -> str:
name = self._get_param("Name")
tags = self._get_param("Tags")
schedule_group = self.scheduler_backend.create_schedule_group(
name=name,
tags=tags,
)
return json.dumps(dict(ScheduleGroupArn=schedule_group.arn))
def get_schedule_group(self) -> str:
group_name = self.uri.split("?")[0].split("/")[-1]
group = self.scheduler_backend.get_schedule_group(group_name)
return json.dumps(group.to_dict())
def METHOD_NAME(self) -> str:
group_name = self.uri.split("?")[0].split("/")[-1]
self.scheduler_backend.METHOD_NAME(group_name)
return "{}"
def list_schedule_groups(self) -> str:
schedule_groups = self.scheduler_backend.list_schedule_groups()
return json.dumps(dict(ScheduleGroups=[sg.to_dict() for sg in schedule_groups]))
def list_tags_for_resource(self) -> TYPE_RESPONSE:
resource_arn = unquote(self.uri.split("/tags/")[-1])
tags = self.scheduler_backend.list_tags_for_resource(resource_arn)
return 200, {}, json.dumps(tags)
def tag_resource(self) -> TYPE_RESPONSE:
resource_arn = unquote(self.uri.split("/tags/")[-1])
tags = json.loads(self.body)["Tags"]
self.scheduler_backend.tag_resource(resource_arn, tags)
return 200, {}, "{}"
def untag_resource(self) -> TYPE_RESPONSE:
resource_arn = unquote(self.uri.split("?")[0].split("/tags/")[-1])
tag_keys = self.querystring.get("TagKeys")
self.scheduler_backend.untag_resource(resource_arn, tag_keys) # type: ignore
return 200, {}, "{}"
def tags(self, request: Any, full_url: str, headers: Any) -> TYPE_RESPONSE:
super().setup_class(request, full_url, headers)
if request.method == "POST":
return self.tag_resource()
elif request.method == "DELETE":
return self.untag_resource()
else:
return self.list_tags_for_resource() |
set test params | #!/usr/bin/env python3
# Copyright (c) 2022 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test dust limit mempool policy (`-dustrelayfee` parameter)"""
from decimal import Decimal
from test_framework.messages import (
COIN,
CTxOut,
)
from test_framework.script import (
CScript,
OP_RETURN,
OP_TRUE,
)
from test_framework.script_util import (
key_to_p2pk_script,
key_to_p2pkh_script,
key_to_p2wpkh_script,
keys_to_multisig_script,
output_key_to_p2tr_script,
program_to_witness_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import TestNode
from test_framework.util import (
assert_equal,
get_fee,
)
from test_framework.wallet import MiniWallet
from test_framework.wallet_util import generate_keypair
DUST_RELAY_TX_FEE = 3000 # default setting [sat/kvB]
class DustRelayFeeTest(BitcoinTestFramework):
def METHOD_NAME(self):
self.num_nodes = 1
def test_dust_output(self, node: TestNode, dust_relay_fee: Decimal,
output_script: CScript, type_desc: str) -> None:
# determine dust threshold (see `GetDustThreshold`)
if output_script[0] == OP_RETURN:
dust_threshold = 0
else:
tx_size = len(CTxOut(nValue=0, scriptPubKey=output_script).serialize())
tx_size += 67 if output_script.IsWitnessProgram() else 148
dust_threshold = int(get_fee(tx_size, dust_relay_fee) * COIN)
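# worked example: at the default 3000 sat/kvB rate, a P2PKH output (34-byte output + 148-byte spend = 182 vbytes) gives 182 * 3 = 546 sats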
self.log.info(f"-> Test {type_desc} output (size {len(output_script)}, limit {dust_threshold})")
# amount right on the dust threshold should pass
tx = self.wallet.create_self_transfer()["tx"]
tx.vout.append(CTxOut(nValue=dust_threshold, scriptPubKey=output_script))
tx.vout[0].nValue -= dust_threshold # keep total output value constant
tx_good_hex = tx.serialize().hex()
res = node.testmempoolaccept([tx_good_hex])[0]
assert_equal(res['allowed'], True)
# amount just below the dust threshold should fail
if dust_threshold > 0:
tx.vout[1].nValue -= 1
res = node.testmempoolaccept([tx.serialize().hex()])[0]
assert_equal(res['allowed'], False)
assert_equal(res['reject-reason'], 'dust')
# finally send the transaction to avoid running out of MiniWallet UTXOs
self.wallet.sendrawtransaction(from_node=node, tx_hex=tx_good_hex)
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# prepare output scripts of each standard type
_, uncompressed_pubkey = generate_keypair(compressed=False)
_, pubkey = generate_keypair(compressed=True)
output_scripts = (
(key_to_p2pk_script(uncompressed_pubkey), "P2PK (uncompressed)"),
(key_to_p2pk_script(pubkey), "P2PK (compressed)"),
(key_to_p2pkh_script(pubkey), "P2PKH"),
(script_to_p2sh_script(CScript([OP_TRUE])), "P2SH"),
(key_to_p2wpkh_script(pubkey), "P2WPKH"),
(script_to_p2wsh_script(CScript([OP_TRUE])), "P2WSH"),
(output_key_to_p2tr_script(pubkey[1:]), "P2TR"),
# witness programs for segwitv2+ can be between 2 and 40 bytes
(program_to_witness_script(2, b'\x66' * 2), "P2?? (future witness version 2)"),
(program_to_witness_script(16, b'\x77' * 40), "P2?? (future witness version 16)"),
# largest possible output script considered standard
(keys_to_multisig_script([uncompressed_pubkey]*3), "bare multisig (m-of-3)"),
(CScript([OP_RETURN, b'superimportanthash']), "null data (OP_RETURN)"),
)
# test default (no parameter), disabled (=0) and a bunch of arbitrary dust fee rates [sat/kvB]
for dustfee_sat_kvb in (DUST_RELAY_TX_FEE, 0, 1, 66, 500, 1337, 12345, 21212, 333333):
dustfee_btc_kvb = dustfee_sat_kvb / Decimal(COIN)
if dustfee_sat_kvb == DUST_RELAY_TX_FEE:
self.log.info(f"Test default dust limit setting ({dustfee_sat_kvb} sat/kvB)...")
else:
dust_parameter = f"-dustrelayfee={dustfee_btc_kvb:.8f}"
self.log.info(f"Test dust limit setting {dust_parameter} ({dustfee_sat_kvb} sat/kvB)...")
self.restart_node(0, extra_args=[dust_parameter])
for output_script, description in output_scripts:
self.test_dust_output(self.nodes[0], dustfee_btc_kvb, output_script, description)
self.generate(self.nodes[0], 1)
if __name__ == '__main__':
DustRelayFeeTest().main() |
rnq | # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import threading
import time
from typing import Any, Dict, Optional
from requests import Request, RequestException, Response, Session, codes
from requests.adapters import HTTPAdapter
from nvflare.apis.overseer_spec import SP, OverseerAgent
from nvflare.security.logging import secure_format_exception
class HttpOverseerAgent(OverseerAgent):
def __init__(
self,
role,
overseer_end_point,
project,
name: str,
fl_port: str = "",
admin_port: str = "",
heartbeat_interval=5,
):
if role not in ["server", "client", "admin"]:
raise ValueError(f'Expect role in ["server", "client", "admin"] but got {role}')
super().__init__()
self._role = role
self._overseer_end_point = overseer_end_point
self._project = project
self._session = None
self._status_lock = threading.Lock()
self._report_and_query = threading.Thread(target=self._rnq_worker, args=())
self._psp = SP()
self._flag = threading.Event()
self._ca_path = None
self._cert_path = None
self._prv_key_path = None
self._last_service_session_id = ""
self._asked_to_exit = False
self._logger = logging.getLogger(self.__class__.__name__)
self._retry_delay = 4
self._asked_to_stop_retrying = False
self._update_callback = None
self._conditional_cb = False
if self._role == "server":
self._sp_end_point = ":".join([name, fl_port, admin_port])
self._heartbeat_interval = heartbeat_interval
def _send(
self, api_point, headers: Optional[Dict[str, Any]] = None, payload: Optional[Dict[str, Any]] = None
) -> Response:
try_count = 0
while not self._asked_to_stop_retrying:
try:
req = Request("POST", api_point, json=payload, headers=headers)
prepared = self._session.prepare_request(req)
resp = self._session.send(prepared)
return resp
except RequestException as e:
self._logger.debug(f"Overseer error: {secure_format_exception(e)}")
try_count += 1
time.sleep(self._retry_delay)
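# if retrying is stopped via pause()/end(), the loop exits and None is returned implicitly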
def set_secure_context(self, ca_path: str, cert_path: str = "", prv_key_path: str = ""):
self._ca_path = ca_path
self._cert_path = cert_path
self._prv_key_path = prv_key_path
def start(self, update_callback=None, conditional_cb=False):
self._session = Session()
adapter = HTTPAdapter(max_retries=1)
self._session.mount("http://", adapter)
self._session.mount("https://", adapter)
if self._ca_path:
self._session.verify = self._ca_path
self._session.cert = (self._cert_path, self._prv_key_path)
self._conditional_cb = conditional_cb
if update_callback:
self._update_callback = update_callback
self._report_and_query.start()
self._flag.set()
def pause(self):
self._asked_to_stop_retrying = True
self._flag.clear()
def resume(self):
self._asked_to_stop_retrying = False
self._flag.set()
def end(self):
self._asked_to_stop_retrying = True
self._flag.set()
self._asked_to_exit = True
self._report_and_query.join()
def is_shutdown(self) -> bool:
"""Return whether the agent receives a shutdown request."""
return self.overseer_info.get("system") == "shutdown"
def get_primary_sp(self) -> SP:
"""Return current primary service provider.
If primary sp not available, such as not reported by SD, connection to SD not established yet
the name and ports will be empty strings.
"""
return self._psp
def promote_sp(self, sp_end_point, headers=None) -> Response:
api_point = self._overseer_end_point + "/promote"
return self._send(api_point, headers=None, payload={"sp_end_point": sp_end_point, "project": self._project})
def set_state(self, state) -> Response:
api_point = self._overseer_end_point + "/state"
return self._send(api_point, payload={"state": state})
def _do_callback(self):
if self._update_callback:
self._update_callback(self)
def _handle_ssid(self, ssid):
if not self._conditional_cb or self._last_service_session_id != ssid:
self._last_service_session_id = ssid
self._do_callback()
def _prepare_data(self):
data = dict(role=self._role, project=self._project)
return data
def _rnq_worker(self):
data = self._prepare_data()
if self._role == "server":
data["sp_end_point"] = self._sp_end_point
api_point = self._overseer_end_point + "/heartbeat"
while not self._asked_to_exit:
self._flag.wait()
self.METHOD_NAME(api_point, headers=None, data=data)
time.sleep(self._heartbeat_interval)
def METHOD_NAME(self, api_point, headers, data):
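# report-and-query: send one heartbeat payload and refresh the primary SP info from the response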
resp = self._send(api_point, headers=headers, payload=data)
if resp is None:
return
if resp.status_code != codes.ok:
return
self.overseer_info = resp.json()
psp = self.overseer_info.get("primary_sp")
if psp:
name, fl_port, admin_port = psp.get("sp_end_point").split(":")
service_session_id = psp.get("service_session_id", "")
self._psp = SP(name, fl_port, admin_port, service_session_id, True)
# last_heartbeat = psp.get("last_heartbeat", "")
self._handle_ssid(service_session_id)
else:
self._psp = SP()
service_session_id = ""
self._handle_ssid(service_session_id) |
get conv by frontend id | from typing import Optional
from fastapi import APIRouter, Depends
from oasst_backend.api import deps
from oasst_backend.api.v1 import utils
from oasst_backend.models import ApiClient
from oasst_backend.prompt_repository import PromptRepository
from oasst_shared.schemas import protocol
from sqlmodel import Session
router = APIRouter()
@router.get("/{message_id}", response_model=protocol.Message)
def get_message_by_frontend_id(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get a message by its frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
return utils.prepare_message(message)
@router.get("/{message_id}/conversation", response_model=protocol.Conversation)
def METHOD_NAME(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get a conversation from the tree root and up to the message with given frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
messages = pr.fetch_message_conversation(message)
return utils.prepare_conversation(messages)
@router.get("/{message_id}/tree", response_model=protocol.MessageTree)
def get_tree_by_frontend_id(
message_id: str,
include_spam: Optional[bool] = True,
include_deleted: Optional[bool] = False,
api_client: ApiClient = Depends(deps.get_api_client),
db: Session = Depends(deps.get_db),
):
"""
Get all messages belonging to the same message tree.
Message is identified by its frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
review_result = None if include_spam else True
deleted = None if include_deleted else False
tree = pr.fetch_message_tree(message.message_tree_id, review_result=review_result, deleted=deleted)
return utils.prepare_tree(tree, message.message_tree_id)
@router.get("/{message_id}/children", response_model=list[protocol.Message])
def get_children_by_frontend_id(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get all direct children of the message with the given frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
messages = pr.fetch_message_children(message.id, review_result=None)
return utils.prepare_message_list(messages)
@router.get("/{message_id}/descendants", response_model=protocol.MessageTree)
def get_descendants_by_frontend_id(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get a subtree which starts with this message.
The message is identified by its frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
descendants = pr.fetch_message_descendants(message)
return utils.prepare_tree(descendants, message.id)
@router.get("/{message_id}/longest_conversation_in_tree", response_model=protocol.Conversation)
def get_longest_conv_by_frontend_id(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get the longest conversation from the tree of the message.
The message is identified by its frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
conv = pr.fetch_longest_conversation(message.message_tree_id)
return utils.prepare_conversation(conv)
@router.get("/{message_id}/max_children_in_tree", response_model=protocol.MessageTree)
def get_max_children_by_frontend_id(
message_id: str, api_client: ApiClient = Depends(deps.get_api_client), db: Session = Depends(deps.get_db)
):
"""
Get message with the most children from the tree of the provided message.
The message is identified by its frontend ID.
"""
pr = PromptRepository(db, api_client)
message = pr.fetch_message_by_frontend_message_id(message_id)
message, children = pr.fetch_message_with_max_children(message.message_tree_id)
return utils.prepare_tree([message, *children], message.id) |
ufl shape | # -*- coding: utf-8 -*-
"""This module defines the single index types and some internal index utilities."""
# Copyright (C) 2008-2016 Martin Sandve Alnæs and Anders Logg
#
# This file is part of UFL (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
#
# Modified by Massimiliano Leoni, 2016.
from ufl.utils.counted import Counted
from ufl.core.ufl_type import ufl_type
from ufl.core.terminal import Terminal
# Export list for ufl.classes
__all_classes__ = ["IndexBase", "FixedIndex", "Index"]
class IndexBase(object):
"""Base class for all indices."""
__slots__ = ()
def __init__(self):
pass
class FixedIndex(IndexBase):
"""UFL value: An index with a specific value assigned."""
__slots__ = ("_value", "_hash")
_cache = {}
def __getnewargs__(self):
return (self._value,)
def __new__(cls, value):
self = FixedIndex._cache.get(value)
if self is None:
if not isinstance(value, int):
raise ValueError("Expecting integer value for fixed index.")
self = IndexBase.__new__(cls)
self._init(value)
FixedIndex._cache[value] = self
return self
def _init(self, value):
IndexBase.__init__(self)
self._value = value
self._hash = hash(("FixedIndex", self._value))
def __init__(self, value):
pass
def __hash__(self):
return self._hash
def __eq__(self, other):
return isinstance(other, FixedIndex) and (self._value == other._value)
def __int__(self):
return self._value
def __str__(self):
return "%d" % self._value
def __repr__(self):
r = "FixedIndex(%d)" % self._value
return r
class Index(IndexBase, Counted):
"""UFL value: An index with no value assigned.
Used to represent free indices in Einstein indexing notation."""
__slots__ = ("_count", "_counted_class")
def __init__(self, count=None):
IndexBase.__init__(self)
Counted.__init__(self, count, Index)
def __hash__(self):
return hash(("Index", self._count))
def __eq__(self, other):
return isinstance(other, Index) and (self._count == other._count)
def __str__(self):
c = str(self._count)
if len(c) > 1:
c = "{%s}" % c
return "i_%s" % c
def __repr__(self):
r = "Index(%d)" % self._count
return r
@ufl_type()
class MultiIndex(Terminal):
"Represents a sequence of indices, either fixed or free."
__slots__ = ("_indices",)
_cache = {}
def __getnewargs__(self):
return (self._indices,)
def __new__(cls, indices):
if not isinstance(indices, tuple):
raise ValueError("Expecting a tuple of indices.")
if all(isinstance(ind, FixedIndex) for ind in indices):
# Cache multiindices consisting of purely fixed indices
# (aka flyweight pattern)
key = tuple(ind._value for ind in indices)
self = MultiIndex._cache.get(key)
if self is not None:
return self
self = Terminal.__new__(cls)
MultiIndex._cache[key] = self
else:
# Create a new object if we have any free indices (too
# many combinations to cache)
if not all(isinstance(ind, IndexBase) for ind in indices):
raise ValueError("Expecting only Index and FixedIndex objects.")
self = Terminal.__new__(cls)
# Initialize here instead of in __init__ to avoid overwriting
# self._indices from cached objects
self._init(indices)
return self
def __init__(self, indices):
pass
def _init(self, indices):
Terminal.__init__(self)
self._indices = indices
def indices(self):
"Return tuple of indices."
return self._indices
def _ufl_compute_hash_(self):
return hash(("MultiIndex",) + tuple(hash(ind) for ind in self._indices))
def __eq__(self, other):
return isinstance(other, MultiIndex) and \
self._indices == other._indices
def evaluate(self, x, mapping, component, index_values):
"Evaluate index."
# Build component from index values
component = []
for i in self._indices:
if isinstance(i, FixedIndex):
component.append(i._value)
elif isinstance(i, Index):
component.append(index_values[i])
return tuple(component)
@property
def METHOD_NAME(self):
"This shall not be used."
raise ValueError("Multiindex has no shape (it is not a tensor expression).")
@property
def ufl_free_indices(self):
"This shall not be used."
raise ValueError("Multiindex has no free indices (it is not a tensor expression).")
@property
def ufl_index_dimensions(self):
"This shall not be used."
raise ValueError("Multiindex has no free indices (it is not a tensor expression).")
def is_cellwise_constant(self):
"Always True."
return True
def ufl_domains(self):
"Return tuple of domains related to this terminal object."
return ()
# --- Adding multiindices ---
def __add__(self, other):
if isinstance(other, tuple):
return MultiIndex(self._indices + other)
elif isinstance(other, MultiIndex):
return MultiIndex(self._indices + other._indices)
return NotImplemented
def __radd__(self, other):
if isinstance(other, tuple):
return MultiIndex(other + self._indices)
elif isinstance(other, MultiIndex):
return MultiIndex(other._indices + self._indices)
return NotImplemented
# --- String formatting ---
def __str__(self):
return ", ".join(str(i) for i in self._indices)
def __repr__(self):
r = "MultiIndex(%s)" % repr(self._indices)
return r
# --- Iteration protocol ---
def __len__(self):
return len(self._indices)
def __getitem__(self, i):
return self._indices[i]
def __iter__(self):
return iter(self._indices)
def indices(n):
"UFL value: Return a tuple of :math:`n` new Index objects."
return tuple(Index() for i in range(n)) |
test domain range scale rg b to | # !/usr/bin/env python
"""Define the unit tests for the :mod:`colour.models.rgb.prismatic` module."""
import numpy as np
import unittest
from itertools import product
from colour.models.rgb import RGB_to_Prismatic, Prismatic_to_RGB
from colour.utilities import domain_range_scale, ignore_numpy_errors
__author__ = "Colour Developers"
__copyright__ = "Copyright 2013 Colour Developers"
__license__ = "BSD-3-Clause - https://opensource.org/licenses/BSD-3-Clause"
__maintainer__ = "Colour Developers"
__email__ = "[email protected]"
__status__ = "Production"
__all__ = [
"TestRGB_to_Prismatic",
"TestPrismatic_to_RGB",
]
class TestRGB_to_Prismatic(unittest.TestCase):
"""
Define :func:`colour.models.rgb.prismatic.TestRGB_to_Prismatic` definition
unit tests methods.
"""
def test_RGB_to_Prismatic(self):
"""Test :func:`colour.models.rgb.prismatic.RGB_to_Prismatic` definition."""
np.testing.assert_array_almost_equal(
RGB_to_Prismatic(np.array([0.0, 0.0, 0.0])),
np.array([0.0, 0.0, 0.0, 0.0]),
decimal=7,
)
np.testing.assert_array_almost_equal(
RGB_to_Prismatic(np.array([0.25, 0.50, 0.75])),
np.array([0.7500000, 0.1666667, 0.3333333, 0.5000000]),
decimal=7,
)
def test_n_dimensional_RGB_to_Prismatic(self):
"""
Test :func:`colour.models.rgb.prismatic.RGB_to_Prismatic` definition
n-dimensional support.
"""
RGB = np.array([0.25, 0.50, 0.75])
Lrgb = RGB_to_Prismatic(RGB)
RGB = np.tile(RGB, (6, 1))
Lrgb = np.tile(Lrgb, (6, 1))
np.testing.assert_array_almost_equal(
RGB_to_Prismatic(RGB), Lrgb, decimal=7
)
RGB = np.reshape(RGB, (2, 3, 3))
Lrgb = np.reshape(Lrgb, (2, 3, 4))
np.testing.assert_array_almost_equal(
RGB_to_Prismatic(RGB), Lrgb, decimal=7
)
def METHOD_NAME(self):
"""
Test :func:`colour.models.rgb.prismatic.RGB_to_Prismatic` definition
domain and range scale support.
"""
RGB = np.array([0.25, 0.50, 0.75])
Lrgb = RGB_to_Prismatic(RGB)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_array_almost_equal(
RGB_to_Prismatic(RGB * factor), Lrgb * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_RGB_to_Prismatic(self):
"""
Test :func:`colour.models.rgb.prismatic.RGB_to_Prismatic` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = np.array(list(set(product(cases, repeat=3))))
RGB_to_Prismatic(cases)
class TestPrismatic_to_RGB(unittest.TestCase):
"""
Define :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition
unit tests methods.
"""
def test_Prismatic_to_RGB(self):
"""Test :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition."""
np.testing.assert_array_almost_equal(
Prismatic_to_RGB(np.array([0.0, 0.0, 0.0, 0.0])),
np.array([0.0, 0.0, 0.0]),
decimal=7,
)
np.testing.assert_array_almost_equal(
Prismatic_to_RGB(
np.array([0.7500000, 0.1666667, 0.3333333, 0.5000000])
),
np.array([0.25, 0.50, 0.75]),
decimal=7,
)
def test_n_dimensional_Prismatic_to_RGB(self):
"""
Test :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition
n-dimensional support.
"""
Lrgb = np.array([0.7500000, 0.1666667, 0.3333333, 0.5000000])
RGB = Prismatic_to_RGB(Lrgb)
Lrgb = np.tile(Lrgb, (6, 1))
RGB = np.tile(RGB, (6, 1))
np.testing.assert_array_almost_equal(
Prismatic_to_RGB(Lrgb), RGB, decimal=7
)
Lrgb = np.reshape(Lrgb, (2, 3, 4))
RGB = np.reshape(RGB, (2, 3, 3))
np.testing.assert_array_almost_equal(
Prismatic_to_RGB(Lrgb), RGB, decimal=7
)
def test_domain_range_scale_Prismatic_to_RGB(self):
"""
Test :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition
domain and range scale support.
"""
Lrgb = np.array([0.7500000, 0.1666667, 0.3333333, 0.5000000])
RGB = Prismatic_to_RGB(Lrgb)
d_r = (("reference", 1), ("1", 1), ("100", 100))
for scale, factor in d_r:
with domain_range_scale(scale):
np.testing.assert_array_almost_equal(
Prismatic_to_RGB(Lrgb * factor), RGB * factor, decimal=7
)
@ignore_numpy_errors
def test_nan_Prismatic_to_RGB(self):
"""
Test :func:`colour.models.rgb.prismatic.Prismatic_to_RGB` definition
nan support.
"""
cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan]
cases = np.array(list(set(product(cases, repeat=3))))
Prismatic_to_RGB(cases)
if __name__ == "__main__":
unittest.main() |
get fitted params | """Delegator mixin that delegates all methods to wrapped classifiers.
Useful for building estimators where all but one or a few methods are delegated. For
that purpose, inherit from this estimator and then override only the methods that
are not delegated.
"""
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
__author__ = ["fkiraly"]
__all__ = ["_DelegatedClassifier"]
from sktime.classification.base import BaseClassifier
class _DelegatedClassifier(BaseClassifier):
"""Delegator mixin that delegates all methods to wrapped classifier.
Delegates inner classifier methods to a wrapped estimator.
Wrapped estimator is value of attribute with name self._delegate_name.
By default, this is "estimator_", i.e., delegates to self.estimator_
To override delegation, override _delegate_name attribute in child class.
Delegates the following inner underscore methods:
_fit, _predict, _predict_proba
Does NOT delegate get_params, set_params.
get_params, set_params will hence use one additional nesting level by default.
Does NOT delegate or copy tags, this should be done in a child class if required.
"""
# attribute for _DelegatedClassifier, which then delegates
# all non-overridden methods to getattr(self, _delegate_name)
# see further details in the _DelegatedClassifier docstring
_delegate_name = "estimator_"
def _get_delegate(self):
return getattr(self, self._delegate_name)
def _fit(self, X, y):
"""Fit time series classifier to training data.
private _fit containing the core logic, called from fit
Writes to self:
Sets fitted model attributes ending in "_".
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
y : 1D np.array of int, of shape [n_instances] - class labels for fitting
indices correspond to instance indices in X
Returns
-------
self : Reference to self.
"""
estimator = self._get_delegate()
estimator.fit(X=X, y=y)
return self
def _predict(self, X):
"""Predict labels for sequences in X.
private _predict containing the core logic, called from predict
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_"
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 1D np.array of int, of shape [n_instances] - predicted class labels
indices correspond to instance indices in X
"""
estimator = self._get_delegate()
return estimator.predict(X=X)
def _predict_proba(self, X):
"""Predicts labels probabilities for sequences in X.
private _predict_proba containing the core logic, called from predict_proba
State required:
Requires state to be "fitted".
Accesses in self:
Fitted model attributes ending in "_"
Parameters
----------
X : guaranteed to be of a type in self.get_tag("X_inner_mtype")
if self.get_tag("X_inner_mtype") = "numpy3D":
3D np.ndarray of shape = [n_instances, n_dimensions, series_length]
if self.get_tag("X_inner_mtype") = "nested_univ":
pd.DataFrame with each column a dimension, each cell a pd.Series
for list of other mtypes, see datatypes.SCITYPE_REGISTER
for specifications, see examples/AA_datatypes_and_datasets.ipynb
Returns
-------
y : 2D array of shape [n_instances, n_classes] - predicted class probabilities
1st dimension indices correspond to instance indices in X
2nd dimension indices correspond to possible labels (integers)
(i, j)-th entry is predictive probability that i-th instance is of class j
"""
estimator = self._get_delegate()
return estimator.predict_proba(X=X)
def METHOD_NAME(self):
"""Get fitted parameters.
private _get_fitted_params, called from get_fitted_params
State required:
Requires state to be "fitted".
Returns
-------
fitted_params : dict with str keys
fitted parameters, keyed by names of fitted parameter
"""
estimator = self._get_delegate()
return estimator.get_fitted_params() |
test case name too many chars | from django.test import TestCase
from dfirtrack_main.forms import CaseForm
from dfirtrack_main.models import Casepriority, Casestatus
class CaseFormTestCase(TestCase):
"""case form tests"""
def setUp(cls):
# create objects
Casepriority.objects.create(casepriority_name='casepriority_1')
Casestatus.objects.create(casestatus_name='casestatus_1')
def test_case_id_external_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_id_external'].label, 'Case external ID')
def test_case_name_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_name'].label, 'Case name (*)')
def test_case_is_incident_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_is_incident'].label, 'Is incident')
def test_case_note_analysisresult_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(
form.fields['case_note_analysisresult'].label, 'Analysis result'
)
def test_case_note_external_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_note_external'].label, 'External note')
def test_case_note_internal_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['case_note_internal'].label, 'Internal note')
def test_casepriority_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['casepriority'].label, 'Casepriority (*)')
def test_casestatus_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['casestatus'].label, 'Casestatus (*)')
def test_casetype_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(
form.fields['casetype'].empty_label, 'Select casetype (optional)'
)
def test_case_assigned_to_user_id_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(
form.fields['case_assigned_to_user_id'].label, 'Assigned to user'
)
def test_case_assigned_to_user_id_form_empty_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(
form.fields['case_assigned_to_user_id'].empty_label,
'Select user (optional)',
)
def test_tag_form_label(self):
"""test form label"""
# get object
form = CaseForm()
# compare
self.assertEqual(form.fields['tag'].label, 'Tags')
def test_case_form_empty(self):
"""test minimum form requirements / INVALID"""
# get object
form = CaseForm(data={})
# compare
self.assertFalse(form.is_valid())
def test_case_name_form_filled(self):
"""test minimum form requirements / IVVALID"""
# get object
form = CaseForm(
data={
'case_name': 'case_1',
}
)
# compare
self.assertFalse(form.is_valid())
def test_casepriority_form_filled(self):
"""test minimum form requirements / INVALID"""
# get objects
casepriority_1 = Casepriority.objects.get(casepriority_name='casepriority_1')
# get object
form = CaseForm(
data={
'case_name': 'case_1',
'casepriority': casepriority_1,
}
)
# compare
self.assertFalse(form.is_valid())
def test_casestatus_form_filled(self):
"""test minimum form requirements / VALID"""
# get objects
casepriority_1 = Casepriority.objects.get(casepriority_name='casepriority_1')
casestatus_1 = Casestatus.objects.get(casestatus_name='casestatus_1')
# get object
form = CaseForm(
data={
'case_name': 'case_1',
'casepriority': casepriority_1,
'casestatus': casestatus_1,
}
)
# compare
self.assertTrue(form.is_valid())
def test_case_name_proper_chars(self):
"""test for max length"""
# get objects
casepriority_1 = Casepriority.objects.get(casepriority_name='casepriority_1')
casestatus_1 = Casestatus.objects.get(casestatus_name='casestatus_1')
# get object
form = CaseForm(
data={
'case_name': 'd' * 255,
'casepriority': casepriority_1,
'casestatus': casestatus_1,
}
)
# compare
self.assertTrue(form.is_valid())
def METHOD_NAME(self):
"""test for max length"""
# get objects
casepriority_1 = Casepriority.objects.get(casepriority_name='casepriority_1')
casestatus_1 = Casestatus.objects.get(casestatus_name='casestatus_1')
# get object
form = CaseForm(
data={
'case_name': 'd' * 256,
'casepriority': casepriority_1,
'casestatus': casestatus_1,
}
)
# compare
self.assertFalse(form.is_valid()) |
compute scores | import numpy as np
from scipy import stats
from sklearn import metrics
import logging
logger = logging.getLogger(__name__)
# Calculate the P@topK, P@bottomK, and Kendall-Tau in predicted topK/bottomK
def p_at_tb_k(predict_scores, true_scores, ks=[1, 5, 10, 20, 25, 30, 50, 75, 100]):
# ratios=[0.001, 0.005, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0]):
predict_scores = np.array(predict_scores)
true_scores = np.array(true_scores)
predict_inds = np.argsort(predict_scores)[::-1]
num_archs = len(predict_scores)
true_ranks = np.zeros(num_archs)
true_ranks[np.argsort(true_scores)] = np.arange(num_archs)[::-1]
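# true_ranks[i] is the 0-based rank of architecture i under the true scores (0 = best, i.e. highest true score)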
patks = []
for k in ks:
# k = int(num_archs * ratio)
if k < 1:
continue
top_inds = predict_inds[:k]
bottom_inds = predict_inds[num_archs - k:]
p_at_topk = len(np.where(true_ranks[top_inds] < k)[0]) / float(k)
p_at_bottomk = len(
np.where(true_ranks[bottom_inds] >= num_archs - k)[0]) / float(k)
kd_at_topk = stats.kendalltau(
predict_scores[top_inds], true_scores[top_inds]).correlation
kd_at_bottomk = stats.kendalltau(
predict_scores[bottom_inds], true_scores[bottom_inds]).correlation
# [ratio, k, P@topK, P@bottomK, KT in predicted topK, KT in predicted bottomK]
patks.append((k / len(true_scores), k, p_at_topk,
p_at_bottomk, kd_at_topk, kd_at_bottomk))
return patks
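# example with hypothetical inputs: p_at_tb_k([0.9, 0.1, 0.5], [0.8, 0.2, 0.4], ks=[1])
# returns [(1/3, 1, 1.0, 1.0, nan, nan)]: the predicted top-1/bottom-1 match the true ranking,
# and Kendall-Tau over a single element is undefined (nan)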
# Calculate the BR@K, WR@K
def minmax_n_at_k(predict_scores, true_scores, ks=[1, 5, 10, 20, 25, 30, 50, 75, 100]):
true_scores = np.array(true_scores)
predict_scores = np.array(predict_scores)
num_archs = len(true_scores)
true_ranks = np.zeros(num_archs)
true_ranks[np.argsort(true_scores)] = np.arange(num_archs)[::-1]
predict_best_inds = np.argsort(predict_scores)[::-1]
minn_at_ks = []
for k in ks:
ranks = true_ranks[predict_best_inds[:k]]
if len(ranks) < 1:
continue
minn = int(np.min(ranks)) + 1
maxn = int(np.max(ranks)) + 1
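# minn/maxn: best (lowest) and worst (highest) 1-based true rank among the predicted top-k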
minn_at_ks.append((k, k, minn, float(minn) / num_archs,
maxn, float(maxn) / num_archs))
return minn_at_ks
def METHOD_NAME(ytest, test_pred):
ytest = np.array(ytest)
test_pred = np.array(test_pred)
METRICS = [
"mae",
"rmse",
"pearson",
"spearman",
"kendalltau",
"kt_2dec",
"kt_1dec",
"full_ytest",
"full_testpred",
]
metrics_dict = {}
try:
precision_k_metrics = p_at_tb_k(test_pred, ytest)
for metric in precision_k_metrics:
k, p_at_topk, kd_at_topk = metric[1], metric[2], metric[4]
metrics_dict[f'p_at_top{k}'] = p_at_topk
metrics_dict[f'kd_at_top{k}'] = kd_at_topk
best_k_metrics = minmax_n_at_k(test_pred, ytest)
for metric in best_k_metrics:
k, min_at_k = metric[1], metric[3]
metrics_dict[f'br_at_{k}'] = min_at_k
metrics_dict["mae"] = np.mean(abs(test_pred - ytest))
metrics_dict["rmse"] = metrics.mean_squared_error(
ytest, test_pred, squared=False
)
metrics_dict["pearson"] = np.abs(np.corrcoef(ytest, test_pred)[1, 0])
metrics_dict["spearman"] = stats.spearmanr(ytest, test_pred)[0]
metrics_dict["kendalltau"] = stats.kendalltau(ytest, test_pred)[0]
metrics_dict["kt_2dec"] = stats.kendalltau(
ytest, np.round(test_pred, decimals=2)
)[0]
metrics_dict["kt_1dec"] = stats.kendalltau(
ytest, np.round(test_pred, decimals=1)
)[0]
for k in [10, 20]:
top_ytest = np.array(
[y > sorted(ytest)[max(-len(ytest), -k - 1)] for y in ytest]
)
top_test_pred = np.array(
[
y > sorted(test_pred)[max(-len(test_pred), -k - 1)]
for y in test_pred
]
)
metrics_dict["precision_{}".format(k)] = (
sum(top_ytest & top_test_pred) / k
)
metrics_dict["full_ytest"] = ytest.tolist()
metrics_dict["full_testpred"] = test_pred.tolist()
except:
for metric in METRICS:
metrics_dict[metric] = float("nan")
if np.isnan(metrics_dict["pearson"]) or not np.isfinite(
metrics_dict["pearson"]
):
logger.info("Error when computing metrics. ytest and test_pred are:")
logger.info(ytest)
logger.info(test_pred)
return metrics_dict |
parse url | """
Faraday Penetration Test IDE
Copyright (C) 2019 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
"""
import json
import re
from urllib.parse import urlparse
from faraday_plugins.plugins.plugin import PluginJsonFormat
__author__ = "Nicolas Rebagliati"
__copyright__ = "Copyright (c) 2019, Infobyte LLC"
__credits__ = ["Nicolas Rebagliati"]
__license__ = ""
__version__ = "0.0.1"
__maintainer__ = "Nicolas Rebagliati"
__email__ = "[email protected]"
__status__ = "Development"
class WPScanJsonParser:
def __init__(self, json_output, resolve_hostname):
self.json_data = json.loads(json_output)
self.resolve_hostname = resolve_hostname
def METHOD_NAME(self, url):
# Strips protocol and gets hostname from URL.
url_parse = urlparse(url)
protocol = url_parse.scheme
hostname = url_parse.netloc
port = url_parse.port
if protocol == 'https':
port = 443
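# note: https URLs are always mapped to port 443 here, even if the URL specifies a different port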
elif protocol == 'http':
if not port:
port = 80
address = self.resolve_hostname(hostname)
return {'protocol': protocol, 'hostname': hostname, 'port': port, 'address': address}
class WPScanPlugin(PluginJsonFormat):
""" Handle the WPScan tool. Detects the output of the tool
and adds the information to Faraday.
"""
def __init__(self, *arg, **kwargs):
super().__init__(*arg, **kwargs)
self.id = "wpscan"
self.name = "WPscan"
self.plugin_version = "0.2"
self.version = "3.4.5"
self.json_keys = [{"target_url", "effective_url", "interesting_findings"},
{"target_url", "effective_url", "plugins"}]
self._command_regex = re.compile(r'^(sudo wpscan|wpscan)\s+.*?')
self._use_temp_file = True
self._temp_file_extension = "json"
def parseOutputString(self, output):
parser = WPScanJsonParser(output, self.resolve_hostname)
url_data = parser.METHOD_NAME(parser.json_data['target_url'])
host_id = self.createAndAddHost(url_data['address'], hostnames=[url_data['hostname']])
service_id = self.createAndAddServiceToHost(
host_id,
"WordPress",
url_data['protocol'],
ports=[url_data['port']],
status='open',
version='',
description='')
for user, data in parser.json_data.get('users', {}).items():
self.createAndAddCredToService(host_id, service_id, user, "")
main_theme = parser.json_data.get("main_theme", {})
if main_theme:
for vuln in main_theme.get("vulnerabilities", []):
wpvulndb = ",".join(vuln['references'].get('wpvulndb', []))
self.createAndAddVulnWebToService(host_id, service_id, vuln['title'], ref=vuln['references'].get('url', []),
severity='unclassified', external_id=wpvulndb)
for plugin, plugin_data in parser.json_data.get("plugins", {}).items():
for vuln in plugin_data['vulnerabilities']:
wpvulndb = ",".join(vuln['references'].get('wpvulndb', []))
cve = ["CVE-"+ cve for cve in vuln['references'].get('cve')] if vuln['references'].get('cve') else []
self.createAndAddVulnWebToService(host_id, service_id, f"{plugin}: {vuln['title']}",
ref=vuln['references'].get('url', []),
severity='unclassified', external_id=wpvulndb, cve=cve)
for vuln in parser.json_data.get("interesting_findings", []):
if vuln['to_s'].startswith('http'):
vuln_name = f"{vuln['type']}: {vuln['to_s']}"
else:
vuln_name = vuln['to_s']
self.createAndAddVulnWebToService(host_id, service_id, vuln_name, ref=vuln['references'].get('url', []),
severity='unclassified')
def processCommandString(self, username, current_path, command_string):
"""
Adds the path to a temporary file parameter to get .json output to the command string that the
user has set.
"""
super().processCommandString(username, current_path, command_string)
command_string += f" --output={self._output_file_path} --format json"
return command_string
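    # Example with hypothetical values: for the user command
    #   wpscan --url http://target
    # the string returned above becomes roughly
    #   wpscan --url http://target --output=/tmp/<tmpfile>.json --format json
    # where the temporary output path is supplied by the plugin base class.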
def createPlugin(*args, **kwargs):
return WPScanPlugin(*args, **kwargs) |
repr | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import itertools
from PyQt5 import QtCore, QtWidgets
import mooseutils
from .PostprocessorPlugin import PostprocessorPlugin
from .LineGroupWidget import LineGroupWidget
class PostprocessorSelectPlugin(QtWidgets.QWidget, PostprocessorPlugin):
"""
Widget that contains the toggles for plotting the individual postprocessor data.
This builds a scrollable box containing LineGroupWidget objects, these toggles control the visibility
and style of the postprocessor line plots.
"""
#: pyqtSignal: Emitted when plot is refreshed, contains the x/y/y2 axis variable names
variablesChanged = QtCore.pyqtSignal(list, list, list)
#: pyqtSignal: Emitted when the LineGroupWidgets change the plot.
axesModified = QtCore.pyqtSignal()
def __init__(self):
super(PostprocessorSelectPlugin, self).__init__()
# Setup this widget
policy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
policy.setVerticalStretch(100) # We want this widget to be as big as possible vertically
self.setSizePolicy(policy)
# An iteratable color cycle for setting the default line style and color
self.color_cycle = None
# Member variables
self._groups = [] # list of ListGroupWidget objects
# The box and layout that will contain the line toggles
self.LineGroups = QtWidgets.QFrame()
self.LineGroupsLayout = QtWidgets.QVBoxLayout()
        self.LineGroupsLayout.setSpacing(10)
        self.LineGroupsLayout.setContentsMargins(0, 10, 10, 0)
self.LineGroups.setLayout(self.LineGroupsLayout)
# Creates the area that will be scrollable
self.Scroll = QtWidgets.QScrollArea()
self.Scroll.setWidget(self.LineGroups)
# Main layout to contain the scroll area
self.MainLayout = QtWidgets.QVBoxLayout()
self.MainLayout.setContentsMargins(0, 10, 0, 10)
self.MainLayout.addWidget(self.Scroll)
self.setLayout(self.MainLayout)
# Call the setup methods
self.setup()
def onSetData(self, data):
"""
Called when new data is being supplied to the widget.
Args:
data[list]: A list of PostprocessorDataWidget files.
"""
# Remove existing widgets
current_groups = {}
filenames = [d.filename() for d in data]
for group in self._groups:
group.clear()
if group.filename() not in filenames:
self.LineGroupsLayout.removeWidget(group)
group.setParent(None)
group.disconnect()
else:
current_groups[group.filename()] = group
self._groups = []
self.color_cycle = itertools.product(['-', '--', '-.', ':'], plt.cm.Paired(np.linspace(0, 1, 11)))
# Create the group widgets for each available variable
for d in data:
if d.filename() in current_groups and not current_groups[d.filename()].sameData(d):
group = current_groups[d.filename()]
self.LineGroupsLayout.removeWidget(group)
group.setParent(None)
group.disconnect()
self._newGroup(d)
elif d.filename() in current_groups:
group = current_groups[d.filename()]
group.setData(self.axes(), d)
self._groups.append(group)
self.updateVariables()
else:
self._newGroup(d)
self.updateGeometry()
def _newGroup(self, d):
group = LineGroupWidget(self.axes(), d, self.color_cycle)
self.LineGroupsLayout.addWidget(group)
self._groups.append(group)
group.initialized.connect(self.updateGeometry)
group.variablesChanged.connect(self.updateVariables)
group.axesModified.connect(self.axesModified)
def onTimeChanged(self, time):
"""
Update the time in the GroupLineWidgets.
"""
for group in self._groups:
group.plot(time=time)
def onCurrentChanged(self, index):
"""
Enables/disables the update timer base on the active state of the tab.
"""
active = self._index == index
for group in self._groups:
group._data.setTimerActive(active)
@QtCore.pyqtSlot()
def updateVariables(self):
"""
Updates the complete list of active variables for x/y axis labels.
"""
n = len(self._groups)
x_vars = [[]]*n
y_vars = [[]]*n
y2_vars = [[]]*n
for i in range(n):
group = self._groups[i]
if group.isValid():
x, y, y2 = group.getAxisLabels()
x_vars[i] = [x]
y_vars[i] = y
y2_vars[i] = y2
self.variablesChanged.emit(x_vars, y_vars, y2_vars)
def METHOD_NAME(self):
"""
Produce the script items for this widget.
"""
output = []
imports = []
for group in self._groups:
out, imp = group.METHOD_NAME()
output += out
imports += imp
return output, imports
def _setupScroll(self, qobject):
"""
Setup method for the scroll area widget.
"""
qobject.setWidgetResizable(True)
qobject.setFrameShape(QtWidgets.QFrame.NoFrame)
qobject.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def _setupLineToggleGroupBox(self, qobject):
"""
Setup method for the group box storing the line toggle widgets.
"""
qobject.setAutoFillBackground(True)
def main(filenames, reader=mooseutils.VectorPostprocessorReader):
"""
Create widgets for running PostprocessorSelectPlugin
"""
"""
Run FigurePlugin by itself.
"""
from ..PostprocessorViewer import PostprocessorViewer
from .FigurePlugin import FigurePlugin
import matplotlib
matplotlib.rcParams["figure.figsize"] = (6.25, 6.25)
matplotlib.rcParams["figure.dpi"] = (100)
widget = PostprocessorViewer(reader, timeout=None, plugins=[FigurePlugin, PostprocessorSelectPlugin])
widget.onSetFilenames(filenames)
control = widget.currentWidget().PostprocessorSelectPlugin
window = widget.currentWidget().FigurePlugin
window.setFixedSize(QtCore.QSize(625, 625))
widget.show()
return control, widget, window
if __name__ == '__main__':
app = QtWidgets.QApplication(sys.argv)
filenames = ['../../tests/input/vpp_*.csv']
_, widget, _ = main(filenames)
app.exec_()
os.remove('tmp_001.csv') |
find | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
from robot.model import SuiteVisitor
from robot.utils import html_escape, test_or_task
class Merger(SuiteVisitor):
def __init__(self, result, rpa=False):
self.result = result
self.current = None
self.rpa = rpa
def merge(self, merged):
self.result.set_execution_mode(merged)
merged.suite.visit(self)
self.result.errors.add(merged.errors)
def start_suite(self, suite):
if self.current is None:
old = self._find_root(suite.name)
else:
old = self.METHOD_NAME(self.current.suites, suite.name)
if old is not None:
old.starttime = old.endtime = None
old.doc = suite.doc
old.metadata.update(suite.metadata)
old.setup = suite.setup
old.teardown = suite.teardown
self.current = old
else:
suite.message = self._create_add_message(suite, suite=True)
self.current.suites.append(suite)
return old is not None
def _find_root(self, name):
root = self.result.suite
if root.name != name:
raise DataError(f"Cannot merge outputs containing different root suites. "
f"Original suite is '{root.name}' and merged is '{name}'.")
return root
def METHOD_NAME(self, items, name):
for item in items:
if item.name == name:
return item
return None
def end_suite(self, suite):
self.current = self.current.parent
def visit_test(self, test):
old = self.METHOD_NAME(self.current.tests, test.name)
if old is None:
test.message = self._create_add_message(test)
self.current.tests.append(test)
elif test.skipped:
old.message = self._create_skip_message(old, test)
else:
test.message = self._create_merge_message(test, old)
index = self.current.tests.index(old)
self.current.tests[index] = test
def _create_add_message(self, item, suite=False):
item_type = 'Suite' if suite else test_or_task('Test', self.rpa)
prefix = f'*HTML* {item_type} added from merged output.'
if not item.message:
return prefix
return ''.join([prefix, '<hr>', self._html(item.message)])
def _html(self, message):
if message.startswith('*HTML*'):
return message[6:].lstrip()
return html_escape(message)
def _create_merge_message(self, new, old):
header = (f'*HTML* <span class="merge">{test_or_task("Test", self.rpa)} '
f'has been re-executed and results merged.</span>')
return ''.join([
header,
'<hr>',
self._format_status_and_message('New', new),
'<hr>',
self._format_old_status_and_message(old, header)
])
def _format_status_and_message(self, state, test):
msg = f'{self._status_header(state)} {self._status_text(test.status)}<br>'
if test.message:
msg += f'{self._message_header(state)} {self._html(test.message)}<br>'
return msg
def _status_header(self, state):
return f'<span class="{state.lower()}-status">{state} status:</span>'
def _status_text(self, status):
return f'<span class="{status.lower()}">{status}</span>'
def _message_header(self, state):
return f'<span class="{state.lower()}-message">{state} message:</span>'
def _format_old_status_and_message(self, test, merge_header):
if not test.message.startswith(merge_header):
return self._format_status_and_message('Old', test)
status_and_message = test.message.split('<hr>', 1)[1]
return (
status_and_message
.replace(self._status_header('New'), self._status_header('Old'))
.replace(self._message_header('New'), self._message_header('Old'))
)
def _create_skip_message(self, test, new):
msg = (f'*HTML* {test_or_task("Test", self.rpa)} has been re-executed and '
f'results merged. Latter result had {self._status_text("SKIP")} status '
f'and was ignored. Message:\n{self._html(new.message)}')
if test.message:
msg += f'<hr>Original message:\n{self._html(test.message)}'
return msg |
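# Rough usage sketch (object names are illustrative only): the merger visits a
# re-execution result and folds it into the original one,
#
#     merger = Merger(original_result, rpa=False)
#     merger.merge(reexecution_result)
#
# after which original_result contains the merged suites/tests and the
# *HTML* merge messages built by the helpers above.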
do show | # Copyright (C) 2013 Kristoffer Gronlund <[email protected]>
# See COPYING for license information.
import os
from . import command
from . import completers
from . import utils
from . import corosync
from . import parallax
from . import bootstrap
from . import log
logger = log.setup_logger(__name__)
def _push_completer(args):
try:
n = utils.list_cluster_nodes()
n.remove(utils.this_node())
if args[-1] in n:
# continue complete
return [args[-1]]
for item in args:
if item in n:
n.remove(item)
return n
    except Exception:
        # Completion must never raise; fall back to an empty candidate list
        # instead of implicitly returning None.
        return []
def _diff_nodes(args):
try:
if len(args) > 3:
return []
n = utils.list_cluster_nodes()
if args[-1] in n:
# continue complete
return [args[-1]]
for item in args:
if item in n:
# remove already complete item
n.remove(item)
return n
    except Exception:
return []
class Corosync(command.UI):
'''
Corosync is the underlying messaging layer for most HA clusters.
This level provides commands for editing and managing the corosync
configuration.
'''
name = "corosync"
def requires(self):
return corosync.check_tools()
@command.completers(completers.choice(['ring', 'quorum', 'qdevice', 'qnetd']))
def do_status(self, context, status_type="ring"):
'''
Quick cluster health status. Corosync status or QNetd status
'''
if not utils.service_is_active("corosync.service"):
logger.error("corosync.service is not running!")
return False
try:
corosync.query_status(status_type)
except ValueError as err:
logger.error(str(err))
return False
@command.skill_level('administrator')
def do_reload(self, context):
'''
Reload the corosync configuration
'''
return corosync.cfgtool('-R')[0] == 0
@command.skill_level('administrator')
@command.completers_repeating(_push_completer)
def do_push(self, context, *nodes):
'''
Push corosync configuration to other cluster nodes.
If no nodes are provided, configuration is pushed to
all other cluster nodes.
'''
if not nodes:
nodes = utils.list_cluster_nodes()
nodes.remove(utils.this_node())
return corosync.push_configuration(nodes)
@command.skill_level('administrator')
@command.completers(_push_completer)
def do_pull(self, context, node):
'''
Pull corosync configuration from another node.
'''
return corosync.pull_configuration(node)
@command.completers_repeating(_diff_nodes)
def do_diff(self, context, *nodes):
'''
Compare corosync configuration between nodes.
'''
checksum = False
if nodes and nodes[0] == '--checksum':
checksum = True
nodes = nodes[1:]
if not nodes:
nodes = utils.list_cluster_nodes()
return corosync.diff_configuration(nodes, checksum=checksum)
@command.skill_level('administrator')
def do_edit(self, context):
'''
Edit the corosync configuration.
'''
cfg = corosync.conf()
try:
utils.edit_file_ext(cfg, template='')
except IOError as e:
context.fatal_error(str(e))
def METHOD_NAME(self, context):
'''
Display the corosync configuration.
'''
cfg = corosync.conf()
if not os.path.isfile(cfg):
context.fatal_error("No corosync configuration found on this node.")
        with open(cfg) as f:
            utils.page_string(f.read())
def do_log(self, context):
'''
Display the corosync log file (if any).
'''
logfile = corosync.get_value('logging.logfile')
if not logfile:
context.fatal_error("No corosync log file configured")
utils.page_file(logfile)
@command.skill_level('administrator')
def do_get(self, context, path):
"Get a corosync configuration value"
for v in corosync.get_values(path):
print(v)
@command.skill_level('administrator')
def do_set(self, context, path, value, index: int = 0):
"Set a corosync configuration value"
corosync.set_value(path, value, index) |
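    # Illustrative shell usage of the two commands above (the dotted path and
    # the value are examples only; any corosync.conf path works):
    #
    #   crm corosync get totem.token
    #   crm corosync set totem.token 5000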
validate json | from pysys.basetest import BaseTest
import time
import json
"""
Validate tedge-mapper-collectd messages that are published
on tedge/measurements
Given a configured system
When we start the tedge-mapper-collectd with sudo in the background
When we start tedge sub with sudo in the background
When we start two publishers to publish the simulated collectd messages
Publish the messages in 100ms interval
Wait for couple of seconds to publish couple of batch of messages
Then we kill tedge sub with sudo as it is running with a different user account
Then we validate the messages in the output of tedge sub,
"""
class MonitoringSmallInterval(BaseTest):
def setup(self):
self.js_msg = ""
self.time_cnt = 0
self.temp_cnt = 0
self.pres_cnt = 0
self.tedge = "/usr/bin/tedge"
self.sudo = "/usr/bin/sudo"
# stop collectd to avoid mixup of messages
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "collectd"],
stdouterr="collectd",
)
collectd_mapper = self.startProcess(
command=self.sudo,
arguments=["systemctl", "start", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
)
self.addCleanupFunction(self.monitoring_cleanup)
def execute(self):
sub = self.startProcess(
command=self.sudo,
arguments=[self.tedge, "mqtt", "sub", "--no-topic", "tedge/#"],
stdouterr="tedge_sub",
background=True,
)
# Wait for a small amount of time to give tedge sub time
# to initialize. This is a heuristic measure.
# Without an additional wait we observe failures in 1% of the test
# runs.
time.sleep(0.1)
for _ in range(10):
timestamp = time.time()
pub = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"mqtt",
"pub",
"collectd/host/temperature/temp",
f"{timestamp}:25.5",
],
stdouterr="tedge_temp",
)
timestamp = time.time()
pub = self.startProcess(
command=self.sudo,
arguments=[
self.tedge,
"mqtt",
"pub",
"collectd/host/pressure/pres",
f"{timestamp}:500.5",
],
stdouterr="tedge_pres",
)
# publish every 100ms
time.sleep(0.1)
# wait for tedge-mapper-collectd to batch messages
time.sleep(1)
# Kill the subscriber process explicitly with sudo as PySys does
# not have the rights to do it
kill = self.startProcess(
command=self.sudo,
arguments=["killall", "tedge"],
stdouterr="kill_out",
)
def validate(self):
self.assertThat(
"collectd_msg_validation_result == expected_result",
collectd_msg_validation_result=self.METHOD_NAME(),
expected_result=True,
)
def METHOD_NAME(self):
        with open(self.output + "/tedge_sub.out", "r") as f:
            lines = f.readlines()
for line in lines:
self.log.info(line)
self.js_msg = json.loads(line)
if not self.validate_time():
reason = "time validation failed in message: " + str(line)
self.abort(False, reason)
if "temperature" in self.js_msg:
if not self.validate_temperature():
reason = "temperature stat validation failed in message: " + str(
line
)
self.abort(False, reason)
if "pressure" in self.js_msg:
if not self.validate_pressure():
reason = "pressure stat validation failed in message: " + str(line)
self.abort(False, reason)
if self.time_cnt >= 10 and self.temp_cnt == 10 and self.pres_cnt == 10:
return True
else:
return False
def validate_time(self):
if self.js_msg["time"]:
self.time_cnt += 1
return True
else:
return False
def validate_temperature(self):
if self.js_msg["temperature"]:
if "temp" in self.js_msg["temperature"]:
self.temp_cnt += 1
return True
else:
return False
else:
return False
def validate_pressure(self):
if self.js_msg["pressure"]:
if "pres" in self.js_msg["pressure"]:
self.pres_cnt += 1
return True
else:
return False
else:
return False
def monitoring_cleanup(self):
self.log.info("monitoring_cleanup")
collectd = self.startProcess(
command=self.sudo,
arguments=["systemctl", "stop", "tedge-mapper-collectd"],
stdouterr="collectd_mapper",
) |
sample predictor configs | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume, reproduce_failure
import hypothesis.strategies as st
import numpy as np
from functools import partial
from test_elementwise_add_op import check_broadcast
class TestElementwiseMaxOp(AutoScanTest):
def __init__(self, *args, **kwargs):
AutoScanTest.__init__(self, *args, **kwargs)
self.enable_testing_on_place(
TargetType.X86,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
self.enable_testing_on_place(
TargetType.ARM,
PrecisionType.FP32,
DataLayoutType.NCHW,
thread=[1, 4])
opencl_valid_places = [
Place(TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.FP16,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.FP32, DataLayoutType.NCHW),
Place(TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageDefault), Place(
TargetType.OpenCL, PrecisionType.Any,
DataLayoutType.ImageFolder),
Place(TargetType.OpenCL, PrecisionType.Any, DataLayoutType.NCHW),
Place(TargetType.Host, PrecisionType.FP32)
]
self.enable_testing_on_place(places=opencl_valid_places)
self.enable_testing_on_place(TargetType.NNAdapter, PrecisionType.FP32)
self.enable_devices_on_nnadapter(
device_names=["kunlunxin_xtcl", "intel_openvino"])
def is_program_valid(self,
program_config: ProgramConfig,
predictor_config: CxxConfig) -> bool:
target_type = predictor_config.target()
input_data_type = program_config.inputs["input_data_x"].dtype
# Check config
if target_type in [TargetType.ARM]:
if predictor_config.precision(
) == PrecisionType.FP16 and input_data_type != np.float32:
return False
return True
def sample_program_configs(self, draw):
input_data_x_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=1, max_size=4))
input_data_y_shape = draw(
st.lists(
st.integers(
min_value=1, max_value=20), min_size=1, max_size=4))
input_data_x_shape = draw(st.sampled_from([input_data_x_shape, []]))
input_data_y_shape = draw(st.sampled_from([input_data_y_shape, []]))
axis = draw(st.integers(min_value=-1, max_value=4))
assume(
check_broadcast(input_data_x_shape, input_data_y_shape, axis) ==
True)
if axis < 0:
axis = abs(len(input_data_x_shape) - len(
input_data_y_shape)) + axis + 1
if self.get_target().upper() == 'X86':
input_data_type = draw(
st.sampled_from([np.float32, np.int32, np.int64]))
elif self.get_target() == 'NNAdapter':
input_data_type = draw(
st.sampled_from([np.float32, np.int32, np.int64]))
elif self.get_target().upper() == 'ARM':
input_data_type = draw(
st.sampled_from([np.float32, np.int32, np.int64]))
elif self.get_target().upper() == 'OPENCL':
input_data_type = draw(st.sampled_from([np.float32]))
elif self.get_target().upper() == 'METAL':
input_data_type = draw(st.sampled_from([np.float32]))
else:
input_data_type = draw(st.sampled_from([np.float32]))
def gen_input_data(*args, **kwargs):
return np.random.randint(
1, 20, size=(kwargs['shape'])).astype(kwargs['dtype'])
elementwise_max_op = OpConfig(
type="elementwise_max",
inputs={"X": ["input_data_x"],
"Y": ["input_data_y"]},
outputs={"Out": ["output_data"]},
attrs={"axis": axis})
program_config = ProgramConfig(
ops=[elementwise_max_op],
weights={},
inputs={
"input_data_x": TensorConfig(data_gen=partial(
gen_input_data,
shape=input_data_x_shape,
dtype=input_data_type)),
"input_data_y": TensorConfig(data_gen=partial(
gen_input_data,
shape=input_data_y_shape,
dtype=input_data_type))
},
outputs=["output_data"])
return program_config
def METHOD_NAME(self):
return self.get_predictor_configs(), ["elementwise_max"], (1e-5, 1e-5)
def add_ignore_pass_case(self):
def _teller3(program_config, predictor_config):
target_type = predictor_config.target()
in_x_shape = list(program_config.inputs["input_data_x"].shape)
in_y_shape = list(program_config.inputs["input_data_y"].shape)
if target_type not in [
TargetType.ARM, TargetType.Host, TargetType.X86,
TargetType.Metal, TargetType.OpenCL
]:
if len(in_x_shape) == 0 or len(in_y_shape) == 0:
return True
self.add_ignore_check_case(
_teller3, IgnoreReasons.PADDLELITE_NOT_SUPPORT,
"0D-tensor is not supported on this target now.")
def test(self, *args, **kwargs):
self.run_and_statis(quant=False, max_examples=300)
if __name__ == "__main__":
unittest.main(argv=['']) |
set from tanwcs | from django.db import models
#from astrometry.util import sip, starutil
import math
class TanWCS(models.Model):
crval1 = models.FloatField()
crval2 = models.FloatField()
crpix1 = models.FloatField()
crpix2 = models.FloatField()
cd11 = models.FloatField()
cd12 = models.FloatField()
cd21 = models.FloatField()
cd22 = models.FloatField()
imagew = models.FloatField()
imageh = models.FloatField()
# Reverse mappings:
# calibrations_raw -> Calibration
# calibrations_tweaked -> Calibration
def __repr__(self):
return (('TanWCS(crval1=%.18g, crval2=%.18g, crpix1=%.18g, crpix2=%.18g, ' +
'cd11=%.18g, cd12=%.18g, cd21=%.18g, cd22=%.18g, ' +
'imagew=%.18g, imageh=%.18g, id=%i)') %
(self.crval1, self.crval2, self.crpix1, self.crpix2,
self.cd11, self.cd12, self.cd21, self.cd22, self.imagew, self.imageh,
self.id))
def __init__(self, *args, **kwargs):
filename = None
if 'file' in kwargs:
filename = kwargs['file']
del kwargs['file']
super(TanWCS, self).__init__(*args, **kwargs)
if filename:
#wcs = sip.Tan(filename)
#self.set_from_tanwcs(wcs)
pass
def get_center_radecradius(self):
wcs = self.to_tanwcs()
#print 'WCS:', wcs
ra,dec = wcs.radec_center()
radius = self.get_radius()
return ra,dec,radius
def get_radius(self):
wcs = self.to_tanwcs()
return (wcs.pixel_scale() *
math.hypot(wcs.imagew, wcs.imageh)/2. / 3600.)
# from anutil.Tan
def METHOD_NAME(self, wcs):
self.crval1 = wcs.crval[0]
self.crval2 = wcs.crval[1]
self.crpix1 = wcs.crpix[0]
self.crpix2 = wcs.crpix[1]
self.cd11 = wcs.cd[0]
self.cd12 = wcs.cd[1]
self.cd21 = wcs.cd[2]
self.cd22 = wcs.cd[3]
self.imagew = wcs.imagew
self.imageh = wcs.imageh
def __str__(self):
return ('<TanWCS: CRVAL (%f, %f)' % (self.crval1, self.crval2) +
' CRPIX (%f, %f)' % (self.crpix1, self.crpix2) +
' CD (%f, %f; %f %f)' % (self.cd11, self.cd12, self.cd21, self.cd22) +
' Image size (%f, %f)>' % (self.imagew, self.imageh)
)
def _det_cd(self):
return self.cd11 * self.cd22 - self.cd12 * self.cd21
# returns pixel scale in arcseconds per pixel
def get_pixscale(self):
return 3600.0 * math.sqrt(abs(self._det_cd()))
def get_orientation(self):
# From sip.c:
det = self._det_cd()
if det >= 0:
parity = 1.
else:
parity = -1.
T = parity * self.cd11 + self.cd22
A = parity * self.cd21 - self.cd12
orient = -math.degrees(math.atan2(A, T))
return orient
def get_parity(self):
# From sip.c:
det = self._det_cd()
if det >= 0:
parity = 1.
else:
parity = -1.
return parity
# returns the field area in square degrees.
def get_field_area(self):
scale = self.get_pixscale() / 3600.0
return self.imagew * self.imageh * (scale**2)
def get_field_radius(self):
area = self.get_field_area()
        return math.sqrt(area) / 2.
def get_field_bounding_radius(self):
        tan = self.to_tanwcs()
        # starutil is needed below but the module-level import is commented out,
        # so import it locally (mirroring the local import in to_tanwcs).
        from astrometry.util import starutil
(ra0,dec0) = tan.pixelxy2radec(self.imagew/2. - 0.5, self.imageh/2. - 0.5)
(ra1,dec1) = tan.pixelxy2radec(1., 1.)
xyz0 = starutil.radectoxyz(ra0,dec0)
xyz1 = starutil.radectoxyz(ra1,dec1)
d2 = (xyz0[0]-xyz1[0])**2 + (xyz0[1]-xyz1[1])**2 + (xyz0[2]-xyz1[2])**2
return starutil.rad2deg(starutil.distsq2rad(d2))
# returns (ra,dec) in degrees
def get_field_center(self):
tan = self.to_tanwcs()
return tan.pixelxy2radec(self.imagew/2. - 0.5, self.imageh/2. - 0.5)
# returns (w, h, units)
def get_field_size(self):
scale = self.get_pixscale()
(fieldw, fieldh) = (self.imagew * scale, self.imageh * scale)
units = 'arcsec'
if min(fieldw, fieldh) > 3600:
fieldw /= 3600.
fieldh /= 3600.
units = 'deg'
elif min(fieldw, fieldh) > 60:
fieldw /= 60.
fieldh /= 60.
units = 'arcmin'
return (fieldw, fieldh, units)
def radec_bounds(self, nsteps=10):
tanwcs = self.to_tanwcs()
return tanwcs.radec_bounds(nsteps)
def to_tanwcs(self):
from astrometry.util import util as anutil
tan = anutil.Tan(self.crval1, self.crval2, self.crpix1, self.crpix2,
self.cd11, self.cd12, self.cd21, self.cd22,
self.imagew, self.imageh)
return tan
'''
tan = anutil.Tan()
tan.crval[0] = self.crval1
tan.crval[1] = self.crval2
tan.crpix[0] = self.crpix1
tan.crpix[1] = self.crpix2
tan.cd[0] = self.cd11
tan.cd[1] = self.cd12
tan.cd[2] = self.cd21
tan.cd[3] = self.cd22
tan.imagew = self.imagew
tan.imageh = self.imageh
return tan
'''
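# Quick usage sketch (illustrative header values, not from the original code):
#
#     w = TanWCS(crval1=150.0, crval2=2.2, crpix1=512.0, crpix2=512.0,
#                cd11=-2.8e-4, cd12=0.0, cd21=0.0, cd22=2.8e-4,
#                imagew=1024, imageh=1024)
#     w.get_pixscale()     # 3600 * sqrt(|det CD|) ~= 1.0 arcsec/pixel here
#     w.get_field_size()   # (width, height, units)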
class SipWCS(models.Model):
tan = models.OneToOneField(TanWCS, on_delete=models.CASCADE)
order = models.PositiveSmallIntegerField(default=2)
aterms = models.TextField(default='')
bterms = models.TextField(default='')
apterms = models.TextField(default='')
bpterms = models.TextField(default='')
def __init__(self, *args, **kwargs):
filename = None
if 'file' in kwargs:
filename = kwargs['file']
del kwargs['file']
tan = TanWCS()
tan.save()
kwargs['tan'] = tan
super(SipWCS, self).__init__(*args, **kwargs)
if filename:
            from astrometry.util import sip  # module-level import is commented out above
            wcs = sip.Sip(filename)
self.set_from_sipwcs(wcs)
def set_from_sipwcs(self, wcs):
self.tan.METHOD_NAME(wcs.wcstan)
self.aterms = ', '.join(['%i:%i:%g' % (i,j,c)
for (i, j, c) in wcs.get_nonzero_a_terms()])
self.bterms = ', '.join(['%i:%i:%g' % (i,j,c)
for (i, j, c) in wcs.get_nonzero_b_terms()])
self.apterms = ', '.join(['%i:%i:%g' % (i,j,c)
for (i, j, c) in wcs.get_nonzero_ap_terms()])
self.bpterms = ', '.join(['%i:%i:%g' % (i,j,c)
for (i, j, c) in wcs.get_nonzero_bp_terms()])
    def to_sipwcs(self):
        # The module-level "from astrometry.util import sip" is commented out
        # above, and the original "sip = sip.Sip()" shadowed the module name,
        # raising UnboundLocalError. Import locally and use a distinct name.
        from astrometry.util import sip as sip_module
        wcs = sip_module.Sip()
        wcs.tan = self.tan.to_tanwcs()
        terms = []
        for s in self.aterms.split(', '):
            ss = s.split(':')
            terms.append((int(ss[0]), int(ss[1]), float(ss[2])))
        wcs.set_a_terms(terms)
        terms = []
        for s in self.bterms.split(', '):
            ss = s.split(':')
            terms.append((int(ss[0]), int(ss[1]), float(ss[2])))
        wcs.set_b_terms(terms)
        terms = []
        for s in self.apterms.split(', '):
            ss = s.split(':')
            terms.append((int(ss[0]), int(ss[1]), float(ss[2])))
        wcs.set_ap_terms(terms)
        terms = []
        for s in self.bpterms.split(', '):
            ss = s.split(':')
            terms.append((int(ss[0]), int(ss[1]), float(ss[2])))
        wcs.set_bp_terms(terms)
        return wcs
test save with parameter of run type | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import pytest
import random
import string
from mock import MagicMock, Mock, patch
from sagemaker.experiments.experiment import Experiment
from sagemaker.experiments.run import Run
from sagemaker.experiments.trial import _Trial
from sagemaker.experiments.trial_component import _TrialComponent
from sagemaker.remote_function.core.stored_function import StoredFunction
from sagemaker.remote_function.core.serialization import deserialize_obj_from_s3
from sagemaker.remote_function.errors import SerializationError
from tests.unit.sagemaker.experiments.helpers import (
TEST_EXP_DISPLAY_NAME,
TEST_EXP_NAME,
TEST_RUN_DISPLAY_NAME,
TEST_RUN_NAME,
mock_tc_load_or_create_func,
mock_trial_load_or_create_func,
)
KMS_KEY = "kms-key"
HMAC_KEY = "some-hmac-key"
mock_s3 = {}
def random_s3_uri():
return "".join(random.choices(string.ascii_uppercase + string.digits, k=10))
def upload_bytes(b, s3_uri, kms_key=None, sagemaker_session=None):
assert kms_key == KMS_KEY
mock_s3[s3_uri] = b
def read_bytes(s3_uri, sagemaker_session=None):
return mock_s3[s3_uri]
def quadratic(x=2, *, a=1, b=0, c=0):
return a * x * x + b * x + c
def log_bigger(a, b, run: Run):
if a >= b:
run.log_metric("bigger", a)
else:
run.log_metric("bigger", b)
@pytest.mark.parametrize(
"args, kwargs",
[([], {}), ([3], {}), ([], {"a": 2, "b": 1, "c": 1})],
)
@patch("sagemaker.s3.S3Uploader.upload_bytes", new=upload_bytes)
@patch("sagemaker.s3.S3Downloader.read_bytes", new=read_bytes)
@patch("sagemaker.s3.S3Uploader.upload")
@patch("sagemaker.s3.S3Downloader.download")
def test_save_and_load(s3_source_dir_download, s3_source_dir_upload, args, kwargs):
session = Mock()
s3_base_uri = random_s3_uri()
stored_function = StoredFunction(
sagemaker_session=session, s3_base_uri=s3_base_uri, s3_kms_key=KMS_KEY, hmac_key=HMAC_KEY
)
stored_function.save(quadratic, *args, **kwargs)
stored_function.load_and_invoke()
assert deserialize_obj_from_s3(
session, s3_uri=f"{s3_base_uri}/results", hmac_key=HMAC_KEY
) == quadratic(*args, **kwargs)
@patch(
"sagemaker.experiments.run.Experiment._load_or_create",
MagicMock(return_value=Experiment(experiment_name=TEST_EXP_NAME)),
)
@patch(
"sagemaker.experiments.run._Trial._load_or_create",
MagicMock(side_effect=mock_trial_load_or_create_func),
)
@patch.object(_Trial, "add_trial_component", MagicMock(return_value=None))
@patch(
"sagemaker.experiments.run._TrialComponent._load_or_create",
MagicMock(side_effect=mock_tc_load_or_create_func),
)
@patch("sagemaker.s3.S3Uploader.upload_bytes", new=upload_bytes)
@patch("sagemaker.s3.S3Downloader.read_bytes", new=read_bytes)
@patch.object(_TrialComponent, "save")
@patch("sagemaker.s3.S3Uploader.upload")
@patch("sagemaker.s3.S3Downloader.download")
def METHOD_NAME(
s3_source_dir_download, s3_source_dir_upload, mock_tc_save
):
session = Mock()
s3_base_uri = random_s3_uri()
session.sagemaker_client.search.return_value = {"Results": []}
run = Run(
experiment_name=TEST_EXP_NAME,
run_name=TEST_RUN_NAME,
experiment_display_name=TEST_EXP_DISPLAY_NAME,
run_display_name=TEST_RUN_DISPLAY_NAME,
sagemaker_session=session,
)
stored_function = StoredFunction(
sagemaker_session=session, s3_base_uri=s3_base_uri, s3_kms_key=KMS_KEY, hmac_key=HMAC_KEY
)
with pytest.raises(SerializationError) as e:
stored_function.save(log_bigger, 1, 2, run)
assert isinstance(e.__cause__, NotImplementedError) |
test adding wrappers ordering | import pytest
from pluggy import HookimplMarker, HookspecMarker
from pluggy.hooks import HookImpl
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
@pytest.fixture
def hc(pm):
class Hooks(object):
@hookspec
def he_method1(self, arg):
pass
pm.add_hookspecs(Hooks)
return pm.hook.he_method1
@pytest.fixture
def addmeth(hc):
def addmeth(tryfirst=False, trylast=False, hookwrapper=False):
def wrap(func):
hookimpl(tryfirst=tryfirst, trylast=trylast, hookwrapper=hookwrapper)(func)
hc._add_hookimpl(HookImpl(None, "<temp>", func, func.example_impl))
return func
return wrap
return addmeth
def funcs(hookmethods):
return [hookmethod.function for hookmethod in hookmethods]
def test_adding_nonwrappers(hc, addmeth):
@addmeth()
def he_method1():
pass
@addmeth()
def he_method2():
pass
@addmeth()
def he_method3():
pass
assert funcs(hc._nonwrappers) == [he_method1, he_method2, he_method3]
def test_adding_nonwrappers_trylast(hc, addmeth):
@addmeth()
def he_method1_middle():
pass
@addmeth(trylast=True)
def he_method1():
pass
@addmeth()
def he_method1_b():
pass
assert funcs(hc._nonwrappers) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_trylast3(hc, addmeth):
@addmeth()
def he_method1_a():
pass
@addmeth(trylast=True)
def he_method1_b():
pass
@addmeth()
def he_method1_c():
pass
@addmeth(trylast=True)
def he_method1_d():
pass
assert funcs(hc._nonwrappers) == [
he_method1_d,
he_method1_b,
he_method1_a,
he_method1_c,
]
def test_adding_nonwrappers_trylast2(hc, addmeth):
@addmeth()
def he_method1_middle():
pass
@addmeth()
def he_method1_b():
pass
@addmeth(trylast=True)
def he_method1():
pass
assert funcs(hc._nonwrappers) == [he_method1, he_method1_middle, he_method1_b]
def test_adding_nonwrappers_tryfirst(hc, addmeth):
@addmeth(tryfirst=True)
def he_method1():
pass
@addmeth()
def he_method1_middle():
pass
@addmeth()
def he_method1_b():
pass
assert funcs(hc._nonwrappers) == [he_method1_middle, he_method1_b, he_method1]
def METHOD_NAME(hc, addmeth):
@addmeth(hookwrapper=True)
def he_method1():
pass
@addmeth()
def he_method1_middle():
pass
@addmeth(hookwrapper=True)
def he_method3():
pass
assert funcs(hc._nonwrappers) == [he_method1_middle]
assert funcs(hc._wrappers) == [he_method1, he_method3]
def test_adding_wrappers_ordering_tryfirst(hc, addmeth):
@addmeth(hookwrapper=True, tryfirst=True)
def he_method1():
pass
@addmeth(hookwrapper=True)
def he_method2():
pass
assert hc._nonwrappers == []
assert funcs(hc._wrappers) == [he_method2, he_method1]
def test_hookspec(pm):
class HookSpec(object):
@hookspec()
def he_myhook1(arg1):
pass
@hookspec(firstresult=True)
def he_myhook2(arg1):
pass
@hookspec(firstresult=False)
def he_myhook3(arg1):
pass
pm.add_hookspecs(HookSpec)
assert not pm.hook.he_myhook1.spec.opts["firstresult"]
assert pm.hook.he_myhook2.spec.opts["firstresult"]
assert not pm.hook.he_myhook3.spec.opts["firstresult"]
@pytest.mark.parametrize("name", ["hookwrapper", "optionalhook", "tryfirst", "trylast"])
@pytest.mark.parametrize("val", [True, False])
def test_hookimpl(name, val):
@hookimpl(**{name: val})
def he_myhook1(arg1):
pass
if val:
assert he_myhook1.example_impl.get(name)
else:
assert not hasattr(he_myhook1, name)
def test_hookrelay_registry(pm):
"""Verify hook caller instances are registered by name onto the relay
and can be likewise unregistered."""
class Api(object):
@hookspec
def hello(self, arg):
"api hook 1"
pm.add_hookspecs(Api)
hook = pm.hook
assert hasattr(hook, "hello")
assert repr(hook.hello).find("hello") != -1
class Plugin(object):
@hookimpl
def hello(self, arg):
return arg + 1
plugin = Plugin()
pm.register(plugin)
out = hook.hello(arg=3)
assert out == [4]
assert not hasattr(hook, "world")
pm.unregister(plugin)
assert hook.hello(arg=3) == [] |
test partial residueinfo raise error | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
# Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
# doi: 10.25080/majora-629e541a-00e
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
import warnings
import pytest
import numpy as np
from numpy.testing import assert_equal
import MDAnalysis as mda
from MDAnalysisTests.topology.base import ParserBase
from MDAnalysisTests.datafiles import mol2_molecule, PDB_helix, SDF_molecule
Chem = pytest.importorskip('rdkit.Chem')
AllChem = pytest.importorskip('rdkit.Chem.AllChem')
class RDKitParserBase(ParserBase):
parser = mda.converters.RDKitParser.RDKitParser
expected_attrs = ['ids', 'names', 'elements', 'masses', 'aromaticities',
'resids', 'resnums', 'chiralities',
'segids',
'bonds',
]
expected_n_atoms = 0
expected_n_residues = 1
expected_n_segments = 1
expected_n_bonds = 0
def test_creates_universe(self, filename):
u = mda.Universe(filename, format='RDKIT')
assert isinstance(u, mda.Universe)
def test_bonds_total_counts(self, top):
assert len(top.bonds.values) == self.expected_n_bonds
class TestRDKitParserMOL2(RDKitParserBase):
ref_filename = mol2_molecule
expected_attrs = RDKitParserBase.expected_attrs + ['charges']
expected_n_atoms = 49
expected_n_residues = 1
expected_n_segments = 1
expected_n_bonds = 51
@pytest.fixture
def filename(self):
return Chem.MolFromMol2File(self.ref_filename, removeHs=False)
def _create_mol_gasteiger_charges(self):
mol = Chem.MolFromMol2File(self.ref_filename, removeHs=False)
AllChem.ComputeGasteigerCharges(mol)
return mol
def _remove_tripos_charges(self, mol):
for atom in mol.GetAtoms():
atom.ClearProp("_TriposPartialCharge")
@pytest.fixture
def top_gas_tripos(self):
mol = self._create_mol_gasteiger_charges()
return self.parser(mol).parse()
@pytest.fixture
def filename_gasteiger(self):
mol = self._create_mol_gasteiger_charges()
self._remove_tripos_charges(mol)
return mol
@pytest.fixture
def top_gasteiger(self):
mol = self._create_mol_gasteiger_charges()
self._remove_tripos_charges(mol)
return self.parser(mol).parse()
def test_bond_orders(self, top, filename):
expected = [bond.GetBondTypeAsDouble() for bond in filename.GetBonds()]
assert top.bonds.order == expected
def test_multiple_charge_priority(self,
top_gas_tripos, filename_gasteiger):
expected = np.array([
a.GetDoubleProp('_GasteigerCharge') for a in
filename_gasteiger.GetAtoms()], dtype=np.float32)
assert_equal(expected, top_gas_tripos.charges.values)
def test_multiple_charge_props_warning(self):
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
mol = self._create_mol_gasteiger_charges()
# Trigger a warning.
top = self.parser(mol).parse()
# Verify the warning
assert len(w) == 1
assert "_GasteigerCharge and _TriposPartialCharge" in str(
w[-1].message)
def test_gasteiger_charges(self, top_gasteiger, filename_gasteiger):
expected = np.array([
a.GetDoubleProp('_GasteigerCharge') for a in
filename_gasteiger.GetAtoms()], dtype=np.float32)
assert_equal(expected, top_gasteiger.charges.values)
def test_tripos_charges(self, top, filename):
expected = np.array([
a.GetDoubleProp('_TriposPartialCharge') for a in filename.GetAtoms()
], dtype=np.float32)
assert_equal(expected, top.charges.values)
def test_aromaticity(self, top, filename):
expected = np.array([
atom.GetIsAromatic() for atom in filename.GetAtoms()])
assert_equal(expected, top.aromaticities.values)
class TestRDKitParserPDB(RDKitParserBase):
ref_filename = PDB_helix
expected_attrs = RDKitParserBase.expected_attrs + [
'resnames', 'altLocs', 'chainIDs', 'occupancies', 'icodes',
'tempfactors']
guessed_attrs = ['types']
expected_n_atoms = 137
expected_n_residues = 13
expected_n_segments = 1
expected_n_bonds = 137
@pytest.fixture
def filename(self):
return Chem.MolFromPDBFile(self.ref_filename, removeHs=False)
def METHOD_NAME(self, filename):
mol = Chem.RemoveHs(filename)
mh = Chem.AddHs(mol)
with pytest.raises(ValueError,
match="ResidueInfo is only partially available"):
mda.Universe(mh)
mh = Chem.AddHs(mol, addResidueInfo=True)
mda.Universe(mh)
class TestRDKitParserSMILES(RDKitParserBase):
ref_filename = "CN1C=NC2=C1C(=O)N(C(=O)N2C)C"
guessed_attrs = ['types']
expected_n_atoms = 24
expected_n_residues = 1
expected_n_segments = 1
expected_n_bonds = 25
@pytest.fixture
def filename(self):
mol = Chem.MolFromSmiles(self.ref_filename)
mol = Chem.AddHs(mol)
return mol
class TestRDKitParserSDF(RDKitParserBase):
ref_filename = SDF_molecule
guessed_attrs = ['types']
expected_n_atoms = 49
expected_n_residues = 1
expected_n_segments = 1
expected_n_bonds = 49
@pytest.fixture
def filename(self):
return Chem.SDMolSupplier(SDF_molecule, removeHs=False)[0]
def test_bond_orders(self, top, filename):
expected = [bond.GetBondTypeAsDouble() for bond in filename.GetBonds()]
assert top.bonds.order == expected |
test list of optional | import sys
from collections.abc import Sequence
from typing import List, Optional, Tuple, Union
import pytest
import strawberry
from strawberry.annotation import StrawberryAnnotation
from strawberry.type import StrawberryList
def test_basic_list():
annotation = StrawberryAnnotation(List[str])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type is str
assert resolved == StrawberryList(of_type=str)
assert resolved == List[str]
def test_basic_tuple():
annotation = StrawberryAnnotation(Tuple[str])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type is str
assert resolved == StrawberryList(of_type=str)
assert resolved == Tuple[str]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="collections.abc.Sequence supporting [] was added in python 3.9",
)
def test_basic_sequence():
annotation = StrawberryAnnotation(Sequence[str])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type is str
assert resolved == StrawberryList(of_type=str)
assert resolved == Sequence[str]
def METHOD_NAME():
annotation = StrawberryAnnotation(List[Optional[int]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Optional[int]
assert resolved == StrawberryList(of_type=Optional[int])
assert resolved == List[Optional[int]]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="collections.abc.Sequence supporting [] was added in python 3.9",
)
def test_sequence_of_optional():
annotation = StrawberryAnnotation(Sequence[Optional[int]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Optional[int]
assert resolved == StrawberryList(of_type=Optional[int])
assert resolved == Sequence[Optional[int]]
def test_tuple_of_optional():
annotation = StrawberryAnnotation(Tuple[Optional[int]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Optional[int]
assert resolved == StrawberryList(of_type=Optional[int])
assert resolved == Tuple[Optional[int]]
def test_list_of_lists():
annotation = StrawberryAnnotation(List[List[float]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == List[float]
assert resolved == StrawberryList(of_type=List[float])
assert resolved == List[List[float]]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="collections.abc.Sequence supporting [] was added in python 3.9",
)
def test_sequence_of_sequence():
annotation = StrawberryAnnotation(Sequence[Sequence[float]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Sequence[float]
assert resolved == StrawberryList(of_type=Sequence[float])
assert resolved == Sequence[Sequence[float]]
def test_tuple_of_tuple():
annotation = StrawberryAnnotation(Tuple[Tuple[float]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Tuple[float]
assert resolved == StrawberryList(of_type=Tuple[float])
assert resolved == Tuple[Tuple[float]]
def test_list_of_union():
@strawberry.type
class Animal:
feet: bool
@strawberry.type
class Fungus:
spore: bool
annotation = StrawberryAnnotation(List[Union[Animal, Fungus]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Union[Animal, Fungus]
assert resolved == StrawberryList(of_type=Union[Animal, Fungus])
assert resolved == List[Union[Animal, Fungus]]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="collections.abc.Sequence supporting [] was added in python 3.9",
)
def test_sequence_of_union():
@strawberry.type
class Animal:
feet: bool
@strawberry.type
class Fungus:
spore: bool
annotation = StrawberryAnnotation(Sequence[Union[Animal, Fungus]])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type == Union[Animal, Fungus]
assert resolved == StrawberryList(of_type=Union[Animal, Fungus])
assert resolved == Sequence[Union[Animal, Fungus]]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="built-in generic annotations where added in python 3.9",
)
def test_list_builtin():
annotation = StrawberryAnnotation(list[str])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type is str
assert resolved == StrawberryList(of_type=str)
assert resolved == List[str]
assert resolved == list[str]
@pytest.mark.skipif(
sys.version_info < (3, 9),
reason="built-in generic annotations where added in python 3.9",
)
def test_tuple_builtin():
annotation = StrawberryAnnotation(tuple[str])
resolved = annotation.resolve()
assert isinstance(resolved, StrawberryList)
assert resolved.of_type is str
assert resolved == StrawberryList(of_type=str)
assert resolved == Tuple[str]
assert resolved == tuple[str] |
test star wildcard in allowed mimetypes | from pathlib import Path
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import SimpleTestCase, override_settings
from rest_framework.exceptions import ValidationError
from ..api import validators
PIXEL_GIF = b"GIF89a\x01\x00\x01\x00\x81\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00!\xf9\x04\x01\x00\x00\x00\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x00\x08\x04\x00\x01\x04\x04\x00;"
PIXEL_PNG = b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01\x08\x02\x00\x00\x00\x90wS\xde\x00\x00\x00\x0cIDATx\x9cc```\x00\x00\x00\x04\x00\x01\xf6\x178U\x00\x00\x00\x00IEND\xaeB`\x82"
TEST_FILES = Path(__file__).parent.resolve() / "files"
class MimeTypeAllowedTests(SimpleTestCase):
def test_mimetype_allowed_wildcard_patterns(self):
patterns = (
("image/*", ("image/png", "image/jpg")),
(
"application/vnd.oasis.opendocument.*",
("application/vnd.oasis.opendocument.text",),
),
("application/foo-*", ("application/foo-bar",)),
("image*", ("image/png",)),
)
for pattern, mime_types in patterns:
for mime_type in mime_types:
with self.subTest(pattern=pattern, mime_type=mime_type):
allowed = validators.mimetype_allowed(mime_type, [], [pattern])
self.assertTrue(allowed)
def test_mimetype_not_allowed_wildcard_patterns(self):
patterns = (
("sub/match*", "pubsub/matchnotitshould"),
("/nonsense*", "absolute/nonsense"),
)
for pattern, mime_type in patterns:
with self.subTest(pattern=pattern, mime_type=mime_type):
allowed = validators.mimetype_allowed(mime_type, [], [pattern])
self.assertFalse(allowed)
@override_settings(LANGUAGE_CODE="en")
class MimeTypeValidatorTests(SimpleTestCase):
CORRECT_GIF = SimpleUploadedFile("pixel.gif", PIXEL_GIF, content_type="image/gif")
def setUp(self):
super().setUp()
self.CORRECT_GIF.seek(0)
def test_accepts_correct_mime_types(self):
validator = validators.MimeTypeValidator()
try:
validator(self.CORRECT_GIF)
except ValidationError as e:
self.fail(f"Correct file failed validation: {e}")
def test_content_inconsistent_with_mime_type(self):
file = SimpleUploadedFile("pixel.png", PIXEL_GIF, content_type="image/png")
validator = validators.MimeTypeValidator()
with self.assertRaisesMessage(
ValidationError, "The provided file is not a .png."
):
validator(file)
def test_fallback_to_extension(self):
file = SimpleUploadedFile(
"pixel.jpg",
PIXEL_PNG,
content_type="application/octet-stream", # Maybe client doesn't know
)
validator = validators.MimeTypeValidator()
with self.assertRaisesMessage(
ValidationError, "The provided file is not a .jpg."
):
validator(file)
def test_accepts_unknown_extensions(self):
# null-byte to force application/octet-stream mime detection and "???" extension
file = SimpleUploadedFile(
"pixel.gif", b"\x00asjdkfl", content_type="application/octet-stream"
)
validator = validators.MimeTypeValidator()
try:
validator(file)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_mime_type_inferred_from_magic(self):
# gif works on Arch Linux, but not on Debian based systems, while PNG does work
# on both
file = SimpleUploadedFile(
"pixel.png", PIXEL_PNG, content_type="application/octet-stream"
)
validator = validators.MimeTypeValidator()
try:
validator(file)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def METHOD_NAME(self):
validator = validators.MimeTypeValidator({"*"})
try:
validator(self.CORRECT_GIF)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_empty_allowed_mimetypes(self):
validator = validators.MimeTypeValidator({})
try:
validator(self.CORRECT_GIF)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_mime_type_in_allowed_mimetypes(self):
validator = validators.MimeTypeValidator({"image/gif"})
try:
validator(self.CORRECT_GIF)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_mime_type_not_allowed(self):
validator = validators.MimeTypeValidator({"image/png"})
with self.assertRaisesMessage(
ValidationError, "The provided file is not a valid file type."
):
validator(self.CORRECT_GIF)
def test_mif1_brand_heif_files_are_acceptable_heic(self):
        # libmagic has a hard time recognizing that HEVC is used and that a HEIF container is actually HEIC
validator = validators.MimeTypeValidator({"image/heic"})
sample_1 = SimpleUploadedFile(
"sample1.heic", b"\x00\x00\x00\x18ftypmif1", content_type="image/heif"
)
try:
validator(sample_1)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_heic_brand_heif_files_are_recognized_as_heic(self):
        # libmagic has a hard time recognizing that HEVC is used and that a HEIF container is actually HEIC
validator = validators.MimeTypeValidator({}) # accept any
sample_2 = SimpleUploadedFile(
"sample2.heic", b"\x00\x00\x00\x18ftypheic", content_type="image/heif"
)
try:
validator(sample_2)
except ValidationError as e:
self.fail(f"Valid file failed validation: {e}")
def test_heic_brand_heif_files_are_not_recognized_as_png(self):
        # libmagic has a hard time recognizing that HEVC is used and that a HEIF container is actually HEIC
        validator = validators.MimeTypeValidator({"image/png"})  # only PNG allowed
sample_2 = SimpleUploadedFile(
"sample2.heic", b"\x00\x00\x00\x18ftypheic", content_type="image/heif"
)
with self.assertRaises(ValidationError):
validator(sample_2)
def test_validate_files_multiple_mime_types(self):
"""Assert that validation of files associated with multiple mime types works
A refactoring of `MimeTypeValidator` broke validation for files where the
admissible types are specified in the form 'mime1,mime2,mime3'.
GH #2577"""
odt_file = TEST_FILES / "test.odt"
file = SimpleUploadedFile(
"test.odt",
odt_file.read_bytes(),
content_type="application/vnd.oasis.opendocument.text",
)
validator = validators.MimeTypeValidator(
[
"application/vnd.oasis.opendocument.*,application/vnd.oasis.opendocument.text-template,",
"application/pdf",
]
)
validator(file) |
config env | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Stephan Krause <[email protected]>
# Stephan Meissl <[email protected]>
# Fabian Schindler <[email protected]>
# Martin Paces <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
"""\
This module imports and initializes GDAL; i.e enables exceptions and registers
all available drivers.
"""
import os
import contextlib
if os.environ.get('READTHEDOCS', None) != 'True':
try:
from osgeo.gdal import *
except ImportError:
from gdal import *
try:
from collections import OrderedDict as SortedDict
except ImportError:
from django.utils.datastructures import SortedDict
UseExceptions()
AllRegister()
GCI_TO_NAME = SortedDict((
(GCI_Undefined, "Undefined"),
(GCI_GrayIndex, "GrayIndex"),
(GCI_PaletteIndex, "PaletteIndex"),
(GCI_RedBand, "RedBand"),
(GCI_GreenBand, "GreenBand"),
(GCI_BlueBand, "BlueBand"),
(GCI_AlphaBand, "AlphaBand"),
(GCI_HueBand, "HueBand"),
(GCI_SaturationBand, "SaturationBand"),
(GCI_LightnessBand, "LightnessBand"),
(GCI_CyanBand, "CyanBand"),
(GCI_MagentaBand, "MagentaBand"),
(GCI_YellowBand, "YellowBand"),
(GCI_BlackBand, "BlackBand"),
(GCI_YCbCr_YBand, "YBand"),
(GCI_YCbCr_CbBand, "CbBand"),
(GCI_YCbCr_CrBand, "CrBand"),
))
NAME_TO_GCI = dict((j.lower(), i) for (i, j) in GCI_TO_NAME.items())
GDT_TO_NAME = SortedDict((
(GDT_Byte, "Byte"),
(GDT_UInt16, "UInt16"),
(GDT_Int16, "Int16"),
(GDT_UInt32, "UInt32"),
(GDT_Int32, "Int32"),
(GDT_Float32, "Float32"),
(GDT_Float64, "Float64"),
(GDT_CInt16, "CInt16"),
(GDT_CInt32, "CInt32"),
(GDT_CFloat32, "CFloat32"),
(GDT_CFloat64, "CFloat64"),
))
NAME_TO_GDT = SortedDict((j.lower(), i) for (i, j) in GDT_TO_NAME.items())
GDT_NUMERIC_LIMITS = {
GDT_Byte: (0, 255),
GDT_Int16: (-32768, 32767),
GDT_UInt16: (0, 65535),
GDT_CInt16: (complex(-32768, -32768), complex(32767, 32767)),
GDT_Int32: (-2147483648, 2147483647),
GDT_UInt32: (0, 4294967295),
GDT_CInt32: (
complex(-2147483648, -2147483648), complex(2147483647, 2147483647)
),
GDT_Float32: (-3.40282e+38, 3.40282e+38),
GDT_CFloat32: (
complex(-3.40282e+38, -3.40282e+38),
complex(3.40282e+38, 3.40282e+38)
),
GDT_Float64: (-1.79769e+308, 1.79769e+308),
GDT_CFloat64: (
complex(-1.79769e+308, -1.79769e+308),
complex(1.79769e+308, 1.79769e+308)
)
}
GDT_SIGNIFICANT_FIGURES = {
GDT_Byte: 3,
GDT_Int16: 5,
GDT_UInt16: 5,
GDT_CInt16: 5,
GDT_Int32: 10,
GDT_UInt32: 10,
GDT_CInt32: 10,
GDT_Float32: 38,
GDT_CFloat32: 38,
GDT_Float64: 308,
GDT_CFloat64: 308
}
GDT_INTEGRAL_TYPES = frozenset(
(GDT_Byte, GDT_Int16, GDT_UInt16, GDT_Int32, GDT_UInt32)
)
GDT_INTEGRAL_COMPLEX_TYPES = frozenset((GDT_CInt16, GDT_CInt32))
GDT_FLOAT_TYPES = frozenset((GDT_Float32, GDT_Float64))
GDT_FLOAT_COMPLEX_TYPES = frozenset((GDT_CFloat32, GDT_CFloat64))
GDT_COMPLEX_TYPES = frozenset(
(GDT_CInt16, GDT_CInt32, GDT_CFloat32, GDT_CFloat64)
)
def get_extent(ds):
""" Gets the extent of the GDAL Dataset in the form (min-x, min-y, max-x, max-y).
"""
gt = ds.GetGeoTransform()
x_a = gt[0]
x_b = gt[0] + gt[1] * ds.RasterXSize
y_a = gt[3]
y_b = gt[3] + gt[5] * ds.RasterYSize
return (min(x_a, x_b), min(y_a, y_b), max(x_a, x_b), max(y_a, y_b))
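# Illustrative example (not part of the original module): assuming a dataset "ds"
# with geotransform (10.0, 0.1, 0.0, 50.0, 0.0, -0.1) and a 100x100 pixel raster,
# get_extent(ds) returns (10.0, 40.0, 20.0, 50.0); the negative y-resolution only
# swaps y_a and y_b before the min/max calls.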
def set_env(env, fail_on_override=False, return_old=False):
old_values = {} if return_old else None
for key, value in env.items():
if fail_on_override or return_old:
# old_value = GetConfigOption(str(key))
old_value = os.environ.get(key)
if fail_on_override and old_value != value:
raise Exception(
'Would override previous value of %s: %s with %s'
% (key, old_value, value)
)
elif old_value != value:
old_values[key] = old_value
# SetConfigOption(str(key), str(value))
if value is not None:
os.environ[key] = value
return old_values
@contextlib.contextmanager
def METHOD_NAME(env, fail_on_override=False, reset_old=True):
old_env = set_env(env, fail_on_override, reset_old)
yield
if reset_old:
set_env(old_env, False, False)
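# Illustrative usage (hypothetical option value): temporarily point GDAL at a
# custom data directory while opening a dataset, restoring the old value on exit:
#     with METHOD_NAME({'GDAL_DATA': '/tmp/gdal-data'}):
#         ds = OpenShared('example.tif')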
def open_with_env(path, env, shared=True):
with METHOD_NAME(env, False):
return OpenShared(path) if shared else Open(path) |
main | # -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ run_pretrain.py ]
# Synopsis [ scripts for running the pre-training of upstream models ]
# Author [ Andy T. Liu (Andi611) ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
###############
# IMPORTATION #
###############
import os
import re
import yaml
import glob
import random
import argparse
import importlib
from shutil import copyfile
from argparse import Namespace
#-------------#
import torch
import numpy as np
#-------------#
from pretrain.runner import Runner
from utility.helper import override
######################
# PRETRAIN ARGUMENTS #
######################
def get_pretrain_args():
parser = argparse.ArgumentParser()
# use a ckpt as the experiment initialization
# if set, all the following args and config will be overwritten by the ckpt
parser.add_argument('-e', '--past_exp', metavar='{CKPT_PATH,CKPT_DIR}', help='Resume training from a checkpoint')
parser.add_argument('-o', '--override', help='Used to override args and config, this is at the highest priority')
# configuration for the experiment, including runner and downstream
parser.add_argument('-c', '--config', metavar='CONFIG_PATH', help='The yaml file for configuring the whole experiment, except the upstream model')
# upstream settings
parser.add_argument('-u', '--upstream', choices=os.listdir('pretrain/'))
parser.add_argument('-g', '--upstream_config', metavar='CONFIG_PATH', help='The yaml file for configuring the upstream model')
# experiment directory, choose one to specify
# expname uses the default root directory: result/pretrain
parser.add_argument('-n', '--expname', help='Save experiment at expdir/expname')
parser.add_argument('-p', '--expdir', help='Save experiment at expdir')
parser.add_argument('-a', '--auto_resume', action='store_true', help='Auto-resume if the expdir contains checkpoints')
# options
parser.add_argument('--seed', default=1337, type=int)
parser.add_argument('--device', default='cuda', help='model.to(device)')
parser.add_argument('--multi_gpu', action='store_true', help='Enables multi-GPU training')
args = parser.parse_args()
if args.expdir is None:
args.expdir = f'result/pretrain/{args.expname}'
if args.auto_resume:
if os.path.isdir(args.expdir):
ckpt_pths = glob.glob(f'{args.expdir}/states-*.ckpt')
if len(ckpt_pths) > 0:
args.past_exp = args.expdir
if args.past_exp:
# determine checkpoint path
if os.path.isdir(args.past_exp):
ckpt_pths = glob.glob(f'{args.past_exp}/states-*.ckpt')
assert len(ckpt_pths) > 0
ckpt_pths = sorted(ckpt_pths, key=lambda pth: int(pth.split('-')[-1].split('.')[0]))
ckpt_pth = ckpt_pths[-1]
else:
ckpt_pth = args.past_exp
print(f'[Runner] - Resume from {ckpt_pth}')
# load checkpoint
ckpt = torch.load(ckpt_pth, map_location='cpu')
def update_args(old, new):
old_dict = vars(old)
new_dict = vars(new)
old_dict.update(new_dict)
return Namespace(**old_dict)
# overwrite args and config
args = update_args(args, ckpt['Args'])
os.makedirs(args.expdir, exist_ok=True)
args.init_ckpt = ckpt_pth
config = ckpt['Config']
else:
print('[Runner] - Start a new experiment')
args.init_ckpt = None
assert args.expname is not None
if args.expdir is None:
args.expdir = f'result/pretrain/{args.expname}'
os.makedirs(args.expdir, exist_ok=True)
upstream_dirs = [u for u in os.listdir('pretrain/') if re.search(f'^{u}_|^{u}$', args.upstream)]
assert len(upstream_dirs) == 1
if args.config is None:
args.config = f'pretrain/{upstream_dirs[0]}/config_runner.yaml'
with open(args.config, 'r') as file:
config = yaml.load(file, Loader=yaml.FullLoader)
if os.path.isfile(args.config):
copyfile(args.config, f'{args.expdir}/config_runner.yaml')
else:
raise FileNotFoundError('Wrong file path for runner config.')
if args.upstream_config is None:
default_upstream_config = f'pretrain/{upstream_dirs[0]}/config_model.yaml'
assert os.path.isfile(default_upstream_config)
args.upstream_config = default_upstream_config
if os.path.isfile(args.upstream_config):
copyfile(args.upstream_config, f'{args.expdir}/config_model.yaml')
else:
raise FileNotFoundError('Wrong file path for model config.')
if args.override is not None and args.override.lower() != "none":
override(args.override, args, config)
os.makedirs(args.expdir, exist_ok=True)
return args, config
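# Illustrative invocations (hypothetical experiment and upstream names):
#   start a new run:  python run_pretrain.py -n my_exp -u mockingjay
#   resume a run:     python run_pretrain.py -e result/pretrain/my_exp
# When resuming, the newest states-*.ckpt is picked and its stored Args/Config
# take precedence over the command line.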
########
# MAIN #
########
def METHOD_NAME():
# get config and arguments
args, config = get_pretrain_args()
# Fix seed and make backends deterministic
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
runner = Runner(args, config)
runner.train()
runner.logger.close()
if __name__ == '__main__':
METHOD_NAME() |
get tables | from contextlib import closing
import pymssql
import pandas as pd
from mindsdb_sql import parse_sql
from mindsdb_sql.parser.ast.base import ASTNode
from mindsdb.integrations.libs.base import DatabaseHandler
from mindsdb.utilities import log
from mindsdb_sql.render.sqlalchemy_render import SqlalchemyRender
from mindsdb.integrations.libs.response import (
HandlerStatusResponse as StatusResponse,
HandlerResponse as Response,
RESPONSE_TYPE
)
class SqlServerHandler(DatabaseHandler):
"""
This handler handles connection to and execution of Microsoft SQL Server statements.
"""
name = 'mssql'
def __init__(self, name, **kwargs):
super().__init__(name)
self.parser = parse_sql
self.connection_args = kwargs
self.connection_data = self.connection_args.get('connection_data')
self.dialect = 'mssql'
self.database = self.connection_data.get('database')
self.connection = None
self.is_connected = False
def __del__(self):
if self.is_connected is True:
self.disconnect()
def connect(self):
"""
Handles the connection to a SQL Server instance.
"""
if self.is_connected is True:
return self.connection
self.connection = pymssql.connect(**self.connection_data)
self.is_connected = True
return self.connection
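# Illustrative connection_data (hypothetical credentials): the dict is unpacked
# straight into pymssql.connect above, e.g.
#   {'host': 'localhost', 'user': 'sa', 'password': '...', 'database': 'master'}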
def check_connection(self) -> StatusResponse:
"""
Check the connection to the SQL Server database
:return: success status and error message if an error occurs
"""
response = StatusResponse(False)
need_to_close = self.is_connected is False
try:
self.connect()
response.success = True
except Exception as e:
log.logger.error(f'Error connecting to SQL Server {self.database}, {e}!')
response.error_message = str(e)
finally:
if response.success is True and need_to_close:
self.disconnect()
if response.success is False and self.is_connected is True:
self.is_connected = False
return response
def native_query(self, query: str) -> Response:
"""
Receives a SQL query and runs it
:param query: The SQL query to run in SQL Server
:return: the records from the current recordset
"""
need_to_close = self.is_connected is False
connection = self.connect()
# with closing(connection) as con:
with connection.cursor(as_dict=True) as cur:
try:
cur.execute(query)
result = cur.fetchall()
if result:
response = Response(
RESPONSE_TYPE.TABLE,
data_frame=pd.DataFrame(
result,
columns=[x[0] for x in cur.description]
)
)
else:
response = Response(RESPONSE_TYPE.OK)
connection.commit()
except Exception as e:
log.logger.error(f'Error running query: {query} on {self.database}!')
response = Response(
RESPONSE_TYPE.ERROR,
error_message=str(e)
)
connection.rollback()
if need_to_close is True:
self.disconnect()
return response
def query(self, query: ASTNode) -> Response:
"""
Retrieve the data from the SQL statement.
"""
renderer = SqlalchemyRender('mssql')
query_str = renderer.get_string(query, with_failback=True)
return self.native_query(query_str)
def METHOD_NAME(self) -> Response:
"""
Get a list of all the tables in Microsoft SQL Server
"""
query = f"""
SELECT
table_schema,
table_name,
table_type
FROM {self.database}.INFORMATION_SCHEMA.TABLES
WHERE TABLE_TYPE in ('BASE TABLE', 'VIEW');
"""
result = self.native_query(query)
return result
def get_columns(self, table_name) -> Response:
"""
Show details about the table
"""
q = f"""
SELECT
column_name as "Field",
data_type as "Type"
FROM
information_schema.columns
WHERE
table_name = '{table_name}'
"""
result = self.native_query(q)
return result |
test query level eventid | import datetime
import pytest
import salt.modules.win_event as win_event
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_unless_on_windows,
]
@pytest.fixture(scope="function")
def application_events():
# This deletes the contents of the Application event log
win_event.clear("Application")
win_event.add("Application", 2011, event_type="Information")
win_event.add("Application", 2011, event_type="Information")
win_event.add("Application", 2011, event_type="Information")
win_event.add("Application", 2011, event_type="Information")
win_event.add("Application", 2020, event_type="Warning")
win_event.add("Application", 2020, event_type="Warning")
yield
# This deletes the contents of the Application event log
win_event.clear("Application")
def test__to_bytes_utf8():
data = {"key1": "item1", "key2": [1, 2, "item2"], "key3": 45, 45: str}
new_data = win_event._to_bytes(data, "utf-8", False)
assert "key1" in new_data
assert new_data["key1"] == b"item1"
assert new_data["key2"][2] == b"item2"
def test__to_bytes_cp1252():
data = {"key1": "item1", "key2": [1, 2, "item2"], "key3": 45, 45: str}
new_data = win_event._to_bytes(data, "CP1252", True)
assert b"key1" in new_data
assert b"key2" in new_data
assert b"key3" in new_data
assert new_data["key1".encode("CP1252")] == "item1".encode("CP1252")
assert new_data["key2".encode("CP1252")][2] == "item2".encode("CP1252")
def test__raw_time():
raw_time = win_event._raw_time(datetime.datetime(2019, 7, 2, 10, 8, 19))
assert raw_time == (2019, 7, 2, 10, 8, 19)
@pytest.mark.destructive_test
def test_count(application_events):
"""
Test win_event.count
"""
ret = win_event.count("Application")
assert ret == 6
@pytest.mark.destructive_test
def test_get(application_events):
ret = win_event.get("Application")
assert len(ret) == 6
@pytest.mark.destructive_test
def test_query(application_events):
ret = win_event.query("Application")
assert len(ret) == 6
@pytest.mark.destructive_test
def test_query_records(application_events):
ret = win_event.query("Application", records=3)
for item in ret:
assert isinstance(item, dict)
assert len(ret) == 3
@pytest.mark.destructive_test
def test_query_raw(application_events):
ret = win_event.query("Application", raw=True)
for item in ret:
assert isinstance(item, str)
assert len(ret) == 6
@pytest.mark.destructive_test
def test_query_level(application_events):
ret = win_event.query("Application", "*[System[(Level=3)]]")
assert len(ret) == 2
@pytest.mark.destructive_test
def METHOD_NAME(application_events):
ret = win_event.query(
"Application", "*[System[(Level=4 or Level=0) and (EventID=2011)]]"
)
assert len(ret) == 4
@pytest.mark.destructive_test
def test_query_last_hour(application_events):
ret = win_event.query(
"Application", "*[System[TimeCreated[timediff(@SystemTime) <= 3600000]]]"
)
assert len(ret) == 6
@pytest.mark.destructive_test
def test_get_filtered(application_events):
ret = win_event.get_filtered("Application")
assert len(ret) == 6
@pytest.mark.destructive_test
def test_get_filtered_event_id(application_events):
ret = win_event.get_filtered("Application", eventID=2011)
assert len(ret) == 4
@pytest.mark.destructive_test
def test_get_filtered_event_type(application_events):
ret = win_event.get_filtered("Application", eventType=2)
assert len(ret) == 2
@pytest.mark.destructive_test
def test_get_filtered_year(application_events):
year = datetime.datetime.now().year
ret = win_event.get_filtered("Application", year=year)
assert len(ret) == 6
@pytest.mark.destructive_test
def test_get_filtered_year_none(application_events):
year = 1999
ret = win_event.get_filtered("Application", year=year)
assert len(ret) == 0
@pytest.mark.destructive_test
def test_clear(application_events):
assert win_event.count("Application") == 6
win_event.clear("Application")
assert win_event.count("Application") == 0
@pytest.mark.destructive_test
def test_clear_backup(application_events, tmp_path):
assert win_event.count("Application") == 6
backup_log = tmp_path / "test.bak"
assert not backup_log.exists()
win_event.clear("Application", str(backup_log))
assert backup_log.exists()
assert win_event.count("Application") == 0 |
is type | from _typeshed import Incomplete, SupportsKeysAndGetItem
from collections.abc import Callable, Generator, Iterable, Iterator, Mapping
from contextlib import contextmanager
from typing import Any, ClassVar
from typing_extensions import TypeAlias
from ._format import FormatChecker
from ._types import TypeChecker
from ._utils import Unset, URIDict
from .exceptions import ValidationError
# these type aliases do not exist at runtime, they're only defined here in the stub
_JsonObject: TypeAlias = Mapping[str, Any]
_JsonValue: TypeAlias = _JsonObject | list[Any] | str | int | float | bool | None
_ValidatorCallback: TypeAlias = Callable[[Any, Any, _JsonValue, _JsonObject], Iterator[ValidationError]]
_Schema: TypeAlias = Mapping[str, Any]
# This class does not exist at runtime. Compatible classes are created at
# runtime by create().
class _Validator:
VALIDATORS: ClassVar[dict[Incomplete, Incomplete]]
META_SCHEMA: ClassVar[dict[Incomplete, Incomplete]]
TYPE_CHECKER: ClassVar[Incomplete]
FORMAT_CHECKER: ClassVar[Incomplete]
@staticmethod
def ID_OF(schema: _Schema) -> str: ...
schema: _Schema
resolver: Incomplete
format_checker: Incomplete
evolve: Incomplete
def __init__(self, schema: _Schema, resolver: Incomplete | None = ..., format_checker: Incomplete | None = ...) -> None: ...
@classmethod
def check_schema(cls, schema: _Schema, format_checker: FormatChecker | Unset = ...) -> None: ...
def iter_errors(self, instance, _schema: _Schema | None = ...) -> Generator[Incomplete, None, None]: ...
def descend(
self, instance, schema: _Schema, path: Incomplete | None = ..., schema_path: Incomplete | None = ...
) -> Generator[Incomplete, None, None]: ...
def validate(self, *args, **kwargs) -> None: ...
def METHOD_NAME(self, instance, type): ...
def is_valid(self, instance, _schema: _Schema | None = ...) -> bool: ...
def validates(version: str) -> Callable[..., Incomplete]: ...
def create(
meta_schema: _Schema,
validators: Mapping[str, _ValidatorCallback] | tuple[()] = (),
version: Incomplete | None = None,
type_checker: TypeChecker = ...,
format_checker: FormatChecker = ...,
id_of: Callable[[_Schema], str] = ...,
applicable_validators: Callable[[_Schema], Iterable[tuple[str, _ValidatorCallback]]] = ...,
) -> type[_Validator]: ...
def extend(
validator,
validators=(),
version: Incomplete | None = None,
type_checker: Incomplete | None = None,
format_checker: Incomplete | None = None,
): ...
# At runtime these are fields that are assigned the return values of create() calls.
class Draft3Validator(_Validator): ...
class Draft4Validator(_Validator): ...
class Draft6Validator(_Validator): ...
class Draft7Validator(_Validator): ...
class Draft201909Validator(_Validator): ...
class Draft202012Validator(_Validator): ...
_Handler: TypeAlias = Callable[[str], Incomplete]
class RefResolver:
referrer: dict[str, Incomplete]
cache_remote: Incomplete
handlers: dict[str, _Handler]
store: URIDict
def __init__(
self,
base_uri: str,
referrer: dict[str, Incomplete],
store: SupportsKeysAndGetItem[str, str] | Iterable[tuple[str, str]] = ...,
cache_remote: bool = True,
handlers: SupportsKeysAndGetItem[str, _Handler] | Iterable[tuple[str, _Handler]] = (),
urljoin_cache: Incomplete | None = None,
remote_cache: Incomplete | None = None,
) -> None: ...
@classmethod
def from_schema(cls, schema: _Schema, id_of=..., *args, **kwargs): ...
def push_scope(self, scope) -> None: ...
def pop_scope(self) -> None: ...
@property
def resolution_scope(self): ...
@property
def base_uri(self): ...
@contextmanager
def in_scope(self, scope) -> Generator[None, None, None]: ...
@contextmanager
def resolving(self, ref) -> Generator[Incomplete, None, None]: ...
def resolve(self, ref): ...
def resolve_from_url(self, url): ...
def resolve_fragment(self, document, fragment): ...
def resolve_remote(self, uri): ...
def validate(instance: object, schema: _Schema, cls: type[_Validator] | None = None, *args: Any, **kwargs: Any) -> None: ...
def validator_for(schema: _Schema | bool, default=...): ... |
jax args to inner func args | import jax
import jax.numpy as jnp
from pytensor.link.jax.dispatch.basic import jax_funcify
from pytensor.scan.op import Scan
@jax_funcify.register(Scan)
def jax_funcify_Scan(op: Scan, **kwargs):
info = op.info
if info.as_while:
raise NotImplementedError("While Scan cannot yet be converted to JAX")
if info.n_mit_mot:
raise NotImplementedError(
"Scan with MIT-MOT (gradients of scan) cannot yet be converted to JAX"
)
# Optimize inner graph
rewriter = op.mode_instance.optimizer
rewriter(op.fgraph)
scan_inner_func = jax_funcify(op.fgraph, **kwargs)
def scan(*outer_inputs):
# Extract JAX scan inputs
outer_inputs = list(outer_inputs)
n_steps = outer_inputs[0] # JAX `length`
seqs = op.outer_seqs(outer_inputs) # JAX `xs`
mit_sot_init = []
for tap, seq in zip(op.info.mit_sot_in_slices, op.outer_mitsot(outer_inputs)):
init_slice = seq[: abs(min(tap))]
mit_sot_init.append(init_slice)
sit_sot_init = [seq[0] for seq in op.outer_sitsot(outer_inputs)]
init_carry = (
mit_sot_init,
sit_sot_init,
op.outer_shared(outer_inputs),
op.outer_non_seqs(outer_inputs),
) # JAX `init`
def METHOD_NAME(carry, x):
"""Convert JAX scan arguments into format expected by scan_inner_func.
scan(carry, x) -> scan_inner_func(seqs, mit_sot, sit_sot, shared, non_seqs)
"""
# `carry` contains all inner taps, shared terms, and non_seqs
(
inner_mit_sot,
inner_sit_sot,
inner_shared,
inner_non_seqs,
) = carry
# `x` contains the inner sequences
inner_seqs = x
mit_sot_flatten = []
for array, index in zip(inner_mit_sot, op.info.mit_sot_in_slices):
mit_sot_flatten.extend(array[jnp.array(index)])
inner_scan_inputs = [
*inner_seqs,
*mit_sot_flatten,
*inner_sit_sot,
*inner_shared,
*inner_non_seqs,
]
return inner_scan_inputs
def inner_func_outs_to_jax_outs(
old_carry,
inner_scan_outs,
):
"""Convert inner_scan_func outputs into format expected by JAX scan.
old_carry + (mit_sot_outs, sit_sot_outs, nit_sot_outs, shared_outs) -> (new_carry, ys)
"""
(
inner_mit_sot,
inner_sit_sot,
inner_shared,
inner_non_seqs,
) = old_carry
inner_mit_sot_outs = op.inner_mitsot_outs(inner_scan_outs)
inner_sit_sot_outs = op.inner_sitsot_outs(inner_scan_outs)
inner_nit_sot_outs = op.inner_nitsot_outs(inner_scan_outs)
inner_shared_outs = op.inner_shared_outs(inner_scan_outs)
# Replace the oldest mit_sot tap by the newest value
inner_mit_sot_new = [
jnp.concatenate([old_mit_sot[1:], new_val[None, ...]], axis=0)
for old_mit_sot, new_val in zip(
inner_mit_sot,
inner_mit_sot_outs,
)
]
# Nothing needs to be done with sit_sot
inner_sit_sot_new = inner_sit_sot_outs
inner_shared_new = inner_shared
# Replace old shared inputs by new shared outputs
inner_shared_new[: len(inner_shared_outs)] = inner_shared_outs
new_carry = (
inner_mit_sot_new,
inner_sit_sot_new,
inner_shared_new,
inner_non_seqs,
)
# Shared variables and non_seqs are not traced
traced_outs = [
*inner_mit_sot_outs,
*inner_sit_sot_outs,
*inner_nit_sot_outs,
]
return new_carry, traced_outs
def jax_inner_func(carry, x):
inner_args = METHOD_NAME(carry, x)
inner_scan_outs = list(scan_inner_func(*inner_args))
new_carry, traced_outs = inner_func_outs_to_jax_outs(carry, inner_scan_outs)
return new_carry, traced_outs
# Extract PyTensor scan outputs
final_carry, traces = jax.lax.scan(
jax_inner_func, init_carry, seqs, length=n_steps
)
def get_partial_traces(traces):
"""Convert JAX scan traces to PyTensor traces.
We need to:
1. Prepend initial states to JAX output traces
2. Slice final traces if Scan was instructed to only keep a portion
"""
init_states = mit_sot_init + sit_sot_init + [None] * op.info.n_nit_sot
buffers = (
op.outer_mitsot(outer_inputs)
+ op.outer_sitsot(outer_inputs)
+ op.outer_nitsot(outer_inputs)
)
partial_traces = []
for init_state, trace, buffer in zip(init_states, traces, buffers):
if init_state is not None:
# MIT-SOT and SIT-SOT: The final output should be as long as the input buffer
trace = jnp.atleast_1d(trace)
init_state = jnp.expand_dims(
init_state, range(trace.ndim - init_state.ndim)
)
full_trace = jnp.concatenate([init_state, trace], axis=0)
buffer_size = buffer.shape[0]
else:
# NIT-SOT: Buffer is just the number of entries that should be returned
full_trace = jnp.atleast_1d(trace)
buffer_size = buffer
partial_trace = full_trace[-buffer_size:]
partial_traces.append(partial_trace)
return partial_traces
def get_shared_outs(final_carry):
"""Retrive last state of shared_outs from final_carry.
These outputs cannot be traced in PyTensor Scan
"""
(
inner_out_mit_sot,
inner_out_sit_sot,
inner_out_shared,
inner_in_non_seqs,
) = final_carry
shared_outs = inner_out_shared[: info.n_shared_outs]
return list(shared_outs)
scan_outs_final = get_partial_traces(traces) + get_shared_outs(final_carry)
if len(scan_outs_final) == 1:
scan_outs_final = scan_outs_final[0]
return scan_outs_final
return scan |
parse as n1 private key | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This module uses code from TLSLite
# TLSLite author: Trevor Perrin
import binascii
from .x509 import ASN1_Node, bytestr_to_int, decode_OID
def a2b_base64(s):
try:
b = bytearray(binascii.a2b_base64(s))
except Exception as e:
raise SyntaxError("base64 error: %s" % e)
return b
def b2a_base64(b):
return binascii.b2a_base64(b)
def dePem(s, name):
"""Decode a PEM string into a bytearray of its payload.
The input must contain an appropriate PEM prefix and postfix
based on the input name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
The first such PEM block in the input will be found, and its
payload will be base64 decoded and returned.
"""
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
start = s.find(prefix)
if start == -1:
raise SyntaxError("Missing PEM prefix")
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s = s[start+len("-----BEGIN %s-----" % name) : end]
retBytes = a2b_base64(s) # May raise SyntaxError
return retBytes
def dePemList(s, name):
"""Decode a sequence of PEM blocks into a list of bytearrays.
The input must contain any number of PEM blocks, each with the appropriate
PEM prefix and postfix based on the input name string, e.g. for
name="TACK BREAK SIG". Arbitrary text can appear between and before and
after the PEM blocks. For example:
" Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:10Z -----BEGIN TACK
BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6Ap0Fgd9SSTOECeAKOUAym8zcYaXUwpk0+WuPYa7Zmm
SkbOlK4ywqt+amhWbg9txSGUwFO5tWUHT3QrnRlE/e3PeNFXLx5Bckg= -----END TACK
BREAK SIG----- Created by TACK.py 0.9.3 Created at 2012-02-01T00:30:11Z
-----BEGIN TACK BREAK SIG-----
ATKhrz5C6JHJW8BF5fLVrnQss6JnWVyEaC0p89LNhKPswvcC9/s6+vWLd9snYTUv
YMEBdw69PUP8JB4AdqA3K6BVCWfcjN36lx6JwxmZQncS6sww7DecFO/qjSePCxwM
+kdDqX/9/183nmjx6bf0ewhPXkA0nVXsDYZaydN8rJU1GaMlnjcIYxY= -----END TACK
BREAK SIG----- "
All such PEM blocks will be found, decoded, and returned in an ordered list
of bytearrays, which may have zero elements if no PEM blocks are found.
"""
bList = []
prefix = "-----BEGIN %s-----" % name
postfix = "-----END %s-----" % name
while 1:
start = s.find(prefix)
if start == -1:
return bList
end = s.find(postfix, start+len(prefix))
if end == -1:
raise SyntaxError("Missing PEM postfix")
s2 = s[start+len(prefix) : end]
retBytes = a2b_base64(s2) # May raise SyntaxError
bList.append(retBytes)
s = s[end+len(postfix) : ]
def pem(b, name):
"""Encode a payload bytearray into a PEM string.
The input will be base64 encoded, then wrapped in a PEM prefix/postfix
based on the name string, e.g. for name="CERTIFICATE":
-----BEGIN CERTIFICATE-----
MIIBXDCCAUSgAwIBAgIBADANBgkqhkiG9w0BAQUFADAPMQ0wCwYDVQQDEwRUQUNL
...
KoZIhvcNAQEFBQADAwA5kw==
-----END CERTIFICATE-----
"""
s1 = b2a_base64(b)[:-1] # remove terminating \n
s2 = b""
while s1:
s2 += s1[:64] + b"\n"
s1 = s1[64:]
s = ("-----BEGIN %s-----\n" % name).encode('ascii') + s2 + \
("-----END %s-----\n" % name).encode('ascii')
return s
def pemSniff(inStr, name):
searchStr = "-----BEGIN %s-----" % name
return searchStr in inStr
def parse_private_key(s):
"""Parse a string containing a PEM-encoded <privateKey>."""
if pemSniff(s, "PRIVATE KEY"):
bytes = dePem(s, "PRIVATE KEY")
return _parsePKCS8(bytes)
elif pemSniff(s, "RSA PRIVATE KEY"):
bytes = dePem(s, "RSA PRIVATE KEY")
return _parseSSLeay(bytes)
else:
raise SyntaxError("Not a PEM private key file")
def _parsePKCS8(_bytes):
s = ASN1_Node(_bytes)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized PKCS8 version")
rsaOID_node = s.next_node(version_node)
ii = s.first_child(rsaOID_node)
rsaOID = decode_OID(s.get_value_of_type(ii, 'OBJECT IDENTIFIER'))
if rsaOID != '1.2.840.113549.1.1.1':
raise SyntaxError("Unrecognized AlgorithmIdentifier")
privkey_node = s.next_node(rsaOID_node)
value = s.get_value_of_type(privkey_node, 'OCTET STRING')
return METHOD_NAME(value)
def _parseSSLeay(bytes):
return METHOD_NAME(ASN1_Node(bytes))
def bytesToNumber(s):
return int(binascii.hexlify(s), 16)
def METHOD_NAME(s):
s = ASN1_Node(s)
root = s.root()
version_node = s.first_child(root)
version = bytestr_to_int(s.get_value_of_type(version_node, 'INTEGER'))
if version != 0:
raise SyntaxError("Unrecognized RSAPrivateKey version")
n = s.next_node(version_node)
e = s.next_node(n)
d = s.next_node(e)
p = s.next_node(d)
q = s.next_node(p)
dP = s.next_node(q)
dQ = s.next_node(dP)
qInv = s.next_node(dQ)
return list(map(lambda x: bytesToNumber(s.get_value_of_type(x, 'INTEGER')), [n, e, d, p, q, dP, dQ, qInv]))
|
test event hub schema validate | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from ..azure_common import BaseTest, arm_template, cassette_name
class EventHubTest(BaseTest):
def METHOD_NAME(self):
with self.sign_out_patch():
p = self.load_policy({
'name': 'test-event-hub-compliance',
'resource': 'azure.eventhub'
}, validate=True)
self.assertTrue(p)
@arm_template('eventhub.json')
def test_find_by_name(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@cassette_name('firewall')
def test_firewall_rules_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'include': ['11.0.0.0/24']}],
})
resources = p.run()
self.assertEqual(len(resources), 1)
@cassette_name('firewall')
def test_firewall_rules_not_include_cidr(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'include': ['11.0.1.0/24']}],
})
resources = p.run()
self.assertEqual(len(resources), 0)
@cassette_name('firewall')
def test_firewall_rules_ranges(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'include': ['11.0.0.0-11.0.0.255']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@cassette_name('firewall')
def test_firewall_rules_not_ranges(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'include': ['11.0.1.0-11.0.1.255']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources))
@cassette_name('firewall')
def test_firewall_rules_equal(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'equal': ['11.0.0.0/24', '10.1.1.1/32']}],
}, validate=True)
resources = p.run()
self.assertEqual(1, len(resources))
@cassette_name('firewall')
def test_firewall_rules_not_equal(self):
p = self.load_policy({
'name': 'test-azure-eventhub',
'resource': 'azure.eventhub',
'filters': [
{'type': 'value',
'key': 'name',
'op': 'contains',
'value_type': 'normalize',
'value': '-cctesteventhubns'},
{'type': 'firewall-rules',
'equal': ['11.0.1.0/24', '10.1.1.1/32']}],
}, validate=True)
resources = p.run()
self.assertEqual(0, len(resources)) |
domain name | import base64
import json
from enum import Enum
from typing import Any, Dict, List, Optional, Union
from responses import Response
from localstack.constants import HEADER_LOCALSTACK_EDGE_URL
from localstack.utils.aws.aws_responses import parse_query_string
from localstack.utils.strings import short_uid, to_str
# type definition for data parameters (i.e., invocation payloads)
InvocationPayload = Union[Dict, str, bytes]
class ApiGatewayVersion(Enum):
V1 = "v1"
V2 = "v2"
class ApiInvocationContext:
"""Represents the context for an incoming API Gateway invocation."""
# basic (raw) HTTP invocation details (method, path, data, headers)
method: str
path: str
data: InvocationPayload
headers: Dict[str, str]
# invocation context
context: Dict[str, Any]
# authentication info for this invocation
auth_context: Dict[str, Any]
# target API/resource details extracted from the invocation
apigw_version: ApiGatewayVersion
api_id: str
stage: str
account_id: str
region_name: str
# resource path, including any path parameter placeholders (e.g., "/my/path/{id}")
resource_path: str
integration: Dict
resource: Dict
# Invocation path with query string, e.g., "/my/path?test". Defaults to "path", can be used
# to overwrite the actual API path, in case the path format "../_user_request_/.." is used.
_path_with_query_string: str
# response templates to be applied to the invocation result
response_templates: Dict
route: Dict
connection_id: str
path_params: Dict
# response object
response: Response
# dict of stage variables (mapping names to values)
stage_variables: Dict[str, str]
# websockets route selection
ws_route: str
def __init__(
self,
method: str,
path: str,
data: Union[str, bytes],
headers: Dict[str, str],
api_id: str = None,
stage: str = None,
context: Dict[str, Any] = None,
auth_context: Dict[str, Any] = None,
):
self.method = method
self.path = path
self.data = data
self.headers = headers
self.context = {"requestId": short_uid()} if context is None else context
self.auth_context = {} if auth_context is None else auth_context
self.apigw_version = None
self.api_id = api_id
self.stage = stage
self.region_name = None
self.account_id = None
self.integration = None
self.resource = None
self.resource_path = None
self.path_with_query_string = None
self.response_templates = {}
self.stage_variables = {}
self.path_params = {}
self.route = None
self.ws_route = None
self.response = None
@property
def resource_id(self) -> Optional[str]:
return (self.resource or {}).get("id")
@property
def invocation_path(self) -> str:
"""Return the plain invocation path, without query parameters."""
path = self.path_with_query_string or self.path
return path.split("?")[0]
@property
def path_with_query_string(self) -> str:
"""Return invocation path with query string - defaults to the value of 'path', unless customized."""
return self._path_with_query_string or self.path
@path_with_query_string.setter
def path_with_query_string(self, new_path: str):
"""Set a custom invocation path with query string (used to handle "../_user_request_/.." paths)."""
self._path_with_query_string = new_path
def query_params(self) -> Dict[str, str]:
"""Extract the query parameters from the target URL or path in this request context."""
query_string = self.path_with_query_string.partition("?")[2]
return parse_query_string(query_string)
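# Illustrative example (hypothetical request): with path_with_query_string set to
# "/my/path?test=1&name=foo", query_params() is expected to return
# {"test": "1", "name": "foo"}.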
@property
def integration_uri(self) -> Optional[str]:
integration = self.integration or {}
return integration.get("uri") or integration.get("integrationUri")
@property
def auth_identity(self) -> Optional[Dict]:
if isinstance(self.auth_context, dict):
if self.auth_context.get("identity") is None:
self.auth_context["identity"] = {}
return self.auth_context["identity"]
@property
def authorizer_type(self) -> str:
if isinstance(self.auth_context, dict):
return self.auth_context.get("authorizer_type") if self.auth_context else None
@property
def authorizer_result(self) -> Dict[str, Any]:
if isinstance(self.auth_context, dict):
return self.auth_context.get("authorizer") if self.auth_context else {}
def is_websocket_request(self) -> bool:
upgrade_header = str(self.headers.get("upgrade") or "")
return upgrade_header.lower() == "websocket"
def is_v1(self) -> bool:
"""Whether this is an API Gateway v1 request"""
return self.apigw_version == ApiGatewayVersion.V1
def cookies(self) -> Optional[List[str]]:
if cookies := self.headers.get("cookie") or "":
return list(cookies.split(";"))
return None
@property
def is_data_base64_encoded(self) -> bool:
try:
json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data)
return False
except UnicodeDecodeError:
return True
def data_as_string(self) -> str:
try:
return (
json.dumps(self.data) if isinstance(self.data, (dict, list)) else to_str(self.data)
)
except UnicodeDecodeError:
# the data is not valid text, so return it base64-encoded as a string instead
return to_str(base64.b64encode(self.data))
def _extract_host_from_header(self) -> str:
host = self.headers.get(HEADER_LOCALSTACK_EDGE_URL) or self.headers.get("host", "")
return host.split("://")[-1].split("/")[0].split(":")[0]
@property
def METHOD_NAME(self) -> str:
return self._extract_host_from_header()
@property
def domain_prefix(self) -> str:
host = self._extract_host_from_header()
return host.split(".")[0] |
find ground unit source in network | from __future__ import annotations
import logging
from collections import defaultdict
from typing import Optional, TYPE_CHECKING
from game.theater import ControlPoint
from .coalition import Coalition
from .dcs.groundunittype import GroundUnitType
from .theater.transitnetwork import (
NoPathError,
TransitNetwork,
)
from .transfers import TransferOrder
if TYPE_CHECKING:
from .game import Game
class GroundUnitOrders:
def __init__(self, destination: ControlPoint) -> None:
self.destination = destination
# Maps unit type to order quantity.
self.units: dict[GroundUnitType, int] = defaultdict(int)
def __str__(self) -> str:
return f"Pending ground unit delivery to {self.destination}"
def order(self, units: dict[GroundUnitType, int]) -> None:
for k, v in units.items():
self.units[k] += v
def sell(self, units: dict[GroundUnitType, int]) -> None:
for k, v in units.items():
self.units[k] -= v
if self.units[k] == 0:
del self.units[k]
def refund_all(self, coalition: Coalition) -> None:
self._refund(coalition, self.units)
self.units = defaultdict(int)
def _refund(self, coalition: Coalition, units: dict[GroundUnitType, int]) -> None:
for unit_type, count in units.items():
logging.info(f"Refunding {count} {unit_type} at {self.destination.name}")
coalition.adjust_budget(unit_type.price * count)
def pending_orders(self, unit_type: GroundUnitType) -> int:
pending_units = self.units.get(unit_type)
if pending_units is None:
pending_units = 0
return pending_units
def process(self, game: Game) -> None:
coalition = game.coalition_for(self.destination.captured)
ground_unit_source = self.find_ground_unit_source(game)
if ground_unit_source is None:
game.message(
f"{self.destination.name} lost its source for ground unit "
"reinforcements. Refunding purchase price."
)
self.refund_all(coalition)
bought_units: dict[GroundUnitType, int] = {}
units_needing_transfer: dict[GroundUnitType, int] = {}
for unit_type, count in self.units.items():
allegiance = "Ally" if self.destination.captured else "Enemy"
d: dict[GroundUnitType, int]
if self.destination != ground_unit_source:
source = ground_unit_source
d = units_needing_transfer
else:
source = self.destination
d = bought_units
if count < 0:
logging.error(
f"Attempted sale of {unit_type} at {self.destination} but ground "
"units cannot be sold"
)
elif count > 0:
d[unit_type] = count
game.message(
f"{allegiance} reinforcements: {unit_type} x {count} at {source}"
)
self.units = defaultdict(int)
self.destination.base.commission_units(bought_units)
if units_needing_transfer:
if ground_unit_source is None:
raise RuntimeError(
f"Ground unit source could not be found for {self.destination} but "
"still tried to transfer units to there"
)
ground_unit_source.base.commission_units(units_needing_transfer)
self.create_transfer(coalition, ground_unit_source, units_needing_transfer)
def create_transfer(
self,
coalition: Coalition,
source: ControlPoint,
units: dict[GroundUnitType, int],
) -> None:
coalition.transfers.new_transfer(TransferOrder(source, self.destination, units))
def find_ground_unit_source(self, game: Game) -> Optional[ControlPoint]:
# This is running *after* the turn counter has been incremented, so this is the
# reaction to turn 0. On turn zero we allow units to be recruited anywhere for
# delivery on turn 1 so that turn 1 always starts with units on the front line.
if game.turn == 1:
return self.destination
# Fast path if the destination is a valid source.
if self.destination.can_recruit_ground_units(game):
return self.destination
try:
return self.METHOD_NAME(
game.transit_network_for(self.destination.captured), game
)
except NoPathError:
return None
def METHOD_NAME(
self, network: TransitNetwork, game: Game
) -> Optional[ControlPoint]:
sources = []
for control_point in game.theater.control_points_for(self.destination.captured):
if control_point.can_recruit_ground_units(
game
) and network.has_path_between(self.destination, control_point):
sources.append(control_point)
if not sources:
return None
# Fast path to skip the distance calculation if we have only one option.
if len(sources) == 1:
return sources[0]
closest = sources[0]
_, cost = network.shortest_path_with_cost(self.destination, closest)
for source in sources:
_, new_cost = network.shortest_path_with_cost(self.destination, source)
if new_cost < cost:
closest = source
cost = new_cost
return closest |
test str | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
import pytest
from debugpy.common import log
from tests.patterns import some
NONE = None
NAN = float("nan")
def log_repr(x):
s = repr(x)
log.info("{0}", s)
VALUES = [
object(),
True,
False,
0,
-1,
-1.0,
1.23,
b"abc",
b"abcd",
"abc",
"abcd",
(),
(1, 2, 3),
[],
[1, 2, 3],
{},
{"a": 1, "b": 2},
]
@pytest.mark.parametrize("x", VALUES)
def test_value(x):
log_repr(some.object)
assert x == some.object
log_repr(some.object.equal_to(x))
assert x == some.object.equal_to(x)
log_repr(some.object.not_equal_to(x))
assert x != some.object.not_equal_to(x)
log_repr(some.object.same_as(x))
assert x == some.object.same_as(x)
log_repr(some.thing)
assert x == some.thing
log_repr(~some.thing)
assert x != ~some.thing
log_repr(~some.object)
assert x != ~some.object
log_repr(~some.object | x)
assert x == ~some.object | x
def test_none():
assert NONE == some.object
assert NONE == some.object.equal_to(None)
assert NONE == some.object.same_as(None)
assert NONE != some.thing
assert NONE == some.thing | None
def test_equal():
assert 123.0 == some.object.equal_to(123)
assert NAN != some.object.equal_to(NAN)
def test_not_equal():
assert 123.0 != some.object.not_equal_to(123)
assert NAN == some.object.not_equal_to(NAN)
def test_same():
assert 123.0 != some.object.same_as(123)
assert NAN == some.object.same_as(NAN)
def test_inverse():
pattern = ~some.object.equal_to(2)
log_repr(pattern)
assert pattern == 1
assert pattern != 2
assert pattern == 3
assert pattern == "2"
assert pattern == NONE
def test_either():
pattern = some.number | some.str
log_repr(pattern)
assert pattern == 123
pattern = some.str | 123 | some.bool
log_repr(pattern)
assert pattern == 123
def test_in_range():
pattern = some.int.in_range(-5, 5)
log_repr(pattern)
assert all([pattern == x for x in range(-5, 5)])
assert pattern != -6
assert pattern != 5
def METHOD_NAME():
log_repr(some.str)
assert some.str == "abc"
assert b"abc" != some.str
def test_matching():
pattern = some.str.matching(r".(b+).")
log_repr(pattern)
assert pattern == "abbbc"
pattern = some.str.matching(r"bbb")
log_repr(pattern)
assert pattern != "abbbc"
pattern = some.bytes.matching(rb".(b+).")
log_repr(pattern)
assert pattern == b"abbbc"
pattern = some.bytes.matching(rb"bbb")
log_repr(pattern)
assert pattern != b"abbbc"
def test_starting_with():
pattern = some.str.starting_with("aa")
log_repr(pattern)
assert pattern == "aabbbb"
assert pattern != "bbbbaa"
assert pattern != "bbaabb"
assert pattern != "ababab"
pattern = some.bytes.starting_with(b"aa")
log_repr(pattern)
assert pattern == b"aabbbb"
assert pattern != b"bbbbaa"
assert pattern != b"bbaabb"
assert pattern != b"ababab"
def test_ending_with():
pattern = some.str.ending_with("aa")
log_repr(pattern)
assert pattern == "bbbbaa"
assert pattern == "bb\nbb\naa"
assert pattern != "aabbbb"
assert pattern != "bbaabb"
assert pattern != "ababab"
pattern = some.bytes.ending_with(b"aa")
log_repr(pattern)
assert pattern == b"bbbbaa"
assert pattern == b"bb\nbb\naa"
assert pattern != b"aabbbb"
assert pattern != b"bbaabb"
assert pattern != b"ababab"
def test_containing():
pattern = some.str.containing("aa")
log_repr(pattern)
assert pattern == "aabbbb"
assert pattern == "bbbbaa"
assert pattern == "bbaabb"
assert pattern == "bb\naa\nbb"
assert pattern != "ababab"
pattern = some.bytes.containing(b"aa")
log_repr(pattern)
assert pattern == b"aabbbb"
assert pattern == b"bbbbaa"
assert pattern == b"bbaabb"
assert pattern == b"bb\naa\nbb"
assert pattern != b"ababab"
def test_list():
assert [1, 2, 3] == [1, some.thing, 3]
assert [1, 2, 3, 4] != [1, some.thing, 4]
assert [1, 2, 3, 4] == some.list.containing(1)
assert [1, 2, 3, 4] == some.list.containing(2)
assert [1, 2, 3, 4] == some.list.containing(3)
assert [1, 2, 3, 4] == some.list.containing(4)
assert [1, 2, 3, 4] == some.list.containing(1, 2)
assert [1, 2, 3, 4] == some.list.containing(2, 3)
assert [1, 2, 3, 4] == some.list.containing(3, 4)
assert [1, 2, 3, 4] == some.list.containing(1, 2, 3)
assert [1, 2, 3, 4] == some.list.containing(2, 3, 4)
assert [1, 2, 3, 4] == some.list.containing(1, 2, 3, 4)
assert [1, 2, 3, 4] != some.list.containing(5)
assert [1, 2, 3, 4] != some.list.containing(1, 3)
assert [1, 2, 3, 4] != some.list.containing(1, 2, 4)
assert [1, 2, 3, 4] != some.list.containing(2, 3, 5)
def test_dict():
pattern = {"a": some.thing, "b": 2}
log_repr(pattern)
assert pattern == {"a": 1, "b": 2}
pattern = some.dict.containing({"a": 1})
log_repr(pattern)
assert pattern == {"a": 1, "b": 2}
def test_such_that():
pattern = some.thing.such_that(lambda x: x != 1)
log_repr(pattern)
assert 0 == pattern
assert 1 != pattern
assert 2 == pattern
def test_error():
log_repr(some.error)
assert some.error == Exception("error!")
assert some.error != {}
def test_recursive():
pattern = some.dict.containing(
{
"dict": some.dict.containing({"int": some.int.in_range(100, 200)}),
"list": [None, ~some.error, some.number | some.str],
}
)
log_repr(pattern)
assert pattern == {
"list": [None, False, 123],
"bool": True,
"dict": {"int": 123, "str": "abc"},
} |
checked join | # Copyright (c) 2015, Google Inc.
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Extracts archives."""
import hashlib
import optparse
import os
import os.path
import shutil
import sys
import tarfile
import zipfile
def METHOD_NAME(output, path):
"""
CheckedJoin returns os.path.join(output, path). It does sanity checks to
ensure the resulting path is under output, but shouldn't be used on untrusted
input.
"""
path = os.path.normpath(path)
if os.path.isabs(path) or path.startswith('.'):
raise ValueError(path)
return os.path.join(output, path)
class FileEntry(object):
def __init__(self, path, mode, fileobj):
self.path = path
self.mode = mode
self.fileobj = fileobj
class SymlinkEntry(object):
def __init__(self, path, mode, target):
self.path = path
self.mode = mode
self.target = target
def IterateZip(path):
"""
IterateZip opens the zip file at path and returns a generator of entry objects
for each file in it.
"""
with zipfile.ZipFile(path, 'r') as zip_file:
for info in zip_file.infolist():
if info.filename.endswith('/'):
continue
yield FileEntry(info.filename, None, zip_file.open(info))
def IterateTar(path, compression):
"""
IterateTar opens the tar.gz or tar.bz2 file at path and returns a generator of
entry objects for each file in it.
"""
with tarfile.open(path, 'r:' + compression) as tar_file:
for info in tar_file:
if info.isdir():
pass
elif info.issym():
yield SymlinkEntry(info.name, None, info.linkname)
elif info.isfile():
yield FileEntry(info.name, info.mode, tar_file.extractfile(info))
else:
raise ValueError('Unknown entry type "%s"' % (info.name, ))
def main(args):
parser = optparse.OptionParser(usage='Usage: %prog ARCHIVE OUTPUT')
parser.add_option('--no-prefix', dest='no_prefix', action='store_true',
help='Do not remove a prefix from paths in the archive.')
options, args = parser.parse_args(args)
if len(args) != 2:
parser.print_help()
return 1
archive, output = args
if not os.path.exists(archive):
# Skip archives that weren't downloaded.
return 0
with open(archive) as f:
sha256 = hashlib.sha256()
while True:
chunk = f.read(1024 * 1024)
if not chunk:
break
sha256.update(chunk)
digest = sha256.hexdigest()
stamp_path = os.path.join(output, ".boringssl_archive_digest")
if os.path.exists(stamp_path):
with open(stamp_path) as f:
if f.read().strip() == digest:
print "Already up-to-date."
return 0
if archive.endswith('.zip'):
entries = IterateZip(archive)
elif archive.endswith('.tar.gz'):
entries = IterateTar(archive, 'gz')
elif archive.endswith('.tar.bz2'):
entries = IterateTar(archive, 'bz2')
else:
raise ValueError(archive)
try:
if os.path.exists(output):
print "Removing %s" % (output, )
shutil.rmtree(output)
print "Extracting %s to %s" % (archive, output)
prefix = None
num_extracted = 0
for entry in entries:
# Even on Windows, zip files must always use forward slashes.
if '\\' in entry.path or entry.path.startswith('/'):
raise ValueError(entry.path)
if not options.no_prefix:
new_prefix, rest = entry.path.split('/', 1)
# Ensure the archive is consistent.
if prefix is None:
prefix = new_prefix
if prefix != new_prefix:
raise ValueError((prefix, new_prefix))
else:
rest = entry.path
# Extract the file into the output directory.
fixed_path = METHOD_NAME(output, rest)
if not os.path.isdir(os.path.dirname(fixed_path)):
os.makedirs(os.path.dirname(fixed_path))
if isinstance(entry, FileEntry):
with open(fixed_path, 'wb') as out:
shutil.copyfileobj(entry.fileobj, out)
elif isinstance(entry, SymlinkEntry):
os.symlink(entry.target, fixed_path)
else:
raise TypeError('unknown entry type')
# Fix up permissions if needbe.
# TODO(davidben): To be extra tidy, this should only track the execute bit
# as in git.
if entry.mode is not None:
os.chmod(fixed_path, entry.mode)
# Print every 100 files, so bots do not time out on large archives.
num_extracted += 1
if num_extracted % 100 == 0:
print "Extracted %d files..." % (num_extracted,)
finally:
entries.close()
with open(stamp_path, 'w') as f:
f.write(digest)
print "Done. Extracted %d files." % (num_extracted,)
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:])) |
archname | """Run foreign-architecture binaries
Overview
--------
So you want to exploit ARM binaries on your Intel PC?
Pwntools has a good level of integration with QEMU user-mode emulation,
in order to run, debug, and pwn foreign architecture binaries.
In general, everything magic happens "behind the scenes", and pwntools
attempts to make your life easier.
When using :class:`.process`, pwntools will attempt to blindly
execute the binary, in case your system is configured to use ``binfmt-misc``.
If this fails, pwntools will attempt to manually launch the binary under
qemu user-mode emulation. Preference is given to statically-linked variants,
i.e. ``qemu-arm-static`` will be selected before ``qemu-arm``.
Debugging
~~~~~~~~~
When debugging binaries with :func:`.gdb.debug`, pwntools automatically adds
the appropriate command-line flags to QEMU to start its GDB stub, and
automatically informs GDB of the correct architecture and sysroot.
Sysroot
~~~~~~~
You can override the default sysroot by setting the ``QEMU_LD_PREFIX``
environment variable. This affects where ``qemu`` will look for files when
``open()`` is called, e.g. when the linker is attempting to resolve ``libc.so``.
Required Setup
--------------
For Ubuntu 16.04 and newer, the setup is relatively straightforward for most
architectures.
First, install the QEMU emulator itself. If your binary is statically-linked,
this is sufficient. ::
$ sudo apt-get install qemu-user
If your binary is dynamically linked, you need to install libraries like libc.
Generally, this package is named ``libc6-$ARCH-cross``, e.g. ``libc6-mips-cross``.
ARM comes in both soft-float and hard-float variants, e.g. ``armhf``. ::
$ sudo apt-get install libc6-arm64-cross
If your binary relies on additional libraries, you can generally find them
easily with ``apt-cache search``. For example, if it's a C++ binary it
may require ``libstdc++``. ::
$ apt-cache search 'libstdc++' | grep arm64
Any other libraries that you require you'll have to find some other way.
Telling QEMU Where Libraries Are
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The libraries are now installed on your system at e.g. ``/usr/aarch64-linux-gnu``.
QEMU does not know where they are, and expects them to be at e.g. ``/etc/qemu-binfmt/aarch64``.
If you try to run your binary now, you'll probably see an error about ``libc.so.6`` missing.
Create the ``/etc/qemu-binfmt`` directory if it does not exist, and create a symlink to
the appropriate path. ::
$ sudo mkdir /etc/qemu-binfmt
$ sudo ln -s /usr/aarch64-linux-gnu /etc/qemu-binfmt/aarch64
Now QEMU should be able to run the libraries.
"""
from __future__ import absolute_import
from __future__ import division
import os
from pwnlib.context import LocalContext
from pwnlib.context import context
from pwnlib.log import getLogger
from pwnlib.util import misc
log = getLogger(__name__)
@LocalContext
def METHOD_NAME():
"""
Returns the name which QEMU uses for the currently selected
architecture.
>>> pwnlib.qemu.archname()
'i386'
>>> pwnlib.qemu.archname(arch='powerpc')
'ppc'
"""
return {
('amd64', 'little'): 'x86_64',
('arm', 'big'): 'armeb',
('mips', 'little'): 'mipsel',
('mips64', 'little'): 'mips64el',
('powerpc', 'big'): 'ppc',
('powerpc64', 'big'): 'ppc64',
('powerpc64', 'little'): 'ppc64le',
('thumb', 'little'): 'arm',
('thumb', 'big'): 'armeb',
}.get((context.arch, context.endian), context.arch)
@LocalContext
def user_path():
"""
Returns the path to the QEMU-user binary for the currently
selected architecture.
>>> pwnlib.qemu.user_path()
'qemu-i386-static'
>>> pwnlib.qemu.user_path(arch='thumb')
'qemu-arm-static'
"""
arch = METHOD_NAME()
system = 'qemu-system-' + arch
normal = 'qemu-' + arch
static = normal + '-static'
if context.os == 'baremetal':
if misc.which(system):
return system
else:
if misc.which(static):
return static
if misc.which(normal):
return normal
log.warn_once("Neither %r nor %r are available" % (normal, static))
@LocalContext
def ld_prefix(path=None, env=None):
"""Returns the linker prefix for the selected qemu-user binary
>>> pwnlib.qemu.ld_prefix(arch='arm')
'/etc/qemu-binfmt/arm'
"""
if context.os == 'baremetal':
return ""
if path is None:
path = user_path()
# Did we explicitly specify the path in an environment variable?
if env and b'QEMU_LD_PREFIX' in env:
return env[b'QEMU_LD_PREFIX'].decode()
if 'QEMU_LD_PREFIX' in os.environ:
return os.environ['QEMU_LD_PREFIX']
# Cyclic imports!
from pwnlib.tubes.process import process
with context.quiet:
with process([path, '--help'], env=env) as io:
line = io.recvline_regex(b'QEMU_LD_PREFIX *=')
_, libpath = line.split(b'=', 1)
libpath = libpath.strip()
if not isinstance(libpath, str):
libpath = libpath.decode('utf-8')
return libpath
|
mgmt apidoc | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# This script runs sphinx-apidoc against the target package within a tox environment.
# Management-plane packages are handled separately via doc/sphinx/generate_doc.py.
from subprocess import check_call, CalledProcessError
import argparse
import os
import logging
from prep_sphinx_env import should_build_docs
import sys
import shutil
from ci_tools.parsing import ParsedSetup
logging.getLogger().setLevel(logging.INFO)
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), "..", "..", ".."))
generate_mgmt_script = os.path.join(root_dir, "doc/sphinx/generate_doc.py")
def is_mgmt_package(pkg_name):
return "mgmt" in pkg_name or "cognitiveservices" in pkg_name
def copy_existing_docs(source, target):
for file in os.listdir(source):
logging.info("Copying {}".format(file))
shutil.copy(os.path.join(source, file), target)
def sphinx_apidoc(working_directory):
working_doc_folder = os.path.join(args.working_directory, "unzipped", "doc")
command_array = [
"sphinx-apidoc",
"--no-toc",
"--module-first",
"-o",
os.path.join(args.working_directory, "unzipped/docgen"),
os.path.join(args.working_directory, "unzipped/"),
os.path.join(args.working_directory, "unzipped/test*"),
os.path.join(args.working_directory, "unzipped/example*"),
os.path.join(args.working_directory, "unzipped/sample*"),
os.path.join(args.working_directory, "unzipped/setup.py"),
]
try:
# if a `doc` folder exists, just leverage the sphinx sources found therein.
if os.path.exists(working_doc_folder):
logging.info("Copying files into sphinx source folder.")
copy_existing_docs(working_doc_folder, os.path.join(args.working_directory, "unzipped/docgen"))
# otherwise, we will run sphinx-apidoc to generate the sources
else:
logging.info("Sphinx api-doc command: {}".format(command_array))
check_call(
command_array
)
except CalledProcessError as e:
logging.error(
"sphinx-apidoc failed for path {} exited with error {}".format(
args.working_directory, e.returncode
)
)
exit(1)
def METHOD_NAME(working_directory: str, target_folder: str):
command_array = [
sys.executable,
generate_mgmt_script,
"-p",
target_folder,
"-o",
working_directory,
"--verbose",
]
try:
logging.info("Command to generate management sphinx sources: {}".format(command_array))
check_call(
command_array
)
except CalledProcessError as e:
logging.error(
"script failed for path {} exited with error {}".format(
args.working_directory, e.returncode
)
)
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run sphinx-apidoc against target folder. Handles management generation if necessary."
)
parser.add_argument(
"-w",
"--workingdir",
dest="working_directory",
help="The unzipped package directory on disk. Usually {distdir}/unzipped/",
required=True,
)
parser.add_argument(
"-r",
"--root",
dest="package_root",
help="",
required=True,
)
args = parser.parse_args()
target_dir = os.path.abspath(args.working_directory)
package_dir = os.path.abspath(args.package_root)
output_directory = os.path.join(target_dir, "unzipped/docgen")
pkg_details = ParsedSetup.from_path(package_dir)
if should_build_docs(pkg_details.name):
if is_mgmt_package(pkg_details.name):
METHOD_NAME(output_directory, pkg_details.folder)
else:
sphinx_apidoc(args.working_directory)
else:
logging.info("Skipping sphinx source generation for {}".format(pkg_details.name) |
execute patch | # Copyright (c) 2021, Frappe Technologies Pvt. Ltd. and Contributors
# License: MIT. See LICENSE
""" Patch Handler.
This file manages execution of manually written patches. Patches are scripts
that apply changes in database schema or data to accommodate changes in the
code.
Ways to specify patches:
1. patches.txt file specifies patches that run before doctype schema
migration. Each line represents one patch (old format).
2. patches.txt can alternatively also separate pre and post model sync
patches by using INI like file format:
```patches.txt
[pre_model_sync]
app.module.patch1
app.module.patch2
[post_model_sync]
app.module.patch3
```
When different sections are specified patches are executed in this order:
1. Run pre_model_sync patches
2. Reload/resync all doctype schema
3. Run post_model_sync patches
Hence any patch that just needs to modify data but doesn't depend on
old schema should be added to the post_model_sync section of the file.
3. simple python commands can be added by starting line with `execute:`
`execute:` example: `execute:print("hello world")`
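For the module-based entries above, each line such as ``app.module.patch1``
names a module whose ``execute()`` function is run. A minimal sketch of such a
module (the module path and the data fix are hypothetical) looks like:

```python
# app/module/patch1.py (hypothetical example)
import frappe

def execute():
	"""This docstring is printed when the patch runs."""
	frappe.db.sql("update `tabNote` set public = 0 where public is null")
```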
"""
import configparser
import time
from enum import Enum
from textwrap import dedent, indent
import frappe
class PatchError(Exception):
pass
class PatchType(Enum):
pre_model_sync = "pre_model_sync"
post_model_sync = "post_model_sync"
def run_all(skip_failing: bool = False, patch_type: PatchType | None = None) -> None:
"""run all pending patches"""
executed = set(frappe.get_all("Patch Log", filters={"skipped": 0}, fields="patch", pluck="patch"))
frappe.flags.final_patches = []
def run_patch(patch):
try:
if not run_single(patchmodule=patch):
print(patch + ": failed: STOPPED")
raise PatchError(patch)
except Exception:
if not skip_failing:
raise
print("Failed to execute patch")
update_patch_log(patch, skipped=True)
patches = get_all_patches(patch_type=patch_type)
for patch in patches:
if patch and (patch not in executed):
run_patch(patch)
# patches to be run in the end
for patch in frappe.flags.final_patches:
patch = patch.replace("finally:", "")
run_patch(patch)
def get_all_patches(patch_type: PatchType | None = None) -> list[str]:
if patch_type and not isinstance(patch_type, PatchType):
frappe.throw(f"Unsupported patch type specified: {patch_type}")
patches = []
for app in frappe.get_installed_apps():
patches.extend(get_patches_from_app(app, patch_type=patch_type))
return patches
def get_patches_from_app(app: str, patch_type: PatchType | None = None) -> list[str]:
"""Get patches from an app's patches.txt
patches.txt can be:
1. an INI-like file with a section for each patch_type
2. plain text file with each line representing a patch.
"""
patches_file = frappe.get_app_path(app, "patches.txt")
try:
return parse_as_configfile(patches_file, patch_type)
except configparser.MissingSectionHeaderError:
# treat as old format with each line representing a single patch
# backward compatibility with old patches.txt format
if not patch_type or patch_type == PatchType.pre_model_sync:
return frappe.get_file_items(patches_file)
return []
def parse_as_configfile(patches_file: str, patch_type: PatchType | None = None) -> list[str]:
# Attempt to parse as ini file with pre/post patches
# allow_no_value: patches are not key value pairs
# delimiters = '\n' to avoid treating default `:` and `=` in execute as k:v delimiter
parser = configparser.ConfigParser(allow_no_value=True, delimiters="\n")
# preserve case
parser.optionxform = str
parser.read(patches_file)
# empty file
if not parser.sections():
return []
if not patch_type:
return [patch for patch in parser[PatchType.pre_model_sync.value]] + [
patch for patch in parser[PatchType.post_model_sync.value]
]
if patch_type.value in parser.sections():
return [patch for patch in parser[patch_type.value]]
else:
frappe.throw(frappe._("Patch type {} not found in patches.txt").format(patch_type))
def reload_doc(args):
import frappe.modules
run_single(method=frappe.modules.reload_doc, methodargs=args)
def run_single(patchmodule=None, method=None, methodargs=None, force=False):
from frappe import conf
# don't write txt files
conf.developer_mode = 0
if force or method or not executed(patchmodule):
return METHOD_NAME(patchmodule, method, methodargs)
else:
return True
def METHOD_NAME(patchmodule: str, method=None, methodargs=None):
"""execute the patch"""
_patch_mode(True)
if patchmodule.startswith("execute:"):
has_patch_file = False
patch = patchmodule.split("execute:")[1]
docstring = ""
else:
has_patch_file = True
patch = f"{patchmodule.split(maxsplit=1)[0]}.execute"
_patch = frappe.get_attr(patch)
docstring = _patch.__doc__ or ""
if docstring:
docstring = "\n" + indent(dedent(docstring), "\t")
print(
f"Executing {patchmodule or methodargs} in {frappe.local.site} ({frappe.db.cur_db_name}){docstring}"
)
start_time = time.monotonic()
frappe.db.begin()
frappe.db.auto_commit_on_many_writes = 0
try:
if patchmodule:
if patchmodule.startswith("finally:"):
# run this patch at the end
frappe.flags.final_patches.append(patchmodule)
else:
if has_patch_file:
_patch()
else:
exec(patch, globals())
update_patch_log(patchmodule)
elif method:
method(**methodargs)
except Exception:
frappe.db.rollback()
raise
else:
frappe.db.commit()
end_time = time.monotonic()
_patch_mode(False)
print(f"Success: Done in {round(end_time - start_time, 3)}s")
return True
def update_patch_log(patchmodule, skipped=False):
"""update patch_file in patch log"""
patch = frappe.get_doc({"doctype": "Patch Log", "patch": patchmodule})
if skipped:
traceback = frappe.get_traceback(with_context=True)
patch.skipped = 1
patch.traceback = traceback
print(traceback, end="\n\n")
patch.insert(ignore_permissions=True)
def executed(patchmodule):
"""return True if is executed"""
if patchmodule.startswith("finally:"):
# patches are saved without the finally: tag
patchmodule = patchmodule.replace("finally:", "")
return frappe.db.get_value("Patch Log", {"patch": patchmodule, "skipped": 0})
def _patch_mode(enable):
"""stop/start execution till patch is run"""
frappe.local.flags.in_patch = enable
frappe.db.commit() |
arn | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDocumentResult',
'AwaitableGetDocumentResult',
'get_document',
'get_document_output',
]
@pulumi.output_type
class GetDocumentResult:
"""
A collection of values returned by getDocument.
"""
def __init__(__self__, METHOD_NAME=None, content=None, document_format=None, document_type=None, document_version=None, id=None, name=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", METHOD_NAME)
if content and not isinstance(content, str):
raise TypeError("Expected argument 'content' to be a str")
pulumi.set(__self__, "content", content)
if document_format and not isinstance(document_format, str):
raise TypeError("Expected argument 'document_format' to be a str")
pulumi.set(__self__, "document_format", document_format)
if document_type and not isinstance(document_type, str):
raise TypeError("Expected argument 'document_type' to be a str")
pulumi.set(__self__, "document_type", document_type)
if document_version and not isinstance(document_version, str):
raise TypeError("Expected argument 'document_version' to be a str")
pulumi.set(__self__, "document_version", document_version)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
ARN of the document. If the document is an AWS managed document, this value will be set to the name of the document instead.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def content(self) -> str:
"""
Contents of the document.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="documentFormat")
def document_format(self) -> Optional[str]:
return pulumi.get(self, "document_format")
@property
@pulumi.getter(name="documentType")
def document_type(self) -> str:
"""
Type of the document.
"""
return pulumi.get(self, "document_type")
@property
@pulumi.getter(name="documentVersion")
def document_version(self) -> Optional[str]:
return pulumi.get(self, "document_version")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
class AwaitableGetDocumentResult(GetDocumentResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetDocumentResult(
METHOD_NAME=self.METHOD_NAME,
content=self.content,
document_format=self.document_format,
document_type=self.document_type,
document_version=self.document_version,
id=self.id,
name=self.name)
def get_document(document_format: Optional[str] = None,
document_version: Optional[str] = None,
name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDocumentResult:
"""
Gets the contents of the specified Systems Manager document.
## Example Usage
To get the contents of the document owned by AWS.
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_document(name="AWS-GatherSoftwareInventory",
document_format="YAML")
pulumi.export("content", foo.content)
```
To get the contents of the custom document.
```python
import pulumi
import pulumi_aws as aws
test = aws.ssm.get_document(name=aws_ssm_document["test"]["name"],
document_format="JSON")
```
:param str document_format: Returns the document in the specified format. The document format can be `JSON`, `YAML`, or `TEXT`. JSON is the default format.
:param str document_version: Document version for which you want information.
:param str name: Name of the Systems Manager document.
"""
__args__ = dict()
__args__['documentFormat'] = document_format
__args__['documentVersion'] = document_version
__args__['name'] = name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ssm/getDocument:getDocument', __args__, opts=opts, typ=GetDocumentResult).value
return AwaitableGetDocumentResult(
METHOD_NAME=pulumi.get(__ret__, 'arn'),
content=pulumi.get(__ret__, 'content'),
document_format=pulumi.get(__ret__, 'document_format'),
document_type=pulumi.get(__ret__, 'document_type'),
document_version=pulumi.get(__ret__, 'document_version'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'))
@_utilities.lift_output_func(get_document)
def get_document_output(document_format: Optional[pulumi.Input[Optional[str]]] = None,
document_version: Optional[pulumi.Input[Optional[str]]] = None,
name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDocumentResult]:
"""
Gets the contents of the specified Systems Manager document.
## Example Usage
To get the contents of the document owned by AWS.
```python
import pulumi
import pulumi_aws as aws
foo = aws.ssm.get_document(name="AWS-GatherSoftwareInventory",
document_format="YAML")
pulumi.export("content", foo.content)
```
To get the contents of the custom document.
```python
import pulumi
import pulumi_aws as aws
test = aws.ssm.get_document(name=aws_ssm_document["test"]["name"],
document_format="JSON")
```
:param str document_format: Returns the document in the specified format. The document format can be `JSON`, `YAML`, or `TEXT`. JSON is the default format.
:param str document_version: Document version for which you want information.
:param str name: Name of the Systems Manager document.
"""
... |
last modified by type | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'DocumentProcessorPropertiesResponse',
'SystemDataResponse',
]
@pulumi.output_type
class DocumentProcessorPropertiesResponse(dict):
"""
Document processor properties
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "provisioningState":
suggest = "provisioning_state"
elif key == "spoTenantId":
suggest = "spo_tenant_id"
elif key == "spoTenantUrl":
suggest = "spo_tenant_url"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DocumentProcessorPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DocumentProcessorPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DocumentProcessorPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
provisioning_state: str,
spo_tenant_id: str,
spo_tenant_url: str):
"""
Document processor properties
:param str provisioning_state: The managed resource provisioning state.
:param str spo_tenant_id: The ID (GUID) of a SharePoint Online (SPO) tenant associated with this document processor resource
:param str spo_tenant_url: The URL of a SharePoint Online (SPO) tenant associated with this document processor resource
"""
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "spo_tenant_id", spo_tenant_id)
pulumi.set(__self__, "spo_tenant_url", spo_tenant_url)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The managed resource provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="spoTenantId")
def spo_tenant_id(self) -> str:
"""
The ID (GUID) of a SharePoint Online (SPO) tenant associated with this document processor resource
"""
return pulumi.get(self, "spo_tenant_id")
@property
@pulumi.getter(name="spoTenantUrl")
def spo_tenant_url(self) -> str:
"""
The URL of a SharePoint Online (SPO) tenant associated with this document processor resource
"""
return pulumi.get(self, "spo_tenant_url")
@pulumi.output_type
class SystemDataResponse(dict):
"""
Metadata pertaining to creation and last modification of the resource.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdAt":
suggest = "created_at"
elif key == "createdBy":
suggest = "created_by"
elif key == "createdByType":
suggest = "created_by_type"
elif key == "lastModifiedAt":
suggest = "last_modified_at"
elif key == "lastModifiedBy":
suggest = "last_modified_by"
elif key == "lastModifiedByType":
suggest = "last_modified_by_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in SystemDataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
SystemDataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
SystemDataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_at: Optional[str] = None,
created_by: Optional[str] = None,
created_by_type: Optional[str] = None,
last_modified_at: Optional[str] = None,
last_modified_by: Optional[str] = None,
METHOD_NAME: Optional[str] = None):
"""
Metadata pertaining to creation and last modification of the resource.
:param str created_at: The timestamp of resource creation (UTC).
:param str created_by: The identity that created the resource.
:param str created_by_type: The type of identity that created the resource.
:param str last_modified_at: The timestamp of resource last modification (UTC)
:param str last_modified_by: The identity that last modified the resource.
:param str last_modified_by_type: The type of identity that last modified the resource.
"""
if created_at is not None:
pulumi.set(__self__, "created_at", created_at)
if created_by is not None:
pulumi.set(__self__, "created_by", created_by)
if created_by_type is not None:
pulumi.set(__self__, "created_by_type", created_by_type)
if last_modified_at is not None:
pulumi.set(__self__, "last_modified_at", last_modified_at)
if last_modified_by is not None:
pulumi.set(__self__, "last_modified_by", last_modified_by)
if METHOD_NAME is not None:
pulumi.set(__self__, "last_modified_by_type", METHOD_NAME)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> Optional[str]:
"""
The timestamp of resource creation (UTC).
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> Optional[str]:
"""
The identity that created the resource.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="createdByType")
def created_by_type(self) -> Optional[str]:
"""
The type of identity that created the resource.
"""
return pulumi.get(self, "created_by_type")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> Optional[str]:
"""
The timestamp of resource last modification (UTC)
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> Optional[str]:
"""
The identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="lastModifiedByType")
def METHOD_NAME(self) -> Optional[str]:
"""
The type of identity that last modified the resource.
"""
return pulumi.get(self, "last_modified_by_type")
|
test missing output keys | # Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from unittest import mock
from .config_exception import ConfigException
from .exec_provider import ExecProvider
from .kube_config import ConfigNode
class ExecProviderTest(unittest.TestCase):
def setUp(self):
self.input_ok = ConfigNode('test', {
'command': 'aws-iam-authenticator',
'args': ['token', '-i', 'dummy'],
'apiVersion': 'client.authentication.k8s.io/v1beta1',
'env': None
})
self.output_ok = """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
"""
def test_missing_input_keys(self):
exec_configs = [ConfigNode('test1', {}),
ConfigNode('test2', {'command': ''}),
ConfigNode('test3', {'apiVersion': ''})]
for exec_config in exec_configs:
with self.assertRaises(ConfigException) as context:
ExecProvider(exec_config, None)
self.assertIn('exec: malformed request. missing key',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_error_code_returned(self, mock):
instance = mock.return_value
instance.wait.return_value = 1
instance.communicate.return_value = ('', '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: process returned %d' %
instance.wait.return_value, context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_nonjson_output_returned(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = ('', '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: failed to decode process output',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def METHOD_NAME(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
outputs = [
"""
{
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
""", """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"status": {
"token": "dummy"
}
}
""", """
{
"apiVersion": "client.authentication.k8s.io/v1beta1",
"kind": "ExecCredential"
}
"""
]
for output in outputs:
instance.communicate.return_value = (output, '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn('exec: malformed response. missing key',
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_mismatched_api_version(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
wrong_api_version = 'client.authentication.k8s.io/v1'
output = """
{
"apiVersion": "%s",
"kind": "ExecCredential",
"status": {
"token": "dummy"
}
}
""" % wrong_api_version
instance.communicate.return_value = (output, '')
with self.assertRaises(ConfigException) as context:
ep = ExecProvider(self.input_ok, None)
ep.run()
self.assertIn(
'exec: plugin api version %s does not match' %
wrong_api_version,
context.exception.args[0])
@mock.patch('subprocess.Popen')
def test_ok_01(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
ep = ExecProvider(self.input_ok, None)
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue('token' in result)
@mock.patch('subprocess.Popen')
def test_run_in_dir(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
ep = ExecProvider(self.input_ok, '/some/directory')
ep.run()
self.assertEqual(mock.call_args[1]['cwd'], '/some/directory')
@mock.patch('subprocess.Popen')
def test_ok_no_console_attached(self, mock):
instance = mock.return_value
instance.wait.return_value = 0
instance.communicate.return_value = (self.output_ok, '')
mock_stdout = unittest.mock.patch(
'sys.stdout', new=None) # Simulate detached console
with mock_stdout:
ep = ExecProvider(self.input_ok, None)
result = ep.run()
self.assertTrue(isinstance(result, dict))
self.assertTrue('token' in result)
if __name__ == '__main__':
unittest.main() |
load test data | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test getblockstats rpc call
#
from test_framework.test_framework import MuntTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
import json
import os
import time
TESTSDIR = os.path.dirname(os.path.realpath(__file__))
class GetblockstatsTest(MuntTestFramework):
start_height = 101
max_stat_pos = 2
STATS_NEED_TXINDEX = [
'avgfee',
'avgfeerate',
'maxfee',
'maxfeerate',
'medianfee',
'feerate_percentiles',
'minfee',
'minfeerate',
'totalfee',
'utxo_size_inc',
]
def add_options(self, parser):
parser.add_argument('--gen-test-data', dest='gen_test_data',
default=False, action='store_true',
help='Generate test data')
parser.add_argument('--test-data', dest='test_data',
default='data/rpc_getblockstats.json',
action='store', metavar='FILE',
help='Test data file')
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [['-txindex'], ['-paytxfee=0.003']]
self.setup_clean_chain = True
def get_stats(self):
return [self.nodes[0].getblockstats(hash_or_height=self.start_height + i) for i in range(self.max_stat_pos+1)]
def generate_test_data(self, filename):
mocktime = time.time()
self.nodes[0].generate(101)
self.nodes[0].sendtoaddress(address=self.nodes[1].getnewaddress(), amount=10, subtract_fee_from_amount=True)
self.nodes[0].generate(1)
self.sync_all()
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtract_fee_from_amount=True)
self.nodes[0].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=10, subtract_fee_from_amount=False)
self.nodes[1].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=1, subtract_fee_from_amount=True)
self.sync_all()
self.nodes[0].generate(1)
self.expected_stats = self.get_stats()
blocks = []
tip = self.nodes[0].getbestblockhash()
blockhash = None
height = 0
while tip != blockhash:
blockhash = self.nodes[0].getblockhash(height)
blocks.append(self.nodes[0].getblock(blockhash, 0))
height += 1
to_dump = {
'blocks': blocks,
'mocktime': int(mocktime),
'stats': self.expected_stats,
}
with open(filename, 'w', encoding="utf8") as f:
json.dump(to_dump, f, sort_keys=True, indent=2)
def METHOD_NAME(self, filename):
with open(filename, 'r', encoding="utf8") as f:
d = json.load(f)
blocks = d['blocks']
mocktime = d['mocktime']
self.expected_stats = d['stats']
# Set the timestamps from the file so that the nodes can get out of Initial Block Download
self.nodes[0].setmocktime(mocktime)
self.nodes[1].setmocktime(mocktime)
for b in blocks:
self.nodes[0].submitblock(b)
def run_test(self):
test_data = os.path.join(TESTSDIR, self.options.test_data)
if self.options.gen_test_data:
self.generate_test_data(test_data)
else:
self.METHOD_NAME(test_data)
self.sync_all()
stats = self.get_stats()
expected_stats_noindex = []
for stat_row in stats:
expected_stats_noindex.append({k: v for k, v in stat_row.items() if k not in self.STATS_NEED_TXINDEX})
# Make sure all valid statistics are included but nothing else is
expected_keys = self.expected_stats[0].keys()
assert_equal(set(stats[0].keys()), set(expected_keys))
assert_equal(stats[0]['height'], self.start_height)
assert_equal(stats[self.max_stat_pos]['height'], self.start_height + self.max_stat_pos)
for i in range(self.max_stat_pos+1):
self.log.info('Checking block %d\n' % (i))
assert_equal(stats[i], self.expected_stats[i])
# Check selecting block by hash too
blockhash = self.expected_stats[i]['blockhash']
stats_by_hash = self.nodes[0].getblockstats(hash_or_height=blockhash)
assert_equal(stats_by_hash, self.expected_stats[i])
# Check with the node that has no txindex
stats_no_txindex = self.nodes[1].getblockstats(hash_or_height=blockhash, stats=list(expected_stats_noindex[i].keys()))
assert_equal(stats_no_txindex, expected_stats_noindex[i])
# Make sure each stat can be queried on its own
for stat in expected_keys:
for i in range(self.max_stat_pos+1):
result = self.nodes[0].getblockstats(hash_or_height=self.start_height + i, stats=[stat])
assert_equal(list(result.keys()), [stat])
if result[stat] != self.expected_stats[i][stat]:
self.log.info('result[%s] (%d) failed, %r != %r' % (
stat, i, result[stat], self.expected_stats[i][stat]))
assert_equal(result[stat], self.expected_stats[i][stat])
# Make sure only the selected statistics are included (more than one)
some_stats = {'minfee', 'maxfee'}
stats = self.nodes[0].getblockstats(hash_or_height=1, stats=list(some_stats))
assert_equal(set(stats.keys()), some_stats)
# Test invalid parameters raise the proper json exceptions
tip = self.start_height + self.max_stat_pos
assert_raises_rpc_error(-8, 'Target block height %d after current tip %d' % (tip+1, tip),
self.nodes[0].getblockstats, hash_or_height=tip+1)
assert_raises_rpc_error(-8, 'Target block height %d is negative' % (-1),
self.nodes[0].getblockstats, hash_or_height=-1)
# Make sure invalid stats aren't allowed
inv_sel_stat = 'asdfghjkl'
inv_stats = [
[inv_sel_stat],
['minfee' , inv_sel_stat],
[inv_sel_stat, 'minfee'],
['minfee', inv_sel_stat, 'maxfee'],
]
for inv_stat in inv_stats:
assert_raises_rpc_error(-8, 'Invalid selected statistic %s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=inv_stat)
# Make sure we aren't always returning inv_sel_stat as the culprit stat
assert_raises_rpc_error(-8, 'Invalid selected statistic aaa%s' % inv_sel_stat,
self.nodes[0].getblockstats, hash_or_height=1, stats=['minfee' , 'aaa%s' % inv_sel_stat])
###assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
### self.nodes[1].getblockstats, hash_or_height=1)
###assert_raises_rpc_error(-8, 'One or more of the selected stats requires -txindex enabled',
### self.nodes[1].getblockstats, hash_or_height=self.start_height + self.max_stat_pos)
# Mainchain's genesis block shouldn't be found on regtest
assert_raises_rpc_error(-5, 'Block not found', self.nodes[0].getblockstats,
hash_or_height='000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f')
# Invalid number of args
###assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats, '00', 1, 2)
###assert_raises_rpc_error(-1, 'getblockstats hash_or_height ( stats )', self.nodes[0].getblockstats)
if __name__ == '__main__':
GetblockstatsTest().main() |
test nested class | # Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software (each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
global_log = []
def clear_global_log(func):
def wrapper(*args, **kwargs):
del global_log[:]
return func(*args, **kwargs)
return wrapper
class Meta(type):
def __prepare__(*args, **kwargs):
global_log.append(["__prepare__", args, kwargs])
return type.__prepare__(*args, **kwargs)
def __new__(*args, **kwargs):
global_log.append(["__new__", args, kwargs])
return type.__new__(*args, **kwargs)
def __init__(*args, **kwargs):
global_log.append(["__init__", args, kwargs])
return type.__init__(*args, **kwargs)
def __call__(*args, **kwargs):
global_log.append(["__call__", args, kwargs])
return type.__call__(*args, **kwargs)
@clear_global_log
def test_class_construction():
class Foo(metaclass=Meta):
pass
Foo()
assert global_log[0] == ["__prepare__", ("Foo", tuple()), {}]
initial_dict = {'__qualname__': 'test_class_construction.<locals>.Foo', '__module__': 'tests.test_metaclass'}
# if sys.implementation.name == "graalpy":
# initial_dict = {
# '__module__': 'test_metaclass'
# }
assert global_log[1] == ["__new__", (Meta, "Foo", tuple(), initial_dict), {}]
assert global_log[2] == ["__init__", (Foo, "Foo", tuple(), initial_dict), {}]
assert global_log[3] == ["__call__", (Foo,), {}]
@clear_global_log
def test_metaclass_methods():
class MyMeta(type):
def __new__(meta, name, bases, dct):
global_log.append(["__new__", meta, name, bases, dct])
return super(MyMeta, meta).__new__(meta, name, bases, dct)
def __init__(cls, name, bases, dct):
global_log.append(["__init__", cls, name, bases, dct])
super(MyMeta, cls).__init__(name, bases, dct)
def __call__(cls, *args, **kwds):
global_log.append(["__call__", cls, args, kwds])
return type.__call__(cls, *args, **kwds)
def a_method(cls, arg):
return cls, arg
class MyClass(metaclass=MyMeta):
def __init__(self, a, b):
global_log.append(["MyKlass object", a, b])
assert isinstance(MyClass, MyMeta)
ns_dict = {
'__qualname__': 'test_metaclass_methods.<locals>.MyClass',
'__init__': MyClass.__init__,
'__module__': 'tests.test_metaclass'
}
# if sys.implementation.name == "graalpy":
# ns_dict = {
# '__init__': MyClass.__init__,
# '__module__': 'test_metaclass',
# }
assert len(global_log) == 2
assert global_log[0] == ["__new__", MyMeta, "MyClass", (), ns_dict]
assert global_log[1] == ["__init__", MyClass, "MyClass", (), ns_dict]
assert MyClass.a_method(10) == (MyClass, 10)
m = MyClass(1, 2)
assert isinstance(m, MyClass)
assert len(global_log) == 4
assert global_log[2] == ['__call__', MyClass, (1, 2), {}]
assert global_log[3] == ['MyKlass object', 1, 2]
class A:
class B:
pass
def METHOD_NAME():
assert A.__name__ == "A"
assert A.__qualname__ == "A"
assert A.__module__ == __name__, "should be '%s' but was '%s'" % (__name__, A.__module__)
assert A.B.__name__ == "B"
assert A.B.__qualname__ == "A.B"
assert A.B.__module__ == __name__ |
test tuya data enum | """Test units for new Tuya cluster framework."""
from unittest import mock
import pytest
import zigpy.types as t
import zigpy.zcl.foundation as zcl_f
from zhaquirks.tuya import (
TUYA_ACTIVE_STATUS_RPT,
TUYA_GET_DATA,
TUYA_SET_DATA_RESPONSE,
TUYA_SET_TIME,
TuyaCommand,
TuyaData,
TuyaDatapointData,
TuyaNewManufCluster,
)
@pytest.fixture(name="TuyaCluster")
def tuya_cluster(zigpy_device_mock):
"""Mock of the new Tuya manufacturer cluster."""
device = zigpy_device_mock()
endpoint = device.add_endpoint(1)
cluster = TuyaNewManufCluster(endpoint)
return cluster
def test_tuya_data_raw():
"""Test tuya "Raw" datatype."""
class Test(t.Struct):
test_bool: t.Bool
test_uint16_t_be: t.uint16_t_be
data = b"\x00\x00\x03\x01\x02\x46"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 0
assert r.raw == b"\x01\x02\x46"
assert Test.deserialize(r.payload)[0] == Test(True, 582)
r.payload = Test(False, 314)
assert r.raw == b"\x00\x01\x3a"
def test_tuya_data_value():
"""Test tuya "Value" datatype."""
data = b"\x02\x00\x04\x00\x00\x02\xdb"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 2
assert r.raw == b"\x00\x00\x02\xdb"
assert r.payload == 731
r.payload = 582
assert r.raw == b"\x00\x00\x02\x46"
def test_tuya_negative_value():
"""Test tuya negative "Value" datatype."""
data = b"\x02\x00\x04\xff\xff\xff\xf8"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 2
assert r.raw == b"\xff\xff\xff\xf8"
assert r.payload == -8
def test_tuya_data_bool():
"""Test tuya Bool datatype."""
data = b"\x01\x00\x01\x00"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 1
assert r.raw == b"\x00"
assert not r.payload
r.payload = True
assert r.raw == b"\x01"
data = b"\x01\x00\x01\x01"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 1
assert r.raw == b"\x01"
assert r.payload
r.payload = False
assert r.raw == b"\x00"
def METHOD_NAME():
"""Test tuya Enum datatype."""
data = b"\x04\x00\x01\x40"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 4
assert r.raw == b"\x40"
assert r.payload == 0x40
r.payload = 0x42
assert r.raw == b"\x42"
def test_tuya_data_string():
"""Test tuya String datatype."""
data = b"\x03\x00\x04Tuya"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 3
assert r.raw == b"Tuya"
assert r.payload == "Tuya"
r.payload = "Data"
assert r.raw == b"Data"
def test_tuya_data_bitmap():
"""Test tuya Bitmap datatype."""
data = b"\x05\x00\x01\x40"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 5
assert r.raw == b"\x40"
assert r.payload == 0x40
r.payload = 0x82
assert r.raw == b"\x82"
data = b"\x05\x00\x02\x40\x02"
r, _ = TuyaData.deserialize(data)
assert r.payload == 0x4002
r.payload = t.bitmap16(0x2004)
assert r.raw == b"\x20\x04"
data = b"\x05\x00\x04\x40\x02\x80\x01"
r, _ = TuyaData.deserialize(data)
assert r.payload == 0x40028001
r.payload = t.bitmap32(0x10082004)
assert r.raw == b"\x10\x08\x20\x04"
def test_tuya_data_bitmap_invalid():
"""Test tuya Bitmap datatype."""
data = b"\x05\x00\x03\x4012"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
with pytest.raises(ValueError):
r.payload
def test_tuya_data_unknown():
"""Test tuya unknown datatype."""
data = b"\x06\x00\x04\x03\x02\x01\x00"
extra = b"extra data"
r, rest = TuyaData.deserialize(data + extra)
assert rest == extra
assert r.dp_type == 6
assert r.raw == b"\x03\x02\x01\x00"
with pytest.raises(ValueError):
r.payload
with pytest.raises(ValueError):
r.payload = 0
@pytest.mark.parametrize(
"cmd_id, handler_name, args",
(
(
TUYA_GET_DATA,
"handle_get_data",
(
TuyaCommand(
status=0,
tsn=2,
datapoints=[TuyaDatapointData(2, TuyaData(1, 0, b"\x01\x01"))],
),
),
),
(
TUYA_SET_DATA_RESPONSE,
"handle_set_data_response",
(
TuyaCommand(
status=0,
tsn=2,
datapoints=[TuyaDatapointData(2, TuyaData(1, 0, b"\x01\x01"))],
),
),
),
(
TUYA_ACTIVE_STATUS_RPT,
"handle_active_status_report",
(
TuyaCommand(
status=0,
tsn=2,
datapoints=[TuyaDatapointData(2, TuyaData(1, 0, b"\x01\x01"))],
),
),
),
(TUYA_SET_TIME, "handle_set_time_request", (0x1234,)),
),
)
@mock.patch("zhaquirks.tuya.TuyaNewManufCluster.send_default_rsp")
def test_tuya_cluster_request(
default_rsp_mock, cmd_id, handler_name, args, TuyaCluster
):
"""Test cluster specific request."""
hdr = zcl_f.ZCLHeader.general(1, cmd_id, direction=zcl_f.Direction.Client_to_Server)
hdr.frame_control.disable_default_response = False
with mock.patch.object(TuyaCluster, handler_name) as handler:
handler.return_value = mock.sentinel.status
TuyaCluster.handle_cluster_request(hdr, args)
assert handler.call_count == 1
assert default_rsp_mock.call_count == 1
assert default_rsp_mock.call_args[1]["status"] is mock.sentinel.status
@mock.patch("zhaquirks.tuya.TuyaNewManufCluster.send_default_rsp")
def test_tuya_cluster_request_unk_command(default_rsp_mock, TuyaCluster):
"""Test cluster specific request handler -- no handler."""
hdr = zcl_f.ZCLHeader.general(1, 0xFE, direction=zcl_f.Direction.Client_to_Server)
hdr.frame_control.disable_default_response = False
TuyaCluster.handle_cluster_request(hdr, (mock.sentinel.args,))
assert default_rsp_mock.call_count == 1
assert default_rsp_mock.call_args[1]["status"] == zcl_f.Status.UNSUP_CLUSTER_COMMAND
@mock.patch("zhaquirks.tuya.TuyaNewManufCluster.send_default_rsp")
def test_tuya_cluster_request_no_handler(default_rsp_mock, TuyaCluster):
"""Test cluster specific request handler -- no handler."""
hdr = zcl_f.ZCLHeader.general(1, 0xFE, direction=zcl_f.Direction.Client_to_Server)
hdr.frame_control.disable_default_response = False
new_client_commands = TuyaCluster.client_commands.copy()
new_client_commands[0xFE] = zcl_f.ZCLCommandDef(
"no_such_handler", {}, is_manufacturer_specific=True
)
with mock.patch.object(TuyaCluster, "client_commands", new_client_commands):
TuyaCluster.handle_cluster_request(hdr, (mock.sentinel.args,))
assert default_rsp_mock.call_count == 1
assert default_rsp_mock.call_args[1]["status"] == zcl_f.Status.UNSUP_CLUSTER_COMMAND |
test big dict | from test import test_support
import java
import unittest
from collections import defaultdict
class DictInitTest(unittest.TestCase):
def testInternalSetitemInInit(self):
"""Test for http://jython.org/bugs/1816134
CPython's dict uses an internal setitem method to initialize itself
rather than the one on its subclasses, and this tests that Jython does
as well.
"""
class Subdict(dict):
def __init__(self):
super(Subdict, self).__init__([('a',1)])
self.createdInInit = 1
def __setitem__(self, key, value):
super(Subdict, self).__setitem__(key, value)
assert hasattr(self, 'createdInInit')
self.createdInInit = value
s = Subdict()
s[7] = 'called'
self.assertEquals('called', s.createdInInit)
def testUnhashableKeys(self):
try:
a = {[1]:2}
except TypeError:
pass
else:
self.fail("list as dict key should raise TypeError")
try:
a = {{1:2}:3}
except TypeError:
pass
else:
self.fail("dict as dict key should raise TypeError")
class DictCmpTest(unittest.TestCase):
"Test for http://bugs.jython.org/issue1031"
def testDictCmp(self):
# 'Implicit' comparison of dicts against instances of other types
# shouldn't raise an exception:
self.assertNotEqual({}, '')
# The same, but explicitly calling __cmp__ should raise TypeError:
self.assertRaises(TypeError, {}.__cmp__, '')
def testDictDerivedCmp(self):
# With derived classes that don't override __cmp__, the behaviour
# should be the same as with dicts:
class derived_dict(dict): pass
self.assertEqual(derived_dict(), {})
self.assertNotEqual(derived_dict(), '')
self.assertRaises(TypeError, derived_dict().__cmp__, '')
# But, if they *override* __cmp__ and raise TypeError from there, an
# exception is raised when checking for equality...
class non_comparable_dict(dict):
def __cmp__(self, other):
raise TypeError, "I always raise TypeError"
self.assertRaises(TypeError, lambda: non_comparable_dict() == '')
self.assertRaises(TypeError, non_comparable_dict().__cmp__, '')
# ...unless you compare it with other dicts:
# self.assertEqual(non_comparable_dict(), {})
# The same happens even if the overridden __cmp__ does nothing apart
# from calling super:
class dummy_dict_with_cmp(dict):
def __cmp__(self, other):
return super(dummy_dict_with_cmp, self).__cmp__(other)
self.assertEqual(dummy_dict_with_cmp(), {})
# But TypeError is raised when comparing against other types
self.assertRaises(TypeError, lambda: dummy_dict_with_cmp() == '')
self.assertRaises(TypeError, dummy_dict_with_cmp().__cmp__, '')
# Finally, the Python implementation shouldn't be tricked by not
# implementing __cmp__ on the actual type of the dict-derived instance,
# but implementing it on a superclass.
class derived_dict_with_custom_cmp(dict):
def __cmp__(self, other):
return 0
class yet_another_dict(derived_dict_with_custom_cmp): pass
self.assertEqual(derived_dict_with_custom_cmp(), '')
self.assertEqual(yet_another_dict(), '')
class DerivedDictTest(unittest.TestCase):
"Tests for derived dict behaviour"
def test_raising_custom_key_error(self):
class CustomKeyError(KeyError):
pass
class DerivedDict(dict):
def __getitem__(self, key):
raise CustomKeyError("custom message")
self.assertRaises(CustomKeyError, lambda: DerivedDict()['foo'])
def test_issue1676(self):
#See http://bugs.jython.org/issue1676
x=defaultdict()
#This formerly caused an NPE.
self.assertEqual(None, x.pop(None,None))
def METHOD_NAME(self):
"""Verify that fairly large collection literals of primitives can be constructed."""
# use \n to separate to avoid parser problems
d = eval("{" + ",\n".join(("'key{}': {}".format(x, x) for x in xrange(16000))) +"}")
self.assertEqual(len(d), 16000)
self.assertEqual(sum(d.itervalues()), 127992000)
class JavaIntegrationTest(unittest.TestCase):
"Tests for instantiating dicts from Java maps and hashtables"
def test_hashmap(self):
x = java.util.HashMap()
x.put('a', 1)
x.put('b', 2)
x.put('c', 3)
x.put((1,2), "xyz")
y = dict(x)
self.assertEqual(set(y.items()), set([('a', 1), ('b', 2), ('c', 3), ((1,2), "xyz")]))
def test_hashtable(self):
x = java.util.Hashtable()
x.put('a', 1)
x.put('b', 2)
x.put('c', 3)
x.put((1,2), "xyz")
y = dict(x)
self.assertEqual(set(y.items()), set([('a', 1), ('b', 2), ('c', 3), ((1,2), "xyz")]))
def test_main():
test_support.run_unittest(DictInitTest, DictCmpTest, DerivedDictTest, JavaIntegrationTest)
if __name__ == '__main__':
test_main() |
process request | import hmac
import unittest
import urllib.error
from websockets.exceptions import InvalidStatusCode
from websockets.headers import build_authorization_basic
from websockets.legacy.auth import *
from websockets.legacy.auth import is_credentials
from .test_client_server import ClientServerTestsMixin, with_client, with_server
from .utils import AsyncioTestCase
class AuthTests(unittest.TestCase):
def test_is_credentials(self):
self.assertTrue(is_credentials(("username", "password")))
def test_is_not_credentials(self):
self.assertFalse(is_credentials(None))
self.assertFalse(is_credentials("username"))
class CustomWebSocketServerProtocol(BasicAuthWebSocketServerProtocol):
async def METHOD_NAME(self, path, request_headers):
type(self).used = True
return await super().METHOD_NAME(path, request_headers)
class CheckWebSocketServerProtocol(BasicAuthWebSocketServerProtocol):
async def check_credentials(self, username, password):
return hmac.compare_digest(password, "letmein")
class AuthClientServerTests(ClientServerTestsMixin, AsyncioTestCase):
create_protocol = basic_auth_protocol_factory(
realm="auth-tests", credentials=("hello", "iloveyou")
)
@with_server(create_protocol=create_protocol)
@with_client(user_info=("hello", "iloveyou"))
def test_basic_auth(self):
req_headers = self.client.request_headers
resp_headers = self.client.response_headers
self.assertEqual(req_headers["Authorization"], "Basic aGVsbG86aWxvdmV5b3U=")
self.assertNotIn("WWW-Authenticate", resp_headers)
self.loop.run_until_complete(self.client.send("Hello!"))
self.loop.run_until_complete(self.client.recv())
def test_basic_auth_server_no_credentials(self):
with self.assertRaises(TypeError) as raised:
basic_auth_protocol_factory(realm="auth-tests", credentials=None)
self.assertEqual(
str(raised.exception), "provide either credentials or check_credentials"
)
def test_basic_auth_server_bad_credentials(self):
with self.assertRaises(TypeError) as raised:
basic_auth_protocol_factory(realm="auth-tests", credentials=42)
self.assertEqual(str(raised.exception), "invalid credentials argument: 42")
create_protocol_multiple_credentials = basic_auth_protocol_factory(
realm="auth-tests",
credentials=[("hello", "iloveyou"), ("goodbye", "stillloveu")],
)
@with_server(create_protocol=create_protocol_multiple_credentials)
@with_client(user_info=("hello", "iloveyou"))
def test_basic_auth_server_multiple_credentials(self):
self.loop.run_until_complete(self.client.send("Hello!"))
self.loop.run_until_complete(self.client.recv())
def test_basic_auth_bad_multiple_credentials(self):
with self.assertRaises(TypeError) as raised:
basic_auth_protocol_factory(
realm="auth-tests", credentials=[("hello", "iloveyou"), 42]
)
self.assertEqual(
str(raised.exception),
"invalid credentials argument: [('hello', 'iloveyou'), 42]",
)
async def check_credentials(username, password):
return hmac.compare_digest(password, "iloveyou")
create_protocol_check_credentials = basic_auth_protocol_factory(
realm="auth-tests",
check_credentials=check_credentials,
)
@with_server(create_protocol=create_protocol_check_credentials)
@with_client(user_info=("hello", "iloveyou"))
def test_basic_auth_check_credentials(self):
self.loop.run_until_complete(self.client.send("Hello!"))
self.loop.run_until_complete(self.client.recv())
create_protocol_custom_protocol = basic_auth_protocol_factory(
realm="auth-tests",
credentials=[("hello", "iloveyou")],
create_protocol=CustomWebSocketServerProtocol,
)
@with_server(create_protocol=create_protocol_custom_protocol)
@with_client(user_info=("hello", "iloveyou"))
def test_basic_auth_custom_protocol(self):
self.assertTrue(CustomWebSocketServerProtocol.used)
del CustomWebSocketServerProtocol.used
self.loop.run_until_complete(self.client.send("Hello!"))
self.loop.run_until_complete(self.client.recv())
@with_server(create_protocol=CheckWebSocketServerProtocol)
@with_client(user_info=("hello", "letmein"))
def test_basic_auth_custom_protocol_subclass(self):
self.loop.run_until_complete(self.client.send("Hello!"))
self.loop.run_until_complete(self.client.recv())
# CustomWebSocketServerProtocol doesn't override check_credentials
@with_server(create_protocol=CustomWebSocketServerProtocol)
def test_basic_auth_defaults_to_deny_all(self):
with self.assertRaises(InvalidStatusCode) as raised:
self.start_client(user_info=("hello", "iloveyou"))
self.assertEqual(raised.exception.status_code, 401)
@with_server(create_protocol=create_protocol)
def test_basic_auth_missing_credentials(self):
with self.assertRaises(InvalidStatusCode) as raised:
self.start_client()
self.assertEqual(raised.exception.status_code, 401)
@with_server(create_protocol=create_protocol)
def test_basic_auth_missing_credentials_details(self):
with self.assertRaises(urllib.error.HTTPError) as raised:
self.loop.run_until_complete(self.make_http_request())
self.assertEqual(raised.exception.code, 401)
self.assertEqual(
raised.exception.headers["WWW-Authenticate"],
'Basic realm="auth-tests", charset="UTF-8"',
)
self.assertEqual(raised.exception.read().decode(), "Missing credentials\n")
@with_server(create_protocol=create_protocol)
def test_basic_auth_unsupported_credentials(self):
with self.assertRaises(InvalidStatusCode) as raised:
self.start_client(extra_headers={"Authorization": "Digest ..."})
self.assertEqual(raised.exception.status_code, 401)
@with_server(create_protocol=create_protocol)
def test_basic_auth_unsupported_credentials_details(self):
with self.assertRaises(urllib.error.HTTPError) as raised:
self.loop.run_until_complete(
self.make_http_request(headers={"Authorization": "Digest ..."})
)
self.assertEqual(raised.exception.code, 401)
self.assertEqual(
raised.exception.headers["WWW-Authenticate"],
'Basic realm="auth-tests", charset="UTF-8"',
)
self.assertEqual(raised.exception.read().decode(), "Unsupported credentials\n")
@with_server(create_protocol=create_protocol)
def test_basic_auth_invalid_username(self):
with self.assertRaises(InvalidStatusCode) as raised:
self.start_client(user_info=("goodbye", "iloveyou"))
self.assertEqual(raised.exception.status_code, 401)
@with_server(create_protocol=create_protocol)
def test_basic_auth_invalid_password(self):
with self.assertRaises(InvalidStatusCode) as raised:
self.start_client(user_info=("hello", "ihateyou"))
self.assertEqual(raised.exception.status_code, 401)
@with_server(create_protocol=create_protocol)
def test_basic_auth_invalid_credentials_details(self):
with self.assertRaises(urllib.error.HTTPError) as raised:
authorization = build_authorization_basic("hello", "ihateyou")
self.loop.run_until_complete(
self.make_http_request(headers={"Authorization": authorization})
)
self.assertEqual(raised.exception.code, 401)
self.assertEqual(
raised.exception.headers["WWW-Authenticate"],
'Basic realm="auth-tests", charset="UTF-8"',
)
self.assertEqual(raised.exception.read().decode(), "Invalid credentials\n") |
trigger | '''
File: SCPI.py
Author: Wouter Vlothuizen, TNO/QuTech
Purpose: base class for SCPI ('Standard Commands for Programmable
Instruments') commands
Usage: don't use directly, use a derived class (e.g. QWG)
Notes:
Bugs:
'''
from qcodes import IPInstrument
from qcodes import validators as vals
import socket
"""
FIXME: we would like to be able to choose the base class separately, so the
user can choose (e.g. use VISA for IEEE488 bus units, and IPInstrument for
networked units). This would also make the inits cleaner
"""
class SCPI(IPInstrument):
def __init__(self, name, address, port, **kwargs):
super().__init__(name, address, port,
write_confirmation=False, # required for QWG
**kwargs)
# send things immediately
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
# beef up buffer, to prevent socket.send() not sending all our data
# in one go
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 512*1024)
# FIXME convert operation etc to parameters
# IDN is implemented in the instrument base class
# example of how the commands could look
self.add_function('reset', call_cmd='*RST')
def _recv(self):
"""
Overrides the base IPInstrument recv to ensure we read till EOM
FIXME: should be in parent class
"""
return self._socket.makefile().readline().rstrip()
###
# Helpers
###
def readBinary(self, size):
data = self._socket.recv(size)
actLen = len(data)
expLen = size
i = 1
while (actLen != expLen):
data += self._socket.recv(expLen-actLen)
actLen = len(data)
i = i+1
return data
def writeBinary(self, binMsg):
self._socket.send(binMsg) # FIXME: should be in parent class
    def ask_float(self, cmd):
        return float(self.ask(cmd))
    def ask_int(self, cmd):
        return int(self.ask(cmd))
###
# Generic SCPI commands from IEEE 488.2 (IEC 625-2) standard
###
def clearStatus(self):
self.write('*CLS')
def setEventStatusEnable(self, value):
self.write('*ESE %d' % value)
def getEventStatusEnable(self):
return self.ask('*ESE?')
def getEventStatusEnableRegister(self):
return self.ask('*ESR?')
def getIdentity(self):
return self.ask('*IDN?')
def operationComplete(self):
self.write('*OPC')
def getOperationComplete(self):
return self.ask('*OPC?')
def get_operation_complete(self): # FIXME: PR #638, all naming should be changed to snake_case
return self.ask('*OPC?')
def getOptions(self):
return self.ask('*OPT?')
def serviceRequestEnable(self, value):
self.write('*SRE %d' % value)
def getServiceRequestEnable(self):
return self.ask_int('*SRE?')
def getStatusByte(self):
return self.ask_int('*STB?')
def getTestResult(self):
# NB: result bits are device dependent
return self.ask_int('*TST?')
def METHOD_NAME(self):
self.write('*TRG')
def wait(self):
self.write('*WAI')
def reset(self):
self.write('*RST')
###
# Required SCPI commands (SCPI std V1999.0 4.2.1)
###
def getError(self):
''' Returns: '0,"No error"' or <error message>
'''
return self.ask('system:err?')
def getSystemErrorCount(self):
return self.ask_int('system:error:count?')
def getSystemVersion(self):
return self.ask('system:version?')
###
# IEEE 488.2 binblock handling
###
def binBlockWrite(self, binBlock, header):
'''
write IEEE488.2 binblock
Args:
binBlock (bytearray): binary data to send
header (string): command string to use
'''
totHdr = header + SCPI.buildHeaderString(len(binBlock))
binMsg = totHdr.encode() + binBlock
self.writeBinary(binMsg)
self.write('') # add a Line Terminator
def binBlockRead(self):
# FIXME: untested
''' read IEEE488.2 binblock
'''
# get and decode header
headerA = self.readBinary(2) # read '#N'
headerAstr = headerA.decode()
if(headerAstr[0] != '#'):
s = 'SCPI header error: received {}'.format(headerA)
raise RuntimeError(s)
digitCnt = int(headerAstr[1])
headerB = self.readBinary(digitCnt)
byteCnt = int(headerB.decode())
binBlock = self.readBinary(byteCnt)
self.readBinary(2) # consume <CR><LF>
return binBlock
@staticmethod
def buildHeaderString(byteCnt):
''' generate IEEE488.2 binblock header
'''
byteCntStr = str(byteCnt)
digitCntStr = str(len(byteCntStr))
binHeaderStr = '#' + digitCntStr + byteCntStr
return binHeaderStr |
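# --- Illustrative sketch (editor's addition) ---
# Worked example of the IEEE488.2 binblock header built by buildHeaderString():
# the header is '#', then the number of digits in the byte count, then the byte
# count itself, so a 1000-byte payload is prefixed with '#41000'.
if __name__ == '__main__':
    assert SCPI.buildHeaderString(5) == '#15'
    assert SCPI.buildHeaderString(1000) == '#41000'
    assert SCPI.buildHeaderString(123456) == '#6123456'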
remove vlan | # Copyright (c) 2021 Microsoft Open Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT
# LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS
# FOR A PARTICULAR PURPOSE, MERCHANTABILITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
#
# Microsoft would like to thank the following companies for their review and
# assistance with these files: Intel Corporation, Mellanox Technologies Ltd,
# Dell Products, L.P., Facebook, Inc., Marvell International Ltd.
#
#
from sai_thrift.sai_adapter import *
from sai_utils import * # pylint: disable=wildcard-import; lgtm[py/polluting-import]
from typing import TYPE_CHECKING
from data_module.vlan import Vlan
if TYPE_CHECKING:
from sai_test_base import T0TestBase
def t0_vlan_config_helper(test_obj: 'T0TestBase', is_reset_default_vlan=False, is_create_vlan=True):
"""
    Make t0 Vlan configurations based on the configuration in the test plan.
Set the configuration in test directly.
Set the following test_obj attributes:
int: default_vlan_id
dict: vlans - vid_id: vlan_object
"""
configer = VlanConfiger(test_obj)
vlans = {}
    # Todo add port to vlan member map and vice versa
    # Todo maintain the two maps (port <-> vlan member) when adding or removing
default_vlan_id = configer.get_default_vlan()
if is_reset_default_vlan:
members = configer.get_vlan_member(default_vlan_id)
configer.remove_vlan_members(members)
if is_create_vlan:
vlan = configer.create_vlan(10, [1, 2, 3, 4, 5, 6, 7, 8])
vlans.update({vlan.vlan_id: vlan})
vlan = configer.create_vlan(20, [9, 10, 11, 12, 13, 14, 15, 16])
vlans.update({vlan.vlan_id: vlan})
# todo check and get vlan when skip create vlan
if not hasattr(test_obj, 'vlans'):
test_obj.dut.vlans = {}
for key in vlans:
test_obj.dut.vlans.update({key: vlans[key]})
test_obj.dut.default_vlan_id = default_vlan_id
def remove_default_vlan(test_obj: 'T0TestBase'):
"""
Remove default Vlan
test_obj: test object from a test class
"""
configer = VlanConfiger(test_obj)
default_vlan_id = configer.get_default_vlan()
members = configer.get_vlan_member(default_vlan_id)
configer.remove_vlan_members(members)
def t0_vlan_tear_down_helper(test_obj: 'T0TestBase'):
'''
Args:
test_obj: test object
remove vlan
'''
configer = VlanConfiger(test_obj)
# remove default vlan
default_vlan_id = configer.get_default_vlan()
members = configer.get_vlan_member(default_vlan_id)
configer.remove_vlan_members(members)
# configer.remove_vlan(default_vlan_id)
for _, vlan in test_obj.dut.vlans.items():
members = configer.get_vlan_member(vlan.oid)
configer.remove_vlan_members(members)
configer.METHOD_NAME(vlan.oid)
test_obj.dut.vlans.clear()
class VlanConfiger(object):
"""
    Class used to make all the vlan configurations.
"""
def __init__(self, test_obj: 'T0TestBase') -> None:
"""
Init the Vlan configer.
Args:
test_obj: the test object
"""
self.test_obj = test_obj
self.client = test_obj.client
def create_vlan(self, vlan_id, vlan_port_idxs, vlan_tagging_mode=SAI_VLAN_TAGGING_MODE_UNTAGGED):
"""
Create vlan and its members.
Args:
vlan_id: vlan id
vlan_port_idxs: vlan member ports index
vlan_tagging_mode: SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE
Returns:
Vlan: vlan object
"""
vlan = Vlan(None, None, [], None, [], [])
        print("Create vlan {} and its member ports at {} ...".format(
vlan_id, vlan_port_idxs))
vlan_oid = sai_thrift_create_vlan(self.client, vlan_id=vlan_id)
members = self.create_vlan_member(
vlan_oid, vlan_port_idxs, vlan_tagging_mode)
vlan.vlan_id = vlan_id
vlan.vlan_mport_oids = members
vlan.oid = vlan_oid
vlan.port_idx_list = vlan_port_idxs
return vlan
def create_vlan_member(self, vlan_oid, vlan_ports, vlan_tagging_mode=SAI_VLAN_TAGGING_MODE_UNTAGGED):
"""
Create vlan members for a vlan.
Args:
vlan_oid: vlan oid
vlan_ports: vlan member ports index
vlan_tagging_mode: SAI_VLAN_MEMBER_ATTR_VLAN_TAGGING_MODE
Returns:
list: vlan members oid
"""
vlan_members = []
attr = sai_thrift_get_vlan_attribute(
self.client, vlan_oid, vlan_id=True)
vlan_id = attr['vlan_id']
for port_index in vlan_ports:
vlan_member = sai_thrift_create_vlan_member(self.client,
vlan_id=vlan_oid,
bridge_port_id=self.test_obj.dut.port_obj_list[port_index].bridge_port_oid,
vlan_tagging_mode=vlan_tagging_mode)
vlan_members.append(vlan_member)
sai_thrift_set_port_attribute(
self.client, self.test_obj.dut.port_obj_list[port_index].oid, port_vlan_id=vlan_id)
return vlan_members
def get_default_vlan(self):
"""
        Get default vlan.
Returns:
default_vlan_id
"""
print("Get default vlan...")
def_attr = sai_thrift_get_switch_attribute(self.client, default_vlan_id=True)
return def_attr['default_vlan_id']
def get_vlan_member(self, vlan_id):
"""
Get vlan member by vlan_id.
Args:
vlan_id: vlan id
Returns:
list: vlan member oid list
"""
vlan_member_size = self.test_obj.dut.active_ports_no + self.test_obj.dut.system_port_no
vlan_member_list = sai_thrift_object_list_t(count=vlan_member_size)
mbr_list = sai_thrift_get_vlan_attribute(self.client, vlan_id, member_list=vlan_member_list)
self.test_obj.assertEqual(self.test_obj.status(), SAI_STATUS_SUCCESS)
return mbr_list['SAI_VLAN_ATTR_MEMBER_LIST'].idlist
def METHOD_NAME(self, vlan_oid):
"""
        Remove a vlan by its object id (its members are removed separately
        via remove_vlan_members).
        Args:
            vlan_oid: vlan object id
        """
        print("Remove vlan {} ...".format(vlan_oid))
sai_thrift_remove_vlan(self.client, vlan_oid)
self.test_obj.assertEqual(self.test_obj.status(), SAI_STATUS_SUCCESS)
def remove_vlan_members(self, vlan_members):
"""
Remove vlan members.
Args:
vlan_members: vlan member oids
"""
for vlan_member in vlan_members:
sai_thrift_remove_vlan_member(self.client, vlan_member)
self.test_obj.assertEqual(self.test_obj.status(), SAI_STATUS_SUCCESS) |
raise exc | """
This file contains all jobs that are used in tests. Each of these test
fixtures has a slightly different characteristics.
"""
import contextlib
import os
import signal
import subprocess
import sys
import time
from multiprocessing import Process
from redis import Redis
from rq import Connection, Queue, get_current_connection, get_current_job
from rq.command import send_kill_horse_command, send_shutdown_command
from rq.decorators import job
from rq.job import Job
from rq.worker import HerokuWorker, Worker
def say_pid():
return os.getpid()
def say_hello(name=None):
"""A job with a single argument and a return value."""
if name is None:
name = 'Stranger'
return 'Hi there, %s!' % (name,)
async def say_hello_async(name=None):
"""A async job with a single argument and a return value."""
return say_hello(name)
def say_hello_unicode(name=None):
"""A job with a single argument and a return value."""
return str(say_hello(name)) # noqa
def do_nothing():
"""The best job in the world."""
pass
def METHOD_NAME():
raise Exception('raise_exc error')
def raise_exc_mock():
return METHOD_NAME
def div_by_zero(x):
"""Prepare for a division-by-zero exception."""
return x / 0
def long_process():
time.sleep(60)
return
def some_calculation(x, y, z=1):
"""Some arbitrary calculation with three numbers. Choose z smartly if you
want a division by zero exception.
"""
return x * y / z
def rpush(key, value, append_worker_name=False, sleep=0):
"""Push a value into a list in Redis. Useful for detecting the order in
which jobs were executed."""
if sleep:
time.sleep(sleep)
if append_worker_name:
value += ':' + get_current_job().worker_name
redis = get_current_connection()
redis.rpush(key, value)
def check_dependencies_are_met():
return get_current_job().dependencies_are_met()
def create_file(path):
"""Creates a file at the given path. Actually, leaves evidence that the
job ran."""
with open(path, 'w') as f:
f.write('Just a sentinel.')
def create_file_after_timeout(path, timeout):
time.sleep(timeout)
create_file(path)
def create_file_after_timeout_and_setsid(path, timeout):
os.setsid()
create_file_after_timeout(path, timeout)
def launch_process_within_worker_and_store_pid(path, timeout):
p = subprocess.Popen(['sleep', str(timeout)])
with open(path, 'w') as f:
f.write('{}'.format(p.pid))
p.wait()
def access_self():
assert get_current_connection() is not None
assert get_current_job() is not None
def modify_self(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
def modify_self_and_error(meta):
j = get_current_job()
j.meta.update(meta)
j.save()
return 1 / 0
def echo(*args, **kwargs):
return args, kwargs
class Number:
def __init__(self, value):
self.value = value
@classmethod
def divide(cls, x, y):
return x * y
def div(self, y):
return self.value / y
class CallableObject:
def __call__(self):
return u"I'm callable"
class UnicodeStringObject:
def __repr__(self):
return u'é'
class ClassWithAStaticMethod:
@staticmethod
def static_method():
return u"I'm a static method"
with Connection():
@job(queue='default')
def decorated_job(x, y):
return x + y
def black_hole(job, *exc_info):
# Don't fall through to default behaviour (moving to failed queue)
return False
def add_meta(job, *exc_info):
job.meta = {'foo': 1}
job.save()
return True
def save_key_ttl(key):
# Stores key ttl in meta
job = get_current_job()
ttl = job.connection.ttl(key)
job.meta = {'ttl': ttl}
job.save_meta()
def long_running_job(timeout=10):
time.sleep(timeout)
return 'Done sleeping...'
def run_dummy_heroku_worker(sandbox, _imminent_shutdown_delay):
"""
Run the work horse for a simplified heroku worker where perform_job just
creates two sentinel files 2 seconds apart.
:param sandbox: directory to create files in
:param _imminent_shutdown_delay: delay to use for HerokuWorker
"""
sys.stderr = open(os.path.join(sandbox, 'stderr.log'), 'w')
class TestHerokuWorker(HerokuWorker):
imminent_shutdown_delay = _imminent_shutdown_delay
def perform_job(self, job, queue):
create_file(os.path.join(sandbox, 'started'))
# have to loop here rather than one sleep to avoid holding the GIL
# and preventing signals being received
for i in range(20):
time.sleep(0.1)
create_file(os.path.join(sandbox, 'finished'))
w = TestHerokuWorker(Queue('dummy'))
w.main_work_horse(None, None)
class DummyQueue:
pass
def kill_worker(pid: int, double_kill: bool, interval: float = 1.5):
# wait for the worker to be started over on the main process
time.sleep(interval)
os.kill(pid, signal.SIGTERM)
if double_kill:
# give the worker time to switch signal handler
time.sleep(interval)
os.kill(pid, signal.SIGTERM)
class Serializer:
def loads(self):
pass
def dumps(self):
pass
def start_worker(queue_name, conn_kwargs, worker_name, burst):
"""
Start a worker. We accept only serializable args, so that this can be
executed via multiprocessing.
"""
# Silence stdout (thanks to <https://stackoverflow.com/a/28321717/14153673>)
with open(os.devnull, 'w') as devnull:
with contextlib.redirect_stdout(devnull):
w = Worker([queue_name], name=worker_name, connection=Redis(**conn_kwargs))
w.work(burst=burst)
def start_worker_process(queue_name, connection=None, worker_name=None, burst=False):
"""
Use multiprocessing to start a new worker in a separate process.
"""
connection = connection or get_current_connection()
conn_kwargs = connection.connection_pool.connection_kwargs
p = Process(target=start_worker, args=(queue_name, conn_kwargs, worker_name, burst))
p.start()
return p
def burst_two_workers(queue, timeout=2, tries=5, pause=0.1):
"""
Get two workers working simultaneously in burst mode, on a given queue.
Return after both workers have finished handling jobs, up to a fixed timeout
on the worker that runs in another process.
"""
w1 = start_worker_process(queue.name, worker_name='w1', burst=True)
w2 = Worker(queue, name='w2')
jobs = queue.jobs
if jobs:
first_job = jobs[0]
# Give the first worker process time to get started on the first job.
# This is helpful in tests where we want to control which worker takes which job.
n = 0
while n < tries and not first_job.is_started:
time.sleep(pause)
n += 1
# Now can start the second worker.
w2.work(burst=True)
w1.join(timeout)
def save_result(job, connection, result):
"""Store job result in a key"""
connection.set('success_callback:%s' % job.id, result, ex=60)
def save_exception(job, connection, type, value, traceback):
"""Store job exception in a key"""
connection.set('failure_callback:%s' % job.id, str(value), ex=60)
def save_result_if_not_stopped(job, connection, result=""):
connection.set('stopped_callback:%s' % job.id, result, ex=60)
def erroneous_callback(job):
"""A callback that's not written properly"""
pass
def _send_shutdown_command(worker_name, connection_kwargs, delay=0.25):
time.sleep(delay)
send_shutdown_command(Redis(**connection_kwargs), worker_name)
def _send_kill_horse_command(worker_name, connection_kwargs, delay=0.25):
"""Waits delay before sending kill-horse command"""
time.sleep(delay)
send_kill_horse_command(Redis(**connection_kwargs), worker_name)
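# --- Illustrative usage sketch (editor's addition) ---
# How these fixtures are typically exercised: enqueue one of the jobs above on an
# RQ queue and drain it with a burst worker. Running it assumes a Redis server is
# reachable with default connection settings; the queue name is made up.
def example_enqueue_and_drain():
    queue = Queue('fixtures-demo', connection=Redis())
    job = queue.enqueue(say_hello, 'World')
    Worker([queue], connection=Redis()).work(burst=True)
    return job.result  # 'Hi there, World!'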
class CustomJob(Job):
"""A custom job class just to test it""" |
prepare request | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._usages_operations import build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class UsagesOperations:
"""UsagesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.machinelearningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list(
self,
location: str,
**kwargs: Any
) -> AsyncIterable["_models.ListUsagesResult"]:
"""Gets the current usage information as well as limits for AML resources for given subscription
and location.
:param location: The location for which resource usage is queried.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListUsagesResult or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.machinelearningservices.models.ListUsagesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListUsagesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def METHOD_NAME(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
location=location,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListUsagesResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = METHOD_NAME(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.MachineLearningServices/locations/{location}/usages'} # type: ignore |
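# --- Illustrative usage sketch (editor's addition, not generated code) ---
# The pager returned by list() is consumed with "async for"; pages are fetched
# lazily through get_next()/extract_data() above. The client class and credential
# below are assumptions for illustration and may not match this package exactly.
#
#   from azure.identity.aio import DefaultAzureCredential
#   from azure.mgmt.machinelearningservices.aio import AzureMachineLearningWorkspaces
#
#   async def print_usages(subscription_id: str, location: str) -> None:
#       async with AzureMachineLearningWorkspaces(DefaultAzureCredential(), subscription_id) as client:
#           async for usage in client.usages.list(location):
#               print(usage.name, usage.current_value, usage.limit)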
is empty | from __future__ import annotations
import contextlib
from abc import ABC, abstractmethod
from dataclasses import InitVar, dataclass, field
from datetime import date, datetime
from enum import Enum
from typing import Callable, ContextManager, Dict, Generic, Iterator, List, Optional, TypeVar
from metricflow.dataflow.sql_column import SqlColumn
from metricflow.dataflow.sql_table import SqlTable
from metricflow.inference.context.base import InferenceContext, InferenceContextProvider
from metricflow.protocols.sql_client import SqlClient
T = TypeVar("T", str, int, float, date, datetime)
class InferenceColumnType(str, Enum):
"""Represents a column type that can be used for inference.
This does not provide a 1 to 1 mapping between SQL types and enum values. For example,
all possible floating point types (FLOAT, DOUBLE etc) are mapped to the same FLOAT
value. Same for datetimes and others.
"""
STRING = "string"
BOOLEAN = "boolean"
INTEGER = "integer"
FLOAT = "float"
DATETIME = "datetime"
UNKNOWN = "unknown"
@dataclass(frozen=True)
class ColumnProperties(Generic[T]):
"""Holds properties about a column that were extracted from the data warehouse."""
column: SqlColumn
type: InferenceColumnType
row_count: int
distinct_row_count: int
is_nullable: bool
null_count: int
min_value: Optional[T]
max_value: Optional[T]
@property
    def METHOD_NAME(self) -> bool:
        """Whether the column has no rows."""
return self.row_count == 0
@dataclass(frozen=True)
class TableProperties:
"""Holds properties of a table and its columns that were extracted from the data warehouse."""
column_props: InitVar[List[ColumnProperties]]
table: SqlTable
columns: Dict[SqlColumn, ColumnProperties] = field(default_factory=lambda: {}, init=False)
def __post_init__(self, column_props: List[ColumnProperties]) -> None: # noqa: D
for col in column_props:
self.columns[col.column] = col
@dataclass(frozen=True)
class DataWarehouseInferenceContext(InferenceContext):
"""The inference context for a data warehouse. Holds statistics and metadata about each table and column."""
table_props: InitVar[List[TableProperties]]
tables: Dict[SqlTable, TableProperties] = field(default_factory=lambda: {}, init=False)
columns: Dict[SqlColumn, ColumnProperties] = field(default_factory=lambda: {}, init=False)
def __post_init__(self, table_props: List[TableProperties]) -> None: # noqa: D
for stats in table_props:
self.tables[stats.table] = stats
for column in stats.columns.values():
self.columns[column.column] = column
@contextlib.contextmanager
def _default_table_progress(table: SqlTable, index: int, total: int) -> Iterator[None]:
yield
class DataWarehouseInferenceContextProvider(InferenceContextProvider[DataWarehouseInferenceContext], ABC):
"""Provides inference context from a data warehouse by querying data from its tables."""
def __init__(self, client: SqlClient, tables: List[SqlTable], max_sample_size: int = 10000) -> None:
"""Initialize the class.
client: the underlying SQL engine client that will be used for querying table data.
tables: an exhaustive list of all tables that should be queried.
max_sample_size: max number of rows to sample from each table
"""
self._client = client
self.tables = tables
self.max_sample_size = max_sample_size
@abstractmethod
def _get_table_properties(self, table: SqlTable) -> TableProperties:
"""Fetch properties about a single table by querying the warehouse."""
raise NotImplementedError
def get_context(
self,
table_progress: Callable[[SqlTable, int, int], ContextManager[None]] = _default_table_progress,
) -> DataWarehouseInferenceContext:
"""Query the data warehouse for statistics about all tables and populate a context with it."""
table_props_list: List[TableProperties] = []
for i, table in enumerate(self.tables):
with table_progress(table, i, len(self.tables)):
table_props = self._get_table_properties(table)
table_props_list.append(table_props)
return DataWarehouseInferenceContext(table_props=table_props_list) |
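# --- Illustrative sketch (editor's addition) ---
# How the resulting context is indexed: a column's statistics can be reached
# either through its table or directly by column. The constructor signatures of
# SqlTable/SqlColumn are not shown in this file, so the snippet stays commented
# and treats them as assumptions.
#
#   table = SqlTable(...)   # e.g. the "db.schema.orders" table
#   col = SqlColumn(...)    # e.g. its "order_id" column
#   props = ColumnProperties(
#       column=col, type=InferenceColumnType.INTEGER, row_count=100,
#       distinct_row_count=100, is_nullable=False, null_count=0,
#       min_value=1, max_value=100,
#   )
#   ctx = DataWarehouseInferenceContext(
#       table_props=[TableProperties(column_props=[props], table=table)])
#   assert ctx.tables[table].columns[col] is ctx.columns[col]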
test move l r table prefix to | import sqlglot
from splink.athena.athena_helpers.athena_transforms import cast_concat_as_varchar
from splink.input_column import InputColumn
from splink.spark.spark_helpers.custom_spark_dialect import Dialect # noqa 401
from splink.sql_transform import (
move_l_r_table_prefix_to_column_suffix,
sqlglot_transform_sql,
)
def move_l_r_test(br, expected):
res = move_l_r_table_prefix_to_column_suffix(br)
assert res.lower() == expected.lower()
def METHOD_NAME():
br = "l.first_name = r.first_name"
expected = "first_name_l = first_name_r"
move_l_r_test(br, expected)
br = "substr(l.last_name, 1, 2) = substr(r.last_name, 1, 2)"
expected = "substr(last_name_l, 1, 2) = substr(last_name_r, 1, 2)"
move_l_r_test(br, expected)
br = "l.name['first'] = r.name['first'] and levenshtein(l.dob, r.dob) < 2"
expected = "name_l['first'] = name_r['first'] and levenshtein(dob_l, dob_r) < 2"
move_l_r_test(br, expected)
br = "concat_ws(', ', l.name, r.name)"
expected = "concat_ws(', ', name_l, name_r)"
move_l_r_test(br, expected)
br = "my_custom_function(l.name, r.name)"
expected = "my_custom_function(name_l, name_r)"
move_l_r_test(br, expected)
br = "len(list_filter(l.name_list, x -> list_contains(r.name_list, x))) >= 1"
expected = "len(list_filter(name_list_l, x -> list_contains(name_list_r, x))) >= 1"
move_l_r_test(br, expected)
br = "len(list_filter(l.name_list, x -> list_contains(r.name_list, x))) >= 1"
res = move_l_r_table_prefix_to_column_suffix(br)
expected = "len(list_filter(name_list_l, x -> list_contains(name_list_r, x))) >= 1"
assert res.lower() == expected.lower()
def test_cast_concat_as_varchar():
output = """
select cast(l.source_dataset as varchar) || '-__-' ||
cast(l.unique_id as varchar) as concat_id
"""
output = sqlglot.parse_one(output).sql()
sql = "select l.source_dataset || '-__-' || l.unique_id as concat_id"
    transformed_sql = sqlglot_transform_sql(sql, cast_concat_as_varchar)
assert transformed_sql == output
sql = """
select cast(l.source_dataset as varchar) || '-__-' ||
l.unique_id as concat_id
"""
    transformed_sql = sqlglot_transform_sql(sql, cast_concat_as_varchar)
assert transformed_sql == output
sql = """
select cast(l.source_dataset as varchar) || '-__-' ||
cast(l.unique_id as varchar) as concat_id
"""
    transformed_sql = sqlglot_transform_sql(sql, cast_concat_as_varchar)
assert transformed_sql == output
sql = "select source_dataset || '-__-' || unique_id as concat_id"
    transformed_sql = sqlglot_transform_sql(sql, cast_concat_as_varchar)
assert transformed_sql == output.replace("l.", "")
def test_set_numeric_as_double():
sql = "select cast('a' as float8), cast(0.12345 as float8)"
transformed_sql = sqlglot.transpile(sql, write="customspark")[0]
assert transformed_sql == "SELECT aD, 0.12345D"
sql = "select cast('a' as string), cast(0.12345 as float8)"
transformed_sql = sqlglot.transpile(sql, write="customspark")[0]
assert transformed_sql == "SELECT CAST('a' AS STRING), 0.12345D"
def test_add_pref_and_suffix():
dull = InputColumn("dull")
dull_l_r = ['"l"."dull" as "dull_l"', '"r"."dull" as "dull_r"']
assert dull.l_r_names_as_l_r() == dull_l_r
assert dull.bf_name() == '"bf_dull"'
assert dull.tf_name_l() == '"tf_dull_l"'
tf_dull_l_r = ['"l"."tf_dull" as "tf_dull_l"', '"r"."tf_dull" as "tf_dull_r"']
assert dull.l_r_tf_names_as_l_r() == tf_dull_l_r
ll = InputColumn("lat['long']")
assert ll.name_l() == "\"lat_l\"['long']"
ll_tf_l_r = [
'"l"."tf_lat"[\'long\'] as "tf_lat_l"[\'long\']',
'"r"."tf_lat"[\'long\'] as "tf_lat_r"[\'long\']',
]
assert ll.l_r_tf_names_as_l_r() == ll_tf_l_r
group = InputColumn("group")
assert group.name_l() == '"group_l"'
assert group.bf_name() == '"bf_group"'
group_l_r_names = ['"l"."group" as "group_l"', '"r"."group" as "group_r"']
assert group.l_r_names_as_l_r() == group_l_r_names
group_tf_l_r = ['"l"."tf_group" as "tf_group_l"', '"r"."tf_group" as "tf_group_r"']
assert group.l_r_tf_names_as_l_r() == group_tf_l_r
cols = ["unique_id", "SUR name", "group"]
out_cols = ['"unique_id"', '"SUR name"', '"group"']
cols_class = [InputColumn(c) for c in cols]
assert [c.name() for c in cols_class] == out_cols |
parse issue | import re
from collections import OrderedDict
from redash.query_runner import TYPE_STRING, BaseHTTPQueryRunner, register
from redash.utils import json_dumps, json_loads
# TODO: make this more general and move into __init__.py
class ResultSet(object):
def __init__(self):
self.columns = OrderedDict()
self.rows = []
def add_row(self, row):
for key in row.keys():
self.add_column(key)
self.rows.append(row)
def add_column(self, column, column_type=TYPE_STRING):
if column not in self.columns:
self.columns[column] = {
"name": column,
"type": column_type,
"friendly_name": column,
}
def to_json(self):
return json_dumps({"rows": self.rows, "columns": list(self.columns.values())})
    def merge(self, other):
        self.rows = self.rows + other.rows
def METHOD_NAME(issue, field_mapping): # noqa: C901
result = OrderedDict()
result["key"] = issue["key"]
    for k, v in issue["fields"].items():
output_name = field_mapping.get_output_field_name(k)
member_names = field_mapping.get_dict_members(k)
if isinstance(v, dict):
if len(member_names) > 0:
# if field mapping with dict member mappings defined get value of each member
for member_name in member_names:
if member_name in v:
result[field_mapping.get_dict_output_field_name(k, member_name)] = v[member_name]
else:
# these special mapping rules are kept for backwards compatibility
if "key" in v:
result["{}_key".format(output_name)] = v["key"]
if "name" in v:
result["{}_name".format(output_name)] = v["name"]
if k in v:
result[output_name] = v[k]
if "watchCount" in v:
result[output_name] = v["watchCount"]
elif isinstance(v, list):
if len(member_names) > 0:
# if field mapping with dict member mappings defined get value of each member
for member_name in member_names:
listValues = []
for listItem in v:
if isinstance(listItem, dict):
if member_name in listItem:
listValues.append(listItem[member_name])
if len(listValues) > 0:
result[field_mapping.get_dict_output_field_name(k, member_name)] = ",".join(listValues)
else:
# otherwise support list values only for non-dict items
listValues = []
for listItem in v:
if not isinstance(listItem, dict):
listValues.append(listItem)
if len(listValues) > 0:
result[output_name] = ",".join(listValues)
else:
result[output_name] = v
return result
def parse_issues(data, field_mapping):
results = ResultSet()
for issue in data["issues"]:
results.add_row(METHOD_NAME(issue, field_mapping))
return results
def parse_count(data):
results = ResultSet()
results.add_row({"count": data["total"]})
return results
class FieldMapping:
def __init__(cls, query_field_mapping):
cls.mapping = []
for k, v in query_field_mapping.items():
field_name = k
member_name = None
# check for member name contained in field name
member_parser = re.search(r"(\w+)\.(\w+)", k)
if member_parser:
field_name = member_parser.group(1)
member_name = member_parser.group(2)
cls.mapping.append(
{
"field_name": field_name,
"member_name": member_name,
"output_field_name": v,
}
)
def get_output_field_name(cls, field_name):
for item in cls.mapping:
if item["field_name"] == field_name and not item["member_name"]:
return item["output_field_name"]
return field_name
def get_dict_members(cls, field_name):
member_names = []
for item in cls.mapping:
if item["field_name"] == field_name and item["member_name"]:
member_names.append(item["member_name"])
return member_names
def get_dict_output_field_name(cls, field_name, member_name):
for item in cls.mapping:
if item["field_name"] == field_name and item["member_name"] == member_name:
return item["output_field_name"]
return None
class JiraJQL(BaseHTTPQueryRunner):
noop_query = '{"queryType": "count"}'
response_error = "JIRA returned unexpected status code"
requires_authentication = True
url_title = "JIRA URL"
username_title = "Username"
password_title = "API Token"
@classmethod
def name(cls):
return "JIRA (JQL)"
def __init__(self, configuration):
super(JiraJQL, self).__init__(configuration)
self.syntax = "json"
def run_query(self, query, user):
jql_url = "{}/rest/api/2/search".format(self.configuration["url"])
query = json_loads(query)
query_type = query.pop("queryType", "select")
field_mapping = FieldMapping(query.pop("fieldMapping", {}))
if query_type == "count":
query["maxResults"] = 1
query["fields"] = ""
else:
query["maxResults"] = query.get("maxResults", 1000)
response, error = self.get_response(jql_url, params=query)
if error is not None:
return None, error
data = response.json()
if query_type == "count":
results = parse_count(data)
else:
results = parse_issues(data, field_mapping)
index = data["startAt"] + data["maxResults"]
while data["total"] > index:
query["startAt"] = index
response, error = self.get_response(jql_url, params=query)
if error is not None:
return None, error
data = response.json()
index = data["startAt"] + data["maxResults"]
addl_results = parse_issues(data, field_mapping)
results.merge(addl_results)
return results.to_json(), None
register(JiraJQL) |
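# --- Illustrative sketch (editor's addition) ---
# How the "fieldMapping" option of a query is interpreted by FieldMapping above:
# a plain key renames a field, while a "field.member" key extracts one member of
# a dict-valued field. The mapping below is a made-up example.
if __name__ == "__main__":
    mapping = FieldMapping({"assignee.displayName": "assignee", "customfield_10001": "sprint"})
    assert mapping.get_output_field_name("customfield_10001") == "sprint"
    assert mapping.get_dict_members("assignee") == ["displayName"]
    assert mapping.get_dict_output_field_name("assignee", "displayName") == "assignee"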
parse arp monitor | #
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
The vyos lag_interfaces fact class
It is in this file the configuration is collected from the device
for a given resource, parsed, and the facts tree is populated
based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from re import findall, search, M
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
utils,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.lag_interfaces.lag_interfaces import (
Lag_interfacesArgs,
)
class Lag_interfacesFacts(object):
""" The vyos lag_interfaces fact class
"""
def __init__(self, module, subspec="config", options="options"):
self._module = module
self.argument_spec = Lag_interfacesArgs.argument_spec
spec = deepcopy(self.argument_spec)
if subspec:
if options:
facts_argument_spec = spec[subspec][options]
else:
facts_argument_spec = spec[subspec]
else:
facts_argument_spec = spec
self.generated_spec = utils.generate_dict(facts_argument_spec)
def populate_facts(self, connection, ansible_facts, data=None):
""" Populate the facts for lag_interfaces
:param module: the module instance
:param connection: the device connection
:param data: previously collected conf
:rtype: dictionary
:returns: facts
"""
if not data:
data = connection.get_config()
objs = []
lag_names = findall(r"^set interfaces bonding (\S+)", data, M)
if lag_names:
for lag in set(lag_names):
lag_regex = r" %s .+$" % lag
cfg = findall(lag_regex, data, M)
obj = self.render_config(cfg)
output = connection.run_commands(
["show interfaces bonding " + lag + " slaves"]
)
lines = output[0].splitlines()
members = []
member = {}
if len(lines) > 1:
for line in lines[2:]:
splitted_line = line.split()
if len(splitted_line) > 1:
                            members.append({"member": splitted_line[0]})
else:
members = []
member = {}
obj["name"] = lag.strip("'")
if members:
obj["members"] = members
if obj:
objs.append(obj)
facts = {}
if objs:
facts["lag_interfaces"] = []
params = utils.validate_config(
self.argument_spec, {"config": objs}
)
for cfg in params["config"]:
facts["lag_interfaces"].append(utils.remove_empties(cfg))
ansible_facts["ansible_network_resources"].update(facts)
return ansible_facts
def render_config(self, conf):
"""
Render config as dictionary structure and delete keys
from spec for null values
:param spec: The facts tree, generated from the argspec
:param conf: The configuration
:rtype: dictionary
:returns: The generated config
"""
arp_monitor_conf = "\n".join(
filter(lambda x: ("arp-monitor" in x), conf)
)
hash_policy_conf = "\n".join(
filter(lambda x: ("hash-policy" in x), conf)
)
lag_conf = "\n".join(filter(lambda x: ("bond" in x), conf))
config = self.parse_attribs(["mode", "primary"], lag_conf)
config["arp_monitor"] = self.METHOD_NAME(arp_monitor_conf)
config["hash_policy"] = self.parse_hash_policy(hash_policy_conf)
return utils.remove_empties(config)
def parse_attribs(self, attribs, conf):
config = {}
for item in attribs:
value = utils.parse_conf_arg(conf, item)
if value:
config[item] = value.strip("'")
else:
config[item] = None
return utils.remove_empties(config)
def METHOD_NAME(self, conf):
arp_monitor = None
if conf:
arp_monitor = {}
target_list = []
interval = search(r"^.*arp-monitor interval (.+)", conf, M)
targets = findall(r"^.*arp-monitor target '(.+)'", conf, M)
if targets:
for target in targets:
target_list.append(target)
arp_monitor["target"] = target_list
if interval:
value = interval.group(1).strip("'")
arp_monitor["interval"] = int(value)
return arp_monitor
def parse_hash_policy(self, conf):
hash_policy = None
if conf:
hash_policy = search(r"^.*hash-policy (.+)", conf, M)
hash_policy = hash_policy.group(1).strip("'")
return hash_policy |
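# --- Illustrative sketch (editor's addition) ---
# Example of what the parsing above produces for a single bond. Given device
# config lines such as (made-up values):
#
#   set interfaces bonding bond0 mode '802.3ad'
#   set interfaces bonding bond0 arp-monitor interval '30'
#   set interfaces bonding bond0 arp-monitor target '192.0.2.1'
#
# render_config() yields a fact fragment roughly like:
#
#   {"mode": "802.3ad",
#    "arp_monitor": {"interval": 30, "target": ["192.0.2.1"]}}
#
# populate_facts() then adds "name": "bond0" and the "members" gathered from
# "show interfaces bonding bond0 slaves".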
distance in miles | import abc
import json
import math
import random
from typing import NamedTuple
import requests
from django.conf import settings
from django.contrib.gis.geos import Point
class Directions(NamedTuple):
time: float # travel time in seconds
distance: float # distance in metres
mode: str # mode of transport, either "walk" or "drive"
route: str # an encoded polyline (https://developers.google.com/maps/documentation/utilities/polylinealgorithm)
precision: int # passed to Leaflet
source: str # the directions provider, e.g. "Google" or "MapBox"
start: Point # start point
end: Point # end point
@property
def METHOD_NAME(self):
return self.distance / 1609.34
@property
def time_in_minutes(self):
return math.ceil(self.time / 60)
# These two URLs have WGS84 coordinates to four decimal places, using
# https://xkcd.com/2170/ as a guide.
@property
def google_maps_url(self):
return f"https://www.google.com/maps/dir/{self.start.y:.4f},{self.start.x:.4f}/{self.end.y:.4f},{self.end.x:.4f}"
@property
def cyclestreets_url(self):
return f"https://www.cyclestreets.net/journey/{self.start.y:.4f},{self.start.x:.4f}/{self.end.y:.4f},{self.end.x:.4f}/"
def get_google_directions_token():
keys = settings.GOOGLE_API_KEYS
if len(keys) == 0:
return ""
return random.choice(keys)
def get_distance(start, end):
# convert the points to British National Grid first
# so that .distance() will give us a distance in meters
start_bng = start.transform(27700, clone=True)
end_bng = end.transform(27700, clone=True)
return start_bng.distance(end_bng) / 1000
class DirectionsException(Exception):
pass
class DirectionsClient(metaclass=abc.ABCMeta):
@abc.abstractmethod
def get_route(self, start, end):
pass
class GoogleDirectionsClient(DirectionsClient):
precision = 5
def get_base_url(self):
return "{base}&key={key}".format(
base=settings.BASE_GOOGLE_URL, key=get_google_directions_token()
)
def get_data(self, url):
resp = requests.get(url)
if resp.status_code != 200:
raise DirectionsException(
"Google Directions API error: HTTP status code %i" % resp.status_code
)
return resp.json()
def get_route(self, start, end):
distance_km = get_distance(start, end)
if distance_km > 1.5:
transport_verb = {"base": "drive", "gerund": "driving"}
else:
transport_verb = {"base": "walk", "gerund": "walking"}
url = "{base_url}&mode={mode}&origin={origin}&destination={destination}".format(
base_url=self.get_base_url(),
mode=transport_verb["gerund"],
origin="{0},{1}".format(start.y, start.x),
destination="{0},{1}".format(end.y, end.x),
)
directions = self.get_data(url)
if directions["status"] != "OK":
raise DirectionsException(
"Google Directions API error: {}".format(directions["status"])
)
route = directions["routes"][0]["overview_polyline"]["points"]
return Directions(
directions["routes"][0]["legs"][0]["duration"]["value"],
directions["routes"][0]["legs"][0]["distance"]["value"],
transport_verb["base"],
json.dumps(route),
self.precision,
"Google",
start,
end,
)
class MapboxDirectionsClient(DirectionsClient):
precision = 5
def get_data(self, url):
resp = requests.get(url)
if resp.status_code != 200:
raise DirectionsException(
"Mapbox Directions API error: HTTP status code %i" % resp.status_code
)
return resp.json()
def get_route(self, start, end):
distance_km = get_distance(start, end)
if distance_km > 1.5:
transport_verb = {"base": "drive", "gerund": "driving-traffic"}
else:
transport_verb = {"base": "walk", "gerund": "walking"}
url = "{base_url}/{profile}/{coordinates}?&alternatives=false&geometries=polyline&steps=false&access_token={key}".format(
base_url=settings.BASE_MAPBOX_URL,
profile=transport_verb["gerund"],
coordinates="{start_lon},{start_lat};{end_lon},{end_lat}".format(
start_lon=start.x, start_lat=start.y, end_lon=end.x, end_lat=end.y
),
key=settings.MAPBOX_API_KEY,
)
directions = self.get_data(url)
if directions["code"] != "Ok":
raise DirectionsException(
"Mapbox Directions API error: {}".format(directions["code"])
)
return Directions(
directions["routes"][0]["duration"],
directions["routes"][0]["distance"],
transport_verb["base"],
json.dumps(directions["routes"][0]["geometry"]),
self.precision,
"Mapbox",
start,
end,
)
class DirectionsHelper:
def get_directions(self, **kwargs):
if kwargs["start_location"] and kwargs["end_location"]:
clients = (
MapboxDirectionsClient(),
GoogleDirectionsClient(),
)
for client in clients:
try:
return client.get_route(
kwargs["start_location"], kwargs["end_location"]
)
except DirectionsException:
pass
return None
return None |
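# --- Illustrative sketch (editor's addition) ---
# What the Directions tuple exposes for a made-up one-mile walking route.
# Point(x, y) takes longitude then latitude and needs the GEOS library that
# GeoDjango relies on; all values below are invented for illustration.
if __name__ == "__main__":
    example = Directions(
        time=600,           # ten minutes, in seconds
        distance=1609.34,   # one mile, in metres
        mode="walk",
        route="[]",
        precision=5,
        source="Example",
        start=Point(-0.1276, 51.5072),
        end=Point(-0.1426, 51.5014),
    )
    assert example.time_in_minutes == 10
    assert round(example.distance_in_miles, 3) == 1.0
    print(example.google_maps_url)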
test python ctor repair | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2014 Glencoe Software, Inc. All Rights Reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
Tests of the omero.client constructors
"""
from __future__ import print_function
from future.utils import native_str
import os
from omero.testlib import ITest
import omero
import Ice
here = os.path.abspath(os.path.dirname(__file__))
class TestClientConstructors(ITest):
def setup_method(self, method):
c = omero.client(pmap=['--Ice.Config='+(os.environ.get("ICE_CONFIG"))])
try:
self.host = c.ic.getProperties().getProperty('omero.host')
self.port = int(c.ic.getProperties().getProperty('omero.port'))
self.rootpasswd = c.ic.getProperties().getProperty(
'omero.rootpass')
finally:
c.__del__()
def testHostConstructor(self):
c = omero.client(host=self.host, port=self.port)
try:
c.createSession("root", self.rootpasswd)
c.closeSession()
c.createSession("root", self.rootpasswd)
except Exception:
c.__del__()
def testEmptyInitializationDataConstructor(self):
id = Ice.InitializationData()
# With no argument id.properties is empty
id.properties = Ice.createProperties()
id.properties.setProperty("omero.host", self.host)
id.properties.setProperty("omero.port", native_str(self.port))
id.properties.setProperty("omero.user", "root")
id.properties.setProperty("omero.pass", self.rootpasswd)
c = omero.client(id=id)
try:
c.createSession()
c.closeSession()
c.createSession()
c.closeSession()
finally:
c.__del__()
def testInitializationDataConstructor(self):
id = Ice.InitializationData()
id.properties = Ice.createProperties([])
id.properties.setProperty("omero.user", "root")
id.properties.setProperty("omero.pass", self.rootpasswd)
c = omero.client(id=id)
try:
c.createSession()
c.closeSession()
c.createSession()
c.closeSession()
finally:
c.__del__()
def testMainArgsConstructor(self):
args = ["--omero.host="+self.host,
"--omero.user=root", "--omero.pass=" + self.rootpasswd]
c = omero.client(args)
try:
c.createSession()
c.closeSession()
c.createSession()
c.closeSession()
finally:
c.__del__()
def testMapConstructor(self):
p = {}
p["omero.host"] = self.host
p["omero.user"] = "root"
p["omero.pass"] = self.rootpasswd
c = omero.client(pmap=p)
try:
c.createSession()
c.closeSession()
c.createSession()
c.closeSession()
finally:
c.__del__()
def testMainArgsGetsIcePrefix(self):
args = ["--omero.host="+self.host,
"--omero.user=root", "--omero.pass=" + self.rootpasswd]
args.append("--Ice.MessageSizeMax=10")
c = omero.client(args)
try:
c.createSession()
assert "10" == c.getProperty("Ice.MessageSizeMax")
c.closeSession()
finally:
c.__del__()
def testMainArgsGetsIceConfig(self):
cfg = os.path.join(here, "client_ctors.cfg")
if not os.path.exists(cfg):
assert False, cfg + " does not exist"
args = ["--Ice.Config=" + cfg, "--omero.host=unimportant"]
c = omero.client(args)
try:
assert "true" == c.getProperty("in.ice.config")
# c.createSession()
# c.closeSession()
finally:
c.__del__()
def testTwoDifferentHosts(self):
try:
c1 = omero.client(host="foo")
c1.createSession()
c1.closeSession()
except Exception:
print("foo failed appropriately")
c2 = omero.client(host=self.host, port=self.port)
try:
user = self.new_user()
c2.createSession(user.omeName.val, user.omeName.val)
c2.closeSession()
finally:
c2.__del__()
def testPorts(self):
c = omero.client("localhost", 1111)
try:
assert "1111" == c.ic.getProperties().getProperty("omero.port")
finally:
c.__del__()
c = omero.client("localhost", ["--omero.port=2222"])
try:
assert "2222" == c.ic.getProperties().getProperty("omero.port")
finally:
c.__del__()
# c = omero.client("localhost")
# assert str(omero.constants.GLACIER2PORT) ==\
# c.ic.getProperties().getProperty("omero.port")
def testBlockSize(self):
c = omero.client("localhost")
try:
assert 5000000 == c.getDefaultBlockSize()
finally:
c.__del__()
c = omero.client("localhost", ["--omero.block_size=1000000"])
try:
assert 1000000 == c.getDefaultBlockSize()
finally:
c.__del__()
def METHOD_NAME(self):
# c = omero.client(self.host, omero.constants.GLACIER2PORT)
c = omero.client(self.host, self.port)
try:
c.createSession("root", self.rootpasswd)
c.closeSession()
finally:
c.__del__() |
test refresh request body | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
import pytest
from airbyte_cdk.sources.streams.http.auth import (
BasicHttpAuthenticator,
MultipleTokenAuthenticator,
NoAuth,
Oauth2Authenticator,
TokenAuthenticator,
)
LOGGER = logging.getLogger(__name__)
def test_token_authenticator():
"""
Should match passed in token, no matter how many times token is retrieved.
"""
token = TokenAuthenticator("test-token")
header = token.get_auth_header()
assert {"Authorization": "Bearer test-token"} == header
header = token.get_auth_header()
assert {"Authorization": "Bearer test-token"} == header
def test_multiple_token_authenticator():
token = MultipleTokenAuthenticator(["token1", "token2"])
header1 = token.get_auth_header()
assert {"Authorization": "Bearer token1"} == header1
header2 = token.get_auth_header()
assert {"Authorization": "Bearer token2"} == header2
header3 = token.get_auth_header()
assert {"Authorization": "Bearer token1"} == header3
def test_no_auth():
"""
Should always return empty body, no matter how many times token is retrieved.
"""
no_auth = NoAuth()
assert {} == no_auth.get_auth_header()
no_auth = NoAuth()
assert {} == no_auth.get_auth_header()
def test_basic_authenticator():
token = BasicHttpAuthenticator("client_id", "client_secret")
header = token.get_auth_header()
assert {"Authorization": "Basic Y2xpZW50X2lkOmNsaWVudF9zZWNyZXQ="} == header
class TestOauth2Authenticator:
"""
Test class for OAuth2Authenticator.
"""
refresh_endpoint = "https://some_url.com/v1"
client_id = "client_id"
client_secret = "client_secret"
refresh_token = "refresh_token"
refresh_access_token_headers = {"Header_1": "value 1", "Header_2": "value 2"}
refresh_access_token_authenticator = BasicHttpAuthenticator(client_id, client_secret)
def test_get_auth_header_fresh(self, mocker):
"""
Should not retrieve new token if current token is valid.
"""
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token,
)
mocker.patch.object(Oauth2Authenticator, "refresh_access_token", return_value=("access_token", 1000))
header = oauth.get_auth_header()
assert {"Authorization": "Bearer access_token"} == header
def test_get_auth_header_expired(self, mocker):
"""
Should retrieve new token if current token is expired.
"""
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token,
)
expire_immediately = 0
mocker.patch.object(Oauth2Authenticator, "refresh_access_token", return_value=("access_token_1", expire_immediately))
oauth.get_auth_header() # Set the first expired token.
valid_100_secs = 100
mocker.patch.object(Oauth2Authenticator, "refresh_access_token", return_value=("access_token_2", valid_100_secs))
header = oauth.get_auth_header()
assert {"Authorization": "Bearer access_token_2"} == header
def METHOD_NAME(self):
"""
Request body should match given configuration.
"""
scopes = ["scope1", "scope2"]
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token,
scopes,
)
body = oauth.get_refresh_request_body()
expected = {
"grant_type": "refresh_token",
"client_id": "client_id",
"client_secret": "client_secret",
"refresh_token": "refresh_token",
"scopes": scopes,
}
assert body == expected
def test_refresh_access_token(self, requests_mock):
mock_refresh_token_call = requests_mock.post(
TestOauth2Authenticator.refresh_endpoint, json={"access_token": "token", "expires_in": 10}
)
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token,
refresh_access_token_headers=TestOauth2Authenticator.refresh_access_token_headers,
)
token, expires_in = oauth.refresh_access_token()
assert isinstance(expires_in, int)
assert ("token", 10) == (token, expires_in)
for header in self.refresh_access_token_headers:
assert header in mock_refresh_token_call.last_request.headers
assert self.refresh_access_token_headers[header] == mock_refresh_token_call.last_request.headers[header]
assert mock_refresh_token_call.called
@pytest.mark.parametrize("error_code", (429, 500, 502, 504))
def test_refresh_access_token_retry(self, error_code, requests_mock):
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token
)
requests_mock.post(
TestOauth2Authenticator.refresh_endpoint,
[
{"status_code": error_code}, {"status_code": error_code}, {"json": {"access_token": "token", "expires_in": 10}}
]
)
token, expires_in = oauth.refresh_access_token()
assert (token, expires_in) == ("token", 10)
assert requests_mock.call_count == 3
def test_refresh_access_authenticator(self):
oauth = Oauth2Authenticator(
TestOauth2Authenticator.refresh_endpoint,
TestOauth2Authenticator.client_id,
TestOauth2Authenticator.client_secret,
TestOauth2Authenticator.refresh_token,
refresh_access_token_authenticator=TestOauth2Authenticator.refresh_access_token_authenticator,
)
expected_headers = {"Authorization": "Basic Y2xpZW50X2lkOmNsaWVudF9zZWNyZXQ="}
assert expected_headers == oauth.get_refresh_access_token_headers() |
test actions | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
from mock import Mock
from textwrap import dedent
from c7n import deprecated
class DeprecationTest(BaseTest):
def test_action(self):
deprecation = deprecated.action(
"use modify-db instead with `CopyTagsToSnapshot`", '2021-06-30')
# Always matches.
self.assertTrue(deprecation.check({}))
self.assertEqual(
str(deprecation),
"action has been deprecated (use modify-db instead with `CopyTagsToSnapshot`)"
)
def test_filter(self):
deprecation = deprecated.filter(
"use the 'used' filter with 'state' attribute", '2021-06-30')
# Always matches.
self.assertTrue(deprecation.check({}))
self.assertEqual(
str(deprecation),
"filter has been deprecated (use the 'used' filter with 'state' attribute)"
)
def test_field(self):
deprecation = deprecated.field('severity_normalized', 'severity_label', '2021-06-30')
self.assertTrue(deprecation.check({'severity_normalized': '10'}))
self.assertFalse(deprecation.check({'no-match': 'ignored'}))
self.assertEqual(
str(deprecation),
"field 'severity_normalized' has been deprecated (replaced by 'severity_label')"
)
class ReportTest(BaseTest):
def test_empty(self):
report = deprecated.Report("some-policy")
self.assertFalse(report)
self.assertEqual(report.format(), "policy 'some-policy'")
def test_policy_source_locator(self):
locator = Mock()
locator.find.return_value = "somefile.yml:1234"
report = deprecated.Report("some-policy")
self.assertEqual(report.format(locator), "policy 'some-policy' (somefile.yml:1234)")
locator.find.assert_called_with("some-policy")
def test_conditions(self):
report = deprecated.Report("some-policy", conditions=[
deprecated.field('start', 'value filter in condition block', '2021-06-30'),
deprecated.field('end', 'value filter in condition block', '2021-06-30'),
])
self.assertTrue(report)
self.assertEqual(report.format(), dedent("""
policy 'some-policy'
condition:
field 'start' has been deprecated (replaced by value filter in condition block)
field 'end' has been deprecated (replaced by value filter in condition block)
""")[1:-1])
def test_modes(self):
report = deprecated.Report("some-policy", mode=[
deprecated.field('foo', 'bar', '2021-06-30'),
deprecated.field('baz', 'yet', '2021-06-30'),
])
self.assertTrue(report)
self.assertEqual(report.format(), dedent("""
policy 'some-policy'
mode:
field 'foo' has been deprecated (replaced by 'bar')
field 'baz' has been deprecated (replaced by 'yet')
""")[1:-1])
# No examples of resource deprecation just yet. Looking for one.
def METHOD_NAME(self):
report = deprecated.Report("some-policy", actions=[
deprecated.Context(
'mark-for-op:', deprecated.optional_fields(('hours', 'days'), '2021-06-30')),
deprecated.Context(
'mark-for-op:', deprecated.optional_field('tag', '2021-06-30')),
])
self.assertTrue(report)
self.assertEqual(report.format(), dedent("""
policy 'some-policy'
actions:
mark-for-op: optional fields deprecated (one of 'hours' or 'days' must be specified)
mark-for-op: optional field 'tag' deprecated (must be specified)
""")[1:-1])
def test_footnotes(self):
footnotes = deprecated.Footnotes()
report = deprecated.Report("some-policy", mode=[
deprecated.field('foo', 'bar'),
deprecated.field('baz', 'yet', '2021-06-30'),
], actions=[
deprecated.Context(
'mark-for-op:',
deprecated.optional_fields(('hours', 'days'),
link="http://docs.example.com/deprecations/foo#time")),
deprecated.Context(
'mark-for-op:',
deprecated.optional_field('tag', '2021-06-30',
"http://docs.example.com/deprecations/foo#tag")),
])
self.assertTrue(report)
self.assertEqual(report.format(footnotes=footnotes), dedent("""
policy 'some-policy'
mode:
field 'foo' has been deprecated (replaced by 'bar')
field 'baz' has been deprecated (replaced by 'yet') [1]
actions:
mark-for-op: optional fields deprecated (one of 'hours' or 'days' must be specified) [2]
mark-for-op: optional field 'tag' deprecated (must be specified) [3]
""")[1:-1]) # noqa
self.assertEqual(footnotes(), dedent("""
[1] Will be removed after 2021-06-30
[2] See http://docs.example.com/deprecations/foo#time
[3] See http://docs.example.com/deprecations/foo#tag, will become an error after 2021-06-30
""")[1:-1]) # noqa |
get dates | #!/usr/bin/env python3
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import datetime
import difflib
import glob
import os
import re
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"filenames",
help="list of files to check, all files if unspecified",
nargs='*')
rootdir = os.path.dirname(__file__) + "/../../"
rootdir = os.path.abspath(rootdir)
parser.add_argument(
"--rootdir", default=rootdir, help="root directory to examine")
default_boilerplate_dir = os.path.join(rootdir, "hack/boilerplate")
parser.add_argument(
"--boilerplate-dir", default=default_boilerplate_dir)
parser.add_argument(
"-v", "--verbose",
help="give verbose output regarding why a file does not pass",
action="store_true")
args = parser.parse_args()
verbose_out = sys.stderr if args.verbose else open("/dev/null", "w")
def get_refs():
refs = {}
for path in glob.glob(os.path.join(args.boilerplate_dir, "boilerplate.*.txt")):
extension = os.path.basename(path).split(".")[1]
ref_file = open(path, 'r')
ref = ref_file.read().splitlines()
ref_file.close()
refs[extension] = ref
return refs
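# Illustrative note (not part of the original script): a reference file named
# "boilerplate.py.txt" ends up as refs['py'], a list of the expected header lines.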
def is_generated_file(filename, data, regexs):
for d in skipped_ungenerated_files:
if d in filename:
return False
p = regexs["generated"]
return p.search(data)
def file_passes(filename, refs, regexs):
try:
f = open(filename, 'r')
except Exception as exc:
print("Unable to open %s: %s" % (filename, exc), file=verbose_out)
return False
data = f.read()
f.close()
# determine if the file is automatically generated
generated = is_generated_file(filename, data, regexs)
basename = os.path.basename(filename)
extension = file_extension(filename)
if generated:
if extension == "go":
extension = "generatego"
elif extension == "bzl":
extension = "generatebzl"
if extension != "":
ref = refs[extension]
else:
ref = refs[basename]
# remove extra content from the top of files
if extension == "go" or extension == "generatego":
p = regexs["go_build_constraints"]
(data, found) = p.subn("", data, 1)
elif extension in ["sh", "py"]:
p = regexs["shebang"]
(data, found) = p.subn("", data, 1)
data = data.splitlines()
# if our test file is smaller than the reference it surely fails!
if len(ref) > len(data):
print('File %s smaller than reference (%d < %d)' %
(filename, len(data), len(ref)),
file=verbose_out)
return False
# trim our file to the same number of lines as the reference file
data = data[:len(ref)]
p = regexs["year"]
for d in data:
if p.search(d):
if generated:
print('File %s has the YEAR field, but it should not be in a generated file' %
filename, file=verbose_out)
else:
print('File %s has the YEAR field, but is missing the year' %
filename, file=verbose_out)
return False
if not generated:
# Replace the year in the first line that matches the date regex (e.g. "2014"..."2018") with "YEAR"
p = regexs["date"]
for i, d in enumerate(data):
(data[i], found) = p.subn('YEAR', d)
if found != 0:
break
# if we don't match the reference at this point, fail
if ref != data:
print("Header in %s does not match reference, diff:" %
filename, file=verbose_out)
if args.verbose:
print(file=verbose_out)
for line in difflib.unified_diff(ref, data, 'reference', filename, lineterm=''):
print(line, file=verbose_out)
print(file=verbose_out)
return False
return True
def file_extension(filename):
return os.path.splitext(filename)[1].split(".")[-1].lower()
skipped_dirs = ['_output', '.git', "hack/boilerplate/test"]
# list all the files contain 'DO NOT EDIT', but are not generated
skipped_ungenerated_files = [
'hack/lib/swagger.sh', 'hack/boilerplate/boilerplate.py']
def normalize_files(files):
newfiles = []
for pathname in files:
if any(x in pathname for x in skipped_dirs):
continue
newfiles.append(pathname)
for i, pathname in enumerate(newfiles):
if not os.path.isabs(pathname):
newfiles[i] = os.path.join(args.rootdir, pathname)
return newfiles
def get_files(extensions):
files = []
if len(args.filenames) > 0:
files = args.filenames
else:
for root, dirs, walkfiles in os.walk(args.rootdir):
# don't visit certain dirs. This is just a performance improvement
# as we would prune these later in normalize_files(). But doing it
# cuts down the amount of filesystem walking we do and cuts down
# the size of the file list
for d in skipped_dirs:
if d in dirs:
dirs.remove(d)
for name in walkfiles:
pathname = os.path.join(root, name)
files.append(pathname)
files = normalize_files(files)
outfiles = []
for pathname in files:
basename = os.path.basename(pathname)
extension = file_extension(pathname)
if extension in extensions or basename in extensions:
outfiles.append(pathname)
return outfiles
def METHOD_NAME():
years = datetime.datetime.now().year
return '(%s)' % '|'.join((str(year) for year in range(2014, years+1)))
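# For example (illustrative), when run in 2016 this returns "(2014|2015|2016)".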
def get_regexs():
regexs = {}
# Search for "YEAR" which exists in the boilerplate, but shouldn't in the real thing
regexs["year"] = re.compile('YEAR')
# the "date" regex matches any single year from 2014 through the current year, e.g. "(2014|2015|2016|2017|2018)";
# company holder names can be anything
regexs["date"] = re.compile(METHOD_NAME())
# strip the following build constraints/tags:
# //go:build
# // +build \n\n
regexs["go_build_constraints"] = re.compile(
r"^(//(go:build| \+build).*\n)+\n", re.MULTILINE)
# strip #!.* from scripts
regexs["shebang"] = re.compile(r"^(#!.*\n)\n*", re.MULTILINE)
# Search for generated files
regexs["generated"] = re.compile('DO NOT EDIT')
return regexs
def main():
regexs = get_regexs()
refs = get_refs()
filenames = get_files(refs.keys())
for filename in filenames:
if not file_passes(filename, refs, regexs):
print(filename, file=sys.stdout)
return 0
if __name__ == "__main__":
sys.exit(main()) |
on pre build | from __future__ import annotations
import logging
import os
from typing import TYPE_CHECKING, List
from mkdocs import utils
from mkdocs.config import base
from mkdocs.config import config_options as c
from mkdocs.contrib.search.search_index import SearchIndex
from mkdocs.plugins import BasePlugin
if TYPE_CHECKING:
from mkdocs.config.defaults import MkDocsConfig
from mkdocs.structure.pages import Page
from mkdocs.util.templates import TemplateContext
log = logging.getLogger(__name__)
base_path = os.path.dirname(os.path.abspath(__file__))
class LangOption(c.OptionallyRequired[List[str]]):
"""Validate Language(s) provided in config are known languages."""
def get_lunr_supported_lang(self, lang):
fallback = {'uk': 'ru'}
for lang_part in lang.split("_"):
lang_part = lang_part.lower()
lang_part = fallback.get(lang_part, lang_part)
if os.path.isfile(os.path.join(base_path, 'lunr-language', f'lunr.{lang_part}.js')):
return lang_part
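# Illustrative examples (assuming the bundled lunr-language files are present):
# get_lunr_supported_lang('pt_BR') resolves to 'pt', while 'uk' falls back to 'ru'.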
def run_validation(self, value: object):
if isinstance(value, str):
value = [value]
if not isinstance(value, list):
raise c.ValidationError('Expected a list of language codes.')
for lang in value[:]:
if lang != 'en':
lang_detected = self.get_lunr_supported_lang(lang)
if not lang_detected:
log.info(f"Option search.lang '{lang}' is not supported, falling back to 'en'")
value.remove(lang)
if 'en' not in value:
value.append('en')
elif lang_detected != lang:
value.remove(lang)
value.append(lang_detected)
log.info(f"Option search.lang '{lang}' switched to '{lang_detected}'")
return value
class _PluginConfig(base.Config):
lang = c.Optional(LangOption())
separator = c.Type(str, default=r'[\s\-]+')
min_search_length = c.Type(int, default=3)
prebuild_index = c.Choice((False, True, 'node', 'python'), default=False)
indexing = c.Choice(('full', 'sections', 'titles'), default='full')
class SearchPlugin(BasePlugin[_PluginConfig]):
"""Add a search feature to MkDocs."""
def on_config(self, config: MkDocsConfig, **kwargs) -> MkDocsConfig:
"Add plugin templates and scripts to config."
if config.theme.get('include_search_page'):
config.theme.static_templates.add('search.html')
if not config.theme.get('search_index_only'):
path = os.path.join(base_path, 'templates')
config.theme.dirs.append(path)
if 'search/main.js' not in config.extra_javascript:
config.extra_javascript.append('search/main.js') # type: ignore
if self.config.lang is None:
# lang setting undefined. Set default based on theme locale
validate = _PluginConfig.lang.run_validation
self.config.lang = validate(config.theme.locale.language)
# The `python` method of `prebuild_index` is pending deprecation as of version 1.2.
# TODO: Raise a deprecation warning in a future release (1.3?).
if self.config.prebuild_index == 'python':
log.info(
"The 'python' method of the search plugin's 'prebuild_index' config option "
"is pending deprecation and will not be supported in a future release."
)
return config
def METHOD_NAME(self, config: MkDocsConfig, **kwargs) -> None:
"Create search index instance for later use."
self.search_index = SearchIndex(**self.config)
def on_page_context(self, context: TemplateContext, page: Page, **kwargs) -> None:
"Add page to search index."
self.search_index.add_entry_from_context(page)
def on_post_build(self, config: MkDocsConfig, **kwargs) -> None:
"Build search index."
output_base_path = os.path.join(config.site_dir, 'search')
search_index = self.search_index.generate_search_index()
json_output_path = os.path.join(output_base_path, 'search_index.json')
utils.write_file(search_index.encode('utf-8'), json_output_path)
assert self.config.lang is not None
if not config.theme.get('search_index_only'):
# Include language support files in output. Copy them directly
# so that only the needed files are included.
files = []
if len(self.config.lang) > 1 or 'en' not in self.config.lang:
files.append('lunr.stemmer.support.js')
if len(self.config.lang) > 1:
files.append('lunr.multi.js')
if 'ja' in self.config.lang or 'jp' in self.config.lang:
files.append('tinyseg.js')
for lang in self.config.lang:
if lang != 'en':
files.append(f'lunr.{lang}.js')
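# For example (illustrative), lang == ['ja'] ends up copying
# lunr.stemmer.support.js, tinyseg.js and lunr.ja.js.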
for filename in files:
from_path = os.path.join(base_path, 'lunr-language', filename)
to_path = os.path.join(output_base_path, filename)
utils.copy_file(from_path, to_path) |
hovmoller | import cmd
from fv3net.diagnostics.prognostic_run import load_run_data
import intake
import vcm.catalog
import vcm
import xarray as xr
import fv3viz
import pathlib
import matplotlib.pyplot as plt
import cartopy.crs
import sys
import io
import warnings
from . import iterm
warnings.filterwarnings("ignore")
def meridional_transect(ds: xr.Dataset, lon):
transect_coords = vcm.select.meridional_ring(lon)
ds = vcm.interpolate_unstructured(ds, transect_coords)
return ds.swap_dims({"sample": "lat"})
class PlotTape:
def __init__(self):
self.count = 0
def save_plot(self):
filename = f"image_{self.count}.png"
plt.savefig(filename)
plt.close(plt.gcf())
self.count += 1
class OneFileTape:
"""Useful for working in vscode...updates file in place"""
def save_plot(self):
filename = f"image.png"
plt.savefig(filename)
plt.close(plt.gcf())
class JupyterTape:
def save_plot(self):
pass
class ItermTape:
width = 70
def save_plot(self):
f = io.BytesIO()
plt.savefig(f)
iterm.write_image(
f.getvalue(),
sys.stderr.buffer,
filename="file",
width=self.width,
preserve_aspect_ratio=True,
)
plt.close(plt.gcf())
class State:
def __init__(self):
self.data_3d = None
self.data_2d = None
self.tape = OneFileTape()
self.state = {}
def load(self, url):
self.prognostic = load_run_data.SegmentedRun(url, catalog)
self.data_3d = self.prognostic.data_3d.merge(grid)
self.data_2d = grid.merge(self.prognostic.data_2d, compat="override")
def get_time(self):
i = int(self.state.get("time", "0"))
return self.data_2d.time[i]
def set(self, key, val):
self.state[key] = val
def get(self, key, default):
return self.state.get(key, default)
def get_3d_snapshot(self):
time = self.get_time()
return self.data_3d.sel(time=time, method="nearest").merge(grid)
def get_2d_snapshot(self):
time = self.get_time()
return self.data_2d.sel(time=time)
def print(self):
print("3D Variables:")
for v in self.data_3d:
print(v)
print()
print("2D Variables:")
for v in self.data_2d:
print(v)
def list_artifacts(self):
for art in self.prognostic.artifacts:
print(art)
catalog_path = vcm.catalog.catalog_path
catalog = intake.open_catalog(catalog_path)
grid = load_run_data.load_grid(catalog)
def avg2d(state: State, variable):
x = state.data_2d
avg = vcm.weighted_average(x[variable], x.area, ["x", "y", "tile"])
avg.plot()
state.tape.save_plot()
def avg3d(state: State, variable):
x = state.data_3d
avg = vcm.weighted_average(x[variable], x.area, ["x", "y", "tile"])
avg.plot(y="pressure", yincrease=True)
state.tape.save_plot()
def set_iterm_tape(state: State):
state.tape = ItermTape()
def METHOD_NAME(state: State, variable, vmin=None, vmax=None):
z = state.data_2d[variable]
avg = vcm.zonal_average_approximate(state.data_2d.lat, z)
vmin = None if vmin is None else float(vmin)
vmax = None if vmax is None else float(vmax)
avg.plot(x="time", vmin=vmin, vmax=vmax)
state.tape.save_plot()
def parse_pcolor_arg(arg):
tokens = arg.split()
kwargs = {}
if len(tokens) >= 3:
kwargs["vmin"] = float(tokens[1])
kwargs["vmax"] = float(tokens[2])
if len(tokens) >= 4:
kwargs["cmap"] = tokens[3]
return tokens[0], kwargs
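# Illustrative (the variable name is made up): parse_pcolor_arg("h500 4800 6000 viridis")
# returns ("h500", {"vmin": 4800.0, "vmax": 6000.0, "cmap": "viridis"}).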
class ProgShell(cmd.Cmd):
intro = (
"Welcome to the ProgRunDiag shell. Type help or ? to list commands.\n" # noqa
)
def __init__(self, state: State, raise_errors: bool = False):
super().__init__()
self.state = state
self.crs = None
self.raise_errors = raise_errors
def do_avg2d(self, arg):
avg2d(self.state, arg)
def do_crs(self, arg):
if arg == "antarctic":
self.crs = cartopy.crs.Orthographic(central_latitude=-90)
else:
raise NotImplementedError(arg)
def do_avg3d(self, arg):
avg3d(self.state, arg)
def do_iterm(self, arg):
set_iterm_tape(self.state)
def do_jupyter(self, arg):
self.state.tape = JupyterTape()
def do_hovmoller(self, arg):
METHOD_NAME(self.state, *arg.split())
def do_artifacts(self, arg):
self.state.list_artifacts()
def do_load(self, arg):
url = arg
self.state.load(url)
def do_set(self, arg):
key, val = arg.split()
self.state.set(key, val)
def do_print(self, arg):
self.state.print()
def do_meridional(self, arg):
variable, kwargs = parse_pcolor_arg(arg)
lon = int(self.state.get("lon", "0"))
transect = meridional_transect(self.state.get_3d_snapshot(), lon)
transect = transect.assign_coords(lon=lon)
plt.figure(figsize=(10, 3))
transect[variable].plot(yincrease=False, y="pressure", **kwargs)
self.state.tape.save_plot()
def do_zonal(self, arg):
variable, kwargs = parse_pcolor_arg(arg)
lat = float(self.state.get("lat", 0))
ds = self.state.get_3d_snapshot()
transect_coords = vcm.select.zonal_ring(lat=lat)
transect = vcm.interpolate_unstructured(ds, transect_coords)
transect = transect.swap_dims({"sample": "lon"})
transect = transect.assign_coords(lat=lat)
plt.figure(figsize=(10, 3))
transect[variable].plot(yincrease=False, y="pressure", **kwargs)
self.state.tape.save_plot()
def do_zonalavg(self, arg):
variable, kwargs = parse_pcolor_arg(arg)
ds = self.state.get_3d_snapshot()
transect = vcm.zonal_average_approximate(ds.lat, ds[variable])
transect.plot(yincrease=False, y="pressure", **kwargs)
self.state.tape.save_plot()
def do_column(self, arg):
variable, kwargs = parse_pcolor_arg(arg)
lon = float(self.state.get("lon", 0))
lat = float(self.state.get("lat", 0))
ds = self.state.get_3d_snapshot()
transect_coords = vcm.select.latlon(lat, lon)
transect = vcm.interpolate_unstructured(ds, transect_coords).squeeze()
transect[variable].plot(yincrease=False, y="pressure", **kwargs)
self.state.tape.save_plot()
def onecmd(self, line):
try:
super().onecmd(line)
except Exception as e:
if self.raise_errors:
raise (e)
else:
print(e)
def do_map2d(self, arg):
variable, kwargs = parse_pcolor_arg(arg)
data = self.state.get_2d_snapshot()
fv3viz.plot_cube(data, variable, projection=self.crs, **kwargs)
time_name = data.time.item().isoformat()
plt.title(f"{time_name} {variable}")
plt.tight_layout()
self.state.tape.save_plot()
def do_exit(self, arg):
sys.exit(0)
def do_eval(self, arg):
f = pathlib.Path(arg)
for line in f.read_text().splitlines():
self.onecmd(line)
def register_parser(subparsers):
parser = subparsers.add_parser(
"shell", help="Open an prognostic run browsing shell"
)
parser.set_defaults(func=main)
parser.add_argument(
"script",
default="",
nargs="?",
help="If provided, a text file of commands to run instead of opening "
"an interactive shell.",
)
def main(args):
if args.script:
shell = ProgShell(State(), raise_errors=True)
shell.do_eval(args.script)
else:
shell = ProgShell(State())
shell.cmdloop() |
wait for response | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root
# for license information.
"""Test helpers for networking.
"""
import os
import re
import requests
import socket
import threading
import time
from debugpy.common import log, util
from tests.patterns import some
def get_test_server_port(start, stop):
"""Returns a server port number that can be safely used for listening without
clashing with another test worker process, when running with pytest-xdist.
If multiple test workers invoke this function with the same start value, each of
them will receive a different number that is not lower than start (but may be
higher). If the resulting value exceeds stop, it is a fatal error.
Note that if multiple test workers invoke this function with different ranges
that overlap, conflicts are possible!
"""
try:
worker_id = util.force_ascii(os.environ["PYTEST_XDIST_WORKER"])
except KeyError:
n = 0
else:
assert worker_id == some.bytes.matching(
rb"gw(\d+)"
), "Unrecognized PYTEST_XDIST_WORKER format"
n = int(worker_id[2:])
port = start + n
assert port <= stop
return port
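# Illustrative (assuming pytest-xdist workers gw0, gw1, ...): get_test_server_port(8000, 8100)
# yields 8000 on gw0 and 8001 on gw1; without xdist it simply returns 8000.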
def find_http_url(text):
match = re.search(r"https?://[-.0-9A-Za-z]+(:\d+)/?", text)
return match.group() if match else None
def wait_until_port_is_listening(port, interval=1, max_attempts=1000):
"""Blocks until the specified TCP port on localhost is listening, and can be
connected to.
Tries to connect to the port periodically, and repeats until connection succeeds.
Connection is immediately closed before returning.
"""
for i in range(1, max_attempts + 1):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
log.info("Probing localhost:{0} (attempt {1})...", port, i)
sock.connect(("localhost", port))
except socket.error as exc:
# The first attempt will almost always fail, because the port isn't
# open yet. But if it keeps failing after that, we want to know why.
if i > 1:
log.warning("Failed to connect to localhost:{0}:\n{1}", port, exc)
time.sleep(interval)
else:
log.info("localhost:{0} is listening - server is up!", port)
return
finally:
sock.close()
class WebRequest(object):
"""An async wrapper around requests."""
@staticmethod
def get(*args, **kwargs):
return WebRequest("get", *args, **kwargs)
@staticmethod
def post(*args, **kwargs):
return WebRequest("post", *args, **kwargs)
def __init__(self, method, url, *args, **kwargs):
"""Invokes requests.method(url, *args, **kwargs) on a background thread,
and immediately returns.
If method() raises an exception, it is logged, unless log_errors=False.
"""
self.method = method
self.url = url
self.log_errors = kwargs.pop("log_errors", True)
self.request = None
"""The underlying requests.Request object.
Not set until wait_for_response() returns.
"""
self.exception = None
"""Exception that occurred while performing the request, if any.
Not set until wait_for_response() returns.
"""
log.info("{0}", self)
func = getattr(requests, method)
self._worker_thread = threading.Thread(
target=lambda: self._worker(func, *args, **kwargs),
name=f"WebRequest({self})",
)
self._worker_thread.daemon = True
self._worker_thread.start()
def __str__(self):
return f"HTTP {self.method.upper()} {self.url}"
def _worker(self, func, *args, **kwargs):
try:
self.request = func(self.url, *args, **kwargs)
except Exception as exc:
if self.log_errors:
log.swallow_exception("{0} failed:", self)
self.exception = exc
else:
log.info(
"{0} --> {1} {2}", self, self.request.status_code, self.request.reason
)
def METHOD_NAME(self, timeout=None):
"""Blocks until the request completes, and returns self.request."""
if self._worker_thread.is_alive():
log.info("Waiting for response to {0} ...", self)
self._worker_thread.join(timeout)
if self.exception is not None:
raise self.exception
return self.request
def response_text(self):
"""Blocks until the request completes, and returns the response body."""
return self.METHOD_NAME().text
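# Hedged usage sketch (the URL is made up): req = WebRequest.get("http://localhost:8000/ping")
# starts the request on a background thread; req.response_text() later blocks for the body.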
class WebServer(object):
"""Interacts with a web server listening on localhost on the specified port."""
def __init__(self, port):
self.port = port
self.url = f"http://localhost:{port}"
def __enter__(self):
"""Blocks until the server starts listening on self.port."""
log.info("Web server expected on {0}", self.url)
wait_until_port_is_listening(self.port, interval=3)
return self
def __exit__(self, exc_type, exc_value, exc_tb):
"""Sends an HTTP /exit GET request to the server.
The server is expected to terminate its process while handling that request.
"""
self.get("/exit", log_errors=False)
def get(self, path, *args, **kwargs):
return WebRequest.get(self.url + path, *args, **kwargs)
def post(self, path, *args, **kwargs):
return WebRequest.post(self.url + path, *args, **kwargs) |
forward single image | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import numpy as np
import torch
from torch import nn
from maskrcnn_benchmark.layers.misc import interpolate
from maskrcnn_benchmark.structures.bounding_box import BoxList
# TODO check if want to return a single BoxList or a composite
# object
class MaskPostProcessor(nn.Module):
"""
From the results of the CNN, post process the masks
by taking the mask corresponding to the class with max
probability (which are of fixed size and directly output
by the CNN) and return the masks in the mask field of the BoxList.
If a masker object is passed, it will additionally
project the masks in the image according to the locations in boxes.
"""
def __init__(self, masker=None):
super(MaskPostProcessor, self).__init__()
self.masker = masker
def forward(self, x, boxes):
"""
Arguments:
x (Tensor): the mask logits
boxes (list[BoxList]): bounding boxes that are used as
reference, one for each image
Returns:
results (list[BoxList]): one BoxList for each image, containing
the extra field mask
"""
mask_prob = x.sigmoid()
# select masks corresponding to the predicted classes
num_masks = x.shape[0]
labels = [bbox.get_field("labels") for bbox in boxes]
labels = torch.cat(labels)
index = torch.arange(num_masks, device=labels.device)
mask_prob = mask_prob[index, labels][:, None]
boxes_per_image = [len(box) for box in boxes]
mask_prob = mask_prob.split(boxes_per_image, dim=0)
if self.masker:
mask_prob = self.masker(mask_prob, boxes)
results = []
for prob, box in zip(mask_prob, boxes):
bbox = BoxList(box.bbox, box.size, mode="xyxy")
for field in box.fields():
bbox.add_field(field, box.get_field(field))
bbox.add_field("mask", prob)
results.append(bbox)
return results
class MaskPostProcessorCOCOFormat(MaskPostProcessor):
"""
From the results of the CNN, post process the results
so that the masks are pasted in the image, and
additionally convert the results to COCO format.
"""
def forward(self, x, boxes):
import pycocotools.mask as mask_util
import numpy as np
results = super(MaskPostProcessorCOCOFormat, self).forward(x, boxes)
for result in results:
masks = result.get_field("mask").cpu()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
result.add_field("mask", rles)
return results
# the next two functions should be merged inside Masker
# but are kept here for the moment while we need them
# temporarily for paste_mask_in_image
def expand_boxes(boxes, scale):
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = torch.zeros_like(boxes)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
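# Worked example (illustrative): the box (0, 0, 10, 10) expanded with scale 1.2 has
# half-widths of 6 instead of 5, giving (-1, -1, 11, 11).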
def expand_masks(mask, padding):
N = mask.shape[0]
M = mask.shape[-1]
pad2 = 2 * padding
scale = float(M + pad2) / M
padded_mask = mask.new_zeros((N, 1, M + pad2, M + pad2))
padded_mask[:, :, padding:-padding, padding:-padding] = mask
return padded_mask, scale
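# Worked example (illustrative): a 28x28 mask with padding=1 is zero-padded to 30x30,
# so scale = 30 / 28 ~= 1.071.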
def paste_mask_in_image(mask, box, im_h, im_w, thresh=0.5, padding=1):
# Need to work on the CPU, where fp16 isn't supported - cast to float to avoid this
mask = mask.float()
box = box.float()
padded_mask, scale = expand_masks(mask[None], padding=padding)
mask = padded_mask[0, 0]
box = expand_boxes(box[None], scale)[0]
box = box.to(dtype=torch.int32)
TO_REMOVE = 1
w = int(box[2] - box[0] + TO_REMOVE)
h = int(box[3] - box[1] + TO_REMOVE)
w = max(w, 1)
h = max(h, 1)
# Set shape to [batchxCxHxW]
mask = mask.expand((1, 1, -1, -1))
# Resize mask
mask = mask.to(torch.float32)
mask = interpolate(mask, size=(h, w), mode='bilinear', align_corners=False)
mask = mask[0][0]
if thresh >= 0:
mask = mask > thresh
else:
# for visualization and debugging, we also
# allow it to return an unmodified mask
mask = (mask * 255).to(torch.bool)
im_mask = torch.zeros((im_h, im_w), dtype=torch.bool)
x_0 = max(box[0], 0)
x_1 = min(box[2] + 1, im_w)
y_0 = max(box[1], 0)
y_1 = min(box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - box[1]) : (y_1 - box[1]), (x_0 - box[0]) : (x_1 - box[0])
]
return im_mask
class Masker(object):
"""
Projects a set of masks in an image on the locations
specified by the bounding boxes
"""
def __init__(self, threshold=0.5, padding=1):
self.threshold = threshold
self.padding = padding
def METHOD_NAME(self, masks, boxes):
boxes = boxes.convert("xyxy")
im_w, im_h = boxes.size
res = [
paste_mask_in_image(mask[0], box, im_h, im_w, self.threshold, self.padding)
for mask, box in zip(masks, boxes.bbox)
]
if len(res) > 0:
res = torch.stack(res, dim=0)[:, None]
else:
res = masks.new_empty((0, 1, masks.shape[-2], masks.shape[-1]))
return res
def __call__(self, masks, boxes):
if isinstance(boxes, BoxList):
boxes = [boxes]
# Make some sanity checks
assert len(boxes) == len(masks), "Masks and boxes should have the same length."
# TODO: Is this JIT compatible?
# If not we should make it compatible.
results = []
for mask, box in zip(masks, boxes):
assert mask.shape[0] == len(box), "Number of objects should be the same."
result = self.METHOD_NAME(mask, box)
results.append(result)
return results
def make_roi_mask_post_processor(cfg):
if cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS:
mask_threshold = cfg.MODEL.ROI_MASK_HEAD.POSTPROCESS_MASKS_THRESHOLD
masker = Masker(threshold=mask_threshold, padding=1)
else:
masker = None
mask_post_processor = MaskPostProcessor(masker)
return mask_post_processor |
set test params | #!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for skipping signature validation on old blocks.
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/bitcoin/bitcoin/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
"""
import time
from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.key import CECKey
from test_framework.messages import (
CBlockHeader,
COutPoint,
CTransaction,
CTxIn,
CTxOut,
msg_block,
msg_headers
)
from test_framework.mininode import P2PInterface
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class BaseNode(P2PInterface):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
self.send_message(headers_message)
class AssumeValidTest(BitcoinTestFramework):
def METHOD_NAME(self):
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.add_nodes(3)
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.start_node(0)
def send_blocks_until_disconnected(self, p2p_conn):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
if not p2p_conn.is_connected:
break
try:
p2p_conn.send_message(msg_block(self.blocks[i]))
except IOError as e:
assert not p2p_conn.is_connected
break
def assert_blockchain_height(self, node, height):
"""Wait until the blockchain is no longer advancing and verify it's reached the expected height."""
last_height = node.getblock(node.getbestblockhash())['height']
timeout = 10
while True:
time.sleep(0.25)
current_height = node.getblock(node.getbestblockhash())['height']
if current_height != last_height:
last_height = current_height
if timeout < 0:
assert False, "blockchain too short after timeout: %d" % current_height
timeout -= 0.25
continue
elif current_height > height:
assert False, "blockchain too long: %d" % current_height
elif current_height == height:
break
def run_test(self):
p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE])))
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
self.nodes[0].disconnect_p2ps()
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.start_node(1, extra_args=["-assumevalid=" + hex(block102.sha256)])
self.start_node(2, extra_args=["-assumevalid=" + hex(block102.sha256)])
p2p0 = self.nodes[0].add_p2p_connection(BaseNode())
p2p1 = self.nodes[1].add_p2p_connection(BaseNode())
p2p2 = self.nodes[2].add_p2p_connection(BaseNode())
# send header lists to all three nodes
p2p0.send_header_for_blocks(self.blocks[0:2000])
p2p0.send_header_for_blocks(self.blocks[2000:])
p2p1.send_header_for_blocks(self.blocks[0:2000])
p2p1.send_header_for_blocks(self.blocks[2000:])
p2p2.send_header_for_blocks(self.blocks[0:200])
# Send blocks to node0. Block 102 will be rejected.
self.send_blocks_until_disconnected(p2p0)
self.assert_blockchain_height(self.nodes[0], 101)
# Send all blocks to node1. All blocks will be accepted.
for i in range(2202):
p2p1.send_message(msg_block(self.blocks[i]))
# Syncing 2200 blocks can take a while on slow systems. Give it plenty of time to sync.
p2p1.sync_with_ping(120)
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send blocks to node2. Block 102 will be rejected.
self.send_blocks_until_disconnected(p2p2)
self.assert_blockchain_height(self.nodes[2], 101)
if __name__ == '__main__':
AssumeValidTest().main() |
compute image timeout | #!/usr/bin/python3
# SPDX-FileCopyrightText: Red Hat, Inc.
# SPDX-License-Identifier: GPL-2.0-or-later
from __future__ import absolute_import
from __future__ import division
import os
import sys
import traceback
import fcntl
import signal
import subprocess
import struct
import threading
import hooking
from vdsm.common.units import GiB
BLKGETSIZE64 = 0x80081272 # Obtain device size in bytes
FORMAT = 'L'
TIMEPERGIB = 0.02 # Approximate qemu-img check time (in seconds) to check 1GiB
'''
checkimages vdsm hook
=====================
Hook performs consistency check on all qcow2 format disk images of a
particular VM using the QEMU disk image utility.
Accepts optional parameter 'timeout' (in seconds) to specify how long
the hook should wait for the QEMU disk image utility operation to complete.
Without 'timeout' specified, particular timeout is computed based on
image size.
syntax:
checkimages=true(|,timeout:\d+\.{1}\d+); # noqa: W605
example:
checkimages=true,timeout:1.12 # Use 1.12 seconds as timeout
checkimages=true # Compute timeout based on image size
Note: The timeout value is given in seconds. Checking a 1 GiB image takes ~0.02 s.
'''
def METHOD_NAME(disk_image, driver_type):
'''
Compute the expected timeout value for an image. Use a value of 10 s as the
default timeout for very small images (where a delay in launching the image
check could cause the VM to fail to start). Use the precomputed value when the
required timeout is bigger than 10 seconds.
'''
default_timeout = float(10)
image_size = getImageSize(disk_image, driver_type)
image_timeout = float(image_size * TIMEPERGIB)
if image_timeout > default_timeout:
return image_timeout
return default_timeout
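# Worked example (illustrative): a 100 GiB image gives 100 * 0.02 = 2 s, below the
# 10 s floor, so 10 s is used; a 1000 GiB image gives 20 s, so 20 s is used.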
def getImageSize(disk_image, driver_type):
'''
Obtain qcow2 image size in GiBs
'''
if driver_type == 'block':
dev_buffer = ' ' * 8
with open(disk_image) as device:
dev_buffer = fcntl.ioctl(device.fileno(), BLKGETSIZE64, dev_buffer)
image_bytes = struct.unpack(FORMAT, dev_buffer)[0]
elif driver_type == 'file':
image_bytes = os.stat(disk_image).st_size
return float(image_bytes / GiB)
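# Illustrative: a 10737418240-byte (10 GiB) file-backed image returns 10.0.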
def checkImage(path, timeout):
'''
Check qcow2 image using qemu-img QEMU utility
'''
cmd = ['/usr/bin/qemu-img', 'check', '-f', 'qcow2', path]
# Check the image using qemu-img. Enforce check termination
# on timeout expiration
p = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
t = threading.Timer(timeout, p.kill)
t.start()
out, err = p.communicate()
rc = p.returncode
t.cancel()
if rc == -signal.SIGKILL:
sys.stderr.write('checkimages: %s image check operation timed out.' %
path)
sys.stderr.write(' Increase the timeout or check image availability.')
sys.exit(2)
elif rc == 0:
sys.stderr.write('checkimages: %s image check returned: %s\n' %
(path, out))
else:
sys.stderr.write('checkimages: Error running %s command: %s\n' %
(' '.join(cmd), err))
sys.exit(2)
if 'checkimages' in os.environ:
requested_timeout = None
try:
env_value = os.environ['checkimages']
# checkimages=true,timeout:1.23 case => get requested timeout value
if ',' in env_value:
timeout = (env_value.split(',', 2)[1]).split(':', 2)[1]
requested_timeout = float(timeout)
domxml = hooking.read_domxml()
disks = domxml.getElementsByTagName('disk')
for disk in disks:
disk_device = disk.getAttribute('device')
if disk_device != 'disk':
continue
drivers = disk.getElementsByTagName('driver')
sources = disk.getElementsByTagName('source')
if not drivers or not sources:
continue
driver_type = drivers[0].getAttribute('type') # 'raw' or 'qcow2'
if driver_type != 'qcow2':
continue
disk_type = disk.getAttribute('type') # 'block' or 'file'
disk_image = None
if disk_type == 'block':
disk_image = sources[0].getAttribute('dev')
elif disk_type == 'file':
disk_image = sources[0].getAttribute('file')
if disk_image:
image_timeout = METHOD_NAME(disk_image, disk_type)
# Explicit timeout was requested, use it instead of the
# precomputed one
if requested_timeout is not None:
image_timeout = requested_timeout
sys.stderr.write('checkimages: Checking image %s. ' %
disk_image)
checkImage(disk_image, image_timeout)
except:
sys.stderr.write('checkimages [unexpected error]: %s\n' %
traceback.format_exc())
sys.exit(2) |
on 200 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"sig gallery-application delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete a gallery Application.
"""
_aaz_info = {
"version": "2021-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/galleries/{}/applications/{}", "2021-07-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.gallery_application_name = AAZStrArg(
options=["-n", "--name", "--application-name", "--gallery-application-name"],
help="The name of the gallery application.",
required=True,
id_part="child_name_1",
)
_args_schema.gallery_name = AAZStrArg(
options=["-r", "--gallery-name"],
help="Gallery name.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.GalleryApplicationsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class GalleryApplicationsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.METHOD_NAME,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/applications/{galleryApplicationName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"galleryApplicationName", self.ctx.args.gallery_application_name,
required=True,
),
**self.serialize_url_param(
"galleryName", self.ctx.args.gallery_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2021-07-01",
required=True,
),
}
return parameters
def METHOD_NAME(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
provisioning state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVendorResult',
'AwaitableGetVendorResult',
'get_vendor',
'get_vendor_output',
]
@pulumi.output_type
class GetVendorResult:
"""
Vendor resource.
"""
def __init__(__self__, id=None, name=None, METHOD_NAME=None, skus=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", METHOD_NAME)
if skus and not isinstance(skus, list):
raise TypeError("Expected argument 'skus' to be a list")
pulumi.set(__self__, "skus", skus)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def METHOD_NAME(self) -> str:
"""
The provisioning state of the vendor resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def skus(self) -> Sequence['outputs.SubResourceResponse']:
"""
A list of IDs of the vendor skus offered by the vendor.
"""
return pulumi.get(self, "skus")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetVendorResult(GetVendorResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVendorResult(
id=self.id,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
skus=self.skus,
system_data=self.system_data,
type=self.type)
def get_vendor(vendor_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVendorResult:
"""
Gets information about the specified vendor.
:param str vendor_name: The name of the vendor.
"""
__args__ = dict()
__args__['vendorName'] = vendor_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:hybridnetwork/v20220101preview:getVendor', __args__, opts=opts, typ=GetVendorResult).value
return AwaitableGetVendorResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'provisioning_state'),
skus=pulumi.get(__ret__, 'skus'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
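# Hedged usage sketch (the vendor name is made up):
#   vendor = get_vendor(vendor_name="contoso")
#   pulumi.export("vendorSkus", vendor.skus)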
@_utilities.lift_output_func(get_vendor)
def get_vendor_output(vendor_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVendorResult]:
"""
Gets information about the specified vendor.
:param str vendor_name: The name of the vendor.
"""
... |
d eps1 d ep s2 | """The ELL1k model for approximately handling near-circular orbits."""
import astropy.constants as c
import astropy.units as u
import numpy as np
from .ELL1_model import ELL1model
class ELL1kmodel(ELL1model):
"""This is a class for base ELL1k pulsar binary model.
ELL1k model is a generalization of ELL1 model to handle systems with
large advance of periastron.
References
----------
- Susobhanan et al. (2018), MNRAS, 480 (4), 5260-5271 [1]_
.. [1] https://ui.adsabs.harvard.edu/abs/2018MNRAS.480.5260S/abstract
"""
def __init__(self):
super().__init__()
self.binary_name = "ELL1k"
self.binary_delay_funcs = [self.ELL1kdelay]
self.d_binarydelay_d_par_funcs = [self.d_ELL1kdelay_d_par]
self.param_default_value.pop("EPS1DOT")
self.param_default_value.pop("EPS2DOT")
self.param_default_value.pop("EDOT")
self.param_default_value.update(
{"OMDOT": u.Quantity(0, "deg/year"), "LNEDOT": u.Quantity(0, "1/year")}
)
self.binary_params = list(self.param_default_value.keys())
self.set_param_values() # Set parameters to default values.
# self.orbits_func = self.orbits_ELL1
@property
def tt0(self):
return self.ttasc()
###############################
def eps1(self):
"""EPS1 as a function of time
Susobhanan+ 2018 Eq. 15"""
eps10 = self.EPS1
eps20 = self.EPS2
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return (1 + lnedot * dt) * (
eps10 * np.cos(omdot * dt) + eps20 * np.sin(omdot * dt)
)
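# i.e. eps1(dt) = (1 + LNEDOT*dt) * (EPS1*cos(OMDOT*dt) + EPS2*sin(OMDOT*dt)),
# with dt = t - TASC (Susobhanan+ 2018, Eq. 15).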
def d_eps1_d_EPS1(self):
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return (1 + lnedot * dt) * np.cos(omdot * dt)
def METHOD_NAME(self):
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return (1 + lnedot * dt) * np.sin(omdot * dt)
def d_eps1_d_OMDOT(self):
dt = self.ttasc()
return self.eps2() * dt
def d_eps1_d_LNEDOT(self):
lnedot = self.LNEDOT
dt = self.ttasc()
return self.eps1() * dt / (1 + lnedot * dt)
def d_eps1_d_TASC(self):
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return -self.eps1() * lnedot / (1 + lnedot * dt) - self.eps2() * omdot
def eps2(self):
"""EPS2 as a function of time
Susobhanan+ 2018 Eq. 15"""
eps10 = self.EPS1
eps20 = self.EPS2
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return (1 + lnedot * dt) * (
eps20 * np.cos(omdot * dt) - eps10 * np.sin(omdot * dt)
)
def d_eps2_d_EPS1(self):
return -self.METHOD_NAME()
def d_eps2_d_EPS2(self):
return -self.d_eps1_d_EPS1()
def d_eps2_d_OMDOT(self):
dt = self.ttasc()
return -self.eps1() * dt
def d_eps2_d_LNEDOT(self):
lnedot = self.LNEDOT
dt = self.ttasc()
return self.eps2() * dt / (1 + lnedot * dt)
def d_eps2_d_TASC(self):
omdot = self.OMDOT
lnedot = self.LNEDOT
dt = self.ttasc()
return -self.eps2() * lnedot / (1 + lnedot * dt) + self.eps1() * omdot
def delayR(self):
"""ELL1k Roemer delay in proper time.
A Susobhanan et al 2018 Eq. 6
There is an extra term (-3*a1*eps1)/(2*c) as compared to the ELL1 model."""
Phi = self.Phi()
return (
self.a1()
/ c.c
* (
np.sin(Phi)
+ 0.5
* (self.eps2() * np.sin(2 * Phi) - self.eps1() * (np.cos(2 * Phi) + 3))
)
).decompose()
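# i.e. delayR = (a1/c) * (sin(Phi) + 0.5*(eps2*sin(2*Phi) - eps1*(cos(2*Phi) + 3)))
# (Susobhanan+ 2018, Eq. 6).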
def d_Dre_d_par(self, par):
"""Derivative computation.
Computes::
Dre = delayR = a1/c.c*(sin(phi) - 0.5* eps1*(cos(2*phi) + 3) + 0.5* eps2*sin(2*phi))
d_Dre_d_par = d_a1_d_par/c.c * (sin(phi) - 0.5* eps1*(cos(2*phi) + 3) + 0.5* eps2*sin(2*phi))
+ d_Dre_d_Phi * d_Phi_d_par
+ d_Dre_d_eps1 * d_eps1_d_par
+ d_Dre_d_eps2 * d_eps2_d_par
"""
a1 = self.a1()
Phi = self.Phi()
eps1 = self.eps1()
eps2 = self.eps2()
d_a1_d_par = self.prtl_der("a1", par)
d_Dre_d_Phi = self.Drep()
d_Phi_d_par = self.prtl_der("Phi", par)
d_Dre_d_eps1 = a1 / c.c * (-0.5 * (np.cos(2 * Phi) + 3))
d_Dre_d_eps2 = a1 / c.c * (0.5 * np.sin(2 * Phi))
with u.set_enabled_equivalencies(u.dimensionless_angles()):
d_Dre_d_par = (
d_a1_d_par
/ c.c
* (
np.sin(Phi)
- 0.5 * eps1 * (np.cos(2 * Phi) + 3)
+ 0.5 * eps2 * np.sin(2 * Phi)
)
+ d_Dre_d_Phi * d_Phi_d_par
+ d_Dre_d_eps1 * self.prtl_der("eps1", par)
+ d_Dre_d_eps2 * self.prtl_der("eps2", par)
)
return d_Dre_d_par
def ELL1kdelay(self):
# TODO add aberration delay
return self.delayI() + self.delayS()
def d_ELL1kdelay_d_par(self, par):
return self.d_delayI_d_par(par) + self.d_delayS_d_par(par) |
coerce named tuple | """
Tests for the `TypeChecker`-based type interface.
The actual correctness of the type checking is handled in
`test_jsonschema_test_suite`; these tests check that TypeChecker
functions correctly at a more granular level.
"""
from collections import namedtuple
from unittest import TestCase
from asdf._jsonschema import ValidationError, _validators
from asdf._jsonschema._types import TypeChecker
from asdf._jsonschema.exceptions import UndefinedTypeCheck, UnknownType
from asdf._jsonschema.validators import Draft202012Validator, extend
def equals_2(checker, instance):
return instance == 2
def is_namedtuple(instance):
return isinstance(instance, tuple) and getattr(instance, "_fields", None)
def is_object_or_named_tuple(checker, instance):
if Draft202012Validator.TYPE_CHECKER.is_type(instance, "object"):
return True
return is_namedtuple(instance)
class TestTypeChecker(TestCase):
def test_is_type(self):
checker = TypeChecker({"two": equals_2})
self.assertEqual(
(
checker.is_type(instance=2, type="two"),
checker.is_type(instance="bar", type="two"),
),
(True, False),
)
def test_is_unknown_type(self):
with self.assertRaises(UndefinedTypeCheck) as e:
TypeChecker().is_type(4, "foobar")
self.assertIn(
"'foobar' is unknown to this type checker",
str(e.exception),
)
self.assertTrue(
e.exception.__suppress_context__,
msg="Expected the internal KeyError to be hidden.",
)
def test_checks_can_be_added_at_init(self):
checker = TypeChecker({"two": equals_2})
self.assertEqual(checker, TypeChecker().redefine("two", equals_2))
def test_redefine_existing_type(self):
self.assertEqual(
TypeChecker().redefine("two", object()).redefine("two", equals_2),
TypeChecker().redefine("two", equals_2),
)
def test_remove(self):
self.assertEqual(
TypeChecker({"two": equals_2}).remove("two"),
TypeChecker(),
)
def test_remove_unknown_type(self):
with self.assertRaises(UndefinedTypeCheck) as context:
TypeChecker().remove("foobar")
self.assertIn("foobar", str(context.exception))
def test_redefine_many(self):
self.assertEqual(
TypeChecker().redefine_many({"foo": int, "bar": str}),
TypeChecker().redefine("foo", int).redefine("bar", str),
)
def test_remove_multiple(self):
self.assertEqual(
TypeChecker({"foo": int, "bar": str}).remove("foo", "bar"),
TypeChecker(),
)
def test_type_check_can_raise_key_error(self):
"""
Make sure no one writes:
try:
self._type_checkers[type](...)
except KeyError:
ignoring the fact that the function itself can raise that.
"""
error = KeyError("Stuff")
def raises_keyerror(checker, instance):
raise error
with self.assertRaises(KeyError) as context:
TypeChecker({"foo": raises_keyerror}).is_type(4, "foo")
self.assertIs(context.exception, error)
def test_repr(self):
checker = TypeChecker({"foo": is_namedtuple, "bar": is_namedtuple})
self.assertEqual(repr(checker), "<TypeChecker types={'bar', 'foo'}>")
class TestCustomTypes(TestCase):
def test_simple_type_can_be_extended(self):
def int_or_str_int(checker, instance):
if not isinstance(instance, (int, str)):
return False
try:
int(instance)
except ValueError:
return False
return True
CustomValidator = extend(
Draft202012Validator,
type_checker=Draft202012Validator.TYPE_CHECKER.redefine(
"integer", int_or_str_int,
),
)
validator = CustomValidator({"type": "integer"})
validator.validate(4)
validator.validate("4")
with self.assertRaises(ValidationError):
validator.validate(4.4)
with self.assertRaises(ValidationError):
validator.validate("foo")
def test_object_can_be_extended(self):
schema = {"type": "object"}
Point = namedtuple("Point", ["x", "y"])
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
"object", is_object_or_named_tuple,
)
CustomValidator = extend(
Draft202012Validator,
type_checker=type_checker,
)
validator = CustomValidator(schema)
validator.validate(Point(x=4, y=5))
def test_object_extensions_require_custom_validators(self):
schema = {"type": "object", "required": ["x"]}
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
"object", is_object_or_named_tuple,
)
CustomValidator = extend(
Draft202012Validator,
type_checker=type_checker,
)
validator = CustomValidator(schema)
Point = namedtuple("Point", ["x", "y"])
# Cannot handle required
with self.assertRaises(ValidationError):
validator.validate(Point(x=4, y=5))
def test_object_extensions_can_handle_custom_validators(self):
schema = {
"type": "object",
"required": ["x"],
"properties": {"x": {"type": "integer"}},
}
type_checker = Draft202012Validator.TYPE_CHECKER.redefine(
"object", is_object_or_named_tuple,
)
def METHOD_NAME(fn):
def coerced(validator, value, instance, schema):
if is_namedtuple(instance):
instance = instance._asdict()
return fn(validator, value, instance, schema)
return coerced
required = METHOD_NAME(_validators.required)
properties = METHOD_NAME(_validators.properties)
CustomValidator = extend(
Draft202012Validator,
type_checker=type_checker,
validators={"required": required, "properties": properties},
)
validator = CustomValidator(schema)
Point = namedtuple("Point", ["x", "y"])
# Can now process required and properties
validator.validate(Point(x=4, y=5))
with self.assertRaises(ValidationError):
validator.validate(Point(x="not an integer", y=5))
# As well as still handle objects.
validator.validate({"x": 4, "y": 5})
with self.assertRaises(ValidationError):
validator.validate({"x": "not an integer", "y": 5})
def test_unknown_type(self):
with self.assertRaises(UnknownType) as e:
Draft202012Validator({}).is_type(12, "some unknown type")
self.assertIn("'some unknown type'", str(e.exception)) |
get displayed text | # (C) Copyright 2004-2023 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
from traitsui.qt.table_editor import SimpleEditor
from traitsui.testing.tester.command import (
MouseClick,
MouseDClick,
KeyClick,
KeySequence,
)
from traitsui.testing.tester.locator import Cell
from traitsui.testing.tester.query import (
DisplayedText,
Selected,
SelectedIndices,
)
from traitsui.testing.tester._ui_tester_registry._common_ui_targets import (
BaseSourceWithLocation
)
from traitsui.testing.tester._ui_tester_registry.qt import (
_interaction_helpers
)
def _query_table_editor_selected(wrapper, interaction):
selected = wrapper._target.selected
if not isinstance(selected, list):
if selected is None:
return []
else:
return [selected]
else:
return selected
def _query_table_editor_selected_indices(wrapper, interaction):
selected_indices = wrapper._target.selected_indices
if not isinstance(selected_indices, list):
if selected_indices == -1:
return []
else:
return [selected_indices]
else:
return selected_indices
class _SimpleEditorWithCell(BaseSourceWithLocation):
source_class = SimpleEditor
locator_class = Cell
handlers = [
(MouseClick, lambda wrapper, _: wrapper._target._mouse_click(
delay=wrapper.delay)),
(KeyClick, lambda wrapper, interaction: wrapper._target._key_click(
key=interaction.key,
delay=wrapper.delay,)),
(
KeySequence,
lambda wrapper, interaction: wrapper._target._key_sequence(
sequence=interaction.sequence,
delay=wrapper.delay,
)
),
(
DisplayedText,
lambda wrapper, _: wrapper._target.METHOD_NAME()
),
(MouseDClick, lambda wrapper, _: wrapper._target._mouse_dclick(
delay=wrapper.delay,)),
]
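    # Resolve the Qt model, view and model index for the cell referenced by this locator.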
def _get_model_view_index(self):
table_view = self.source.table_view
return dict(
model=table_view.model(),
view=table_view,
index=table_view.model().index(
self.location.row, self.location.column
),
)
def _mouse_click(self, delay=0):
_interaction_helpers.mouse_click_item_view(
**self._get_model_view_index(),
delay=delay,
)
def _mouse_dclick(self, delay=0):
_interaction_helpers.mouse_dclick_item_view(
**self._get_model_view_index(),
delay=delay,
)
def _key_sequence(self, sequence, delay=0):
_interaction_helpers.key_sequence_item_view(
**self._get_model_view_index(),
sequence=sequence,
delay=delay,
)
def _key_click(self, key, delay=0):
_interaction_helpers.key_click_item_view(
**self._get_model_view_index(),
key=key,
delay=delay,
)
def METHOD_NAME(self):
return _interaction_helpers.get_display_text_item_view(
**self._get_model_view_index(),
)
def register(registry):
""" Register interactions for the given registry.
If there are any conflicts, an error will occur.
Parameters
----------
registry : TargetRegistry
The registry being registered to.
"""
_SimpleEditorWithCell.register(registry)
registry.register_interaction(
target_class=SimpleEditor,
interaction_class=Selected,
handler=_query_table_editor_selected
)
registry.register_interaction(
target_class=SimpleEditor,
interaction_class=SelectedIndices,
handler=_query_table_editor_selected_indices
) |
adapt data from data loader | import math
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
import determined as det
from determined import keras
from determined.common import check
ArrayLike = Union[np.ndarray, List[np.ndarray], Dict[str, np.ndarray]]
InputData = Union[tf.keras.utils.Sequence, tf.data.Dataset, "SequenceAdapter", tuple]
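# ArrayLike describes the numpy-based formats accepted for x/y; InputData is what a data
# loader may return: a Keras Sequence, a tf.data Dataset, a SequenceAdapter, or an
# (x, y[, sample_weight]) tuple of ArrayLike data.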
def _is_list_of_numpy_array(x: Any) -> bool:
return isinstance(x, (list, tuple)) and all(isinstance(v, np.ndarray) for v in x)
def _is_dict_of_numpy_array(x: Any) -> bool:
return isinstance(x, dict) and all(isinstance(x[k], np.ndarray) for k in x)
def _length_of_multi_arraylike(data: ArrayLike) -> int:
if isinstance(data, np.ndarray):
return len(data)
elif isinstance(data, (list, tuple)):
return len(data[0])
elif isinstance(data, dict):
return len(list(data.values())[0])
else:
raise det.errors.InternalException(f"Unsupported data type: {type(data)}.")
def _get_elements_in_multi_arraylike(data: ArrayLike, start: int, end: int) -> Any:
if isinstance(data, np.ndarray):
return data[start:end]
elif isinstance(data, (list, tuple)):
return [arraylike[start:end] for arraylike in data]
elif isinstance(data, dict):
return {name: data[name][start:end] for name in data}
else:
raise det.errors.InternalException(f"Unsupported data type: {type(data)}.")
class _ArrayLikeAdapter(tf.keras.utils.Sequence): # type: ignore
"""This adapter adapts np.ndarray, a list of np.ndarray, and a dict of
np.ndarray into a tf.keras.utils.Sequence instance.
"""
def __init__(
self,
x: ArrayLike,
y: ArrayLike,
batch_size: int,
sample_weights: Optional[np.ndarray] = None,
drop_leftovers: bool = False,
):
"""
If converting numpy array data to Sequence to optimize performance, consider
using ArrayLikeAdapter.
Args:
x: Input data. It could be:
1) A Numpy array (or array-like), or a list of arrays (in case the model
has multiple inputs).
2) A dict mapping input names to the corresponding array, if the model
has named inputs.
y: Target data. Like the input data x, it could be either Numpy array(s).
batch_size: Number of samples per batch.
sample_weights: Numpy array of weights for the samples.
drop_leftovers: If True, drop the data that cannot complete the last batch. This
argument is ignored if x is a Sequence or a Dataset.
"""
if not (
isinstance(x, np.ndarray) or _is_list_of_numpy_array(x) or _is_dict_of_numpy_array(x)
):
raise det.errors.InvalidDataTypeException(
type(x),
"Data which is not tf.data.Datasets or tf.keras.utils.Sequence objects must be a "
"numpy array or a list/dict of numpy arrays. See the instructions below for "
f"details:\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
if not (
isinstance(y, np.ndarray) or _is_list_of_numpy_array(y) or _is_dict_of_numpy_array(y)
):
raise det.errors.InvalidDataTypeException(
type(y),
"Data which is not tf.data.Datasets or tf.keras.utils.Sequence objects must be a "
"numpy array or a list/dict of numpy arrays. See the instructions below for "
f"details:\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
self._x_length = _length_of_multi_arraylike(x)
self._y_length = _length_of_multi_arraylike(y)
check.eq(self._x_length, self._y_length, "Length of x and y do not match.")
check.check_gt_eq(self._x_length, batch_size, "Batch size is too large for the input data.")
if sample_weights is not None:
check.eq(
self._x_length,
len(sample_weights),
"Lengths of input data and sample weights do not match.",
)
self.x = x
self.y = y
self.sample_weight = sample_weights
self.batch_size = batch_size
self.drop_leftovers = drop_leftovers
def __len__(self) -> int:
        # Returns the number of batches (the last partial batch is dropped only when drop_leftovers is True).
if self.drop_leftovers:
return math.floor(self._x_length / self.batch_size)
else:
return math.ceil(self._x_length / self.batch_size)
def __getitem__(
self, index: int
) -> Union[Tuple[ArrayLike, ArrayLike], Tuple[ArrayLike, ArrayLike, np.ndarray]]:
# Gets batch at position index.
start = index * self.batch_size
# The end is not `(index + 1) * self.batch_size` if the
# last batch is not a full `self.batch_size`
end = min((index + 1) * self.batch_size, self._x_length)
if self.sample_weight is None:
return (
_get_elements_in_multi_arraylike(self.x, start, end),
_get_elements_in_multi_arraylike(self.y, start, end),
)
else:
return (
_get_elements_in_multi_arraylike(self.x, start, end),
_get_elements_in_multi_arraylike(self.y, start, end),
self.sample_weight[start:end],
)
class SequenceAdapter:
"""
Deprecated: use context.configure_fit() instead.
"""
def __init__(
self,
sequence: tf.keras.utils.Sequence,
use_multiprocessing: bool = False,
workers: int = 1,
max_queue_size: int = 10,
):
"""
Deprecated: use context.configure_fit() instead.
"""
self.sequence = sequence
self.use_multiprocessing = use_multiprocessing
self.workers = workers
self.max_queue_size = max_queue_size
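# Normalize whatever a data loader returned into something model.fit() accepts:
# Datasets, Sequences and SequenceAdapters pass through unchanged, while
# (x, y[, sample_weight]) tuples of numpy data are wrapped in an _ArrayLikeAdapter.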
def METHOD_NAME(
input_data: InputData,
batch_size: int,
) -> Union[tf.keras.utils.Sequence, SequenceAdapter, tf.data.Dataset]:
if isinstance(input_data, tf.data.Dataset):
return input_data
if isinstance(input_data, (tf.keras.utils.Sequence, SequenceAdapter)):
return input_data
if not isinstance(input_data, tuple) or len(input_data) not in (2, 3):
raise det.errors.InvalidDataTypeException(
type(input_data),
"input_data is invalid type. See the instruction below for details: \n"
f"{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
x = input_data[0]
y = input_data[1]
sample_weight = input_data[2] if len(input_data) == 3 else None
return _ArrayLikeAdapter(x, y, batch_size, sample_weight)
def _adapt_data_from_fit_args(
x: Any,
y: Any,
sample_weight: Any,
batch_size: int,
) -> Any:
"""
This is the in-between layer from the Native API to the Trial API.
"""
if isinstance(x, (tf.data.Dataset, tf.keras.utils.Sequence, SequenceAdapter)):
if y is not None:
raise det.errors.InvalidDataTypeException(
type(y),
"If x is a keras.utils.Sequence or a tf.data.Dataset, "
"y should not be specified (since targets will be obtained from x)."
"See the instruction below for details: "
f"\n{keras.TFKerasTrial.build_training_data_loader.__doc__}",
)
return x
return _ArrayLikeAdapter(x, y, batch_size, sample_weight) |
test values type | import unittest
import six
import test_engine
import testutil
from formula_prompt import (
values_type, column_type, referenced_tables, get_formula_prompt, convert_completion,
)
from objtypes import RaisedException
from records import Record as BaseRecord, RecordSet as BaseRecordSet
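# Minimal stand-in for a Grist table: its Record/RecordSet subclasses point back to this
# table so values_type() can report "Table1" as the value type.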
class FakeTable(object):
def __init__(self):
class Record(BaseRecord):
_table = self
class RecordSet(BaseRecordSet):
_table = self
self.Record = Record
self.RecordSet = RecordSet
table_id = "Table1"
_identity_relation = None
fake_table = FakeTable()
@unittest.skipUnless(six.PY3, "Python 3 only")
class TestFormulaPrompt(test_engine.EngineTestCase):
def METHOD_NAME(self):
self.assertEqual(values_type([1, 2, 3]), "int")
self.assertEqual(values_type([1.0, 2.0, 3.0]), "float")
self.assertEqual(values_type([1, 2, 3.0]), "float")
self.assertEqual(values_type([1, 2, None]), "Optional[int]")
self.assertEqual(values_type([1, 2, 3.0, None]), "Optional[float]")
self.assertEqual(values_type([1, RaisedException(None), 3]), "int")
self.assertEqual(values_type([1, RaisedException(None), None]), "Optional[int]")
self.assertEqual(values_type(["1", "2", "3"]), "str")
self.assertEqual(values_type([1, 2, "3"]), "Any")
self.assertEqual(values_type([1, 2, "3", None]), "Any")
self.assertEqual(values_type([
fake_table.Record(None),
fake_table.Record(None),
]), "Table1")
self.assertEqual(values_type([
fake_table.Record(None),
fake_table.Record(None),
None,
]), "Optional[Table1]")
self.assertEqual(values_type([
fake_table.RecordSet(None),
fake_table.RecordSet(None),
]), "list[Table1]")
self.assertEqual(values_type([
fake_table.RecordSet(None),
fake_table.RecordSet(None),
None,
]), "Optional[list[Table1]]")
self.assertEqual(values_type([[1, 2, 3]]), "list[int]")
self.assertEqual(values_type([[1, 2, 3], None]), "Optional[list[int]]")
self.assertEqual(values_type([[1, 2, None]]), "list[Optional[int]]")
self.assertEqual(values_type([[1, 2, None], None]), "Optional[list[Optional[int]]]")
self.assertEqual(values_type([[1, 2, "3"]]), "list[Any]")
self.assertEqual(values_type([{1, 2, 3}]), "set[int]")
self.assertEqual(values_type([(1, 2, 3)]), "tuple[int, ...]")
self.assertEqual(values_type([{1: ["2"]}]), "dict[int, list[str]]")
def assert_column_type(self, col_id, expected_type):
self.assertEqual(column_type(self.engine, "Table2", col_id), expected_type)
def assert_prompt(self, table_name, col_id, expected_prompt, lookups=False):
prompt = get_formula_prompt(self.engine, table_name, col_id, "description here",
include_all_tables=False, lookups=lookups)
# print(prompt)
self.assertEqual(prompt, expected_prompt)
def test_column_type(self):
sample = testutil.parse_test_sample({
"SCHEMA": [
[1, "Table2", [
[1, "text", "Text", False, "", "", ""],
[2, "numeric", "Numeric", False, "", "", ""],
[3, "int", "Int", False, "", "", ""],
[4, "bool", "Bool", False, "", "", ""],
[5, "date", "Date", False, "", "", ""],
[6, "datetime", "DateTime", False, "", "", ""],
[7, "attachments", "Attachments", False, "", "", ""],
[8, "ref", "Ref:Table2", False, "", "", ""],
[9, "reflist", "RefList:Table2", False, "", "", ""],
[10, "choice", "Choice", False, "", "", '{"choices": ["a", "b", "c"]}'],
[11, "choicelist", "ChoiceList", False, "", "", '{"choices": ["x", "y", "z"]}'],
[12, "ref_formula", "Any", True, "$ref or None", "", ""],
[13, "numeric_formula", "Any", True, "1 / $numeric", "", ""],
[14, "new_formula", "Numeric", True, "'to be generated...'", "", ""],
]],
],
"DATA": {
"Table2": [
["id", "numeric", "ref"],
[1, 0, 0],
[2, 1, 1],
],
},
})
self.load_sample(sample)
self.assert_column_type("text", "str")
self.assert_column_type("numeric", "float")
self.assert_column_type("int", "int")
self.assert_column_type("bool", "bool")
self.assert_column_type("date", "datetime.date")
self.assert_column_type("datetime", "datetime.datetime")
self.assert_column_type("attachments", "Any")
self.assert_column_type("ref", "Table2")
self.assert_column_type("reflist", "list[Table2]")
self.assert_column_type("choice", "Literal['a', 'b', 'c']")
self.assert_column_type("choicelist", "tuple[Literal['x', 'y', 'z'], ...]")
self.assert_column_type("ref_formula", "Optional[Table2]")
self.assert_column_type("numeric_formula", "float")
self.assertEqual(referenced_tables(self.engine, "Table2"), set())
self.assert_prompt("Table2", "new_formula",
'''\
class Table2:
text: str
numeric: float
int: int
bool: bool
date: datetime.date
datetime: datetime.datetime
attachments: Any
ref: Table2
reflist: list[Table2]
choice: Literal['a', 'b', 'c']
choicelist: tuple[Literal['x', 'y', 'z'], ...]
ref_formula: Optional[Table2]
numeric_formula: float
def new_formula(rec: Table2) -> float:
''')
def test_get_formula_prompt(self):
sample = testutil.parse_test_sample({
"SCHEMA": [
[1, "Table1", [
[1, "text", "Text", False, "", "", ""],
]],
[2, "Table2", [
[2, "ref", "Ref:Table1", False, "", "", ""],
]],
[3, "Table3", [
[3, "reflist", "RefList:Table2", False, "", "", ""],
]],
],
"DATA": {},
})
self.load_sample(sample)
self.assertEqual(referenced_tables(self.engine, "Table3"), {"Table1", "Table2"})
self.assertEqual(referenced_tables(self.engine, "Table2"), {"Table1"})
self.assertEqual(referenced_tables(self.engine, "Table1"), set())
self.assert_prompt("Table1", "text", '''\
class Table1:
def text(rec: Table1) -> str:
''')
# Test the same thing but include the lookup methods as in a real case,
# just to show that the table class would never actually be empty
# (which would be invalid Python and might confuse the model).
self.assert_prompt("Table1", "text", """\
class Table1:
def __len__(self):
return len(Table1.lookupRecords())
@staticmethod
def lookupRecords(sort_by=None) -> list[Table1]:
...
@staticmethod
def lookupOne(sort_by=None) -> Table1:
'''
Filter for one result matching the keys provided.
To control order, use e.g. `sort_by='Key' or `sort_by='-Key'`.
'''
return Table1.lookupRecords(sort_by=sort_by)[0]
def text(rec: Table1) -> str:
""", lookups=True)
self.assert_prompt("Table2", "ref", '''\
class Table1:
text: str
class Table2:
def ref(rec: Table2) -> Table1:
''')
self.assert_prompt("Table3", "reflist", '''\
class Table1:
text: str
class Table2:
ref: Table1
class Table3:
def reflist(rec: Table3) -> list[Table2]:
''')
def test_convert_completion(self):
completion = """
Here's some code:
```python
import os
from x import (
y,
z,
)
class Foo:
bar: Bar
@property
def foo(rec):
'''This is a docstring'''
x = f"hello {rec.name} " + rec.name + "!"
if rec.bar.spam:
return 0
return rec.a * rec.b
```
Hope you like it!
"""
self.assertEqual(convert_completion(completion), """\
import os
from x import (
y,
z,
)
x = f"hello {$name} " + $name + "!"
if $bar.spam:
return 0
$a * $b""") |
create pack dependencies | import os
from pathlib import Path
from typing import Dict, List
from neo4j import Transaction
from demisto_sdk.commands.common.constants import (
DEPRECATED_CONTENT_PACK,
GENERIC_COMMANDS_NAMES,
MarketplaceVersions,
)
from demisto_sdk.commands.common.handlers import JSON_Handler
from demisto_sdk.commands.common.logger import logger
from demisto_sdk.commands.content_graph.common import (
ContentType,
Neo4jRelationshipResult,
RelationshipType,
)
from demisto_sdk.commands.content_graph.interface.neo4j.queries.common import (
is_target_available,
run_query,
to_neo4j_map,
)
json = JSON_Handler()
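# Packs ignored when computing dependencies, and the maximum number of relationship hops
# followed when resolving transitive dependencies.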
IGNORED_PACKS_IN_DEPENDENCY_CALC = ["NonSupported", "ApiModules"]
MAX_DEPTH = 5
def get_all_level_packs_relationships(
tx: Transaction,
relationship_type: RelationshipType,
ids_list: List[str],
marketplace: MarketplaceVersions,
mandatorily: bool = False,
**properties,
) -> Dict[int, Neo4jRelationshipResult]:
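    # Resolve multi-hop DEPENDS_ON or IMPORTS relationships (up to MAX_DEPTH hops)
    # for the given node element ids.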
params_str = to_neo4j_map(properties)
if relationship_type == RelationshipType.DEPENDS_ON:
query = f"""
UNWIND $ids_list AS node_id
MATCH path = shortestPath((p1:{ContentType.PACK}{params_str})-[r:{relationship_type}*..{MAX_DEPTH}]->(p2:{ContentType.PACK}))
WHERE elementId(p1) = node_id AND elementId(p1) <> elementId(p2)
AND all(n IN nodes(path) WHERE "{marketplace}" IN n.marketplaces)
        AND all(r IN relationships(path) WHERE NOT r.is_test{" AND r.mandatorily = true" if mandatorily else ""})
RETURN node_id, collect(r) as relationships, collect(p2) AS nodes_to
"""
if relationship_type == RelationshipType.IMPORTS:
# search all the content items that import the 'node_from' content item
query = f"""UNWIND $ids_list AS node_id
MATCH path=shortestPath((node_from) <- [relationship:{relationship_type}*..{MAX_DEPTH}] - (node_to))
WHERE elementId(node_from) = node_id and node_from <> node_to
return node_id, node_from, collect(relationship) AS relationships,
collect(node_to) AS nodes_to
"""
result = run_query(tx, query, ids_list=list(ids_list))
logger.debug("Found dependencies.")
return {
item.get("node_id"): Neo4jRelationshipResult(
node_from=item.get("node_from"),
nodes_to=item.get("nodes_to"),
relationships=item.get("relationships"),
)
for item in result
}
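# Recalculate pack dependencies from scratch: drop the previously computed DEPENDS_ON
# edges, derive USES edges for integration commands, drop USES edges pointing into the
# DeprecatedContent pack, then recreate DEPENDS_ON relationships between packs.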
def METHOD_NAME(tx: Transaction) -> None:
remove_existing_depends_on_relationships(tx)
update_uses_for_integration_commands(tx)
delete_deprecatedcontent_relationship(tx) # TODO decide what to do with this
create_depends_on_relationships(tx)
def delete_deprecatedcontent_relationship(tx: Transaction) -> None:
"""
This will delete any USES relationship between a content item and a content item in the deprecated content pack.
At the moment, we do not want to consider this pack in the dependency calculation.
"""
query = f"""// Deletes USES relationships to content items under DeprecatedContent pack.
MATCH (source) - [r:{RelationshipType.USES}] -> (target) - [:{RelationshipType.IN_PACK}] ->
(:{ContentType.PACK}{{object_id: "{DEPRECATED_CONTENT_PACK}"}})
DELETE r
RETURN source.node_id AS source, target.node_id AS target"""
run_query(tx, query)
def remove_existing_depends_on_relationships(tx: Transaction) -> None:
query = f"""// Removes all existing DEPENDS_ON relationships before recalculation
MATCH ()-[r:{RelationshipType.DEPENDS_ON}]->()
WHERE r.from_metadata = false
DELETE r"""
run_query(tx, query)
def update_uses_for_integration_commands(tx: Transaction) -> None:
"""This query creates a relationships between content items and integrations, based on the commands they use.
If a content item uses a command which is in an integration, we create a relationship between the content item and the integration.
The mandatorily property is calculated as follows:
- If there is only one integration that implements the command, the mandatorily property is the same as the command's mandatorily property.
Otherwise, the mandatorily property is false.
- If there is already a relationship between the content item and the integration,
the mandatorily property is the OR of the existing and the new mandatorily property.
Args:
        tx (Transaction): The neo4j transaction in which the queries run.
"""
query = f"""// Creates USES relationships between content items and integrations, based on the commands they use.
MATCH (content_item:{ContentType.BASE_CONTENT})
-[r:{RelationshipType.USES}]->
(command:{ContentType.COMMAND})<-[rcmd:{RelationshipType.HAS_COMMAND}]
-(integration:{ContentType.INTEGRATION})
WHERE {is_target_available("content_item", "integration")}
AND NOT command.object_id IN {list(GENERIC_COMMANDS_NAMES)}
WITH command, count(DISTINCT rcmd) as command_count
MATCH (content_item:{ContentType.BASE_CONTENT})
-[r:{RelationshipType.USES}]->
(command)<-[rcmd:{RelationshipType.HAS_COMMAND}]
-(integration:{ContentType.INTEGRATION})
WHERE {is_target_available("content_item", "integration")}
AND NOT command.object_id IN {list(GENERIC_COMMANDS_NAMES)}
MERGE (content_item)-[u:{RelationshipType.USES}]->(integration)
ON CREATE
SET u.mandatorily = CASE WHEN command_count = 1 THEN r.mandatorily ELSE false END
ON MATCH
SET u.mandatorily = u.mandatorily OR (CASE WHEN command_count = 1 THEN r.mandatorily ELSE false END)
RETURN
content_item.node_id AS content_item,
r.mandatorily AS is_cmd_mandatory,
collect(integration.object_id) AS integrations,
u.mandatorily AS is_integ_mandatory,
command.name AS command"""
run_query(tx, query)
def create_depends_on_relationships(tx: Transaction) -> None:
query = f"""// Creates DEPENDS_ON relationships
MATCH (pack_a:{ContentType.BASE_CONTENT})<-[:{RelationshipType.IN_PACK}]-(a)
-[r:{RelationshipType.USES}]->(b)-[:{RelationshipType.IN_PACK}]->(pack_b:{ContentType.BASE_CONTENT})
WHERE ANY(marketplace IN pack_a.marketplaces WHERE marketplace IN pack_b.marketplaces)
AND elementId(pack_a) <> elementId(pack_b)
AND NOT pack_b.object_id IN pack_a.excluded_dependencies
AND NOT pack_a.name IN {IGNORED_PACKS_IN_DEPENDENCY_CALC}
AND NOT pack_b.name IN {IGNORED_PACKS_IN_DEPENDENCY_CALC}
WITH pack_a, a, r, b, pack_b
MERGE (pack_a)-[dep:{RelationshipType.DEPENDS_ON}]->(pack_b)
ON CREATE
SET dep.is_test = a.is_test,
dep.from_metadata = false,
dep.mandatorily = r.mandatorily
ON MATCH
SET dep.is_test = dep.is_test AND a.is_test,
dep.mandatorily = CASE WHEN dep.from_metadata THEN dep.mandatorily
ELSE r.mandatorily OR dep.mandatorily END
WITH
pack_a.object_id AS pack_a,
pack_b.object_id AS pack_b,
collect({{
source: a.node_id,
target: b.node_id,
mandatorily: r.mandatorily
}}) AS reasons
RETURN
pack_a, pack_b, reasons"""
result = run_query(tx, query)
outputs: Dict[str, Dict[str, list]] = {}
for row in result:
pack_a = row["pack_a"]
pack_b = row["pack_b"]
outputs.setdefault(pack_a, {}).setdefault(pack_b, []).extend(row["reasons"])
if (artifacts_folder := os.getenv("ARTIFACTS_FOLDER")) and Path(
artifacts_folder
).exists():
with open(f"{artifacts_folder}/depends_on.json", "w") as fp:
json.dump(outputs, fp, indent=4) |
test none with another | # Copyright 2014 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unittest
from absl import flags
import mock
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker import timing_util
flags.FLAGS.mark_as_parsed()
class ValidateMeasurementsFlagTestCase(unittest.TestCase):
"""Tests exercising ValidateMeasurementsFlag."""
def testInvalidValue(self):
"""Passing an unrecognized value is not allowed."""
exp_str = 'test: Invalid value for --timing_measurements'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags.ValidationError, exp_regex):
timing_util.ValidateMeasurementsFlag(['test'])
def METHOD_NAME(self):
"""Passing none with another value is not allowed."""
exp_str = 'none: Cannot combine with other --timing_measurements options'
exp_regex = r'^%s$' % re.escape(exp_str)
with self.assertRaisesRegexp(flags.ValidationError, exp_regex):
timing_util.ValidateMeasurementsFlag(['none', 'runtimes'])
def testValid(self):
"""Test various valid combinations."""
validate = timing_util.ValidateMeasurementsFlag
self.assertIs(validate([]), True)
self.assertIs(validate(['none']), True)
self.assertIs(validate(['end_to_end_runtime']), True)
self.assertIs(validate(['runtimes']), True)
self.assertIs(validate(['timestamps']), True)
self.assertIs(validate(['end_to_end_runtime', 'runtimes']), True)
self.assertIs(validate(['end_to_end_runtime', 'timestamps']), True)
self.assertIs(validate(['runtimes', 'timestamps']), True)
self.assertIs(
validate(['end_to_end_runtime', 'runtimes', 'timestamps']), True)
class IntervalTimerTestCase(unittest.TestCase, test_util.SamplesTestMixin):
"""Tests exercising IntervalTimer."""
def testMeasureSequential(self):
"""Verify correct interval tuple generation in sequential measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('First Interval'):
pass
with timer.Measure('Second Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
first_interval = timer.intervals[0]
self.assertEqual(len(first_interval), 3)
first_name = first_interval[0]
first_start = first_interval[1]
first_stop = first_interval[2]
self.assertEqual(first_name, 'First Interval')
second_interval = timer.intervals[1]
self.assertEqual(len(second_interval), 3)
second_name = second_interval[0]
second_start = second_interval[1]
second_stop = second_interval[2]
self.assertEqual(second_name, 'Second Interval')
self.assertLessEqual(first_start, first_stop)
self.assertLessEqual(first_stop, second_start)
self.assertLessEqual(second_start, second_stop)
def testMeasureNested(self):
"""Verify correct interval tuple generation in nested measurements."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
with timer.Measure('Outer Interval'):
with timer.Measure('Inner Interval'):
pass
self.assertEqual(len(timer.intervals), 2)
inner_interval = timer.intervals[0]
self.assertEqual(len(inner_interval), 3)
inner_name = inner_interval[0]
inner_start = inner_interval[1]
inner_stop = inner_interval[2]
self.assertEqual(inner_name, 'Inner Interval')
outer_interval = timer.intervals[1]
self.assertEqual(len(outer_interval), 3)
outer_name = outer_interval[0]
outer_start = outer_interval[1]
outer_stop = outer_interval[2]
self.assertEqual(outer_name, 'Outer Interval')
self.assertLessEqual(outer_start, inner_start)
self.assertLessEqual(inner_start, inner_stop)
self.assertLessEqual(inner_stop, outer_stop)
def testGenerateSamplesMeasureNotCalled(self):
"""GenerateSamples should return an empty list if Measure was not called."""
timer = timing_util.IntervalTimer()
self.assertEqual(timer.intervals, [])
samples = timer.GenerateSamples()
self.assertEqual(timer.intervals, [])
self.assertEqual(samples, [])
def testGenerateSamplesRuntimeNoTimestamps(self):
"""Test generating runtime sample but no timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
samples = timer.GenerateSamples()
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds')]
self.assertSampleListsEqualUpToTimestamp(samples, exp_samples)
def testGenerateSamplesRuntimeAndTimestamps(self):
"""Test generating both runtime and timestamp samples."""
timer = timing_util.IntervalTimer()
with timer.Measure('First'):
pass
with timer.Measure('Second'):
pass
start0 = timer.intervals[0][1]
stop0 = timer.intervals[0][2]
start1 = timer.intervals[1][1]
stop1 = timer.intervals[1][2]
with mock.patch(
'perfkitbenchmarker.timing_util.TimestampMeasurementsEnabled',
return_value=True):
samples = timer.GenerateSamples()
exp_samples = [
sample.Sample('First Runtime', stop0 - start0, 'seconds'),
sample.Sample('First Start Timestamp', start0, 'seconds'),
sample.Sample('First Stop Timestamp', stop0, 'seconds'),
sample.Sample('Second Runtime', stop1 - start1, 'seconds'),
sample.Sample('Second Start Timestamp', start1, 'seconds'),
sample.Sample('Second Stop Timestamp', stop1, 'seconds')]
self.assertSampleListsEqualUpToTimestamp(samples, exp_samples)
if __name__ == '__main__':
unittest.main() |
test copy raw fields | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2023 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo <[email protected]>
# Valerio Cosentino <[email protected]>
#
import logging
import unittest
from base import TestBaseBackend
from grimoire_elk.raw.meetup import MeetupOcean
from grimoire_elk.enriched.utils import REPO_LABELS
class TestMeetup(TestBaseBackend):
"""Test Meetup backend"""
connector = "meetup"
ocean_index = "test_" + connector
enrich_index = "test_" + connector + "_enrich"
ocean_index_anonymized = "test_" + connector + "_anonymized"
enrich_index_anonymized = "test_" + connector + "_enrich_anonymized"
def test_has_identites(self):
"""Test value of has_identities method"""
enrich_backend = self.connectors[self.connector][2]()
self.assertTrue(enrich_backend.has_identities())
def test_items_to_raw(self):
"""Test whether JSON items are properly inserted into ES"""
result = self._test_items_to_raw()
self.assertEqual(result['items'], 3)
self.assertEqual(result['raw'], 3)
def test_raw_to_enrich(self):
"""Test whether the raw index is properly enriched"""
result = self._test_raw_to_enrich()
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 19)
enrich_backend = self.connectors[self.connector][2]()
item = self.items[0]
eitem = enrich_backend.get_rich_item(item)
self.assertEqual(eitem['meetup_created'], '2016-03-22T16:36:44+00:00')
self.assertEqual(eitem['meetup_time'], '2016-04-07T16:30:00+00:00')
self.assertEqual(eitem['meetup_updated'], '2016-04-07T21:39:24+00:00')
self.assertEqual(eitem['group_created'], '2016-03-20T15:13:47+00:00')
self.assertEqual(eitem['group_urlname'], 'sqlpass-es')
self.assertEqual(eitem['author_uuid'], '029aa3befc96d386e1c7270586f1ec1d673b0b1b')
self.assertIsNone(eitem['venue_geolocation'])
item = self.items[1]
eitem = enrich_backend.get_rich_item(item)
self.assertEqual(eitem['meetup_created'], '2016-05-31T17:30:48+00:00')
self.assertEqual(eitem['meetup_time'], '2016-06-09T16:45:00+00:00')
self.assertEqual(eitem['meetup_updated'], '2016-06-09T20:18:18+00:00')
self.assertEqual(eitem['group_created'], '2016-03-20T15:13:47+00:00')
self.assertEqual(eitem['group_urlname'], 'sqlpass-es')
self.assertEqual(eitem['author_uuid'], '810d53ef4a9ae2ebd8064ac690b2e13cfc2df924')
self.assertIsNotNone(eitem['venue_geolocation'])
def test_enrich_repo_labels(self):
"""Test whether the field REPO_LABELS is present in the enriched items"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
self.assertIn(REPO_LABELS, eitem)
def test_raw_to_enrich_sorting_hat(self):
"""Test enrich with SortingHat"""
result = self._test_raw_to_enrich(sortinghat=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 19)
enrich_backend = self.connectors[self.connector][2]()
url = self.es_con + "/" + self.enrich_index + "/_search"
response = enrich_backend.requests.get(url, verify=False).json()
for hit in response['hits']['hits']:
source = hit['_source']
if 'author_uuid' in source:
self.assertIn('author_domain', source)
self.assertIn('author_gender', source)
self.assertIn('author_gender_acc', source)
self.assertIn('author_org_name', source)
self.assertIn('author_bot', source)
self.assertIn('author_multi_org_names', source)
def test_raw_to_enrich_projects(self):
"""Test enrich with Projects"""
result = self._test_raw_to_enrich(projects=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 19)
def test_refresh_identities(self):
"""Test refresh identities"""
result = self._test_refresh_identities()
# ... ?
def test_perceval_params(self):
"""Test the extraction of perceval params from an URL"""
url = "South-East-Puppet-User-Group"
expected_params = [
"--tag",
"South-East-Puppet-User-Group",
"South-East-Puppet-User-Group"
]
self.assertListEqual(MeetupOcean.get_perceval_params_from_url(url), expected_params)
def test_items_to_raw_anonymized(self):
"""Test whether JSON items are properly inserted into ES anonymized"""
result = self._test_items_to_raw_anonymized()
self.assertGreater(result['items'], 0)
self.assertGreater(result['raw'], 0)
self.assertEqual(result['items'], result['raw'])
item = self.items[0]['data']
self.assertEqual(item['event_hosts'][0]['name'], '3b3e55fdc7886baea165a854d080caf9808cac97')
self.assertEqual(item['rsvps'][0]['member']['name'], '3b3e55fdc7886baea165a854d080caf9808cac97')
self.assertEqual(item['rsvps'][1]['member']['name'], '9b0740c20617be08bd6b81a02017e63235cc0204')
self.assertEqual(item['rsvps'][2]['member']['name'], 'cbd5438b1e1084c1d85bec65a96ca566d9b2ef2e')
item = self.items[1]['data']
self.assertEqual(item['event_hosts'][0]['name'], 'aff2cc6caa4228a709ac3bba6b303c7e5dcce550')
self.assertEqual(item['event_hosts'][1]['name'], '3b3e55fdc7886baea165a854d080caf9808cac97')
self.assertEqual(item['rsvps'][0]['member']['name'], '3b3e55fdc7886baea165a854d080caf9808cac97')
self.assertEqual(item['rsvps'][1]['member']['name'], '9b0740c20617be08bd6b81a02017e63235cc0204')
self.assertEqual(item['rsvps'][2]['member']['name'], 'cbd5438b1e1084c1d85bec65a96ca566d9b2ef2e')
self.assertEqual(item['comments'][0]['member']['name'], '58668e7669fd564d99db5d581fcdb6a5618440b5')
self.assertEqual(item['comments'][1]['member']['name'], 'c96634ae1100ab91de991e40bb2fe656bd765de1')
def test_raw_to_enrich_anonymized(self):
"""Test whether the raw index is properly enriched"""
result = self._test_raw_to_enrich_anonymized()
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 19)
def METHOD_NAME(self):
"""Test copied raw fields"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
for attribute in enrich_backend.RAW_FIELDS_COPY:
if attribute in item:
self.assertEqual(item[attribute], eitem[attribute])
else:
self.assertIsNone(eitem[attribute])
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
unittest.main(warnings='ignore') |
execute code | """
Ethereum Virtual Machine (EVM) Interpreter
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. contents:: Table of Contents
:backlinks: none
:local:
Introduction
------------
A straightforward interpreter that executes EVM code.
"""
from dataclasses import dataclass
from typing import Set, Tuple
from ethereum import evm_trace
from ethereum.base_types import U256, Bytes0, Uint
from ..fork_types import Address, Log
from ..state import (
account_has_code_or_nonce,
begin_transaction,
commit_transaction,
destroy_storage,
move_ether,
rollback_transaction,
set_code,
touch_account,
)
from ..vm import Message
from ..vm.gas import GAS_CODE_DEPOSIT, REFUND_SELF_DESTRUCT, charge_gas
from ..vm.precompiled_contracts.mapping import PRE_COMPILED_CONTRACTS
from . import Environment, Evm
from .exceptions import ExceptionalHalt, InvalidOpcode, StackDepthLimitError
from .instructions import Ops, op_implementation
from .runtime import get_valid_jump_destinations
STACK_DEPTH_LIMIT = U256(1024)
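# Message calls may nest at most 1024 levels deep; exceeding this raises StackDepthLimitError.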
@dataclass
class MessageCallOutput:
"""
Output of a particular message call
Contains the following:
1. `gas_left`: remaining gas after execution.
2. `refund_counter`: gas to refund after execution.
3. `logs`: list of `Log` generated during execution.
4. `accounts_to_delete`: Contracts which have self-destructed.
5. `has_erred`: True if execution has caused an error.
"""
gas_left: Uint
refund_counter: U256
logs: Tuple[Log, ...]
accounts_to_delete: Set[Address]
has_erred: bool
def process_message_call(
message: Message, env: Environment
) -> MessageCallOutput:
"""
    If `message.target` is empty then it creates a smart contract
else it executes a call from the `message.caller` to the `message.target`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
output : `MessageCallOutput`
Output of the message call
"""
if message.target == Bytes0(b""):
is_collision = account_has_code_or_nonce(
env.state, message.current_target
)
if is_collision:
return MessageCallOutput(Uint(0), U256(0), tuple(), set(), True)
else:
evm = process_create_message(message, env)
else:
evm = process_message(message, env)
if evm.has_erred:
logs: Tuple[Log, ...] = ()
accounts_to_delete = set()
refund_counter = U256(0)
else:
logs = evm.logs
accounts_to_delete = evm.accounts_to_delete
refund_counter = evm.refund_counter + REFUND_SELF_DESTRUCT * len(
evm.accounts_to_delete
)
return MessageCallOutput(
gas_left=evm.gas_left,
refund_counter=refund_counter,
logs=logs,
accounts_to_delete=accounts_to_delete,
has_erred=evm.has_erred,
)
def process_create_message(message: Message, env: Environment) -> Evm:
"""
Executes a call to create a smart contract.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.tangerine_whistle.vm.Evm`
Items containing execution specific objects.
"""
# take snapshot of state before processing the message
begin_transaction(env.state)
# If the address where the account is being created has storage, it is
# destroyed. This can only happen in the following highly unlikely
# circumstances:
# * The address created by two `CREATE` calls collide.
# * The first `CREATE` left empty code.
destroy_storage(env.state, message.current_target)
evm = process_message(message, env)
if not evm.has_erred:
contract_code = evm.output
contract_code_gas = len(contract_code) * GAS_CODE_DEPOSIT
try:
charge_gas(evm, contract_code_gas)
except ExceptionalHalt:
rollback_transaction(env.state)
evm.gas_left = Uint(0)
evm.has_erred = True
else:
set_code(env.state, message.current_target, contract_code)
commit_transaction(env.state)
else:
rollback_transaction(env.state)
return evm
def process_message(message: Message, env: Environment) -> Evm:
"""
    Executes a message call: transfers any value to the target and runs its code.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: :py:class:`~ethereum.tangerine_whistle.vm.Evm`
Items containing execution specific objects
"""
if message.depth > STACK_DEPTH_LIMIT:
raise StackDepthLimitError("Stack depth limit reached")
# take snapshot of state before processing the message
begin_transaction(env.state)
touch_account(env.state, message.current_target)
if message.should_transfer_value and message.value != 0:
move_ether(
env.state, message.caller, message.current_target, message.value
)
evm = METHOD_NAME(message, env)
if evm.has_erred:
# revert state to the last saved checkpoint
# since the message call resulted in an error
rollback_transaction(env.state)
else:
commit_transaction(env.state)
return evm
def METHOD_NAME(message: Message, env: Environment) -> Evm:
"""
Executes bytecode present in the `message`.
Parameters
----------
message :
Transaction specific items.
env :
External items required for EVM execution.
Returns
-------
evm: `ethereum.vm.EVM`
Items containing execution specific objects
"""
code = message.code
valid_jump_destinations = get_valid_jump_destinations(code)
evm = Evm(
pc=Uint(0),
stack=[],
memory=bytearray(),
code=code,
gas_left=message.gas,
env=env,
valid_jump_destinations=valid_jump_destinations,
logs=(),
refund_counter=U256(0),
running=True,
message=message,
output=b"",
accounts_to_delete=set(),
has_erred=False,
)
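    # Precompiled contracts are dispatched to native implementations; everything else
    # runs through the opcode dispatch loop below.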
try:
if evm.message.code_address in PRE_COMPILED_CONTRACTS:
evm_trace(evm, evm.message.code_address)
PRE_COMPILED_CONTRACTS[evm.message.code_address](evm)
return evm
while evm.running and evm.pc < len(evm.code):
try:
op = Ops(evm.code[evm.pc])
except ValueError:
raise InvalidOpcode(evm.code[evm.pc])
evm_trace(evm, op)
op_implementation[op](evm)
except ExceptionalHalt:
evm.gas_left = Uint(0)
evm.has_erred = True
return evm |
test get bad user page | # Tests in this file use the secretariat credentials
import requests
import json
import random
import string
from src import env, utils
from src.utils import (assert_contains, ok_response_contains,
ok_response_contains_json, response_contains,
response_contains_json)
#### GET /users ####
def test_get_all_users():
""" secretariat users can request a list of all users """
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS
)
test_user={}
for user in json.loads(res.content.decode())['users']:
if user['username'] == '[email protected]':
test_user = user
break
assert test_user['username'] == '[email protected]'
assert test_user['name']['first'] == 'Jeremy'
assert '"secret"' not in res.content.decode() # check that no secrets are included
assert res.status_code == 200
#### GET /users ####
def test_regular_users_cannot_get_all_users(reg_user_headers):
""" regular users cannot request a list of all users """
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=reg_user_headers
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
#### GET /users ####
def test_org_admins_cannot_get_all_users(org_admin_headers):
""" org admins cannot request a list of all users """
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=org_admin_headers
)
assert res.status_code == 403
response_contains_json(res, 'error', 'SECRETARIAT_ONLY')
def test_get_user_page():
""" page must be a positive int """
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS,
params={
'page': '1',
}
)
assert res.status_code == 200
def METHOD_NAME():
""" page must be a positive int """
# test negative
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS,
params={
'page': '-1',
}
)
assert res.status_code == 400
response_contains_json(res, 'error', 'BAD_INPUT')
response_contains_json(res, 'details', utils.BAD_PAGE_ERROR_DETAILS)
# test strings
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS,
params={
'page': 'abc',
}
)
assert res.status_code == 400
response_contains_json(res, 'error', 'BAD_INPUT')
response_contains_json(res, 'details', utils.BAD_PAGE_ERROR_DETAILS)
def test_put_user_update_name():
""" correct error is returned when updating user to same org """
# grab all users to find one to update
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS
)
assert res.status_code == 200
all_users = json.loads(res.content.decode())['users']
assert len(all_users) > 0
test_user = all_users[0]
# can only use the org shortname in the URL
org_res = requests.get(
f'{env.AWG_BASE_URL}/api/org/{test_user["org_UUID"]}',
headers=utils.BASE_HEADERS
)
assert org_res.status_code == 200
org = json.loads(org_res.content.decode())
# random string for test name
test_name = ''.join(random.choices(string.ascii_letters, k=16))
res = requests.put(
f'{env.AWG_BASE_URL}/api/org/{org["short_name"]}/user/{test_user["username"]}',
headers=utils.BASE_HEADERS,
params={
'name.first': test_name
}
)
assert res.status_code == 200
res = requests.get(
f'{env.AWG_BASE_URL}/api/org/{org["short_name"]}/user/{test_user["username"]}',
headers=utils.BASE_HEADERS
)
assert json.loads(res.content.decode())['name']['first'] == test_name
# put the name back to what it was because tests don't reset the data
res = requests.put(
f'{env.AWG_BASE_URL}/api/org/{org["short_name"]}/user/{test_user["username"]}',
headers=utils.BASE_HEADERS,
params={
'name.first': test_user['name']['first']
}
)
assert res.status_code == 200
def test_put_user_error_for_same_org():
""" correct error is returned when updating user to same org """
# grab all users to find one to update
res = requests.get(
f'{env.AWG_BASE_URL}/api/users',
headers=utils.BASE_HEADERS
)
assert res.status_code == 200
all_users = json.loads(res.content.decode())['users']
assert len(all_users) > 0
# try to update user to same org
test_user = all_users[0]
# can only use the org shortname in the URL
org_res = requests.get(
f'{env.AWG_BASE_URL}/api/org/{test_user["org_UUID"]}',
headers=utils.BASE_HEADERS
)
assert org_res.status_code == 200
org = json.loads(org_res.content.decode())
res = requests.put(
f'{env.AWG_BASE_URL}/api/org/{org["short_name"]}/user/{test_user["username"]}',
headers=utils.BASE_HEADERS,
params={
'org_short_name': org['short_name']
}
)
assert res.status_code == 403
err = json.loads(res.content.decode())
assert err['error'] == 'USER_ALREADY_IN_ORG'
assert err['message'] == f'The user could not be updated because the user \'{test_user["username"]}\' already belongs to the \'{org["short_name"]}\' organization.' |
test no args | """
Unittests for creating a course in an chosen modulestore
"""
from io import StringIO
from django.core.management import CommandError, call_command
from django.test import TestCase
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class TestArgParsing(TestCase):
"""
Tests for parsing arguments for the `create_course` management command
"""
def setUp(self): # lint-amnesty, pylint: disable=useless-super-delegation
super().setUp()
def METHOD_NAME(self):
errstring = "Error: the following arguments are required: modulestore, user, org, number, run"
with self.assertRaisesRegex(CommandError, errstring):
call_command('create_course')
def test_invalid_store(self):
with self.assertRaises(CommandError):
call_command('create_course', "foo", "[email protected]", "org", "course", "run")
def test_nonexistent_user_id(self):
errstring = "No user 99 found"
with self.assertRaisesRegex(CommandError, errstring):
call_command('create_course', "split", "99", "org", "course", "run")
def test_nonexistent_user_email(self):
errstring = "No user [email protected] found"
with self.assertRaisesRegex(CommandError, errstring):
call_command('create_course', "mongo", "[email protected]", "org", "course", "run")
class TestCreateCourse(ModuleStoreTestCase):
"""
Unit tests for creating a course in either old mongo or split mongo via command line
"""
def test_all_stores_user_email(self):
call_command(
"create_course",
ModuleStoreEnum.Type.split,
str(self.user.email),
"org", "course", "run", "dummy-course-name"
)
new_key = modulestore().make_course_key("org", "course", "run")
self.assertTrue(
modulestore().has_course(new_key),
f"Could not find course in {ModuleStoreEnum.Type.split}"
)
# pylint: disable=protected-access
self.assertEqual(
ModuleStoreEnum.Type.split,
modulestore()._get_modulestore_for_courselike(new_key).get_modulestore_type()
)
def test_duplicate_course(self):
"""
Test that creating a duplicate course exception is properly handled
"""
call_command(
"create_course",
"split",
str(self.user.email),
"org", "course", "run", "dummy-course-name"
)
# create the course again
out = StringIO()
call_command(
"create_course",
"split",
str(self.user.email),
"org", "course", "run", "dummy-course-name",
stderr=out
)
expected = "Course already exists"
self.assertIn(out.getvalue().strip(), expected)
def test_get_course_with_different_case(self):
"""
Tests that course can not be accessed with different case.
Scenario:
Create a course with lower case keys inside `bulk_operations` with `ignore_case=True`.
Verify that course is created.
Verify that get course from store using same course id but different case is not accessible.
"""
org = 'org1'
number = 'course1'
run = 'run1'
lowercase_course_id = self.store.make_course_key(org, number, run)
with self.store.bulk_operations(lowercase_course_id, ignore_case=True):
# Create course with lowercase key & Verify that store returns course.
self.store.create_course(
lowercase_course_id.org,
lowercase_course_id.course,
lowercase_course_id.run,
self.user.id
)
course = self.store.get_course(lowercase_course_id)
self.assertIsNotNone(course, 'Course not found using lowercase course key.')
self.assertEqual(str(course.id), str(lowercase_course_id))
# Verify store does not return course with different case.
uppercase_course_id = self.store.make_course_key(org.upper(), number.upper(), run.upper())
course = self.store.get_course(uppercase_course_id)
self.assertIsNone(course, 'Course should not be accessed with uppercase course id.') |
solvevec | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes the adjoint of a `LinearOperator`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
from tensorflow.python.ops.linalg import linear_operator_util
from tensorflow.python.util.tf_export import tf_export
__all__ = []
@tf_export("linalg.LinearOperatorAdjoint")
class LinearOperatorAdjoint(linear_operator.LinearOperator):
"""`LinearOperator` representing the adjoint of another operator.
This operator represents the adjoint of another operator.
```python
# Create a 2 x 2 linear operator.
operator = LinearOperatorFullMatrix([[1 - i., 3.], [0., 1. + i]])
operator_adjoint = LinearOperatorAdjoint(operator)
operator_adjoint.to_dense()
==> [[1. + i, 0.]
[3., 1 - i]]
operator_adjoint.shape
==> [2, 2]
operator_adjoint.log_abs_determinant()
==> - log(2)
x = ... Shape [2, 4] Tensor
operator_adjoint.matmul(x)
==> Shape [2, 4] Tensor, equal to operator.matmul(x, adjoint=True)
```
#### Performance
The performance of `LinearOperatorAdjoint` depends on the underlying
operators performance.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operator,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorAdjoint`.
`LinearOperatorAdjoint` is initialized with an operator `A`. The `solve`
and `matmul` methods effectively flip the `adjoint` argument. E.g.
```
A = MyLinearOperator(...)
B = LinearOperatorAdjoint(A)
x = [....] # a vector
assert A.matvec(x, adjoint=True) == B.matvec(x, adjoint=False)
```
Args:
operator: `LinearOperator` object.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix#Extension_for_non-symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is `operator.name +
"_adjoint"`.
Raises:
ValueError: If `operator.is_non_singular` is False.
"""
self._operator = operator
# The congruency of is_non_singular and is_self_adjoint was checked in the
# base operator.
combine_hint = (
linear_operator_util.use_operator_or_provided_hint_unless_contradicting)
is_square = combine_hint(
operator, "is_square", is_square,
"An operator is square if and only if its adjoint is square.")
is_non_singular = combine_hint(
operator, "is_non_singular", is_non_singular,
"An operator is non-singular if and only if its adjoint is "
"non-singular.")
is_self_adjoint = combine_hint(
operator, "is_self_adjoint", is_self_adjoint,
"An operator is self-adjoint if and only if its adjoint is "
"self-adjoint.")
is_positive_definite = combine_hint(
operator, "is_positive_definite", is_positive_definite,
"An operator is positive-definite if and only if its adjoint is "
"positive-definite.")
# Initialization.
if name is None:
name = operator.name + "_adjoint"
with ops.name_scope(name, values=operator.graph_parents):
super(LinearOperatorAdjoint, self).__init__(
dtype=operator.dtype,
graph_parents=operator.graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operator(self):
"""The operator before taking the adjoint."""
return self._operator
def _assert_non_singular(self):
return self.operator.assert_non_singular()
def _assert_positive_definite(self):
return self.operator.assert_positive_definite()
def _assert_self_adjoint(self):
return self.operator.assert_self_adjoint()
def _shape(self):
# Rotate last dimension
shape = self.operator.shape
return shape[:-2].concatenate([shape[-1], shape[-2]])
def _shape_tensor(self):
# Rotate last dimension
shape = self.operator.shape_tensor()
return array_ops.concat([
shape[:-2], [shape[-1], shape[-2]]], axis=-1)
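  # matmul, matvec, solve and solvevec all delegate to the wrapped operator with the
  # `adjoint` flag flipped.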
def _matmul(self, x, adjoint=False, adjoint_arg=False):
return self.operator.matmul(
x, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def _matvec(self, x, adjoint=False):
return self.operator.matvec(x, adjoint=(not adjoint))
def _determinant(self):
if self.is_self_adjoint:
return self.operator.determinant()
return math_ops.conj(self.operator.determinant())
def _log_abs_determinant(self):
return self.operator.log_abs_determinant()
def _trace(self):
if self.is_self_adjoint:
return self.operator.trace()
return math_ops.conj(self.operator.trace())
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
return self.operator.solve(
rhs, adjoint=(not adjoint), adjoint_arg=adjoint_arg)
def METHOD_NAME(self, rhs, adjoint=False):
return self.operator.solvevec(rhs, adjoint=(not adjoint))
def _to_dense(self):
if self.is_self_adjoint:
return self.operator.to_dense()
return linalg.adjoint(self.operator.to_dense())
def _add_to_tensor(self, x):
return self.to_dense() + x |
get batch fmtstr | #!/usr/bin/env python3
# ==============================================================================
#
# Copyright (C) 2022 Sophgo Technologies Inc. All rights reserved.
#
# TPU-MLIR is licensed under the 2-Clause BSD License except for the
# third-party components.
#
# ==============================================================================
import numpy as np
import time
import argparse
import pymlir
import onnx
import onnxruntime
from utils.mlir_shell import *
from utils.mlir_parser import *
from tools.model_runner import mlir_inference, model_inference
from utils.preprocess import preprocess
import torch
import torch.distributed as dist  # needed by AverageMeter.all_reduce
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from enum import Enum
def mlir_validate(val_loader, module, ppa_list, count=-1):
"""https://github.com/pytorch/examples/blob/main/imagenet/main.py"""
criterion = nn.CrossEntropyLoss()
batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
losses = AverageMeter('Loss', ':.4e', Summary.NONE)
top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
progress = ProgressMeter(
len(val_loader),
[top1, top5, losses, batch_time],
prefix='Test: ')
end = time.time()
for i, item in enumerate(val_loader):
(images,target), (path,_) = item
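        # Preprocess from the original image path using the model's own preprocessing
        # config; the dataloader tensor is only used for the batch size below.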
assert(input_num == 1)
x = ppa_list[0].run(path[0])
module.set_tensor(ppa_list[0].input_name, x)
module.invoke()
tensors = module.get_all_tensor()
assert(len(module.output_names) == 1)
output = torch.from_numpy(tensors[module.output_names[0]])
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % 20 == 0:
progress.display(i + 1)
if i == count:
break
class MyImageFolder(datasets.ImageFolder):
def __getitem__(self, index):
return super(MyImageFolder, self).__getitem__(index), self.imgs[index]
class Summary(Enum):
NONE = 0
AVERAGE = 1
SUM = 2
COUNT = 3
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
self.name = name
self.fmt = fmt
self.summary_type = summary_type
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def all_reduce(self):
total = torch.FloatTensor([self.sum, self.count])
dist.all_reduce(total, dist.ReduceOp.SUM, async_op=False)
self.sum, self.count = total.tolist()
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
def summary(self):
fmtstr = ''
if self.summary_type is Summary.NONE:
fmtstr = ''
elif self.summary_type is Summary.AVERAGE:
fmtstr = '{name} {avg:.3f}'
elif self.summary_type is Summary.SUM:
fmtstr = '{name} {sum:.3f}'
elif self.summary_type is Summary.COUNT:
fmtstr = '{name} {count:.3f}'
else:
raise ValueError('invalid summary type %r' % self.summary_type)
return fmtstr.format(**self.__dict__)
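# Illustrative use of AverageMeter (not part of the original script; values
# are placeholders): metrics are accumulated per batch, weighted by batch size.
#   m = AverageMeter('Acc@1', ':6.2f')
#   m.update(75.0, n=32)
#   str(m)  # -> 'Acc@1  75.00 ( 75.00)'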
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self.METHOD_NAME(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def display_summary(self):
entries = [" *"]
entries += [meter.summary() for meter in self.meters]
print(' '.join(entries))
def METHOD_NAME(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
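# Illustrative output of the batch format-string helper above (METHOD_NAME in
# this dump): for 100 batches it yields '[{:3d}/100]', so display(7) prints
# lines like 'Test: [  7/100] ...'.
#   ProgressMeter(100, []).batch_fmtstr  # -> '[{:3d}/100]'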
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
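# Worked example for accuracy() (illustrative values only):
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])  # logits, shape (2, 2)
#   target = torch.tensor([1, 0])
#   accuracy(output, target, topk=(1,))  # -> [tensor([100.])]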
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True, help="mlir file.")
parser.add_argument("--dataset", required=True, help="imagenet dataset")
parser.add_argument('--count', type=int, required=False, default=-1,
help='num of images for eval')
args = parser.parse_args()
val_dataset = MyImageFolder(
args.dataset,
transforms.Compose([
transforms.PILToTensor(),
]))
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=1, shuffle=True,
num_workers=1, pin_memory=True, sampler=None)
if args.model.endswith('.onnx'):
raise RuntimeError("ONNX not supported yet, modle file:{}".format(args.model))
elif args.model.endswith('.mlir'):
print("Running eval on imagenet with Modle file:{}".format(args.model))
# construct ppa_list from mlir
ppa_list = []
module_parsered = MlirParser(args.model)
input_num = module_parsered.get_input_num()
for i in range(input_num):
tmp = preprocess()
tmp.load_config(module_parsered.get_input_op_by_idx(i))
ppa_list.append(tmp)
print(ppa_list)
print(ppa_list[0].input_name)
# validate
module = pymlir.module()
module.load(args.model)
mlir_validate(val_loader, module, ppa_list, args.count)
elif args.model.endswith(".tflite"):
raise RuntimeError("TFLite not supported yet, modle file:{}".format(args.model))
elif args.model.endswith(".bmodel") or args.model.endswith(".cvimodel"):
raise RuntimeError("bmodel not supported yet, modle file:{}".format(args.model))
else:
raise RuntimeError("not support modle file:{}".format(args.model)) |
handler | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"reservations reservation-order change-directory",
)
class ChangeDirectory(AAZCommand):
"""Change directory (tenant) of `ReservationOrder` and all `Reservation` under it to specified tenant id
:example: Change a reservation order to another tenant
az reservations reservation-order change-directory --reservation-order-id 50000000-aaaa-bbbb-cccc-200000000000 --destination-tenant-id 10000000-aaaa-bbbb-cccc-200000000011
"""
_aaz_info = {
"version": "2022-11-01",
"resources": [
["mgmt-plane", "/providers/microsoft.capacity/reservationorders/{}/changedirectory", "2022-11-01"],
]
}
def METHOD_NAME(self, command_args):
super().METHOD_NAME(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.reservation_order_id = AAZStrArg(
options=["--reservation-order-id"],
help="Order Id of the reservation",
required=True,
)
# define Arg Group "Body"
_args_schema = cls._args_schema
_args_schema.destination_tenant_id = AAZStrArg(
options=["--destination-tenant-id"],
arg_group="Body",
help="Tenant id GUID that reservation order is to be transferred to",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ReservationOrderChangeDirectory(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class ReservationOrderChangeDirectory(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/providers/Microsoft.Capacity/reservationOrders/{reservationOrderId}/changeDirectory",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"reservationOrderId", self.ctx.args.reservation_order_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-11-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
@property
def content(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
_builder.set_prop("destinationTenantId", AAZStrType, ".destination_tenant_id")
return self.serialize_content(_content_value)
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.reservation_order = AAZObjectType(
serialized_name="reservationOrder",
)
_ChangeDirectoryHelper._build_schema_change_directory_result_read(_schema_on_200.reservation_order)
_schema_on_200.reservations = AAZListType()
reservations = cls._schema_on_200.reservations
reservations.Element = AAZObjectType()
_ChangeDirectoryHelper._build_schema_change_directory_result_read(reservations.Element)
return cls._schema_on_200
class _ChangeDirectoryHelper:
"""Helper class for ChangeDirectory"""
_schema_change_directory_result_read = None
@classmethod
def _build_schema_change_directory_result_read(cls, _schema):
if cls._schema_change_directory_result_read is not None:
_schema.error = cls._schema_change_directory_result_read.error
_schema.id = cls._schema_change_directory_result_read.id
_schema.is_succeeded = cls._schema_change_directory_result_read.is_succeeded
_schema.name = cls._schema_change_directory_result_read.name
return
cls._schema_change_directory_result_read = _schema_change_directory_result_read = AAZObjectType()
change_directory_result_read = _schema_change_directory_result_read
change_directory_result_read.error = AAZStrType()
change_directory_result_read.id = AAZStrType()
change_directory_result_read.is_succeeded = AAZBoolType(
serialized_name="isSucceeded",
)
change_directory_result_read.name = AAZStrType()
_schema.error = cls._schema_change_directory_result_read.error
_schema.id = cls._schema_change_directory_result_read.id
_schema.is_succeeded = cls._schema_change_directory_result_read.is_succeeded
_schema.name = cls._schema_change_directory_result_read.name
__all__ = ["ChangeDirectory"] |
genpareto log prob | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Dict, List, Optional, Tuple
import numpy as np
from gluonts.core.component import validated
from gluonts.mx import Tensor
from gluonts.mx.distribution import box_cox_transform, uniform
from gluonts.mx.distribution.distribution import (
MAX_SUPPORT_VAL,
_sample_multiple,
getF,
softplus,
)
from gluonts.mx.distribution.distribution_output import DistributionOutput
from .distribution import Distribution
class GenPareto(Distribution):
r"""
Generalised Pareto distribution.
Parameters
----------
xi
Tensor containing the xi shape parameters, of shape
`(*batch_shape, *event_shape)`.
beta
Tensor containing the beta scale parameters, of shape
`(*batch_shape, *event_shape)`.
"""
is_reparameterizable = False
@validated()
def __init__(self, xi: Tensor, beta: Tensor) -> None:
self.xi = xi
self.beta = beta
@property
def F(self):
return getF(self.xi)
@property
def support_min_max(self) -> Tuple[Tensor, Tensor]:
F = self.F
return (
F.zeros(self.batch_shape),
F.ones(self.batch_shape) * MAX_SUPPORT_VAL,
)
@property
def batch_shape(self) -> Tuple:
return self.xi.shape
@property
def event_shape(self) -> Tuple:
return ()
@property
def event_dim(self) -> int:
return 0
def log_prob(self, x: Tensor) -> Tensor:
F = self.F
xi, beta = self.xi, self.beta
def METHOD_NAME(x, xi, beta):
x_shifted = F.broadcast_div(x, beta)
return -(1 + F.reciprocal(xi)) * F.log1p(xi * x_shifted) - F.log(
beta
)
        # The inner log-prob above returns NaN for x < 0. Wherever a NaN
        # appears in either of the F.where() conditional branches, F.where()
        # returns NaN at that entry as well, due to its indicator-function
        # multiplication: 1*f(x) + np.nan*0 = nan, since np.nan*0 returns nan.
        # Evaluating the inner log-prob on F.abs(x) therefore avoids NaN
        # returns for x < 0 without altering the value for x >= 0. This is a
        # known issue in PyTorch as well:
        # https://github.com/pytorch/pytorch/issues/12986.
return F.where(
x < 0,
-(10.0**15) * F.ones_like(x),
METHOD_NAME(F.abs(x), xi, beta),
)
def cdf(self, x: Tensor) -> Tensor:
F = self.F
x_shifted = F.broadcast_div(x, self.beta)
u = 1 - F.power(1 + self.xi * x_shifted, -F.reciprocal(self.xi))
return u
def quantile(self, level: Tensor):
F = self.F
# we consider level to be an independent axis and so expand it
# to shape (num_levels, 1, 1, ...)
for _ in range(self.all_dim):
level = level.expand_dims(axis=-1)
x_shifted = F.broadcast_div(F.power(1 - level, -self.xi) - 1, self.xi)
x = F.broadcast_mul(x_shifted, self.beta)
return x
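    # Sketch of the inversion used above: the cdf gives
    #   u = 1 - (1 + xi * x / beta) ** (-1 / xi),
    # so solving for x at a given level u yields
    #   x = beta * ((1 - u) ** (-xi) - 1) / xi,
    # which is exactly what quantile() computes element-wise.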
@property
def mean(self) -> Tensor:
F = self.F
return F.where(
self.xi < 1,
F.broadcast_div(self.beta, 1 - self.xi),
np.nan * F.ones_like(self.xi),
)
@property
def variance(self) -> Tensor:
F = self.F
xi, beta = self.xi, self.beta
return F.where(
xi < 1 / 2,
F.broadcast_div(
beta**2, F.broadcast_mul((1 - xi) ** 2, (1 - 2 * xi))
),
np.nan * F.ones_like(xi),
)
@property
def stddev(self) -> Tensor:
return self.F.sqrt(self.variance)
def sample(
self, num_samples: Optional[int] = None, dtype=np.float32
) -> Tensor:
def s(xi: Tensor, beta: Tensor) -> Tensor:
F = getF(xi)
sample_U = uniform.Uniform(
F.zeros_like(xi), F.ones_like(xi)
).sample()
boxcox = box_cox_transform.BoxCoxTransform(-xi, F.array([0]))
sample_X = -1 * boxcox.f(1 - sample_U) * beta
return sample_X
samples = _sample_multiple(
s,
xi=self.xi,
beta=self.beta,
num_samples=num_samples,
)
return self.F.clip(
data=samples, a_min=np.finfo(dtype).eps, a_max=np.finfo(dtype).max
)
@property
def args(self) -> List:
return [self.xi, self.beta]
class GenParetoOutput(DistributionOutput):
args_dim: Dict[str, int] = {"xi": 1, "beta": 1}
distr_cls: type = GenPareto
@classmethod
def domain_map(cls, F, xi, beta):
r"""
Maps raw tensors to valid arguments for constructing a Generalized
Pareto distribution.
Parameters
----------
F:
xi:
Tensor of shape `(*batch_shape, 1)`
beta:
Tensor of shape `(*batch_shape, 1)`
Returns
-------
Tuple[Tensor, Tensor]:
Two squeezed tensors, of shape `(*batch_shape)`: both have entries
mapped to the positive orthant.
"""
xi = F.maximum(softplus(F, xi), cls.eps())
beta = F.maximum(softplus(F, beta), cls.eps())
return xi.squeeze(axis=-1), beta.squeeze(axis=-1)
@property
def event_shape(self) -> Tuple:
return ()
@property
def value_in_support(self) -> float:
return 0.5 |
create | import astropy.units as u
from sunpy.coordinates.utils import get_rectangle_coordinates
from sunpy.net._attrs import Time, Wavelength
from sunpy.net.attr import AttrAnd, AttrComparison, AttrOr, AttrWalker, DataAttr, SimpleAttr
__all__ = ['Series', 'Protocol', 'Notify', 'Segment', 'PrimeKey', 'Cutout', "Keyword"]
# Define a custom __dir__ to restrict tab-completion to __all__
def __dir__():
return __all__
class Series(SimpleAttr):
"""
The JSOC Series to Download.
This is the list of `Series <http://jsoc.stanford.edu/JsocSeries_DataProducts_map.html>`__.
"""
class PrimeKey(DataAttr):
"""
Prime Keys
Parameters
----------
label : str
value : str
"""
def __init__(self, label, value):
super().__init__()
self.label = label
self.value = value
def __repr__(self):
return f"{object.__repr__(self)}" + "\n" + f"{self.label, self.value}"
def collides(self, other):
return False
class KeywordComparison(AttrComparison):
"""
Allows comparison filtering of the JSOC Keywords with the ability to specify the comparison operator.
Parameters
----------
name : str
operator : str
value : Numeric
"""
class Keyword(SimpleAttr):
"""
Allows comparison filtering of the JSOC Keywords.
Parameters
----------
value : str
"""
def __lt__(self, other):
return KeywordComparison(self.value, '<', other)
def __le__(self, other):
return KeywordComparison(self.value, '<=', other)
def __gt__(self, other):
return KeywordComparison(self.value, '>', other)
def __ge__(self, other):
return KeywordComparison(self.value, '>=', other)
def __eq__(self, other):
return KeywordComparison(self.value, '=', other)
def __ne__(self, other):
return KeywordComparison(self.value, '!=', other)
def collides(self, other):
return isinstance(other, Keyword)
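# Illustrative use (attribute name and value are placeholders): comparing a
# Keyword yields a KeywordComparison, which the walker below serializes, e.g.
#   Keyword("QUALITY") == 0
#   -> {"keyword": {"QUALITY": {"operator": "=", "value": 0}}}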
class Segment(SimpleAttr):
"""
Segments choose which files to download when there are more than
one present for each record e.g. 'image'.
"""
def collides(self, other):
return False
class Protocol(SimpleAttr):
"""
The type of download to request one of
("FITS", "JPEG", "MPG", "MP4", or "as-is").
Only FITS is supported, the others will require extra keywords.
"""
class Notify(SimpleAttr):
"""
An email address to get a notification to when JSOC has staged your request.
"""
def __init__(self, value):
super().__init__(value)
if value.find('@') == -1:
raise ValueError("Notify attribute must contain an '@' symbol "
"to be a valid email address")
self.value = value
class Cutout(DataAttr):
"""
Select a cutout region.
The JSOC allows for users to request cutouts. This process is performed server
side so as to allow users to download only the portions of the full-disk images
they are interested in. For a detailed explanation of the routine
used to perform these cutouts on the JSOC server, see
http://jsoc.stanford.edu/doxygen_html/group__im__patch.html.
Parameters
----------
bottom_left : `~astropy.coordinates.SkyCoord`
Coordinate for the bottom left corner of the cutout.
top_right : `~astropy.coordinates.SkyCoord`, optional
Coordinate for the top right corner of the cutout. If this is
not specified, both ``width`` and ``height`` must both be specified.
width : `~astropy.units.Quantity`, optional
Width of the cutout. If this parameter, along with ``height``, is
not specified, ``top_right`` must be specified.
height : `~astropy.units.Quantity`, optional
Height of the cutout. If this parameter, along with ``width``, is
not specified, ``top_right`` must be specified.
tracking : `bool`, optional
If True, the field of view follows the rotation of the Sun
register : `bool`, optional
If True, use sub-pixel registration when cropping to the target location.
nan_off_limb : `bool`, optional
If True, all off-limb pixels are set to NaN
See Also
--------
sunpy.coordinates.utils.get_rectangle_coordinates
"""
@u.quantity_input
def __init__(self, bottom_left, top_right=None, width: u.arcsec = None,
height: u.arcsec = None, tracking=False, register=False,
nan_off_limb=False):
super().__init__()
bl, tr = get_rectangle_coordinates(bottom_left, top_right=top_right, width=width, height=height)
self.value = {
't_ref': bl.obstime.isot,
            # The JSOC parameter means "disable tracking", so take the negation of tracking
't': int(not tracking),
'r': int(register),
'c': int(nan_off_limb),
'locunits': 'arcsec',
'boxunits': 'arcsec',
'x': ((bl.Tx + tr.Tx) / 2).to('arcsec').value,
'y': ((bl.Ty + tr.Ty) / 2).to('arcsec').value,
'width': (tr.Tx - bl.Tx).to('arcsec').value,
'height': (tr.Ty - bl.Ty).to('arcsec').value,
}
def collides(self, other):
return isinstance(other, self.__class__)
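# Illustrative construction of a Cutout (coordinates are placeholders and
# assume sunpy's Helioprojective frame is importable):
#   from astropy.coordinates import SkyCoord
#   import sunpy.coordinates  # registers solar coordinate frames
#   bl = SkyCoord(-200 * u.arcsec, -200 * u.arcsec, obstime="2020-01-01",
#                 observer="earth", frame="helioprojective")
#   tr = SkyCoord(200 * u.arcsec, 200 * u.arcsec, obstime="2020-01-01",
#                 observer="earth", frame="helioprojective")
#   cutout = Cutout(bl, top_right=tr, tracking=True)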
walker = AttrWalker()
@walker.add_creator(AttrOr)
def _create1(wlk, query):
qblocks = []
for iattr in query.attrs:
qblocks.extend(wlk.create(iattr))
return qblocks
@walker.add_creator(AttrAnd, DataAttr)
def METHOD_NAME(wlk, query):
map_ = {}
wlk.apply(query, map_)
return [map_]
@walker.add_applier(AttrAnd)
def _apply(wlk, query, imap):
for iattr in query.attrs:
wlk.apply(iattr, imap)
@walker.add_applier(SimpleAttr)
def _apply1(wlk, query, imap):
imap[query.__class__.__name__.lower()] = query.value
@walker.add_applier(PrimeKey)
def _apply1(wlk, query, imap):
key = 'primekey'
if key in imap:
imap[key][query.label] = query.value
else:
imap[key] = {query.label: query.value}
@walker.add_applier(Keyword)
def _apply1(wlk, query, imap):
raise ValueError(f"Keyword '{query.value}' needs to have a comparison to a value.")
@walker.add_applier(KeywordComparison)
def _apply1(wlk, query, imap):
key = 'keyword'
if key in imap:
imap[key][query.name] = {"operator": query.operator, "value": query.value}
else:
imap[key] = {f"{query.name}": {"operator": query.operator, "value": query.value}}
@walker.add_applier(Segment)
def _apply1(wlk, query, imap):
key = 'segment'
if key in imap:
imap[key].append(query.value)
else:
imap[key] = [query.value]
@walker.add_applier(Cutout)
def _apply1(wlk, query, imap):
imap[query.__class__.__name__.lower()] = query.value
@walker.add_applier(Time)
def _apply1(wlk, query, imap):
imap['start_time'] = query.start
imap['end_time'] = query.end
@walker.add_applier(Wavelength)
def _apply1(wlk, query, imap):
if query.min != query.max:
raise ValueError(
"For JSOC queries Wavelength.min must equal Wavelength.max")
imap[query.__class__.__name__.lower()] = query.min |
get private endpoint connection | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
A private endpoint connection
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
The ID of the private endpoint connection.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the private endpoint connection.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private endpoint.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current provisioning state.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
type=self.type)
def METHOD_NAME(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters
Azure REST API version: 2023-04-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:containerservice:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(METHOD_NAME)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
To learn more about private clusters, see: https://docs.microsoft.com/azure/aks/private-clusters
Azure REST API version: 2023-04-01.
:param str private_endpoint_connection_name: The name of the private endpoint connection.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str resource_name: The name of the managed cluster resource.
"""
... |
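# Illustrative call (placeholder resource names; the synchronous function is
# METHOD_NAME in this dump, exported as get_private_endpoint_connection):
#   conn = get_private_endpoint_connection(
#       private_endpoint_connection_name="pec1",
#       resource_group_name="rg1",
#       resource_name="aks1",
#   )
#   conn.provisioning_state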
main | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2018, Stephan Schwarz <[email protected]>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: utm_ca_host_key_cert
author:
- Stephan Schwarz (@stearz)
short_description: Create, update or destroy ca host_key_cert entry in Sophos UTM
description:
- Create, update or destroy a ca host_key_cert entry in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
attributes:
check_mode:
support: none
diff_mode:
support: none
options:
name:
description:
- The name of the object. Will be used to identify the entry.
required: true
type: str
ca:
description:
- A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
required: true
type: str
meta:
description:
- A reference to an existing utm_ca_meta_x509 object.
required: true
type: str
certificate:
description:
- The certificate in PEM format.
required: true
type: str
comment:
description:
- Optional comment string.
type: str
encrypted:
description:
- Optionally enable encryption.
default: false
type: bool
key:
description:
- Optional private key in PEM format.
type: str
extends_documentation_fragment:
- community.general.utm
- community.general.attributes
'''
EXAMPLES = """
- name: Create a ca_host_key_cert entry
community.general.utm_ca_host_key_cert:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestHostKeyCertEntry
ca: REF_ca/signing_ca_OBJECT_STRING
meta: REF_ca/meta_x509_OBJECT_STRING
certificate: |
--- BEGIN CERTIFICATE ---
. . .
. . .
. . .
--- END CERTIFICATE ---
state: present
- name: Remove a ca_host_key_cert entry
community.general.utm_ca_host_key_cert:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestHostKeyCertEntry
state: absent
- name: Read a ca_host_key_cert entry
community.general.utm_ca_host_key_cert:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestHostKeyCertEntry
state: info
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: str
_locked:
description: Whether or not the object is currently locked
type: bool
_type:
description: The type of the object
type: str
name:
description: The name of the object
type: str
ca:
description: A reference to an existing utm_ca_signing_ca or utm_ca_verification_ca object.
type: str
meta:
description: A reference to an existing utm_ca_meta_x509 object.
type: str
certificate:
description: The certificate in PEM format
type: str
comment:
description: Comment string (may be empty string)
type: str
encrypted:
description: If encryption is enabled
type: bool
key:
description: Private key in PEM format (may be empty string)
type: str
"""
from ansible_collections.community.general.plugins.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils.common.text.converters import to_native
def METHOD_NAME():
endpoint = "ca/host_key_cert"
key_to_check_for_changes = ["ca", "certificate", "comment", "encrypted", "key", "meta"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
ca=dict(type='str', required=True),
meta=dict(type='str', required=True),
certificate=dict(type='str', required=True),
comment=dict(type='str', required=False),
encrypted=dict(type='bool', required=False, default=False),
key=dict(type='str', required=False, no_log=True),
)
)
try:
# This is needed because the bool value only accepts int values in the backend
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
METHOD_NAME() |
create test suites | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Oppia test suite.
In general, this script should not be run directly. Instead, invoke
it from the command line by running
python -m scripts.run_backend_tests
from the oppia/ root folder.
"""
from __future__ import annotations
import argparse
import os
import sys
import unittest
from typing import Final, List, Optional
sys.path.insert(1, os.getcwd())
from scripts import common # isort:skip pylint: disable=wrong-import-position, wrong-import-order
CURR_DIR: Final = os.path.abspath(os.getcwd())
OPPIA_TOOLS_DIR: Final = os.path.join(CURR_DIR, '..', 'oppia_tools')
THIRD_PARTY_DIR: Final = os.path.join(CURR_DIR, 'third_party')
THIRD_PARTY_PYTHON_LIBS_DIR: Final = os.path.join(
THIRD_PARTY_DIR, 'python_libs'
)
GOOGLE_APP_ENGINE_SDK_HOME: Final = os.path.join(
OPPIA_TOOLS_DIR, 'google-cloud-sdk-335.0.0', 'google-cloud-sdk', 'platform',
'google_appengine')
_PARSER: Final = argparse.ArgumentParser()
_PARSER.add_argument(
'--test_target',
help='optional dotted module name of the test(s) to run',
type=str)
def METHOD_NAME(
test_target: Optional[str] = None
) -> List[unittest.TestSuite]:
"""Creates test suites. If test_target is None, runs all tests.
Args:
test_target: str. The name of the test script.
            Defaults to None if not specified.
Returns:
list. A list of tests within the test script.
Raises:
        Exception. The delimiter in the test_target should be a dot (.)
"""
if test_target and '/' in test_target:
raise Exception('The delimiter in test_target should be a dot (.)')
loader = unittest.TestLoader()
master_test_suite = (
loader.loadTestsFromName(test_target)
if test_target else
loader.discover(
CURR_DIR,
pattern='[^core/tests/data]*_test.py',
top_level_dir=CURR_DIR
)
)
return [master_test_suite]
def main(args: Optional[List[str]] = None) -> None:
"""Runs the tests.
Args:
args: list. A list of arguments to parse.
Raises:
Exception. Directory invalid_path does not exist.
"""
parsed_args = _PARSER.parse_args(args=args)
for directory in common.DIRS_TO_ADD_TO_SYS_PATH:
if not os.path.exists(os.path.dirname(directory)):
raise Exception('Directory %s does not exist.' % directory)
sys.path.insert(0, directory)
# Remove coverage from path since it causes conflicts with the Python html
# library. The problem is that coverage library has a file named html.py,
# then when bs4 library attempts to do 'from html.entities import ...',
# it will fail with error "No module named 'html.entities';
# 'html' is not a package". This happens because Python resolves to
# the html.py file in coverage instead of the native html library.
sys.path = [path for path in sys.path if 'coverage' not in path]
# The devappserver function fixes the system path by adding certain google
# appengine libraries that we need in oppia to the system path. The Google
# Cloud SDK comes with certain packages preinstalled including webapp2,
# jinja2, and pyyaml so this function makes sure that those libraries are
# installed.
import dev_appserver
dev_appserver.fix_sys_path()
# We are using both google app engine apis that are contained in the Google
# Cloud SDK folder, and also google cloud apis that are installed in our
# 'third_party/python_libs' directory. Therefore, there is a confusion of
# where the google module is located and which google module to import from.
# The following code ensures that the google module that python looks at
# imports from the 'third_party/python_libs' folder so that the imports are
# correct.
google_path = os.path.join(THIRD_PARTY_PYTHON_LIBS_DIR, 'google')
google_module = sys.modules['google']
    # TODO(#15913): Here we use a MyPy ignore because MyPy considers the
    # '__path__' attribute to be undefined on the Module type. This is
    # because the Module type is annotated incorrectly internally, which
    # can be fixed once we upgrade the library.
google_module.__path__ = [google_path, THIRD_PARTY_PYTHON_LIBS_DIR] # type: ignore[attr-defined]
google_module.__file__ = os.path.join(google_path, '__init__.py')
suites = METHOD_NAME(
test_target=parsed_args.test_target,
)
results = [unittest.TextTestRunner(verbosity=2).run(suite)
for suite in suites]
for result in results:
if result.errors or result.failures:
raise Exception(
'Test suite failed: %s tests run, %s errors, %s failures.' % (
result.testsRun, len(result.errors), len(result.failures)))
if __name__ == '__main__':
main() |
copy | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import annotations
__all__ = ("IndexedOptionForm",)
from collections.abc import Callable
import awkward as ak
from awkward._nplikes.numpylike import NumpyMetadata
from awkward._parameters import parameters_union, type_parameters_equal
from awkward._typing import Iterator, JSONSerializable, Self, final
from awkward._util import UNSET
from awkward.forms.form import Form, index_to_dtype
np = NumpyMetadata.instance()
@final
class IndexedOptionForm(Form):
is_option = True
is_indexed = True
def __init__(
self,
index,
content,
*,
parameters=None,
form_key=None,
):
if not isinstance(index, str):
raise TypeError(
f"{type(self).__name__} 'index' must be of type str, not {index!r}"
)
if not isinstance(content, Form):
raise TypeError(
"{} all 'contents' must be Form subclasses, not {}".format(
type(self).__name__, repr(content)
)
)
self._index = index
self._content = content
self._init(parameters=parameters, form_key=form_key)
@property
def index(self):
return self._index
@property
def content(self):
return self._content
def METHOD_NAME(
self,
index=UNSET,
content=UNSET,
*,
parameters=UNSET,
form_key=UNSET,
):
return IndexedOptionForm(
self._index if index is UNSET else index,
self._content if content is UNSET else content,
parameters=self._parameters if parameters is UNSET else parameters,
form_key=self._form_key if form_key is UNSET else form_key,
)
@classmethod
def simplified(
cls,
index,
content,
*,
parameters=None,
form_key=None,
):
is_cat = parameters is not None and parameters.get("__array__") == "categorical"
if content.is_union and not is_cat:
return content._union_of_optionarrays(index, parameters)
elif content.is_indexed or content.is_option:
return ak.forms.IndexedOptionForm.simplified(
"i64",
content.content,
parameters=parameters_union(content._parameters, parameters),
)
else:
return cls(index, content, parameters=parameters, form_key=form_key)
def __repr__(self):
args = [repr(self._index), repr(self._content), *self._repr_args()]
return "{}({})".format(type(self).__name__, ", ".join(args))
def _to_dict_part(self, verbose, toplevel):
return self._to_dict_extra(
{
"class": "IndexedOptionArray",
"index": self._index,
"content": self._content._to_dict_part(verbose, toplevel=False),
},
verbose,
)
@property
def type(self):
if self.parameter("__array__") == "categorical":
parameters = dict(self._parameters)
del parameters["__array__"]
parameters["__categorical__"] = True
else:
parameters = self._parameters
return ak.types.OptionType(
self._content.type,
parameters=parameters,
).simplify_option_union()
def __eq__(self, other):
if isinstance(other, IndexedOptionForm):
return (
self._form_key == other._form_key
and self._index == other._index
and type_parameters_equal(self._parameters, other._parameters)
and self._content == other._content
)
else:
return False
def purelist_parameters(self, *keys: str) -> JSONSerializable:
if self._parameters is not None:
for key in keys:
if key in self._parameters:
return self._parameters[key]
return self._content.purelist_parameters(*keys)
@property
def purelist_isregular(self):
return self._content.purelist_isregular
@property
def purelist_depth(self):
return self._content.purelist_depth
@property
def is_identity_like(self):
return self._content.is_identity_like
@property
def minmax_depth(self):
return self._content.minmax_depth
@property
def branch_depth(self):
return self._content.branch_depth
@property
def fields(self):
return self._content.fields
@property
def is_tuple(self):
return self._content.is_tuple
@property
def dimension_optiontype(self):
return True
def _columns(self, path, output, list_indicator):
self._content._columns(path, output, list_indicator)
def _prune_columns(self, is_inside_record_or_union: bool) -> Self | None:
next_content = self._content._prune_columns(is_inside_record_or_union)
if next_content is None:
return None
else:
return IndexedOptionForm(
self._index,
next_content,
parameters=self._parameters,
form_key=self._form_key,
)
def _select_columns(self, match_specifier):
return IndexedOptionForm(
self._index,
self._content._select_columns(match_specifier),
parameters=self._parameters,
form_key=self._form_key,
)
def _column_types(self):
return self._content._column_types()
def __setstate__(self, state):
if isinstance(state, dict):
# read data pickled in Awkward 2.x
self.__dict__.update(state)
else:
# read data pickled in Awkward 1.x
# https://github.com/scikit-hep/awkward/blob/main-v1/src/python/forms.cpp#L324-L330
has_identities, parameters, form_key, index, content = state
if form_key is not None:
form_key = "part0-" + form_key # only the first partition
self.__init__(index, content, parameters=parameters, form_key=form_key)
def _expected_from_buffers(
self, getkey: Callable[[Form, str], str]
) -> Iterator[tuple[str, np.dtype]]:
yield (getkey(self, "index"), index_to_dtype[self._index])
yield from self._content._expected_from_buffers(getkey) |
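# Minimal sketch of constructing this form (uses awkward's public Form classes):
#   form = IndexedOptionForm("i64", ak.forms.NumpyForm("float64"))
#   str(form.type)  # -> '?float64', i.e. an option type over float64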
build | import tensorflow as tf
from tensorflow.keras import backend as K
from spektral.layers import ops
from spektral.layers.convolutional.conv import Conv
class GTVConv(Conv):
r"""
A graph total variation convolutional layer (GTVConv) from the paper
> [Total Variation Graph Neural Networks](https://arxiv.org/abs/2211.06218)<br>
> Jonas Berg Hansen and Filippo Maria Bianchi
**Mode**: single, disjoint, batch.
This layer computes
$$
\X' = \sigma\left[\left(\I - \delta\L_\hat{\mathbf{\Gamma}}\right) \tilde{\X} \right]
$$
where
$$
\begin{align}
\tilde{\X} &= \X \W\\[5pt]
\L_\hat{\mathbf{\Gamma}} &= \D_\mathbf{\hat{\Gamma}} - \hat{\mathbf{\Gamma}}\\[5pt]
[\hat{\mathbf{\Gamma}}]_{ij} &= \frac{[\mathbf{A}]_{ij}}{\max\{||\tilde{\x}_i-\tilde{\x}_j||_1, \epsilon\}}\\
\end{align}
$$
**Input**
- Node features of shape `(batch, n_nodes, n_node_features)`;
- Adjacency matrix of shape `(batch, n_nodes, n_nodes)`;
**Output**
- Node features with the same shape as the input, but with the last
dimension changed to `channels`.
**Arguments**
- `channels`: number of output channels;
- `delta_coeff`: step size for gradient descent of GTV
- `epsilon`: small number used to numerically stabilize the computation of new adjacency weights
- `activation`: activation function;
- `use_bias`: bool, add a bias vector to the output;
- `kernel_initializer`: initializer for the weights;
- `bias_initializer`: initializer for the bias vector;
- `kernel_regularizer`: regularization applied to the weights;
- `bias_regularizer`: regularization applied to the bias vector;
- `activity_regularizer`: regularization applied to the output;
- `kernel_constraint`: constraint applied to the weights;
- `bias_constraint`: constraint applied to the bias vector.
"""
def __init__(
self,
channels,
delta_coeff=1.0,
epsilon=0.001,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs,
):
super().__init__(
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
**kwargs,
)
self.channels = channels
self.delta_coeff = delta_coeff
self.epsilon = epsilon
def METHOD_NAME(self, input_shape):
assert len(input_shape) >= 2
input_dim = input_shape[0][-1]
self.kernel = self.add_weight(
shape=(input_dim, self.channels),
initializer=self.kernel_initializer,
name="kernel",
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.channels,),
initializer=self.bias_initializer,
name="bias",
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
)
self.built = True
def call(self, inputs, mask=None):
x, a = inputs
mode = ops.autodetect_mode(x, a)
x = K.dot(x, self.kernel)
if mode == ops.modes.SINGLE:
output = self._call_single(x, a)
elif mode == ops.modes.BATCH:
output = self._call_batch(x, a)
if self.use_bias:
output = K.bias_add(output, self.bias)
if mask is not None:
output *= mask[0]
output = self.activation(output)
return output
def _call_single(self, x, a):
if K.is_sparse(a):
index_i = a.indices[:, 0]
index_j = a.indices[:, 1]
n_nodes = tf.shape(a, out_type=index_i.dtype)[0]
# Compute absolute differences between neighbouring nodes
abs_diff = tf.math.abs(
tf.transpose(tf.gather(x, index_i))
- tf.transpose(tf.gather(x, index_j))
)
abs_diff = tf.math.reduce_sum(abs_diff, axis=0)
# Compute new adjacency matrix
gamma = tf.sparse.map_values(
tf.multiply, a, 1 / tf.math.maximum(abs_diff, self.epsilon)
)
# Compute degree matrix from gamma matrix
d_gamma = tf.sparse.SparseTensor(
tf.stack([tf.range(n_nodes)] * 2, axis=1),
tf.sparse.reduce_sum(gamma, axis=-1),
[n_nodes, n_nodes],
)
            # Compute laplacian: L = D_gamma - Gamma
l = tf.sparse.add(d_gamma, tf.sparse.map_values(tf.multiply, gamma, -1.0))
# Compute adjusted laplacian: L_adjusted = I - delta*L
l = tf.sparse.add(
tf.sparse.eye(n_nodes, dtype=x.dtype),
tf.sparse.map_values(tf.multiply, l, -self.delta_coeff),
)
# Aggregate features with adjusted laplacian
output = ops.modal_dot(l, x)
else:
n_nodes = tf.shape(a)[-1]
abs_diff = tf.math.abs(x[:, tf.newaxis, :] - x)
abs_diff = tf.reduce_sum(abs_diff, axis=-1)
gamma = a / tf.math.maximum(abs_diff, self.epsilon)
degrees = tf.math.reduce_sum(gamma, axis=-1)
l = -gamma
l = tf.linalg.set_diag(l, degrees - tf.linalg.diag_part(gamma))
l = tf.eye(n_nodes, dtype=x.dtype) - self.delta_coeff * l
output = tf.matmul(l, x)
return output
def _call_batch(self, x, a):
n_nodes = tf.shape(a)[-1]
abs_diff = tf.reduce_sum(
tf.math.abs(tf.expand_dims(x, 2) - tf.expand_dims(x, 1)), axis=-1
)
gamma = a / tf.math.maximum(abs_diff, self.epsilon)
degrees = tf.math.reduce_sum(gamma, axis=-1)
l = -gamma
l = tf.linalg.set_diag(l, degrees - tf.linalg.diag_part(gamma))
l = tf.eye(n_nodes, dtype=x.dtype) - self.delta_coeff * l
output = tf.matmul(l, x)
return output
@property
def config(self):
return {
"channels": self.channels,
"delta_coeff": self.delta_coeff,
"epsilon": self.epsilon,
} |
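# Illustrative usage in batch mode (shapes are placeholders):
#   n_nodes, n_feat = 10, 4
#   x_in = tf.keras.Input(shape=(n_nodes, n_feat))
#   a_in = tf.keras.Input(shape=(n_nodes, n_nodes))
#   out = GTVConv(16, delta_coeff=1.0, activation="relu")([x_in, a_in])
#   model = tf.keras.Model([x_in, a_in], out)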
run handler | # mautrix-telegram - A Matrix-Telegram puppeting bridge
# Copyright (C) 2021 Tulir Asokan
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Awaitable, Callable, NamedTuple
from telethon.errors import FloodWaitError
from mautrix.bridge.commands import (
CommandEvent as BaseCommandEvent,
CommandHandler as BaseCommandHandler,
CommandHandlerFunc,
CommandProcessor as BaseCommandProcessor,
HelpSection,
command_handler as base_command_handler,
)
from mautrix.types import EventID, MessageEventContent, RoomID
from mautrix.util.format_duration import format_duration
from .. import portal as po, user as u
if TYPE_CHECKING:
from ..__main__ import TelegramBridge
class HelpCacheKey(NamedTuple):
is_management: bool
is_portal: bool
puppet_whitelisted: bool
matrix_puppet_whitelisted: bool
is_admin: bool
is_logged_in: bool
SECTION_AUTH = HelpSection("Authentication", 10, "")
SECTION_CREATING_PORTALS = HelpSection("Creating portals", 20, "")
SECTION_PORTAL_MANAGEMENT = HelpSection("Portal management", 30, "")
SECTION_MISC = HelpSection("Miscellaneous", 40, "")
SECTION_ADMIN = HelpSection("Administration", 50, "")
class CommandEvent(BaseCommandEvent):
sender: u.User
portal: po.Portal
def __init__(
self,
processor: CommandProcessor,
room_id: RoomID,
event_id: EventID,
sender: u.User,
command: str,
args: list[str],
content: MessageEventContent,
portal: po.Portal | None,
is_management: bool,
has_bridge_bot: bool,
) -> None:
super().__init__(
processor,
room_id,
event_id,
sender,
command,
args,
content,
portal,
is_management,
has_bridge_bot,
)
self.bridge = processor.bridge
self.tgbot = processor.tgbot
self.config = processor.config
self.public_website = processor.public_website
@property
def print_error_traceback(self) -> bool:
return self.sender.is_admin
async def get_help_key(self) -> HelpCacheKey:
return HelpCacheKey(
self.is_management,
self.portal is not None,
self.sender.puppet_whitelisted,
self.sender.matrix_puppet_whitelisted,
self.sender.is_admin,
await self.sender.is_logged_in(),
)
class CommandHandler(BaseCommandHandler):
name: str
needs_puppeting: bool
needs_matrix_puppeting: bool
def __init__(
self,
handler: Callable[[CommandEvent], Awaitable[EventID]],
management_only: bool,
name: str,
help_text: str,
help_args: str,
help_section: HelpSection,
needs_auth: bool,
needs_puppeting: bool,
needs_matrix_puppeting: bool,
needs_admin: bool,
**kwargs,
) -> None:
super().__init__(
handler,
management_only,
name,
help_text,
help_args,
help_section,
needs_auth=needs_auth,
needs_puppeting=needs_puppeting,
needs_matrix_puppeting=needs_matrix_puppeting,
needs_admin=needs_admin,
**kwargs,
)
async def get_permission_error(self, evt: CommandEvent) -> str | None:
if self.needs_puppeting and not evt.sender.puppet_whitelisted:
return "That command is limited to users with puppeting privileges."
elif self.needs_matrix_puppeting and not evt.sender.matrix_puppet_whitelisted:
return "That command is limited to users with full puppeting privileges."
return await super().get_permission_error(evt)
def has_permission(self, key: HelpCacheKey) -> bool:
return (
super().has_permission(key)
and (not self.needs_puppeting or key.puppet_whitelisted)
and (not self.needs_matrix_puppeting or key.matrix_puppet_whitelisted)
)
def command_handler(
_func: CommandHandlerFunc | None = None,
*,
needs_auth: bool = True,
needs_puppeting: bool = True,
needs_matrix_puppeting: bool = False,
needs_admin: bool = False,
management_only: bool = False,
name: str | None = None,
help_text: str = "",
help_args: str = "",
help_section: HelpSection = None,
) -> Callable[[CommandHandlerFunc], CommandHandler]:
return base_command_handler(
_func,
_handler_class=CommandHandler,
name=name,
help_text=help_text,
help_args=help_args,
help_section=help_section,
management_only=management_only,
needs_auth=needs_auth,
needs_admin=needs_admin,
needs_puppeting=needs_puppeting,
needs_matrix_puppeting=needs_matrix_puppeting,
)
class CommandProcessor(BaseCommandProcessor):
def __init__(self, bridge: "TelegramBridge") -> None:
super().__init__(event_class=CommandEvent, bridge=bridge)
self.tgbot = bridge.bot
self.public_website = bridge.public_website
@staticmethod
async def METHOD_NAME(
handler: Callable[[CommandEvent], Awaitable[Any]], evt: CommandEvent
) -> Any:
try:
return await handler(evt)
except FloodWaitError as e:
return await evt.reply(f"Flood error: Please wait {format_duration(e.seconds)}") |
select platforms | import Xlib.display
import glob
import warnings
import os
import ctypes.util
import xml.etree.ElementTree
class Request:
def __init__(self, system, width, height, x_display, headless):
self.system = system
self.width = width
self.height = height
self.x_display = x_display
self.headless = headless
class BasePlatform:
enabled = True
@classmethod
def validate(cls, r):
return []
@classmethod
def dependency_instructions(cls, request):
return None
@classmethod
def is_valid(cls, request):
return len(cls.validate(request)) == 0
@classmethod
def name(cls):
return cls.__name__
@classmethod
def launch_env(cls, width, height, x_display):
return {}
class BaseLinuxPlatform(BasePlatform):
@classmethod
def executable_path(cls, base_dir, name):
return os.path.join(base_dir, name)
@classmethod
def old_executable_path(cls, base_dir, name):
return cls.executable_path(base_dir, name)
class Linux64(BaseLinuxPlatform):
@classmethod
def dependency_instructions(cls, request):
message = "Linux64 requires a X11 server to be running with GLX. "
displays = cls._valid_x_displays(request.width, request.height)
if displays:
message += "The following valid displays were found %s" % (
", ".join(displays)
)
else:
message += "If you have a NVIDIA GPU, please run: sudo ai2thor-xorg start"
return message
@classmethod
def _select_x_display(cls, width, height):
valid_displays = cls._valid_x_displays(width, height)
if valid_displays:
return valid_displays[0]
else:
return None
@classmethod
def launch_env(cls, width, height, x_display):
env = dict(DISPLAY=x_display)
if env["DISPLAY"] is None:
env["DISPLAY"] = cls._select_x_display(width, height)
return env
@classmethod
def _validate_screen(cls, display_screen_str, width, height):
errors = []
try:
disp_screen = Xlib.display.Display(
display_screen_str
) # display_screen_str will have the format ":0.1"
screen_parts = display_screen_str.split(".")
if len(screen_parts) > 1:
# this Xlib.display will find a valid screen if an
# invalid screen was passed in (e.g. :0.9999999 -> :0.1)
if screen_parts[1] != str(disp_screen.get_default_screen()):
errors.append(
"Invalid display, non-existent screen: %s" % display_screen_str
)
if "GLX" not in disp_screen.list_extensions():
errors.append(
"Display %s does not have the GLX extension loaded. GLX is required by Unity3D."
% display_screen_str
)
if (
disp_screen.screen()["width_in_pixels"] < width
or disp_screen.screen()["height_in_pixels"] < height
):
errors.append(
"Display %s does not have a large enough resolution for the target resolution: %sx%s vs. %sx%s"
% (
display_screen_str,
width,
height,
disp_screen.screen()["width_in_pixels"],
disp_screen.screen()["height_in_pixels"],
)
)
if disp_screen.screen()["root_depth"] != 24:
errors.append(
"Display %s does not have a color depth of 24: %s"
% (display_screen_str, disp_screen.screen()["root_depth"])
)
except (Xlib.error.DisplayNameError, Xlib.error.DisplayConnectionError) as e:
errors.append(
"Invalid display: %s. Failed to connect %s " % (display_screen_str, e)
)
return errors
@classmethod
def _is_valid_screen(cls, display_screen_str, width, height):
return len(cls._validate_screen(display_screen_str, width, height)) == 0
@classmethod
def _valid_x_displays(cls, width, height):
open_display_strs = [
int(os.path.basename(s)[1:]) for s in glob.glob("/tmp/.X11-unix/X[0-9]*")
]
valid_displays = []
for display_str in open_display_strs:
try:
disp = Xlib.display.Display(":%s" % display_str)
for screen in range(0, disp.screen_count()):
disp_screen_str = ":%s.%s" % (display_str, screen)
if cls._is_valid_screen(disp_screen_str, width, height):
valid_displays.append(disp_screen_str)
except Xlib.error.DisplayConnectionError as e:
warnings.warn(
"could not connect to X Display: %s, %s" % (display_str, e)
)
return valid_displays
@classmethod
def validate(cls, request):
if request.headless:
return []
elif request.x_display:
return cls._validate_screen(
request.x_display, request.width, request.height
)
elif cls._select_x_display(request.width, request.height) is None:
return ["No valid X display found"]
else:
return []
class OSXIntel64(BasePlatform):
@classmethod
def old_executable_path(cls, base_dir, name):
return os.path.join(base_dir, name + ".app", "Contents/MacOS", name)
@classmethod
def executable_path(cls, base_dir, name):
plist = cls.parse_plist(base_dir, name)
return os.path.join(
base_dir, name + ".app", "Contents/MacOS", plist["CFBundleExecutable"]
)
@classmethod
def parse_plist(cls, base_dir, name):
plist_path = os.path.join(base_dir, name + ".app", "Contents/Info.plist")
with open(plist_path) as f:
plist = f.read()
root = xml.etree.ElementTree.fromstring(plist)
keys = [x.text for x in root.findall("dict/key")]
values = [x.text for x in root.findall("dict/string")]
return dict(zip(keys, values))
class CloudRendering(BaseLinuxPlatform):
enabled = True
@classmethod
def dependency_instructions(cls, request):
return "CloudRendering requires libvulkan1. Please install by running: sudo apt-get -y install libvulkan1"
@classmethod
def failure_message(cls):
pass
@classmethod
def validate(cls, request):
if ctypes.util.find_library("vulkan") is not None:
return []
else:
return ["Vulkan API driver missing."]
class WebGL(BasePlatform):
pass
class StandaloneWindows64(BasePlatform):
@classmethod
def executable_path(cls, base_dir, name):
return os.path.join(base_dir, name)
def METHOD_NAME(request):
candidates = []
system_platform_map = dict(Linux=(Linux64,), Darwin=(OSXIntel64,), Windows=(StandaloneWindows64,))
for p in system_platform_map.get(request.system, ()):
if not p.enabled:
continue
#
# if p == CloudRendering and request.x_display is not None:
# continue
candidates.append(p)
return candidates
STR_PLATFORM_MAP = dict(
CloudRendering=CloudRendering, Linux64=Linux64, OSXIntel64=OSXIntel64, WebGL=WebGL, StandaloneWindows64=StandaloneWindows64
) |
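# Illustrative selection (the selector function is METHOD_NAME in this dump):
#   req = Request(system="Linux", width=300, height=300, x_display=":0", headless=False)
#   METHOD_NAME(req)  # -> [Linux64]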
rsync | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import platform
import shutil
import subprocess
from pathlib import Path
from typing import List
import mozfile
from mozbuild.util import ensureParentDir
is_linux = platform.system() == "Linux"
is_osx = platform.system() == "Darwin"
def chmod(dir):
"Set permissions of DMG contents correctly"
subprocess.check_call(["chmod", "-R", "a+rX,a-st,u+w,go-w", dir])
def METHOD_NAME(source: Path, dest: Path):
"rsync the contents of directory source into directory dest"
# Ensure a trailing slash on directories so rsync copies the *contents* of source.
raw_source = str(source)
if source.is_dir():
raw_source = str(source) + "/"
subprocess.check_call(["rsync", "-a", "--copy-unsafe-links", raw_source, dest])
def set_folder_icon(dir: Path, tmpdir: Path, hfs_tool: Path = None):
"Set HFS attributes of dir to use a custom icon"
if is_linux:
hfs = tmpdir / "staged.hfs"
subprocess.check_call([hfs_tool, hfs, "attr", "/", "C"])
elif is_osx:
subprocess.check_call(["SetFile", "-a", "C", dir])
def generate_hfs_file(
stagedir: Path, tmpdir: Path, volume_name: str, mkfshfs_tool: Path
):
"""
    When cross-compiling, we zero-fill an HFS file that we will turn into
    a DMG. To do so we measure the size of the staged dir and add some slight
    padding to it.
"""
hfs = tmpdir / "staged.hfs"
output = subprocess.check_output(["du", "-s", stagedir])
size = int(output.split()[0]) / 1000 # Get in MB
size = int(size * 1.02) # Bump the used size slightly larger.
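    # Worked example (illustrative): if `du -s` reports 204800 1K-blocks (200 MiB),
    # size is 204800 / 1000 = 204.8, bumped to int(204.8 * 1.02) = 208, so the
    # dd call below creates a 208 MB zero-filled image.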
    # Set up a file of the proper size, filled with zeros
subprocess.check_call(
[
"dd",
"if=/dev/zero",
"of={}".format(hfs),
"bs=1M",
"count={}".format(size),
]
)
subprocess.check_call([mkfshfs_tool, "-v", volume_name, hfs])
def create_app_symlink(stagedir: Path, tmpdir: Path, hfs_tool: Path = None):
"""
Make a symlink to /Applications. The symlink name is a space
so we don't have to localize it. The Applications folder icon
will be shown in Finder, which should be clear enough for users.
"""
if is_linux:
hfs = os.path.join(tmpdir, "staged.hfs")
subprocess.check_call([hfs_tool, hfs, "symlink", "/ ", "/Applications"])
elif is_osx:
os.symlink("/Applications", stagedir / " ")
def create_dmg_from_staged(
stagedir: Path,
output_dmg: Path,
tmpdir: Path,
volume_name: str,
hfs_tool: Path = None,
dmg_tool: Path = None,
):
"Given a prepared directory stagedir, produce a DMG at output_dmg."
if is_linux:
# The dmg tool doesn't create the destination directories, and silently
# returns success if the parent directory doesn't exist.
ensureParentDir(output_dmg)
hfs = os.path.join(tmpdir, "staged.hfs")
subprocess.check_call([hfs_tool, hfs, "addall", stagedir])
subprocess.check_call(
[dmg_tool, "build", hfs, output_dmg],
# dmg is seriously chatty
stdout=subprocess.DEVNULL,
)
elif is_osx:
hybrid = tmpdir / "hybrid.dmg"
subprocess.check_call(
[
"hdiutil",
"makehybrid",
"-hfs",
"-hfs-volume-name",
volume_name,
"-hfs-openfolder",
stagedir,
"-ov",
stagedir,
"-o",
hybrid,
]
)
subprocess.check_call(
[
"hdiutil",
"convert",
"-format",
"UDBZ",
"-imagekey",
"bzip2-level=9",
"-ov",
hybrid,
"-o",
output_dmg,
]
)
def create_dmg(
source_directory: Path,
output_dmg: Path,
volume_name: str,
extra_files: List[tuple],
dmg_tool: Path,
hfs_tool: Path,
mkfshfs_tool: Path,
):
"""
Create a DMG disk image at the path output_dmg from source_directory.
Use volume_name as the disk image volume name, and
use extra_files as a list of tuples of (filename, relative path) to copy
into the disk image.
"""
if platform.system() not in ("Darwin", "Linux"):
raise Exception("Don't know how to build a DMG on '%s'" % platform.system())
with mozfile.TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
stagedir = tmpdir / "stage"
stagedir.mkdir()
# Copy the app bundle over using rsync
METHOD_NAME(source_directory, stagedir)
# Copy extra files
for source, target in extra_files:
full_target = stagedir / target
full_target.parent.mkdir(parents=True, exist_ok=True)
shutil.copyfile(source, full_target)
if is_linux:
# Not needed in osx
generate_hfs_file(stagedir, tmpdir, volume_name, mkfshfs_tool)
create_app_symlink(stagedir, tmpdir, hfs_tool)
# Set the folder attributes to use a custom icon
set_folder_icon(stagedir, tmpdir, hfs_tool)
chmod(stagedir)
create_dmg_from_staged(
stagedir, output_dmg, tmpdir, volume_name, hfs_tool, dmg_tool
)
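# Hypothetical usage sketch (paths and tool names are illustrative, not part of this module):
#
#   create_dmg(
#       source_directory=Path("dist/firefox"),
#       output_dmg=Path("dist/firefox.dmg"),
#       volume_name="Firefox",
#       extra_files=[(Path("branding/dsstore"), ".DS_Store")],
#       dmg_tool=Path("dmg"),
#       hfs_tool=Path("hfsplus"),
#       mkfshfs_tool=Path("mkfs.hfsplus"),
#   )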
def extract_dmg_contents(
dmgfile: Path,
destdir: Path,
dmg_tool: Path = None,
hfs_tool: Path = None,
):
if is_linux:
with mozfile.TemporaryDirectory() as tmpdir:
hfs_file = os.path.join(tmpdir, "firefox.hfs")
subprocess.check_call(
[dmg_tool, "extract", dmgfile, hfs_file],
# dmg is seriously chatty
stdout=subprocess.DEVNULL,
)
subprocess.check_call([hfs_tool, hfs_file, "extractall", "/", destdir])
else:
# TODO: find better way to resolve topsrcdir (checkout directory)
topsrcdir = Path(__file__).parent.parent.parent.parent.resolve()
unpack_diskimage = topsrcdir / "build/package/mac_osx/unpack-diskimage"
unpack_mountpoint = Path("/tmp/app-unpack")
subprocess.check_call([unpack_diskimage, dmgfile, unpack_mountpoint, destdir])
def extract_dmg(
dmgfile: Path,
output: Path,
dmg_tool: Path = None,
hfs_tool: Path = None,
dsstore: Path = None,
icon: Path = None,
background: Path = None,
):
if platform.system() not in ("Darwin", "Linux"):
raise Exception("Don't know how to extract a DMG on '%s'" % platform.system())
with mozfile.TemporaryDirectory() as tmp:
tmpdir = Path(tmp)
extract_dmg_contents(dmgfile, tmpdir, dmg_tool, hfs_tool)
applications_symlink = tmpdir / " "
if applications_symlink.is_symlink():
            # rsync will fail if this symlink is present
applications_symlink.unlink()
METHOD_NAME(tmpdir, output)
if dsstore:
dsstore.parent.mkdir(parents=True, exist_ok=True)
METHOD_NAME(tmpdir / ".DS_Store", dsstore)
if background:
background.parent.mkdir(parents=True, exist_ok=True)
METHOD_NAME(tmpdir / ".background" / background.name, background)
if icon:
icon.parent.mkdir(parents=True, exist_ok=True)
METHOD_NAME(tmpdir / ".VolumeIcon.icns", icon) |
test client side filters simple | from collections import OrderedDict
from viadot.sources import SAPRFC, SAPRFCV2
sap = SAPRFC()
sap2 = SAPRFCV2()
sql1 = "SELECT a AS a_renamed, b FROM table1 WHERE table1.c = 1"
sql2 = "SELECT a FROM fake_schema.fake_table WHERE a=1 AND b=2 OR c LIKE 'a%' AND d IN (1, 2) LIMIT 5 OFFSET 3"
sql3 = "SELECT b FROM c WHERE testORword=1 AND testANDword=2 AND testLIMITword=3 AND testOFFSETword=4"
sql4 = "SELECT c FROM d WHERE testLIMIT = 1 AND testOFFSET = 2 AND LIMITtest=3 AND OFFSETtest=4"
sql5 = sql3 + " AND longword123=5"
sql6 = "SELECT a FROM fake_schema.fake_table WHERE a=1 AND b=2 OR c LIKE 'a%' AND d IN (1, 2) AND longcolname=3 AND otherlongcolname=5 LIMIT 5 OFFSET 3"
sql7 = """
SELECT a, b
FROM b
WHERE c = 1
AND d = 2
AND longcolname = 12345
AND otherlongcolname = 6789
AND thirdlongcolname = 01234
LIMIT 5
OFFSET 10
"""
def test__get_table_name():
assert sap._get_table_name(sql1) == "table1"
assert sap._get_table_name(sql2) == "fake_schema.fake_table", sap._get_table_name(
sql2
)
assert sap._get_table_name(sql7) == "b"
def test__get_columns():
assert sap._get_columns(sql1) == ["a", "b"]
assert sap._get_columns(sql1, aliased=True) == ["a_renamed", "b"], sap._get_columns(
sql1, aliased=True
)
assert sap._get_columns(sql2) == ["a"]
assert sap._get_columns(sql7) == ["a", "b"]
def test__get_where_condition():
assert sap._get_where_condition(sql1) == "table1.c = 1", sap._get_where_condition(
sql1
)
assert (
sap._get_where_condition(sql2) == "a=1 AND b=2 OR c LIKE 'a%' AND d IN (1, 2)"
), sap._get_where_condition(sql2)
assert (
sap._get_where_condition(sql3)
== "testORword=1 AND testANDword=2 AND testLIMITword=3 AND testOFFSETword=4"
), sap._get_where_condition(sql3)
assert (
sap._get_where_condition(sql4)
== "testLIMIT = 1 AND testOFFSET = 2 AND LIMITtest=3 AND OFFSETtest=4"
), sap._get_where_condition(sql4)
assert (
sap._get_where_condition(sql7)
== "c = 1 AND d = 2 AND longcolname = 12345 AND otherlongcolname = 6789"
), sap._get_where_condition(sql7)
def test__get_limit():
assert sap._get_limit(sql1) is None
assert sap._get_limit(sql2) == 5
assert sap._get_limit(sql7) == 5
def test__get_offset():
assert sap._get_offset(sql1) is None
assert sap._get_offset(sql2) == 3
assert sap._get_offset(sql7) == 10
def METHOD_NAME():
_ = sap._get_where_condition(sql5)
assert sap.client_side_filters == OrderedDict(
{"AND": "longword123=5"}
), sap.client_side_filters
def test_client_side_filters_with_limit_offset():
_ = sap._get_where_condition(sql6)
assert sap.client_side_filters == OrderedDict(
{"AND": "otherlongcolname=5"}
), sap.client_side_filters
_ = sap._get_where_condition(sql7)
assert sap.client_side_filters == OrderedDict(
{"AND": "thirdlongcolname = 01234"}
), sap.client_side_filters
def test___build_pandas_filter_query():
_ = sap._get_where_condition(sql6)
assert (
sap._build_pandas_filter_query(sap.client_side_filters)
== "otherlongcolname == 5"
), sap._build_pandas_filter_query(sap.client_side_filters)
_ = sap._get_where_condition(sql7)
assert (
sap._build_pandas_filter_query(sap.client_side_filters)
== "thirdlongcolname == 01234"
), sap._build_pandas_filter_query(sap.client_side_filters)
def test__get_table_name_v2():
assert sap2._get_table_name(sql1) == "table1"
assert sap2._get_table_name(sql2) == "fake_schema.fake_table", sap2._get_table_name(
sql2
)
assert sap2._get_table_name(sql7) == "b"
def test__get_columns_v2():
assert sap2._get_columns(sql1) == ["a", "b"]
assert sap2._get_columns(sql1, aliased=True) == [
"a_renamed",
"b",
], sap2._get_columns(sql1, aliased=True)
assert sap2._get_columns(sql2) == ["a"]
assert sap2._get_columns(sql7) == ["a", "b"]
def test__get_where_condition_v2():
assert sap2._get_where_condition(sql1) == "table1.c = 1", sap2._get_where_condition(
sql1
)
assert (
sap2._get_where_condition(sql2) == "a=1 AND b=2 OR c LIKE 'a%' AND d IN (1, 2)"
), sap2._get_where_condition(sql2)
assert (
sap2._get_where_condition(sql3)
== "testORword=1 AND testANDword=2 AND testLIMITword=3 AND testOFFSETword=4"
), sap2._get_where_condition(sql3)
assert (
sap2._get_where_condition(sql4)
== "testLIMIT = 1 AND testOFFSET = 2 AND LIMITtest=3 AND OFFSETtest=4"
), sap2._get_where_condition(sql4)
assert (
sap2._get_where_condition(sql7)
== "c = 1 AND d = 2 AND longcolname = 12345 AND otherlongcolname = 6789"
), sap2._get_where_condition(sql7)
def test__get_limit_v2():
assert sap2._get_limit(sql1) is None
assert sap2._get_limit(sql2) == 5
assert sap2._get_limit(sql7) == 5
def test__get_offset_v2():
assert sap2._get_offset(sql1) is None
assert sap2._get_offset(sql2) == 3
assert sap2._get_offset(sql7) == 10
def test_client_side_filters_simple_v2():
_ = sap2._get_where_condition(sql5)
assert sap2.client_side_filters == OrderedDict(
{"AND": "longword123=5"}
), sap2.client_side_filters
def test_client_side_filters_with_limit_offset_v2():
_ = sap2._get_where_condition(sql6)
assert sap2.client_side_filters == OrderedDict(
{"AND": "otherlongcolname=5"}
), sap2.client_side_filters
_ = sap2._get_where_condition(sql7)
assert sap2.client_side_filters == OrderedDict(
{"AND": "thirdlongcolname = 01234"}
), sap2.client_side_filters
def test___build_pandas_filter_query_v2():
_ = sap2._get_where_condition(sql6)
assert (
sap2._build_pandas_filter_query(sap2.client_side_filters)
== "otherlongcolname == 5"
), sap2._build_pandas_filter_query(sap2.client_side_filters)
_ = sap2._get_where_condition(sql7)
assert (
sap2._build_pandas_filter_query(sap2.client_side_filters)
== "thirdlongcolname == 01234"
), sap2._build_pandas_filter_query(sap2.client_side_filters) |
location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetIotDpsResourceResult',
'AwaitableGetIotDpsResourceResult',
'get_iot_dps_resource',
'get_iot_dps_resource_output',
]
@pulumi.output_type
class GetIotDpsResourceResult:
"""
The description of the provisioning service.
"""
def __init__(__self__, etag=None, id=None, METHOD_NAME=None, name=None, properties=None, resourcegroup=None, sku=None, subscriptionid=None, system_data=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if resourcegroup and not isinstance(resourcegroup, str):
raise TypeError("Expected argument 'resourcegroup' to be a str")
pulumi.set(__self__, "resourcegroup", resourcegroup)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if subscriptionid and not isinstance(subscriptionid, str):
raise TypeError("Expected argument 'subscriptionid' to be a str")
pulumi.set(__self__, "subscriptionid", subscriptionid)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
The Etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal ETag convention.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.IotDpsPropertiesDescriptionResponse':
"""
Service specific properties for a provisioning service
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def resourcegroup(self) -> Optional[str]:
"""
The resource group of the resource.
"""
return pulumi.get(self, "resourcegroup")
@property
@pulumi.getter
def sku(self) -> 'outputs.IotDpsSkuInfoResponse':
"""
Sku info for a provisioning Service.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def subscriptionid(self) -> Optional[str]:
"""
The subscription id of the resource.
"""
return pulumi.get(self, "subscriptionid")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetIotDpsResourceResult(GetIotDpsResourceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetIotDpsResourceResult(
etag=self.etag,
id=self.id,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
properties=self.properties,
resourcegroup=self.resourcegroup,
sku=self.sku,
subscriptionid=self.subscriptionid,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_iot_dps_resource(provisioning_service_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetIotDpsResourceResult:
"""
Get the metadata of the provisioning service without SAS keys.
Azure REST API version: 2022-12-12.
:param str provisioning_service_name: Name of the provisioning service to retrieve.
:param str resource_group_name: Resource group name.
"""
__args__ = dict()
__args__['provisioningServiceName'] = provisioning_service_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:devices:getIotDpsResource', __args__, opts=opts, typ=GetIotDpsResourceResult).value
return AwaitableGetIotDpsResourceResult(
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
resourcegroup=pulumi.get(__ret__, 'resourcegroup'),
sku=pulumi.get(__ret__, 'sku'),
subscriptionid=pulumi.get(__ret__, 'subscriptionid'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_iot_dps_resource)
def get_iot_dps_resource_output(provisioning_service_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetIotDpsResourceResult]:
"""
Get the metadata of the provisioning service without SAS keys.
Azure REST API version: 2022-12-12.
:param str provisioning_service_name: Name of the provisioning service to retrieve.
:param str resource_group_name: Resource group name.
"""
... |
strict app | import logging
import pytest
from starlette.types import Receive, Scope, Send
from conftest import FIXTURES_FOLDER, OPENAPI3_SPEC, build_app_from_fixture
@pytest.fixture(scope="session")
def simple_app(spec, app_class):
return build_app_from_fixture(
"simple", app_class=app_class, spec_file=spec, validate_responses=True
)
@pytest.fixture(scope="session")
def simple_openapi_app(app_class):
return build_app_from_fixture(
"simple", app_class=app_class, spec_file=OPENAPI3_SPEC, validate_responses=True
)
@pytest.fixture(scope="session")
def reverse_proxied_app(spec, app_class):
class ReverseProxied:
def __init__(self, app, root_path=None, scheme=None, server=None):
self.app = app
self.root_path = root_path
self.scheme = scheme
self.server = server
async def __call__(self, scope: Scope, receive: Receive, send: Send):
logging.warning(
"this demo is not secure by default!! "
"You'll want to make sure these headers are coming from your proxy, "
"and not directly from users on the web!"
)
root_path = scope.get("root_path") or self.root_path
for header, value in scope.get("headers", []):
if header == b"x-forwarded-path":
root_path = value.decode()
break
if root_path:
scope["root_path"] = "/" + root_path.strip("/")
path_info = scope.get("PATH_INFO", scope.get("path"))
if path_info.startswith(root_path):
scope["PATH_INFO"] = path_info[len(root_path) :]
scope["scheme"] = scope.get("scheme") or self.scheme
scope["server"] = scope.get("server") or (self.server, None)
return await self.app(scope, receive, send)
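    # For example (illustrative): a request carrying the header
    # "X-Forwarded-Path: /reverse_proxied/" ends up with
    # scope["root_path"] == "/reverse_proxied", so URLs the app generates
    # include the proxy prefix.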
app = build_app_from_fixture(
"simple", app_class=app_class, spec_file=spec, validate_responses=True
)
app.middleware = ReverseProxied(app.middleware, root_path="/reverse_proxied/")
return app
@pytest.fixture(scope="session")
def snake_case_app(spec, app_class):
return build_app_from_fixture(
"snake_case",
app_class=app_class,
spec_file=spec,
validate_responses=True,
pythonic_params=True,
)
@pytest.fixture(scope="session")
def invalid_resp_allowed_app(spec, app_class):
return build_app_from_fixture(
"simple", app_class=app_class, spec_file=spec, validate_responses=False
)
@pytest.fixture(scope="session")
def METHOD_NAME(spec, app_class):
return build_app_from_fixture(
"simple",
app_class=app_class,
spec_file=spec,
validate_responses=True,
strict_validation=True,
)
@pytest.fixture(scope="session")
def problem_app(spec, app_class):
return build_app_from_fixture(
"problem", app_class=app_class, spec_file=spec, validate_responses=True
)
@pytest.fixture(scope="session")
def schema_app(spec, app_class):
return build_app_from_fixture(
"different_schemas",
app_class=app_class,
spec_file=spec,
validate_responses=True,
)
@pytest.fixture(scope="session")
def secure_endpoint_app(spec, app_class):
return build_app_from_fixture(
"secure_endpoint",
app_class=app_class,
spec_file=spec,
validate_responses=True,
)
@pytest.fixture(scope="session")
def secure_endpoint_strict_app(spec, app_class):
return build_app_from_fixture(
"secure_endpoint",
app_class=app_class,
spec_file=spec,
validate_responses=True,
strict_validation=True,
)
@pytest.fixture(scope="session")
def secure_api_app(spec, app_class):
options = {"swagger_ui": False}
return build_app_from_fixture(
"secure_api",
app_class=app_class,
spec_file=spec,
options=options,
auth_all_paths=True,
)
@pytest.fixture(scope="session")
def unordered_definition_app(spec, app_class):
return build_app_from_fixture(
"unordered_definition", app_class=app_class, spec_file=spec
)
@pytest.fixture(scope="session")
def bad_operations_app(spec, app_class):
return build_app_from_fixture(
"bad_operations", app_class=app_class, spec_file=spec, resolver_error=501
) |
process | def METHOD_NAME(client, edit, invitation):
journal = openreview.journal.Journal()
venue_id = journal.venue_id
decision_approval = client.get_note(edit.note.id)
decision = client.get_note(edit.note.replyto)
## On update or delete return
if decision_approval.tcdate != decision_approval.tmdate:
return
submission = client.get_note(decision.forum)
## Make the decision public
print('Make decision public')
journal.invitation_builder.set_note_decision_release_invitation(submission)
print('Check rejection')
print(decision.content)
if decision.content['recommendation']['value'] == 'Reject':
## Post a reject edit
client.post_note_edit(invitation=journal.get_rejected_id(),
signatures=[venue_id],
note=openreview.api.Note(
id=submission.id,
content={
'_bibtex': {
'value': journal.get_bibtex(submission, journal.rejected_venue_id, anonymous=True)
}
}
)
)
return
## Enable Camera Ready Revision
print('Enable Camera Ready Revision')
journal.invitation_builder.set_note_camera_ready_revision_invitation(submission, journal.get_due_date(weeks = journal.get_camera_ready_period_length()))
## Expire reviewer tasks
print('Expire reviewer tasks')
journal.invitation_builder.expire_invitation(journal.get_review_id(submission.number))
journal.invitation_builder.expire_invitation(journal.get_reviewer_recommendation_id(submission.number))
## Send email to authors
print('Send email to authors')
if decision.content['recommendation']['value'] == 'Accept as is':
client.post_message(
recipients=[journal.get_authors_id(number=submission.number)],
subject=f'''[{journal.short_name}] Decision for your {journal.short_name} submission {submission.number}: {submission.content['title']['value']}''',
message=f'''Hi {{{{fullname}}}},
We are happy to inform you that, based on the evaluation of the reviewers and the recommendation of the assigned Action Editor, your {journal.short_name} submission "{submission.number}: {submission.content['title']['value']}" is accepted as is.
To learn more about the decision and submit the deanonymized camera ready version of your manuscript, please follow this link and click on the button "Camera Ready Revision": https://openreview.net/forum?id={submission.id}
In addition to your final manuscript, we strongly encourage you to submit a link to 1) the code associated with your work and 2) a short video presentation of your work. You can provide these links in the corresponding entries on the revision page.
For more details and guidelines on the {journal.short_name} review process, visit {journal.website}.
We thank you for your contribution to {journal.short_name} and congratulate you for your successful submission!
The {journal.short_name} Editors-in-Chief
''',
replyTo=journal.contact_info
)
return
if decision.content['recommendation']['value'] == 'Accept with minor revision':
client.post_message(
recipients=[journal.get_authors_id(number=submission.number)],
subject=f'''[{journal.short_name}] Decision for your {journal.short_name} submission {submission.number}: {submission.content['title']['value']}''',
message=f'''Hi {{{{fullname}}}},
We are happy to inform you that, based on the evaluation of the reviewers and the recommendation of the assigned Action Editor, your {journal.short_name} submission "{submission.number}: {submission.content['title']['value']}" is accepted with minor revision.
To learn more about the decision and submit the deanonymized camera ready version of your manuscript, please follow this link and click on the button "Camera Ready Revision": https://openreview.net/forum?id={submission.id}
The Action Editor responsible for your submission will have provided a description of the revision expected for accepting your final manuscript.
In addition to your final manuscript, we strongly encourage you to submit a link to 1) the code associated with your work and 2) a short video presentation of your work. You can provide these links in the corresponding entries on the revision page.
For more details and guidelines on the {journal.short_name} review process, visit {journal.website}.
We thank you for your contribution to {journal.short_name} and congratulate you for your successful submission!
The {journal.short_name} Editors-in-Chief
''',
replyTo=journal.contact_info
) |
centroid x | import math
class Vector:
def __init__(self, x, y):
self.x = x
self.y = y
def __iter__(self):
return iter((self.x, self.y))
def __getitem__(self, key):
return (self.x, self.y)[key]
def __eq__(self, other):
return tuple(self) == tuple(other)
def __repr__(self):
return "Vector(x: %d, y: %d)" % (self.x, self.y)
class Rect:
def __init__(self, left, top, right, bottom):
self.left = left
self.top = top
self.right = right
self.bottom = bottom
def _get_size(self):
return Vector(self.right - self.left, self.bottom - self.top)
def _set_size(self, new_size):
centroid = self.centroid
self.left = centroid[0] - new_size[0] / 2
self.right = centroid[0] + new_size[0] / 2
self.top = centroid[1] - new_size[1] / 2
self.bottom = centroid[1] + new_size[1] / 2
size = property(_get_size, _set_size)
@property
def width(self):
return self.size.x
@property
def height(self):
return self.size.y
def _get_centroid(self):
return Vector((self.left + self.right) / 2, (self.top + self.bottom) / 2)
def _set_centroid(self, new_centroid):
size = self.size
self.left = new_centroid[0] - size[0] / 2
self.right = new_centroid[0] + size[0] / 2
self.top = new_centroid[1] - size[1] / 2
self.bottom = new_centroid[1] + size[1] / 2
centroid = property(_get_centroid, _set_centroid)
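    # Worked example (illustrative): Rect(0, 0, 10, 20).centroid == Vector(5, 10);
    # assigning rect.centroid = (7, 12) translates the rect while keeping its size.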
@property
def x(self):
return self.centroid.x
@property
def y(self):
return self.centroid.y
@property
def METHOD_NAME(self):
# Included for backwards compatibility
return self.centroid.x
@property
def centroid_y(self):
# Included for backwards compatibility
return self.centroid.y
def as_tuple(self):
# No longer needed, this class should behave like a tuple
# Included for backwards compatibility
return self.left, self.top, self.right, self.bottom
def clone(self):
return type(self)(self.left, self.top, self.right, self.bottom)
def round(self):
"""
Returns a new rect with all attributes rounded to integers
"""
clone = self.clone()
# Round down left and top
clone.left = int(math.floor(clone.left))
clone.top = int(math.floor(clone.top))
# Round up right and bottom
clone.right = int(math.ceil(clone.right))
clone.bottom = int(math.ceil(clone.bottom))
return clone
def move_to_clamp(self, other):
"""
Moves this rect so it is completely covered by the rect in "other" and
returns a new Rect instance.
"""
other = Rect(*other)
clone = self.clone()
if clone.left < other.left:
clone.right -= clone.left - other.left
clone.left = other.left
if clone.top < other.top:
clone.bottom -= clone.top - other.top
clone.top = other.top
if clone.right > other.right:
clone.left -= clone.right - other.right
clone.right = other.right
if clone.bottom > other.bottom:
clone.top -= clone.bottom - other.bottom
clone.bottom = other.bottom
return clone
def move_to_cover(self, other):
"""
Moves this rect so it completely covers the rect specified in the
"other" parameter and returns a new Rect instance.
"""
other = Rect(*other)
clone = self.clone()
if clone.left > other.left:
clone.right -= clone.left - other.left
clone.left = other.left
if clone.top > other.top:
clone.bottom -= clone.top - other.top
clone.top = other.top
if clone.right < other.right:
clone.left += other.right - clone.right
clone.right = other.right
if clone.bottom < other.bottom:
clone.top += other.bottom - clone.bottom
clone.bottom = other.bottom
return clone
def transform(self, transform):
# Transform each corner of the rect
tl_transformed = transform.transform_vector(Vector(self.left, self.top))
tr_transformed = transform.transform_vector(Vector(self.right, self.top))
bl_transformed = transform.transform_vector(Vector(self.left, self.bottom))
br_transformed = transform.transform_vector(Vector(self.right, self.bottom))
# Find extents of the transformed corners
left = min(
[tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]
)
right = max(
[tl_transformed.x, tr_transformed.x, bl_transformed.x, br_transformed.x]
)
top = min(
[tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]
)
bottom = max(
[tl_transformed.y, tr_transformed.y, bl_transformed.y, br_transformed.y]
)
return Rect(left, top, right, bottom)
def __iter__(self):
return iter((self.left, self.top, self.right, self.bottom))
def __getitem__(self, key):
return (self.left, self.top, self.right, self.bottom)[key]
def __eq__(self, other):
return tuple(self) == tuple(other)
def __repr__(self):
return "Rect(left: %d, top: %d, right: %d, bottom: %d)" % (
self.left,
self.top,
self.right,
self.bottom,
)
@classmethod
def from_point(cls, x, y, width, height):
return cls(
x - width / 2,
y - height / 2,
x + width / 2,
y + height / 2,
) |
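    # Worked example (illustrative): Rect.from_point(50, 50, 20, 10) returns
    # Rect(left: 40, top: 45, right: 60, bottom: 55) -- a 20x10 rect centred on (50, 50).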
call | from __future__ import absolute_import, division, print_function, unicode_literals
import os
from tempfile import gettempdir
import tensorflow as tf
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from clearml import Task
# Connecting ClearML with the current process,
# from here on everything is logged automatically
task = Task.init(project_name='examples', task_name='TensorFlow v2 MNIST with summaries')
# Load and prepare the MNIST dataset.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a channels dimension
x_train = x_train[..., tf.newaxis].astype('float32')
x_test = x_test[..., tf.newaxis].astype('float32')
# Use tf.data to batch and shuffle the dataset
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(10000).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
# Build the tf.keras model using the Keras model subclassing API
class MyModel(Model):
def __init__(self):
super(MyModel, self).__init__()
self.conv1 = Conv2D(32, 3, activation='relu', dtype=tf.float32)
self.flatten = Flatten()
self.d1 = Dense(128, activation='relu', dtype=tf.float32)
self.d2 = Dense(10, activation='softmax', dtype=tf.float32)
def METHOD_NAME(self, x):
x = self.conv1(x)
x = self.flatten(x)
x = self.d1(x)
return self.d2(x)
# Create an instance of the model
model = MyModel()
# Choose an optimizer and loss function for training
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
# Select metrics to measure the loss and the accuracy of the model.
# These metrics accumulate the values over epochs and then print the overall result.
train_loss = tf.keras.metrics.Mean(name='train_loss', dtype=tf.float32)
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss', dtype=tf.float32)
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
# Use tf.GradientTape to train the model
@tf.function
def train_step(images, labels):
with tf.GradientTape() as tape:
predictions = model(images)
loss = loss_object(labels, predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(labels, predictions)
# Test the model
@tf.function
def test_step(images, labels):
predictions = model(images)
t_loss = loss_object(labels, predictions)
test_loss(t_loss)
test_accuracy(labels, predictions)
# Set up summary writers to write the summaries to disk in a different logs directory
train_log_dir = os.path.join(gettempdir(), 'logs', 'gradient_tape', 'train')
test_log_dir = os.path.join(gettempdir(), 'logs', 'gradient_tape', 'test')
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
# Set up checkpoints manager
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, os.path.join(gettempdir(), 'tf_ckpts'), max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
else:
print("Initializing from scratch.")
# Start training
EPOCHS = 5
for epoch in range(EPOCHS):
for images, labels in train_ds:
train_step(images, labels)
with train_summary_writer.as_default():
tf.summary.scalar('loss', train_loss.result(), step=epoch)
tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
ckpt.step.assign_add(1)
if int(ckpt.step) % 1 == 0:
save_path = manager.save()
print("Saved checkpoint for step {}: {}".format(int(ckpt.step), save_path))
for test_images, test_labels in test_ds:
test_step(test_images, test_labels)
with test_summary_writer.as_default():
tf.summary.scalar('loss', test_loss.result(), step=epoch)
tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
print(template.format(epoch+1,
train_loss.result(),
train_accuracy.result()*100,
test_loss.result(),
test_accuracy.result()*100))
# Reset the metrics for the next epoch
train_loss.reset_states()
train_accuracy.reset_states()
test_loss.reset_states()
test_accuracy.reset_states() |
formula definition | import os
import types
import pytest
import salt.spm
import salt.utils.files
from tests.support.mock import patch
@pytest.fixture
def METHOD_NAME():
return {
"name": "formula1",
"version": "1.2",
"release": "2",
"summary": "test",
"description": "testing, nothing to see here",
}
@pytest.fixture
def formula_contents(METHOD_NAME):
return (
(
"FORMULA",
(
"name: {name}\n"
"version: {version}\n"
"release: {release}\n"
"summary: {summary}\n"
"description: {description}"
).format(**METHOD_NAME),
),
("modules/mod1.py", "# mod1.py"),
("modules/mod2.py", "# mod2.py"),
("states/state1.sls", "# state1.sls"),
("states/state2.sls", "# state2.sls"),
)
@pytest.fixture
def formula(METHOD_NAME, formula_contents):
return types.SimpleNamespace(
definition=METHOD_NAME, contents=formula_contents
)
class SPMTestUserInterface(salt.spm.SPMUserInterface):
"""
Unit test user interface to SPMClient
"""
def __init__(self):
self._status = []
self._confirm = []
self._error = []
def status(self, msg):
self._status.append(msg)
def confirm(self, action):
self._confirm.append(action)
def error(self, msg):
self._error.append(msg)
@pytest.fixture
def minion_config(tmp_path, minion_opts):
_minion_config = minion_opts.copy()
_minion_config.update(
{
"spm_logfile": str(tmp_path / "log"),
"spm_repos_config": str(tmp_path / "etc" / "spm.repos"),
"spm_cache_dir": str(tmp_path / "cache"),
"spm_build_dir": str(tmp_path / "build"),
"spm_build_exclude": [".git"],
"spm_db_provider": "sqlite3",
"spm_files_provider": "local",
"spm_db": str(tmp_path / "packages.db"),
"extension_modules": str(tmp_path / "modules"),
"file_roots": {"base": [str(tmp_path)]},
"formula_path": str(tmp_path / "spm"),
"pillar_path": str(tmp_path / "pillar"),
"reactor_path": str(tmp_path / "reactor"),
"assume_yes": True,
"root_dir": str(tmp_path),
"force": False,
"verbose": False,
"cache": "localfs",
"cachedir": str(tmp_path / "cache"),
"spm_repo_dups": "ignore",
"spm_share_dir": str(tmp_path / "share"),
}
)
return _minion_config
@pytest.fixture
def client(minion_config):
with patch("salt.client.Caller", return_value=minion_config):
with patch(
"salt.client.get_local_client", return_value=minion_config["conf_file"]
):
yield salt.spm.SPMClient(SPMTestUserInterface(), minion_config)
@pytest.fixture
def formulas_dir(formula, tmp_path):
fdir = tmp_path / formula.definition["name"]
fdir.mkdir()
for path, contents in formula.contents:
path = fdir / path
dirname, _ = os.path.split(str(path))
if not os.path.exists(dirname):
os.makedirs(dirname)
path.write_text(contents)
return str(fdir)
def test_build_install(client, formulas_dir, minion_config, formula):
# Build package
client.run(["build", formulas_dir])
pkgpath = client.ui._status[-1].split()[-1]
assert os.path.exists(pkgpath)
# Install package
client.run(["local", "install", pkgpath])
# Check filesystem
for path, contents in formula.contents:
path = os.path.join(
minion_config["file_roots"]["base"][0],
formula.definition["name"],
path,
)
assert os.path.exists(path)
with salt.utils.files.fopen(path, "r") as rfh:
assert rfh.read().replace("\r\n", "\n") == contents
# Check database
client.run(["info", formula.definition["name"]])
lines = client.ui._status[-1].replace("\r\n", "\n").split("\n")
for key, line in (
("name", "Name: {}"),
("version", "Version: {}"),
("release", "Release: {}"),
("summary", "Summary: {}"),
):
assert line.format(formula.definition[key]) in lines
# Reinstall with force=False, should fail
client.ui._error = []
client.run(["local", "install", pkgpath])
assert len(client.ui._error) > 0
# Reinstall with force=True, should succeed
with patch.dict(minion_config, {"force": True}):
client.ui._error = []
client.run(["local", "install", pkgpath])
assert len(client.ui._error) == 0
def test_repo_paths(client, formulas_dir):
client.run(["create_repo", formulas_dir])
assert len(client.ui._error) == 0
@pytest.mark.parametrize(
"fail_args",
(
["bogus", "command"],
["create_repo"],
["build"],
["build", "/nonexistent/path"],
["info"],
["info", "not_installed"],
["files"],
["files", "not_installed"],
["install"],
["install", "nonexistent.spm"],
["remove"],
["remove", "not_installed"],
["local", "bogus", "command"],
["local", "info"],
["local", "info", "/nonexistent/path/junk.spm"],
["local", "files"],
["local", "files", "/nonexistent/path/junk.spm"],
["local", "install"],
["local", "install", "/nonexistent/path/junk.spm"],
["local", "list"],
["local", "list", "/nonexistent/path/junk.spm"],
# XXX install failure due to missing deps
# XXX install failure due to missing field
),
)
def test_failure_paths(client, fail_args):
client.run(fail_args)
assert len(client.ui._error) > 0 |
far exist |
import os
import string
from pathlib import Path
from typing import Dict
import pynini
from pynini import Far
from pynini.export import export
from pynini.lib import byte, pynutil, utf8
DAMO_CHAR = utf8.VALID_UTF8_CHAR
DAMO_DIGIT = byte.DIGIT
DAMO_LOWER = pynini.union(*string.ascii_lowercase).optimize()
DAMO_UPPER = pynini.union(*string.ascii_uppercase).optimize()
DAMO_ALPHA = pynini.union(DAMO_LOWER, DAMO_UPPER).optimize()
DAMO_ALNUM = pynini.union(DAMO_DIGIT, DAMO_ALPHA).optimize()
DAMO_HEX = pynini.union(*string.hexdigits).optimize()
DAMO_NON_BREAKING_SPACE = "\u00A0"
DAMO_SPACE = " "
DAMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", "\u00A0").optimize()
DAMO_NOT_SPACE = pynini.difference(DAMO_CHAR, DAMO_WHITE_SPACE).optimize()
DAMO_NOT_QUOTE = pynini.difference(DAMO_CHAR, r'"').optimize()
DAMO_PUNCT = pynini.union(*map(pynini.escape, string.punctuation)).optimize()
DAMO_GRAPH = pynini.union(DAMO_ALNUM, DAMO_PUNCT).optimize()
DAMO_SIGMA = pynini.closure(DAMO_CHAR)
delete_space = pynutil.delete(pynini.closure(DAMO_WHITE_SPACE))
insert_space = pynutil.insert(" ")
delete_extra_space = pynini.cross(pynini.closure(DAMO_WHITE_SPACE, 1), " ")
# French frequently compounds numbers with hyphen.
delete_hyphen = pynutil.delete(pynini.closure("-", 0, 1))
insert_hyphen = pynutil.insert("-")
TO_LOWER = pynini.union(*[pynini.cross(x, y) for x, y in zip(string.ascii_uppercase, string.ascii_lowercase)])
TO_UPPER = pynini.invert(TO_LOWER)
def generator_main(file_name: str, graphs: Dict[str, pynini.FstLike]):
"""
Exports graph as OpenFst finite state archive (FAR) file with given file name and rule name.
Args:
file_name: exported file name
graphs: Mapping of a rule name and Pynini WFST graph to be exported
"""
exporter = export.Exporter(file_name)
for rule, graph in graphs.items():
exporter[rule] = graph.optimize()
exporter.close()
print(f"Created {file_name}")
def convert_space(fst) -> "pynini.FstLike":
"""
Converts space to nonbreaking space.
Used only in tagger grammars for transducing token values within quotes, e.g. name: "hello kitty"
    This makes the transducer significantly slower, so only use it when there could be spaces within quotes; otherwise leave it out.
    Args:
        fst: input fst
    Returns an output fst where breaking spaces are converted to non-breaking spaces
"""
return fst @ pynini.cdrewrite(pynini.cross(DAMO_SPACE, DAMO_NON_BREAKING_SPACE), "", "", DAMO_SIGMA)
class GraphFst:
"""
Base class for all grammar fsts.
Args:
name: name of grammar class
kind: either 'classify' or 'verbalize'
        deterministic: if True, provide a single transduction option;
            if False, multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, name: str, kind: str, deterministic: bool = True):
self.name = name
self.kind = kind
self._fst = None
self.deterministic = deterministic
self.far_path = Path(os.path.dirname(__file__) + "/grammars/" + kind + "/" + name + ".far")
if self.METHOD_NAME():
self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst()
def METHOD_NAME(self) -> bool:
"""
Returns true if FAR can be loaded
"""
return self.far_path.exists()
@property
def fst(self) -> "pynini.FstLike":
return self._fst
@fst.setter
def fst(self, fst):
self._fst = fst
def add_tokens(self, fst) -> "pynini.FstLike":
"""
        Wraps the class name around the given fst
Args:
fst: input fst
Returns:
Fst: fst
"""
return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }")
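    # For example (illustrative): with self.name == "cardinal", add_tokens wraps an fst
    # mapping "23" to "twenty three" so its output reads "cardinal { twenty three }";
    # delete_tokens below strips that wrapper again during verbalization.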
def delete_tokens(self, fst) -> "pynini.FstLike":
"""
        Deletes the class-name wrapper around the output of the given fst
Args:
fst: input fst
Returns:
Fst: fst
"""
res = (
pynutil.delete(f"{self.name}")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ fst
+ delete_space
+ pynutil.delete("}")
)
return res @ pynini.cdrewrite(pynini.cross("\u00A0", " "), "", "", DAMO_SIGMA) |