id (int64, 0–6k) | code (string, 4k–8k chars) | code_compressed (null for all rows)
---|---|---
300 |
# Copyright (C) Jan 2020 Mellanox Technologies Ltd. All rights reserved.
# Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# This software is available to you under a choice of one of two
# licenses. You may choose to be licensed under the terms of the GNU
# General Public License (GPL) Version 2, available from the file
# COPYING in the main directory of this source tree, or the
# OpenIB.org BSD license below:
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# - Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# - Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --
#######################################################
#
# SegmentCreator.py
# Python implementation of the Class SegmentCreator
# Generated by Enterprise Architect
# Created on: 14-Aug-2019 10:12:03 AM
# Original author: talve
#
#######################################################
from segments.SegmentFactory import SegmentFactory
from utils import constants as cs
class SegmentCreator:
"""this class is responsible for splitting the raw data to segments and creating
segments objects.
"""
def create(self, raw_data):
"""convert segments data into a segments objects by using SegmentFactory.
"""
try:
segments = []
raw_data_segments_lst = self._parse_segments(raw_data)
for raw_seg in raw_data_segments_lst:
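# The segment type is a bit field inside the type dword: render the dword as a
# 32-bit binary string and slice out the type bits.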
seg_type = '{:0b}'.format(raw_seg[cs.SEGMENT_TYPE_DWORD_LOCATION]).zfill(32)[cs.SEGMENT_TYPE_START: cs.SEGMENT_TYPE_END]
seg_type = hex(int(seg_type, 2))
seg_type_for_create = SegmentCreator.METHOD_NAME(seg_type)
seg = SegmentFactory.create(seg_type_for_create, raw_seg)
seg.resource_type = seg_type
segments.append(seg)
except Exception as e:
raise Exception("Failed to create segments with error: {0}".format(e))
return segments
def _parse_segments(self, raw_data):
"""splitting the raw data into segments
raw data is represented as a list of dword's
"""
splitted_segments = []
try:
end_index = len(raw_data) - 1
current_index = 0
while current_index <= end_index:
# seg size specified in dwords
seg_size = '{:032b}'.format(raw_data[cs.SEGMENT_SIZE_DWORD_LOCATION + current_index])[
cs.SEGMENT_SIZE_START: cs.SEGMENT_SIZE_END]
seg_size = int(seg_size, 2)
if seg_size == 0:
raise Exception("Error in segments splitting. raw_data didn't get smaller - found segment_size = 0")
seg_data = raw_data[current_index:seg_size + current_index]
splitted_segments.append(seg_data)
current_index += seg_size
except Exception as e:
raise Exception("Failed to split segments with error: {0}".format(e))
return splitted_segments
@classmethod
def is_resource_segment(cls, seg_type):
"""This method check if the segment type is a inside the interval of a resource segment
"""
return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MAX >= seg_type >= cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE_MIN
@classmethod
def METHOD_NAME(cls, seg_type):
"""This method check if the segment type is a reference segment
and return the right type of that segment.
"""
if cls.is_resource_segment(seg_type):
return cs.RESOURCE_DUMP_SEGMENT_TYPE_RESOURCE
return seg_type
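A minimal, self-contained sketch of the dword bit-field slicing used above. The field layout below (type in the high 16 bits, size in the low 16 bits of the first dword) is an illustrative assumption only; the real offsets come from utils.constants.

# Illustrative layout only: type field in bits 0..16, size field in bits 16..32.
example_dword = 0x000A0003
bits = '{:032b}'.format(example_dword)  # 32-character binary string, MSB first
seg_size = int(bits[16:32], 2)          # -> 3 (segment length in dwords)
seg_type = hex(int(bits[0:16], 2))      # -> '0xa'
assert seg_size == 3 and seg_type == '0xa'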
301 |
from methods.regular.regular_api import *
from default.tests.test_utils import testing_setup
from shared.tests.test_utils import common_actions, data_mocking
from base64 import b64encode
from unittest.mock import patch
from methods.task.task_template.job_resync import job_resync_core, threaded_job_resync
from shared.utils import job_dir_sync_utils
class TestJobResync(testing_setup.DiffgramBaseTestCase):
"""
"""
def METHOD_NAME(self):
# TODO: this test is assuming the 'my-sandbox-project' exists and some objects have been previously created.
# For future tests a mechanism of setting up and tearing down the database should be created.
super(TestJobResync, self).METHOD_NAME()
project_data = data_mocking.create_project_with_context(
{
'users': [
{'username': 'Test',
'email': '[email protected]',
'password': 'diffgram123',
}
]
},
self.session
)
self.project_data = project_data
self.project = project_data['project']
def test_job_resync_api(self):
# Create mock job.
job = data_mocking.create_job({
'name': 'my-test-job',
'project': self.project
}, self.session)
request_data = {
'task_template_id': job.id,
}
endpoint = f"/api/v1/project/{job.project.project_string_id}/job/resync"
auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
credentials = b64encode(f"{auth_api.client_id}:{auth_api.client_secret}".encode()).decode('utf-8')
response = self.client.post(
endpoint,
data = json.dumps(request_data),
headers = {
'directory_id': str(self.project.directory_default_id),
'Authorization': f"Basic {credentials}"
}
)
data = response.json
self.assertEqual(response.status_code, 200)
self.assertEqual(data['resync_result'], True)
def test_job_resync_core(self):
job = data_mocking.create_job({
'name': 'my-test-job',
'project': self.project
}, self.session)
auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
resync_result, log = job_resync_core(session = self.session,
project = self.project,
member = auth_api.member,
task_template_id = job.id,
log = regular_log.default())
self.assertTrue(resync_result)
self.assertEqual(len(log['error'].keys()), 0)
def test_threaded_job_resync(self):
job = data_mocking.create_job({
'name': 'my-test-job',
'status': 'active',
'project': self.project
}, self.session)
auth_api = common_actions.create_project_auth(project = job.project, session = self.session)
file = data_mocking.create_file({'project_id': self.project.id}, self.session)
file_missing1 = data_mocking.create_file({'project_id': self.project.id}, self.session)
file_missing2 = data_mocking.create_file({'project_id': self.project.id}, self.session)
directory = data_mocking.create_directory({
'project': self.project,
'user': self.project_data['users'][0],
'files': [file, file_missing1, file_missing2]
}, self.session)
job.update_attached_directories(self.session,
[{'directory_id': directory.id, 'selected': 'sync'}]
)
log = regular_log.default()
sync_manager = job_dir_sync_utils.JobDirectorySyncManager(
session = self.session,
log = log,
job = job
)
sync_manager.add_file_into_job(
file,
directory,
create_tasks = True
)
self.session.commit()
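# Only one of the three files in the synced directory was added to the job, so the
# resync is expected to pick up the two files that are still missing.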
result = threaded_job_resync(
task_template_id = job.id,
member_id = auth_api.member_id
)
self.assertEqual(len(result), 2)
302 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkhitsdb.endpoint import endpoint_data
class UpgradeLindormInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'hitsdb', '2020-06-15', 'UpgradeLindormInstance','hitsdb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PhoenixCoreNum(self): # Integer
return self.get_query_params().get('PhoenixCoreNum')
def set_PhoenixCoreNum(self, PhoenixCoreNum): # Integer
self.add_query_param('PhoenixCoreNum', PhoenixCoreNum)
def get_PhoenixCoreSpec(self): # String
return self.get_query_params().get('PhoenixCoreSpec')
def set_PhoenixCoreSpec(self, PhoenixCoreSpec): # String
self.add_query_param('PhoenixCoreSpec', PhoenixCoreSpec)
def get_UpgradeType(self): # String
return self.get_query_params().get('UpgradeType')
def set_UpgradeType(self, UpgradeType): # String
self.add_query_param('UpgradeType', UpgradeType)
def get_TsdbSpec(self): # String
return self.get_query_params().get('TsdbSpec')
def set_TsdbSpec(self, TsdbSpec): # String
self.add_query_param('TsdbSpec', TsdbSpec)
def get_FilestoreSpec(self): # String
return self.get_query_params().get('FilestoreSpec')
def set_FilestoreSpec(self, FilestoreSpec): # String
self.add_query_param('FilestoreSpec', FilestoreSpec)
def get_LogSpec(self): # String
return self.get_query_params().get('LogSpec')
def set_LogSpec(self, LogSpec): # String
self.add_query_param('LogSpec', LogSpec)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_TsdbNum(self): # Integer
return self.get_query_params().get('TsdbNum')
def set_TsdbNum(self, TsdbNum): # Integer
self.add_query_param('TsdbNum', TsdbNum)
def get_LindormSpec(self): # String
return self.get_query_params().get('LindormSpec')
def set_LindormSpec(self, LindormSpec): # String
self.add_query_param('LindormSpec', LindormSpec)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('SolrNum')
def set_SolrNum(self, SolrNum): # Integer
self.add_query_param('SolrNum', SolrNum)
def get_ColdStorage(self): # Integer
return self.get_query_params().get('ColdStorage')
def set_ColdStorage(self, ColdStorage): # Integer
self.add_query_param('ColdStorage', ColdStorage)
def get_LogNum(self): # Integer
return self.get_query_params().get('LogNum')
def set_LogNum(self, LogNum): # Integer
self.add_query_param('LogNum', LogNum)
def get_SolrSpec(self): # String
return self.get_query_params().get('SolrSpec')
def set_SolrSpec(self, SolrSpec): # String
self.add_query_param('SolrSpec', SolrSpec)
def get_CoreSingleStorage(self): # Integer
return self.get_query_params().get('CoreSingleStorage')
def set_CoreSingleStorage(self, CoreSingleStorage): # Integer
self.add_query_param('CoreSingleStorage', CoreSingleStorage)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_FilestoreNum(self): # Integer
return self.get_query_params().get('FilestoreNum')
def set_FilestoreNum(self, FilestoreNum): # Integer
self.add_query_param('FilestoreNum', FilestoreNum)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_LindormNum(self): # Integer
return self.get_query_params().get('LindormNum')
def set_LindormNum(self, LindormNum): # Integer
self.add_query_param('LindormNum', LindormNum)
def get_LtsCoreNum(self): # Integer
return self.get_query_params().get('LtsCoreNum')
def set_LtsCoreNum(self, LtsCoreNum): # Integer
self.add_query_param('LtsCoreNum', LtsCoreNum)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
def get_LtsCoreSpec(self): # String
return self.get_query_params().get('LtsCoreSpec')
def set_LtsCoreSpec(self, LtsCoreSpec): # String
self.add_query_param('LtsCoreSpec', LtsCoreSpec)
def get_ClusterStorage(self): # Integer
return self.get_query_params().get('ClusterStorage')
def set_ClusterStorage(self, ClusterStorage): # Integer
self.add_query_param('ClusterStorage', ClusterStorage)
def get_LogSingleStorage(self): # Integer
return self.get_query_params().get('LogSingleStorage')
def set_LogSingleStorage(self, LogSingleStorage): # Integer
self.add_query_param('LogSingleStorage', LogSingleStorage)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
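A hedged usage sketch of how a request object like the one above is typically sent with the Alibaba Cloud Python SDK. The credentials, region, and instance id are placeholders, and the import path is assumed to follow the SDK's usual request.v&lt;version&gt; layout.

from aliyunsdkcore.client import AcsClient
from aliyunsdkhitsdb.request.v20200615.UpgradeLindormInstanceRequest import UpgradeLindormInstanceRequest

# Placeholder credentials; real values come from your Alibaba Cloud account.
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = UpgradeLindormInstanceRequest()
request.set_InstanceId('<lindorm-instance-id>')
request.set_UpgradeType('<upgrade-type>')  # value as documented by the Lindorm API
request.set_ClusterStorage(480)

# Returns the raw JSON response body as bytes.
response = client.do_action_with_exception(request)
print(response)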
303 |
from __future__ import annotations
import numpy as np
import pytest
import dask.array as da
from dask.array.tests.test_dispatch import EncapsulateNDArray, WrappedArray
from dask.array.utils import assert_eq
@pytest.mark.parametrize(
"func",
[
lambda x: np.append(x, x),
lambda x: np.concatenate([x, x, x]),
lambda x: np.cov(x, x),
lambda x: np.dot(x, x),
lambda x: np.dstack((x, x)),
lambda x: np.flip(x, axis=0),
lambda x: np.hstack((x, x)),
lambda x: np.matmul(x, x),
lambda x: np.mean(x),
lambda x: np.stack([x, x]),
lambda x: np.block([x, x]),
lambda x: np.sum(x),
lambda x: np.var(x),
lambda x: np.vstack((x, x)),
lambda x: np.linalg.norm(x),
lambda x: np.min(x),
lambda x: np.amin(x),
lambda x: np.round(x),
lambda x: np.insert(x, 0, 3, axis=0),
lambda x: np.delete(x, 0, axis=0),
lambda x: np.select(
[x < 0.3, x < 0.6, x > 0.7], [x * 2, x, x / 2], default=0.65
),
],
)
def test_array_function_dask(func):
x = np.random.default_rng().random((100, 100))
y = da.from_array(x, chunks=(50, 50))
res_x = func(x)
res_y = func(y)
assert isinstance(res_y, da.Array)
assert_eq(res_y, res_x)
@pytest.mark.parametrize(
"func",
[
lambda x: np.dstack(x),
lambda x: np.hstack(x),
lambda x: np.vstack(x),
],
)
def test_stack_functions_require_sequence_of_arrays(func):
x = np.random.default_rng().random((100, 100))
y = da.from_array(x, chunks=(50, 50))
with pytest.raises(
NotImplementedError, match="expects a sequence of arrays as the first argument"
):
func(y)
@pytest.mark.parametrize("func", [np.fft.fft, np.fft.fft2])
def test_array_function_fft(func):
x = np.random.default_rng().random((100, 100))
y = da.from_array(x, chunks=(100, 100))
res_x = func(x)
res_y = func(y)
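# When numpy.fft is provided by mkl_fft it bypasses __array_function__ dispatch,
# so only assert a Dask result for the stock NumPy implementation.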
if func.__module__ != "mkl_fft._numpy_fft":
assert isinstance(res_y, da.Array)
assert_eq(res_y, res_x)
@pytest.mark.parametrize(
"func",
[
lambda x: np.min_scalar_type(x),
lambda x: np.linalg.det(x),
lambda x: np.linalg.eigvals(x),
],
)
def test_array_notimpl_function_dask(func):
x = np.random.default_rng().random((100, 100))
y = da.from_array(x, chunks=(50, 50))
with pytest.warns(
FutureWarning, match="The `.*` function is not implemented by Dask"
):
func(y)
@pytest.mark.parametrize(
"func", [lambda x: np.real(x), lambda x: np.imag(x), lambda x: np.transpose(x)]
)
def test_array_function_sparse(func):
sparse = pytest.importorskip("sparse")
x = da.random.default_rng().random((500, 500), chunks=(100, 100))
x[x < 0.9] = 0
y = x.map_blocks(sparse.COO)
assert_eq(func(x), func(y))
def test_array_function_sparse_tensordot():
sparse = pytest.importorskip("sparse")
rng = np.random.default_rng()
x = rng.random((2, 3, 4))
x[x < 0.9] = 0
y = rng.random((4, 3, 2))
y[y < 0.9] = 0
xx = sparse.COO(x)
yy = sparse.COO(y)
assert_eq(
np.tensordot(x, y, axes=(2, 0)), np.tensordot(xx, yy, axes=(2, 0)).todense()
)
@pytest.mark.parametrize("chunks", [(100, 100), (500, 100)])
def test_array_function_cupy_svd(chunks):
cupy = pytest.importorskip("cupy")
x = cupy.random.default_rng().random((500, 100))
y = da.from_array(x, chunks=chunks, asarray=False)
u_base, s_base, v_base = da.linalg.svd(y)
u, s, v = np.linalg.svd(y)
assert_eq(u, u_base)
assert_eq(s, s_base)
assert_eq(v, v_base)
@pytest.mark.parametrize(
"func",
[
lambda x: np.concatenate([x, x, x]),
lambda x: np.cov(x, x),
lambda x: np.dot(x, x),
lambda x: np.dstack((x, x)),
lambda x: np.flip(x, axis=0),
lambda x: np.hstack((x, x)),
lambda x: np.matmul(x, x),
lambda x: np.mean(x),
lambda x: np.stack([x, x]),
lambda x: np.sum(x),
lambda x: np.var(x),
lambda x: np.vstack((x, x)),
lambda x: np.linalg.norm(x),
],
)
def test_unregistered_func(func):
# Wrap a protocol-based encapsulated ndarray
x = EncapsulateNDArray(np.random.default_rng().random((100, 100)))
# See if Dask holds the array fine
y = da.from_array(x, chunks=(50, 50))
# Check if it's an equivalent array
assert_eq(x, y, check_meta=False, check_type=False)
# Perform two NumPy functions, one on the
# Encapsulated array
xx = func(x)
# And one on the Dask array holding these
# encapsulated arrays
yy = func(y)
# Check that they are equivalent arrays.
assert_eq(xx, yy, check_meta=False, check_type=False)
def test_non_existent_func():
# Regression test for __array_function__ becoming default in numpy 1.17
# dask has no sort function, so ensure that this still calls np.sort
x = da.from_array(np.array([1, 2, 4, 3]), chunks=(2,))
with pytest.warns(
FutureWarning, match="The `numpy.sort` function is not implemented by Dask"
):
assert list(np.sort(x)) == [1, 2, 3, 4]
@pytest.mark.parametrize(
"func",
[
np.equal,
np.matmul,
np.dot,
lambda x, y: np.stack([x, y]),
],
)
@pytest.mark.parametrize(
"arr_upcast, arr_downcast",
[
(
WrappedArray(np.random.default_rng().random((10, 10))),
da.random.default_rng().random((10, 10), chunks=(5, 5)),
),
(
da.random.default_rng().random((10, 10), chunks=(5, 5)),
EncapsulateNDArray(np.random.default_rng().random((10, 10))),
),
(
WrappedArray(np.random.default_rng().random((10, 10))),
EncapsulateNDArray(np.random.default_rng().random((10, 10))),
),
],
)
def METHOD_NAME(func, arr_upcast, arr_downcast):
"""Test proper dispatch on binary NumPy functions"""
assert (
type(func(arr_upcast, arr_downcast))
== type(func(arr_downcast, arr_upcast))
== type(arr_upcast)
)
@pytest.mark.parametrize("func", [da.array, da.asarray, da.asanyarray, da.tri])
def test_like_raises(func):
assert_eq(func(1, like=func(1)), func(1))
@pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray])
def test_like_with_numpy_func(func):
assert_eq(func(1, like=da.array(1)), func(1))
@pytest.mark.parametrize("func", [np.array, np.asarray, np.asanyarray])
def test_like_with_numpy_func_and_dtype(func):
assert_eq(func(1, dtype=float, like=da.array(1)), func(1, dtype=float))
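The WrappedArray and EncapsulateNDArray helpers imported above come from dask's test_dispatch module; below is a minimal sketch (hypothetical class) of the __array_function__ protocol they exercise.

import numpy as np

class MiniWrapped:
    """Tiny NEP-18 wrapper that forwards NumPy functions to the wrapped ndarray."""
    def __init__(self, data):
        self.data = np.asarray(data)
    def __array_function__(self, func, types, args, kwargs):
        # Unwrap any MiniWrapped arguments, call the real NumPy function, re-wrap.
        args = [a.data if isinstance(a, MiniWrapped) else a for a in args]
        return MiniWrapped(func(*args, **kwargs))

x = MiniWrapped(np.arange(4))
assert isinstance(np.sum(x), MiniWrapped)  # np.sum dispatches through __array_function__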
304 |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
from azure import (
WindowsAzureError,
MANAGEMENT_HOST,
_get_request_body,
_parse_response,
_str,
_update_request_uri_query,
)
from azure.http import (
HTTPError,
HTTPRequest,
)
from azure.http.httpclient import _HTTPClient
from azure.servicemanagement import (
AZURE_MANAGEMENT_CERTFILE,
AZURE_MANAGEMENT_SUBSCRIPTIONID,
_management_error_handler,
_parse_response_for_async_op,
_update_management_header,
)
class _ServiceManagementClient(object):
def __init__(self, subscription_id=None, cert_file=None,
host=MANAGEMENT_HOST):
self.requestid = None
self.subscription_id = subscription_id
self.cert_file = cert_file
self.host = host
if not self.cert_file:
if AZURE_MANAGEMENT_CERTFILE in os.environ:
self.cert_file = os.environ[AZURE_MANAGEMENT_CERTFILE]
if not self.subscription_id:
if AZURE_MANAGEMENT_SUBSCRIPTIONID in os.environ:
self.subscription_id = os.environ[
AZURE_MANAGEMENT_SUBSCRIPTIONID]
if not self.cert_file or not self.subscription_id:
raise WindowsAzureError(
'You need to provide subscription id and certificate file')
self._httpclient = _HTTPClient(
service_instance=self, cert_file=self.cert_file)
self._filter = self._httpclient.perform_request
def with_filter(self, filter):
'''Returns a new service which will process requests with the
specified filter. Filtering operations can include logging, automatic
retrying, etc... The filter is a lambda which receives the HTTPRequest
and another lambda. The filter can perform any pre-processing on the
request, pass it off to the next lambda, and then perform any
post-processing on the response.'''
res = type(self)(self.subscription_id, self.cert_file, self.host)
old_filter = self._filter
def new_filter(request):
return filter(request, old_filter)
res._filter = new_filter
return res
def set_proxy(self, host, port, user=None, password=None):
'''
Sets the proxy server host and port for the HTTP CONNECT Tunnelling.
host: Address of the proxy. Ex: '192.168.0.100'
port: Port of the proxy. Ex: 6000
user: User for proxy authorization.
password: Password for proxy authorization.
'''
self._httpclient.set_proxy(host, port, user, password)
#--Helper functions --------------------------------------------------
def _perform_request(self, request):
try:
resp = self._filter(request)
except HTTPError as ex:
return _management_error_handler(ex)
return resp
def METHOD_NAME(self, path, response_type):
request = HTTPRequest()
request.method = 'GET'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
return response
def _perform_put(self, path, body, async=False):
request = HTTPRequest()
request.method = 'PUT'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if async:
return _parse_response_for_async_op(response)
return None
def _perform_post(self, path, body, response_type=None, async=False):
request = HTTPRequest()
request.method = 'POST'
request.host = self.host
request.path = path
request.body = _get_request_body(body)
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if response_type is not None:
return _parse_response(response, response_type)
if async:
return _parse_response_for_async_op(response)
return None
def _perform_delete(self, path, async=False):
request = HTTPRequest()
request.method = 'DELETE'
request.host = self.host
request.path = path
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_management_header(request)
response = self._perform_request(request)
if async:
return _parse_response_for_async_op(response)
return None
def _get_path(self, resource, name):
path = '/' + self.subscription_id + '/' + resource
if name is not None:
path += '/' + _str(name)
return path
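A self-contained sketch of the filter-chaining pattern that with_filter builds, following the contract described in its docstring; the dict-based request and response are stand-ins for the real HTTPRequest/response objects.

def base_send(request):
    # Stand-in for the client's underlying perform_request.
    return {'status': 200, 'request': request}

def logging_filter(request, next_filter):
    print('->', request)              # pre-processing
    response = next_filter(request)   # hand off to the next filter in the chain
    print('<-', response['status'])   # post-processing
    return response

# Equivalent to what with_filter constructs: new_filter wraps the old filter.
old_filter = base_send
new_filter = lambda request: logging_filter(request, old_filter)
assert new_filter({'method': 'GET', 'path': '/services'})['status'] == 200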
305 |
# Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Local deployments connected to a docker MySQL server."""
import logging
import time
from typing import Optional
import docker.errors as docker_errors
from docker.models.containers import Container
from tests.harness.deployment.base import (
DEPLOYMENT_START_TIMEOUT,
MYSQL_DEFAULT_PASSWORD,
MYSQL_DEFAULT_PORT,
MYSQL_DOCKER_IMAGE,
BaseTestDeployment,
)
from tests.harness.model import (
DeploymentConfig,
DeploymentSetup,
DeploymentStoreConfig,
DeploymentType,
)
MYSQL_DOCKER_CONTAINER_NAME_PREFIX = "zenml-mysql-"
class LocalDockerTestDeployment(BaseTestDeployment):
"""A deployment that uses a MySQL Docker container to host the ZenML database."""
def __init__(self, config: DeploymentConfig) -> None:
"""Initializes the deployment.
Args:
config: The deployment configuration.
"""
super().__init__(config)
@property
def container_name(self) -> str:
"""The name of the MySQL container.
Returns:
The name of the MySQL container.
"""
return f"{MYSQL_DOCKER_CONTAINER_NAME_PREFIX}{self.config.name}"
@property
def METHOD_NAME(self) -> Optional[Container]:
"""Returns the Docker container configured for the deployment.
Returns:
The container for the deployment if it exists, None otherwise.
"""
try:
return self.docker_client.containers.get(self.container_name)
except docker_errors.NotFound:
return None
@property
def is_running(self) -> bool:
"""Returns whether the deployment is running.
Returns:
Whether the deployment is running.
"""
# Check if container exists and is running
METHOD_NAME = self.METHOD_NAME
if METHOD_NAME and METHOD_NAME.status == "running":
return True
return False
def up(self) -> None:
"""Starts up the deployment.
Raises:
RuntimeError: If the deployment could not be started.
"""
from zenml.utils.networking_utils import scan_for_available_port
if self.is_running:
logging.info(
f"Deployment '{self.config.name}' is already running. "
f"Skipping provisioning."
)
return
# Cleanup a previous deployment in a failed state
self.down()
port = scan_for_available_port(MYSQL_DEFAULT_PORT)
if port is None:
raise RuntimeError("Could not find an available port for MySQL.")
self.docker_client.containers.run(
name=self.container_name,
image=MYSQL_DOCKER_IMAGE,
detach=True,
environment={"MYSQL_ROOT_PASSWORD": MYSQL_DEFAULT_PASSWORD},
# Enable the primary key requirement for MySQL to catch errors related to
# missing primary keys.
command=["--sql_require_primary_key=on"],
remove=True,
auto_remove=True,
ports={MYSQL_DEFAULT_PORT: port},
labels={
"zenml-test": "true",
},
extra_hosts={"host.docker.internal": "host-gateway"},
)
timeout = DEPLOYMENT_START_TIMEOUT
while True:
logging.info(
f"Trying to connect to deployment '{self.config.name}'..."
)
try:
with self.connect() as client:
_ = client.zen_store
break
except RuntimeError as e:
timeout -= 1
if timeout == 0:
raise RuntimeError(
f"Timed out waiting for the '{self.config.name}' "
f"deployment to start: {e}"
) from e
time.sleep(1)
logging.info(
f"Started container '{self.container_name}' "
f"for deployment '{self.config.name}'."
)
def down(self) -> None:
"""Tears down the deployment."""
METHOD_NAME = self.METHOD_NAME
if METHOD_NAME is None:
logging.info(
f"Deployment '{self.config.name}' is no longer running. "
)
return
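# Stop the container first; once Docker reports it as 'exited', remove it. With
# auto_remove enabled the container may also disappear on its own, which ends the
# loop when the lookup returns None.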
while True:
if METHOD_NAME.status == "running":
logging.info(
f"Stopping container '{self.container_name}' "
f"for deployment '{self.config.name}'."
)
METHOD_NAME.stop()
elif METHOD_NAME.status == "exited":
logging.info(
f"Removing container '{self.container_name}' "
f"for deployment '{self.config.name}'."
)
METHOD_NAME.remove()
time.sleep(1)
METHOD_NAME = self.METHOD_NAME
if METHOD_NAME is None:
break
logging.info(f"Container '{self.container_name}' has been removed.")
def get_store_config(self) -> Optional[DeploymentStoreConfig]:
"""Returns the store config for the deployment.
Returns:
The store config for the deployment if it is running, None
otherwise.
Raises:
RuntimeError: If the deployment is not running.
"""
if not self.is_running:
raise RuntimeError(
f"The {self.config.name} deployment is not running."
)
METHOD_NAME = self.METHOD_NAME
# Guaranteed to be non-None by the is_running check
assert METHOD_NAME is not None
try:
port = int(
METHOD_NAME.ports[f"{MYSQL_DEFAULT_PORT}/tcp"][0]["HostPort"]
)
except (KeyError, IndexError):
raise RuntimeError(
f"Could not find the port for the '{self.config.name}' "
f"deployment."
)
return DeploymentStoreConfig(
url=f"mysql://root:{MYSQL_DEFAULT_PASSWORD}@127.0.0.1:{port}/zenml"
)
LocalDockerTestDeployment.register_deployment_class(
type=DeploymentType.LOCAL, setup=DeploymentSetup.DOCKER
)
306 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkpolardb.endpoint import endpoint_data
class DescribeDBClustersRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'polardb', '2017-08-01', 'DescribeDBClusters','polardb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBClusterDescription(self): # String
return self.get_query_params().get('DBClusterDescription')
def set_DBClusterDescription(self, DBClusterDescription): # String
self.add_query_param('DBClusterDescription', DBClusterDescription)
def get_DBClusterStatus(self): # String
return self.get_query_params().get('DBClusterStatus')
def set_DBClusterStatus(self, DBClusterStatus): # String
self.add_query_param('DBClusterStatus', DBClusterStatus)
def get_ConnectionString(self): # String
return self.get_query_params().get('ConnectionString')
def set_ConnectionString(self, ConnectionString): # String
self.add_query_param('ConnectionString', ConnectionString)
def METHOD_NAME(self): # Integer
return self.get_query_params().get('RecentExpirationInterval')
def set_RecentExpirationInterval(self, RecentExpirationInterval): # Integer
self.add_query_param('RecentExpirationInterval', RecentExpirationInterval)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_DBNodeIds(self): # String
return self.get_query_params().get('DBNodeIds')
def set_DBNodeIds(self, DBNodeIds): # String
self.add_query_param('DBNodeIds', DBNodeIds)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_RecentCreationInterval(self): # Integer
return self.get_query_params().get('RecentCreationInterval')
def set_RecentCreationInterval(self, RecentCreationInterval): # Integer
self.add_query_param('RecentCreationInterval', RecentCreationInterval)
def get_Expired(self): # Boolean
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # Boolean
self.add_query_param('Expired', Expired)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
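# Flatten the list of {'Key': ..., 'Value': ...} dicts into indexed query
# parameters: Tag.1.Key, Tag.1.Value, Tag.2.Key, ...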
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBType(self): # String
return self.get_query_params().get('DBType')
def set_DBType(self, DBType): # String
self.add_query_param('DBType', DBType)
def get_DBVersion(self): # String
return self.get_query_params().get('DBVersion')
def set_DBVersion(self, DBVersion): # String
self.add_query_param('DBVersion', DBVersion)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
def get_DBClusterIds(self): # String
return self.get_query_params().get('DBClusterIds')
def set_DBClusterIds(self, DBClusterIds): # String
self.add_query_param('DBClusterIds', DBClusterIds)
307 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class GetTaskListFilterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'GetTaskListFilter')
self.set_method('POST')
def get_InvolveMembers(self): # String
return self.get_body_params().get('InvolveMembers')
def set_InvolveMembers(self, InvolveMembers): # String
self.add_body_params('InvolveMembers', InvolveMembers)
def get_ExecutorId(self): # String
return self.get_body_params().get('ExecutorId')
def set_ExecutorId(self, ExecutorId): # String
self.add_body_params('ExecutorId', ExecutorId)
def get_OrderCondition(self): # String
return self.get_body_params().get('OrderCondition')
def set_OrderCondition(self, OrderCondition): # String
self.add_body_params('OrderCondition', OrderCondition)
def get_SprintId(self): # String
return self.get_body_params().get('SprintId')
def set_SprintId(self, SprintId): # String
self.add_body_params('SprintId', SprintId)
def get_Extra(self): # String
return self.get_body_params().get('Extra')
def set_Extra(self, Extra): # String
self.add_body_params('Extra', Extra)
def get_PageSize(self): # Integer
return self.get_body_params().get('PageSize')
def METHOD_NAME(self, PageSize): # Integer
self.add_body_params('PageSize', PageSize)
def get_ScenarioFieldConfigId(self): # String
return self.get_body_params().get('ScenarioFieldConfigId')
def set_ScenarioFieldConfigId(self, ScenarioFieldConfigId): # String
self.add_body_params('ScenarioFieldConfigId', ScenarioFieldConfigId)
def get_IsDone(self): # Boolean
return self.get_body_params().get('IsDone')
def set_IsDone(self, IsDone): # Boolean
self.add_body_params('IsDone', IsDone)
def get_ObjectType(self): # String
return self.get_body_params().get('ObjectType')
def set_ObjectType(self, ObjectType): # String
self.add_body_params('ObjectType', ObjectType)
def get_ProjectId(self): # String
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # String
self.add_body_params('ProjectId', ProjectId)
def get_PageToken(self): # String
return self.get_body_params().get('PageToken')
def set_PageToken(self, PageToken): # String
self.add_body_params('PageToken', PageToken)
def get_Order(self): # String
return self.get_body_params().get('Order')
def set_Order(self, Order): # String
self.add_body_params('Order', Order)
def get_TagId(self): # String
return self.get_body_params().get('TagId')
def set_TagId(self, TagId): # String
self.add_body_params('TagId', TagId)
def get_TaskFlowStatusId(self): # String
return self.get_body_params().get('TaskFlowStatusId')
def set_TaskFlowStatusId(self, TaskFlowStatusId): # String
self.add_body_params('TaskFlowStatusId', TaskFlowStatusId)
def get_DueDateStart(self): # String
return self.get_body_params().get('DueDateStart')
def set_DueDateStart(self, DueDateStart): # String
self.add_body_params('DueDateStart', DueDateStart)
def get_CreatorId(self): # String
return self.get_body_params().get('CreatorId')
def set_CreatorId(self, CreatorId): # String
self.add_body_params('CreatorId', CreatorId)
def get_Priority(self): # String
return self.get_body_params().get('Priority')
def set_Priority(self, Priority): # String
self.add_body_params('Priority', Priority)
def get_DueDateEnd(self): # String
return self.get_body_params().get('DueDateEnd')
def set_DueDateEnd(self, DueDateEnd): # String
self.add_body_params('DueDateEnd', DueDateEnd)
def get_OrgId(self): # String
return self.get_body_params().get('OrgId')
def set_OrgId(self, OrgId): # String
self.add_body_params('OrgId', OrgId)
def get_Name(self): # String
return self.get_body_params().get('Name')
def set_Name(self, Name): # String
self.add_body_params('Name', Name)
308 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkcloudapi.endpoint import endpoint_data
class ModifyApiRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'CloudAPI', '2016-07-14', 'ModifyApi','apigateway')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_WebSocketApiType(self): # String
return self.get_query_params().get('WebSocketApiType')
def set_WebSocketApiType(self, WebSocketApiType): # String
self.add_query_param('WebSocketApiType', WebSocketApiType)
def get_ErrorCodeSamples(self): # String
return self.get_query_params().get('ErrorCodeSamples')
def set_ErrorCodeSamples(self, ErrorCodeSamples): # String
self.add_query_param('ErrorCodeSamples', ErrorCodeSamples)
def get_AppCodeAuthType(self): # String
return self.get_query_params().get('AppCodeAuthType')
def set_AppCodeAuthType(self, AppCodeAuthType): # String
self.add_query_param('AppCodeAuthType', AppCodeAuthType)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_DisableInternet(self): # Boolean
return self.get_query_params().get('DisableInternet')
def set_DisableInternet(self, DisableInternet): # Boolean
self.add_query_param('DisableInternet', DisableInternet)
def get_BackendId(self): # String
return self.get_query_params().get('BackendId')
def set_BackendId(self, BackendId): # String
self.add_query_param('BackendId', BackendId)
def get_ConstantParameters(self): # String
return self.get_query_params().get('ConstantParameters')
def set_ConstantParameters(self, ConstantParameters): # String
self.add_query_param('ConstantParameters', ConstantParameters)
def get_AuthType(self): # String
return self.get_query_params().get('AuthType')
def set_AuthType(self, AuthType): # String
self.add_query_param('AuthType', AuthType)
def get_AllowSignatureMethod(self): # String
return self.get_query_params().get('AllowSignatureMethod')
def set_AllowSignatureMethod(self, AllowSignatureMethod): # String
self.add_query_param('AllowSignatureMethod', AllowSignatureMethod)
def get_ServiceParameters(self): # String
return self.get_query_params().get('ServiceParameters')
def set_ServiceParameters(self, ServiceParameters): # String
self.add_query_param('ServiceParameters', ServiceParameters)
def get_FailResultSample(self): # String
return self.get_query_params().get('FailResultSample')
def set_FailResultSample(self, FailResultSample): # String
self.add_query_param('FailResultSample', FailResultSample)
def get_SystemParameters(self): # String
return self.get_query_params().get('SystemParameters')
def set_SystemParameters(self, SystemParameters): # String
self.add_query_param('SystemParameters', SystemParameters)
def get_ServiceParametersMap(self): # String
return self.get_query_params().get('ServiceParametersMap')
def set_ServiceParametersMap(self, ServiceParametersMap): # String
self.add_query_param('ServiceParametersMap', ServiceParametersMap)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_OpenIdConnectConfig(self): # String
return self.get_query_params().get('OpenIdConnectConfig')
def set_OpenIdConnectConfig(self, OpenIdConnectConfig): # String
self.add_query_param('OpenIdConnectConfig', OpenIdConnectConfig)
def get_RequestParameters(self): # String
return self.get_query_params().get('RequestParameters')
def set_RequestParameters(self, RequestParameters): # String
self.add_query_param('RequestParameters', RequestParameters)
def get_ResultDescriptions(self): # String
return self.get_query_params().get('ResultDescriptions')
def set_ResultDescriptions(self, ResultDescriptions): # String
self.add_query_param('ResultDescriptions', ResultDescriptions)
def get_Visibility(self): # String
return self.get_query_params().get('Visibility')
def set_Visibility(self, Visibility): # String
self.add_query_param('Visibility', Visibility)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_ServiceConfig(self): # String
return self.get_query_params().get('ServiceConfig')
def set_ServiceConfig(self, ServiceConfig): # String
self.add_query_param('ServiceConfig', ServiceConfig)
def get_ResultType(self): # String
return self.get_query_params().get('ResultType')
def set_ResultType(self, ResultType): # String
self.add_query_param('ResultType', ResultType)
def get_ApiName(self): # String
return self.get_query_params().get('ApiName')
def set_ApiName(self, ApiName): # String
self.add_query_param('ApiName', ApiName)
def get_ResultSample(self): # String
return self.get_query_params().get('ResultSample')
def set_ResultSample(self, ResultSample): # String
self.add_query_param('ResultSample', ResultSample)
def get_BackendEnable(self): # Boolean
return self.get_query_params().get('BackendEnable')
def set_BackendEnable(self, BackendEnable): # Boolean
self.add_query_param('BackendEnable', BackendEnable)
def get_ForceNonceCheck(self): # Boolean
return self.get_query_params().get('ForceNonceCheck')
def set_ForceNonceCheck(self, ForceNonceCheck): # Boolean
self.add_query_param('ForceNonceCheck', ForceNonceCheck)
def METHOD_NAME(self): # String
return self.get_query_params().get('RequestConfig')
def set_RequestConfig(self, RequestConfig): # String
self.add_query_param('RequestConfig', RequestConfig)
def get_ResultBodyModel(self): # String
return self.get_query_params().get('ResultBodyModel')
def set_ResultBodyModel(self, ResultBodyModel): # String
self.add_query_param('ResultBodyModel', ResultBodyModel)
def get_ApiId(self): # String
return self.get_query_params().get('ApiId')
def set_ApiId(self, ApiId): # String
self.add_query_param('ApiId', ApiId)
309 | """
Mixins for Ratable model managers and serializers.
"""
import logging
from typing import Type
from sqlalchemy.sql.expression import func
from galaxy.model import ItemRatingAssociation
from galaxy.model.base import transaction
from . import base
log = logging.getLogger(__name__)
class RatableManagerMixin:
rating_assoc: Type[ItemRatingAssociation]
def METHOD_NAME(self, item, user, as_int=True):
"""Returns the integer rating given to this item by the user.
Returns the full rating model if `as_int` is False.
"""
METHOD_NAME = self.query_associated(self.rating_assoc, item).filter_by(user=user).first()
# most common case is assumed to be 'get the number'
if not as_int:
return METHOD_NAME
# get the int value if there's a rating
return METHOD_NAME.METHOD_NAME if METHOD_NAME is not None else None
def ratings(self, item):
"""Returns a list of all rating values given to this item."""
return [r.METHOD_NAME for r in item.ratings]
def ratings_avg(self, item):
"""Returns the average of all ratings given to this item."""
foreign_key = self._foreign_key(self.rating_assoc)
avg = self.session().query(func.avg(self.rating_assoc.METHOD_NAME)).filter(foreign_key == item).scalar()
return avg or 0.0
def ratings_count(self, item):
"""Returns the number of ratings given to this item."""
foreign_key = self._foreign_key(self.rating_assoc)
return self.session().query(func.count(self.rating_assoc.METHOD_NAME)).filter(foreign_key == item).scalar()
def rate(self, item, user, value, flush=True):
"""Updates or creates a rating for this item and user. Returns the rating"""
# TODO?: possible generic update_or_create
# TODO?: update and create to RatingsManager (if not overkill)
METHOD_NAME = self.METHOD_NAME(item, user, as_int=False)
if not METHOD_NAME:
METHOD_NAME = self.rating_assoc(user, item)
self.associate(METHOD_NAME, item)
METHOD_NAME.METHOD_NAME = value
self.session().add(METHOD_NAME)
if flush:
session = self.session()
with transaction(session):
session.commit()
return METHOD_NAME
# TODO?: all ratings for a user
class RatableSerializerMixin:
def add_serializers(self):
self.serializers["user_rating"] = self.serialize_user_rating
self.serializers["community_rating"] = self.serialize_community_rating
def serialize_user_rating(self, item, key, user=None, **context):
"""Returns the integer rating given to this item by the user."""
if not user:
raise base.ModelSerializingError(
"user_rating requires a user", model_class=self.manager.model_class, id=self.serialize_id(item, "id")
)
return self.manager.METHOD_NAME(item, user)
def serialize_community_rating(self, item, key, **context):
"""
Returns a dictionary containing:
`average` the (float) average of all ratings of this object
`count` the number of ratings
"""
# ??: seems like two queries (albeit in-sql functions) would be slower
# than getting the rows and calc'ing both here with one query
manager = self.manager
return {
"average": manager.ratings_avg(item),
"count": manager.ratings_count(item),
}
class RatableDeserializerMixin:
def add_deserializers(self):
self.deserializers["user_rating"] = self.deserialize_rating
def deserialize_rating(self, item, key, val, user=None, **context):
if not user:
raise base.ModelDeserializingError(
"user_rating requires a user", model_class=self.manager.model_class, id=self.serialize_id(item, "id")
)
val = self.validate.int_range(key, val, 0, 5)
return self.manager.rate(item, user, val, flush=False)
class RatableFilterMixin:
def _ratings_avg_accessor(self, item):
return self.manager.ratings_avg(item)
def _add_parsers(self):
"""
Adds the following filters:
`community_rating`: filter
"""
self.fn_filter_parsers.update(
{
"community_rating": {
"op": {
"eq": lambda i, v: self._ratings_avg_accessor(i) == v,
# TODO: default to greater than (currently 'eq' due to base/controller.py)
"ge": lambda i, v: self._ratings_avg_accessor(i) >= v,
"le": lambda i, v: self._ratings_avg_accessor(i) <= v,
},
"val": float,
}
}
)
310 |
import os
import tempfile
from galaxy.tool_util.parser import get_tool_source
from galaxy.util.compression_utils import CompressedFile
from galaxy.util.resources import resource_path
from galaxy_test.base import api_asserts
from tool_shed.test.base.populators import repo_tars
from ..base.api import ShedApiTestCase
COLUMN_MAKER_PATH = resource_path(__package__, "../test_data/column_maker/column_maker.tar")
class TestShedRepositoriesApi(ShedApiTestCase):
def test_create(self):
populator = self.populator
category_id = populator.new_category(prefix="testcreate").id
repos_by_category = populator.repositories_by_category(category_id)
repos = repos_by_category.repositories
assert len(repos) == 0
populator.new_repository(category_id)
repos_by_category = populator.repositories_by_category(category_id)
repos = repos_by_category.repositories
assert len(repos) == 1
def test_update_repository(self):
populator = self.populator
prefix = "testupdate"
category_id = populator.new_category(prefix=prefix).id
repository = populator.new_repository(category_id, prefix=prefix)
repository_id = repository.id
repository_update = populator.upload_revision(
repository_id,
COLUMN_MAKER_PATH,
)
assert repository_update.is_ok
# used by getRepository in TS client.
def test_metadata_simple(self):
populator = self.populator
repository = populator.setup_column_maker_repo(prefix="repoformetadata")
repository_metadata = populator.get_metadata(repository)
metadata_for_revisions = repository_metadata.__root__
assert len(metadata_for_revisions) == 1
only_key = list(metadata_for_revisions.keys())[0]
assert only_key.startswith("0:")
only_revision = list(metadata_for_revisions.values())[0]
assert only_revision
assert only_revision.downloadable
assert not only_revision.malicious
def test_index_simple(self):
populator = self.populator
repo = populator.setup_column_maker_repo(prefix="repoforindex")
repository_id = repo.id
show_response = self.api_interactor.get(f"repositories/{repository_id}")
index_response = self.api_interactor.get("repositories")
api_asserts.assert_status_code_is_ok(show_response)
api_asserts.assert_status_code_is_ok(index_response)
repository_ids = [r["id"] for r in index_response.json()]
assert repository_id in repository_ids
repository = self.populator.get_repository_for(repo.owner, repo.name)
assert repository.owner == repo.owner
assert repository.name == repo.name
def test_install_info(self):
# Actually installing requires a whole Galaxy setup and the install manager, but
# we can test that the response validates against the future-facing InstallInfo pydantic
# models.
populator = self.populator
repo = populator.setup_column_maker_and_get_metadata(prefix="repoforinstallinfo")
populator.get_install_info(repo)
def test_get_ordered_installable_revisions(self):
# Used in ephemeris...
populator = self.populator
repository = populator.setup_column_maker_repo(prefix="repoforindex")
assert repository.owner
assert repository.name
revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)
assert len(revisions.__root__) == 1
def test_reset_on_repository(self):
populator = self.populator
repository = populator.setup_column_maker_repo(prefix="repoforreseta")
assert repository.owner
assert repository.name
revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)
assert len(revisions.__root__) == 1
metadata_response = populator.reset_metadata(repository)
assert metadata_response.start_time
assert metadata_response.stop_time
assert metadata_response.status == "ok"
assert len(metadata_response.repository_status) == 1
revisions = populator.get_ordered_installable_revisions(repository.owner, repository.name)
assert len(revisions.__root__) == 1
def test_repository_search(self):
populator = self.populator
repository = populator.setup_column_maker_repo(prefix="repoforreposearch")
populator.reindex()
results = populator.repo_search_query("repoforreposearch")
assert len(results.hits) == 1
first_hit = results.hits[0]
assert first_hit.repository.name == repository.name
assert first_hit.repository.times_downloaded == 0
def test_repo_tars(self):
for index, repo_path in enumerate(repo_tars("column_maker")):
path = CompressedFile(repo_path).extract(tempfile.mkdtemp())
tool_xml_path = os.path.join(path, "column_maker.xml")
tool_source = get_tool_source(config_file=tool_xml_path)
tool_version = tool_source.parse_version()
if index == 0:
assert tool_version == "1.1.0"
elif index == 1:
assert tool_version == "1.2.0"
elif index == 2:
assert tool_version == "1.3.0"
else:
raise AssertionError("Wrong number of repo tars returned...")
def test_reset_on_simple_repository(self):
populator = self.populator
repository = populator.setup_test_data_repo("column_maker")
populator.assert_has_n_installable_revisions(repository, 3)
response = self.api_interactor.post(
"repositories/reset_metadata_on_repository", data={"repository_id": repository.id}
)
api_asserts.assert_status_code_is_ok(response)
populator.assert_has_n_installable_revisions(repository, 3)
def METHOD_NAME(self):
populator = self.populator
# setup a repository with 4 revisions but only 3 installable ones due to no version change in a tool
repository = populator.setup_test_data_repo("column_maker_with_download_gaps")
populator.assert_has_n_installable_revisions(repository, 3)
response = self.api_interactor.post(
"repositories/reset_metadata_on_repository", data={"repository_id": repository.id}
)
api_asserts.assert_status_code_is_ok(response)
populator.assert_has_n_installable_revisions(repository, 3)
def test_reset_all(self):
populator = self.populator
repository = populator.setup_test_data_repo("column_maker_with_download_gaps")
populator.assert_has_n_installable_revisions(repository, 3)
# Resetting one at a time or resetting everything via the web controllers works...
# resetting all at once via the API does not work - it breaks the repository
response = self.api_interactor.post(
"repositories/reset_metadata_on_repositories",
data={"payload": "can not be empty because bug in controller"},
)
api_asserts.assert_status_code_is_ok(response)
populator.assert_has_n_installable_revisions(repository, 3)
311 |
import unittest
import torch
import torch.nn as nn
import torchvision
from lightly.models import NNCLR
from lightly.models.modules import NNMemoryBankModule
def METHOD_NAME(name: str):
if name == "resnet18":
return torchvision.models.resnet18()
elif name == "resnet50":
return torchvision.models.resnet50()
raise NotImplementedError
def get_backbone(model: nn.Module):
backbone = torch.nn.Sequential(*(list(model.children())[:-1]))
return backbone
class TestNNCLR(unittest.TestCase):
def setUp(self):
self.resnet_variants = dict(
resnet18=dict(
num_ftrs=512,
proj_hidden_dim=512,
pred_hidden_dim=128,
out_dim=512,
),
resnet50=dict(
num_ftrs=2048,
proj_hidden_dim=2048,
pred_hidden_dim=512,
out_dim=2048,
),
)
self.batch_size = 2
self.input_tensor = torch.rand((self.batch_size, 3, 32, 32))
def test_create_variations_cpu(self):
for model_name, config in self.resnet_variants.items():
resnet = METHOD_NAME(model_name)
model = NNCLR(get_backbone(resnet), **config)
self.assertIsNotNone(model)
def test_create_variations_gpu(self):
if not torch.cuda.is_available():
return
for model_name, config in self.resnet_variants.items():
resnet = METHOD_NAME(model_name)
model = NNCLR(get_backbone(resnet), **config).to("cuda")
self.assertIsNotNone(model)
def test_feature_dim_configurable(self):
device = "cuda" if torch.cuda.is_available() else "cpu"
for model_name, config in self.resnet_variants.items():
resnet = METHOD_NAME(model_name)
model = NNCLR(get_backbone(resnet), **config).to(device)
# check that feature vector has correct dimension
with torch.no_grad():
out_features = model.backbone(self.input_tensor.to(device))
self.assertEqual(out_features.shape[1], config["num_ftrs"])
# check that projection head output has right dimension
with torch.no_grad():
out_projection = model.projection_mlp(out_features.squeeze())
self.assertEqual(out_projection.shape[1], config["out_dim"])
# check that prediction head output has right dimension
with torch.no_grad():
out_prediction = model.prediction_mlp(out_projection.squeeze())
self.assertEqual(out_prediction.shape[1], config["out_dim"])
def test_tuple_input(self):
device = "cuda" if torch.cuda.is_available() else "cpu"
for model_name, config in self.resnet_variants.items():
resnet = METHOD_NAME(model_name)
model = NNCLR(get_backbone(resnet), **config).to(device)
x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
out = model(x0)
self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
out, features = model(x0, return_features=True)
self.assertEqual(out[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out[1].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(features.shape, (self.batch_size, config["num_ftrs"]))
out0, out1 = model(x0, x1)
self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
(out0, f0), (out1, f1) = model(x0, x1, return_features=True)
self.assertEqual(out0[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out0[1].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out1[0].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(out1[1].shape, (self.batch_size, config["out_dim"]))
self.assertEqual(f0.shape, (self.batch_size, config["num_ftrs"]))
self.assertEqual(f1.shape, (self.batch_size, config["num_ftrs"]))
def test_memory_bank(self):
device = "cuda" if torch.cuda.is_available() else "cpu"
for model_name, config in self.resnet_variants.items():
resnet = METHOD_NAME(model_name)
model = NNCLR(get_backbone(resnet), **config).to(device)
for nn_size in [2**3, 2**8]:
nn_replacer = NNMemoryBankModule(size=nn_size)
with torch.no_grad():
for i in range(10):
x0 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
x1 = torch.rand((self.batch_size, 3, 64, 64)).to(device)
(z0, p0), (z1, p1) = model(x0, x1)
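                        # Swap each projection for its nearest neighbour from the memory bank;
                        # only the update=True call pushes the new batch into the bank.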
z0 = nn_replacer(z0.detach(), update=False)
z1 = nn_replacer(z1.detach(), update=True) | null |
312 | import pytest
import BioSimSpace.Sandpit.Exscientia.Types as Types
import BioSimSpace.Sandpit.Exscientia.Units as Units
@pytest.mark.parametrize(
"string, dimensions",
[
("kilo Cal oriEs per Mole / angstrom **2", (0, 0, 0, 1, -1, 0, -2)),
("k Cal_per _mOl / nm^2", (0, 0, 0, 1, -1, 0, -2)),
("kj p eR moles / pico METERs2", (0, 0, 0, 1, -1, 0, -2)),
("coul oMbs / secs * ATm os phereS", (0, 1, -1, 1, 0, 0, -3)),
("pm**3 * rads * de grEE", (2, 0, 3, 0, 0, 0, 0)),
],
)
def test_supported_units(string, dimensions):
"""Test that we can create GeneralUnit objects with the correct dimensions
by evaluating strings as unit based algebraic expressions.
"""
# Try to create the GeneralUnit from the string.
general_unit = Types._GeneralUnit(string)
# Assert that the dimensions match.
assert general_unit.dimensions() == dimensions
@pytest.mark.parametrize(
"string, matching_type",
[
("radian * degree**2 / radian^2", Types.Angle),
("angstrom**3 / nanometer", Types.Area),
("coulombs * angstrom**-2 * nanometer**2", Types.Charge),
("(kcal_per_mol / angstrom**2) * nanometer**2", Types.Energy),
("angstrom**3 * nanometer^-1 / picometer", Types.Length),
("bar * kJ_per_mol**2 / (kcal_per_mol * kJ_per_mol)", Types.Pressure),
("coulomb * kelvin^-3 * celsius**2 * kelvin^2 / e_charge", Types.Temperature),
("nanoseconds^3 * kelvin^-3 * celsius**3 / milliseconds**2", Types.Time),
("angstroms cubed * atm^-3 * bar**3", Types.Volume),
],
)
def test_type_conversion(string, matching_type):
"""Test that GeneralUnit objects can be converted to a type with matching
dimensions.
"""
# Try to create the GeneralUnit from the string.
general_unit = Types._GeneralUnit(string)
# Assert that the types match.
assert type(general_unit) is matching_type
@pytest.mark.parametrize(
"string, default_unit",
[
("degree", Units.Angle.radian),
("meters2", Units.Area.angstrom2),
("coulombs", Units.Charge.electron_charge),
("kJ_per_mol", Units.Energy.kcal_per_mol),
("nanometer", Units.Length.angstrom),
("bar", Units.Pressure.atm),
("fahrenheit", Units.Temperature.kelvin),
("days", Units.Time.nanosecond),
("picometers**3", Units.Volume.angstrom3),
],
)
def test_default_conversion(string, default_unit):
"""Test that GeneralUnit objects are always converted to the default
unit for that type.
"""
# Try to create the GeneralUnit from the string.
general_unit = Types._GeneralUnit(string)
# Assert that units match.
assert general_unit.unit() == default_unit.unit()
@pytest.mark.parametrize(
"unit_type",
[
Units.Angle.radian,
Units.Area.angstrom2,
Units.Charge.electron_charge,
Units.Energy.kcal_per_mol,
Units.Length.angstrom,
Units.Pressure.atm,
Units.Temperature.kelvin,
Units.Time.nanosecond,
Units.Volume.angstrom3,
],
)
def test_pos_pow(unit_type):
"""Test that unit-based types can be raised to positive powers."""
# Store the dimensions associated with the original type.
old_dimensions = unit_type.dimensions()
# Square the unit-based type.
unit_type = unit_type**2
# Store the new dimensions.
new_dimensions = unit_type.dimensions()
# Each dimension entry should be twice the old value.
for d0, d1 in zip(old_dimensions, new_dimensions):
assert d1 == 2 * d0
@pytest.mark.parametrize(
"unit_type",
[
Units.Angle.radian,
Units.Area.angstrom2,
Units.Charge.electron_charge,
Units.Energy.kcal_per_mol,
Units.Length.angstrom,
Units.Pressure.atm,
Units.Temperature.kelvin,
Units.Time.nanosecond,
Units.Volume.angstrom3,
],
)
def METHOD_NAME(unit_type):
"""Test that unit-based types can be raised to negative powers."""
# Store the dimensions associated with the original type.
old_dimensions = unit_type.dimensions()
# Invert the unit-based type.
unit_type = unit_type**-1
# Store the new dimensions.
new_dimensions = unit_type.dimensions()
# Each dimension entry should be the inverse of the old value.
for d0, d1 in zip(old_dimensions, new_dimensions):
assert d1 == -d0
@pytest.mark.parametrize(
"string",
[
"degree",
"meters2",
"coulombs",
"kJ_per_mol",
"nanometer",
"bar",
"fahrenheit",
"days",
"picometers**3",
],
)
def test_dimensionless(string):
"""Test that GeneralUnit objects convert to dimensionless float values
    when divided by themselves.
"""
# Try to create the GeneralUnit from the string.
general_unit = Types._GeneralUnit(string)
# Check that we get back a float when divided by itself.
assert isinstance(general_unit / general_unit, float)
def test_dimensionless_value():
"""Check that conversion to a dimensionless unit preserves the value
of the unit conversion.
"""
value = (Units.Energy.kcal_per_mol / Units.Length.angstrom**2) / (
Units.Energy.kj_per_mol / Units.Length.nanometer**2
)
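    # 1 kcal = 4.184 kJ and 1 nm^2 = 100 angstrom^2, so the ratio is 4.184 * 100 = 418.4.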
assert value == pytest.approx(418.4)
def test_value_and_unit():
"""
Regression test to make sure that a general unit with a value and unit can
be parsed correctly.
"""
general_unit = Types._GeneralUnit(2, "kcal per mol / angstrom**2") | null |
313 | #!/usr/bin/env python3
# Copyright (c) 2014-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there is a double-spend conflict."""
from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
find_output,
)
class TxnMallTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
self.supports_cli = False
def METHOD_NAME(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super().setup_network()
self.disconnect_nodes(1, 2)
def run_test(self):
# All nodes should start with 1,250 BTC:
starting_balance = 1250
# All nodes should be out of IBD.
# If the nodes are not all out of IBD, that can interfere with
# blockchain sync later in the test when nodes are connected, due to
# timing issues.
for n in self.nodes:
assert n.getblockchaininfo()["initialblockdownload"] == False
for i in range(3):
assert_equal(self.nodes[i].getbalance(), {"bitcoin": starting_balance})
# Assign coins to foo and bar addresses:
node0_address_foo = self.nodes[0].getnewaddress()
fund_foo_txid = self.nodes[0].sendtoaddress(node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress()
fund_bar_txid = self.nodes[0].sendtoaddress(node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(),
{"bitcoin": starting_balance + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin']})
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress()
# First: use raw transaction API to send 1240 BTC to node1_address,
# but don't broadcast:
doublespend_fee = Decimal('-.02')
rawtx_input_0 = {}
rawtx_input_0["txid"] = fund_foo_txid
rawtx_input_0["vout"] = find_output(self.nodes[0], fund_foo_txid, 1219)
rawtx_input_1 = {}
rawtx_input_1["txid"] = fund_bar_txid
rawtx_input_1["vout"] = find_output(self.nodes[0], fund_bar_txid, 29)
inputs = [rawtx_input_0, rawtx_input_1]
change_address = self.nodes[0].getnewaddress()
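        # The explicit fee output is the inputs (1219 + 29 = 1248) minus the 1240 payment
        # and the change, which works out to -doublespend_fee = 0.02 BTC.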
outputs = [{node1_address: 1240}, {change_address: 1248 - 1240 + doublespend_fee}, {"fee": (1219+29) - (1240+1248 - 1240 + doublespend_fee)}]
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
doublespend = self.nodes[0].signrawtransactionwithwallet(rawtx)
assert_equal(doublespend["complete"], True)
        # Create two spends, each using one 50 BTC coin
txid1 = self.nodes[0].sendtoaddress(node1_address, 40)
txid2 = self.nodes[0].sendtoaddress(node1_address, 20)
# Have node0 mine a block:
if (self.options.mine_block):
self.nodes[0].generate(1)
self.sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Node0's balance should be starting balance, plus 50BTC for another
# matured block, minus 40, minus 20, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin']
if self.options.mine_block:
expected += 50
expected += tx1["amount"]['bitcoin'] + tx1["fee"]['bitcoin']
expected += tx2["amount"]['bitcoin'] + tx2["fee"]['bitcoin']
assert_equal(self.nodes[0].getbalance(), {"bitcoin": expected})
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance(), {"bitcoin": starting_balance - tx1["amount"]['bitcoin'] - tx2["amount"]['bitcoin']})
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Now give doublespend and its parents to miner:
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
doublespend_txid = self.nodes[2].sendrawtransaction(doublespend["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
self.connect_nodes(1, 2)
self.nodes[2].generate(1) # Mine another block to make sure we sync
self.sync_blocks()
assert_equal(self.nodes[0].gettransaction(doublespend_txid)["confirmations"], 2)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
# Both transactions should be conflicted
assert_equal(tx1["confirmations"], -2)
assert_equal(tx2["confirmations"], -2)
# Node0's total balance should be starting balance, plus 100BTC for
# two more matured blocks, minus 1240 for the double-spend, plus fees (which are
# negative):
expected = starting_balance + 100 - 1240 + fund_foo_tx["fee"]['bitcoin'] + fund_bar_tx["fee"]['bitcoin'] + doublespend_fee
assert_equal(self.nodes[0].getbalance()['bitcoin'], expected)
# Node1's balance should be its initial balance (1250 for 25 block rewards) plus the doublespend:
assert_equal(self.nodes[1].getbalance()['bitcoin'], 1250 + 1240)
if __name__ == '__main__':
TxnMallTest().main() | null |
314 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class UpdateHealthCheckTemplateAttributeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'UpdateHealthCheckTemplateAttribute','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_HealthCheckTimeout(self): # Integer
return self.get_query_params().get('HealthCheckTimeout')
def set_HealthCheckTimeout(self, HealthCheckTimeout): # Integer
self.add_query_param('HealthCheckTimeout', HealthCheckTimeout)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_HealthCheckProtocol(self): # String
return self.get_query_params().get('HealthCheckProtocol')
def set_HealthCheckProtocol(self, HealthCheckProtocol): # String
self.add_query_param('HealthCheckProtocol', HealthCheckProtocol)
def get_UnhealthyThreshold(self): # Integer
return self.get_query_params().get('UnhealthyThreshold')
def set_UnhealthyThreshold(self, UnhealthyThreshold): # Integer
self.add_query_param('UnhealthyThreshold', UnhealthyThreshold)
def get_HealthyThreshold(self): # Integer
return self.get_query_params().get('HealthyThreshold')
def set_HealthyThreshold(self, HealthyThreshold): # Integer
self.add_query_param('HealthyThreshold', HealthyThreshold)
def get_HealthCheckPath(self): # String
return self.get_query_params().get('HealthCheckPath')
def set_HealthCheckPath(self, HealthCheckPath): # String
self.add_query_param('HealthCheckPath', HealthCheckPath)
def get_HealthCheckCodes(self): # Array
return self.get_query_params().get('HealthCheckCodes')
def set_HealthCheckCodes(self, HealthCheckCodes): # Array
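        # Array values are flattened into 1-based query keys: HealthCheckCodes.1, HealthCheckCodes.2, ...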
for index1, value1 in enumerate(HealthCheckCodes):
self.add_query_param('HealthCheckCodes.' + str(index1 + 1), value1)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_HealthCheckMethod(self): # String
return self.get_query_params().get('HealthCheckMethod')
def METHOD_NAME(self, HealthCheckMethod): # String
self.add_query_param('HealthCheckMethod', HealthCheckMethod)
def get_HealthCheckHost(self): # String
return self.get_query_params().get('HealthCheckHost')
def set_HealthCheckHost(self, HealthCheckHost): # String
self.add_query_param('HealthCheckHost', HealthCheckHost)
def get_HealthCheckInterval(self): # Integer
return self.get_query_params().get('HealthCheckInterval')
def set_HealthCheckInterval(self, HealthCheckInterval): # Integer
self.add_query_param('HealthCheckInterval', HealthCheckInterval)
def get_HealthCheckTemplateName(self): # String
return self.get_query_params().get('HealthCheckTemplateName')
def set_HealthCheckTemplateName(self, HealthCheckTemplateName): # String
self.add_query_param('HealthCheckTemplateName', HealthCheckTemplateName)
def get_HealthCheckTemplateId(self): # String
return self.get_query_params().get('HealthCheckTemplateId')
def set_HealthCheckTemplateId(self, HealthCheckTemplateId): # String
self.add_query_param('HealthCheckTemplateId', HealthCheckTemplateId)
def get_HealthCheckHttpVersion(self): # String
return self.get_query_params().get('HealthCheckHttpVersion')
def set_HealthCheckHttpVersion(self, HealthCheckHttpVersion): # String
self.add_query_param('HealthCheckHttpVersion', HealthCheckHttpVersion)
def get_HealthCheckConnectPort(self): # Integer
return self.get_query_params().get('HealthCheckConnectPort')
def set_HealthCheckConnectPort(self, HealthCheckConnectPort): # Integer
self.add_query_param('HealthCheckConnectPort', HealthCheckConnectPort) | null |
315 | # coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://github.com/openapi-json-schema-tools/openapi-json-schema-generator
"""
from __future__ import annotations
from petstore_api.shared_imports.schema_imports import * # pyright: ignore [reportWildcardImportFromLibrary]
AdditionalProperties: typing_extensions.TypeAlias = schemas.NotAnyTypeSchema
from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_0 import schema
from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_1 import schema as schema_2
from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_2 import schema as schema_3
from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_3 import schema as schema_5
from petstore_api.paths.fake_parameter_collisions1_abab_self_ab.post.parameters.parameter_4 import schema as schema_4
Properties = typing.TypedDict(
'Properties',
{
"1": typing.Type[schema.Schema],
"aB": typing.Type[schema_2.Schema],
"Ab": typing.Type[schema_3.Schema],
"A-B": typing.Type[schema_4.Schema],
"self": typing.Type[schema_5.Schema],
}
)
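# Keys such as "1", "A-B" and "self" are not valid Python parameter names, so the keyword
# constructor below only exposes the "aB"/"Ab" pair; the other keys must be supplied via
# the TypedDict input handled by from_dict_/validate.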
class QueryParametersDict(schemas.immutabledict[str, schemas.OUTPUT_BASE_TYPES]):
__required_keys__: typing.FrozenSet[str] = frozenset({
})
__optional_keys__: typing.FrozenSet[str] = frozenset({
"1",
"aB",
"Ab",
"A-B",
"self",
})
def __new__(
cls,
*,
aB: typing.Union[
str,
schemas.Unset
] = schemas.unset,
METHOD_NAME: typing.Union[
str,
schemas.Unset
] = schemas.unset,
configuration_: typing.Optional[schema_configuration.SchemaConfiguration] = None,
):
arg_: typing.Dict[str, typing.Any] = {}
for key, val in (
("aB", aB),
("Ab", METHOD_NAME),
):
if isinstance(val, schemas.Unset):
continue
arg_[key] = val
used_arg_ = typing.cast(QueryParametersDictInput, arg_)
return QueryParameters.validate(used_arg_, configuration=configuration_)
@staticmethod
def from_dict_(
arg: typing.Union[
QueryParametersDictInput,
QueryParametersDict
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> QueryParametersDict:
return QueryParameters.validate(arg, configuration=configuration)
@property
def aB(self) -> typing.Union[str, schemas.Unset]:
val = self.get("aB", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
str,
val
)
@property
def METHOD_NAME(self) -> typing.Union[str, schemas.Unset]:
val = self.get("Ab", schemas.unset)
if isinstance(val, schemas.Unset):
return val
return typing.cast(
str,
val
)
QueryParametersDictInput = typing.TypedDict(
'QueryParametersDictInput',
{
"1": str,
"aB": str,
"Ab": str,
"A-B": str,
"self": str,
},
total=False
)
@dataclasses.dataclass(frozen=True)
class QueryParameters(
schemas.Schema[QueryParametersDict, tuple]
):
types: typing.FrozenSet[typing.Type] = frozenset({schemas.immutabledict})
properties: Properties = dataclasses.field(default_factory=lambda: schemas.typed_dict_to_instance(Properties)) # type: ignore
additional_properties: typing.Type[AdditionalProperties] = dataclasses.field(default_factory=lambda: AdditionalProperties) # type: ignore
type_to_output_cls: typing.Mapping[
typing.Type,
typing.Type
] = dataclasses.field(
default_factory=lambda: {
schemas.immutabledict: QueryParametersDict
}
)
@classmethod
def validate(
cls,
arg: typing.Union[
QueryParametersDictInput,
QueryParametersDict,
],
configuration: typing.Optional[schema_configuration.SchemaConfiguration] = None
) -> QueryParametersDict:
return super().validate_base(
arg,
configuration=configuration,
)
| null |
316 | """Test otx segmentation task."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import os
import numpy as np
import pytest
from otx.algorithms.segmentation.task import OTXSegmentationTask
from otx.api.configuration.helper import create
from otx.api.entities.inference_parameters import InferenceParameters
from otx.api.entities.model_template import (
parse_model_template,
)
from tests.unit.algorithms.segmentation.test_helpers import (
DEFAULT_SEG_TEMPLATE_DIR,
generate_otx_dataset,
generate_otx_label_schema,
init_environment,
)
from otx.api.usecases.tasks.interfaces.export_interface import ExportType
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class MockOTXSegmentationTask(OTXSegmentationTask):
def _infer_model(*args, **kwargs):
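        # Mocked inference output: random soft predictions for the four classes plus a dummy feature vector.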
return dict(
classes=["background", "rectangle", "ellipse", "triangle"],
eval_predictions=[[np.random.rand(4, 128, 128)]],
feature_vectors=[np.random.rand(600, 1, 1)],
)
def _train_model(*args, **kwargs):
return {"final_ckpt": "dummy.pth"}
def _explain_model(*args, **kwargs):
pass
def _export_model(*args, **kwargs):
        return {
            "outputs": {"bin": "/tmp/model.bin", "xml": "/tmp/model.xml", "onnx": "/tmp/model.onnx"},
"inference_parameters": {"mean_values": "", "scale_values": ""},
}
class MockModel:
class _Configuration:
def __init__(self, label_schema):
self.label_schema = label_schema
def get_label_schema(self):
return self.label_schema
def __init__(self):
self.model_adapters = ["weights.pth"]
self.data = np.ndarray(1)
label_schema = generate_otx_label_schema()
self.configuration = self._Configuration(label_schema)
def get_data(self, name):
return self.data
def set_data(self, *args, **kwargs):
return
class TestOTXSegmentationTask:
@pytest.fixture(autouse=True)
def setup(self):
model_template = parse_model_template(os.path.join(DEFAULT_SEG_TEMPLATE_DIR, "template.yaml"))
hyper_parameters = create(model_template.hyper_parameters.data)
task_env = init_environment(hyper_parameters, model_template)
self.seg_task = MockOTXSegmentationTask(task_env)
@e2e_pytest_unit
def test_load_model_ckpt(self, mocker):
mocker_torch_load = mocker.patch("torch.load")
self.seg_task._load_model_ckpt(MockModel())
mocker_torch_load.assert_called_once()
@e2e_pytest_unit
def test_train(self, mocker):
dataset = generate_otx_dataset(5)
mocker.patch("torch.load", return_value=np.ndarray([1]))
self.seg_task.train(dataset, MockModel())
assert self.seg_task._model_ckpt == "dummy.pth"
@e2e_pytest_unit
def test_infer(self):
dataset = generate_otx_dataset(5)
predicted_dataset = self.seg_task.infer(
dataset.with_empty_annotations(), inference_parameters=InferenceParameters(is_evaluation=False)
)
assert predicted_dataset[0].annotation_scene.annotations[0]
@e2e_pytest_unit
def test_evaluate(self, mocker):
class _MockScoreMetric:
def __init__(self, value):
self.value = value
class _MockMetric:
def __init__(self):
self.overall_dice = _MockScoreMetric(1.0)
def METHOD_NAME(self):
return 1.0
class _MockResultEntity:
performance = 0.0
mocker.patch(
"otx.algorithms.segmentation.task.MetricsHelper.compute_dice_averaged_over_pixels",
return_value=_MockMetric(),
)
_result_entity = _MockResultEntity()
self.seg_task.evaluate(_result_entity)
assert _result_entity.performance == 1.0
@e2e_pytest_unit
@pytest.mark.parametrize("export_type", [ExportType.ONNX, ExportType.OPENVINO])
def test_export(self, otx_model, mocker, export_type):
mocker_open = mocker.patch("builtins.open")
mocker_open.__enter__.return_value = True
mocker.patch("otx.algorithms.segmentation.task.embed_ir_model_data", return_value=None)
mocker.patch("otx.algorithms.segmentation.task.embed_onnx_model_data", return_value=None)
self.seg_task.export(export_type, otx_model)
mocker_open.assert_called() | null |
317 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=line-too-long
r"""Finetune a ViT-L/32 on CIFAR-10/100 subpopulation shift with BE+GP.
Checkpoints in this config are useful for BE->BE+GP pretraining->finetuning.
This config is used for models pretrained on either JFT-300M or ImageNet-21K.
"""
# pylint: enable=line-too-long
import ml_collections
from experiments import sweep_utils # local file import from baselines.jft
# CIFAR-10/100 subpopulation datasets.
CIFAR10_SUBPOPL_DATA_FILES = []
CIFAR100_SUBPOPL_DATA_FILES = []
def get_config():
"""Config for finetuning."""
config = ml_collections.ConfigDict()
config.model_init = '' # set in sweep
config.dataset = '' # set in sweep
config.val_split = '' # set in sweep
config.test_split = '' # set in sweep
config.train_split = '' # set in sweep
config.num_classes = None # set in sweep
config.batch_size = 512
config.total_steps = None # set in sweep
config.pp_train = '' # set in sweep
config.pp_eval = '' # set in sweep
config.shuffle_buffer_size = 50_000 # Per host, so small-ish is ok.
config.log_training_steps = 100
config.log_eval_steps = 1000
config.checkpoint_steps = 5000
config.checkpoint_timeout = 1
config.prefetch_to_device = 2
config.trial = 0
# Subpopulation shift evaluation. Parameters set in the sweep. If
# `config.subpopl_cifar_data_file` is None, this evaluation is skipped.
config.subpopl_cifar_data_file = None
config.pp_eval_subpopl_cifar = None
# OOD evaluation. They're all set in the sweep.
config.ood_datasets = []
config.ood_num_classes = []
config.ood_split = ''
config.ood_methods = []
config.pp_eval_ood = []
config.eval_on_cifar_10h = False
config.pp_eval_cifar_10h = ''
config.eval_on_imagenet_real = False
config.pp_eval_imagenet_real = ''
# Model section.
config.model = ml_collections.ConfigDict()
config.model.patch_size = [32, 32]
config.model.hidden_size = 1024
config.model.transformer = ml_collections.ConfigDict()
config.model.transformer.mlp_dim = 4096
config.model.transformer.num_heads = 16
config.model.transformer.num_layers = 24
config.model.transformer.attention_dropout_rate = 0.
config.model.transformer.dropout_rate = 0.
config.model.classifier = 'token'
# This is "no head" fine-tuning, which we use by default.
config.model.representation_size = None
# BatchEnsemble config.
config.model.transformer.be_layers = (21, 22, 23)
config.model.transformer.ens_size = 3
config.model.transformer.random_sign_init = -0.5
config.fast_weight_lr_multiplier = 1.0
# GP config.
config.use_gp_layer = True
config.gp_layer = ml_collections.ConfigDict()
config.gp_layer.covmat_momentum = -1
config.gp_layer.ridge_penalty = 1.
# No need to use mean field adjustment for pretraining.
config.gp_layer.mean_field_factor = -1.
# Optimizer section.
config.optim_name = 'Momentum'
config.optim = ml_collections.ConfigDict()
config.grad_clip_norm = 1.0
config.weight_decay = None
config.loss = 'softmax_xent'
config.lr = ml_collections.ConfigDict()
config.lr.base = 0.001 # set in sweep
config.lr.warmup_steps = 0 # set in sweep
config.lr.decay_type = 'cosine'
return config
def METHOD_NAME(hyper):
"""Sweeps over datasets."""
checkpoints = ['/path/to/pretrained_model_ckpt.npz']
# Apply a learning rate sweep following Table 4 of Vision Transformer paper.
cifar10_sweep = hyper.product([
hyper.chainit([
hyper.product(sweep_utils.cifar10(
hyper, steps=int(10_000 * s), warmup=int(500 * s)))
for s in [0.5, 1.0, 1.5, 2.0]
]),
hyper.sweep('config.lr.base', [0.03, 0.01, 0.003, 0.001]),
hyper.sweep('config.gp_layer.mean_field_factor',
[-1., 0.1, 0.2, 0.3, 0.5, 1., 2., 3., 5., 10., 20])
])
cifar100_sweep = hyper.product([
hyper.chainit([
hyper.product(sweep_utils.cifar100(
hyper, steps=int(10_000 * s), warmup=int(500 * s)))
for s in [0.5, 1.0, 1.5, 2.0]
]),
hyper.sweep('config.lr.base', [0.06, 0.03, 0.01, 0.006]),
hyper.sweep('config.fast_weight_lr_multiplier', [0.5, 1.0, 2.0]),
hyper.sweep('config.model.transformer.random_sign_init', [-0.5, 0.5]),
hyper.sweep('config.gp_layer.mean_field_factor',
[-1., 1e-8, 1e-7, 1e-6, 1e-5, 1e-4])
])
return hyper.product([
hyper.chainit([
hyper.product([
cifar10_sweep,
hyper.sweep('config.subpopl_cifar_data_file',
CIFAR10_SUBPOPL_DATA_FILES)
]),
hyper.product([
cifar100_sweep,
hyper.sweep('config.subpopl_cifar_data_file',
CIFAR100_SUBPOPL_DATA_FILES)
]),
]),
hyper.sweep('config.model_init', checkpoints),
]) | null |
318 | # coding=utf-8
# Copyright 2023 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for multimodal_utils."""
from jax import numpy as jnp
import numpy as np
import tensorflow as tf
import multimodal_utils # local file import from experimental.multimodal
class MultimodalUtilsTest(tf.test.TestCase):
def test_contrastive_loss_logits(self):
zimg = jnp.array([[1., 2., 3.], [4., 5., 6.], [1., 0., 0.]])
ztext = jnp.array([[-1., -2., -3.], [1., 2., 3.], [1., 0., 0.]])
_, logits = multimodal_utils.bidirectional_contrastive_loss(zimg, ztext)
np.testing.assert_allclose(
logits,
jnp.array([[-14., 14., 1.], [-32., 32., 4.], [-1., 1., 1.]]))
def test_contrastive_loss_no_reduction_no_mask(self):
zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]])
loss, logits = multimodal_utils.bidirectional_contrastive_loss(
zimg, ztext, mask=None, reduction=False)
np.testing.assert_allclose(
logits,
jnp.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 1.]]))
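    # The per-example loss averages the image-to-text (row-wise softmax) and
    # text-to-image (column-wise softmax) cross entropies of the logits above.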
expected_loss = -0.5 * jnp.array([
jnp.log(jnp.e**2 / (jnp.e + 2)**2),
jnp.log(1 / (3 * (jnp.e + 2))),
jnp.log(jnp.e**2 / ((2 + jnp.e) * (1 + 2 * jnp.e)))
])
np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6)
def test_contrastive_loss_reduction_no_mask(self):
zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]])
loss, logits = multimodal_utils.bidirectional_contrastive_loss(
zimg, ztext, mask=None, reduction=True)
np.testing.assert_allclose(
logits,
jnp.array([[1., 0., 0.], [0., 0., 0.], [0., 1., 1.]]))
expected_loss = jnp.mean(-0.5 * jnp.array([
jnp.log(jnp.e**2 / (jnp.e + 2)**2),
jnp.log(1 / (3 * (jnp.e + 2))),
jnp.log(jnp.e**2 / ((2 + jnp.e) * (1 + 2 * jnp.e)))
]))
np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6)
def test_contrastive_loss_no_reduction_mask(self):
zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]])
loss, logits = multimodal_utils.bidirectional_contrastive_loss(
zimg, ztext, mask=jnp.array([1, 1, 0]), reduction=False)
np.testing.assert_allclose(
logits,
jnp.array([[1., 0., -jnp.inf],
[0., 0., -jnp.inf],
[-jnp.inf, -jnp.inf, -jnp.inf]]))
expected_loss = -0.5 * jnp.array([
jnp.log(jnp.e**2 / (jnp.e + 1)**2),
jnp.log(1 / 4),
0
])
np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6)
def METHOD_NAME(self):
zimg = jnp.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
ztext = jnp.array([[1., 0., 0.], [0., 0., 1.], [0., 0., 1.]])
loss, logits = multimodal_utils.bidirectional_contrastive_loss(
zimg, ztext, mask=jnp.array([1, 1, 0]), reduction=True)
np.testing.assert_allclose(
logits,
jnp.array([[1., 0., -jnp.inf],
[0., 0., -jnp.inf],
[-jnp.inf, -jnp.inf, -jnp.inf]]))
expected_loss = jnp.sum(-0.5 * jnp.array([
jnp.log(jnp.e**2 / (jnp.e + 1)**2),
jnp.log(1 / 4)
])) / 2
np.testing.assert_allclose(loss, expected_loss, atol=1e-6, rtol=1e-6)
if __name__ == "__main__":
tf.test.main() | null |
319 | """Implements a cat command for xonsh."""
import os
import sys
import time
import xonsh.procs.pipelines as xpp
from xonsh.built_ins import XSH
from xonsh.xoreutils.util import arg_handler, run_alias
def _cat_line(
f, sep, last_was_blank, line_count, opts, out, enc, enc_errors, read_size
):
_r = r = f.readline(80)
restore_newline = False
if isinstance(_r, str):
_r = r = _r.encode(enc, enc_errors)
if r == b"":
return last_was_blank, line_count, read_size, True
if r.endswith(sep):
_r = _r[: -len(sep)]
restore_newline = True
this_one_blank = _r == b""
if last_was_blank and this_one_blank and opts["squeeze_blank"]:
return last_was_blank, line_count, read_size, False
last_was_blank = this_one_blank
if opts["number_all"] or (opts["number_nonblank"] and not this_one_blank):
start = ("%6d " % line_count).encode(enc, enc_errors)
_r = start + _r
line_count += 1
if opts["show_ends"]:
_r = _r + b"$"
if restore_newline:
_r = _r + sep
out.buffer.write(_r)
out.flush()
read_size += len(r)
return last_was_blank, line_count, read_size, False
def _cat_single_file(opts, fname, stdin, out, err, line_count=1):
env = XSH.env
enc = env.get("XONSH_ENCODING")
enc_errors = env.get("XONSH_ENCODING_ERRORS")
read_size = 0
file_size = fobj = None
if fname == "-":
f = stdin or sys.stdin
elif os.path.isdir(fname):
print(f"cat: {fname}: Is a directory.", file=err)
return True, line_count
elif not os.path.exists(fname):
print(f"cat: No such file or directory: {fname}", file=err)
return True, line_count
else:
file_size = os.stat(fname).st_size
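        # A reported size of zero (e.g. for special files) is treated as unknown,
        # so the loop below reads until EOF instead of counting bytes.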
if file_size == 0:
file_size = None
fobj = open(fname, "rb")
f = xpp.NonBlockingFDReader(fobj.fileno(), timeout=0.1)
sep = os.linesep.encode(enc, enc_errors)
last_was_blank = False
while file_size is None or read_size < file_size:
try:
last_was_blank, line_count, read_size, endnow = _cat_line(
f,
sep,
last_was_blank,
line_count,
opts,
out,
enc,
enc_errors,
read_size,
)
if endnow:
break
if last_was_blank:
time.sleep(1e-3)
except KeyboardInterrupt:
print("got except", flush=True, file=out)
break
except Exception as e:
print("xonsh:", e, flush=True, file=out)
pass
if fobj is not None:
fobj.close()
return False, line_count
def cat(args, stdin, stdout, stderr):
"""A cat command for xonsh."""
opts = METHOD_NAME(args)
if opts is None:
print(CAT_HELP_STR, file=stdout)
return 0
line_count = 1
errors = False
if len(args) == 0:
args = ["-"]
for i in args:
o = _cat_single_file(opts, i, stdin, stdout, stderr, line_count)
if o is None:
return -1
_e, line_count = o
errors = _e or errors
return int(errors)
def METHOD_NAME(args):
out = {
"number_nonblank": False,
"number_all": False,
"squeeze_blank": False,
"show_ends": False,
}
if "--help" in args:
return
arg_handler(args, out, "-b", "number_nonblank", True, "--number-nonblank")
arg_handler(args, out, "-n", "number_all", True, "--number")
arg_handler(args, out, "-E", "show_ends", True, "--show-ends")
arg_handler(args, out, "-s", "squeeze_blank", True, "--squeeze-blank")
arg_handler(args, out, "-T", "show_tabs", True, "--show-tabs")
return out
CAT_HELP_STR = """This version of cat was written in Python for the xonsh project: http://xon.sh
Based on cat from GNU coreutils: http://www.gnu.org/software/coreutils/
Usage: cat [OPTION]... [FILE]...
Concatenate FILE(s), or standard input, to standard output.
-b, --number-nonblank number nonempty output lines, overrides -n
-E, --show-ends display $ at end of each line
-n, --number number all output lines
-s, --squeeze-blank suppress repeated empty output lines
-T, --show-tabs display TAB characters as ^I
-u (ignored)
--help display this help and exit
With no FILE, or when FILE is -, read standard input.
Examples:
cat f - g Output f's contents, then standard input, then g's contents.
cat Copy standard input to standard output."""
# NOT IMPLEMENTED:
# -A, --show-all equivalent to -vET
# -e equivalent to -vE
# -t equivalent to -vT
# -v, --show-nonprinting use ^ and M- notation, except for LFD and TAB
# --version output version information and exit"""
def main(args=None):
run_alias("cat", args)
if __name__ == "__main__":
main() | null |
320 | from subprocess import CalledProcessError
import pytest
from galaxy.tool_util.deps.container_classes import DOCKER_CONTAINER_TYPE
from galaxy.tool_util.deps.container_resolvers.mulled import (
CachedMulledDockerContainerResolver,
CachedMulledSingularityContainerResolver,
MulledDockerContainerResolver,
)
from galaxy.tool_util.deps.containers import ContainerRegistry
from galaxy.tool_util.deps.dependencies import (
AppInfo,
ToolInfo,
)
from galaxy.tool_util.deps.requirements import ToolRequirement
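# Image names as they would appear in a Singularity cache directory: two single-package
# images and one hash-named mulled (multi-package) image.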
SINGULARITY_IMAGES = (
"foo:1.0--bar",
"baz:2.22",
"mulled-v2-fe8a3b846bc50d24e5df78fa0b562c43477fe9ce:9f946d13f673ab2903cb0da849ad42916d619d18-0",
)
@pytest.fixture
def appinfo() -> AppInfo:
return AppInfo(
involucro_auto_init=True,
enable_mulled_containers=True,
container_image_cache_path=".",
)
@pytest.fixture
def container_registry():
app_info = AppInfo(
involucro_auto_init=True,
enable_mulled_containers=True,
container_image_cache_path=".",
)
return ContainerRegistry(app_info)
def test_container_registry(container_registry, mocker):
mocker.patch("galaxy.tool_util.deps.mulled.util._get_namespace", return_value=["samtools"])
tool_info = ToolInfo(requirements=[ToolRequirement(name="samtools", version="1.10", type="package")])
container_description = container_registry.find_best_container_description(
[DOCKER_CONTAINER_TYPE],
tool_info,
install=False,
)
assert container_description.type == "docker"
assert "samtools:1.10" in container_description.identifier
def test_docker_container_resolver_detects_docker_cli_absent(appinfo, mocker):
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None)
resolver = CachedMulledDockerContainerResolver(appinfo)
assert resolver._cli_available is False
def test_docker_container_resolver_detects_docker_cli(appinfo, mocker):
"""
- CachedMulledDockerContainerResolver properly detects present docker binary
"""
    mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value="/bin/docker")
resolver = CachedMulledDockerContainerResolver(appinfo)
assert resolver.cli_available
def test_cached_docker_container_docker_cli_absent_resolve(appinfo, mocker) -> None:
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None)
resolver = CachedMulledDockerContainerResolver(appinfo)
assert resolver.cli_available is False
assert resolver.resolve(enabled_container_types=[], tool_info=ToolInfo()) is None
def test_docker_container_docker_cli_absent_resolve(appinfo, mocker):
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value=None)
resolver = MulledDockerContainerResolver(appinfo)
assert resolver.cli_available is False
requirement = ToolRequirement(name="samtools", version="1.10", type="package")
tool_info = ToolInfo(requirements=[requirement])
mocker.patch(
"galaxy.tool_util.deps.container_resolvers.mulled.targets_to_mulled_name",
return_value="samtools:1.10--h2e538c0_3",
)
container_description = resolver.resolve(enabled_container_types=["docker"], tool_info=tool_info)
assert container_description
assert container_description.type == "docker"
assert container_description.identifier == "quay.io/biocontainers/samtools:1.10--h2e538c0_3"
def METHOD_NAME(appinfo, mocker):
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.which", return_value="/bin/docker")
resolver = MulledDockerContainerResolver(appinfo)
assert resolver.cli_available is True
requirement = ToolRequirement(name="samtools", version="1.10", type="package")
tool_info = ToolInfo(requirements=[requirement])
mocker.patch(
"galaxy.tool_util.deps.container_resolvers.mulled.targets_to_mulled_name",
return_value="samtools:1.10--h2e538c0_3",
)
mocker.patch(
"galaxy.tool_util.deps.container_resolvers.mulled.docker_cached_container_description",
side_effect=CalledProcessError(1, "bla"),
)
container_description = resolver.resolve(enabled_container_types=["docker"], tool_info=tool_info, install=True)
assert resolver.cli_available is True
assert container_description
assert container_description.type == "docker"
assert container_description.identifier == "quay.io/biocontainers/samtools:1.10--h2e538c0_3"
def test_cached_singularity_container_resolver_uncached(mocker):
mocker.patch("os.listdir", return_value=SINGULARITY_IMAGES)
mocker.patch("os.path.exists", return_value=True)
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.safe_makedirs")
resolver = CachedMulledSingularityContainerResolver(app_info=mocker.Mock(container_image_cache_path="/"))
requirement = ToolRequirement(name="foo", version="1.0", type="package")
tool_info = ToolInfo(requirements=[requirement])
container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info)
assert container_description
assert container_description.type == "singularity"
assert container_description.identifier == "/singularity/mulled/foo:1.0--bar"
def test_cached_singularity_container_resolver_dir_mtime_cached(mocker):
mocker.patch("os.listdir", return_value=SINGULARITY_IMAGES)
mocker.patch("os.path.exists", return_value=True)
mocker.patch("galaxy.tool_util.deps.container_resolvers.mulled.safe_makedirs")
mocker.patch("os.stat", return_value=mocker.Mock(st_mtime=42))
resolver = CachedMulledSingularityContainerResolver(
app_info=mocker.Mock(container_image_cache_path="/"), cache_directory_cacher_type="dir_mtime"
)
requirement = ToolRequirement(name="baz", version="2.22", type="package")
tool_info = ToolInfo(requirements=[requirement])
container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info)
assert container_description
assert container_description.type == "singularity"
assert container_description.identifier == "/singularity/mulled/baz:2.22"
requirement = ToolRequirement(name="foo", version="1.0", type="package")
tool_info.requirements.append(requirement)
container_description = resolver.resolve(enabled_container_types=["singularity"], tool_info=tool_info)
assert container_description
assert container_description.type == "singularity"
assert (
container_description.identifier
== "/singularity/mulled/mulled-v2-fe8a3b846bc50d24e5df78fa0b562c43477fe9ce:9f946d13f673ab2903cb0da849ad42916d619d18-0"
) | null |
321 | ## \example domino/multiscale.py
# We are interested in applying domino to problems systematically in a
# multiscale manner. This script experiments with those approaches.
from __future__ import print_function
import IMP.domino
import IMP.core
import sys
IMP.setup_from_argv(sys.argv, "multiscale")
m = IMP.Model()
m.set_log_level(IMP.SILENT)
ds = [IMP.core.XYZR.setup_particle(IMP.Particle(m))
for i in range(0, 3)]
for i, d in enumerate(ds):
d.set_radius(1)
IMP.display.Colored.setup_particle(d, IMP.display.get_display_color(i))
k = 1
h = IMP.core.Harmonic(0, k)
r0 = IMP.core.SingletonRestraint(
m, IMP.core.DistanceToSingletonScore(h, IMP.algebra.Vector3D(0, 0, 0)),
ds[0].get_particle_index(), "0 at origin")
r1 = IMP.core.SingletonRestraint(
m, IMP.core.AttributeSingletonScore(h, IMP.core.XYZ.get_xyz_keys()[0]),
ds[1].get_particle_index(), "1 on axis")
rs = [r0, r1]
for pr in [(0, 1), (1, 2), (0, 2)]:
r = IMP.core.PairRestraint(
m, IMP.core.HarmonicSphereDistancePairScore(0, k),
(ds[pr[0]].get_particle_index(), ds[pr[1]].get_particle_index()),
"R for " + str(pr))
rs.append(r)
bb = IMP.algebra.BoundingBox2D(IMP.algebra.Vector2D(0, 0),
IMP.algebra.Vector2D(4, 4))
covers = []
for i in range(0, 6):
cur = IMP.algebra.get_grid_interior_cover_by_spacing(bb, 4.0 / 2 ** i)
print(cur)
covers.append([IMP.algebra.Vector3D(x[0], x[1], 0) for x in cur])
def METHOD_NAME(cover, scale):
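    # Build a DOMINO sampler whose particle states are restricted to the given cover,
    # with each restraint's maximum score scaled to the current grid spacing.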
pst = IMP.domino.ParticleStatesTable()
st = IMP.domino.XYZStates(cover)
for p in ds:
pst.set_particle_states(p, st)
for r in rs:
r.set_maximum_score(.5 * scale ** 2)
lf = IMP.domino.ListSubsetFilterTable(pst)
rc = IMP.domino.RestraintCache(pst)
rc.add_restraints(rs)
fs = [IMP.domino.RestraintScoreSubsetFilterTable(rc),
lf]
sampler = IMP.domino.DominoSampler(m, pst)
sampler.set_restraints(rs)
sampler.set_subset_filter_tables(fs)
sampler.set_log_level(IMP.SILENT)
return (sampler, lf, pst)
(sampler, lf, pst) = METHOD_NAME(covers[0], 4.0)
subset = IMP.domino.Subset(ds)
ac = sampler.get_sample_assignments(subset)
print(ac)
def get_mapping(cover0, cover1):
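    # For every point of the coarse cover, collect the indices of the fine-cover points
    # that are nearest to it.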
nn = IMP.algebra.NearestNeighbor3D(cover0)
ret = [[] for c in cover0]
for i, p in enumerate(cover1):
nns = nn.get_nearest_neighbor(p)
ret[nns].append(i)
return ret
mw = IMP.display.PymolWriter("mapping.pym")
def display_mapping(index, cover0, cover1, mapping):
mw.set_frame(index)
for i, c in enumerate(mapping):
for p in c:
g = IMP.display.PointGeometry(cover1[p])
g.set_color(IMP.display.get_display_color(i))
g.set_name("fine")
mw.add_geometry(g)
for i, c in enumerate(cover0):
g = IMP.display.PointGeometry(c)
g.set_color(IMP.display.get_display_color(i))
g.set_name("coarse")
mw.add_geometry(g)
for curi in range(1, len(covers)):
scale = 4.0 / 2 ** curi
print(scale)
mapping = get_mapping(covers[curi - 1], covers[curi])
print(mapping)
display_mapping(curi - 1, covers[curi - 1], covers[curi], mapping)
(sampler, lf, pst) = METHOD_NAME(covers[curi], scale)
lac = ac
cac = []
for a in lac:
for i, p in enumerate(subset):
s = a[i]
allowed = mapping[s]
lf.set_allowed_states(p, allowed)
ccac = sampler.get_sample_assignments(subset)
print(a, ccac)
cac = cac + ccac
ac = list(set(cac))
print("for scale", scale, "got", ac)
sw = IMP.display.PymolWriter("solutions." + str(curi) + ".pym")
for i, a in enumerate(ac):
IMP.domino.load_particle_states(subset, a, pst)
sw.set_frame(i)
for p in ds:
g = IMP.core.XYZRGeometry(p)
sw.add_geometry(g)
for c in covers[curi]:
g = IMP.display.PointGeometry(c)
g.set_color(IMP.display.Color(1, 1, 1))
g.set_name("grid")
sw.add_geometry(g) | null |
322 | import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
SIZE = 10
K = 5
def make_array():
a = ak.randint(0, SIZE, SIZE)
return a
def compare_results(akres, sortedres) -> int:
"""
    Compares the numpy and arkouda arrays via numpy.array_equal, returning 0 if the
    arrays are identical element-wise and 1 if they are dissimilar.
:return: 0 (identical) or 1 (dissimilar)
:rtype: int
"""
akres = akres.to_ndarray()
if not np.array_equal(akres, sortedres):
akres = ak.array(akres)
sortedres = ak.array(sortedres)
innp = sortedres[
ak.in1d(ak.array(sortedres), ak.array(akres), True)
] # values in np array, but not ak array
inak = akres[
ak.in1d(ak.array(akres), ak.array(sortedres), True)
        ]  # values in ak array, but not in np array
print(f"(values in np but not ak: {innp}) (values in ak but not np: {inak})")
return 1
return 0
def run_test(runMin=True, isInd=True, verbose=True):
"""
    Runs the mink/maxk (or argmink/argmaxk) reduction on a randomized array and
    compares the result against a numpy sort of the same data.
    :return: number of failed comparisons (0 if the results match)
"""
aka = make_array()
failures = 0
try:
if not isInd:
if runMin:
akres = ak.mink(aka, K)
npres = np.sort(aka.to_ndarray())[:K] # first K elements from sorted array
else:
akres = ak.maxk(aka, K)
npres = np.sort(aka.to_ndarray())[-K:] # last K elements from sorted array
else:
if runMin:
akres = aka[ak.argmink(aka, K)]
npres = np.sort(aka.to_ndarray())[:K] # first K elements from sorted array
else:
akres = aka[ak.argmaxk(aka, K)]
npres = np.sort(aka.to_ndarray())[-K:] # last K elements from sorted array
except RuntimeError as E:
if verbose:
print("Arkouda error: ", E)
return 1
failures += compare_results(akres, npres)
return failures
class MinKTest(ArkoudaTest):
def test_mink(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test for set operations
"""
self.assertEqual(0, run_test())
def test_error_handling(self):
testArray = ak.randint(0, 100, 100)
with self.assertRaises(TypeError):
ak.mink(list(range(0, 10)), 1)
with self.assertRaises(TypeError):
ak.mink(testArray, "1")
with self.assertRaises(ValueError):
ak.mink(testArray, -1)
with self.assertRaises(ValueError):
ak.mink(ak.array([]), 1)
class MaxKTest(ArkoudaTest):
def test_maxk(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test for set operations
"""
self.assertEqual(0, run_test(runMin=False))
def test_error_handling(self):
testArray = ak.randint(0, 100, 100)
with self.assertRaises(TypeError):
ak.maxk(list(range(0, 10)), 1)
with self.assertRaises(TypeError):
ak.maxk(testArray, "1")
with self.assertRaises(ValueError):
ak.maxk(testArray, -1)
with self.assertRaises(ValueError):
ak.maxk(ak.array([]), 1)
class ArgMinKTest(ArkoudaTest):
def METHOD_NAME(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test for set operations
"""
self.assertEqual(0, run_test(isInd=True))
def test_error_handling(self):
testArray = ak.randint(0, 100, 100)
with self.assertRaises(TypeError):
ak.argmink(list(range(0, 10)), 1)
with self.assertRaises(TypeError):
ak.argmink(testArray, "1")
with self.assertRaises(ValueError):
ak.argmink(testArray, -1)
with self.assertRaises(ValueError):
ak.argmink(ak.array([]), 1)
class ArgMaxKTest(ArkoudaTest):
def test_argmaxk(self):
"""
Executes run_test and asserts whether there are any errors
:return: None
:raise: AssertionError if there are any errors encountered in run_test for set operations
"""
self.assertEqual(0, run_test(runMin=False, isInd=True))
def test_error_handling(self):
testArray = ak.randint(0, 100, 100)
with self.assertRaises(TypeError):
ak.argmaxk(list(range(0, 10)), 1)
with self.assertRaises(TypeError):
ak.argmaxk(testArray, "1")
with self.assertRaises(ValueError):
ak.argmaxk(testArray, -1)
with self.assertRaises(ValueError):
ak.argmaxk(ak.array([]), 1)
class ArgMinTest(ArkoudaTest):
def test_argmin(self):
np_arr = np.array([False, False, True, True, False])
ak_arr = ak.array(np_arr)
self.assertEqual(np_arr.argmin(), ak_arr.argmin())
class ArgMaxTest(ArkoudaTest):
def test_argmax(self):
np_arr = np.array([False, False, True, True, False])
ak_arr = ak.array(np_arr)
self.assertEqual(np_arr.argmax(), ak_arr.argmax()) | null |
323 | import random
from sc2 import maps
from sc2.bot_ai import BotAI
from sc2.data import Difficulty, Race
from sc2.ids.ability_id import AbilityId
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sc2.main import run_game
from sc2.player import Bot, Computer
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
class Hydralisk(BotAI):
def select_target(self) -> Point2:
if self.enemy_structures:
return random.choice(self.enemy_structures).position
return self.enemy_start_locations[0]
# pylint: disable=R0912
async def METHOD_NAME(self, iteration):
larvae: Units = self.larva
forces: Units = self.units.of_type({UnitTypeId.ZERGLING, UnitTypeId.HYDRALISK})
# Send all idle lings + hydras to attack-move if we have at least 10 hydras, every 400th frame
if self.units(UnitTypeId.HYDRALISK).amount >= 10 and iteration % 50 == 0:
for unit in forces.idle:
unit.attack(self.select_target())
# If supply is low, train overlords
if self.supply_left < 2 and larvae and self.can_afford(UnitTypeId.OVERLORD):
larvae.random.train(UnitTypeId.OVERLORD)
return
# If hydra den is ready and idle, research upgrades
hydra_dens = self.structures(UnitTypeId.HYDRALISKDEN)
if hydra_dens:
for hydra_den in hydra_dens.ready.idle:
if self.already_pending_upgrade(UpgradeId.EVOLVEGROOVEDSPINES
) == 0 and self.can_afford(UpgradeId.EVOLVEGROOVEDSPINES):
hydra_den.research(UpgradeId.EVOLVEGROOVEDSPINES)
elif self.already_pending_upgrade(UpgradeId.EVOLVEMUSCULARAUGMENTS
) == 0 and self.can_afford(UpgradeId.EVOLVEMUSCULARAUGMENTS):
hydra_den.research(UpgradeId.EVOLVEMUSCULARAUGMENTS)
# If hydra den is ready, train hydra
if larvae and self.can_afford(UnitTypeId.HYDRALISK) and self.structures(UnitTypeId.HYDRALISKDEN).ready:
larvae.random.train(UnitTypeId.HYDRALISK)
return
# If all our townhalls are dead, send all our units to attack
if not self.townhalls:
for unit in self.units.of_type(
{UnitTypeId.DRONE, UnitTypeId.QUEEN, UnitTypeId.ZERGLING, UnitTypeId.HYDRALISK}
):
unit.attack(self.enemy_start_locations[0])
return
hq: Unit = self.townhalls.first
# Send idle queens with >=25 energy to inject
for queen in self.units(UnitTypeId.QUEEN).idle:
            # The following checks if the inject ability is in the queen's abilities - basically
            # it checks if we have enough energy and if the ability is off cooldown
# abilities = await self.get_available_abilities(queen)
# if AbilityId.EFFECT_INJECTLARVA in abilities:
if queen.energy >= 25:
queen(AbilityId.EFFECT_INJECTLARVA, hq)
# Build spawning pool
if self.structures(UnitTypeId.SPAWNINGPOOL).amount + self.already_pending(UnitTypeId.SPAWNINGPOOL) == 0:
if self.can_afford(UnitTypeId.SPAWNINGPOOL):
await self.build(UnitTypeId.SPAWNINGPOOL, near=hq.position.towards(self.game_info.map_center, 5))
# Upgrade to lair if spawning pool is complete
if self.structures(UnitTypeId.SPAWNINGPOOL).ready:
if hq.is_idle and not self.townhalls(UnitTypeId.LAIR):
if self.can_afford(UnitTypeId.LAIR):
hq.build(UnitTypeId.LAIR)
# If lair is ready and we have no hydra den on the way: build hydra den
if self.townhalls(UnitTypeId.LAIR).ready:
if self.structures(UnitTypeId.HYDRALISKDEN).amount + self.already_pending(UnitTypeId.HYDRALISKDEN) == 0:
if self.can_afford(UnitTypeId.HYDRALISKDEN):
await self.build(UnitTypeId.HYDRALISKDEN, near=hq.position.towards(self.game_info.map_center, 5))
# If we dont have both extractors: build them
if (
self.structures(UnitTypeId.SPAWNINGPOOL)
and self.gas_buildings.amount + self.already_pending(UnitTypeId.EXTRACTOR) < 2
):
if self.can_afford(UnitTypeId.EXTRACTOR):
# May crash if we dont have any drones
for vg in self.vespene_geyser.closer_than(10, hq):
drone: Unit = self.workers.random
drone.build_gas(vg)
break
# If we have less than 22 drones, build drones
if self.supply_workers + self.already_pending(UnitTypeId.DRONE) < 22:
if larvae and self.can_afford(UnitTypeId.DRONE):
larva: Unit = larvae.random
larva.train(UnitTypeId.DRONE)
return
# Saturate gas
for a in self.gas_buildings:
if a.assigned_harvesters < a.ideal_harvesters:
w: Units = self.workers.closer_than(10, a)
if w:
w.random.gather(a)
# Build queen once the pool is done
if self.structures(UnitTypeId.SPAWNINGPOOL).ready:
if not self.units(UnitTypeId.QUEEN) and hq.is_idle:
if self.can_afford(UnitTypeId.QUEEN):
hq.train(UnitTypeId.QUEEN)
# Train zerglings if we have much more minerals than vespene (not enough gas for hydras)
if self.units(UnitTypeId.ZERGLING).amount < 20 and self.minerals > 1000:
if larvae and self.can_afford(UnitTypeId.ZERGLING):
larvae.random.train(UnitTypeId.ZERGLING)
def main():
run_game(
maps.get("(2)CatalystLE"),
[Bot(Race.Zerg, Hydralisk()), Computer(Race.Terran, Difficulty.Medium)],
realtime=False,
save_replay_as="ZvT.SC2Replay",
)
if __name__ == "__main__":
main() | null |
324 | from __future__ import annotations
import ast
from abc import ABC
from typing import Dict, List, Optional, Set, Tuple
from boa3.internal.model import set_internal_call
from boa3.internal.model.expression import IExpression
from boa3.internal.model.type.type import IType, Type
from boa3.internal.model.variable import Variable
from boa3.internal.neo.vm.VMCode import VMCode
class Callable(IExpression, ABC):
"""
A class used to represent a function or a class method
    :ivar args: a dictionary that maps each arg name to its Variable. Empty by default.
:ivar is_public: a boolean value that specifies if the method is public. False by default.
:ivar return_type: the return type of the method. None by default.
"""
def __init__(self, args: Dict[str, Variable] = None,
vararg: Optional[Tuple[str, Variable]] = None,
kwargs: Optional[Dict[str, Variable]] = None,
defaults: List[ast.AST] = None,
return_type: IType = Type.none, is_public: bool = False,
decorators: List[Callable] = None,
external_name: str = None,
is_safe: bool = False,
origin_node: Optional[ast.AST] = None):
if args is None:
args = {}
self.args: Dict[str, Variable] = args.copy()
if not isinstance(defaults, list):
defaults = []
self.defaults: List[ast.AST] = defaults
self._vararg: Optional[Tuple[str, Variable]] = None
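        # a *vararg is registered as a regular tuple-typed argument whose default value is an
        # empty tuple, so calls that omit it still receive a valid value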
if (isinstance(vararg, tuple) and len(vararg) == 2
and isinstance(vararg[0], str) and isinstance(vararg[1], Variable)):
from boa3.internal.model.type.typeutils import TypeUtils
vararg_id, vararg_var = vararg
if vararg_var.type is not Type.any:
default_code = "{0}({1}, {2})".format(TypeUtils.cast.raw_identifier,
Type.tuple.build_collection(vararg_var.type),
Type.tuple.default_value)
else:
default_code = "{0}".format(Type.tuple.default_value)
default_value = set_internal_call(ast.parse(default_code).body[0].value)
self.args[vararg_id] = Variable(Type.tuple.build_collection([vararg_var.type]))
self.defaults.append(default_value)
self._vararg = vararg
if kwargs is None:
kwargs = {}
self._kwargs: Dict[str, Variable] = kwargs.copy()
self.return_type: IType = return_type
if decorators is None:
decorators = []
from boa3.internal.model.decorator import IDecorator
self.decorators: List[IDecorator] = [decorator for decorator in decorators
if isinstance(decorator, IDecorator)]
from boa3.internal.model.builtin.decorator import PublicDecorator
public_decorator = next((decorator for decorator in self.decorators
if isinstance(decorator, PublicDecorator)),
None)
self.is_public: bool = is_public or public_decorator is not None
if self.is_public:
if isinstance(public_decorator, PublicDecorator):
external_name = public_decorator.name
elif self.defined_by_entry:
external_name = None
self.external_name: Optional[str] = external_name
self.is_safe: bool = is_safe or (isinstance(public_decorator, PublicDecorator) and public_decorator.safe)
self._self_calls: Set[ast.AST] = set()
super().__init__(origin_node)
self.init_address: Optional[int] = None
self.init_bytecode: Optional[VMCode] = None
self.init_defaults_bytecode: Optional[VMCode] = None
self.end_bytecode: Optional[VMCode] = None
@property
def type(self) -> IType:
return self.return_type
@property
def symbols(self) -> Dict[str, Variable]:
"""
Gets all the symbols in the method
        :return: a dictionary that maps each symbol in the method with its name
"""
return self.args.copy()
@property
def METHOD_NAME(self) -> Dict[str, Variable]:
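        # defaults always bind to the trailing arguments, so everything before them is required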
num_defaults = len(self.defaults)
if num_defaults > 0:
return {key: self.args[key] for key in list(self.args.keys())[:-num_defaults]}
return self.args
@property
def has_cls_or_self(self) -> bool:
return any(decorator.has_cls_or_self for decorator in self.decorators)
@property
def cls_or_self_type(self) -> Optional[IType]:
if not self.has_cls_or_self or len(self.args) == 0:
return None
return list(self.args.values())[0].type
@property
def has_starred_argument(self) -> bool:
return self._vararg is not None
@property
def start_address(self) -> Optional[int]:
"""
Gets the address where this method starts in the bytecode
:return: the first address of the method
"""
if self.init_bytecode is None and self.init_defaults_bytecode is None:
return self.init_address
else:
from boa3.internal.compiler.codegenerator.vmcodemapping import VMCodeMapping
return VMCodeMapping.instance().get_start_address(self.init_bytecode)
@property
def start_bytecode(self) -> Optional[VMCode]:
return (self.init_defaults_bytecode if len(self.defaults) > 0
else self.init_bytecode)
@property
def end_address(self) -> Optional[int]:
"""
Gets the address of this method's last operation in the bytecode
:return: the last address of the method
"""
if self.end_bytecode is None:
return self.start_address
else:
from boa3.internal.compiler.codegenerator.vmcodemapping import VMCodeMapping
return VMCodeMapping.instance().get_end_address(self.end_bytecode)
@property
def is_called(self) -> bool:
return len(self._self_calls) > 0
def reset_calls(self):
self._self_calls.clear()
@property
def is_compiled(self) -> bool:
return self.start_address is not None and self.end_address is not None
def add_call_origin(self, origin: ast.AST) -> bool:
try:
self._self_calls.add(origin)
return True
except BaseException:
return False
def __str__(self) -> str:
args_types: List[str] = [str(arg.type) for arg in self.args.values()]
if self.return_type is not Type.none:
signature = '({0}) -> {1}'.format(', '.join(args_types), self.return_type)
else:
signature = '({0})'.format(', '.join(args_types))
public = 'public ' if self.is_public else ''
return '{0}{1}'.format(public, signature)
def __repr__(self) -> str:
name = self.identifier if hasattr(self, 'identifier') else self.__class__.__name__
return f'{name}{str(self)}' | null |
325 | """ testing import """
import pathlib
from unittest.mock import patch
import datetime
import pytz
from django.test import TestCase
from bookwyrm import models
from bookwyrm.importers import GoodreadsImporter
from bookwyrm.models.import_job import handle_imported_book
def make_date(*args):
"""helper function to easily generate a date obj"""
return datetime.datetime(*args, tzinfo=pytz.UTC)
# pylint: disable=consider-using-with
@patch("bookwyrm.suggested_users.rerank_suggestions_task.delay")
@patch("bookwyrm.activitystreams.populate_stream_task.delay")
@patch("bookwyrm.activitystreams.add_book_statuses_task.delay")
class GoodreadsImport(TestCase):
"""importing from goodreads csv"""
# pylint: disable=invalid-name
def setUp(self):
"""use a test csv"""
self.importer = GoodreadsImporter()
datafile = pathlib.Path(__file__).parent.joinpath("../data/goodreads.csv")
self.csv = open(datafile, "r", encoding=self.importer.encoding)
with patch("bookwyrm.suggested_users.rerank_suggestions_task.delay"), patch(
"bookwyrm.activitystreams.populate_stream_task.delay"
), patch("bookwyrm.lists_stream.populate_lists_task.delay"):
self.local_user = models.User.objects.create_user(
"mouse", "[email protected]", "password", local=True
)
models.SiteSettings.objects.create()
work = models.Work.objects.create(title="Test Work")
self.book = models.Edition.objects.create(
title="Example Edition",
remote_id="https://example.com/book/1",
parent_work=work,
)
def METHOD_NAME(self, *_):
"""creates the import job entry and checks csv"""
import_job = self.importer.create_job(
self.local_user, self.csv, False, "public"
)
import_items = models.ImportItem.objects.filter(job=import_job).all()
self.assertEqual(len(import_items), 3)
self.assertEqual(import_items[0].index, 0)
self.assertEqual(import_items[0].data["Book Id"], "42036538")
self.assertEqual(import_items[0].normalized_data["isbn_13"], '="9781250313195"')
self.assertEqual(import_items[0].normalized_data["isbn_10"], '="1250313198"')
self.assertEqual(import_items[1].index, 1)
self.assertEqual(import_items[1].data["Book Id"], "52691223")
self.assertEqual(import_items[2].index, 2)
self.assertEqual(import_items[2].data["Book Id"], "28694510")
def test_create_retry_job(self, *_):
"""trying again with items that didn't import"""
import_job = self.importer.create_job(
self.local_user, self.csv, False, "unlisted"
)
import_items = models.ImportItem.objects.filter(job=import_job).all()[:2]
retry = self.importer.create_retry_job(
self.local_user, import_job, import_items
)
self.assertNotEqual(import_job, retry)
self.assertEqual(retry.user, self.local_user)
self.assertEqual(retry.include_reviews, False)
self.assertEqual(retry.privacy, "unlisted")
retry_items = models.ImportItem.objects.filter(job=retry).all()
self.assertEqual(len(retry_items), 2)
self.assertEqual(retry_items[0].index, 0)
self.assertEqual(retry_items[0].data["Book Id"], "42036538")
self.assertEqual(retry_items[1].index, 1)
self.assertEqual(retry_items[1].data["Book Id"], "52691223")
def test_handle_imported_book(self, *_):
"""goodreads import added a book, this adds related connections"""
shelf = self.local_user.shelf_set.filter(
identifier=models.Shelf.READ_FINISHED
).first()
self.assertIsNone(shelf.books.first())
import_job = self.importer.create_job(
self.local_user, self.csv, False, "public"
)
import_item = import_job.items.first()
import_item.book = self.book
import_item.save()
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
handle_imported_book(import_item)
shelf.refresh_from_db()
self.assertEqual(shelf.books.first(), self.book)
self.assertEqual(
shelf.shelfbook_set.first().shelved_date, make_date(2020, 10, 21)
)
readthrough = models.ReadThrough.objects.get(user=self.local_user)
self.assertEqual(readthrough.book, self.book)
self.assertEqual(readthrough.start_date, make_date(2020, 10, 21))
self.assertEqual(readthrough.finish_date, make_date(2020, 10, 25))
@patch("bookwyrm.activitystreams.add_status_task.delay")
def test_handle_imported_book_review(self, *_):
"""goodreads review import"""
import_job = self.importer.create_job(
self.local_user, self.csv, True, "unlisted"
)
import_item = import_job.items.get(index=2)
import_item.book = self.book
import_item.save()
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
handle_imported_book(import_item)
review = models.Review.objects.get(book=self.book, user=self.local_user)
self.assertEqual(review.content, "mixed feelings")
self.assertEqual(review.rating, 2)
self.assertEqual(review.published_date, make_date(2019, 7, 8))
self.assertEqual(review.privacy, "unlisted")
@patch("bookwyrm.activitystreams.add_status_task.delay")
def test_handle_imported_book_rating(self, *_):
"""goodreads rating import"""
import_job = self.importer.create_job(
self.local_user, self.csv, True, "unlisted"
)
import_item = import_job.items.filter(index=0).first()
import_item.book = self.book
import_item.save()
with patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async"):
handle_imported_book(import_item)
review = models.ReviewRating.objects.get(book=self.book, user=self.local_user)
self.assertIsInstance(review, models.ReviewRating)
self.assertEqual(review.rating, 3)
self.assertEqual(review.published_date, make_date(2020, 10, 25))
self.assertEqual(review.privacy, "unlisted") | null |
326 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdomain.endpoint import endpoint_data
class SaveTaskForUpdatingRegistrantInfoByIdentityCredentialRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Domain', '2018-01-29', 'SaveTaskForUpdatingRegistrantInfoByIdentityCredential')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Country(self): # String
return self.get_query_params().get('Country')
def set_Country(self, Country): # String
self.add_query_param('Country', Country)
def get_IdentityCredentialType(self): # String
return self.get_query_params().get('IdentityCredentialType')
def set_IdentityCredentialType(self, IdentityCredentialType): # String
self.add_query_param('IdentityCredentialType', IdentityCredentialType)
def get_City(self): # String
return self.get_query_params().get('City')
def set_City(self, City): # String
self.add_query_param('City', City)
def get_IdentityCredential(self): # String
return self.get_body_params().get('IdentityCredential')
def set_IdentityCredential(self, IdentityCredential): # String
self.add_body_params('IdentityCredential', IdentityCredential)
def get_TransferOutProhibited(self): # Boolean
return self.get_query_params().get('TransferOutProhibited')
def set_TransferOutProhibited(self, TransferOutProhibited): # Boolean
self.add_query_param('TransferOutProhibited', TransferOutProhibited)
def get_ZhCity(self): # String
return self.get_query_params().get('ZhCity')
def set_ZhCity(self, ZhCity): # String
self.add_query_param('ZhCity', ZhCity)
def get_TelExt(self): # String
return self.get_query_params().get('TelExt')
def set_TelExt(self, TelExt): # String
self.add_query_param('TelExt', TelExt)
def get_Province(self): # String
return self.get_query_params().get('Province')
def set_Province(self, Province): # String
self.add_query_param('Province', Province)
def METHOD_NAME(self): # String
return self.get_query_params().get('ZhRegistrantName')
def set_ZhRegistrantName(self, ZhRegistrantName): # String
self.add_query_param('ZhRegistrantName', ZhRegistrantName)
def get_PostalCode(self): # String
return self.get_query_params().get('PostalCode')
def set_PostalCode(self, PostalCode): # String
self.add_query_param('PostalCode', PostalCode)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Email(self): # String
return self.get_query_params().get('Email')
def set_Email(self, Email): # String
self.add_query_param('Email', Email)
def get_ZhRegistrantOrganization(self): # String
return self.get_query_params().get('ZhRegistrantOrganization')
def set_ZhRegistrantOrganization(self, ZhRegistrantOrganization): # String
self.add_query_param('ZhRegistrantOrganization', ZhRegistrantOrganization)
def get_Address(self): # String
return self.get_query_params().get('Address')
def set_Address(self, Address): # String
self.add_query_param('Address', Address)
def get_TelArea(self): # String
return self.get_query_params().get('TelArea')
def set_TelArea(self, TelArea): # String
self.add_query_param('TelArea', TelArea)
def get_ZhAddress(self): # String
return self.get_query_params().get('ZhAddress')
def set_ZhAddress(self, ZhAddress): # String
self.add_query_param('ZhAddress', ZhAddress)
def get_RegistrantType(self): # String
return self.get_query_params().get('RegistrantType')
def set_RegistrantType(self, RegistrantType): # String
self.add_query_param('RegistrantType', RegistrantType)
def get_DomainNames(self): # RepeatList
return self.get_query_params().get('DomainName')
def set_DomainNames(self, DomainName): # RepeatList
for depth1 in range(len(DomainName)):
self.add_query_param('DomainName.' + str(depth1 + 1), DomainName[depth1])
def get_Telephone(self): # String
return self.get_query_params().get('Telephone')
def set_Telephone(self, Telephone): # String
self.add_query_param('Telephone', Telephone)
def get_ZhProvince(self): # String
return self.get_query_params().get('ZhProvince')
def set_ZhProvince(self, ZhProvince): # String
self.add_query_param('ZhProvince', ZhProvince)
def get_RegistrantOrganization(self): # String
return self.get_query_params().get('RegistrantOrganization')
def set_RegistrantOrganization(self, RegistrantOrganization): # String
self.add_query_param('RegistrantOrganization', RegistrantOrganization)
def get_UserClientIp(self): # String
return self.get_query_params().get('UserClientIp')
def set_UserClientIp(self, UserClientIp): # String
self.add_query_param('UserClientIp', UserClientIp)
def get_IdentityCredentialNo(self): # String
return self.get_query_params().get('IdentityCredentialNo')
def set_IdentityCredentialNo(self, IdentityCredentialNo): # String
self.add_query_param('IdentityCredentialNo', IdentityCredentialNo)
def get_RegistrantName(self): # String
return self.get_query_params().get('RegistrantName')
def set_RegistrantName(self, RegistrantName): # String
self.add_query_param('RegistrantName', RegistrantName) | null |
327 | # Copyright (c) 2022 The Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import tempfile
import os
import shutil
from pathlib import Path
from gem5.resources.md5_utils import md5_file, md5_dir
class MD5FileTestSuite(unittest.TestCase):
"""Test cases for gem5.resources.md5_utils.md5_file()"""
def test_md5FileConsistency(self) -> None:
# This test ensures the md5 algorithm we use does not change the md5
# value over time.
file = tempfile.NamedTemporaryFile(mode="w", delete=False)
file.write("This is a test string, to be put in a temp file")
file.close()
md5 = md5_file(Path(file.name))
os.remove(file.name)
        self.assertEqual("b113b29fce251f2023066c3fda2ec9dd", md5)
def METHOD_NAME(self) -> None:
# This test ensures that two files with exactly the same contents have
# the same md5 value.
test_str = "This is a test"
file = tempfile.NamedTemporaryFile(mode="w", delete=False)
file.write(test_str)
file.close()
first_file_md5 = md5_file(Path(file.name))
os.remove(file.name)
file = tempfile.NamedTemporaryFile(mode="w", delete=False)
file.write(test_str)
file.close()
second_file_md5 = md5_file(Path(file.name))
os.remove(file.name)
        self.assertEqual(first_file_md5, second_file_md5)
class MD5DirTestSuite(unittest.TestCase):
"""Test cases for gem5.resources.md5_utils.md5_dir()"""
def _create_temp_directory(self) -> Path:
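        # build a small directory tree with known contents: two files plus a nested directory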
dir = tempfile.mkdtemp()
with open(os.path.join(dir, "file1"), "w") as f:
f.write("Some test data here")
with open(os.path.join(dir, "file2"), "w") as f:
f.write("Some more test data")
os.mkdir(os.path.join(dir, "dir2"))
with open(os.path.join(dir, "dir2", "file1"), "w") as f:
f.write("Yet more data")
return Path(dir)
def test_md5DirConsistency(self) -> None:
# This test ensures the md5 algorithm we use does not change the value
# given for directories over time.
dir = self._create_temp_directory()
md5 = md5_dir(dir)
shutil.rmtree(dir)
        self.assertEqual("ad5ac785de44c9fc2fe2798cab2d7b1a", md5)
def test_identicalDirsIdenticalMd5(self) -> None:
# This test ensures that two directories with exactly the same contents
# have the same md5 value.
dir1 = self._create_temp_directory()
first_md5 = md5_dir(dir1)
shutil.rmtree(dir1)
dir2 = self._create_temp_directory()
second_md5 = md5_dir(dir2)
shutil.rmtree(dir2)
        self.assertEqual(first_md5, second_md5)
328 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkclickhouse.endpoint import endpoint_data
class OperateLogHubRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'clickhouse', '2019-11-11', 'OperateLogHub')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Description(self): # String
return self.get_query_params().get('Description')
def set_Description(self, Description): # String
self.add_query_param('Description', Description)
def get_UseLorne(self): # Boolean
return self.get_query_params().get('UseLorne')
def set_UseLorne(self, UseLorne): # Boolean
self.add_query_param('UseLorne', UseLorne)
def get_DeliverName(self): # String
return self.get_query_params().get('DeliverName')
def set_DeliverName(self, DeliverName): # String
self.add_query_param('DeliverName', DeliverName)
def get_DeliverTime(self): # String
return self.get_query_params().get('DeliverTime')
def set_DeliverTime(self, DeliverTime): # String
self.add_query_param('DeliverTime', DeliverTime)
def get_DomainUrl(self): # String
return self.get_query_params().get('DomainUrl')
def set_DomainUrl(self, DomainUrl): # String
self.add_query_param('DomainUrl', DomainUrl)
def get_Password(self): # String
return self.get_query_params().get('Password')
def set_Password(self, Password): # String
self.add_query_param('Password', Password)
def get_AccessKey(self): # String
return self.get_query_params().get('AccessKey')
def set_AccessKey(self, AccessKey): # String
self.add_query_param('AccessKey', AccessKey)
def get_Create(self): # Boolean
return self.get_query_params().get('Create')
def set_Create(self, Create): # Boolean
self.add_query_param('Create', Create)
def get_TableName(self): # String
return self.get_query_params().get('TableName')
def set_TableName(self, TableName): # String
self.add_query_param('TableName', TableName)
def get_TaskId(self): # String
return self.get_query_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_query_param('TaskId', TaskId)
def get_ProjectName(self): # String
return self.get_query_params().get('ProjectName')
def set_ProjectName(self, ProjectName): # String
self.add_query_param('ProjectName', ProjectName)
def get_SchemaName(self): # String
return self.get_query_params().get('SchemaName')
def set_SchemaName(self, SchemaName): # String
self.add_query_param('SchemaName', SchemaName)
def get_AccessSecret(self): # String
return self.get_query_params().get('AccessSecret')
def set_AccessSecret(self, AccessSecret): # String
self.add_query_param('AccessSecret', AccessSecret)
def get_LogStoreName(self): # String
return self.get_query_params().get('LogStoreName')
def set_LogStoreName(self, LogStoreName): # String
self.add_query_param('LogStoreName', LogStoreName)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_DBClusterId(self): # String
return self.get_query_params().get('DBClusterId')
def METHOD_NAME(self, DBClusterId): # String
self.add_query_param('DBClusterId', DBClusterId)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_LogHubStoress(self): # RepeatList
return self.get_query_params().get('LogHubStores')
def set_LogHubStoress(self, LogHubStores): # RepeatList
for depth1 in range(len(LogHubStores)):
if LogHubStores[depth1].get('LogKey') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.LogKey', LogHubStores[depth1].get('LogKey'))
if LogHubStores[depth1].get('FieldKey') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.FieldKey', LogHubStores[depth1].get('FieldKey'))
if LogHubStores[depth1].get('Type') is not None:
self.add_query_param('LogHubStores.' + str(depth1 + 1) + '.Type', LogHubStores[depth1].get('Type'))
def get_FilterDirtyData(self): # Boolean
return self.get_query_params().get('FilterDirtyData')
def set_FilterDirtyData(self, FilterDirtyData): # Boolean
self.add_query_param('FilterDirtyData', FilterDirtyData)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName) | null |
329 | """script for compiling elm source and dumping it to the js folder."""
import functools
import io
import logging
import pygments
from xonsh.color_tools import rgb_to_ints
from xonsh.prompt.base import PromptFormatter, default_prompt
from xonsh.pyghooks import (
Token,
XonshHtmlFormatter,
XonshLexer,
XonshStyle,
xonsh_style_proxy,
)
from xonsh.pygments_cache import get_all_styles
from xonsh.style_tools import partial_color_tokenize
from xonsh.xontribs import Xontrib, get_xontribs
# $RAISE_SUBPROC_ERROR = True
# $XONSH_SHOW_TRACEBACK = False
#
# helper funcs
#
@functools.lru_cache(maxsize=4)
def get_rst_formatter(**kwargs):
from pygments.formatters.html import HtmlFormatter
from pygments.lexers.markup import RstLexer
return RstLexer(), HtmlFormatter(**kwargs)
def METHOD_NAME(s):
return s.replace(r"\n", "<br/>")
def invert_color(orig):
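    # return the RGB complement of the given hex color, as a hex string without the leading '#'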
r, g, b = rgb_to_ints(orig)
inverted = [255 - r, 255 - g, 255 - b]
new = [hex(n)[2:] for n in inverted]
new = [n if len(n) == 2 else "0" + n for n in new]
return "".join(new)
def html_format(s, style="default"):
buf = io.StringIO()
proxy_style = xonsh_style_proxy(XonshStyle(style))
# make sure we have a foreground color
fgcolor = proxy_style._styles[Token.Text][0]
if not fgcolor:
fgcolor = invert_color(proxy_style.background_color[1:].strip("#"))
# need to generate stream before creating formatter so that all tokens actually exist
if isinstance(s, str):
token_stream = partial_color_tokenize(s)
else:
token_stream = s
formatter = XonshHtmlFormatter(
wrapcode=True,
noclasses=True,
style=proxy_style,
prestyles="margin: 0em; padding: 0.5em 0.1em; color: #" + fgcolor,
cssstyles="border-style: solid; border-radius: 5px",
)
formatter.format(token_stream, buf)
return buf.getvalue()
def rst_to_html(text):
try:
from pygments import highlight
lexer, formatter = get_rst_formatter(
noclasses=True,
cssstyles="background: transparent",
style="monokai", # a dark bg style
)
return highlight(text, lexer, formatter)
except ImportError:
return text
# render prompts
def get_named_prompts():
return [
(
"default",
default_prompt(),
),
("debian chroot", "{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}{RESET}> "),
("minimalist", "{BOLD_GREEN}{cwd_base}{RESET} ) "),
(
"terlar",
"{env_name}{BOLD_GREEN}{user}{RESET}@{hostname}:"
"{BOLD_GREEN}{cwd}{RESET}|{gitstatus}\n{BOLD_INTENSE_RED}➤{RESET} ",
),
(
"default with git status",
"{env_name}{BOLD_GREEN}{user}@{hostname}{BOLD_BLUE} {cwd}"
"{branch_color}{gitstatus: {}}{RESET} {BOLD_BLUE}"
"{prompt_end}{RESET} ",
),
("robbyrussell", "{BOLD_INTENSE_RED}➜ {CYAN}{cwd_base} {gitstatus}{RESET} "),
("just a dollar", "$ "),
(
"simple pythonista",
"{INTENSE_RED}{user}{RESET} at {INTENSE_PURPLE}{hostname}{RESET} "
"in {BOLD_GREEN}{cwd}{RESET}\n↪ ",
),
(
"informative",
"[{localtime}] {YELLOW}{env_name} {BOLD_BLUE}{user}@{hostname} "
"{BOLD_GREEN}{cwd} {gitstatus}{RESET}\n> ",
),
(
"informative Version Control",
"{YELLOW}{env_name} " "{BOLD_GREEN}{cwd} {gitstatus}{RESET} {prompt_end} ",
),
("classic", "{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> "),
(
"classic with git status",
"{gitstatus} {RESET}{user}@{hostname} {BOLD_GREEN}{cwd}{RESET}> ",
),
("screen savvy", "{YELLOW}{user}@{PURPLE}{hostname}{BOLD_GREEN}{cwd}{RESET}> "),
(
"sorin",
"{CYAN}{cwd} {INTENSE_RED}❯{INTENSE_YELLOW}❯{INTENSE_GREEN}❯{RESET} ",
),
(
"acidhub",
"❰{INTENSE_GREEN}{user}{RESET}❙{YELLOW}{cwd}{RESET}{env_name}❱{gitstatus}≻ ",
),
(
"nim",
"{INTENSE_GREEN}┬─[{YELLOW}{user}{RESET}@{BLUE}{hostname}{RESET}:{cwd}"
"{INTENSE_GREEN}]─[{localtime}]─[{RESET}G:{INTENSE_GREEN}{curr_branch}=]"
"\n{INTENSE_GREEN}╰─>{INTENSE_RED}{prompt_end}{RESET} ",
),
]
def get_initial(env, prompt_format, fields):
template = env.get_stringified("PROMPT")
return {
"value": template,
"display": METHOD_NAME(html_format(prompt_format(template, fields=fields))),
}
def render_prompts(env):
prompt_format = PromptFormatter()
fields = dict(env.get("PROMPT_FIELDS") or {})
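    # override the dynamic fields with fixed demo values so the rendered prompts are deterministic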
fields.update(
cwd="~/snail/stuff",
cwd_base="stuff",
user="lou",
hostname="carcolh",
env_name=fields["env_prefix"] + "env" + fields["env_postfix"],
curr_branch="branch",
gitstatus="{CYAN}branch|{BOLD_BLUE}+2{RESET}⚑7",
branch_color="{BOLD_INTENSE_RED}",
localtime="15:56:07",
)
yield get_initial(env, prompt_format, fields)
for name, template in get_named_prompts():
display = html_format(prompt_format(template, fields=fields))
yield name, {
"value": template,
"display": METHOD_NAME(display),
}
def render_colors():
source = (
"import sys\n"
'echo "Welcome $USER on" @(sys.platform)\n\n'
"def func(x=42):\n"
' d = {"xonsh": True}\n'
' return d.get("xonsh") and you\n\n'
"# This is a comment\n"
"![env | uniq | sort | grep PATH]\n"
)
lexer = XonshLexer()
lexer.add_filter("tokenmerge")
token_stream = list(pygments.lex(source, lexer=lexer))
token_stream = [(t, s.replace("\n", "\\n")) for t, s in token_stream]
styles = sorted(get_all_styles())
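    # render the default style first, then the remaining styles alphabetically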
styles.insert(0, styles.pop(styles.index("default")))
for style in styles:
try:
display = html_format(token_stream, style=style)
except Exception as ex:
logging.error(
f"Failed to format Xonsh code {ex!r}. {style!r}", exc_info=True
)
display = source
yield style, METHOD_NAME(display)
def format_xontrib(xontrib: Xontrib):
return {
"url": xontrib.url,
"license": xontrib.license,
"display": METHOD_NAME(rst_to_html(xontrib.get_description())),
}
def render_xontribs():
md = get_xontribs()
for xontrib_name, xontrib in md.items():
yield xontrib_name, format_xontrib(xontrib) | null |
330 | from datetime import UTC, datetime
from django.forms.models import model_to_dict
from django.urls import reverse
from .base import AuthenticatedAPITestCase
from pydis_site.apps.api.models import Reminder, User
class UnauthedReminderAPITests(AuthenticatedAPITestCase):
def setUp(self):
super().setUp()
self.client.force_authenticate(user=None)
def test_list_returns_401(self):
url = reverse('api:bot:reminder-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 401)
def METHOD_NAME(self):
url = reverse('api:bot:reminder-list')
response = self.client.post(url, data={'not': 'important'})
self.assertEqual(response.status_code, 401)
def test_delete_returns_401(self):
url = reverse('api:bot:reminder-detail', args=('1234',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 401)
class EmptyDatabaseReminderAPITests(AuthenticatedAPITestCase):
def test_list_all_returns_empty_list(self):
url = reverse('api:bot:reminder-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [])
def test_delete_returns_404(self):
url = reverse('api:bot:reminder-detail', args=('1234',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
class ReminderCreationTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.author = User.objects.create(
id=1234,
name='Mermaid Man',
discriminator=1234,
)
def test_accepts_valid_data(self):
data = {
'author': self.author.id,
'content': 'Remember to...wait what was it again?',
'expiration': datetime.now(tz=UTC).isoformat(),
'jump_url': "https://www.google.com",
'channel_id': 123,
'mentions': [8888, 9999],
}
url = reverse('api:bot:reminder-list')
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 201)
self.assertIsNotNone(Reminder.objects.filter(id=1).first())
def test_rejects_invalid_data(self):
data = {
'author': self.author.id, # Missing multiple required fields
}
url = reverse('api:bot:reminder-list')
response = self.client.post(url, data=data)
self.assertEqual(response.status_code, 400)
self.assertRaises(Reminder.DoesNotExist, Reminder.objects.get, id=1)
class ReminderDeletionTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.author = User.objects.create(
id=6789,
name='Barnacle Boy',
discriminator=6789,
)
cls.reminder = Reminder.objects.create(
author=cls.author,
content="Don't forget to set yourself a reminder",
expiration=datetime.now(UTC),
jump_url="https://www.decliningmentalfaculties.com",
channel_id=123
)
def test_delete_unknown_reminder_returns_404(self):
url = reverse('api:bot:reminder-detail', args=('something',))
response = self.client.delete(url)
self.assertEqual(response.status_code, 404)
def test_delete_known_reminder_returns_204(self):
url = reverse('api:bot:reminder-detail', args=(self.reminder.id,))
response = self.client.delete(url)
self.assertEqual(response.status_code, 204)
self.assertRaises(Reminder.DoesNotExist, Reminder.objects.get, id=self.reminder.id)
class ReminderListTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.author = User.objects.create(
id=6789,
name='Patrick Star',
discriminator=6789,
)
cls.reminder_one = Reminder.objects.create(
author=cls.author,
content="We should take Bikini Bottom, and push it somewhere else!",
expiration=datetime.now(UTC),
jump_url="https://www.icantseemyforehead.com",
channel_id=123
)
cls.reminder_two = Reminder.objects.create(
author=cls.author,
content="Gahhh-I love being purple!",
expiration=datetime.now(UTC),
jump_url="https://www.goofygoobersicecreampartyboat.com",
channel_id=123,
active=False
)
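        # format the expected expiration dates the same way DRF serializes them in API responses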
drf_format = '%Y-%m-%dT%H:%M:%S.%fZ'
cls.rem_dict_one = model_to_dict(cls.reminder_one)
cls.rem_dict_one['expiration'] = cls.rem_dict_one['expiration'].strftime(drf_format)
cls.rem_dict_two = model_to_dict(cls.reminder_two)
cls.rem_dict_two['expiration'] = cls.rem_dict_two['expiration'].strftime(drf_format)
def test_reminders_in_full_list(self):
url = reverse('api:bot:reminder-list')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertCountEqual(response.json(), [self.rem_dict_one, self.rem_dict_two])
def test_filter_search(self):
url = reverse('api:bot:reminder-list')
response = self.client.get(f'{url}?search={self.author.name}')
self.assertEqual(response.status_code, 200)
self.assertCountEqual(response.json(), [self.rem_dict_one, self.rem_dict_two])
def test_filter_field(self):
url = reverse('api:bot:reminder-list')
response = self.client.get(f'{url}?active=true')
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json(), [self.rem_dict_one])
class ReminderRetrieveTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.author = User.objects.create(
id=6789,
name='Reminder author',
discriminator=6789,
)
cls.reminder = Reminder.objects.create(
author=cls.author,
content="Reminder content",
expiration=datetime.now(UTC),
jump_url="http://example.com/",
channel_id=123
)
def test_retrieve_unknown_returns_404(self):
url = reverse('api:bot:reminder-detail', args=("not_an_id",))
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_retrieve_known_returns_200(self):
url = reverse('api:bot:reminder-detail', args=(self.reminder.id,))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
class ReminderUpdateTests(AuthenticatedAPITestCase):
@classmethod
def setUpTestData(cls):
cls.author = User.objects.create(
id=666,
name='Man Ray',
discriminator=666,
)
cls.reminder = Reminder.objects.create(
author=cls.author,
content="Squash those do-gooders",
expiration=datetime.now(UTC),
jump_url="https://www.decliningmentalfaculties.com",
channel_id=123
)
cls.data = {'content': 'Oops I forgot'}
def test_patch_updates_record(self):
url = reverse('api:bot:reminder-detail', args=(self.reminder.id,))
response = self.client.patch(url, data=self.data)
self.assertEqual(response.status_code, 200)
self.assertEqual(
Reminder.objects.filter(id=self.reminder.id).first().content,
self.data['content']
) | null |
331 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class AddClientUserDefineRuleRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'AddClientUserDefineRule')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ActionType(self): # Integer
return self.get_query_params().get('ActionType')
def METHOD_NAME(self, ActionType): # Integer
self.add_query_param('ActionType', ActionType)
def get_NewFilePath(self): # String
return self.get_query_params().get('NewFilePath')
def set_NewFilePath(self, NewFilePath): # String
self.add_query_param('NewFilePath', NewFilePath)
def get_Type(self): # Integer
return self.get_query_params().get('Type')
def set_Type(self, Type): # Integer
self.add_query_param('Type', Type)
def get_Platform(self): # String
return self.get_query_params().get('Platform')
def set_Platform(self, Platform): # String
self.add_query_param('Platform', Platform)
def get_RegistryKey(self): # String
return self.get_query_params().get('RegistryKey')
def set_RegistryKey(self, RegistryKey): # String
self.add_query_param('RegistryKey', RegistryKey)
def get_Cmdline(self): # String
return self.get_query_params().get('Cmdline')
def set_Cmdline(self, Cmdline): # String
self.add_query_param('Cmdline', Cmdline)
def get_FilePath(self): # String
return self.get_query_params().get('FilePath')
def set_FilePath(self, FilePath): # String
self.add_query_param('FilePath', FilePath)
def get_Md5List(self): # String
return self.get_query_params().get('Md5List')
def set_Md5List(self, Md5List): # String
self.add_query_param('Md5List', Md5List)
def get_ParentProcPath(self): # String
return self.get_query_params().get('ParentProcPath')
def set_ParentProcPath(self, ParentProcPath): # String
self.add_query_param('ParentProcPath', ParentProcPath)
def get_ProcPath(self): # String
return self.get_query_params().get('ProcPath')
def set_ProcPath(self, ProcPath): # String
self.add_query_param('ProcPath', ProcPath)
def get_ParentCmdline(self): # String
return self.get_query_params().get('ParentCmdline')
def set_ParentCmdline(self, ParentCmdline): # String
self.add_query_param('ParentCmdline', ParentCmdline)
def get_IP(self): # String
return self.get_query_params().get('IP')
def set_IP(self, IP): # String
self.add_query_param('IP', IP)
def get_RegistryContent(self): # String
return self.get_query_params().get('RegistryContent')
def set_RegistryContent(self, RegistryContent): # String
self.add_query_param('RegistryContent', RegistryContent)
def get_PortStr(self): # String
return self.get_query_params().get('PortStr')
def set_PortStr(self, PortStr): # String
self.add_query_param('PortStr', PortStr)
def get_Port(self): # Integer
return self.get_query_params().get('Port')
def set_Port(self, Port): # Integer
self.add_query_param('Port', Port)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name) | null |
332 | import asyncio
import base64
import hashlib
import hmac
import json
from typing import Awaitable
from unittest import TestCase
from unittest.mock import MagicMock
from hummingbot.connector.derivative.kucoin_perpetual import kucoin_perpetual_constants as CONSTANTS
from hummingbot.connector.derivative.kucoin_perpetual.kucoin_perpetual_auth import KucoinPerpetualAuth
from hummingbot.core.web_assistant.connections.data_types import RESTMethod, RESTRequest, WSJSONRequest
class KucoinPerpetualAuthTests(TestCase):
def setUp(self) -> None:
super().setUp()
self.api_key = "testApiKey"
self.secret_key = "testSecretKey"
self.passphrase = "testPassphrase"
self.subaccount_name = "test!?Subaccount"
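        # pin the time provider so the timestamps (and therefore the signatures) are deterministic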
self.mock_time_provider = MagicMock()
self.mock_time_provider.time.return_value = 1000000
self.auth = KucoinPerpetualAuth(
api_key=self.api_key,
            passphrase=self.passphrase,
secret_key=self.secret_key,
time_provider=self.mock_time_provider,
)
def async_run_with_timeout(self, coroutine: Awaitable, timeout: int = 1):
ret = asyncio.get_event_loop().run_until_complete(asyncio.wait_for(coroutine, timeout))
return ret
def _sign(self, passphrase: str, key: str) -> str:
signed_message = base64.b64encode(
hmac.new(
key.encode("utf-8"),
passphrase.encode("utf-8"),
hashlib.sha256).digest())
return signed_message.decode("utf-8")
def test_add_auth_headers_to_get_request_without_params(self):
request = RESTRequest(
method=RESTMethod.GET,
url="https://test.url/api/endpoint",
is_auth_required=True,
throttler_limit_id="/api/endpoint"
)
self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1))
self.assertEqual(self.api_key, request.headers["KC-API-KEY"])
self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"])
self.assertEqual("2", request.headers["KC-API-KEY-VERSION"])
expected_signature = self._sign("1000000" + "GET" + request.throttler_limit_id, key=self.secret_key)
self.assertEqual(expected_signature, request.headers["KC-API-SIGN"])
expected_passphrase = self._sign(self.passphrase, key=self.secret_key)
self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"])
self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"])
expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key,
key=CONSTANTS.HB_PARTNER_KEY)
self.assertEqual(expected_partner_signature, request.headers["KC-API-PARTNER-SIGN"])
def METHOD_NAME(self):
request = RESTRequest(
method=RESTMethod.GET,
url="https://test.url/api/endpoint",
params={"param1": "value1", "param2": "value2"},
is_auth_required=True,
throttler_limit_id="/api/endpoint"
)
self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1))
self.assertEqual(self.api_key, request.headers["KC-API-KEY"])
self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"])
self.assertEqual("2", request.headers["KC-API-KEY-VERSION"])
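        # query parameters are appended to the endpoint path before the signature is computed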
full_endpoint = f"{request.throttler_limit_id}?param1=value1¶m2=value2"
expected_signature = self._sign("1000000" + "GET" + full_endpoint, key=self.secret_key)
self.assertEqual(expected_signature, request.headers["KC-API-SIGN"])
expected_passphrase = self._sign(self.passphrase, key=self.secret_key)
self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"])
self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"])
expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key,
key=CONSTANTS.HB_PARTNER_KEY)
self.assertEqual(expected_partner_signature, request.headers["KC-API-PARTNER-SIGN"])
def test_add_auth_headers_to_post_request(self):
body = {"param_z": "value_param_z", "param_a": "value_param_a"}
request = RESTRequest(
method=RESTMethod.POST,
url="https://test.url/api/endpoint",
data=json.dumps(body),
is_auth_required=True,
throttler_limit_id="/api/endpoint"
)
self.async_run_with_timeout(self.auth.rest_authenticate(request, use_time_provider=1))
self.assertEqual(self.api_key, request.headers["KC-API-KEY"])
self.assertEqual("1000000", request.headers["KC-API-TIMESTAMP"])
self.assertEqual("2", request.headers["KC-API-KEY-VERSION"])
expected_signature = self._sign("1000000" + "POST" + request.throttler_limit_id + json.dumps(body),
key=self.secret_key)
self.assertEqual(expected_signature, request.headers["KC-API-SIGN"])
expected_passphrase = self._sign(self.passphrase, key=self.secret_key)
self.assertEqual(expected_passphrase, request.headers["KC-API-PASSPHRASE"])
self.assertEqual(CONSTANTS.HB_PARTNER_ID, request.headers["KC-API-PARTNER"])
expected_partner_signature = self._sign("1000000" + CONSTANTS.HB_PARTNER_ID + self.api_key,
key=CONSTANTS.HB_PARTNER_KEY)
self.assertEqual(expected_partner_signature, request.headers["KC-API-PARTNER-SIGN"])
def test_no_auth_added_to_wsrequest(self):
payload = {"param1": "value_param_1"}
request = WSJSONRequest(payload=payload, is_auth_required=True)
self.async_run_with_timeout(self.auth.ws_authenticate(request))
self.assertEqual(payload, request.payload)
def test_ws_auth_payload(self):
expires = self.auth._get_expiration_timestamp()
self.mock_time_provider.return_value = expires
payload = self.auth.get_ws_auth_payload()
raw_signature = "GET/realtime" + expires
expected_signature = hmac.new(self.secret_key.encode("utf-8"),
raw_signature.encode("utf-8"),
hashlib.sha256).hexdigest()
self.assertEqual(3, len(payload))
self.assertEqual(self.api_key, payload[0])
self.assertEqual(expires, payload[1])
self.assertEqual(expected_signature, payload[2]) | null |
333 | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2023, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
import unittest
from unittest import mock
mock_libgeopm = mock.Mock()
with mock.patch('cffi.FFI.dlopen', return_value=mock_libgeopm):
from geopmpy.endpoint import Endpoint
class TestEndpoint(unittest.TestCase):
def setUp(self):
mock_libgeopm.reset()
mock_libgeopm.geopm_endpoint_create.return_value = 0
mock_libgeopm.geopm_endpoint_destroy.return_value = 0
mock_libgeopm.geopm_endpoint_open.return_value = 0
mock_libgeopm.geopm_endpoint_close.return_value = 0
self._endpoint = Endpoint('test_endpoint')
self.test_agent_name = 'my_agent'
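        # emulate the C API by writing the agent name into the caller-provided char buffer, NUL-terminated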
def mock_agent(endpoint, name_max, name_cstr):
for idx, char in enumerate(self.test_agent_name):
name_cstr[idx] = char.encode()
name_cstr[len(self.test_agent_name)] = b'\x00'
return 0
mock_libgeopm.geopm_endpoint_agent.side_effect = mock_agent
def test_endpoint_creation_destruction(self):
self.assertEqual("Endpoint(name='test_endpoint')", repr(self._endpoint))
initial_destroy_count = mock_libgeopm.geopm_endpoint_destroy.call_count
del self._endpoint
self.assertEqual(initial_destroy_count + 1, mock_libgeopm.geopm_endpoint_destroy.call_count)
mock_libgeopm.geopm_endpoint_create.return_value = 1
self.assertRaises(RuntimeError, Endpoint, 'test_endpoint')
def METHOD_NAME(self):
initial_open_count = mock_libgeopm.geopm_endpoint_open.call_count
initial_close_count = mock_libgeopm.geopm_endpoint_close.call_count
with self._endpoint:
self.assertEqual(initial_open_count + 1, mock_libgeopm.geopm_endpoint_open.call_count)
self.assertEqual(initial_close_count, mock_libgeopm.geopm_endpoint_close.call_count)
self.assertEqual(initial_close_count + 1, mock_libgeopm.geopm_endpoint_close.call_count)
def test_endpoint_agent_name(self):
self.assertEqual(self.test_agent_name, self._endpoint.agent())
def test_wait_for_agent_attach(self):
mock_libgeopm.geopm_endpoint_wait_for_agent_attach.return_value = 1
self.assertRaises(RuntimeError, self._endpoint.wait_for_agent_attach, 123.4)
mock_libgeopm.geopm_endpoint_wait_for_agent_attach.return_value = 0
self._endpoint.wait_for_agent_attach(123.4)
def test_stop_wait_loop(self):
mock_libgeopm.geopm_endpoint_wait_for_agent_stop_wait_loop.return_value = 1
self.assertRaises(RuntimeError, self._endpoint.stop_wait_loop)
mock_libgeopm.geopm_endpoint_wait_for_agent_stop_wait_loop.return_value = 0
self._endpoint.stop_wait_loop()
def test_reset_wait_loop(self):
mock_libgeopm.geopm_endpoint_wait_for_agent_reset_wait_loop.return_value = 1
self.assertRaises(RuntimeError, self._endpoint.reset_wait_loop)
mock_libgeopm.geopm_endpoint_wait_for_agent_reset_wait_loop.return_value = 0
self._endpoint.reset_wait_loop()
def test_endpoint_profile_name(self):
test_profile_name = 'my agent'
def mock_profile_name(endpoint, name_max, name_cstr):
for idx, char in enumerate(test_profile_name):
name_cstr[idx] = char.encode()
name_cstr[len(test_profile_name)] = b'\x00'
return 0
mock_libgeopm.geopm_endpoint_profile_name.side_effect = mock_profile_name
self.assertEqual(test_profile_name, self._endpoint.profile_name())
def test_endpoint_nodes(self):
test_node_names = ['node 1', 'node 2']
def mock_num_node(endpoint, num_node_p):
num_node_p[0] = len(test_node_names)
return 0
mock_libgeopm.geopm_endpoint_num_node.side_effect = mock_num_node
def mock_node_name(endpoint, node_idx, name_max, name_cstr):
for idx, char in enumerate(test_node_names[node_idx]):
name_cstr[idx] = char.encode()
name_cstr[len(test_node_names[node_idx])] = b'\x00'
return 0
mock_libgeopm.geopm_endpoint_node_name.side_effect = mock_node_name
self.assertEqual(test_node_names, self._endpoint.nodes())
def test_write_policy(self):
test_policy = {'p0': 0, 'p1': 1}
mock_libgeopm.geopm_endpoint_write_policy.return_value = 0
with mock.patch('geopmpy.agent.policy_names') as policy_mock:
policy_mock.return_value = list(test_policy)
self._endpoint.write_policy(test_policy)
args = mock_libgeopm.geopm_endpoint_write_policy.call_args[0]
_, num_policy, policy_array = args
self.assertEqual(num_policy, len(test_policy))
self.assertEqual(policy_array[0], 0)
self.assertEqual(policy_array[1], 1)
def test_read_sample(self):
test_sample = {'s0': 0, 's1': 1}
test_age = 1.1
def mock_read_sample(endpoint, num_sample, sample_array, sample_age_p):
sample_array[0] = test_sample['s0']
sample_array[1] = test_sample['s1']
sample_age_p[0] = test_age
return 0
mock_libgeopm.geopm_endpoint_read_sample.side_effect = mock_read_sample
with mock.patch('geopmpy.agent.sample_names') as sample_mock:
sample_mock.return_value = list(test_sample)
self.assertEqual((test_age, test_sample),
self._endpoint.read_sample())
if __name__ == '__main__':
unittest.main() | null |
334 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, appearance (GUICG12)
import GemRB
import CharOverview
from GUIDefines import *
AppearanceWindow = 0
PortraitButton = 0
PortraitsTable = 0
LastPortrait = 0
Gender = 0
def SetPicture ():
global PortraitsTable, LastPortrait
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"L"
PortraitButton.SetPicture (PortraitName)
return
def OnLoad ():
global AppearanceWindow, PortraitButton, PortraitsTable, LastPortrait
global Gender
Gender=GemRB.GetVar ("Gender")
AppearanceWindow = GemRB.LoadWindow (11, "GUICG")
CharOverview.PositionCharGenWin(AppearanceWindow)
#Load the Portraits Table
PortraitsTable = GemRB.LoadTable ("PICTURES")
PortraitsStart = PortraitsTable.FindValue (0, 2)
FemaleCount = PortraitsTable.GetRowCount () - PortraitsStart + 1
if Gender == 2:
LastPortrait = GemRB.Roll (1, FemaleCount, PortraitsStart-1)
else:
LastPortrait = GemRB.Roll (1, PortraitsTable.GetRowCount()-FemaleCount, 0)
PortraitButton = AppearanceWindow.GetControl (1)
PortraitButton.SetFlags (IE_GUI_BUTTON_PICTURE|IE_GUI_BUTTON_NO_IMAGE,OP_SET)
PortraitButton.SetState (IE_GUI_BUTTON_LOCKED)
LeftButton = AppearanceWindow.GetControl (2)
RightButton = AppearanceWindow.GetControl (3)
BackButton = AppearanceWindow.GetControl (5)
BackButton.SetText (15416)
BackButton.MakeEscape()
CustomButton = AppearanceWindow.GetControl (6)
CustomButton.SetText (17545)
DoneButton = AppearanceWindow.GetControl (0)
DoneButton.SetText (36789)
DoneButton.MakeDefault()
RightButton.OnPress (RightPress)
LeftButton.OnPress (LeftPress)
BackButton.OnPress (METHOD_NAME)
CustomButton.OnPress (CustomPress)
DoneButton.OnPress (NextPress)
while True:
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
break
LastPortrait = LastPortrait + 1
AppearanceWindow.Focus()
return
def RightPress ():
global LastPortrait
while True:
LastPortrait = LastPortrait + 1
if LastPortrait >= PortraitsTable.GetRowCount ():
LastPortrait = 0
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
return
def LeftPress ():
global LastPortrait
while True:
LastPortrait = LastPortrait - 1
if LastPortrait < 0:
LastPortrait = PortraitsTable.GetRowCount ()-1
if PortraitsTable.GetValue (LastPortrait, 0) == Gender:
SetPicture ()
return
def METHOD_NAME ():
if AppearanceWindow:
AppearanceWindow.Close ()
GemRB.SetNextScript ("CharGen")
GemRB.SetVar ("Gender",0) #scrapping the gender value
return
def CustomDone ():
Window = CustomWindow
Portrait = PortraitList1.QueryText ()
GemRB.SetToken ("LargePortrait", Portrait)
Portrait = PortraitList2.QueryText ()
GemRB.SetToken ("SmallPortrait", Portrait)
if Window:
Window.Close ()
if AppearanceWindow:
AppearanceWindow.Close ()
GemRB.SetNextScript ("CharGen2")
return
def CustomAbort ():
if CustomWindow:
CustomWindow.Close ()
return
def LargeCustomPortrait ():
Window = CustomWindow
Portrait = PortraitList1.QueryText ()
#small hack
if GemRB.GetVar ("Row1") == RowCount1:
return
Label = Window.GetControl (0x10000007)
Label.SetText (Portrait)
Button = Window.GetControl (6)
if Portrait=="":
Portrait = "NOPORTMD"
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
if PortraitList2.QueryText ()!="":
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button = Window.GetControl (0)
Button.SetPicture (Portrait, "NOPORTMD")
return
def SmallCustomPortrait ():
Window = CustomWindow
Portrait = PortraitList2.QueryText ()
#small hack
if GemRB.GetVar ("Row2") == RowCount2:
return
Label = Window.GetControl (0x10000008)
Label.SetText (Portrait)
Button = Window.GetControl (6)
if Portrait=="":
Portrait = "NOPORTSM"
Button.SetState (IE_GUI_BUTTON_DISABLED)
else:
if PortraitList1.QueryText ()!="":
Button.SetState (IE_GUI_BUTTON_ENABLED)
Button = Window.GetControl (1)
Button.SetPicture (Portrait, "NOPORTSM")
return
def CustomPress ():
global PortraitList1, PortraitList2
global RowCount1, RowCount2
global CustomWindow
CustomWindow = Window = GemRB.LoadWindow (18)
PortraitList1 = Window.GetControl (2)
RowCount1 = len(PortraitList1.ListResources (CHR_PORTRAITS, 2))
PortraitList1.OnSelect (LargeCustomPortrait)
PortraitList1.SetVarAssoc ("Row1",RowCount1)
PortraitList2 = Window.GetControl (4)
RowCount2 = len(PortraitList2.ListResources (CHR_PORTRAITS, 0))
PortraitList2.OnSelect (SmallCustomPortrait)
PortraitList2.SetVarAssoc ("Row2",RowCount2)
Button = Window.GetControl (6)
Button.SetText (11973)
Button.MakeDefault()
Button.OnPress (CustomDone)
Button.SetState (IE_GUI_BUTTON_DISABLED)
Button = Window.GetControl (7)
Button.SetText (15416)
Button.MakeEscape()
Button.OnPress (CustomAbort)
Button = Window.GetControl (0)
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"L"
Button.SetPicture (PortraitName, "NOPORTMD")
Button.SetState (IE_GUI_BUTTON_LOCKED)
Button = Window.GetControl (1)
PortraitName = PortraitsTable.GetRowName (LastPortrait)+"S"
Button.SetPicture (PortraitName, "NOPORTSM")
Button.SetState (IE_GUI_BUTTON_LOCKED)
Window.ShowModal (MODAL_SHADOW_NONE)
return
def NextPress ():
if AppearanceWindow:
AppearanceWindow.Close ()
PortraitTable = GemRB.LoadTable ("pictures")
PortraitName = PortraitTable.GetRowName (LastPortrait )
GemRB.SetToken ("SmallPortrait", PortraitName+"S")
GemRB.SetToken ("LargePortrait", PortraitName+"L")
GemRB.SetVar ("PortraitIndex", LastPortrait)
GemRB.SetNextScript ("CharGen2") #Before race
return | null |
335 | # coding=utf-8
# Copyright 2023 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Replace all images in the fake directory with their compressed versions.
This allows to reduce size of the images in the `fake_data/` directory.
Instructions:
python -m tensorflow_datasets.scripts.replace_fake_images \
--fake_dir=/path/to/tensorflow_datasets/testing/test_data/fake_examples
"""
import hashlib
import os
import tarfile
import tempfile
import zipfile
import zlib
import absl.app
import absl.flags
import numpy as np
import PIL.Image
FLAGS = absl.flags.FLAGS
absl.flags.DEFINE_string(
'fake_dir', None, 'path to the directory which contains files'
)
# Some dataset generation rely on the image content, so we cannot compress
# those.
SKIP_DATASETS = ['curated_breast_imaging_ddsm']
def rewrite_image(filepath):
"""Replace the image by an new one with smaller size (uniform color).
Args:
filepath: path of the images to get processed
"""
image_content = PIL.Image.open(filepath)
image = np.array(image_content)
  # Filter unsupported images
if image_content.mode == 'RGBA' or image.dtype == bool:
return
# The color is a deterministic function of the relative filepath.
assert filepath.startswith(FLAGS.fake_dir)
relative_filepath = filepath[len(FLAGS.fake_dir) :]
color = int(hashlib.md5(relative_filepath.encode('utf-8')).hexdigest(), 16)
color %= 255
image = np.ones_like(image) * color
image = PIL.Image.fromarray(image)
image.save(filepath, optimize=True)
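# Both archive rewriters below follow the same approach: extract into a temporary
# directory, recursively rewrite its contents, then repack the archive in place.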
def rewrite_zip(root_dir, zip_filepath):
"""Rewrite the given .zip file into a new one containing compressed images.
Args:
    root_dir: directory that contains the .zip file
    zip_filepath: path to the .zip file to rewrite
"""
  # Create a temporary directory to hold the extracted images
with tempfile.TemporaryDirectory(dir=root_dir) as temp_dir:
# Extraction of compressed .zip file
with zipfile.ZipFile(zip_filepath, 'r') as zip_file:
zip_file.extractall(path=temp_dir)
rewrite_dir(temp_dir) # Recursively compress the archive content
# Compress the .zip file again
with zipfile.ZipFile(
zip_filepath,
'w',
compression=zipfile.ZIP_DEFLATED,
compresslevel=zlib.Z_BEST_COMPRESSION,
) as zip_file:
for file_dir, _, files in os.walk(temp_dir):
for file in files:
file_path = os.path.join(file_dir, file)
zip_file.write(
file_path, arcname=os.path.relpath(file_path, temp_dir)
)
def rewrite_tar(root_dir, tar_filepath):
"""Rewrite the older .tar file into new better compressed one.
Compression formats supported by this method (.tar.gz, .tgz, .tar.bz2)
Args:
root_dir: directory path which contain tar compressed file
tar_filepath: path from directory to file
"""
  # Create a temporary directory to hold the extracted images
with tempfile.TemporaryDirectory(dir=root_dir, suffix='fake') as temp_dir:
    # Check the archive extension to pick the right decompression mode
tar_filepath_lowercase = tar_filepath.lower()
if tar_filepath_lowercase.endswith('gz'):
extension = ':gz'
elif tar_filepath_lowercase.endswith('bz2'):
extension = ':bz2'
elif tar_filepath_lowercase.endswith('xz'):
extension = ':xz'
else:
extension = ''
# Extraction of .tar file
with tarfile.open(tar_filepath, 'r' + extension) as tar:
tar.extractall(path=temp_dir)
rewrite_dir(temp_dir) # Recursively compress the archive content
# Convert back into tar file
with tarfile.open(tar_filepath, 'w' + extension) as tar:
tar.add(temp_dir, arcname='', recursive=True)
def rewrite_dir(fake_dir):
"""Process the whole directory which contains the compressed files.
Args:
fake_dir: path of the directory which contains all compression files
"""
img_ext_list = ['.jpg', '.jpeg', '.png']
for root_dir, _, files in os.walk(fake_dir):
if any(skip_ds in root_dir for skip_ds in SKIP_DATASETS):
print(f'Skipping {root_dir}')
continue
print(f'Processing {root_dir}')
for file in files:
path = os.path.join(root_dir, file)
file_ext = os.path.splitext(file)[-1].lower()
if file_ext in img_ext_list:
rewrite_image(path)
elif file_ext == '.npz': # Filter `.npz` files
continue
elif zipfile.is_zipfile(path):
rewrite_zip(root_dir, path)
elif tarfile.is_tarfile(path):
rewrite_tar(root_dir, path)
def METHOD_NAME(_):
"""Main script."""
if FLAGS.fake_dir is None:
raise ValueError('You should specify the path of the `fake_dir`')
rewrite_dir(FLAGS.fake_dir)
if __name__ == '__main__':
absl.app.run(METHOD_NAME) | null |
336 | # -*- coding: utf-8 -*-
"""
This plugin enables a kind of permalink which can be used to refer to a piece
of content which is resistant to the file being moved or renamed.
"""
import itertools
import logging
import os
import os.path
from pelican import signals
from pelican.generators import Generator
from pelican.utils import clean_output_dir
from pelican.utils import mkdir_p
logger = logging.getLogger(__name__)
def article_url(content):
'''
Get the URL for an item of content
'''
return '{content.settings[SITEURL]}/{content.url}'.format(
content=content)
REDIRECT_STRING = '''
<!DOCTYPE HTML>
<html lang="en-US">
<head>
<meta charset="UTF-8">
<meta http-equiv="refresh" content="0;url={url}">
<script type="text/javascript">
window.location.href = "{url}"
</script>
<title>Page Redirection to {title}</title>
</head>
<body>
If you are not redirected automatically, follow the
<a href='{url}'>link to {title}</a>
</body>
</html>
'''
class PermalinkGenerator(Generator):
'''
Generate a redirect page for every item of content with a
permalink_id metadata
'''
def generate_context(self):
'''
Setup context
'''
self.permalink_output_path = os.path.join(
self.output_path, self.settings['PERMALINK_PATH'])
self.permalink_id_metadata_key = (
self.settings['PERMALINK_ID_METADATA_KEY'])
def generate_output(self, writer=None):
'''
Generate redirect files
'''
logger.info(
'Generating permalink files in %r', self.permalink_output_path)
clean_output_dir(self.permalink_output_path, [])
mkdir_p(self.permalink_output_path)
for content in itertools.chain(
self.context['articles'], self.context['pages']):
for permalink_id in content.get_permalink_ids_iter():
permalink_path = os.path.join(
self.permalink_output_path, permalink_id) + '.html'
redirect_string = REDIRECT_STRING.format(
url=article_url(content),
title=content.title)
                with open(permalink_path, 'w') as redirect_file:
                    redirect_file.write(redirect_string)
def get_permalink_ids_iter(self):
'''
    Get the permalink ids of this content. Bound to content instances by
    add_permalink_methods.
'''
permalink_id_key = self.settings['PERMALINK_ID_METADATA_KEY']
permalink_ids = self.metadata.get(permalink_id_key, '')
for permalink_id in permalink_ids.split(','):
if permalink_id:
yield permalink_id.strip()
def get_permalink_ids(self):
'''
    Get the permalink ids of this content. Bound to content instances by
    add_permalink_methods.
'''
return list(self.get_permalink_ids_iter())
def get_permalink_path(self):
"""Get just path component of permalink."""
try:
first_permalink_id = next(self.get_permalink_ids_iter())
except StopIteration:
return None
return '/{settings[PERMALINK_PATH]}/{first_permalink}.html'.format(
settings=self.settings, first_permalink=first_permalink_id)
def get_permalink_url(self):
'''
Get a permalink URL
'''
return "/".join((self.settings['SITEURL'], self.get_permalink_path()))
PERMALINK_METHODS = (
get_permalink_ids_iter,
get_permalink_ids,
get_permalink_url,
get_permalink_path,
)
def add_permalink_methods(content_inst):
'''
Add permalink methods to object
'''
for permalink_method in PERMALINK_METHODS:
setattr(
content_inst,
permalink_method.__name__,
permalink_method.__get__(content_inst, content_inst.__class__))
def add_permalink_option_defaults(pelicon_inst):
'''
    Add pelican setting defaults for the plugin
'''
pelicon_inst.settings.setdefault('PERMALINK_PATH', 'permalinks')
pelicon_inst.settings.setdefault(
'PERMALINK_ID_METADATA_KEY', 'permalink_id')
def METHOD_NAME(_pelican_object):
return PermalinkGenerator
def register():
signals.METHOD_NAME.connect(METHOD_NAME)
signals.content_object_init.connect(add_permalink_methods)
signals.initialized.connect(add_permalink_option_defaults) | null |
337 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class CreateDdrInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'CreateDdrInstance')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBInstanceStorage(self): # Integer
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self, DBInstanceStorage): # Integer
self.add_query_param('DBInstanceStorage', DBInstanceStorage)
def get_SystemDBCharset(self): # String
return self.get_query_params().get('SystemDBCharset')
def set_SystemDBCharset(self, SystemDBCharset): # String
self.add_query_param('SystemDBCharset', SystemDBCharset)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_DBInstanceDescription(self): # String
return self.get_query_params().get('DBInstanceDescription')
def set_DBInstanceDescription(self, DBInstanceDescription): # String
self.add_query_param('DBInstanceDescription', DBInstanceDescription)
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_BackupSetId(self): # String
return self.get_query_params().get('BackupSetId')
def set_BackupSetId(self, BackupSetId): # String
self.add_query_param('BackupSetId', BackupSetId)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_SecurityIPList(self): # String
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self, SecurityIPList): # String
self.add_query_param('SecurityIPList', SecurityIPList)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_PrivateIpAddress(self): # String
return self.get_query_params().get('PrivateIpAddress')
def set_PrivateIpAddress(self, PrivateIpAddress): # String
self.add_query_param('PrivateIpAddress', PrivateIpAddress)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def METHOD_NAME(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType)
def get_ConnectionMode(self): # String
return self.get_query_params().get('ConnectionMode')
def set_ConnectionMode(self, ConnectionMode): # String
self.add_query_param('ConnectionMode', ConnectionMode)
def get_SourceDBInstanceName(self): # String
return self.get_query_params().get('SourceDBInstanceName')
def set_SourceDBInstanceName(self, SourceDBInstanceName): # String
self.add_query_param('SourceDBInstanceName', SourceDBInstanceName)
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_Engine(self): # String
return self.get_query_params().get('Engine')
def set_Engine(self, Engine): # String
self.add_query_param('Engine', Engine)
def get_DBInstanceStorageType(self): # String
return self.get_query_params().get('DBInstanceStorageType')
def set_DBInstanceStorageType(self, DBInstanceStorageType): # String
self.add_query_param('DBInstanceStorageType', DBInstanceStorageType)
def get_DBInstanceNetType(self): # String
return self.get_query_params().get('DBInstanceNetType')
def set_DBInstanceNetType(self, DBInstanceNetType): # String
self.add_query_param('DBInstanceNetType', DBInstanceNetType)
def get_RestoreTime(self): # String
return self.get_query_params().get('RestoreTime')
def set_RestoreTime(self, RestoreTime): # String
self.add_query_param('RestoreTime', RestoreTime)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_UsedTime(self): # String
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # String
self.add_query_param('UsedTime', UsedTime)
def get_RestoreType(self): # String
return self.get_query_params().get('RestoreType')
def set_RestoreType(self, RestoreType): # String
self.add_query_param('RestoreType', RestoreType)
def get_VPCId(self): # String
return self.get_query_params().get('VPCId')
def set_VPCId(self, VPCId): # String
self.add_query_param('VPCId', VPCId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
def get_SourceRegion(self): # String
return self.get_query_params().get('SourceRegion')
def set_SourceRegion(self, SourceRegion): # String
self.add_query_param('SourceRegion', SourceRegion) | null |
338 | # MIT License
# Copyright (c) 2020 Development Seed
# Copyright (c) 2021 Plan4Better
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import abc
from dataclasses import dataclass
from typing import Any, ClassVar, Dict, List, Optional
from pydantic import BaseModel, Field
from pydantic.class_validators import root_validator
from pydantic.networks import AnyHttpUrl
from src.core.config import settings
from src.resources.enums import MimeTypes
# =========================VECTOR TILE SCHEMAS=========================
class VectorTileLayer(BaseModel, metaclass=abc.ABCMeta):
"""Layer's Abstract BaseClass.
Attributes:
id (str): Layer's name.
bounds (list): Layer's bounds (left, bottom, right, top).
minzoom (int): Layer's min zoom level.
maxzoom (int): Layer's max zoom level.
tileurl (str, optional): Layer's tiles url.
"""
id: str
bounds: List[float] = [-180, -90, 180, 90]
minzoom: int = settings.DEFAULT_MINZOOM
maxzoom: int = settings.DEFAULT_MAXZOOM
tileurl: Optional[str]
class VectorTileTable(VectorTileLayer):
"""Table Reader.
Attributes:
id (str): Layer's name.
bounds (list): Layer's bounds (left, bottom, right, top).
minzoom (int): Layer's min zoom level.
maxzoom (int): Layer's max zoom level.
tileurl (str, optional): Layer's tiles url.
type (str): Layer's type.
        schema (str): Table's database schema (e.g. public).
        geometry_type (str): Table's geometry type (e.g. polygon).
        geometry_column (str): Name of the geometry column in the table.
properties (Dict): Properties available in the table.
"""
type: str = "Table"
dbschema: str = Field(..., alias="schema")
table: str
geometry_type: str
geometry_column: str
properties: Dict[str, str]
class VectorTileFunction(VectorTileTable):
"""Function Reader.
Attributes:
id (str): Layer's name.
bounds (list): Layer's bounds (left, bottom, right, top).
minzoom (int): Layer's min zoom level.
maxzoom (int): Layer's max zoom level.
tileurl (str, optional): Layer's tiles url.
type (str): Layer's type.
        function_name (str): Name of the SQL function to call. Defaults to `id`.
sql (str): Valid SQL function which returns Tile data.
options (list, optional): options available for the SQL function.
"""
type: str = "Function"
sql: str
function_name: Optional[str]
options: Optional[List[Dict[str, Any]]]
@root_validator
def function_name_default(cls, values):
"""Define default function's name to be same as id."""
function_name = values.get("function_name")
if function_name is None:
values["function_name"] = values.get("id")
return values
@classmethod
def from_file(cls, id: str, infile: str, **kwargs: Any):
"""load sql from file"""
with open(infile) as f:
sql = f.read()
return cls(id=id, sql=sql, **kwargs)
class TileMatrixSetLink(BaseModel):
"""
TileMatrixSetLink model.
Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets
"""
href: AnyHttpUrl
rel: str = "item"
type: MimeTypes = MimeTypes.json
class Config:
"""Config for model."""
use_enum_values = True
class TileMatrixSetRef(BaseModel):
"""
TileMatrixSetRef model.
Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets
"""
id: str
title: str
links: List[TileMatrixSetLink]
class TileMatrixSetList(BaseModel):
"""
TileMatrixSetList model.
Based on http://docs.opengeospatial.org/per/19-069.html#_tilematrixsets
"""
tileMatrixSets: List[TileMatrixSetRef]
@dataclass
class Registry:
"""function registry"""
funcs: ClassVar[Dict[str, VectorTileFunction]] = {}
@classmethod
def get(cls, key: str):
"""lookup function by name"""
return cls.funcs.get(key)
@classmethod
def METHOD_NAME(cls, *args: VectorTileFunction):
"""register function(s)"""
for func in args:
cls.funcs[func.id] = func
registry = Registry() | null |
339 | import struct
import base64
import json
from jmbitcoin import ecdsa_sign, ecdsa_verify
from jmdaemon import fidelity_bond_sanity_check
import binascii
def assert_is_utxo(utxo):
assert len(utxo) == 2
assert isinstance(utxo[0], bytes)
assert len(utxo[0]) == 32
assert isinstance(utxo[1], int)
assert utxo[1] >= 0
def METHOD_NAME(cert_pub, cert_expiry):
return b'fidelity-bond-cert|' + cert_pub + b'|' + str(cert_expiry).encode('ascii')
def get_ascii_cert_msg(cert_pub, cert_expiry):
return b'fidelity-bond-cert|' + binascii.hexlify(cert_pub) + b'|' + str(cert_expiry).encode('ascii')
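# Two certificate-message encodings are built: one over the raw compressed pubkey
# bytes and one over its hex (ASCII) form; parse_and_verify_proof_msg accepts a
# certificate signature made over either encoding.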
class FidelityBond:
def __init__(self, utxo, utxo_pubkey, locktime, cert_expiry,
cert_privkey, cert_pubkey, cert_signature):
assert_is_utxo(utxo)
assert isinstance(utxo_pubkey, bytes)
assert isinstance(locktime, int)
assert isinstance(cert_expiry, int)
assert isinstance(cert_privkey, bytes)
assert isinstance(cert_pubkey, bytes)
assert isinstance(cert_signature, bytes)
self.utxo = utxo
self.utxo_pubkey = utxo_pubkey
self.locktime = locktime
self.cert_expiry = cert_expiry
self.cert_privkey = cert_privkey
self.cert_pubkey = cert_pubkey
self.cert_signature = cert_signature
def create_proof(self, maker_nick, taker_nick):
return FidelityBondProof(
maker_nick, taker_nick, self.cert_pubkey, self.cert_expiry,
self.cert_signature, self.utxo, self.utxo_pubkey, self.locktime)
def serialize(self):
return json.dumps([
self.utxo,
self.utxo_pubkey,
self.locktime,
self.cert_expiry,
self.cert_privkey,
self.cert_pubkey,
self.cert_signature,
])
@classmethod
def deserialize(cls, data):
return cls(*json.loads(data))
class FidelityBondProof:
# nick_sig + cert_sig + cert_pubkey + cert_expiry + utxo_pubkey + txid + vout + timelock
# 72 + 72 + 33 + 2 + 33 + 32 + 4 + 4 = 252 bytes
SER_STUCT_FMT = '<72s72s33sH33s32sII'
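    # DER signatures are variable length (at most 72 bytes); they are left-padded
    # with 0xff to a fixed 72 bytes before packing, and the parser strips the
    # padding again by locating the leading 0x30 DER marker byte.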
def __init__(self, maker_nick, taker_nick, cert_pub, cert_expiry,
cert_sig, utxo, utxo_pub, locktime):
assert isinstance(maker_nick, str)
assert isinstance(taker_nick, str)
assert isinstance(cert_pub, bytes)
assert isinstance(cert_sig, bytes)
assert isinstance(utxo_pub, bytes)
assert isinstance(locktime, int)
assert_is_utxo(utxo)
self.maker_nick = maker_nick
self.taker_nick = taker_nick
self.cert_pub = cert_pub
self.cert_expiry = cert_expiry
self.cert_sig = cert_sig
self.utxo = utxo
self.utxo_pub = utxo_pub
self.locktime = locktime
@property
def nick_msg(self):
return (self.taker_nick + '|' + self.maker_nick).encode('ascii')
def create_proof_msg(self, cert_priv):
nick_sig = ecdsa_sign(self.nick_msg, cert_priv)
# FIXME: remove stupid base64
nick_sig = base64.b64decode(nick_sig)
return self._serialize_proof_msg(nick_sig)
def _serialize_proof_msg(self, msg_signature):
msg_signature = msg_signature.rjust(72, b'\xff')
cert_sig = self.cert_sig.rjust(72, b'\xff')
fidelity_bond_data = struct.pack(
self.SER_STUCT_FMT,
msg_signature,
cert_sig,
self.cert_pub,
self.cert_expiry,
self.utxo_pub,
self.utxo[0],
self.utxo[1],
self.locktime
)
return base64.b64encode(fidelity_bond_data).decode('ascii')
@staticmethod
def _verify_signature(message, signature, pubkey):
# FIXME: remove stupid base64
return ecdsa_verify(message, base64.b64encode(signature), pubkey)
@classmethod
def parse_and_verify_proof_msg(cls, maker_nick, taker_nick, data):
if not fidelity_bond_sanity_check.fidelity_bond_sanity_check(data):
raise ValueError("sanity check failed")
decoded_data = base64.b64decode(data)
unpacked_data = struct.unpack(cls.SER_STUCT_FMT, decoded_data)
try:
signature = unpacked_data[0][unpacked_data[0].index(b'\x30'):]
cert_sig = unpacked_data[1][unpacked_data[1].index(b'\x30'):]
except ValueError:
            # raised if index() doesn't find the position
raise ValueError("der signature header not found")
proof = cls(maker_nick, taker_nick, unpacked_data[2], unpacked_data[3],
cert_sig, (unpacked_data[5], unpacked_data[6]),
unpacked_data[4], unpacked_data[7])
cert_msg = METHOD_NAME(proof.cert_pub, proof.cert_expiry)
ascii_cert_msg = get_ascii_cert_msg(proof.cert_pub, proof.cert_expiry)
if not cls._verify_signature(proof.nick_msg, signature, proof.cert_pub):
raise ValueError("nick sig does not verify")
if not cls._verify_signature(cert_msg, proof.cert_sig, proof.utxo_pub) and\
not cls._verify_signature(ascii_cert_msg, proof.cert_sig, proof.utxo_pub):
raise ValueError("cert sig does not verify")
return proof | null |
340 | # Copyright 2022 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Tuple
import argparse
import os
import numpy as np
import nnabla as nn
from nnabla.ext_utils import get_extension_context
from nnabla.utils.data_iterator import DataIterator
from nnabla.logger import logger
from pointnet2 import pointnet2_classification_msg, pointnet2_classification_ssg
from loss import classification_loss
from running_utils import categorical_accuracy
# Install neu (nnabla examples utils) to import these functions.
# See [NEU](https://github.com/nnabla/nnabla-examples/tree/master/utils).
from neu.datasets.modelnet40_normal_resampled import data_iterator_modelnet40_normal_resampled
from neu.checkpoint_util import load_checkpoint
def eval_one_epoch(
valid_data_iter: DataIterator,
valid_vars: Dict[str, nn.Variable],
valid_loss_vars: Dict[str, nn.Variable],
) -> Tuple[np.ndarray, np.ndarray]:
total_steps = 0
total_accuracy = 0.0
total_loss = 0.0
num_iterations = valid_data_iter.size // valid_data_iter.batch_size
for _ in range(num_iterations):
point_cloud, label = valid_data_iter.next()
valid_vars["point_cloud"].d = point_cloud
valid_vars["label"].d = label
valid_loss_vars["loss"].forward(clear_buffer=True)
pred_logits = valid_loss_vars["pred"].d.copy()
accuracy = categorical_accuracy(pred_logits, valid_vars["label"].d)
total_steps += 1
total_accuracy += accuracy
total_loss += float(valid_loss_vars["loss"].d)
average_accuracy = total_accuracy / float(total_steps)
average_loss = total_loss / float(total_steps)
return average_accuracy, average_loss
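# Note: metrics are averaged over complete batches only; trailing samples that do
# not fill a full batch are skipped by the floor division above.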
def evaluate(args):
# Set context
extension_module = args.context
ctx = get_extension_context(extension_module, device_id=args.device_id)
nn.set_default_context(ctx)
# Feature dim, with normal vector or not
feature_dim = 6 if args.with_normal else 3
# Create validation graph
    valid_batch_size = 4  # a batch size of 4 divides the validation set evenly, so all samples are used
point_cloud_valid = nn.Variable(
(valid_batch_size, args.num_points, feature_dim))
label_valid = nn.Variable((valid_batch_size, 1))
if args.model_type == "ssg":
pred_valid = pointnet2_classification_ssg(
point_cloud_valid, train=False, num_classes=args.num_classes)
elif args.model_type == "msg":
pred_valid = pointnet2_classification_msg(
point_cloud_valid, train=False, num_classes=args.num_classes)
else:
raise ValueError
pred_valid.persistent = True
loss_valid = classification_loss(pred_valid, label_valid)
valid_vars = {"point_cloud": point_cloud_valid, "label": label_valid}
valid_loss_vars = {"loss": loss_valid, "pred": pred_valid}
# Load snapshot
load_checkpoint(args.checkpoint_json_path, {})
# Data Iterator
valid_data_iter = data_iterator_modelnet40_normal_resampled(
args.data_dir,
valid_batch_size,
False,
False,
args.num_points,
normalize=True,
with_normal=args.with_normal,
)
logger.info(f"Validation dataset size: {valid_data_iter.size}")
# Evaluation
logger.info(f"Evaluation starting ...")
accuracy, loss = eval_one_epoch(
valid_data_iter, valid_vars, valid_loss_vars)
logger.info("accuracy: {}".format(accuracy))
logger.info("loss: {}".format(loss))
def METHOD_NAME():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default=os.path.join(os.path.dirname(__file__), "data", "modelnet40_normal_resampled")
)
parser.add_argument("--model_type", type=str,
default="ssg", choices=["msg", "ssg"])
parser.add_argument("--num_classes", type=int, default=40)
parser.add_argument("--num_points", type=int, default=1024)
parser.add_argument("--with_normal", action="store_true")
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--context", type=str, default="cudnn")
parser.add_argument(
"--checkpoint_json_path",
type=str,
default="./pointnet2_classification_result/seed_100/checkpoint_best/checkpoint_best.json",
)
args = parser.parse_args()
evaluate(args)
if __name__ == "__main__":
METHOD_NAME() | null |
341 | from typing import Optional
from pydantic import Field, SecretStr
from hummingbot.client.config.config_data_types import BaseConnectorConfigMap, ClientFieldData
from hummingbot.connector.exchange.ndax import ndax_constants as CONSTANTS
from hummingbot.core.utils.tracking_nonce import get_tracking_nonce
CENTRALIZED = True
EXAMPLE_PAIR = "BTC-CAD"
HUMMINGBOT_ID_PREFIX = 777
# NDAX fees: https://ndax.io/fees
# Fees have to be expressed as percent value
DEFAULT_FEES = [0.2, 0.2]
# USE_ETHEREUM_WALLET not required because default value is false
# FEE_TYPE not required because default value is Percentage
# FEE_TOKEN not required because the fee is not flat
def convert_to_exchange_trading_pair(hb_trading_pair: str) -> str:
return hb_trading_pair.replace("-", "")
def get_new_client_order_id(is_buy: bool, trading_pair: str) -> str:
ts_micro_sec: int = get_tracking_nonce()
return f"{HUMMINGBOT_ID_PREFIX}{ts_micro_sec}"
def METHOD_NAME(connector_variant_label: Optional[str]) -> str:
variant = connector_variant_label if connector_variant_label else "ndax_main"
return CONSTANTS.REST_URLS.get(variant)
def wss_url(connector_variant_label: Optional[str]) -> str:
variant = connector_variant_label if connector_variant_label else "ndax_main"
return CONSTANTS.WSS_URLS.get(variant)
class NdaxConfigMap(BaseConnectorConfigMap):
connector: str = Field(default="ndax", client_data=None)
ndax_uid: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX user ID (uid)",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_account_name: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter the name of the account you want to use",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_api_key: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX API key",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_secret_key: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX secret key",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
class Config:
title = "ndax"
KEYS = NdaxConfigMap.construct()
OTHER_DOMAINS = ["ndax_testnet"]
OTHER_DOMAINS_PARAMETER = {"ndax_testnet": "ndax_testnet"}
OTHER_DOMAINS_EXAMPLE_PAIR = {"ndax_testnet": "BTC-CAD"}
OTHER_DOMAINS_DEFAULT_FEES = {"ndax_testnet": [0.2, 0.2]}
class NdaxTestnetConfigMap(BaseConnectorConfigMap):
connector: str = Field(default="ndax_testnet", client_data=None)
ndax_testnet_uid: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX Testnet user ID (uid)",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_testnet_account_name: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter the name of the account you want to use",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_testnet_api_key: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX Testnet API key",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
ndax_testnet_secret_key: SecretStr = Field(
default=...,
client_data=ClientFieldData(
prompt=lambda cm: "Enter your NDAX Testnet secret key",
is_secure=True,
is_connect_key=True,
prompt_on_new=True,
)
)
class Config:
title = "ndax_testnet"
OTHER_DOMAINS_KEYS = {"ndax_testnet": NdaxTestnetConfigMap.construct()} | null |
342 | from abc import abstractmethod
from typing import List, Iterator, Union
from docutils import nodes
from docutils.statemachine import ViewList, string2lines
from docutils.parsers.rst import Directive, directives
from conversion import transpile_py_to_r
def setup(app):
app.add_directive('pharmpy-execute', PharmpyExecute)
app.add_directive('pharmpy-code', PharmpyCode)
return {
'version': '0.1',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
def csv_option(s):
return [p.strip() for p in s.split(",")] if s else []
class RecursiveDirective(Directive):
def _convert_lines_to_nodes(self, lines: List[str]) -> List[nodes.Node]:
"""Turn an RST string into a node that can be used in the document.
See https://github.com/sphinx-doc/sphinx/issues/8039
"""
node = nodes.Element()
node.document = self.state.document
self.state.nested_parse(
ViewList(
string2lines('\n'.join(lines)),
source='[SnippetDirective]',
),
self.content_offset,
node,
)
return node.children
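# The code directives below emit their content twice inside a sphinx-tabs
# ``.. tabs::`` block: once as the original Python and once transpiled to R via
# transpile_py_to_r.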
class PharmpyAbstractCodeDirective(RecursiveDirective):
option_spec = {
'linenos': directives.flag,
'lineno-start': directives.nonnegative_int,
'emphasize-lines': directives.unchanged_required,
}
def run(self):
return self._nodes()
def _nodes(self):
lines = self._lines()
return self._convert_lines_to_nodes(lines)
@abstractmethod
def _lines(self) -> List[str]:
"""Return lines for this directive"""
def _input(self):
return [
'.. tabs::',
*METHOD_NAME(3, [
'',
'.. code-tab:: py',
*METHOD_NAME(3, self._code_option_lines()),
'',
*METHOD_NAME(3, self.content),
'',
'.. code-tab:: r R',
*METHOD_NAME(3, self._code_option_lines()),
'',
*METHOD_NAME(3, transpile_py_to_r(self.content)),
]),
]
def _code_option_lines(self):
if 'emphasize-lines' in self.options:
yield f':emphasize-lines:{self.options.get("emphasize-lines")}'
if 'linenos' in self.options:
yield ':linenos:'
if 'lineno-start' in self.options:
yield f':lineno-start:{self.options.get("lineno-start")}'
class PharmpyExecute(PharmpyAbstractCodeDirective):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
has_content = True
option_spec = {
**PharmpyAbstractCodeDirective.option_spec,
'hide-code': directives.flag,
'hide-output': directives.flag,
'code-below': directives.flag,
'raises': csv_option,
'stderr': directives.flag,
}
def _lines(self) -> List[str]:
return [
f'.. container:: pharmpy-snippet{"" if "hide-output" in self.options else " with-output"}',
'',
*METHOD_NAME(3, self._input_output_lines())
]
def _input_output_lines(self):
# NOTE self._output should always be returned here, even when
# `hide-output` is set, otherwise the code will not be executed.
if 'hide-code' in self.options:
return self._output()
if 'code-below' in self.options:
return [
*self._output(),
'',
*self._input(),
]
return [
*self._input(),
'',
*self._output(),
]
def _output(self):
return [
'.. jupyter-execute::',
*METHOD_NAME(3, [
*self._jupyter_option_lines(),
'',
*self.content
]),
]
def _jupyter_option_lines(self):
yield ':hide-code:'
if 'hide-output' in self.options:
yield ':hide-output:'
        if 'raises' in self.options:
yield f':raises:{",".join(self.options.get("raises"))}'
if 'stderr' in self.options:
yield ':stderr:'
class PharmpyCode(PharmpyAbstractCodeDirective):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
has_content = True
option_spec = PharmpyAbstractCodeDirective.option_spec
def _lines(self) -> List[str]:
return [
'.. container:: pharmpy-snippet',
'',
*METHOD_NAME(3, self._input())
]
def METHOD_NAME(n: int, lines: Union[List[str],Iterator[str]]):
return map(lambda line: (' '*n + line) if line else line, lines) | null |
343 | import logging
import httpx
from django.core.handlers.wsgi import WSGIRequest
from django.http import HttpResponse
from django.shortcuts import render
from django.utils import timezone
from django.views import View
from pydis_site import settings
from pydis_site.apps.home.models import RepositoryMetadata
log = logging.getLogger(__name__)
class HomeView(View):
"""The main landing page for the website."""
github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
repository_cache_ttl = 3600
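    # Cached repository metadata is treated as fresh for this many seconds (one
    # hour) before it is refreshed from the GitHub API.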
# Which of our GitHub repos should be displayed on the front page, and in which order?
repos = [
"python-discord/site",
"python-discord/bot",
"python-discord/snekbox",
"python-discord/sir-lancebot",
"python-discord/metricity",
"python-discord/king-arthur",
]
def __init__(self):
"""Clean up stale RepositoryMetadata."""
if not settings.STATIC_BUILD:
RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()
# If no token is defined (for example in local development), then
# it does not make sense to pass the Authorization header. More
# specifically, GitHub will reject any requests from us due to the
# invalid header. We can make a limited number of anonymous requests
# though, which is useful for testing.
if settings.GITHUB_TOKEN:
self.headers = {"Authorization": f"token {settings.GITHUB_TOKEN}"}
else:
self.headers = {}
def _get_api_data(self) -> dict[str, dict[str, str]]:
"""
Call the GitHub API and get information about our repos.
If we're unable to get that info for any reason, return an empty dict.
"""
repo_dict = {}
try:
# Fetch the data from the GitHub API
api_data: list[dict] = httpx.get(
self.github_api,
headers=self.headers,
timeout=settings.TIMEOUT_PERIOD
).json()
except httpx.TimeoutException:
log.error("Request to fetch GitHub repository metadata for timed out!")
return repo_dict
# Process the API data into our dict
for repo in api_data:
try:
full_name = repo["full_name"]
if full_name in self.repos:
repo_dict[full_name] = {
"full_name": repo["full_name"],
"description": repo["description"],
"language": repo["language"],
"forks_count": repo["forks_count"],
"stargazers_count": repo["stargazers_count"],
}
# Something is not right about the API data we got back from GitHub.
except (TypeError, ConnectionError, KeyError) as e:
log.error(
"Unable to parse the GitHub repository metadata from response!",
extra={
'api_data': api_data,
'error': e
}
)
continue
return repo_dict
def METHOD_NAME(self) -> list[RepositoryMetadata]:
"""Build a list of RepositoryMetadata objects that we can use to populate the front page."""
# First off, load the timestamp of the least recently updated entry.
if settings.STATIC_BUILD:
last_update = None
else:
last_update = (
RepositoryMetadata.objects.values_list("last_updated", flat=True)
.order_by("last_updated").first()
)
# If we did not retrieve any results here, we should import them!
if last_update is None:
# Try to get new data from the API. If it fails, we'll return an empty list.
# In this case, we simply don't display our projects on the site.
api_repositories = self._get_api_data()
# Create all the repodata records in the database.
data = [
RepositoryMetadata(
repo_name=api_data["full_name"],
description=api_data["description"],
forks=api_data["forks_count"],
stargazers=api_data["stargazers_count"],
language=api_data["language"],
)
for api_data in api_repositories.values()
]
if settings.STATIC_BUILD:
return data
return RepositoryMetadata.objects.bulk_create(data)
# If the data is stale, we should refresh it.
if (timezone.now() - last_update).seconds > self.repository_cache_ttl:
# Try to get new data from the API. If it fails, return the cached data.
api_repositories = self._get_api_data()
if not api_repositories:
return RepositoryMetadata.objects.all()
# Update or create all RepoData objects in self.repos
database_repositories = []
for api_data in api_repositories.values():
repo_data, _created = RepositoryMetadata.objects.update_or_create(
repo_name=api_data["full_name"],
defaults={
'repo_name': api_data["full_name"],
'description': api_data["description"],
'forks': api_data["forks_count"],
'stargazers': api_data["stargazers_count"],
'language': api_data["language"],
}
)
database_repositories.append(repo_data)
return database_repositories
# Otherwise, if the data is fresher than 2 minutes old, we should just return it.
return RepositoryMetadata.objects.all()
def get(self, request: WSGIRequest) -> HttpResponse:
"""Collect repo data and render the homepage view."""
repo_data = self.METHOD_NAME()
return render(request, "home/index.html", {"repo_data": repo_data})
def timeline(request: WSGIRequest) -> HttpResponse:
"""Render timeline view."""
return render(request, 'home/timeline.html') | null |
344 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdksas.endpoint import endpoint_data
class DescribeSuspEventsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Sas', '2018-12-03', 'DescribeSuspEvents')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Source(self): # String
return self.get_query_params().get('Source')
def set_Source(self, Source): # String
self.add_query_param('Source', Source)
def get_ContainerFieldName(self): # String
return self.get_query_params().get('ContainerFieldName')
def set_ContainerFieldName(self, ContainerFieldName): # String
self.add_query_param('ContainerFieldName', ContainerFieldName)
def get_SourceIp(self): # String
return self.get_query_params().get('SourceIp')
def set_SourceIp(self, SourceIp): # String
self.add_query_param('SourceIp', SourceIp)
def get_EventNames(self): # String
return self.get_query_params().get('EventNames')
def set_EventNames(self, EventNames): # String
self.add_query_param('EventNames', EventNames)
def get_From(self): # String
return self.get_query_params().get('From')
def set_From(self, _From): # String
self.add_query_param('From', _From)
def get_Id(self): # Long
return self.get_query_params().get('Id')
def set_Id(self, Id): # Long
self.add_query_param('Id', Id)
def get_TacticId(self): # String
return self.get_body_params().get('TacticId')
def set_TacticId(self, TacticId): # String
self.add_body_params('TacticId', TacticId)
def get_AlarmUniqueInfo(self): # String
return self.get_query_params().get('AlarmUniqueInfo')
def set_AlarmUniqueInfo(self, AlarmUniqueInfo): # String
self.add_query_param('AlarmUniqueInfo', AlarmUniqueInfo)
def get_UniqueInfo(self): # String
return self.get_query_params().get('UniqueInfo')
def set_UniqueInfo(self, UniqueInfo): # String
self.add_query_param('UniqueInfo', UniqueInfo)
def get_GroupId(self): # Long
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # Long
self.add_query_param('GroupId', GroupId)
def get_OperateTimeEnd(self): # String
return self.get_query_params().get('OperateTimeEnd')
def set_OperateTimeEnd(self, OperateTimeEnd): # String
self.add_query_param('OperateTimeEnd', OperateTimeEnd)
def get_Name(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status)
def get_Uuids(self): # String
return self.get_query_params().get('Uuids')
def set_Uuids(self, Uuids): # String
self.add_query_param('Uuids', Uuids)
def get_TimeEnd(self): # String
return self.get_query_params().get('TimeEnd')
def set_TimeEnd(self, TimeEnd): # String
self.add_query_param('TimeEnd', TimeEnd)
def get_TargetType(self): # String
return self.get_query_params().get('TargetType')
def set_TargetType(self, TargetType): # String
self.add_query_param('TargetType', TargetType)
def METHOD_NAME(self): # String
return self.get_query_params().get('SortType')
def set_SortType(self, SortType): # String
self.add_query_param('SortType', SortType)
def get_Remark(self): # String
return self.get_query_params().get('Remark')
def set_Remark(self, Remark): # String
self.add_query_param('Remark', Remark)
def get_ContainerFieldValue(self): # String
return self.get_query_params().get('ContainerFieldValue')
def set_ContainerFieldValue(self, ContainerFieldValue): # String
self.add_query_param('ContainerFieldValue', ContainerFieldValue)
def get_PageSize(self): # String
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # String
self.add_query_param('PageSize', PageSize)
def get_Lang(self): # String
return self.get_query_params().get('Lang')
def set_Lang(self, Lang): # String
self.add_query_param('Lang', Lang)
def get_Dealed(self): # String
return self.get_query_params().get('Dealed')
def set_Dealed(self, Dealed): # String
self.add_query_param('Dealed', Dealed)
def get_CurrentPage(self): # String
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self, CurrentPage): # String
self.add_query_param('CurrentPage', CurrentPage)
def get_ClusterId(self): # String
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # String
self.add_query_param('ClusterId', ClusterId)
def get_OperateErrorCodeLists(self): # RepeatList
return self.get_query_params().get('OperateErrorCodeList')
def set_OperateErrorCodeLists(self, OperateErrorCodeList): # RepeatList
for depth1 in range(len(OperateErrorCodeList)):
self.add_query_param('OperateErrorCodeList.' + str(depth1 + 1), OperateErrorCodeList[depth1])
def get_SortColumn(self): # String
return self.get_query_params().get('SortColumn')
def set_SortColumn(self, SortColumn): # String
self.add_query_param('SortColumn', SortColumn)
def get_AssetsTypeLists(self): # RepeatList
return self.get_query_params().get('AssetsTypeList')
def set_AssetsTypeLists(self, AssetsTypeList): # RepeatList
for depth1 in range(len(AssetsTypeList)):
self.add_query_param('AssetsTypeList.' + str(depth1 + 1), AssetsTypeList[depth1])
def get_OperateTimeStart(self): # String
return self.get_query_params().get('OperateTimeStart')
def set_OperateTimeStart(self, OperateTimeStart): # String
self.add_query_param('OperateTimeStart', OperateTimeStart)
def get_TimeStart(self): # String
return self.get_query_params().get('TimeStart')
def set_TimeStart(self, TimeStart): # String
self.add_query_param('TimeStart', TimeStart)
def get_Levels(self): # String
return self.get_query_params().get('Levels')
def set_Levels(self, Levels): # String
self.add_query_param('Levels', Levels)
def get_ParentEventTypes(self): # String
return self.get_query_params().get('ParentEventTypes')
def set_ParentEventTypes(self, ParentEventTypes): # String
self.add_query_param('ParentEventTypes', ParentEventTypes) | null |
345 | """
Datatypes for Anvi'o
https://github.com/merenlab/anvio
"""
import glob
import logging
import os
from typing import Optional
from galaxy.datatypes.metadata import MetadataElement
from galaxy.datatypes.protocols import (
DatasetProtocol,
HasExtraFilesAndMetadata,
)
from galaxy.datatypes.text import Html
log = logging.getLogger(__name__)
class AnvioComposite(Html):
"""
Base class to use for Anvi'o composite datatypes.
Generally consist of a sqlite database, plus optional additional files
"""
file_ext = "anvio_composite"
composite_type = "auto_primary_file"
def METHOD_NAME(self, dataset: HasExtraFilesAndMetadata) -> str:
"""
        This is called only at upload, to write the HTML index file.
        The datasets cannot be renamed here - they arrive with their default names.
"""
defined_files = self.get_composite_files(dataset=dataset).items()
rval = [f"<html><head><title>Files for Anvi'o Composite Dataset ({self.file_ext})</title></head>"]
if defined_files:
rval.append("<p/>This composite dataset is composed of the following defined files:<p/><ul>")
for composite_name, composite_file in defined_files:
opt_text = ""
if composite_file.optional:
opt_text = " (optional)"
missing_text = ""
if not os.path.exists(os.path.join(dataset.extra_files_path, composite_name)):
missing_text = " (missing)"
rval.append(f'<li><a href="{composite_name}">{composite_name}</a>{opt_text}{missing_text}</li>')
rval.append("</ul>")
defined_files = map(lambda x: x[0], defined_files)
extra_files = []
for dirpath, _dirnames, filenames in os.walk(dataset.extra_files_path, followlinks=True):
for filename in filenames:
rel_path = os.path.relpath(os.path.join(dirpath, filename), dataset.extra_files_path)
if rel_path not in defined_files:
extra_files.append(rel_path)
if extra_files:
rval.append("<p/>This composite dataset contains these undefined files:<p/><ul>")
for rel_path in extra_files:
rval.append(f'<li><a href="{rel_path}">{rel_path}</a></li>')
rval.append("</ul>")
if not (defined_files or extra_files):
rval.append("<p/>This composite dataset does not contain any files!<p/><ul>")
rval.append("</html>")
return "\n".join(rval)
def get_mime(self) -> str:
"""Returns the mime type of the datatype"""
return "text/html"
def set_peek(self, dataset: DatasetProtocol, **kwd) -> None:
"""Set the peek and blurb text"""
if not dataset.dataset.purged:
dataset.peek = "Anvio database (multiple files)"
dataset.blurb = "Anvio database (multiple files)"
else:
dataset.peek = "file does not exist"
dataset.blurb = "file purged from disk"
def display_peek(self, dataset: DatasetProtocol) -> str:
"""Create HTML content, used for displaying peek."""
try:
return dataset.peek
except Exception:
return "Anvio database (multiple files)"
class AnvioDB(AnvioComposite):
"""Class for AnvioDB database files."""
_anvio_basename: Optional[str] = None
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_db"
def __init__(self, *args, **kwd):
super().__init__(*args, **kwd)
if self._anvio_basename is not None:
self.add_composite_file(self._anvio_basename, is_binary=True, optional=False)
def set_meta(self, dataset: DatasetProtocol, overwrite: bool = True, **kwd) -> None:
"""
Set the anvio_basename based upon actual extra_files_path contents.
"""
super().set_meta(dataset, overwrite=overwrite, **kwd)
if dataset.metadata.anvio_basename is not None and os.path.exists(
os.path.join(dataset.extra_files_path, dataset.metadata.anvio_basename)
):
return
found = False
for basename in [dataset.metadata.anvio_basename, self._anvio_basename]:
if found:
break
if basename is not None and not os.path.exists(os.path.join(dataset.extra_files_path, basename)):
for name in glob.glob(os.path.join(dataset.extra_files_path, f"*{basename}")):
dataset.metadata.anvio_basename = os.path.basename(name)
found = True
break
class AnvioStructureDB(AnvioDB):
"""Class for Anvio Structure DB database files."""
_anvio_basename = "STRUCTURE.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_structure_db"
class AnvioGenomesDB(AnvioDB):
"""Class for Anvio Genomes DB database files."""
_anvio_basename = "-GENOMES.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_genomes_db"
class AnvioContigsDB(AnvioDB):
"""Class for Anvio Contigs DB database files."""
_anvio_basename = "CONTIGS.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_contigs_db"
def __init__(self, *args, **kwd):
super().__init__(*args, **kwd)
self.add_composite_file("CONTIGS.h5", is_binary=True, optional=True)
class AnvioProfileDB(AnvioDB):
"""Class for Anvio Profile DB database files."""
_anvio_basename = "PROFILE.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_profile_db"
def __init__(self, *args, **kwd):
super().__init__(*args, **kwd)
self.add_composite_file("RUNINFO.cp", is_binary=True, optional=True)
self.add_composite_file("RUNINFO.mcp", is_binary=True, optional=True)
self.add_composite_file("AUXILIARY_DATA.db", is_binary=True, optional=True)
self.add_composite_file("RUNLOG.txt", is_binary=False, optional=True)
class AnvioPanDB(AnvioDB):
"""Class for Anvio Pan DB database files."""
_anvio_basename = "PAN.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_pan_db"
class AnvioSamplesDB(AnvioDB):
"""Class for Anvio Samples DB database files."""
_anvio_basename = "SAMPLES.db"
MetadataElement(name="anvio_basename", default=_anvio_basename, desc="Basename", readonly=True)
file_ext = "anvio_samples_db" | null |
346 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data
class UpgradeDBInstanceMajorVersionRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Rds', '2014-08-15', 'UpgradeDBInstanceMajorVersion')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_DBInstanceStorage(self): # Integer
return self.get_query_params().get('DBInstanceStorage')
def set_DBInstanceStorage(self, DBInstanceStorage): # Integer
self.add_query_param('DBInstanceStorage', DBInstanceStorage)
def get_ZoneIdSlave1(self): # String
return self.get_query_params().get('ZoneIdSlave1')
def set_ZoneIdSlave1(self, ZoneIdSlave1): # String
self.add_query_param('ZoneIdSlave1', ZoneIdSlave1)
def get_ZoneIdSlave2(self): # String
return self.get_query_params().get('ZoneIdSlave2')
def set_ZoneIdSlave2(self, ZoneIdSlave2): # String
self.add_query_param('ZoneIdSlave2', ZoneIdSlave2)
def get_SwitchTimeMode(self): # String
return self.get_query_params().get('SwitchTimeMode')
def set_SwitchTimeMode(self, SwitchTimeMode): # String
self.add_query_param('SwitchTimeMode', SwitchTimeMode)
def get_SwitchOver(self): # String
return self.get_query_params().get('SwitchOver')
def set_SwitchOver(self, SwitchOver): # String
self.add_query_param('SwitchOver', SwitchOver)
def get_CollectStatMode(self): # String
return self.get_query_params().get('CollectStatMode')
def set_CollectStatMode(self, CollectStatMode): # String
self.add_query_param('CollectStatMode', CollectStatMode)
def get_SwitchTime(self): # String
return self.get_query_params().get('SwitchTime')
def set_SwitchTime(self, SwitchTime): # String
self.add_query_param('SwitchTime', SwitchTime)
def get_DBInstanceId(self): # String
return self.get_query_params().get('DBInstanceId')
def set_DBInstanceId(self, DBInstanceId): # String
self.add_query_param('DBInstanceId', DBInstanceId)
def METHOD_NAME(self): # String
return self.get_query_params().get('DBInstanceStorageType')
def set_DBInstanceStorageType(self, DBInstanceStorageType): # String
self.add_query_param('DBInstanceStorageType', DBInstanceStorageType)
def get_Period(self): # String
return self.get_query_params().get('Period')
def set_Period(self, Period): # String
self.add_query_param('Period', Period)
def get_UsedTime(self): # String
return self.get_query_params().get('UsedTime')
def set_UsedTime(self, UsedTime): # String
self.add_query_param('UsedTime', UsedTime)
def get_DBInstanceClass(self): # String
return self.get_query_params().get('DBInstanceClass')
def set_DBInstanceClass(self, DBInstanceClass): # String
self.add_query_param('DBInstanceClass', DBInstanceClass)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_PrivateIpAddress(self): # String
return self.get_query_params().get('PrivateIpAddress')
def set_PrivateIpAddress(self, PrivateIpAddress): # String
self.add_query_param('PrivateIpAddress', PrivateIpAddress)
def get_VPCId(self): # String
return self.get_query_params().get('VPCId')
def set_VPCId(self, VPCId): # String
self.add_query_param('VPCId', VPCId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_PayType(self): # String
return self.get_query_params().get('PayType')
def set_PayType(self, PayType): # String
self.add_query_param('PayType', PayType)
def get_InstanceNetworkType(self): # String
return self.get_query_params().get('InstanceNetworkType')
def set_InstanceNetworkType(self, InstanceNetworkType): # String
self.add_query_param('InstanceNetworkType', InstanceNetworkType)
def get_TargetMajorVersion(self): # String
return self.get_query_params().get('TargetMajorVersion')
def set_TargetMajorVersion(self, TargetMajorVersion): # String
self.add_query_param('TargetMajorVersion', TargetMajorVersion) | null |
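# --- Hedged usage sketch (not part of the generated SDK file above) ----------
# Shows how a request class like UpgradeDBInstanceMajorVersionRequest is
# typically driven through the Alibaba Cloud core client. The credentials,
# region and instance values are placeholders, and the class defined above is
# assumed to be available in the current scope (e.g. via the aliyunsdkrds package).
from aliyunsdkcore.client import AcsClient

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = UpgradeDBInstanceMajorVersionRequest()
request.set_DBInstanceId("rm-example-instance")   # instance to upgrade
request.set_TargetMajorVersion("14")              # e.g. PostgreSQL 13 -> 14
request.set_SwitchOver("false")
request.set_PayType("Postpaid")

# Sends the RPC call; the raw JSON response body is returned as bytes.
response = client.do_action_with_exception(request)
print(response)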
347 | from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple
from boa3.internal import constants
from boa3.internal.model.builtin.method.builtinmethod import IBuiltinMethod
from boa3.internal.model.expression import IExpression
from boa3.internal.model.method import Method
from boa3.internal.model.property import Property
from boa3.internal.model.type.classes.classarraytype import ClassArrayType
from boa3.internal.model.variable import Variable
from boa3.internal.neo.vm.opcode.Opcode import Opcode
class BlockType(ClassArrayType):
"""
A class used to represent the Neo Block class.
"""
def __init__(self):
super().__init__('Block')
from boa3.internal.model.type.type import Type
from boa3.internal.model.type.collection.sequence.uint160type import UInt160Type
from boa3.internal.model.type.collection.sequence.uint256type import UInt256Type
uint256 = UInt256Type.build()
self._variables: Dict[str, Variable] = {
'hash': Variable(uint256),
'version': Variable(Type.int),
'previous_hash': Variable(uint256),
'merkle_root': Variable(uint256),
'timestamp': Variable(Type.int),
'nonce': Variable(Type.int),
'index': Variable(Type.int),
'primary_index': Variable(Type.int),
'next_consensus': Variable(UInt160Type.build()),
'transaction_count': Variable(Type.int)
}
self._constructor: Method = None
@property
def class_variables(self) -> Dict[str, Variable]:
return {}
@property
def instance_variables(self) -> Dict[str, Variable]:
return self._variables.copy()
@property
def properties(self) -> Dict[str, Property]:
return {}
@property
def static_methods(self) -> Dict[str, Method]:
return {}
@property
def class_methods(self) -> Dict[str, Method]:
return {}
@property
def instance_methods(self) -> Dict[str, Method]:
return {}
def constructor_method(self) -> Optional[Method]:
# was having a problem with recursive import
if self._constructor is None:
self._constructor: Method = BlockMethod(self)
return self._constructor
@classmethod
def build(cls, value: Any = None) -> BlockType:
if value is None or cls._is_type_of(value):
return _Block
@classmethod
def _is_type_of(cls, value: Any):
return isinstance(value, BlockType)
_Block = BlockType()
class BlockMethod(IBuiltinMethod):
def __init__(self, return_type: BlockType):
identifier = '-Block__init__'
args: Dict[str, Variable] = {}
super().__init__(identifier, args, return_type=return_type)
def validate_parameters(self, *params: IExpression) -> bool:
return len(params) == 0
@property
def METHOD_NAME(self) -> List[Tuple[Opcode, bytes]]:
from boa3.internal.neo.vm.type.Integer import Integer
uint160_default = Integer(constants.SIZE_OF_INT160).to_byte_array() + bytes(constants.SIZE_OF_INT160)
uint256_default = Integer(constants.SIZE_OF_INT256).to_byte_array() + bytes(constants.SIZE_OF_INT256)
return [
(Opcode.PUSH0, b''), # transaction_count
(Opcode.PUSHDATA1, uint160_default), # next_consensus
(Opcode.PUSH0, b''), # primary_index
(Opcode.PUSH0, b''), # index
(Opcode.PUSH0, b''), # nonce
(Opcode.PUSH0, b''), # timestamp
(Opcode.PUSHDATA1, uint256_default), # merkle_root
(Opcode.PUSHDATA1, uint256_default), # previous_hash
(Opcode.PUSH0, b''), # version
(Opcode.PUSHDATA1, uint256_default), # hash
(Opcode.PUSH10, b''),
(Opcode.PACK, b'')
]
@property
def _args_on_stack(self) -> int:
return len(self.args)
@property
def _body(self) -> Optional[str]:
return | null |
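# --- Illustrative sketch (not boa3 code) --------------------------------------
# A stripped-down, standalone analogue of two patterns used above: build()
# handing back a shared module-level instance (like _Block) and a constructor
# that is created lazily on first use to dodge a circular import. All names
# below are invented for illustration.
from typing import Any, Optional


class ExampleType:
    def __init__(self):
        self._constructor: Optional["ExampleConstructor"] = None

    def constructor_method(self) -> "ExampleConstructor":
        # Created on demand and cached, mirroring BlockType.constructor_method.
        if self._constructor is None:
            self._constructor = ExampleConstructor(self)
        return self._constructor

    @classmethod
    def build(cls, value: Any = None) -> "ExampleType":
        # Always return the shared singleton, like _Block above.
        return _EXAMPLE


class ExampleConstructor:
    def __init__(self, return_type: ExampleType):
        self.return_type = return_type


_EXAMPLE = ExampleType()

assert ExampleType.build() is ExampleType.build()
assert _EXAMPLE.constructor_method() is _EXAMPLE.constructor_method()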
348 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata')
self.set_method('POST')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_NodeSpec(self):
return self.get_query_params().get('NodeSpec')
def set_NodeSpec(self,NodeSpec):
self.add_query_param('NodeSpec',NodeSpec)
def get_ClientToken(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_NetworkType(self):
return self.get_query_params().get('NetworkType')
def set_NetworkType(self,NetworkType):
self.add_query_param('NetworkType',NetworkType)
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_NodeNumber(self):
return self.get_query_params().get('NodeNumber')
def set_NodeNumber(self,NodeNumber):
self.add_query_param('NodeNumber',NodeNumber)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_AccountPassword(self):
return self.get_query_params().get('AccountPassword')
def set_AccountPassword(self,AccountPassword):
self.add_query_param('AccountPassword',AccountPassword)
def get_InstanceName(self):
return self.get_query_params().get('InstanceName')
def set_InstanceName(self,InstanceName):
self.add_query_param('InstanceName',InstanceName)
def get_DBName(self):
return self.get_query_params().get('DBName')
def set_DBName(self,DBName):
self.add_query_param('DBName',DBName)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_ChargeType(self):
return self.get_query_params().get('ChargeType')
def METHOD_NAME(self,ChargeType):
self.add_query_param('ChargeType',ChargeType)
349 | #!/usr/bin/env python3
# Copyright (c) 2014-2021 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
<i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
static const uint8_t chainparams_seed_{main,test}[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from enum import Enum
import struct
import sys
import os
import re
class BIP155Network(Enum):
IPV4 = 1
IPV6 = 2
TORV2 = 3 # no longer supported
TORV3 = 4
I2P = 5
CJDNS = 6
def name_to_bip155(addr):
'''Convert address string to BIP155 (networkID, addr) tuple.'''
if addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) == 35:
assert vchAddr[34] == 3
return (BIP155Network.TORV3, vchAddr[:32])
elif len(vchAddr) == 10:
return (BIP155Network.TORV2, vchAddr)
else:
raise ValueError('Invalid onion %s' % vchAddr)
elif addr.endswith('.b32.i2p'):
vchAddr = b32decode(addr[0:-8] + '====', True)
if len(vchAddr) == 32:
return (BIP155Network.I2P, vchAddr)
else:
raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr: # IPv4
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return (BIP155Network.IPV6, bytes(sub[0] + ([0] * nullbytes) + sub[1]))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s):
'''Convert endpoint string to BIP155 (networkID, addr, port) tuple.'''
match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = 0
else:
port = int(port)
host = name_to_bip155(host)
if host[0] == BIP155Network.TORV2:
return None # TORV2 is no longer supported, so we ignore it
else:
return host + (port, )
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def bip155_serialize(spec):
'''
Serialize (networkID, addr, port) tuple to BIP155 binary format.
'''
r = b""
r += struct.pack('B', spec[0].value)
r += ser_compact_size(len(spec[1]))
r += spec[1]
r += struct.pack('>H', spec[2])
return r
def METHOD_NAME(g, f, structname):
g.write('static const uint8_t %s[] = {\n' % structname)
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
spec = parse_spec(line)
if spec is None: # ignore this entry (e.g. no longer supported addresses like TORV2)
continue
blob = bip155_serialize(spec)
hoststr = ','.join(('0x%02x' % b) for b in blob)
g.write(f' {hoststr},\n')
g.write('};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a BIP155 serialized (networkID, addr, port) tuple.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'), 'r', encoding="utf8") as f:
METHOD_NAME(g, f, 'chainparams_seed_main')
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'), 'r', encoding="utf8") as f:
METHOD_NAME(g, f, 'chainparams_seed_test')
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main() | null |
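# --- Worked example (assumes the functions above are in scope) ----------------
# The script's file name contains a hyphen, so it is not importable as a module;
# this sketch assumes parse_spec() and bip155_serialize() have been pasted or
# exec'd into the current namespace.
spec = parse_spec('1.2.3.4:8333')
# -> (BIP155Network.IPV4, b'\x01\x02\x03\x04', 8333)

blob = bip155_serialize(spec)
# Layout: network id (0x01), compact-size length of the address (0x04),
# the four address bytes, then the port as a big-endian uint16 (0x208d).
assert blob == bytes.fromhex('010401020304208d')
print(blob.hex())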
350 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class CreateInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'PetaData', '2016-01-01', 'CreateInstance','petadata')
self.set_method('POST')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_NodeSpec(self):
return self.get_query_params().get('NodeSpec')
def set_NodeSpec(self,NodeSpec):
self.add_query_param('NodeSpec',NodeSpec)
def METHOD_NAME(self):
return self.get_query_params().get('ClientToken')
def set_ClientToken(self,ClientToken):
self.add_query_param('ClientToken',ClientToken)
def get_NetworkType(self):
return self.get_query_params().get('NetworkType')
def set_NetworkType(self,NetworkType):
self.add_query_param('NetworkType',NetworkType)
def get_AccountName(self):
return self.get_query_params().get('AccountName')
def set_AccountName(self,AccountName):
self.add_query_param('AccountName',AccountName)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_NodeNumber(self):
return self.get_query_params().get('NodeNumber')
def set_NodeNumber(self,NodeNumber):
self.add_query_param('NodeNumber',NodeNumber)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityIPList(self):
return self.get_query_params().get('SecurityIPList')
def set_SecurityIPList(self,SecurityIPList):
self.add_query_param('SecurityIPList',SecurityIPList)
def get_VSwitchId(self):
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self,VSwitchId):
self.add_query_param('VSwitchId',VSwitchId)
def get_AccountPassword(self):
return self.get_query_params().get('AccountPassword')
def set_AccountPassword(self,AccountPassword):
self.add_query_param('AccountPassword',AccountPassword)
def get_InstanceName(self):
return self.get_query_params().get('InstanceName')
def set_InstanceName(self,InstanceName):
self.add_query_param('InstanceName',InstanceName)
def get_DBName(self):
return self.get_query_params().get('DBName')
def set_DBName(self,DBName):
self.add_query_param('DBName',DBName)
def get_VpcId(self):
return self.get_query_params().get('VpcId')
def set_VpcId(self,VpcId):
self.add_query_param('VpcId',VpcId)
def get_ZoneId(self):
return self.get_query_params().get('ZoneId')
def set_ZoneId(self,ZoneId):
self.add_query_param('ZoneId',ZoneId)
def get_ChargeType(self):
return self.get_query_params().get('ChargeType')
def set_ChargeType(self,ChargeType):
self.add_query_param('ChargeType',ChargeType)
351 | import shutil
import os
import stat
import bpy
import arm.utils
from arm import log
if arm.is_reload(__name__):
log = arm.reload_module(log)
arm.utils = arm.reload_module(arm.utils)
else:
arm.enable_reload(__name__)
assets = []
reserved_names = ['return.']
khafile_params = []
khafile_defs = []
khafile_defs_last = []
embedded_data = []
shaders = []
shaders_last = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_passes_assets = {}
shader_cons = {}
def reset():
global assets
global khafile_params
global khafile_defs
global khafile_defs_last
global embedded_data
global shaders
global shaders_last
global shaders_external
global shader_datas
global shader_passes
global shader_cons
assets = []
khafile_params = []
khafile_defs_last = khafile_defs
khafile_defs = []
embedded_data = []
shaders_last = shaders
shaders = []
shaders_external = []
shader_datas = []
shader_passes = []
shader_cons = {}
shader_cons['mesh_vert'] = []
shader_cons['depth_vert'] = []
shader_cons['depth_frag'] = []
shader_cons['voxel_vert'] = []
shader_cons['voxel_frag'] = []
shader_cons['voxel_geom'] = []
def add(asset_file):
global assets
# Asset already exists, do nothing
if asset_file in assets:
return
asset_file_base = os.path.basename(asset_file)
for f in assets:
f_file_base = os.path.basename(f)
if f_file_base == asset_file_base:
return
assets.append(asset_file)
# Reserved file name
for f in reserved_names:
if f in asset_file:
log.warn(f'File "{asset_file}" contains reserved keyword, this will break C++ builds!')
def add_khafile_def(d):
global khafile_defs
if d not in khafile_defs:
khafile_defs.append(d)
def add_khafile_param(p):
global khafile_params
if p not in khafile_params:
khafile_params.append(p)
def add_embedded_data(file):
global embedded_data
if file not in embedded_data:
embedded_data.append(file)
def add_shader(file):
global shaders
global shaders_last
if file not in shaders:
shaders.append(file)
def add_shader_data(file):
global shader_datas
if file not in shader_datas:
shader_datas.append(file)
def add_shader_pass(data_name):
global shader_passes
# Shader data for passes is written into a single shader_datas.arm file
add_shader_data(arm.utils.get_fp_build() + '/compiled/Shaders/shader_datas.arm')
if data_name not in shader_passes:
shader_passes.append(data_name)
def METHOD_NAME(file):
global shaders_external
shaders_external.append(file)
name = file.split('/')[-1].split('\\')[-1]
add_shader(arm.utils.get_fp_build() + '/compiled/Shaders/' + name)
invalidate_enabled = True # Disable invalidating during build process
def remove_readonly(func, path, excinfo):
os.chmod(path, stat.S_IWRITE)
func(path)
def invalidate_shader_cache(self, context):
# compiled.inc changed, recompile all shaders next time
global invalidate_enabled
if invalidate_enabled is False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Shaders'):
shutil.rmtree(fp + '/compiled/Shaders', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/html5-resources'):
shutil.rmtree(fp + '/debug/html5-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/krom-resources'):
shutil.rmtree(fp + '/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/debug/krom-resources'):
shutil.rmtree(fp + '/debug/krom-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/windows-resources'):
shutil.rmtree(fp + '/windows-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/linux-resources'):
shutil.rmtree(fp + '/linux-resources', onerror=remove_readonly)
if os.path.isdir(fp + '/osx-resources'):
shutil.rmtree(fp + '/osx-resources', onerror=remove_readonly)
def invalidate_compiled_data(self, context):
global invalidate_enabled
if invalidate_enabled is False:
return
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled'):
shutil.rmtree(fp + '/compiled', onerror=remove_readonly)
def invalidate_mesh_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/meshes'):
shutil.rmtree(fp + '/compiled/Assets/meshes', onerror=remove_readonly)
def invalidate_envmap_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/envmaps'):
shutil.rmtree(fp + '/compiled/Assets/envmaps', onerror=remove_readonly)
def invalidate_unpacked_data(self, context):
fp = arm.utils.get_fp_build()
if os.path.isdir(fp + '/compiled/Assets/unpacked'):
shutil.rmtree(fp + '/compiled/Assets/unpacked', onerror=remove_readonly)
def invalidate_mesh_cache(self, context):
if context.object is None or context.object.data is None:
return
context.object.data.arm_cached = False
def invalidate_instance_cache(self, context):
if context.object is None or context.object.data is None:
return
invalidate_mesh_cache(self, context)
for slot in context.object.material_slots:
slot.material.arm_cached = False
def invalidate_compiler_cache(self, context):
bpy.data.worlds['Arm'].arm_recompile = True
def shader_equal(sh, ar, shtype):
# Merge equal shaders
for e in ar:
if sh.is_equal(e):
sh.context.data[shtype] = e.context.data[shtype]
sh.is_linked = True
return
ar.append(sh)
def vs_equal(c, ar):
shader_equal(c.vert, ar, 'vertex_shader')
def fs_equal(c, ar):
shader_equal(c.frag, ar, 'fragment_shader')
def gs_equal(c, ar):
shader_equal(c.geom, ar, 'geometry_shader')
def tcs_equal(c, ar):
shader_equal(c.tesc, ar, 'tesscontrol_shader')
def tes_equal(c, ar):
shader_equal(c.tese, ar, 'tesseval_shader') | null |
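# --- Minimal sketch of the shader merging above (illustrative only) -----------
# shader_equal() stores a shader only once and lets later, equal shaders point
# at the already-compiled entry. The Fake* classes below are invented stand-ins
# for Armory's real shader objects; only the attributes shader_equal() touches
# (is_equal, context.data, is_linked) are modelled. Assumes shader_equal() from
# this module is in scope (importing the full module requires Blender's bpy).
class FakeContext:
    def __init__(self, source):
        self.data = {'vertex_shader': source + '.glsl'}


class FakeShader:
    def __init__(self, source):
        self.source = source
        self.context = FakeContext(source)
        self.is_linked = False

    def is_equal(self, other):
        return self.source == other.source


merged = []
a = FakeShader('mesh_vert')
b = FakeShader('mesh_vert')   # equal to a -> gets linked, not appended
c = FakeShader('depth_vert')  # different -> kept as its own entry

for sh in (a, b, c):
    shader_equal(sh, merged, 'vertex_shader')

assert merged == [a, c]
assert b.is_linked and b.context.data['vertex_shader'] is a.context.data['vertex_shader']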
352 | from unittest import TestCase
from pcs.common.pacemaker.resource.operations import CibResourceOperationDto
from pcs.lib.cib.resource import agent
from pcs.lib.resource_agent import (
ResourceAgentAction,
ResourceAgentMetadata,
ResourceAgentName,
)
from pcs.lib.resource_agent.const import OCF_1_0
class GetDefaultOperationInterval(TestCase):
def test_return_0s_on_name_different_from_monitor(self):
self.assertEqual("0s", agent.get_default_operation_interval("start"))
def METHOD_NAME(self):
self.assertEqual("60s", agent.get_default_operation_interval("monitor"))
class CompleteOperationsOptions(TestCase):
def test_add_intervals_everywhere_is_missing(self):
self.assertEqual(
agent.complete_operations_options(
[
{"name": "monitor", "interval": "20s"},
{"name": "start"},
]
),
[
{"name": "monitor", "interval": "20s"},
{"name": "start", "interval": "0s"},
],
)
class GetDefaultOperations(TestCase):
fixture_actions = [
ResourceAgentAction(
"custom1", "40s", None, None, None, None, False, False
),
ResourceAgentAction(
"custom2", "60s", "25s", None, None, None, False, False
),
ResourceAgentAction(
"meta-data", None, None, None, None, None, False, False
),
ResourceAgentAction(
"monitor", "30s", "10s", None, None, None, False, False
),
ResourceAgentAction(
"start", None, "40s", None, None, None, False, False
),
ResourceAgentAction(
"status", "20s", "15s", None, None, None, False, False
),
ResourceAgentAction(
"validate-all", None, None, None, None, None, False, False
),
]
fixture_actions_meta_only = [
ResourceAgentAction(
"meta-data", None, None, None, None, None, False, False
)
]
maxDiff = None
@staticmethod
def fixture_agent(actions):
return ResourceAgentMetadata(
ResourceAgentName("ocf", "pacemaker", "Dummy"),
agent_exists=True,
ocf_version=OCF_1_0,
shortdesc="",
longdesc="",
parameters=[],
actions=actions,
)
@staticmethod
def fixture_stonith_agent(actions):
return ResourceAgentMetadata(
ResourceAgentName("stonith", None, "fence_test"),
agent_exists=True,
ocf_version=OCF_1_0,
shortdesc="",
longdesc="",
parameters=[],
actions=actions,
)
@staticmethod
def op_fixture(name, interval, timeout):
return CibResourceOperationDto(
id="",
name=name,
interval=interval,
description=None,
start_delay=None,
interval_origin=None,
timeout=timeout,
enabled=None,
record_pending=None,
role=None,
on_fail=None,
meta_attributes=[],
instance_attributes=[],
)
def test_select_only_actions_for_cib(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_agent(self.fixture_actions)
),
[
self.op_fixture("custom1", "0s", "40s"),
self.op_fixture("custom2", "25s", "60s"),
self.op_fixture("monitor", "10s", "30s"),
self.op_fixture("start", "40s", None),
],
)
def test_select_only_actions_for_cib_stonith(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_stonith_agent(self.fixture_actions)
),
[self.op_fixture("monitor", "10s", "30s")],
)
def test_select_only_necessary_actions_for_cib(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_agent(self.fixture_actions), necessary_only=True
),
[self.op_fixture("monitor", "10s", "30s")],
)
def test_select_only_necessary_actions_for_cib_stonith(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_stonith_agent(self.fixture_actions),
necessary_only=True,
),
[self.op_fixture("monitor", "10s", "30s")],
)
def test_complete_monitor(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_agent(self.fixture_actions_meta_only),
necessary_only=True,
),
[self.op_fixture("monitor", "60s", None)],
)
def test_complete_monitor_stonith(self):
self.assertEqual(
agent.get_default_operations(
self.fixture_stonith_agent(self.fixture_actions_meta_only),
necessary_only=True,
),
[self.op_fixture("monitor", "60s", None)],
) | null |
353 | from __future__ import print_function
import IMP.test
import IMP.algebra
displayit = False
if displayit:
import IMP.display
from IMP.algebra import *
import pickle
class Tests(IMP.test.TestCase):
def test_magnitude(self):
"""Check dense log grid of ints"""
print("construct")
bb = BoundingBox3D(Vector3D(1, 1, 1), Vector3D(15, 15, 15))
sz = [5, 5, 5]
le = LogEmbedding3D(bb, Vector3D(2.0, 2.0, 2.0), sz)
g = DenseIntLogGrid3D(sz, le)
bbo = g.get_bounding_box()
print(bb, bbo)
if displayit:
w = IMP.display.PymolWriter(self.get_tmp_file_name("log.pym"))
bbg = IMP.display.BoundingBoxGeometry(bb)
bbg.set_color(IMP.display.get_display_color(0))
bbg.set_name("in")
w.add_geometry(bbg)
bbog = IMP.display.BoundingBoxGeometry(bbo)
bbog.set_color(IMP.display.get_display_color(1))
bbog.set_name("out")
w.add_geometry(bbog)
for i in range(0, sz[0]):
for j in range(0, sz[0]):
for k in range(0, sz[0]):
ei = ExtendedGridIndex3D(i, j, k)
gi = g.get_index(ei)
bbi = g.get_bounding_box(ei)
bbog = IMP.display.BoundingBoxGeometry(bbi)
bbog.set_name(str(ei))
w.add_geometry(bbog)
cg = IMP.display.PointGeometry(g.get_center(ei))
cg.set_name("center")
w.add_geometry(cg)
self.assertAlmostEqual(bbo.get_corner(1)[0], 15, delta=.1)
def METHOD_NAME(self):
"""Test mixed log embedding"""
eb = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(0, 0, 0),
IMP.algebra.Vector3D(1, 1, 1),
IMP.algebra.Vector3D(1, 2, 1))
for i in range(0, 10):
gi = IMP.algebra.ExtendedGridIndex3D([i, i, i])
center = eb.get_center(gi)
print(center)
def test_default_embedding_pickle(self):
"""Test (un-)pickle of DefaultEmbedding3D"""
e1 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(1, 2, 3),
IMP.algebra.Vector3D(2, 4, 5))
e2 = IMP.algebra.DefaultEmbedding3D(IMP.algebra.Vector3D(4, 5, 6),
IMP.algebra.Vector3D(7, 8, 9))
e2.foo = 'bar'
dump = pickle.dumps((e1, e2))
newe1, newe2 = pickle.loads(dump)
self.assertLess(IMP.algebra.get_distance(
e1.get_origin(), newe1.get_origin()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e2.get_origin(), newe2.get_origin()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4)
self.assertEqual(newe2.foo, 'bar')
self.assertRaises(TypeError, e1._set_from_binary, 42)
def test_log_embedding_pickle(self):
"""Test (un-)pickle of LogEmbedding3D"""
e1 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(1, 2, 3),
IMP.algebra.Vector3D(2, 4, 5),
IMP.algebra.Vector3D(7, 8, 9))
e2 = IMP.algebra.LogEmbedding3D(IMP.algebra.Vector3D(4, 5, 6),
IMP.algebra.Vector3D(7, 8, 9),
IMP.algebra.Vector3D(17, 18, 19))
e2.foo = 'bar'
dump = pickle.dumps((e1, e2))
newe1, newe2 = pickle.loads(dump)
self.assertLess(IMP.algebra.get_distance(
e1.get_origin(), newe1.get_origin()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e1.get_unit_cell(), newe1.get_unit_cell()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e2.get_origin(), newe2.get_origin()), 1e-4)
self.assertLess(IMP.algebra.get_distance(
e2.get_unit_cell(), newe2.get_unit_cell()), 1e-4)
self.assertEqual(newe2.foo, 'bar')
self.assertRaises(TypeError, e1._set_from_binary, 42)
def test_grid_index_pickle(self):
"""Test (un-)pickle of GridIndex3D"""
g1 = IMP.algebra.GridIndex3D(1, 2, 3)
g2 = IMP.algebra.GridIndex3D(4, 5, 6)
g2.foo = 'bar'
dump = pickle.dumps((g1, g2))
newg1, newg2 = pickle.loads(dump)
self.assertEqual(g1[0], newg1[0])
self.assertEqual(g1[1], newg1[1])
self.assertEqual(g1[2], newg1[2])
self.assertEqual(g2[0], newg2[0])
self.assertEqual(g2[1], newg2[1])
self.assertEqual(g2[2], newg2[2])
self.assertEqual(newg2.foo, 'bar')
self.assertRaises(TypeError, g1._set_from_binary, 42)
def test_extended_grid_index_pickle(self):
"""Test (un-)pickle of ExtendedGridIndex3D"""
g1 = IMP.algebra.ExtendedGridIndex3D(1, 2, 3)
g2 = IMP.algebra.ExtendedGridIndex3D(4, 5, 6)
g2.foo = 'bar'
dump = pickle.dumps((g1, g2))
newg1, newg2 = pickle.loads(dump)
self.assertEqual(g1[0], newg1[0])
self.assertEqual(g1[1], newg1[1])
self.assertEqual(g1[2], newg1[2])
self.assertEqual(g2[0], newg2[0])
self.assertEqual(g2[1], newg2[1])
self.assertEqual(g2[2], newg2[2])
self.assertEqual(newg2.foo, 'bar')
self.assertRaises(TypeError, g1._set_from_binary, 42)
def test_unbounded_grid_range_pickle(self):
"""Test (un-)pickle of UnboundedGridRange3D"""
g1 = IMP.algebra.UnboundedGridRange3D()
g2 = IMP.algebra.UnboundedGridRange3D()
g2.foo = 'bar'
dump = pickle.dumps((g1, g2))
newg1, newg2 = pickle.loads(dump)
self.assertEqual(newg2.foo, 'bar')
self.assertRaises(TypeError, g1._set_from_binary, 42)
def test_bounded_grid_range_pickle(self):
"""Test (un-)pickle of BoundedGridRange3D"""
g1 = IMP.algebra.BoundedGridRange3D([1, 2, 3])
g2 = IMP.algebra.BoundedGridRange3D([4, 5, 6])
g2.foo = 'bar'
dump = pickle.dumps((g1, g2))
newg1, newg2 = pickle.loads(dump)
self.assertEqual(g1.get_end_index(), newg1.get_end_index())
self.assertEqual(g2.get_end_index(), newg2.get_end_index())
self.assertEqual(newg2.foo, 'bar')
self.assertRaises(TypeError, g1._set_from_binary, 42)
if __name__ == '__main__':
IMP.test.main() | null |
354 | # Copyright (c) ZenML GmbH 2023. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from typing import Any, Optional
from unittest.mock import MagicMock, patch
from uuid import UUID, uuid4
import pytest
from zenml.steps.external_artifact import ExternalArtifact
class MockClient:
class MockArtifactResponse:
def __init__(self, name):
self.artifact_store_id = 42
self.name = name
self.id = 123
class MockPipelineResponse:
def __init__(self):
self.last_successful_run = MagicMock()
self.last_successful_run.artifacts = [
MockClient.MockArtifactResponse("foo"),
MockClient.MockArtifactResponse("bar"),
]
def __init__(self, artifact_store_id=42):
self.active_stack = MagicMock()
self.active_stack.artifact_store.id = artifact_store_id
self.active_stack.artifact_store.path = "foo"
def get_artifact(self, *args, **kwargs):
return MockClient.MockArtifactResponse("foo")
def get_pipeline(self, *args, **kwargs):
return MockClient.MockPipelineResponse()
@pytest.mark.parametrize(
argnames="value,id,pipeline_name,artifact_name,exception_start",
argvalues=[
[1, None, None, None, ""],
[None, uuid4(), None, None, ""],
[None, None, "foo", "bar", ""],
[None, None, None, None, "Either a value,"],
[1, uuid4(), None, None, "Only a value,"],
[None, uuid4(), "foo", "bar", "Only a value,"],
[1, None, "foo", "bar", "Only a value,"],
[None, None, "foo", None, "`pipeline_name` and `artifact_name`"],
[None, None, None, "bar", "`pipeline_name` and `artifact_name`"],
],
ids=[
"good_by_value",
"good_by_id",
"good_by_pipeline_artifact",
"bad_all_none",
"bad_id_and_value",
"bad_id_and_pipeline_artifact",
"bad_value_and_pipeline_artifact",
"bad_only_pipeline",
"bad_only_artifact",
],
)
def test_external_artifact_init(
value: Optional[Any],
id: Optional[UUID],
pipeline_name: Optional[str],
artifact_name: Optional[str],
exception_start: str,
):
"""Tests that initialization logic of `ExternalArtifact` works expectedly."""
if exception_start:
with pytest.raises(ValueError, match=exception_start):
ExternalArtifact(
value=value,
id=id,
pipeline_name=pipeline_name,
artifact_name=artifact_name,
)
else:
ExternalArtifact(
value=value,
id=id,
pipeline_name=pipeline_name,
artifact_name=artifact_name,
)
@patch("zenml.steps.external_artifact.Client")
@patch("zenml.steps.external_artifact.fileio")
@patch("zenml.steps.external_artifact.artifact_utils")
def test_upload_if_necessary_by_value(
mocked_zenml_client,
mocked_fileio,
mocked_artifact_utils,
):
mocked_fileio.exists.return_value = False
ea = ExternalArtifact(value=1)
assert ea._id is None
ea.upload_if_necessary()
assert ea._id is not None
assert ea._value is not None
assert ea._pipeline_name is None
assert ea._artifact_name is None
@pytest.mark.skip
@patch("zenml.steps.external_artifact.Client")
def test_upload_if_necessary_by_id(mocked_zenml_client):
mocked_zenml_client.return_value = MockClient()
ea = ExternalArtifact(id=123)
assert ea._value is None
assert ea._pipeline_name is None
assert ea._artifact_name is None
assert ea._id is not None
assert ea.upload_if_necessary() == 123
@patch("zenml.steps.external_artifact.Client")
def test_upload_if_necessary_by_pipeline_and_artifact(
mocked_zenml_client,
):
mocked_zenml_client.return_value = MockClient()
ea = ExternalArtifact(pipeline_name="foo", artifact_name="bar")
assert ea._value is None
assert ea._pipeline_name is not None
assert ea._artifact_name is not None
assert ea._id is None
assert ea.upload_if_necessary() == 123
assert ea._id == 123
@patch("zenml.steps.external_artifact.Client")
def test_upload_if_necessary_by_pipeline_and_artifact_other_artifact_store(
mocked_zenml_client,
):
mocked_zenml_client.return_value = MockClient(artifact_store_id=45)
with pytest.raises(RuntimeError, match=r"The artifact bar \(ID: 123\)"):
ExternalArtifact(
pipeline_name="foo", artifact_name="bar"
).upload_if_necessary()
@patch("zenml.steps.external_artifact.Client")
def METHOD_NAME(
mocked_zenml_client,
):
mocked_zenml_client.return_value = MockClient()
with pytest.raises(RuntimeError, match="Artifact with name `foobar`"):
ExternalArtifact(
pipeline_name="foo", artifact_name="foobar"
).upload_if_necessary() | null |
355 | import numpy as np
import pytest
import torch
from lhotse import AudioSource, CutSet, MultiCut, Recording, SupervisionSegment
from lhotse.audio import RecordingSet
from lhotse.cut import PaddingCut
from lhotse.utils import fastcopy
@pytest.fixture
def recording():
return Recording.from_file("test/fixtures/libri/libri-1088-134315-0000_8ch.wav")
@pytest.fixture
def mono_rir():
return Recording.from_file("test/fixtures/rir/sim_1ch.wav")
@pytest.fixture
def METHOD_NAME():
return Recording.from_file("test/fixtures/rir/real_8ch.wav")
@pytest.fixture
def cut_with_supervision(recording, cut_channels=None, sup_channels=None):
if cut_channels is None:
cut_channels = [0, 1, 2, 3, 4, 5, 6, 7]
if sup_channels is None:
sup_channels = [0, 1, 2, 3, 4, 5, 6, 7]
return MultiCut(
id="cut",
start=0.0,
duration=1.0,
channel=cut_channels,
supervisions=[
SupervisionSegment(
id="sup",
recording_id="rec",
start=0.0,
duration=1.0,
channel=sup_channels,
)
],
recording=recording,
)
def test_cut_perturb_speed11(cut_with_supervision):
cut_sp = cut_with_supervision.perturb_speed(1.1)
assert cut_sp.start == 0.0
assert cut_sp.duration == 0.9090625
assert cut_sp.end == 0.9090625
assert cut_sp.num_samples == 14545
assert cut_sp.recording.duration == 14.5818125
assert cut_sp.recording.num_samples == 233309
assert cut_sp.supervisions[0].start == 0.0
assert cut_sp.supervisions[0].duration == 0.9090625
assert cut_sp.supervisions[0].end == 0.9090625
cut_samples = cut_sp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 14545
recording_samples = cut_sp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 233309
def test_cut_perturb_speed09(cut_with_supervision):
cut_sp = cut_with_supervision.perturb_speed(0.9)
assert cut_sp.start == 0.0
assert cut_sp.duration == 1.111125
assert cut_sp.end == 1.111125
assert cut_sp.num_samples == 17778
assert cut_sp.recording.duration == 17.82225
assert cut_sp.recording.num_samples == 285156
assert cut_sp.supervisions[0].start == 0.0
assert cut_sp.supervisions[0].duration == 1.111125
assert cut_sp.supervisions[0].end == 1.111125
cut_samples = cut_sp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 17778
recording_samples = cut_sp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 285156
def test_cut_perturb_tempo09(cut_with_supervision):
cut_tp = cut_with_supervision.perturb_tempo(0.9)
assert cut_tp.start == 0.0
assert cut_tp.duration == 1.111125
assert cut_tp.end == 1.111125
assert cut_tp.num_samples == 17778
assert cut_tp.recording.duration == 17.82225
assert cut_tp.recording.num_samples == 285156
assert cut_tp.supervisions[0].start == 0.0
assert cut_tp.supervisions[0].duration == 1.111125
assert cut_tp.supervisions[0].end == 1.111125
cut_samples = cut_tp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 17778
recording_samples = cut_tp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 285156
def test_cut_perturb_tempo11(cut_with_supervision):
cut_tp = cut_with_supervision.perturb_tempo(1.1)
assert cut_tp.start == 0.0
assert cut_tp.duration == 0.9090625
assert cut_tp.end == 0.9090625
assert cut_tp.num_samples == 14545
assert cut_tp.recording.duration == 14.5818125
assert cut_tp.recording.num_samples == 233309
assert cut_tp.supervisions[0].start == 0.0
assert cut_tp.supervisions[0].duration == 0.9090625
assert cut_tp.supervisions[0].end == 0.9090625
cut_samples = cut_tp.load_audio()
assert cut_samples.shape[0] == 8
assert cut_samples.shape[1] == 14545
recording_samples = cut_tp.recording.load_audio()
assert recording_samples.shape[0] == 8
assert recording_samples.shape[1] == 233309
def test_resample_cut(cut_with_supervision):
resampled = cut_with_supervision.resample(8000)
assert cut_with_supervision.sampling_rate == 16000
assert resampled.sampling_rate == 8000
assert cut_with_supervision.num_samples == 2 * resampled.num_samples
samples = resampled.load_audio()
assert samples.shape[1] == resampled.num_samples
@pytest.mark.parametrize("scale", [0.125, 2.0])
def test_cut_perturb_volume(cut_with_supervision, scale):
cut_vp = cut_with_supervision.perturb_volume(scale)
assert cut_vp.start == cut_with_supervision.start
assert cut_vp.duration == cut_with_supervision.duration
assert cut_vp.end == cut_with_supervision.end
assert cut_vp.num_samples == cut_with_supervision.num_samples
assert cut_vp.recording.duration == cut_with_supervision.recording.duration
assert cut_vp.recording.num_samples == cut_with_supervision.recording.num_samples
assert cut_vp.supervisions[0].start == cut_with_supervision.supervisions[0].start
assert (
cut_vp.supervisions[0].duration == cut_with_supervision.supervisions[0].duration
)
assert cut_vp.supervisions[0].end == cut_with_supervision.supervisions[0].end
assert cut_vp.load_audio().shape == cut_with_supervision.load_audio().shape
assert (
cut_vp.recording.load_audio().shape
== cut_with_supervision.recording.load_audio().shape
)
np.testing.assert_array_almost_equal(
cut_vp.load_audio(), cut_with_supervision.load_audio() * scale
)
np.testing.assert_array_almost_equal(
cut_vp.recording.load_audio(),
cut_with_supervision.recording.load_audio() * scale,
)
@pytest.mark.parametrize(
"rir, rir_channels, expected_channels",
[
("mono_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]),
pytest.param("mono_rir", [1], None, marks=pytest.mark.xfail),
("multi_channel_rir", [0, 1, 2, 3, 4, 5, 6, 7], [0, 1, 2, 3, 4, 5, 6, 7]),
("multi_channel_rir", [0], [0, 1, 2, 3, 4, 5, 6, 7]),
("multi_channel_rir", [1], [0, 1, 2, 3, 4, 5, 6, 7]),
pytest.param("multi_channel_rir", [0, 1], None, marks=pytest.mark.xfail),
],
)
def test_cut_reverb_rir(
cut_with_supervision, rir, rir_channels, expected_channels, request
):
rir = request.getfixturevalue(rir)
cut = cut_with_supervision
cut_rvb = cut.reverb_rir(rir, rir_channels=rir_channels)
print(cut_rvb.channel)
assert cut_rvb.start == cut.start
assert cut_rvb.duration == cut.duration
assert cut_rvb.end == cut.end
assert cut_rvb.num_samples == cut.num_samples
assert cut_rvb.recording.duration == cut.recording.duration
assert cut_rvb.recording.num_samples == cut.recording.num_samples
assert cut_rvb.supervisions[0].start == cut.supervisions[0].start
assert cut_rvb.supervisions[0].duration == cut.supervisions[0].duration
assert cut_rvb.supervisions[0].end == cut.supervisions[0].end
assert cut_rvb.load_audio().shape == cut.load_audio().shape
assert cut_rvb.recording.load_audio().shape == cut.recording.load_audio().shape
assert cut_rvb.channel == expected_channels
def test_cut_reverb_fast_rir(cut_with_supervision):
cut = cut_with_supervision
with pytest.raises(AssertionError):
cut_rvb = cut.reverb_rir(rir_recording=None) | null |
356 | ################################################################################
# Creme is a free/open-source Customer Relationship Management software
# Copyright (C) 2016-2020 Hybird
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
################################################################################
import logging
from django.contrib.contenttypes.models import ContentType
from django.http import QueryDict
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ngettext
from creme.documents import get_document_model
from ..forms.mass_import import form_factory, get_header
from ..models import MassImportJobResult
from ..utils.translation import get_model_verbose_name
from .base import JobProgress, JobType
logger = logging.getLogger(__name__)
class _MassImportType(JobType):
id = JobType.generate_id('creme_core', 'mass_import')
verbose_name = _('Mass import')
def _build_POST(self, job_data):
return QueryDict(job_data['POST'].encode('utf8'))
def _get_document(self, POST):
return get_document_model().objects.get(id=POST['document'])
def _get_ctype(self, job_data):
return ContentType.objects.get_for_id(job_data['ctype'])
def _execute(self, job):
job_data = job.data
POST = self._build_POST(job_data)
doc = self._get_document(POST)
header = get_header(doc.filedata, has_header='has_header' in POST)
form_class = form_factory(self._get_ctype(job_data), header)
form = form_class(user=job.user, data=POST)
if not form.is_valid():
# TODO: unit test
raise self.Error(
gettext('Invalid data [{}]').format(form.errors.as_text())
)
form.process(job)
def progress(self, job):
count = MassImportJobResult.objects.filter(job=job).count()
return JobProgress(
percentage=None,
label=ngettext(
'{count} line has been processed.',
'{count} lines have been processed.',
count
).format(count=count)
)
@property
def results_bricks(self):
from ..bricks import MassImportJobErrorsBrick
return [MassImportJobErrorsBrick()]
def METHOD_NAME(self, job):
try:
job_data = job.data
desc = [
gettext('Import «{model}» from {doc}').format(
model=self._get_ctype(job_data).model_class()._meta.verbose_name,
doc=self._get_document(self._build_POST(job_data)),
),
]
except Exception: # TODO: unit test
logger.exception('Error in _MassImportType.get_description')
desc = ['?']
return desc
def get_stats(self, job):
stats = []
result_qs = MassImportJobResult.objects.filter(job=job)
lines_count = result_qs.count()
entity_result_qs = result_qs.filter(entity__isnull=False)
created_count = entity_result_qs.filter(updated=False).count()
updated_count = entity_result_qs.filter(updated=True).count()
model = self._get_ctype(job.data).model_class()
if created_count:
stats.append(
ngettext(
'{count} «{model}» has been created.',
'{count} «{model}» have been created.',
created_count
).format(
count=created_count,
model=get_model_verbose_name(model, created_count),
)
)
elif updated_count != lines_count:
stats.append(
gettext('No «{model}» has been created.').format(
model=model._meta.verbose_name,
)
)
if updated_count:
stats.append(
ngettext(
'{count} «{model}» has been updated.',
'{count} «{model}» have been updated.',
updated_count
).format(
count=updated_count,
model=get_model_verbose_name(model, updated_count),
)
)
elif created_count != lines_count:
stats.append(
gettext('No «{model}» has been updated.').format(
model=model._meta.verbose_name,
)
)
stats.append(
ngettext(
'{count} line in the file.',
'{count} lines in the file.',
lines_count,
).format(count=lines_count)
)
return stats
mass_import_type = _MassImportType() | null |
357 | from __future__ import absolute_import
from six.moves import xrange
from argparse import ArgumentParser
import os
import logging
import random
from toil.common import Toil
from toil.job import Job
def setup(job, input_file_id, n, down_checkpoints):
"""Sets up the sort.
Returns the FileID of the sorted file
"""
# Kick off the recursive sort as a child job and return its result
job.fileStore.logToMaster("Starting the merge sort")
return job.addChildJobFn(down,
input_file_id, n,
down_checkpoints=down_checkpoints,
memory='600M').rv()
def down(job, input_file_id, n, down_checkpoints):
"""Input is a file and a range into that file to sort and an output location in which
to write the sorted file.
If the range is larger than a threshold N the range is divided recursively and
a follow on job is then created which merges back the results. Otherwise,
the file is sorted and placed in the output.
"""
# Read the file
input_file = job.fileStore.readGlobalFile(input_file_id, cache=False)
length = os.path.getsize(input_file)
if length > n:
# We will subdivide the file
job.fileStore.logToMaster("Splitting file: %s of size: %s"
% (input_file_id, length), level=logging.CRITICAL)
# Split the file into two copies
mid_point = get_midpoint(input_file, 0, length)
t1 = job.fileStore.getLocalTempFile()
with open(t1, 'w') as fH:
copy_subrange_of_file(input_file, 0, mid_point + 1, fH)
t2 = job.fileStore.getLocalTempFile()
with open(t2, 'w') as fH:
copy_subrange_of_file(input_file, mid_point + 1, length, fH)
# Call the down function recursively
return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n,
down_checkpoints=down_checkpoints, memory='600M').rv(),
job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n,
down_checkpoints=down_checkpoints,
memory='600M').rv()).rv()
else:
# We can sort this bit of the file
job.fileStore.logToMaster("Sorting file: %s of size: %s"
% (input_file_id, length), level=logging.CRITICAL)
# Sort the copy and write back to the fileStore
output_file = job.fileStore.getLocalTempFile()
sort(input_file, output_file)
return job.fileStore.writeGlobalFile(output_file)
def up(job, input_file_id_1, input_file_id_2):
"""Merges the two files and places them in the output.
"""
with job.fileStore.writeGlobalFileStream() as (fileHandle, output_id):
with job.fileStore.readGlobalFileStream(input_file_id_1) as inputFileHandle1:
with job.fileStore.readGlobalFileStream(input_file_id_2) as inputFileHandle2:
job.fileStore.logToMaster("Merging %s and %s to %s"
% (input_file_id_1, input_file_id_2, output_id))
METHOD_NAME(inputFileHandle1, inputFileHandle2, fileHandle)
# Cleanup up the input files - these deletes will occur after the completion is successful.
job.fileStore.deleteGlobalFile(input_file_id_1)
job.fileStore.deleteGlobalFile(input_file_id_2)
return output_id
# convenience functions
def sort(in_file, out_file):
"""Sorts the given file.
"""
filehandle = open(in_file, 'r')
lines = filehandle.readlines()
filehandle.close()
lines.sort()
filehandle = open(out_file, 'w')
for line in lines:
filehandle.write(line)
filehandle.close()
def METHOD_NAME(filehandle_1, filehandle_2, output_filehandle):
"""Merges together two files maintaining sorted order.
"""
line2 = filehandle_2.readline()
for line1 in filehandle_1.readlines():
while line2 != '' and line2 <= line1:
output_filehandle.write(line2)
line2 = filehandle_2.readline()
output_filehandle.write(line1)
while line2 != '':
output_filehandle.write(line2)
line2 = filehandle_2.readline()
def copy_subrange_of_file(input_file, file_start, file_end, output_filehandle):
"""Copies the range (in bytes) between fileStart and fileEnd to the given
output file handle.
"""
with open(input_file, 'r') as fileHandle:
fileHandle.seek(file_start)
data = fileHandle.read(file_end - file_start)
assert len(data) == file_end - file_start
output_filehandle.write(data)
def get_midpoint(file, file_start, file_end):
"""Finds the point in the file to split.
Returns an int i such that fileStart <= i < fileEnd
"""
filehandle = open(file, 'r')
mid_point = (file_start + file_end) // 2  # integer division so seek() receives an int
assert mid_point >= file_start
filehandle.seek(mid_point)
line = filehandle.readline()
assert len(line) >= 1
if len(line) + mid_point < file_end:
return mid_point + len(line) - 1
filehandle.seek(file_start)
line = filehandle.readline()
assert len(line) >= 1
assert len(line) + file_start <= file_end
return len(line) + file_start - 1
def make_file_to_sort(file_name, lines, line_length):
with open(file_name, 'w') as fileHandle:
for _ in xrange(lines):
line = "".join(random.choice('actgACTGNXYZ') for _ in xrange(line_length - 1)) + '\n'
fileHandle.write(line)
def main():
parser = ArgumentParser()
Job.Runner.addToilOptions(parser)
parser.add_argument('--num-lines', default=1000, help='Number of lines in file to sort.', type=int)
parser.add_argument('--line-length', default=50, help='Length of lines in file to sort.', type=int)
parser.add_argument("--N",
help="The threshold below which a serial sort function is used to sort file. "
"All lines must of length less than or equal to N or program will fail",
default=10000)
options = parser.parse_args()
if int(options.N) <= 0:
raise RuntimeError("Invalid value of N: %s" % options.N)
file_name = 'file_to_sort.txt'
make_file_to_sort(file_name=file_name, lines=options.num_lines, line_length=options.line_length)
with Toil(options) as toil:
sort_file_url = 'file://' + os.path.abspath('file_to_sort.txt')
if not toil.options.restart:
sort_file_id = toil.importFile(sort_file_url)
sorted_file_id = toil.start(Job.wrapJobFn(setup, sort_file_id, int(options.N), False, memory='600M'))
else:
sorted_file_id = toil.restart()
toil.exportFile(sorted_file_id, sort_file_url)
if __name__ == '__main__':
main() | null |
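# --- Quick demonstration without Toil (assumes the helpers above are in scope) ---
# Generates a small random file with make_file_to_sort() and sorts it in-process
# with sort(); the file names below are arbitrary placeholders.
make_file_to_sort('demo_to_sort.txt', lines=100, line_length=20)
sort('demo_to_sort.txt', 'demo_sorted.txt')

with open('demo_sorted.txt') as fh:
    sorted_lines = fh.readlines()

assert sorted_lines == sorted(sorted_lines)
print('first line:', sorted_lines[0].strip())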
358 | from __future__ import unicode_literals
from rest_framework import generics
from rest_framework import permissions
from rest_framework.exceptions import NotFound
from api.actions.serializers import PreprintRequestActionSerializer
from api.base.views import JSONAPIBaseView
from api.base import permissions as base_permissions
from api.base.filters import ListFilterMixin
from api.base.utils import get_object_or_error
from api.requests.permissions import NodeRequestPermission, PreprintRequestPermission
from api.requests.serializers import NodeRequestSerializer, PreprintRequestSerializer
from framework.auth.oauth_scopes import CoreScopes
from osf.models import Node, NodeRequest, PreprintRequest, Preprint
class RequestMixin(object):
serializer_class = None
request_class = None
request_display_name = None
target_class = None
target_display_name = None
target_lookup_url_kwarg = None
request_lookup_url_kwarg = None
def __get_object(self, object_class, lookup_arg, display_name, check_object_permissions=True):
obj = get_object_or_error(
object_class,
self.kwargs[lookup_arg],
self.request,
display_name=display_name,
)
# May raise a permission denied
if check_object_permissions:
self.check_object_permissions(self.request, obj)
return obj
def get_request(self, check_object_permissions=True):
return self.__get_object(self.request_class, self.request_lookup_url_kwarg, self.request_display_name, check_object_permissions=check_object_permissions)
def get_target(self, check_object_permissions=True):
return self.__get_object(self.target_class, self.target_lookup_url_kwarg, self.target_display_name, check_object_permissions=check_object_permissions)
class NodeRequestMixin(RequestMixin):
serializer_class = NodeRequestSerializer
request_class = NodeRequest
request_display_name = 'node request'
target_class = Node
target_display_name = 'node'
target_lookup_url_kwarg = 'node_id'
request_lookup_url_kwarg = 'request_id'
class PreprintRequestMixin(RequestMixin):
serializer_class = PreprintRequestSerializer
request_class = PreprintRequest
request_display_name = 'preprint request'
target_class = Preprint
target_display_name = 'preprint'
target_lookup_url_kwarg = 'preprint_id'
request_lookup_url_kwarg = 'request_id'
class RequestDetail(JSONAPIBaseView, generics.RetrieveAPIView):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ALWAYS_PUBLIC] # Actual scope checks are done on subview.as_view
required_write_scopes = [CoreScopes.NULL]
view_category = 'requests'
view_name = 'request-detail'
def get(self, request, *args, **kwargs):
request_id = self.kwargs['request_id']
if NodeRequest.objects.filter(_id=request_id).exists():
return NodeRequestDetail.as_view()(request._request, *args, **kwargs)
elif PreprintRequest.objects.filter(_id=request_id).exists():
return PreprintRequestDetail.as_view()(request._request, *args, **kwargs)
else:
raise NotFound
class NodeRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, NodeRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
NodeRequestPermission,
)
required_read_scopes = [CoreScopes.NODE_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = NodeRequestSerializer
view_category = 'requests'
view_name = 'node-request-detail'
def get_object(self):
return self.get_request()
class PreprintRequestDetail(JSONAPIBaseView, generics.RetrieveAPIView, PreprintRequestMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintRequestPermission,
)
required_read_scopes = [CoreScopes.PREPRINT_REQUESTS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintRequestSerializer
view_category = 'requests'
view_name = 'preprint-request-detail'
def get_object(self):
return self.get_request()
class RequestActionList(JSONAPIBaseView, generics.ListAPIView):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.NULL]
view_category = 'requests'
view_name = 'request-action-list'
def get(self, request, *args, **kwargs):
request_id = self.kwargs['request_id']
if PreprintRequest.objects.filter(_id=request_id).exists():
return PreprintRequestActionList.as_view()(request._request, *args, **kwargs)
else:
raise NotFound
class PreprintRequestActionList(JSONAPIBaseView, generics.ListAPIView, PreprintRequestMixin, ListFilterMixin):
permission_classes = (
permissions.IsAuthenticatedOrReadOnly,
base_permissions.TokenHasScope,
PreprintRequestPermission,
)
required_read_scopes = [CoreScopes.ACTIONS_READ]
required_write_scopes = [CoreScopes.NULL]
serializer_class = PreprintRequestActionSerializer
view_category = 'requests'
view_name = 'preprint-request-action-list'
# supports MustBeModerator
def METHOD_NAME(self):
request_id = self.kwargs['request_id']
preprint_request = PreprintRequest.load(request_id)
if preprint_request:
return preprint_request.target.provider
raise NotFound
# overrides ListFilterMixin
def get_default_queryset(self):
return self.get_request().actions.order_by('-created').all()
# overrides ListAPIView
def get_queryset(self):
return self.get_queryset_from_request() | null |
359 | import hashlib
import math
import operator
import re
from collections import OrderedDict
from typing import Dict, Iterable, List, Optional, Set
from click import UsageError
from pygitguardian.models import Match, PolicyBreak, ScanResult
from ggshield.core.types import IgnoredMatch
REGEX_MATCH_HIDE = re.compile(r"[^+\-\s]")
REGEX_SPECIAL_CHARS = set(".^$+*?{}()[]\\|")
INVALID_PATTERNS_REGEX = re.compile(
r"(\*\*\*)" # the "***" sequence is not valid
r"|(\*\*[^/])" # a "**" sequence must be immediately followed by a "/"
r"|([^/]\*\*)" # a "**" sequence must be either at the start of the string or
# immediately preceded by a "/"
)
MAXIMUM_CENSOR_LENGTH = 60
def is_ignored(
policy_break: PolicyBreak,
matches_ignore: Iterable[IgnoredMatch],
) -> bool:
"""
is_ignored checks whether an occurrence is ignored.
There are two ways of ignoring an occurrence:
- matching the occurrence sha
- matching one of the match.match values
:param policy_break: Policy Break occurrence to judge
:param matches_ignore: Iterable of match ignores
:return: True if ignored
"""
matches = [match.match for match in matches_ignore]
if policy_break.policy.lower() != "secrets detection":
return True
if get_ignore_sha(policy_break) in matches or any(
match.match in matches for match in policy_break.matches
):
return True
return False
def remove_ignored_from_result(
scan_result: ScanResult, matches_ignore: Iterable[IgnoredMatch]
) -> None:
"""
remove_ignored removes occurrences from a Scan Result based on a sha
made from its matches.
:param scan_result: ScanResult to filter
:param matches_ignore: match SHAs or plaintext matches to filter out
"""
scan_result.policy_breaks = [
policy_break
for policy_break in scan_result.policy_breaks
if not is_ignored(policy_break, matches_ignore)
]
scan_result.policy_break_count = len(scan_result.policy_breaks)
def remove_results_from_ignore_detectors(
scan_result: ScanResult,
ignored_detectors: Optional[Set[str]] = None,
) -> None:
if not ignored_detectors:
return
scan_result.policy_breaks = [
policy_break
for policy_break in scan_result.policy_breaks
if policy_break.break_type not in ignored_detectors
]
scan_result.policy_break_count = len(scan_result.policy_breaks)
def get_ignore_sha(policy_break: PolicyBreak) -> str:
hashable = "".join(
[
f"{match.match},{match.match_type}"
for match in sorted(
policy_break.matches, key=operator.attrgetter("match_type")
)
]
)
return hashlib.sha256(hashable.encode("UTF-8")).hexdigest()
def leak_dictionary_by_ignore_sha(
policy_breaks: List[PolicyBreak],
) -> Dict[str, List[PolicyBreak]]:
"""
leak_dictionary_by_ignore_sha groups policy breaks by their ignore SHA,
ordered by first appearance in the file.
Incidents are sorted by the position of their first match; file-wide
matches have no index, so they are given -1 to bump them to the top.
:return: Ordered dictionary mapping each ignore SHA to the list of
policy breaks sharing that SHA.
"""
policy_breaks.sort(
key=lambda x: min( # type: ignore
match.index_start if match.index_start else -1 for match in x.matches
)
)
sha_dict: Dict[str, List[PolicyBreak]] = OrderedDict()
for policy_break in policy_breaks:
policy_break.matches.sort(key=lambda x: x.index_start if x.index_start else -1)
ignore_sha = get_ignore_sha(policy_break)
sha_dict.setdefault(ignore_sha, []).append(policy_break)
return sha_dict
def translate_user_pattern(pattern: str) -> str:
"""
Translate the user pattern into a regex. This function assumes that the given
pattern is valid and has been normalized beforehand.
"""
# Escape each special character
pattern = "".join(
f"\\{char}" if char in REGEX_SPECIAL_CHARS else char for char in pattern
)
# Handle start/end of pattern
if pattern[-1] != "/":
pattern += "$"
if pattern[0] == "/":
pattern = "^" + pattern[1:]
else:
pattern = "(^|/)" + pattern
# Replace * and ** sequences
pattern = re.sub(r"\\\*\\\*/", "([^/]+/)*", pattern)
pattern = re.sub(r"\\\*", "([^/]+)", pattern)
return pattern
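# Illustrative sketch (added for clarity, not part of ggshield): hand-traced
# results of translate_user_pattern, derived from the escaping, anchoring and
# substitution steps above.
# translate_user_pattern("foo/*.py")  ->  (^|/)foo/([^/]+)\.py$
# translate_user_pattern("/docs/")    ->  ^docs/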
def METHOD_NAME(pattern: str) -> bool:
return bool(pattern) and not INVALID_PATTERNS_REGEX.search(pattern)
def init_exclusion_regexes(paths_ignore: Iterable[str]) -> Set[re.Pattern]:
"""
Compile each ignore pattern into a regex and return the resulting set.
The patterns typically come from three sources:
.gitguardian.yaml
files in .git
files ignored in .gitignore
"""
res = set()
for path in paths_ignore:
if not METHOD_NAME(path):
raise UsageError(f"{path} is not a valid exclude pattern.")
res.add(re.compile(translate_user_pattern(path)))
return res
def censor_string(text: str) -> str:
"""
Censor a string (usually a secret), revealing only the first and last
1/6th of the match up to a maximum of MAXIMUM_CENSOR_LENGTH.
:return: the text censored
"""
len_match = len(text)
start_privy_len = min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH)
end_privy_len = len_match - min(math.ceil(len_match / 6), MAXIMUM_CENSOR_LENGTH)
censored = REGEX_MATCH_HIDE.sub("*", text)
return str(
text[:start_privy_len]
+ censored[start_privy_len:end_privy_len]
+ text[end_privy_len:]
)
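# Illustrative sketch (added for clarity): a hand-worked example of censor_string.
# With a 12-character input, ceil(12 / 6) = 2 characters are kept at each end:
# censor_string("abcdefghijkl")  ->  "ab********kl"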
def censor_match(match: Match) -> str:
return censor_string(match.match)
def censor_content(content: str, policy_breaks: List[PolicyBreak]) -> str:
for policy_break in policy_breaks:
for match in policy_break.matches:
if match.index_start is None:
continue
match.match = censor_match(match)
content = "".join(
(
content[: match.index_start],
match.match,
content[len(match.match) + match.index_start :],
)
)
return content | null |
360 | from datetime import datetime
from django.db import models
from django.utils import timezone
from elasticsearch.exceptions import NotFoundError
import pytz
class MetricMixin(object):
@classmethod
def _get_all_indices(cls):
all_aliases = cls._index.get_alias()
indices = set()
for index, aliases in all_aliases.items():
indices.add(index)
if aliases['aliases']:
for alias in aliases['aliases'].keys():
indices.add(alias)
return indices
@classmethod
def _get_relevant_indices(cls, after, before):
# NOTE: This will only work for yearly indices. This logic
# will need to be updated if we change to monthly or daily indices
if before and after:
year_range = range(after.year, before.year + 1)
elif after:
year_range = range(after.year, timezone.now().year + 1)
else:
# No metric data from before 2013
year_range = range(2013, before.year + 1)
all_indices = cls._get_all_indices()
relevant_indices = [
# get_index_name takes a datetime, so get Jan 1 for each relevant year
cls.get_index_name(datetime(year, 1, 1, tzinfo=pytz.utc))
for year in year_range
]
return [index for index in relevant_indices if index in all_indices]
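# Illustrative sketch (added for clarity): with after=2019-05-01 and
# before=2021-02-01, year_range covers 2019, 2020 and 2021, so a yearly index
# name is computed for Jan 1 of each of those years and kept only if that
# index actually exists.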
@classmethod
def _get_id_to_count(cls, size, metric_field, count_field, after=None, before=None):
"""Performs the elasticsearch aggregation for get_top_by_count. Return a
dict mapping ids to summed counts. If there's no data in the ES index, return None.
"""
search = cls.search(after=after, before=before)
timestamp = {}
if after:
timestamp['gte'] = after
if before:
timestamp['lt'] = before
if timestamp:
search = search.filter('range', timestamp=timestamp)
search.aggs.\
bucket('by_id', 'terms', field=metric_field, size=size, order={'sum_count': 'desc'}).\
metric('sum_count', 'sum', field=count_field)
# Optimization: set size to 0 so that hits aren't returned (we only care about the aggregation)
search = search.extra(size=0)
try:
response = search.execute()
except NotFoundError:
# _get_relevant_indices returned one or more indices
# that don't exist. Fall back to the unoptimized query.
search = search.index().index(cls._default_index())
response = search.execute()
# No indexed data
if not hasattr(response.aggregations, 'by_id'):
return None
buckets = response.aggregations.by_id.buckets
# Map _id => count
return {
bucket.key: int(bucket.sum_count.value)
for bucket in buckets
}
# Overrides Document.search to only search relevant
# indices, determined from `after`
@classmethod
def search(cls, using=None, index=None, after=None, before=None, *args, **kwargs):
if not index and (before or after):
indices = cls._get_relevant_indices(after, before)
index = ','.join(indices)
return super(MetricMixin, cls).search(using=using, index=index, *args, **kwargs)
@classmethod
def METHOD_NAME(cls, qs, model_field, metric_field,
size, order_by=None,
count_field='count',
annotation='metric_count',
after=None, before=None):
"""Return a queryset annotated with the metric counts for each item.
Example: ::
# Get the top 10 PreprintProviders by download count
top_providers = PreprintDownload.get_top_by_count(
qs=PreprintProvider.objects.all(),
model_field='_id',
metric_field='provider_id',
annotation='download_count',
size=10
)
for each in top_providers:
print('{}: {}'.format(each._id, each.download_count))
``size`` determines the number of buckets returned by the aggregation.
If ``size=None``, the size of the queryset is used.
WARNING: Be careful when using size=None with a large queryset.
:param QuerySet qs: The initial queryset to annotate
:param str model_field: Model field that corresponds to ``metric_field``.
:param str metric_field: Metric field that corresponds to ``model_field``.
:param int size: Size of the aggregation. Also determines the size of the final
queryset.
:param str order_by: Field to order queryset by. If `None`, orders by
the metric, descending.
:param datetime after: Minimum datetime to narrow the search (inclusive).
:param datetime before: Maximum datetime to narrow the search (exclusive).
:param str count_field: Name of the field where count values are stored.
:param str annotation: Name of the annotation.
"""
id_to_count = cls._get_id_to_count(
size=size or qs.count(),
metric_field=metric_field,
count_field=count_field,
after=after,
before=before
)
if id_to_count is None:
return qs.annotate(**{annotation: models.Value(0, models.IntegerField())})
# Annotate the queryset with the counts for each id
# https://stackoverflow.com/a/48187723/1157536
whens = [
models.When(**{
model_field: k,
'then': v,
}) for k, v in id_to_count.items()
]
# By default order by annotation, desc
order_by = order_by or '-{}'.format(annotation)
return qs.annotate(**{
annotation: models.Case(*whens, default=0, output_field=models.IntegerField())
}).order_by(order_by) | null |
361 | from methods.regular.regular_api import *
from methods.task.task_template.task_template_launch_handler import TaskTemplateLauncherThread
from methods.sync_events.sync_actions_handler import SyncActionsHandlerThread
from methods.action.action_flow_trigger_queue import ActionFlowTriggerQueueProcess
from shared.ingest.packet import enqueue_packet
@routes.route('/api/walrus/v1/interservice/receive',
methods = ['POST'])
def METHOD_NAME():
"""
Inter-Service route to notify of new job launch
For now relies on inter_service_security_token for permissions...
This is just a starting point for more generic inter service notification
There are pros and cons to having the DB as an intermediary point; for now
this is fairly lightweight.
Once we have a good pattern here, e.g. retry/overflow handling,
we can probably remove the polling / thread.
"""
spec_list = [{"inter_service_security_token": {
'kind': str,
'required': True,
'security_token': settings.INTER_SERVICE_SECRET
}
},
{"message": {
'kind': str,
'required': True
}
},
{"id": { # or "base_class_id"?
'kind': int,
'required': False,
'default': None
}
},
{"extra_params": {
'kind': dict,
'required': False,
'default': None
}
},
{"base_class_string": {
'kind': str,
'required': False,
'default': None
}
},
{"project_string_id": {
'kind': str,
'required': False,
'default': None
}
}
# Serialized object maybe?
]
log, input_from_request, untrusted_input = regular_input.master(request = request, spec_list = spec_list)
if len(log["error"].keys()) >= 1:
return jsonify(log = log), 400
logger.info("Received valid inter service request")
with sessionMaker.session_scope() as session:
# CAUTIONS
# Generally assumes any calls here are non-blocking
# so that the request can return promptly
# eg 1) Condition on message then some_launcher(event_id = input['id'])
# Or 2) if we want object here for some reason, something like:
# if input['base_class_string']:
# base_object = getattr(sys.modules[__name__], input['base_class_string']).get_by_id(
# id = input['id'],
# session = session)
if input_from_request['message'] == 'new_job_launch_queue_item':
job_launcher_thread = TaskTemplateLauncherThread(run_once = True)
log['info']['job_launcher_thread'] = True
if input_from_request['message'] == 'new_sync_action_item':
sync_action_thread = SyncActionsHandlerThread(run_once = True)
log['info']['job_launcher_thread'] = True
if input_from_request['message'] == 'new_action_flow_queue_item':
num_flows = ActionFlowTriggerQueueProcess.try_to_enqueue_new_action_flows(
session = session,
event_id = input_from_request['id'],
commit_per_element = True)
for i in range(0, num_flows):
action_flow_thread = ActionFlowTriggerQueueProcess(run_once = True)
if input_from_request['message'] == 'file_copy':
enqueue_packet(project_string_id = input_from_request.get('project_string_id'),
session = session,
media_url = None,
media_type = input_from_request['extra_params'].get('type'),
directory_id = input_from_request['extra_params'].get('destination_working_dir_id'),
source_directory_id = input_from_request['extra_params'].get('source_working_dir_id'),
remove_link = input_from_request['extra_params'].get('remove_link'),
add_link = input_from_request['extra_params'].get('add_link'),
copy_instance_list = input_from_request['extra_params'].get('copy_instance_list'),
job_id = None,
batch_id = input_from_request['extra_params'].get('batch_id'),
file_id = input_from_request['id'],
instance_list = [],
video_parent_length = input_from_request['extra_params'].get('frame_count'),
task_id = None,
mode = 'copy_file',
commit_input = True)
log['success'] = True
return jsonify(log = log), 200 | null |
362 | # coding=utf-8
# Copyright 2018-2023 EvaDB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from pathlib import Path
import pandas as pd
from evadb.catalog.models.table_catalog import TableCatalogEntry
from evadb.database import EvaDBDatabase
from evadb.models.storage.batch import Batch
from evadb.parser.table_ref import TableInfo
from evadb.storage.abstract_storage_engine import AbstractStorageEngine
from evadb.storage.sqlite_storage_engine import SQLStorageEngine
from evadb.utils.logging_manager import logger
class AbstractMediaStorageEngine(AbstractStorageEngine):
def __init__(self, db: EvaDBDatabase):
super().__init__(db)
self._rdb_handler: SQLStorageEngine = SQLStorageEngine(db)
def METHOD_NAME(self, table: TableCatalogEntry):
return self.db.catalog().get_multimedia_metadata_table_catalog_entry(table)
def _create_metadata_table(self, table: TableCatalogEntry):
return (
self.db.catalog().create_and_insert_multimedia_metadata_table_catalog_entry(
table
)
)
def _xform_file_url_to_file_name(self, file_url: Path) -> str:
# Convert media_path to a file name. This is done to support duplicate media names with
# different complete paths. Without the conversion, we cannot copy files with the same
# name but different paths, e.g. a/b/my.mp4 and a/b/c/my.mp4.
# xformed_file_name = zlib.crc32(str(file_url).encode("utf-8")) & 0xFFFFFFFF
# return str(xformed_file_name)
# The previous hashing approach is commented out above. Since we now use a symbolic link,
# the only thing we need to worry about is the same file name under different directories.
# This motivates simply folding the directory into the file name. It also avoids the
# computational overhead of hashing.
file_path_str = str(file_url)
file_path = re.sub(r"[^a-zA-Z0-9 \.\n]", "_", file_path_str)
return file_path
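# Illustrative sketch (added for clarity): every character outside
# [a-zA-Z0-9 .\n] is replaced with "_", so for example:
# _xform_file_url_to_file_name(Path("a/b/my.mp4"))    ->  "a_b_my.mp4"
# _xform_file_url_to_file_name(Path("a/b/c/my.mp4"))  ->  "a_b_c_my.mp4"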
def create(self, table: TableCatalogEntry, if_not_exists=True):
"""
Create the directory to store the images.
Create a sqlite table to persist the file urls
"""
dir_path = Path(table.file_url)
try:
dir_path.mkdir(parents=True)
except FileExistsError:
if if_not_exists:
return True
error = "Failed to load the image as directory \
already exists: {}".format(
dir_path
)
logger.error(error)
raise FileExistsError(error)
self._rdb_handler.create(self._create_metadata_table(table))
return True
def drop(self, table: TableCatalogEntry):
try:
dir_path = Path(table.file_url)
shutil.rmtree(str(dir_path))
metadata_table = self.METHOD_NAME(table)
self._rdb_handler.drop(metadata_table)
# remove the metadata table from the catalog
self.db.catalog().delete_table_catalog_entry(metadata_table)
except Exception as e:
err_msg = f"Failed to drop the image table {e}"
logger.exception(err_msg)
raise Exception(err_msg)
def delete(self, table: TableCatalogEntry, rows: Batch):
try:
media_metadata_table = self.METHOD_NAME(table)
for media_file_path in rows.file_paths():
dst_file_name = self._xform_file_url_to_file_name(Path(media_file_path))
image_file = Path(table.file_url) / dst_file_name
self._rdb_handler.delete(
media_metadata_table,
where_clause={
media_metadata_table.identifier_column: str(media_file_path)
},
)
image_file.unlink()
except Exception as e:
error = f"Deleting file path {media_file_path} failed with exception {e}"
logger.exception(error)
raise RuntimeError(error)
return True
def write(self, table: TableCatalogEntry, rows: Batch):
try:
dir_path = Path(table.file_url)
copied_files = []
for media_file_path in rows.file_paths():
media_file = Path(media_file_path)
dst_file_name = self._xform_file_url_to_file_name(media_file)
dst_path = dir_path / dst_file_name
if dst_path.exists():
raise FileExistsError(
f"Duplicate File: {media_file} already exists in the table {table.name}"
)
src_path = Path.cwd() / media_file
os.symlink(src_path, dst_path)
copied_files.append(dst_path)
# assuming sql write is an atomic operation
self._rdb_handler.write(
self.METHOD_NAME(table),
Batch(pd.DataFrame({"file_url": list(rows.file_paths())})),
)
except Exception as e:
# delete the copied_files
for file in copied_files:
logger.info(f"Rollback file {file}")
file.unlink()
logger.exception(str(e))
raise RuntimeError(str(e))
else:
return True
def rename(self, old_table: TableCatalogEntry, new_name: TableInfo):
try:
self.db.catalog().rename_table_catalog_entry(old_table, new_name)
except Exception as e:
raise Exception(f"Failed to rename table {new_name} with exception {e}") | null |
363 | from typing import Iterable
from typing import Optional
from typing import Union
from .request import Request
from .response import Response
class CORSMiddleware(object):
"""CORS Middleware.
This middleware provides a simple out-of-the box CORS policy, including handling
of preflighted requests from the browser.
See also:
* https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
* https://www.w3.org/TR/cors/#resource-processing-model
Keyword Arguments:
allow_origins (Union[str, Iterable[str]]): List of origins to allow (case
sensitive). The string ``'*'`` acts as a wildcard, matching every origin.
(default ``'*'``).
expose_headers (Optional[Union[str, Iterable[str]]]): List of additional
response headers to expose via the ``Access-Control-Expose-Headers``
header. These headers are in addition to the CORS-safelisted ones:
``Cache-Control``, ``Content-Language``, ``Content-Length``,
``Content-Type``, ``Expires``, ``Last-Modified``, ``Pragma``.
(default ``None``).
See also:
https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Access-Control-Expose-Headers
allow_credentials (Optional[Union[str, Iterable[str]]]): List of origins
(case sensitive) for which to allow credentials via the
``Access-Control-Allow-Credentials`` header.
The string ``'*'`` acts as a wildcard, matching every allowed origin,
while ``None`` disallows all origins. This parameter takes effect only
if the origin is allowed by the ``allow_origins`` argument.
(Default ``None``).
"""
def __init__(
self,
allow_origins: Union[str, Iterable[str]] = '*',
expose_headers: Optional[Union[str, Iterable[str]]] = None,
allow_credentials: Optional[Union[str, Iterable[str]]] = None,
):
if allow_origins == '*':
self.allow_origins = allow_origins
else:
if isinstance(allow_origins, str):
allow_origins = [allow_origins]
self.allow_origins = frozenset(allow_origins)
if '*' in self.allow_origins:
raise ValueError(
'The wildcard string "*" may only be passed to allow_origins as a '
'string literal, not inside an iterable.'
)
if expose_headers is not None and not isinstance(expose_headers, str):
expose_headers = ', '.join(expose_headers)
self.expose_headers = expose_headers
if allow_credentials is None:
allow_credentials = frozenset()
elif allow_credentials != '*':
if isinstance(allow_credentials, str):
allow_credentials = [allow_credentials]
allow_credentials = frozenset(allow_credentials)
if '*' in allow_credentials:
raise ValueError(
'The wildcard string "*" may only be passed to allow_credentials '
'as a string literal, not inside an iterable.'
)
self.allow_credentials = allow_credentials
def process_response(self, req: Request, resp: Response, resource, req_succeeded):
"""Implement the CORS policy for all routes.
This middleware provides a simple out-of-the box CORS policy,
including handling of preflighted requests from the browser.
See also: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
See also: https://www.w3.org/TR/cors/#resource-processing-model
"""
origin = req.get_header('Origin')
if origin is None:
return
if self.allow_origins != '*' and origin not in self.allow_origins:
return
if resp.get_header('Access-Control-Allow-Origin') is None:
set_origin = '*' if self.allow_origins == '*' else origin
if self.allow_credentials == '*' or origin in self.allow_credentials:
set_origin = origin
resp.set_header('Access-Control-Allow-Credentials', 'true')
resp.set_header('Access-Control-Allow-Origin', set_origin)
if self.expose_headers:
resp.set_header('Access-Control-Expose-Headers', self.expose_headers)
if (
req_succeeded
and req.method == 'OPTIONS'
and req.get_header('Access-Control-Request-Method')
):
# NOTE(kgriffs): This is a CORS preflight request. Patch the
# response accordingly.
allow = resp.get_header('Allow')
resp.delete_header('Allow')
allow_headers = req.get_header(
'Access-Control-Request-Headers', default='*'
)
resp.set_header('Access-Control-Allow-Methods', allow)
resp.set_header('Access-Control-Allow-Headers', allow_headers)
resp.set_header('Access-Control-Max-Age', '86400') # 24 hours
async def METHOD_NAME(self, *args):
self.process_response(*args) | null |
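# Illustrative usage sketch (added, not part of this module): assuming this
# middleware is exposed through falcon's public API, it would typically be
# registered on the application; the origin and header names are placeholders.
#
#     import falcon
#     app = falcon.App(middleware=falcon.CORSMiddleware(
#         allow_origins='https://example.com',
#         expose_headers=['X-Request-Id'],
#     ))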
364 | # Copyright (c) ZenML GmbH 2022. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Local ZenML server deployment."""
import logging
import sys
from typing import TYPE_CHECKING, Optional
from tests.harness.deployment.base import (
LOCAL_ZENML_SERVER_DEFAULT_PORT,
BaseTestDeployment,
)
from tests.harness.deployment.local_default import LocalDefaultTestDeployment
from tests.harness.model import (
DeploymentConfig,
DeploymentSetup,
DeploymentStoreConfig,
DeploymentType,
)
if TYPE_CHECKING:
from zenml.zen_server.deploy.deployment import ServerDeployment
class ServerLocalTestDeployment(BaseTestDeployment):
"""A deployment that runs a ZenML server as a background process."""
def __init__(self, config: DeploymentConfig) -> None:
"""Initializes a local ZenML server deployment.
Args:
config: The configuration for the deployment.
"""
super().__init__(config)
# The server local deployment is built on top of a local default
# deployment because the server is provisioned through the client
self.default_deployment = LocalDefaultTestDeployment(config)
@property
def server(self) -> Optional["ServerDeployment"]:
"""Returns the ZenML server corresponding to this configuration.
Returns:
The server for the deployment if it exists, None otherwise.
"""
from zenml.enums import ServerProviderType
from zenml.zen_server.deploy.deployer import ServerDeployer
# Managing the local server deployment is done through a default
# local deployment with the same config.
with self.default_deployment.connect():
deployer = ServerDeployer()
servers = deployer.list_servers(
provider_type=ServerProviderType.LOCAL
)
if not servers:
return None
return servers[0]
@property
def is_running(self) -> bool:
"""Returns whether the ZenML server is running.
Returns:
True if the server is running, False otherwise.
"""
server = self.server
if server is not None and server.is_running:
return True
return False
def up(self) -> None:
"""Starts the ZenML deployment.
Raises:
RuntimeError: If the deployment is not supported on the host OS.
"""
from zenml.enums import ServerProviderType
from zenml.utils.networking_utils import scan_for_available_port
from zenml.zen_server.deploy.deployer import ServerDeployer
from zenml.zen_server.deploy.deployment import ServerDeploymentConfig
if sys.platform == "win32":
raise RuntimeError(
"Running the ZenML server locally as a background process is "
"not supported on Windows."
)
else:
pass
if self.is_running:
logging.info(
f"Deployment '{self.config.name}' is already running. "
f"Skipping provisioning."
)
return
self.default_deployment.up()
# Managing the local server deployment is done through the default
# deployment with the same config.
with self.default_deployment.connect():
port = scan_for_available_port(LOCAL_ZENML_SERVER_DEFAULT_PORT)
if port is None:
raise RuntimeError(
"Could not find an available port for the ZenML server."
)
deployer = ServerDeployer()
server_config = ServerDeploymentConfig(
name=self.config.name,
provider=ServerProviderType.LOCAL,
port=port,
)
deployer.deploy_server(server_config)
logging.info(
f"Started ZenML server for deployment '{self.config.name}'."
)
def down(self) -> None:
"""Stops the ZenML deployment."""
from zenml.zen_server.deploy.deployer import ServerDeployer
server = self.server
if server is None:
logging.info(
f"Deployment '{self.config.name}' is no longer running. "
)
return
# Managing the local server deployment is done through the default
# deployment with the same config.
with self.default_deployment.connect():
deployer = ServerDeployer()
deployer.remove_server(server.config.name)
self.default_deployment.down()
def METHOD_NAME(self) -> Optional[DeploymentStoreConfig]:
"""Returns the store config for the deployment.
Returns:
The store config for the deployment if it is running, None
otherwise.
Raises:
RuntimeError: If the deployment is not running.
"""
from zenml.zen_stores.base_zen_store import (
DEFAULT_PASSWORD,
DEFAULT_USERNAME,
)
if not self.is_running:
raise RuntimeError(
f"The '{self.config.name}' deployment is not running."
)
server = self.server
if (
server is None
or server.status is None
or server.status.url is None
):
raise RuntimeError(
f"The '{self.config.name}' deployment is not running."
)
return DeploymentStoreConfig(
url=server.status.url,
username=DEFAULT_USERNAME,
password=DEFAULT_PASSWORD,
)
ServerLocalTestDeployment.register_deployment_class(
type=DeploymentType.SERVER, setup=DeploymentSetup.DEFAULT
) | null |
365 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkretailcloud.endpoint import endpoint_data
class CreateClusterRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'retailcloud', '2018-03-13', 'CreateCluster')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_BusinessCode(self): # String
return self.get_query_params().get('BusinessCode')
def set_BusinessCode(self, BusinessCode): # String
self.add_query_param('BusinessCode', BusinessCode)
def get_CreateWithLogIntegration(self): # Boolean
return self.get_query_params().get('CreateWithLogIntegration')
def set_CreateWithLogIntegration(self, CreateWithLogIntegration): # Boolean
self.add_query_param('CreateWithLogIntegration', CreateWithLogIntegration)
def get_Vswitchidss(self): # RepeatList
return self.get_query_params().get('Vswitchids')
def set_Vswitchidss(self, Vswitchids): # RepeatList
for depth1 in range(len(Vswitchids)):
self.add_query_param('Vswitchids.' + str(depth1 + 1), Vswitchids[depth1])
def get_CloudMonitorFlags(self): # Integer
return self.get_query_params().get('CloudMonitorFlags')
def set_CloudMonitorFlags(self, CloudMonitorFlags): # Integer
self.add_query_param('CloudMonitorFlags', CloudMonitorFlags)
def get_ClusterEnvType(self): # String
return self.get_query_params().get('ClusterEnvType')
def set_ClusterEnvType(self, ClusterEnvType): # String
self.add_query_param('ClusterEnvType', ClusterEnvType)
def METHOD_NAME(self): # Boolean
return self.get_query_params().get('CreateWithArmsIntegration')
def set_CreateWithArmsIntegration(self, CreateWithArmsIntegration): # Boolean
self.add_query_param('CreateWithArmsIntegration', CreateWithArmsIntegration)
def get_KeyPair(self): # String
return self.get_query_params().get('KeyPair')
def set_KeyPair(self, KeyPair): # String
self.add_query_param('KeyPair', KeyPair)
def get_ClusterTitle(self): # String
return self.get_query_params().get('ClusterTitle')
def set_ClusterTitle(self, ClusterTitle): # String
self.add_query_param('ClusterTitle', ClusterTitle)
def get_PodCIDR(self): # String
return self.get_query_params().get('PodCIDR')
def set_PodCIDR(self, PodCIDR): # String
self.add_query_param('PodCIDR', PodCIDR)
def get_ClusterId(self): # Long
return self.get_query_params().get('ClusterId')
def set_ClusterId(self, ClusterId): # Long
self.add_query_param('ClusterId', ClusterId)
def get_ClusterType(self): # String
return self.get_query_params().get('ClusterType')
def set_ClusterType(self, ClusterType): # String
self.add_query_param('ClusterType', ClusterType)
def get_Password(self): # String
return self.get_query_params().get('Password')
def set_Password(self, Password): # String
self.add_query_param('Password', Password)
def get_SnatEntry(self): # Integer
return self.get_query_params().get('SnatEntry')
def set_SnatEntry(self, SnatEntry): # Integer
self.add_query_param('SnatEntry', SnatEntry)
def get_NetPlug(self): # String
return self.get_query_params().get('NetPlug')
def set_NetPlug(self, NetPlug): # String
self.add_query_param('NetPlug', NetPlug)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_RegionName(self): # String
return self.get_query_params().get('RegionName')
def set_RegionName(self, RegionName): # String
self.add_query_param('RegionName', RegionName)
def get_PrivateZone(self): # Boolean
return self.get_query_params().get('PrivateZone')
def set_PrivateZone(self, PrivateZone): # Boolean
self.add_query_param('PrivateZone', PrivateZone)
def get_ServiceCIDR(self): # String
return self.get_query_params().get('ServiceCIDR')
def set_ServiceCIDR(self, ServiceCIDR): # String
self.add_query_param('ServiceCIDR', ServiceCIDR)
def get_PublicSlb(self): # Integer
return self.get_query_params().get('PublicSlb')
def set_PublicSlb(self, PublicSlb): # Integer
self.add_query_param('PublicSlb', PublicSlb) | null |
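# Illustrative usage sketch (added, not part of the SDK): assuming the standard
# aliyunsdkcore client, the request above would typically be sent like this;
# credentials, region and cluster title are placeholders.
#
#     from aliyunsdkcore.client import AcsClient
#     client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#     request = CreateClusterRequest()
#     request.set_ClusterTitle('demo-cluster')
#     response = client.do_action_with_exception(request)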
366 | from pyrokinetics.gk_code import GKInputGENE
from pyrokinetics import template_dir
from pyrokinetics.local_geometry import LocalGeometryMiller
from pyrokinetics.local_species import LocalSpecies
from pyrokinetics.numerics import Numerics
from pathlib import Path
import numpy as np
import pytest
import sys
docs_dir = Path(__file__).parent.parent.parent / "docs"
sys.path.append(str(docs_dir))
from examples import example_JETTO # noqa
template_file = template_dir / "input.gene"
@pytest.fixture
def default_gene():
return GKInputGENE()
@pytest.fixture
def gene():
return GKInputGENE(template_file)
def test_read(gene):
"""Ensure a gene file can be read, and that the 'data' attribute is set"""
params = ["general", "box", "geometry"]
assert np.all(np.isin(params, list(gene.data)))
def test_read_str():
"""Ensure a gene file can be read as a string, and that the 'data' attribute is set"""
params = ["general", "box", "geometry"]
with open(template_file, "r") as f:
gene = GKInputGENE.from_str(f.read())
assert np.all(np.isin(params, list(gene.data)))
def test_verify_file_type(gene):
"""Ensure that 'verify_file_type' does not raise exception on GENE file"""
gene.verify_file_type(template_file)
@pytest.mark.parametrize(
"filename", ["input.gs2", "input.cgyro", "transp.cdf", "helloworld"]
)
def test_verify_file_type_bad_inputs(gene, filename):
"""Ensure that 'verify_file_type' raises exception on non-GENE file"""
with pytest.raises(Exception):
gene.verify_file_type(template_dir / filename)
def test_is_nonlinear(gene):
"""Expect template file to be linear. Modify it so that it is nonlinear."""
gene.data["general"]["nonlinear"] = 0
assert gene.is_linear()
assert not gene.is_nonlinear()
gene.data["general"]["nonlinear"] = 1
assert not gene.is_linear()
assert gene.is_nonlinear()
def test_add_flags(gene):
gene.add_flags({"foo": {"bar": "baz"}})
assert gene.data["foo"]["bar"] == "baz"
def METHOD_NAME(gene):
# TODO test it has the correct values
local_geometry = gene.get_local_geometry()
assert isinstance(local_geometry, LocalGeometryMiller)
def test_get_local_species(gene):
local_species = gene.get_local_species()
assert isinstance(local_species, LocalSpecies)
assert local_species.nspec == 2
assert len(gene.data["species"]) == 2
# Ensure you can index gene.data["species"] (doesn't work on some f90nml versions)
assert gene.data["species"][0]
assert gene.data["species"][1]
assert local_species["electron"]
assert local_species["ion1"]
# TODO test it has the correct values
def test_get_numerics(gene):
# TODO test it has the correct values
numerics = gene.get_numerics()
assert isinstance(numerics, Numerics)
def test_write(tmp_path, gene):
"""Ensure a gene file can be written, and that no info is lost in the process"""
# Get template data
local_geometry = gene.get_local_geometry()
local_species = gene.get_local_species()
numerics = gene.get_numerics()
# Set output path
filename = tmp_path / "input.in"
# Write out a new input file
gene_writer = GKInputGENE()
gene_writer.set(local_geometry, local_species, numerics)
# Ensure you can index gene.data["species"] (doesn't work on some f90nml versions)
assert len(gene_writer.data["species"]) == 2
assert gene_writer.data["species"][0]
assert gene_writer.data["species"][1]
# Write to disk
gene_writer.write(filename)
# Ensure a new file exists
assert Path(filename).exists()
# Ensure it is a valid file
GKInputGENE().verify_file_type(filename)
gene_reader = GKInputGENE(filename)
new_local_geometry = gene_reader.get_local_geometry()
assert local_geometry.shat == new_local_geometry.shat
new_local_species = gene_reader.get_local_species()
assert local_species.nspec == new_local_species.nspec
new_numerics = gene_reader.get_numerics()
assert numerics.delta_time == new_numerics.delta_time
def test_species_order(tmp_path):
pyro = example_JETTO.main(tmp_path)
# Reverse species order so electron is last
pyro.local_species.names = pyro.local_species.names[::-1]
pyro.gk_code = "GENE"
pyro.write_gk_file(file_name=tmp_path / "input.in")
assert Path(tmp_path / "input.in").exists() | null |
367 | import re
import warnings
from contextlib import contextmanager
import pymssql # pylint: disable=import-error
from Orange.data import StringVariable, TimeVariable, ContinuousVariable, DiscreteVariable
from Orange.data.sql.backend import Backend
from Orange.data.sql.backend.base import ToSql, BackendError
def METHOD_NAME(ex: Exception) -> str:
try:
return ex.args[0][1].decode().splitlines()[-1]
except: # pylint: disable=bare-except
return str(ex)
class PymssqlBackend(Backend):
display_name = "SQL Server"
def __init__(self, connection_params):
connection_params["server"] = connection_params.pop("host", None)
for key in list(connection_params):
if connection_params[key] is None:
del connection_params[key]
super().__init__(connection_params)
try:
self.connection = pymssql.connect(login_timeout=5, **connection_params)
except pymssql.Error as ex:
raise BackendError(METHOD_NAME(ex)) from ex
except ValueError:
# ValueError is raised when 'server' contains "\\"
raise BackendError("Incorrect format of connection details")
def list_tables_query(self, schema=None):
return """
SELECT [TABLE_SCHEMA], [TABLE_NAME]
FROM information_schema.tables
WHERE TABLE_TYPE in ('VIEW' ,'BASE TABLE')
ORDER BY [TABLE_NAME]
"""
def quote_identifier(self, name):
return "[{}]".format(name)
def unquote_identifier(self, quoted_name):
return quoted_name[1:-1]
def create_sql_query(self, table_name, fields, filters=(),
group_by=None, order_by=None, offset=None, limit=None,
use_time_sample=None):
sql = ["SELECT"]
if limit and not offset:
sql.extend(["TOP", str(limit)])
sql.append(', '.join(fields))
sql.extend(["FROM", table_name])
if use_time_sample:
sql.append("TABLESAMPLE system_time(%i)" % use_time_sample)
if filters:
sql.extend(["WHERE", " AND ".join(filters)])
if group_by:
sql.extend(["GROUP BY", ", ".join(group_by)])
if offset and not order_by:
order_by = fields[0].split("AS")[1:]
if order_by:
sql.extend(["ORDER BY", ",".join(order_by)])
if offset:
sql.extend(["OFFSET", str(offset), "ROWS"])
if limit:
sql.extend(["FETCH FIRST", str(limit), "ROWS ONLY"])
return " ".join(sql)
@contextmanager
def execute_sql_query(self, query, params=()):
try:
with self.connection.cursor() as cur:
cur.execute(query, *params)
yield cur
except pymssql.Error as ex:
raise BackendError(METHOD_NAME(ex)) from ex
def create_variable(self, field_name, field_metadata, type_hints, inspect_table=None):
if field_name in type_hints:
var = type_hints[field_name]
else:
var = self._guess_variable(field_name, field_metadata,
inspect_table)
field_name_q = self.quote_identifier(field_name)
if var.is_continuous:
if isinstance(var, TimeVariable):
var.to_sql = ToSql("DATEDIFF(s, '1970-01-01 00:00:00', {})".format(field_name_q))
else:
var.to_sql = ToSql(field_name_q)
else: # discrete or string
var.to_sql = ToSql(field_name_q)
return var
def _guess_variable(self, field_name, field_metadata, inspect_table):
# pylint: disable=import-error
from pymssql import STRING, NUMBER, DATETIME, DECIMAL
type_code, *_ = field_metadata
if type_code in (NUMBER, DECIMAL):
return ContinuousVariable(field_name)
if type_code == DATETIME:
tv = TimeVariable(field_name)
tv.have_date = True
tv.have_time = True
return tv
if type_code == STRING:
if inspect_table:
values = self.get_distinct_values(field_name, inspect_table)
if values:
return DiscreteVariable(field_name, values)
return StringVariable(field_name)
EST_ROWS_RE = re.compile(r'StatementEstRows="(\d+)"')
def count_approx(self, query):
with self.connection.cursor() as cur:
try:
cur.execute("SET SHOWPLAN_XML ON")
try:
cur.execute(query)
result = cur.fetchone()
match = self.EST_ROWS_RE.search(result[0])
if not match:
# Either StatementEstRows was not found or a float was
# received. If it is a float, the server's statistics are
# most likely out of date and the estimate is unreliable.
# In that case it is preferable to return None so that an
# exact count is used instead.
return None
return int(match.group(1))
finally:
cur.execute("SET SHOWPLAN_XML OFF")
except pymssql.Error as ex:
if "SHOWPLAN permission denied" in str(ex):
warnings.warn("SHOWPLAN permission denied, count approximates will not be used")
return None
raise BackendError(METHOD_NAME(ex)) from ex
def distinct_values_query(self, field_name: str, table_name: str) -> str:
field = self.quote_identifier(field_name)
return self.create_sql_query(
table_name,
[field],
# Cast - workaround for collations that are neither case-sensitive
# nor sensitive to UTF characters
# DATALENGTH - workaround for string comparisons that ignore trailing
# spaces; two strings differing only in trailing spaces would be
# grouped together if DATALENGTH were not used
group_by=[f"{field}, Cast({field} as binary), DATALENGTH({field})"],
order_by=[field],
limit=21,
) | null |
368 | import pytest
@pytest.fixture
def user_data_model():
from alfasim_sdk._internal.models import data_model
from alfasim_sdk._internal.types import BaseField
class ValidType(BaseField):
pass
@data_model(icon="model.png", caption="PLUGIN DEV MODEL")
class Model:
valid_attribute = ValidType(caption="valid")
return Model
@pytest.fixture
def user_data_container(user_data_model):
from alfasim_sdk._internal.models import container_model
from alfasim_sdk._internal.types import BaseField
class ValidType(BaseField):
pass
@container_model(
model=user_data_model, icon="container.png", caption="PLUGIN DEV CONTAINER"
)
class Container:
container_valid_attribute = ValidType(caption="valid")
return Container
def test_data_model(user_data_model):
import attr
# Attributes from the class should be accessed via _alfasim_metadata
assert user_data_model._alfasim_metadata["caption"] == "PLUGIN DEV MODEL"
assert user_data_model._alfasim_metadata["icon"] == "model.png"
# "data_model" should not have references to others model
assert user_data_model._alfasim_metadata["model"] is None
# Attributes defined by the user should be accessed via attr fields
assert attr.fields(user_data_model).valid_attribute is not None
def METHOD_NAME(user_data_container):
import attr
assert user_data_container._alfasim_metadata["model"] is not None
assert "Model" in str(user_data_container._alfasim_metadata["model"])
assert user_data_container._alfasim_metadata["caption"] == "PLUGIN DEV CONTAINER"
assert user_data_container._alfasim_metadata["icon"] == "container.png"
assert attr.fields(user_data_container).container_valid_attribute is not None
def test_invalid_attribute():
from alfasim_sdk._internal.models import data_model
from alfasim_sdk._internal.types import BaseField
class ValidType(BaseField):
pass
error_msg = "Error defining _invalid_attribute, attributes starting with '_' are not allowed"
with pytest.raises(TypeError, match=error_msg):
@data_model(icon="model.png", caption="PLUGIN DEV MODEL")
class ModelPrivateAttribute: # pylint: disable=unused-variable
_invalid_attribute = ValidType(caption="invalid")
class Invalid(object):
pass
error_msg = (
"Error defining invalid, attributes must be a valid type defined by alfasim_sdk"
)
with pytest.raises(TypeError, match=error_msg):
@data_model(icon="model.png", caption="PLUGIN DEV MODEL")
class Model: # pylint: disable=unused-variable
invalid = Invalid()
def test_attribute_order():
from alfasim_sdk._internal.models import data_model
from alfasim_sdk._internal.types import (
Boolean,
Reference,
TracerType,
Enum,
String,
Quantity,
)
@data_model(icon="", caption="caption")
class Model:
boolean = Boolean(value=True, caption="caption")
data_reference = Reference(ref_type=TracerType, caption="caption")
enum = Enum(values=["value_1", "value_2"], caption="caption")
string = String(value="value", caption="caption")
quantity = Quantity(value=1, unit="m", caption="caption")
expected_order = ["boolean", "data_reference", "enum", "string", "quantity"]
assert [attr.name for attr in Model.__attrs_attrs__] == expected_order
def test_check_model_in_container_model():
from alfasim_sdk._internal.models import container_model, data_model
from alfasim_sdk._internal.types import String
@data_model(caption="The child")
class Child:
name = String(value="A child", caption="Name")
@container_model(caption="The parent", model=Child)
class Parent:
name = String(value="A parent", caption="Name")
with pytest.raises(TypeError):
@container_model(caption="The grand parent", model=Parent)
class GrandParent: # pragma: no cover (`container_model` is expected to raise)
name = String(value="A grand parent", caption="Name") | null |
369 | import asyncio
from typing import (
TYPE_CHECKING,
Any,
Callable,
Collection,
Optional,
Type,
)
import aiohttp
from requests.exceptions import (
ConnectionError,
HTTPError,
Timeout,
TooManyRedirects,
)
from web3.types import (
AsyncMiddlewareCoroutine,
RPCEndpoint,
RPCResponse,
)
if TYPE_CHECKING:
from web3 import ( # noqa: F401
AsyncWeb3,
Web3,
)
whitelist = [
"admin",
"miner",
"net",
"txpool",
"testing",
"evm",
"eth_protocolVersion",
"eth_syncing",
"eth_coinbase",
"eth_mining",
"eth_hashrate",
"eth_chainId",
"eth_gasPrice",
"eth_accounts",
"eth_blockNumber",
"eth_getBalance",
"eth_getStorageAt",
"eth_getProof",
"eth_getCode",
"eth_getBlockByNumber",
"eth_getBlockByHash",
"eth_getBlockTransactionCountByNumber",
"eth_getBlockTransactionCountByHash",
"eth_getUncleCountByBlockNumber",
"eth_getUncleCountByBlockHash",
"eth_getTransactionByHash",
"eth_getTransactionByBlockHashAndIndex",
"eth_getTransactionByBlockNumberAndIndex",
"eth_getTransactionReceipt",
"eth_getTransactionCount",
"eth_getRawTransactionByHash",
"eth_call",
"eth_estimateGas",
"eth_newBlockFilter",
"eth_newPendingTransactionFilter",
"eth_newFilter",
"eth_getFilterChanges",
"eth_getFilterLogs",
"eth_getLogs",
"eth_uninstallFilter",
"eth_getCompilers",
"eth_getWork",
"eth_sign",
"eth_signTypedData",
"eth_sendRawTransaction",
"personal_importRawKey",
"personal_newAccount",
"personal_listAccounts",
"personal_listWallets",
"personal_lockAccount",
"personal_unlockAccount",
"personal_ecRecover",
"personal_sign",
"personal_signTypedData",
]
def check_if_retry_on_failure(method: RPCEndpoint) -> bool:
root = method.split("_")[0]
if root in whitelist:
return True
elif method in whitelist:
return True
else:
return False
def exception_retry_middleware(
make_request: Callable[[RPCEndpoint, Any], RPCResponse],
_w3: "Web3",
errors: Collection[Type[BaseException]],
retries: int = 5,
) -> Callable[[RPCEndpoint, Any], RPCResponse]:
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]:
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
except tuple(errors):
if i < retries - 1:
continue
else:
raise
return None
else:
return make_request(method, params)
return middleware
def http_retry_request_middleware(
make_request: Callable[[RPCEndpoint, Any], Any], w3: "Web3"
) -> Callable[[RPCEndpoint, Any], Any]:
return exception_retry_middleware(
make_request, w3, (ConnectionError, HTTPError, Timeout, TooManyRedirects)
)
async def METHOD_NAME(
make_request: Callable[[RPCEndpoint, Any], Any],
_async_w3: "AsyncWeb3",
errors: Collection[Type[BaseException]],
retries: int = 5,
backoff_factor: float = 0.3,
) -> AsyncMiddlewareCoroutine:
"""
Creates middleware that retries failed HTTP requests.
Is a default middleware for AsyncHTTPProvider.
"""
async def middleware(method: RPCEndpoint, params: Any) -> Optional[RPCResponse]:
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return await make_request(method, params)
except tuple(errors):
if i < retries - 1:
await asyncio.sleep(backoff_factor)
continue
else:
raise
return None
else:
return await make_request(method, params)
return middleware
async def async_http_retry_request_middleware(
make_request: Callable[[RPCEndpoint, Any], Any], async_w3: "AsyncWeb3"
) -> Callable[[RPCEndpoint, Any], Any]:
return await METHOD_NAME(
make_request,
async_w3,
(TimeoutError, aiohttp.ClientError),
) | null |
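# Illustrative usage sketch (added, not part of web3.py): exception_retry_middleware
# can be composed into a middleware with a custom error set; the provider URL and
# retry count are placeholders.
#
#     from web3 import Web3
#     w3 = Web3(Web3.HTTPProvider("http://localhost:8545"))
#     retry_mw = lambda make_request, w3: exception_retry_middleware(
#         make_request, w3, (ConnectionError, Timeout), retries=3
#     )
#     w3.middleware_onion.add(retry_mw)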
370 | import logging
import operator
from functools import reduce
from time import sleep
from typing import Any, Dict, List
import boto3
import botocore.loaders as boto_loader
import botocore.regions as boto_regions
from botocore.config import Config as BotoConfig
from botocore.exceptions import ClientError, NoCredentialsError, ProfileNotFound
from taskcat.exceptions import TaskCatException
LOG = logging.getLogger(__name__)
REGIONAL_ENDPOINT_SERVICES = ["sts"]
class Boto3Cache:
RETRIES = 10
BACKOFF = 2
DELAY = 0.1
CLIENT_THROTTLE_RETRIES = 20
def __init__(self, _boto3=boto3):
self._boto3 = _boto3
self._session_cache: Dict[str, Dict[str, boto3.Session]] = {}
self._client_cache: Dict[str, Dict[str, Dict[str, boto3.client]]] = {}
self._resource_cache: Dict[str, Dict[str, Dict[str, boto3.resource]]] = {}
self._account_info: Dict[str, Dict[str, str]] = {}
self._lock_cache_update = False
def session(self, profile: str = "default", region: str = None) -> boto3.Session:
region = self._get_region(region, profile)
try:
session = self._cache_lookup(
self._session_cache,
[profile, region],
self._boto3.Session,
[],
{"region_name": region, "profile_name": profile},
)
except ProfileNotFound:
if profile != "default":
raise
session = self._boto3.Session(region_name=region)
self._cache_set(self._session_cache, [profile, region], session)
return session
def client(
self, service: str, profile: str = "default", region: str = None
) -> boto3.client:
region = self._get_region(region, profile)
session = self.session(profile, region)
kwargs = {"config": BotoConfig(retries={"max_attempts": 20})}
if service in REGIONAL_ENDPOINT_SERVICES:
kwargs.update({"endpoint_url": self._get_endpoint_url(service, region)})
return self._cache_lookup(
self._client_cache,
[profile, region, service],
session.client,
[service],
kwargs,
)
def resource(
self, service: str, profile: str = "default", region: str = None
) -> boto3.resource:
region = self._get_region(region, profile)
session = self.session(profile, region)
return self._cache_lookup(
self._resource_cache,
[profile, region, service],
session.resource,
[service],
)
def partition(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["partition"]
def account_id(self, profile: str = "default") -> str:
return self._cache_lookup(
self._account_info, [profile], self._get_account_info, [profile]
)["account_id"]
def _get_account_info(self, profile):
partition, region = self._get_partition(profile)
session = self.session(profile, region)
sts_client = session.client("sts", region_name=region)
try:
account_id = sts_client.get_caller_identity()["Account"]
except ClientError as e:
if e.response["Error"]["Code"] == "AccessDenied":
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
raise
except NoCredentialsError as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
except ProfileNotFound as e:
# pylint: disable=raise-missing-from
raise TaskCatException(
f"Not able to fetch account number from {region} using profile "
f"{profile}. {str(e)}"
)
return {"partition": partition, "account_id": account_id}
def _make_parent_keys(self, cache: dict, keys: list):
if keys:
if not cache.get(keys[0]):
cache[keys[0]] = {}
self._make_parent_keys(cache[keys[0]], keys[1:])
def _cache_lookup(self, cache, key_list, create_func, args=None, kwargs=None):
try:
value = self._cache_get(cache, key_list)
except KeyError:
args = [] if not args else args
kwargs = {} if not kwargs else kwargs
value = self.METHOD_NAME(create_func, args, kwargs)
self._cache_set(cache, key_list, value)
return value
def METHOD_NAME(self, create_func, args, kwargs):
retries = self.RETRIES
delay = self.DELAY
while retries:
try:
return create_func(*args, **kwargs)
except KeyError as e:
if str(e) not in ["'credential_provider'", "'endpoint_resolver'"]:
raise
backoff = (self.RETRIES - retries + delay) * self.BACKOFF
sleep(backoff)
@staticmethod
def _get_endpoint_url(service, region):
data = boto_loader.create_loader().load_data("endpoints")
endpoint_data = boto_regions.EndpointResolver(data).construct_endpoint(
service, region
)
if not endpoint_data:
raise TaskCatException(
f"unable to resolve endpoint for {service} in {region}"
)
return f"https://{service}.{region}.{endpoint_data['dnsSuffix']}"
@staticmethod
def _cache_get(cache: dict, key_list: List[str]):
return reduce(operator.getitem, key_list, cache)
def _cache_set(self, cache: dict, key_list: list, value: Any):
self._make_parent_keys(cache, key_list[:-1])
self._cache_get(cache, key_list[:-1])[key_list[-1]] = value
def _get_region(self, region, profile):
if not region:
region = self.get_default_region(profile)
return region
def _get_partition(self, profile):
partition_regions = [
("aws", "us-east-1"),
("aws-cn", "cn-north-1"),
("aws-us-gov", "us-gov-west-1"),
]
for partition, region in partition_regions:
try:
self.session(profile, region).client(
"sts", region_name=region
).get_caller_identity()
return (partition, region)
except ClientError as e:
if "InvalidClientTokenId" in str(e):
continue
raise
raise ValueError("cannot find suitable AWS partition")
def get_default_region(self, profile_name="default") -> str:
try:
if profile_name != "default":
region = self._boto3.session.Session(
profile_name=profile_name
).region_name
else:
region = self._boto3.session.Session().region_name
except ProfileNotFound:
if profile_name != "default":
raise
region = self._boto3.session.Session().region_name
if not region:
_, region = self._get_partition(profile_name)
LOG.warning(
"Region not set in credential chain, defaulting to {}".format(region)
)
return region | null |
371 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ListGroupAuthorizationRulesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'IoTCC', '2021-05-13', 'ListGroupAuthorizationRules','IoTCC')
self.set_method('POST')
def get_DestinationTypes(self): # RepeatList
return self.get_query_params().get('DestinationType')
def set_DestinationTypes(self, DestinationType): # RepeatList
for depth1 in range(len(DestinationType)):
self.add_query_param('DestinationType.' + str(depth1 + 1), DestinationType[depth1])
def get_Destinations(self): # RepeatList
return self.get_query_params().get('Destination')
def set_Destinations(self, Destination): # RepeatList
for depth1 in range(len(Destination)):
self.add_query_param('Destination.' + str(depth1 + 1), Destination[depth1])
def get_Type(self): # String
return self.get_query_params().get('Type')
def set_Type(self, Type): # String
self.add_query_param('Type', Type)
def get_Protocols(self): # RepeatList
return self.get_query_params().get('Protocol')
def set_Protocols(self, Protocol): # RepeatList
for depth1 in range(len(Protocol)):
self.add_query_param('Protocol.' + str(depth1 + 1), Protocol[depth1])
def get_AuthorizationRuleIdss(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleIds')
def set_AuthorizationRuleIdss(self, AuthorizationRuleIds): # RepeatList
for depth1 in range(len(AuthorizationRuleIds)):
self.add_query_param('AuthorizationRuleIds.' + str(depth1 + 1), AuthorizationRuleIds[depth1])
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_Policys(self): # RepeatList
return self.get_query_params().get('Policy')
def METHOD_NAME(self, Policy): # RepeatList
for depth1 in range(len(Policy)):
self.add_query_param('Policy.' + str(depth1 + 1), Policy[depth1])
def get_AuthorizationRuleStatuss(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleStatus')
def set_AuthorizationRuleStatuss(self, AuthorizationRuleStatus): # RepeatList
for depth1 in range(len(AuthorizationRuleStatus)):
self.add_query_param('AuthorizationRuleStatus.' + str(depth1 + 1), AuthorizationRuleStatus[depth1])
def get_IoTCloudConnectorGroupId(self): # String
return self.get_query_params().get('IoTCloudConnectorGroupId')
def set_IoTCloudConnectorGroupId(self, IoTCloudConnectorGroupId): # String
self.add_query_param('IoTCloudConnectorGroupId', IoTCloudConnectorGroupId)
def get_AuthorizationRuleNames(self): # RepeatList
return self.get_query_params().get('AuthorizationRuleName')
def set_AuthorizationRuleNames(self, AuthorizationRuleName): # RepeatList
for depth1 in range(len(AuthorizationRuleName)):
self.add_query_param('AuthorizationRuleName.' + str(depth1 + 1), AuthorizationRuleName[depth1])
def get_DestinationPorts(self): # RepeatList
return self.get_query_params().get('DestinationPort')
def set_DestinationPorts(self, DestinationPort): # RepeatList
for depth1 in range(len(DestinationPort)):
self.add_query_param('DestinationPort.' + str(depth1 + 1), DestinationPort[depth1])
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults) | null |
372 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkr_kvstore.endpoint import endpoint_data
class DescribeInstancesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeInstances','redisa')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_SearchKey(self): # String
return self.get_query_params().get('SearchKey')
def set_SearchKey(self, SearchKey): # String
self.add_query_param('SearchKey', SearchKey)
def get_NetworkType(self): # String
return self.get_query_params().get('NetworkType')
def set_NetworkType(self, NetworkType): # String
self.add_query_param('NetworkType', NetworkType)
def get_EngineVersion(self): # String
return self.get_query_params().get('EngineVersion')
def set_EngineVersion(self, EngineVersion): # String
self.add_query_param('EngineVersion', EngineVersion)
def get_InstanceClass(self): # String
return self.get_query_params().get('InstanceClass')
def METHOD_NAME(self, InstanceClass): # String
self.add_query_param('InstanceClass', InstanceClass)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_Expired(self): # String
return self.get_query_params().get('Expired')
def set_Expired(self, Expired): # String
self.add_query_param('Expired', Expired)
def get_SecurityToken(self): # String
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self, SecurityToken): # String
self.add_query_param('SecurityToken', SecurityToken)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_InstanceType(self): # String
return self.get_query_params().get('InstanceType')
def set_InstanceType(self, InstanceType): # String
self.add_query_param('InstanceType', InstanceType)
def get_EditionType(self): # String
return self.get_query_params().get('EditionType')
def set_EditionType(self, EditionType): # String
self.add_query_param('EditionType', EditionType)
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_InstanceStatus(self): # String
return self.get_query_params().get('InstanceStatus')
def set_InstanceStatus(self, InstanceStatus): # String
self.add_query_param('InstanceStatus', InstanceStatus)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_GlobalInstance(self): # Boolean
return self.get_query_params().get('GlobalInstance')
def set_GlobalInstance(self, GlobalInstance): # Boolean
self.add_query_param('GlobalInstance', GlobalInstance)
def get_PrivateIp(self): # String
return self.get_query_params().get('PrivateIp')
def set_PrivateIp(self, PrivateIp): # String
self.add_query_param('PrivateIp', PrivateIp)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_VSwitchId(self): # String
return self.get_query_params().get('VSwitchId')
def set_VSwitchId(self, VSwitchId): # String
self.add_query_param('VSwitchId', VSwitchId)
def get_InstanceIds(self): # String
return self.get_query_params().get('InstanceIds')
def set_InstanceIds(self, InstanceIds): # String
self.add_query_param('InstanceIds', InstanceIds)
def get_ArchitectureType(self): # String
return self.get_query_params().get('ArchitectureType')
def set_ArchitectureType(self, ArchitectureType): # String
self.add_query_param('ArchitectureType', ArchitectureType)
def get_VpcId(self): # String
return self.get_query_params().get('VpcId')
def set_VpcId(self, VpcId): # String
self.add_query_param('VpcId', VpcId)
def get_ZoneId(self): # String
return self.get_query_params().get('ZoneId')
def set_ZoneId(self, ZoneId): # String
self.add_query_param('ZoneId', ZoneId)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType) | null |
373 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklive.endpoint import endpoint_data
class AddCustomLiveStreamTranscodeRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'live', '2016-11-01', 'AddCustomLiveStreamTranscode','live')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResWithSource(self): # String
return self.get_query_params().get('ResWithSource')
def set_ResWithSource(self, ResWithSource): # String
self.add_query_param('ResWithSource', ResWithSource)
def get_Gop(self): # String
return self.get_query_params().get('Gop')
def set_Gop(self, Gop): # String
self.add_query_param('Gop', Gop)
def get_AudioCodec(self): # String
return self.get_query_params().get('AudioCodec')
def set_AudioCodec(self, AudioCodec): # String
self.add_query_param('AudioCodec', AudioCodec)
def get_KmsUID(self): # String
return self.get_query_params().get('KmsUID')
def set_KmsUID(self, KmsUID): # String
self.add_query_param('KmsUID', KmsUID)
def get_Height(self): # Integer
return self.get_query_params().get('Height')
def set_Height(self, Height): # Integer
self.add_query_param('Height', Height)
def get_App(self): # String
return self.get_query_params().get('App')
def set_App(self, App): # String
self.add_query_param('App', App)
def get_Profile(self): # Integer
return self.get_query_params().get('Profile')
def set_Profile(self, Profile): # Integer
self.add_query_param('Profile', Profile)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_ExtWithSource(self): # String
return self.get_query_params().get('ExtWithSource')
def set_ExtWithSource(self, ExtWithSource): # String
self.add_query_param('ExtWithSource', ExtWithSource)
def get_BitrateWithSource(self): # String
return self.get_query_params().get('BitrateWithSource')
def set_BitrateWithSource(self, BitrateWithSource): # String
self.add_query_param('BitrateWithSource', BitrateWithSource)
def get_Domain(self): # String
return self.get_query_params().get('Domain')
def set_Domain(self, Domain): # String
self.add_query_param('Domain', Domain)
def get_Template(self): # String
return self.get_query_params().get('Template')
def set_Template(self, Template): # String
self.add_query_param('Template', Template)
def get_Lazy(self): # String
return self.get_query_params().get('Lazy')
def set_Lazy(self, Lazy): # String
self.add_query_param('Lazy', Lazy)
def get_KmsKeyExpireInterval(self): # String
return self.get_query_params().get('KmsKeyExpireInterval')
def set_KmsKeyExpireInterval(self, KmsKeyExpireInterval): # String
self.add_query_param('KmsKeyExpireInterval', KmsKeyExpireInterval)
def get_TemplateType(self): # String
return self.get_query_params().get('TemplateType')
def set_TemplateType(self, TemplateType): # String
self.add_query_param('TemplateType', TemplateType)
def get_AudioProfile(self): # String
return self.get_query_params().get('AudioProfile')
def set_AudioProfile(self, AudioProfile): # String
self.add_query_param('AudioProfile', AudioProfile)
def get_EncryptParameters(self): # String
return self.get_query_params().get('EncryptParameters')
def set_EncryptParameters(self, EncryptParameters): # String
self.add_query_param('EncryptParameters', EncryptParameters)
def get_AudioChannelNum(self): # Integer
return self.get_query_params().get('AudioChannelNum')
def set_AudioChannelNum(self, AudioChannelNum): # Integer
self.add_query_param('AudioChannelNum', AudioChannelNum)
def get_FPS(self): # Integer
return self.get_query_params().get('FPS')
def METHOD_NAME(self, FPS): # Integer
self.add_query_param('FPS', FPS)
def get_AudioRate(self): # Integer
return self.get_query_params().get('AudioRate')
def set_AudioRate(self, AudioRate): # Integer
self.add_query_param('AudioRate', AudioRate)
def get_FpsWithSource(self): # String
return self.get_query_params().get('FpsWithSource')
def set_FpsWithSource(self, FpsWithSource): # String
self.add_query_param('FpsWithSource', FpsWithSource)
def get_AudioBitrate(self): # Integer
return self.get_query_params().get('AudioBitrate')
def set_AudioBitrate(self, AudioBitrate): # Integer
self.add_query_param('AudioBitrate', AudioBitrate)
def get_Width(self): # Integer
return self.get_query_params().get('Width')
def set_Width(self, Width): # Integer
self.add_query_param('Width', Width)
def get_VideoBitrate(self): # Integer
return self.get_query_params().get('VideoBitrate')
def set_VideoBitrate(self, VideoBitrate): # Integer
self.add_query_param('VideoBitrate', VideoBitrate)
def get_KmsKeyID(self): # String
return self.get_query_params().get('KmsKeyID')
def set_KmsKeyID(self, KmsKeyID): # String
self.add_query_param('KmsKeyID', KmsKeyID) | null |
374 | # Copyright 2021 Memgraph Ltd.
#
# Use of this software is governed by the Business Source License
# included in the file licenses/BSL.txt; by using this file, you agree to be bound by the terms of the Business Source
# License, and you may not use this file except in compliance with the Business Source License.
#
# As of the Change Date specified in that file, in accordance with
# the Business Source License, use of this software will be governed
# by the Apache License, Version 2.0, included in the file
# licenses/APL.txt.
import copy
import os
import subprocess
import sys
import time
import mgclient
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PROJECT_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
BUILD_DIR = os.path.join(PROJECT_DIR, "build")
MEMGRAPH_BINARY = os.path.join(BUILD_DIR, "memgraph")
def wait_for_server(port, delay=0.01):
cmd = ["nc", "-z", "-w", "1", "127.0.0.1", str(port)]
count = 0
while subprocess.call(cmd) != 0:
time.sleep(0.01)
if count > 10 / 0.01:
print("Could not wait for server on port", port, "to startup!")
sys.exit(1)
count += 1
time.sleep(delay)
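# Example (sketch): wait_for_server(7687) probes 127.0.0.1:7687 with `nc -z` until the
# port accepts connections, and exits the process after roughly 10 seconds of failed probes.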
def extract_bolt_port(args):
for arg_index, arg in enumerate(args):
if arg.startswith("--bolt-port="):
maybe_port = arg.split("=")[1]
if not maybe_port.isdigit():
raise Exception("Unable to read Bolt port after --bolt-port=.")
return int(maybe_port)
elif arg == "--bolt-port":
maybe_port = args[arg_index + 1]
if not maybe_port.isdigit():
raise Exception("Unable to read Bolt port after --bolt-port.")
return int(maybe_port)
return 7687
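# Example (sketch): extract_bolt_port(["--bolt-port", "7688"]) -> 7688 and
# extract_bolt_port(["--bolt-port=7690"]) -> 7690; without the flag it defaults to 7687.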
def replace_paths(path):
return path.replace("$PROJECT_DIR", PROJECT_DIR).replace("$SCRIPT_DIR", SCRIPT_DIR).replace("$BUILD_DIR", BUILD_DIR)
class MemgraphInstanceRunner:
def __init__(self, binary_path=MEMGRAPH_BINARY, use_ssl=False):
self.host = "127.0.0.1"
self.bolt_port = None
self.binary_path = binary_path
self.args = None
self.proc_mg = None
self.ssl = use_ssl
def METHOD_NAME(self, setup_queries):
if setup_queries is None:
return
        # Assumes the database instance is fresh, so no authentication is needed.
conn = mgclient.connect(host=self.host, port=self.bolt_port, sslmode=self.ssl)
conn.autocommit = True
cursor = conn.cursor()
for query_coll in setup_queries:
if isinstance(query_coll, str):
cursor.execute(query_coll)
elif isinstance(query_coll, list):
for query in query_coll:
cursor.execute(query)
cursor.close()
conn.close()
    # NOTE: Both query and get_connection may establish a new connection, so auth
    # details are required -> username/password should be optional arguments.
def query(self, query, conn=None, username="", password=""):
new_conn = conn is None
if new_conn:
conn = self.get_connection(username, password)
cursor = conn.cursor()
cursor.execute(query)
data = cursor.fetchall()
cursor.close()
if new_conn:
conn.close()
return data
def get_connection(self, username="", password=""):
conn = mgclient.connect(
host=self.host, port=self.bolt_port, sslmode=self.ssl, username=username, password=password
)
conn.autocommit = True
return conn
def start(self, restart=False, args=None, setup_queries=None):
if not restart and self.is_running():
return
self.stop()
if args is not None:
self.args = copy.deepcopy(args)
self.args = [replace_paths(arg) for arg in self.args]
args_mg = [
self.binary_path,
"--storage-wal-enabled",
"--storage-snapshot-interval-sec",
"300",
"--storage-properties-on-edges",
] + self.args
self.bolt_port = extract_bolt_port(args_mg)
self.proc_mg = subprocess.Popen(args_mg)
wait_for_server(self.bolt_port)
self.METHOD_NAME(setup_queries)
assert self.is_running(), "The Memgraph process died!"
def is_running(self):
if self.proc_mg is None:
return False
if self.proc_mg.poll() is not None:
return False
return True
def stop(self):
if not self.is_running():
return
self.proc_mg.terminate()
code = self.proc_mg.wait()
assert code == 0, "The Memgraph process exited with non-zero!"
def kill(self):
if not self.is_running():
return
self.proc_mg.kill()
code = self.proc_mg.wait()
assert code == -9, "The killed Memgraph process exited with non-nine!" | null |
375 | # Copyright 2017-2022 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipeline.hpc.logger import Logger
def _perform_command(action, msg, error_msg, skip_on_failure):
Logger.info(msg)
try:
action()
except RuntimeError as e:
Logger.warn(error_msg)
if not skip_on_failure:
raise RuntimeError(error_msg, e)
class GridEngineType:
SGE = "SGE"
SLURM = "SLURM"
def __init__(self):
pass
class AllocationRuleParsingError(RuntimeError):
pass
class AllocationRule:
ALLOWED_VALUES = ['$pe_slots', '$fill_up', '$round_robin']
def __init__(self, value):
if value in AllocationRule.ALLOWED_VALUES:
self.value = value
else:
            raise AllocationRuleParsingError('Wrong AllocationRule value, only %s are allowed!' % AllocationRule.ALLOWED_VALUES)
@staticmethod
def pe_slots():
return AllocationRule('$pe_slots')
@staticmethod
def fill_up():
return AllocationRule('$fill_up')
@staticmethod
def round_robin():
return AllocationRule('$round_robin')
@staticmethod
def fractional_rules():
return [AllocationRule.round_robin(), AllocationRule.fill_up()]
@staticmethod
def integral_rules():
return [AllocationRule.pe_slots()]
def __eq__(self, other):
if not isinstance(other, AllocationRule):
# don't attempt to compare against unrelated types
return False
return other.value == self.value
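# Example (sketch): AllocationRule.pe_slots() == AllocationRule('$pe_slots') evaluates to
# True, while AllocationRule('$unknown') raises AllocationRuleParsingError.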
class GridEngineJobState:
RUNNING = 'running'
PENDING = 'pending'
SUSPENDED = 'suspended'
ERROR = 'errored'
DELETED = 'deleted'
COMPLETED = 'completed'
UNKNOWN = 'unknown'
_letter_codes_to_states = {
# Job statuses: [SGE] + [SLURM]
RUNNING: ['r', 't', 'Rr', 'Rt'] + ['RUNNING'],
PENDING: ['qw', 'qw', 'hqw', 'hqw', 'hRwq', 'hRwq', 'hRwq', 'qw', 'qw'] + ['PENDING'],
SUSPENDED: ['s', 'ts', 'S', 'tS', 'T', 'tT', 'Rs', 'Rts', 'RS', 'RtS', 'RT', 'RtT'] + ['SUSPENDED', 'STOPPED'],
        ERROR: ['Eqw', 'Ehqw', 'EhRqw'] + ['DEADLINE', 'FAILED'],
DELETED: ['dr', 'dt', 'dRr', 'dRt', 'ds', 'dS', 'dT', 'dRs', 'dRS', 'dRT'] + ['DELETED', 'CANCELLED'],
COMPLETED: [] + ['COMPLETED', 'COMPLETING']
}
@staticmethod
def from_letter_code(code):
for key in GridEngineJobState._letter_codes_to_states:
if code in GridEngineJobState._letter_codes_to_states[key]:
return key
return GridEngineJobState.UNKNOWN
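# Example (sketch): GridEngineJobState.from_letter_code('qw') -> 'pending' and
# GridEngineJobState.from_letter_code('RUNNING') -> 'running'; unmapped codes -> 'unknown'.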
class GridEngineJob:
def __init__(self, id, root_id, name, user, state, datetime, hosts=None, cpu=0, gpu=0, mem=0, pe='local'):
self.id = id
self.root_id = root_id
self.name = name
self.user = user
self.state = state
self.datetime = datetime
self.hosts = hosts if hosts else []
self.cpu = cpu
self.gpu = gpu
self.mem = mem
self.pe = pe
def __repr__(self):
return str(self.__dict__)
class GridEngine:
def get_jobs(self):
pass
def disable_host(self, host):
"""
Disables host to prevent receiving new jobs from the queue.
This command does not abort currently running jobs.
        :param host: Host to be disabled.
"""
pass
def enable_host(self, host):
"""
Enables host to make it available to receive new jobs from the queue.
:param host: Host to be enabled.
"""
pass
def get_pe_allocation_rule(self, pe):
"""
        Returns the allocation rule of the given parallel environment.
        :param pe: Parallel environment whose allocation rule should be returned.
"""
pass
def delete_host(self, host, skip_on_failure=False):
"""
Completely deletes host from GE:
1. Shutdown host execution daemon.
2. Removes host from queue settings.
3. Removes host from host group.
4. Removes host from administrative hosts.
5. Removes host from GE.
:param host: Host to be removed.
        :param skip_on_failure: Specifies if the host deletion should continue even if some of
        the commands have failed.
"""
pass
def get_host_supplies(self):
pass
def METHOD_NAME(self, host):
pass
def get_engine_type(self):
pass
def is_valid(self, host):
"""
Validates host in GE checking corresponding execution host availability and its states.
:param host: Host to be checked.
:return: True if execution host is valid.
"""
return True
def kill_jobs(self, jobs, force=False):
"""
Kills jobs in GE.
:param jobs: Grid engine jobs.
:param force: Specifies if this command should be performed with -f flag.
"""
pass
class GridEngineDemandSelector:
def select(self, jobs):
pass
class GridEngineJobValidator:
def validate(self, jobs):
pass | null |
376 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkvpc.endpoint import endpoint_data
class ListPublicIpAddressPoolsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Vpc', '2016-04-28', 'ListPublicIpAddressPools','vpc')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_Isp(self): # String
return self.get_query_params().get('Isp')
def set_Isp(self, Isp): # String
self.add_query_param('Isp', Isp)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
def get_PublicIpAddressPoolIdss(self): # RepeatList
return self.get_query_params().get('PublicIpAddressPoolIds')
def set_PublicIpAddressPoolIdss(self, PublicIpAddressPoolIds): # RepeatList
for depth1 in range(len(PublicIpAddressPoolIds)):
self.add_query_param('PublicIpAddressPoolIds.' + str(depth1 + 1), PublicIpAddressPoolIds[depth1])
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_Tagss(self): # RepeatList
return self.get_query_params().get('Tags')
def set_Tagss(self, Tags): # RepeatList
for depth1 in range(len(Tags)):
if Tags[depth1].get('Key') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Key', Tags[depth1].get('Key'))
if Tags[depth1].get('Value') is not None:
self.add_query_param('Tags.' + str(depth1 + 1) + '.Value', Tags[depth1].get('Value'))
def METHOD_NAME(self): # String
return self.get_query_params().get('Name')
def set_Name(self, Name): # String
self.add_query_param('Name', Name)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
377 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeDisksFullStatusRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeDisksFullStatus','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_EventIds(self): # RepeatList
return self.get_query_params().get('EventId')
def set_EventIds(self, EventId): # RepeatList
for depth1 in range(len(EventId)):
self.add_query_param('EventId.' + str(depth1 + 1), EventId[depth1])
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_EventTimeStart(self): # String
return self.get_query_params().get('EventTime.Start')
def set_EventTimeStart(self, EventTimeStart): # String
self.add_query_param('EventTime.Start', EventTimeStart)
def get_ResourceGroupId(self): # String
return self.get_query_params().get('ResourceGroupId')
def set_ResourceGroupId(self, ResourceGroupId): # String
self.add_query_param('ResourceGroupId', ResourceGroupId)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def METHOD_NAME(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DiskIds(self): # RepeatList
return self.get_query_params().get('DiskId')
def set_DiskIds(self, DiskId): # RepeatList
for depth1 in range(len(DiskId)):
self.add_query_param('DiskId.' + str(depth1 + 1), DiskId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_EventTimeEnd(self): # String
return self.get_query_params().get('EventTime.End')
def set_EventTimeEnd(self, EventTimeEnd): # String
self.add_query_param('EventTime.End', EventTimeEnd)
def get_HealthStatus(self): # String
return self.get_query_params().get('HealthStatus')
def set_HealthStatus(self, HealthStatus): # String
self.add_query_param('HealthStatus', HealthStatus)
def get_EventType(self): # String
return self.get_query_params().get('EventType')
def set_EventType(self, EventType): # String
self.add_query_param('EventType', EventType)
def get_Status(self): # String
return self.get_query_params().get('Status')
def set_Status(self, Status): # String
self.add_query_param('Status', Status) | null |
378 | """CustomFCNMaskHead for OTX template."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
import torch
from mmdet.models.builder import HEADS
from mmdet.models.roi_heads.mask_heads.fcn_mask_head import FCNMaskHead
from otx.algorithms.common.adapters.mmdeploy.utils import is_mmdeploy_enabled
@HEADS.register_module()
class CustomFCNMaskHead(FCNMaskHead):
"""Custom FCN Mask Head for fast mask evaluation."""
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
        The original `FCNMaskHead.get_seg_masks` grid-samples the 28 x 28 masks up to the original image resolution.
        As a result, the resized masks occupy a large amount of memory and slow down inference.
        This method instead returns the raw 28 x 28 masks and defers resizing to bounding-box size to a post-processing step.
        Doing so saves memory and speeds up inference.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
scale_factor(ndarray | Tensor): If ``rescale is True``, box
coordinates are divided by this scale factor to fit
``ori_shape``.
rescale (bool): If True, the resulting masks will be rescaled to
``ori_shape``.
Returns:
list[list]: encoded masks. The c-th item in the outer list
corresponds to the c-th class. Given the c-th outer list, the
i-th item in that inner list is the mask for the i-th box with
class label c.
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid()
else:
# In AugTest, has been activated before
mask_pred = det_bboxes.new_tensor(mask_pred)
cls_segms = [[] for _ in range(self.num_classes)] # BG is not included in num_classes
labels = det_labels
N = len(mask_pred)
# The actual implementation split the input into chunks,
# and paste them chunk by chunk.
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for i in range(N):
mask = mask_pred[i]
if threshold >= 0:
mask = (mask >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
mask = (mask * 255).to(dtype=torch.uint8)
mask = mask.detach().cpu().numpy()
cls_segms[labels[i]].append(mask[0])
return cls_segms
def get_scaled_seg_masks(self, *args, **kwargs):
"""Original method "get_seg_mask" from FCNMaskHead. Used in Semi-SL algorithm."""
return super().get_seg_masks(*args, **kwargs)
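# A minimal post-processing sketch (an assumption, not part of OTX): resize one of the
# returned 28 x 28 masks to its bounding box and paste it onto a full-size canvas.
#   import cv2, numpy as np
#   x1, y1, x2, y2 = [int(v) for v in bbox[:4]]
#   small = cv2.resize(mask.astype(np.float32), (max(x2 - x1, 1), max(y2 - y1, 1)))
#   canvas = np.zeros(ori_shape[:2], dtype=np.uint8)
#   canvas[y1:y2, x1:x2] = (small >= 0.5).astype(np.uint8)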
if is_mmdeploy_enabled():
from mmdeploy.core import FUNCTION_REWRITER
@FUNCTION_REWRITER.register_rewriter(
"otx.algorithms.detection.adapters.mmdet.models." "heads.custom_fcn_mask_head.CustomFCNMaskHead.get_seg_masks"
)
def METHOD_NAME(
ctx, self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape, **kwargs
):
"""Rewrite `get_seg_masks` of `FCNMaskHead` for default backend.
Rewrite the get_seg_masks for only fcn_mask_head inference.
Args:
ctx (dict): context dict
self (CustomFCNMaskHead): CustomFCNMaskHead instance
mask_pred (Tensor): shape (n, #class, h, w).
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
kwargs (dict): other arguments
Returns:
Tensor: a mask of shape (N, img_h, img_w).
"""
mask_pred = mask_pred.sigmoid()
bboxes = det_bboxes[:, :4]
labels = det_labels
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0], device=bboxes.device)
mask_pred = mask_pred[box_inds, labels][:, None]
return mask_pred | null |
379 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class UpdateTaskDetailRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'devops-rdc', '2020-03-03', 'UpdateTaskDetail')
self.set_method('POST')
def get_Note(self): # String
return self.get_body_params().get('Note')
def set_Note(self, Note): # String
self.add_body_params('Note', Note)
def get_ExecutorId(self): # String
return self.get_body_params().get('ExecutorId')
def set_ExecutorId(self, ExecutorId): # String
self.add_body_params('ExecutorId', ExecutorId)
def get_StartDate(self): # String
return self.get_body_params().get('StartDate')
def set_StartDate(self, StartDate): # String
self.add_body_params('StartDate', StartDate)
def get_DelInvolvers(self): # String
return self.get_body_params().get('DelInvolvers')
def set_DelInvolvers(self, DelInvolvers): # String
self.add_body_params('DelInvolvers', DelInvolvers)
def get_Content(self): # String
return self.get_body_params().get('Content')
def set_Content(self, Content): # String
self.add_body_params('Content', Content)
def get_SprintId(self): # String
return self.get_body_params().get('SprintId')
def set_SprintId(self, SprintId): # String
self.add_body_params('SprintId', SprintId)
def get_CustomFieldId(self): # String
return self.get_body_params().get('CustomFieldId')
def set_CustomFieldId(self, CustomFieldId): # String
self.add_body_params('CustomFieldId', CustomFieldId)
def get_ProjectId(self): # String
return self.get_body_params().get('ProjectId')
def set_ProjectId(self, ProjectId): # String
self.add_body_params('ProjectId', ProjectId)
def get_TaskId(self): # String
return self.get_body_params().get('TaskId')
def set_TaskId(self, TaskId): # String
self.add_body_params('TaskId', TaskId)
def get_TaskFlowStatusId(self): # String
return self.get_body_params().get('TaskFlowStatusId')
def set_TaskFlowStatusId(self, TaskFlowStatusId): # String
self.add_body_params('TaskFlowStatusId', TaskFlowStatusId)
def get_TagIds(self): # String
return self.get_body_params().get('TagIds')
def set_TagIds(self, TagIds): # String
self.add_body_params('TagIds', TagIds)
def METHOD_NAME(self): # String
return self.get_body_params().get('AddInvolvers')
def set_AddInvolvers(self, AddInvolvers): # String
self.add_body_params('AddInvolvers', AddInvolvers)
def get_Priority(self): # Long
return self.get_body_params().get('Priority')
def set_Priority(self, Priority): # Long
self.add_body_params('Priority', Priority)
def get_OrgId(self): # String
return self.get_body_params().get('OrgId')
def set_OrgId(self, OrgId): # String
self.add_body_params('OrgId', OrgId)
def get_DueDate(self): # String
return self.get_body_params().get('DueDate')
def set_DueDate(self, DueDate): # String
self.add_body_params('DueDate', DueDate)
def get_WorkTimes(self): # Long
return self.get_body_params().get('WorkTimes')
def set_WorkTimes(self, WorkTimes): # Long
self.add_body_params('WorkTimes', WorkTimes)
def get_StoryPoint(self): # String
return self.get_body_params().get('StoryPoint')
def set_StoryPoint(self, StoryPoint): # String
self.add_body_params('StoryPoint', StoryPoint)
def get_CustomFieldValues(self): # String
return self.get_body_params().get('CustomFieldValues')
def set_CustomFieldValues(self, CustomFieldValues): # String
self.add_body_params('CustomFieldValues', CustomFieldValues) | null |
380 | """EmceeSampler class."""
from __future__ import annotations
import logging
from typing import List, Union
import numpy as np
from ..problem import Problem
from ..result import McmcPtResult
from ..startpoint import UniformStartpoints, uniform
from .sampler import Sampler, SamplerImportError
logger = logging.getLogger(__name__)
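# Usage sketch (an assumption, not part of this module): construct the sampler and pass it
# to pyPESTO's sampling entry point, e.g.
#   sampler = EmceeSampler(nwalkers=20)
#   result = pypesto.sample.sample(problem=problem, n_samples=1000, sampler=sampler)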
class EmceeSampler(Sampler):
"""Use emcee for sampling.
Wrapper around https://emcee.readthedocs.io/en/stable/, see there for
details.
"""
def __init__(
self,
nwalkers: int = 1,
sampler_args: dict = None,
run_args: dict = None,
):
"""
Initialize sampler.
Parameters
----------
nwalkers:
The number of walkers in the ensemble.
sampler_args:
Further keyword arguments that are passed on to
``emcee.EnsembleSampler.__init__``.
run_args:
Further keyword arguments that are passed on to
``emcee.EnsembleSampler.run_mcmc``.
"""
# check dependencies
try:
import emcee
except ImportError:
raise SamplerImportError("emcee")
super().__init__()
self.nwalkers: int = nwalkers
if sampler_args is None:
sampler_args = {}
self.sampler_args: dict = sampler_args
if run_args is None:
run_args = {}
self.run_args: dict = run_args
# set in initialize
self.problem: Union[Problem, None] = None
self.sampler: Union[emcee.EnsembleSampler, None] = None
self.state: Union[emcee.State, None] = None
def METHOD_NAME(
self,
center: np.ndarray,
problem: Problem,
epsilon: float = 1e-3,
):
"""Get walker initial positions as samples from an epsilon ball.
The ball is scaled in each direction according to the magnitude of the
center in that direction.
It is assumed that, because vectors are generated near a good point,
all generated vectors are evaluable, so evaluability is not checked.
Points that are generated outside the problem bounds will get shifted
to lie on the edge of the problem bounds.
Parameters
----------
center:
The center of the epsilon ball. The dimension should match the full
dimension of the pyPESTO problem. This will be returned as the
first position.
problem:
The pyPESTO problem.
epsilon:
The relative radius of the ball. e.g., if `epsilon=0.5`
and the center of the first dimension is at 100, then the upper
and lower bounds of the epsilon ball in the first dimension will
be 150 and 50, respectively.
"""
# Epsilon ball
lb = center * (1 - epsilon)
ub = center * (1 + epsilon)
# Adjust bounds to satisfy problem bounds
lb[lb < problem.lb] = problem.lb[lb < problem.lb]
ub[ub > problem.ub] = problem.ub[ub > problem.ub]
# Sample initial positions
initial_state_after_first = uniform(
n_starts=self.nwalkers - 1,
lb=lb,
ub=ub,
)
# Include `center` in initial positions
initial_state = np.row_stack(
(
center,
initial_state_after_first,
)
)
return initial_state
def initialize(
self,
problem: Problem,
x0: Union[np.ndarray, List[np.ndarray]],
) -> None:
"""Initialize the sampler.
        It is recommended to initialize the walkers in a small ball around a
        high-probability position, e.g. an optimized parameter vector.
Parameters
----------
x0:
The "a priori preferred position". e.g., an optimized parameter
vector. https://emcee.readthedocs.io/en/stable/user/faq/
The position of the first walker will be this, the remaining
walkers will be assigned positions uniformly in a smaller ball
around this vector.
Alternatively, a set of vectors can be provided, which will be used
to initialize walkers. In this case, any remaining walkers will be
initialized at points sampled uniformly within the problem bounds.
"""
import emcee
self.problem = problem
# extract for pickling efficiency
objective = self.problem.objective
lb = self.problem.lb
ub = self.problem.ub
        # parameter dimension
ndim = len(self.problem.x_free_indices)
def log_prob(x):
"""Log-probability density function."""
# check if parameter lies within bounds
if any(x < lb) or any(x > ub):
return -np.inf
# invert sign
return -1.0 * objective(x)
# initialize sampler
self.sampler = emcee.EnsembleSampler(
nwalkers=self.nwalkers,
ndim=ndim,
log_prob_fn=log_prob,
**self.sampler_args,
)
# assign startpoints
if self.state is None:
            if x0.ndim > 1 and x0.shape[0] > 1:
logger.warning(
"More than a single vector was provided to initialize the "
"walker positions. If these vectors do not exist in a "
"small ball around a high-probability position (e.g. "
"optimized vector) then sampling may be inefficient (see "
"emcee FAQ: "
"https://emcee.readthedocs.io/en/stable/user/faq/ )."
)
# extract x0
x0 = np.asarray(x0)
if x0.ndim == 1:
x0 = [x0]
x0 = np.array([problem.get_full_vector(x) for x in x0])
x_guesses_full0 = problem.x_guesses_full
# add x0 to guesses
problem.set_x_guesses(
np.row_stack(
(
x0,
problem.x_guesses_full,
)
)
)
# sample start points
initial_state = UniformStartpoints(
use_guesses=True,
check_fval=True,
check_grad=False,
)(
n_starts=self.nwalkers,
problem=problem,
)
# restore original guesses
problem.set_x_guesses(x_guesses_full0)
else:
initial_state = self.METHOD_NAME(
center=x0,
problem=problem,
)
self.state = initial_state
def sample(self, n_samples: int, beta: float = 1.0) -> None:
"""Return the most recent sample state."""
self.state = self.sampler.run_mcmc(
initial_state=self.state,
nsteps=n_samples,
**self.run_args,
)
def get_samples(self) -> McmcPtResult:
"""Get the samples into the fitting pypesto format."""
# all walkers are concatenated, yielding a flat array
trace_x = np.array([self.sampler.get_chain(flat=True)])
trace_neglogpost = np.array([-self.sampler.get_log_prob(flat=True)])
# the sampler does not know priors
trace_neglogprior = np.full(trace_neglogpost.shape, np.nan)
# the walkers all run on temperature 1
betas = np.array([1.0])
result = McmcPtResult(
trace_x=trace_x,
trace_neglogpost=trace_neglogpost,
trace_neglogprior=trace_neglogprior,
betas=betas,
)
return result | null |
381 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoceanbasepro.endpoint import endpoint_data
class DescribeSlowSQLListRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'OceanBasePro', '2019-09-01', 'DescribeSlowSQLList','oceanbase')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_StartTime(self): # String
return self.get_body_params().get('StartTime')
def set_StartTime(self, StartTime): # String
self.add_body_params('StartTime', StartTime)
def METHOD_NAME(self): # Integer
return self.get_body_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_body_params('PageNumber', PageNumber)
def get_SearchRule(self): # String
return self.get_body_params().get('SearchRule')
def set_SearchRule(self, SearchRule): # String
self.add_body_params('SearchRule', SearchRule)
def get_TenantId(self): # String
return self.get_body_params().get('TenantId')
def set_TenantId(self, TenantId): # String
self.add_body_params('TenantId', TenantId)
def get_PageSize(self): # Integer
return self.get_body_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_body_params('PageSize', PageSize)
def get_SearchParameter(self): # String
return self.get_body_params().get('SearchParameter')
def set_SearchParameter(self, SearchParameter): # String
self.add_body_params('SearchParameter', SearchParameter)
def get_SortOrder(self): # String
return self.get_body_params().get('SortOrder')
def set_SortOrder(self, SortOrder): # String
self.add_body_params('SortOrder', SortOrder)
def get_SearchValue(self): # String
return self.get_body_params().get('SearchValue')
def set_SearchValue(self, SearchValue): # String
self.add_body_params('SearchValue', SearchValue)
def get_SQLId(self): # String
return self.get_body_params().get('SQLId')
def set_SQLId(self, SQLId): # String
self.add_body_params('SQLId', SQLId)
def get_FilterCondition(self): # String
return self.get_body_params().get('FilterCondition')
def set_FilterCondition(self, FilterCondition): # String
self.add_body_params('FilterCondition', FilterCondition)
def get_EndTime(self): # String
return self.get_body_params().get('EndTime')
def set_EndTime(self, EndTime): # String
self.add_body_params('EndTime', EndTime)
def get_NodeIp(self): # String
return self.get_body_params().get('NodeIp')
def set_NodeIp(self, NodeIp): # String
self.add_body_params('NodeIp', NodeIp)
def get_DbName(self): # String
return self.get_body_params().get('DbName')
def set_DbName(self, DbName): # String
self.add_body_params('DbName', DbName)
def get_SearchKeyWord(self): # String
return self.get_body_params().get('SearchKeyWord')
def set_SearchKeyWord(self, SearchKeyWord): # String
self.add_body_params('SearchKeyWord', SearchKeyWord)
def get_SortColumn(self): # String
return self.get_body_params().get('SortColumn')
def set_SortColumn(self, SortColumn): # String
self.add_body_params('SortColumn', SortColumn) | null |
382 | from functools import partial
from unittest import (
TestCase,
mock,
)
from lxml import etree
from pcs.lib.cib.resource import primitive
from pcs.lib.cib.tools import IdProvider
from pcs.lib.resource_agent import ResourceAgentName
from pcs_test.tools.assertions import assert_xml_equal
class FindPrimitivesByAgent(TestCase):
# pylint: disable=protected-access
def setUp(self):
self.resources_section = etree.fromstring(
"""
<resources>
<primitive
class="standard" provider="provider" type="agent_type" id="r0"
/>
<primitive
class="something" provider="provider" type="agent_type" id="r23"
/>
<primitive class="stonith" type="agent_type" id="r1"/>
<primitive
class="standard" provider="provider" type="dummy1" id="r123"
/>
<group>
<primitive class="stonith" type="agent_type" id="r2"/>
<primitive
class="standard" provider="pacemaker" type="agent_type"
id="r3"
/>
<primitive
class="standard" provider="provider" type="agent_type"
id="r4"
/>
</group>
<clone>
<group>
<primitive
class="standard" provider="provider" type="agent_type"
id="r5"
/>
</group>
</clone>
<clone>
<primitive
class="standard" provider="provider" type="agent_type"
id="r6"
/>
</clone>
</resources>
"""
)
def test_stonith(self):
results = primitive._find_primitives_by_agent(
self.resources_section,
ResourceAgentName(
"stonith",
None,
"agent_type",
),
)
expected_results = [
'<primitive class="stonith" type="agent_type" id="r1"/>',
'<primitive class="stonith" type="agent_type" id="r2"/>',
]
self.assertEqual(len(expected_results), len(results))
for i, res in enumerate(results):
assert_xml_equal(expected_results[i], etree.tostring(res).decode())
def test_with_provider(self):
results = primitive._find_primitives_by_agent(
self.resources_section,
ResourceAgentName(
"standard",
"provider",
"agent_type",
),
)
expected_results = [
"""<primitive
class="standard" provider="provider" type="agent_type" id="r0"
/>""",
"""<primitive
class="standard" provider="provider" type="agent_type" id="r4"
/>""",
"""<primitive
class="standard" provider="provider" type="agent_type" id="r5"
/>""",
"""<primitive
class="standard" provider="provider" type="agent_type" id="r6"
/>""",
]
self.assertEqual(len(expected_results), len(results))
for i, res in enumerate(results):
assert_xml_equal(expected_results[i], etree.tostring(res).decode())
@mock.patch("pcs.lib.cib.resource.primitive.append_new_instance_attributes")
@mock.patch("pcs.lib.cib.resource.primitive.append_new_meta_attributes")
@mock.patch("pcs.lib.cib.resource.primitive.create_operations")
class AppendNew(TestCase):
def setUp(self):
self.resources_section = etree.fromstring("<resources/>")
self.instance_attributes = {"a": "b"}
self.meta_attributes = {"c": "d"}
self.operation_list = [{"name": "monitoring"}]
self.id_provider = IdProvider(self.resources_section)
self.run = partial(
primitive.append_new,
self.resources_section,
self.id_provider,
instance_attributes=self.instance_attributes,
meta_attributes=self.meta_attributes,
operation_list=self.operation_list,
)
def check_mocks(
self,
primitive_element,
create_operations,
append_new_meta_attributes,
append_new_instance_attributes,
):
create_operations.assert_called_once_with(
primitive_element, self.id_provider, self.operation_list
)
append_new_meta_attributes.assert_called_once_with(
primitive_element, self.meta_attributes, self.id_provider
)
append_new_instance_attributes.assert_called_once_with(
primitive_element, self.instance_attributes, self.id_provider
)
def METHOD_NAME(
self,
create_operations,
append_new_meta_attributes,
append_new_instance_attributes,
):
primitive_element = self.run("RESOURCE_ID", "OCF", None, "DUMMY")
self.assertEqual(
primitive_element, self.resources_section.find(".//primitive")
)
self.assertEqual(primitive_element.attrib["class"], "OCF")
self.assertEqual(primitive_element.attrib["type"], "DUMMY")
        self.assertNotIn("provider", primitive_element.attrib)
self.check_mocks(
primitive_element,
create_operations,
append_new_meta_attributes,
append_new_instance_attributes,
)
def test_append_with_provider(
self,
create_operations,
append_new_meta_attributes,
append_new_instance_attributes,
):
primitive_element = self.run("RESOURCE_ID", "OCF", "HEARTBEAT", "DUMMY")
self.assertEqual(
primitive_element, self.resources_section.find(".//primitive")
)
self.assertEqual(primitive_element.attrib["class"], "OCF")
self.assertEqual(primitive_element.attrib["type"], "DUMMY")
self.assertEqual(primitive_element.attrib["provider"], "HEARTBEAT")
self.check_mocks(
primitive_element,
create_operations,
append_new_meta_attributes,
append_new_instance_attributes,
) | null |
383 | from itertools import product
from website.notifications.emails import compile_subscriptions
from website.notifications import utils, constants
def get_file_subs_from_folder(addon, user, kind, path, name):
"""Find the file tree under a specified folder."""
folder = dict(kind=kind, path=path, name=name)
file_tree = addon._get_file_tree(filenode=folder, user=user, version='latest-published')
return list_of_files(file_tree)
def list_of_files(file_object):
files = []
if file_object['kind'] == 'file':
return [file_object['path']]
else:
for child in file_object['children']:
files.extend(list_of_files(child))
return files
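# Example (sketch): for a folder containing a.txt and sub/b.txt, list_of_files on its file
# tree returns the flat list of WaterButler paths, e.g. ['/a.txt', '/sub/b.txt'].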
def compile_user_lists(files, user, source_node, node):
"""Take multiple file ids and compiles them.
:param files: List of WaterButler paths
:param user: User who initiated action/event
:param source_node: Node instance from
:param node: Node instance to
:return: move, warn, and remove dicts
"""
# initialise subscription dictionaries
move = {key: [] for key in constants.NOTIFICATION_TYPES}
warn = {key: [] for key in constants.NOTIFICATION_TYPES}
remove = {key: [] for key in constants.NOTIFICATION_TYPES}
# get the node subscription
if len(files) == 0:
move, warn, remove = categorize_users(
user, 'file_updated', source_node, 'file_updated', node
)
# iterate through file subscriptions
for file_path in files:
path = file_path.strip('/')
t_move, t_warn, t_remove = categorize_users(
user, path + '_file_updated', source_node,
path + '_file_updated', node
)
# Add file subs to overall list of subscriptions
for notification in constants.NOTIFICATION_TYPES:
move[notification] = list(set(move[notification]).union(set(t_move[notification])))
warn[notification] = list(set(warn[notification]).union(set(t_warn[notification])))
remove[notification] = list(set(remove[notification]).union(set(t_remove[notification])))
return move, warn, remove
def categorize_users(user, source_event, source_node, event, node):
"""Categorize users from a file subscription into three categories.
Puts users in one of three bins:
- Moved: User has permissions on both nodes, subscribed to both
- Warned: User has permissions on both, not subscribed to destination
- Removed: Does not have permission on destination node
:param user: User instance who started the event
:param source_event: <guid>_event_name
:param source_node: node from where the event happened
:param event: new guid event name
:param node: node where event ends up
:return: Moved, to be warned, and removed users.
"""
remove = utils.users_to_remove(source_event, source_node, node)
source_node_subs = compile_subscriptions(source_node, utils.find_subscription_type(source_event))
new_subs = compile_subscriptions(node, utils.find_subscription_type(source_event), event)
# Moves users into the warn bucket or the move bucket
move = subscriptions_users_union(source_node_subs, new_subs)
warn = subscriptions_users_difference(source_node_subs, new_subs)
# Removes users without permissions
warn, remove = METHOD_NAME(node, warn, remove)
# Remove duplicates
warn = subscriptions_users_remove_duplicates(warn, new_subs, remove_same=False)
move = subscriptions_users_remove_duplicates(move, new_subs, remove_same=False)
# Remove duplicates between move and warn; and move and remove
move = subscriptions_users_remove_duplicates(move, warn, remove_same=True)
move = subscriptions_users_remove_duplicates(move, remove, remove_same=True)
for notifications in constants.NOTIFICATION_TYPES:
# Remove the user who started this whole thing.
user_id = user._id
if user_id in warn[notifications]:
warn[notifications].remove(user_id)
if user_id in move[notifications]:
move[notifications].remove(user_id)
if user_id in remove[notifications]:
remove[notifications].remove(user_id)
return move, warn, remove
def METHOD_NAME(node, warn_subscription, remove_subscription):
for notification in constants.NOTIFICATION_TYPES:
subbed, removed = utils.separate_users(node, warn_subscription[notification])
warn_subscription[notification] = subbed
remove_subscription[notification].extend(removed)
remove_subscription[notification] = list(set(remove_subscription[notification]))
return warn_subscription, remove_subscription
def subscriptions_users_union(emails_1, emails_2):
return {
notification:
list(
set(emails_1[notification]).union(set(emails_2[notification]))
)
for notification in constants.NOTIFICATION_TYPES.keys()
}
def subscriptions_users_difference(emails_1, emails_2):
return {
notification:
list(
set(emails_1[notification]).difference(set(emails_2[notification]))
)
for notification in constants.NOTIFICATION_TYPES.keys()
}
def subscriptions_users_remove_duplicates(emails_1, emails_2, remove_same=False):
emails_list = dict(emails_1)
product_list = product(constants.NOTIFICATION_TYPES, repeat=2)
for notification_1, notification_2 in product_list:
if notification_2 == notification_1 and not remove_same or notification_2 == 'none':
continue
emails_list[notification_1] = list(
set(emails_list[notification_1]).difference(set(emails_2[notification_2]))
)
return emails_list | null |
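# Illustrative sketch, not part of the module above: the union/difference
# helpers operate on dicts keyed by notification type. The toy notification
# type keys and user ids below are assumptions for the example only; the real
# keys come from website.notifications.constants.NOTIFICATION_TYPES.
example_types = ['email_transactional', 'email_digest', 'none']
subs_a = {'email_transactional': ['alice', 'bob'], 'email_digest': ['carol'], 'none': []}
subs_b = {'email_transactional': ['bob'], 'email_digest': [], 'none': ['dave']}
union = {t: list(set(subs_a[t]) | set(subs_b[t])) for t in example_types}
difference = {t: list(set(subs_a[t]) - set(subs_b[t])) for t in example_types}
print(sorted(union['email_transactional']))       # ['alice', 'bob']
print(sorted(difference['email_transactional']))  # ['alice']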
384 | from datetime import datetime
from flask import g, request
from flask_appbuilder import ModelRestApi
from flask_appbuilder.api import expose, safe
from flask_appbuilder.const import API_RESULT_RES_KEY
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import permission_name, protect
from flask_appbuilder.security.sqla.apis.user.schema import (
UserPostSchema,
UserPutSchema,
)
from flask_appbuilder.security.sqla.models import Role, User
from marshmallow import ValidationError
from sqlalchemy.exc import IntegrityError
from werkzeug.security import generate_password_hash
class UserApi(ModelRestApi):
resource_name = "security/users"
openapi_spec_tag = "Security Users"
class_permission_name = "User"
datamodel = SQLAInterface(User)
allow_browser_login = True
list_columns = [
"id",
"roles.id",
"roles.name",
"first_name",
"last_name",
"username",
"active",
"email",
"last_login",
"login_count",
"fail_login_count",
"created_on",
"changed_on",
"created_by.id",
"changed_by.id",
]
show_columns = list_columns
add_columns = [
"roles",
"first_name",
"last_name",
"username",
"active",
"email",
"password",
]
edit_columns = add_columns
search_columns = [
"username",
"first_name",
"last_name",
"active",
"email",
"created_by",
"changed_by",
"roles",
]
add_model_schema = UserPostSchema()
edit_model_schema = UserPutSchema()
def METHOD_NAME(self, item):
item.changed_on = datetime.now()
item.changed_by_fk = g.user.id
if item.password:
item.password = generate_password_hash(item.password)
def pre_add(self, item):
item.password = generate_password_hash(item.password)
@expose("/", methods=["POST"])
@protect()
@safe
@permission_name("post")
def post(self):
"""Create new user
---
post:
requestBody:
description: Model schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
responses:
201:
description: Item changed
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.post'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
item = self.add_model_schema.load(request.json)
model = User()
roles = []
for key, value in item.items():
if key != "roles":
setattr(model, key, value)
else:
for role_id in item[key]:
role = (
self.datamodel.session.query(Role)
.filter(Role.id == role_id)
.one_or_none()
)
if role:
role.user_id = model.id
role.role_id = role_id
roles.append(role)
if "roles" in item.keys():
model.roles = roles
self.pre_add(model)
self.datamodel.add(model, raise_exception=True)
return self.response(201, id=model.id)
except ValidationError as error:
return self.response_400(message=error.messages)
except IntegrityError as e:
return self.response_422(message=str(e.orig))
@expose("/<pk>", methods=["PUT"])
@protect()
@safe
@permission_name("put")
def put(self, pk):
"""Edit user
---
put:
parameters:
- in: path
schema:
type: integer
name: pk
requestBody:
description: Model schema
required: true
content:
application/json:
schema:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
responses:
200:
description: Item changed
content:
application/json:
schema:
type: object
properties:
result:
$ref: '#/components/schemas/{{self.__class__.__name__}}.put'
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
422:
$ref: '#/components/responses/422'
500:
$ref: '#/components/responses/500'
"""
try:
item = self.edit_model_schema.load(request.json)
model = self.datamodel.get(pk, self._base_filters)
roles = []
for key, value in item.items():
if key != "roles":
setattr(model, key, value)
else:
for role_id in item[key]:
role = (
self.datamodel.session.query(Role)
.filter(Role.id == role_id)
.one_or_none()
)
if role:
role.user_id = model.id
role.role_id = role_id
roles.append(role)
if "roles" in item.keys():
model.roles = roles
self.METHOD_NAME(model)
self.datamodel.edit(model, raise_exception=True)
return self.response(
200,
**{API_RESULT_RES_KEY: self.edit_model_schema.dump(item, many=False)},
)
except ValidationError as e:
return self.response_400(message=e.messages)
except IntegrityError as e:
return self.response_422(message=str(e.orig)) | null |
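# Hedged usage sketch, not part of the module above: exercising the POST
# endpoint over HTTP with the requests library. The base URL, credentials and
# the JWT login route (/api/v1/security/login) are assumptions about a typical
# Flask-AppBuilder deployment; adjust them for your instance.
import requests

base = "http://localhost:8080/api/v1"
token = requests.post(
    f"{base}/security/login",
    json={"username": "admin", "password": "admin", "provider": "db"},
).json()["access_token"]
headers = {"Authorization": f"Bearer {token}"}

# Create a user (handled by UserApi.post above); role id 1 is a placeholder.
new_user = {
    "first_name": "Jane", "last_name": "Doe", "username": "jdoe",
    "email": "jdoe@example.com", "active": True, "password": "secret",
    "roles": [1],
}
resp = requests.post(f"{base}/security/users/", json=new_user, headers=headers)
print(resp.status_code, resp.json())  # expect 201 with the new user's id on success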
385 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecd.endpoint import endpoint_data
class ExportDesktopListInfoRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'ecd', '2020-09-30', 'ExportDesktopListInfo')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_OfficeSiteId(self): # String
return self.get_query_params().get('OfficeSiteId')
def set_OfficeSiteId(self, OfficeSiteId): # String
self.add_query_param('OfficeSiteId', OfficeSiteId)
def get_DesktopStatus(self): # String
return self.get_query_params().get('DesktopStatus')
def set_DesktopStatus(self, DesktopStatus): # String
self.add_query_param('DesktopStatus', DesktopStatus)
def get_NextToken(self): # String
return self.get_query_params().get('NextToken')
def set_NextToken(self, NextToken): # String
self.add_query_param('NextToken', NextToken)
def get_EndUserIds(self): # RepeatList
return self.get_query_params().get('EndUserId')
def set_EndUserIds(self, EndUserId): # RepeatList
for depth1 in range(len(EndUserId)):
self.add_query_param('EndUserId.' + str(depth1 + 1), EndUserId[depth1])
def get_DesktopIds(self): # RepeatList
return self.get_query_params().get('DesktopId')
def set_DesktopIds(self, DesktopId): # RepeatList
for depth1 in range(len(DesktopId)):
self.add_query_param('DesktopId.' + str(depth1 + 1), DesktopId[depth1])
def get_Tags(self): # RepeatList
return self.get_query_params().get('Tag')
def set_Tags(self, Tag): # RepeatList
for depth1 in range(len(Tag)):
if Tag[depth1].get('Value') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Value', Tag[depth1].get('Value'))
if Tag[depth1].get('Key') is not None:
self.add_query_param('Tag.' + str(depth1 + 1) + '.Key', Tag[depth1].get('Key'))
def get_DesktopName(self): # String
return self.get_query_params().get('DesktopName')
def set_DesktopName(self, DesktopName): # String
self.add_query_param('DesktopName', DesktopName)
def get_GroupId(self): # String
return self.get_query_params().get('GroupId')
def set_GroupId(self, GroupId): # String
self.add_query_param('GroupId', GroupId)
def get_ExpiredTime(self): # String
return self.get_query_params().get('ExpiredTime')
def set_ExpiredTime(self, ExpiredTime): # String
self.add_query_param('ExpiredTime', ExpiredTime)
def get_MaxResults(self): # Integer
return self.get_query_params().get('MaxResults')
def set_MaxResults(self, MaxResults): # Integer
self.add_query_param('MaxResults', MaxResults)
def get_LangType(self): # String
return self.get_query_params().get('LangType')
def set_LangType(self, LangType): # String
self.add_query_param('LangType', LangType)
def get_ChargeType(self): # String
return self.get_query_params().get('ChargeType')
def set_ChargeType(self, ChargeType): # String
self.add_query_param('ChargeType', ChargeType)
def METHOD_NAME(self): # String
return self.get_query_params().get('PolicyGroupId')
def set_PolicyGroupId(self, PolicyGroupId): # String
self.add_query_param('PolicyGroupId', PolicyGroupId)
def get_UserName(self): # String
return self.get_query_params().get('UserName')
def set_UserName(self, UserName): # String
self.add_query_param('UserName', UserName) | null |
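# Hedged usage sketch, not part of the module above: building the request and
# letting the repeat-list setters flatten values into indexed query keys
# (DesktopId.1, DesktopId.2, ...). Sending it requires an AcsClient with valid
# credentials; the region and ids below are placeholders.
from aliyunsdkcore.client import AcsClient

request = ExportDesktopListInfoRequest()
request.set_OfficeSiteId('cn-hangzhou+dir-example')
request.set_DesktopIds(['ecd-desktop-1', 'ecd-desktop-2'])
request.set_MaxResults(50)
print(request.get_query_params())  # shows DesktopId.1 / DesktopId.2 populated

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
response = client.do_action_with_exception(request)
print(response)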
386 | from datetime import datetime
from unittest import mock
import pytest
import responses
from api.share.utils import shtrove_ingest_url, sharev2_push_url
from framework.auth.core import Auth
from osf.models.spam import SpamStatus
from osf.utils.permissions import READ, WRITE, ADMIN
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
SubjectFactory,
PreprintFactory,
PreprintProviderFactory,
)
from website import settings
from website.preprints.tasks import on_preprint_updated
from ._utils import expect_preprint_ingest_request
@pytest.mark.django_db
@pytest.mark.enable_enqueue_task
class TestPreprintShare:
@pytest.fixture(scope='class', autouse=True)
def METHOD_NAME(self):
with mock.patch.object(settings, 'USE_CELERY', False):
yield
@pytest.fixture
def user(self):
return AuthUserFactory()
@pytest.fixture
def auth(self, user):
return Auth(user=user)
@pytest.fixture
def provider(self):
return PreprintProviderFactory(
name='Lars Larson Snowmobiling Experience',
access_token='Snowmobiling'
)
@pytest.fixture
def project(self, user, mock_share_responses):
return ProjectFactory(creator=user, is_public=True)
@pytest.fixture
def subject(self):
return SubjectFactory(text='Subject #1')
@pytest.fixture
def subject_two(self):
return SubjectFactory(text='Subject #2')
@pytest.fixture
def preprint(self, project, user, provider, subject):
return PreprintFactory(
creator=user,
filename='second_place.pdf',
provider=provider,
subjects=[[subject._id]],
project=project,
is_published=False
)
def test_save_unpublished_not_called(self, mock_share_responses, preprint):
# expecting no ingest requests (delete or otherwise)
with expect_preprint_ingest_request(mock_share_responses, preprint, count=0):
preprint.save()
def test_save_published_called(self, mock_share_responses, preprint, user, auth):
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.set_published(True, auth=auth, save=True)
    # This covers an edge case where a preprint is forced back to unpublished,
    # ensuring the information is sent back to SHARE
def test_save_unpublished_called_forced(self, mock_share_responses, auth, preprint):
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.set_published(True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):
preprint.is_published = False
preprint.save(**{'force_update': True})
def test_save_published_subject_change_called(self, mock_share_responses, auth, preprint, subject, subject_two):
preprint.set_published(True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.set_subjects([[subject_two._id]], auth=auth)
def test_save_unpublished_subject_change_not_called(self, mock_share_responses, auth, preprint, subject_two):
with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):
preprint.set_subjects([[subject_two._id]], auth=auth)
def test_send_to_share_is_true(self, mock_share_responses, auth, preprint):
preprint.set_published(True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint):
on_preprint_updated(preprint._id, saved_fields=['title'])
def test_preprint_contributor_changes_updates_preprints_share(self, mock_share_responses, user, auth):
preprint = PreprintFactory(is_published=True, creator=user)
preprint.set_published(True, auth=auth, save=True)
user2 = AuthUserFactory()
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.add_contributor(contributor=user2, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.move_contributor(contributor=user, index=0, auth=auth, save=True)
data = [{'id': user._id, 'permissions': ADMIN, 'visible': True},
{'id': user2._id, 'permissions': WRITE, 'visible': False}]
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.manage_contributors(data, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.update_contributor(user2, READ, True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.remove_contributor(contributor=user2, auth=auth)
def test_call_async_update_on_500_failure(self, mock_share_responses, preprint, auth):
mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=500)
mock_share_responses.replace(responses.POST, sharev2_push_url(), status=500)
preprint.set_published(True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint, count=5):
preprint.update_search()
def test_no_call_async_update_on_400_failure(self, mock_share_responses, preprint, auth):
mock_share_responses.replace(responses.POST, shtrove_ingest_url(), status=400)
mock_share_responses.replace(responses.POST, sharev2_push_url(), status=400)
preprint.set_published(True, auth=auth, save=True)
with expect_preprint_ingest_request(mock_share_responses, preprint, count=1):
preprint.update_search()
def test_delete_from_share(self, mock_share_responses):
preprint = PreprintFactory()
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.update_search()
preprint.date_withdrawn = datetime.now()
preprint.save()
with expect_preprint_ingest_request(mock_share_responses, preprint):
preprint.update_search()
preprint.spam_status = SpamStatus.SPAM
preprint.save()
with expect_preprint_ingest_request(mock_share_responses, preprint, delete=True):
preprint.update_search() | null |
387 | import pytest
pytestmark = [
pytest.mark.django_db,
pytest.mark.usefixtures("purchase"),
]
@pytest.fixture
def METHOD_NAME(another_user, another_answer, question):
another_answer.question = question
another_answer.author = another_user
another_answer.save()
return another_answer
@pytest.mark.freeze_time("2022-10-09 10:30:12+12:00") # +12 hours kamchatka timezone
@pytest.mark.usefixtures("kamchatka_timezone")
def test_ok(api, question, answer):
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got[0]) == 9
assert got[0]["created"] == "2022-10-09T10:30:12+12:00"
assert got[0]["modified"] == "2022-10-09T10:30:12+12:00"
assert got[0]["slug"] == str(answer.slug)
assert got[0]["question"] == str(answer.question.slug)
assert "<em>test</em>" in got[0]["text"]
assert got[0]["src"] == "*test*"
assert got[0]["author"]["uuid"] == str(api.user.uuid)
assert got[0]["author"]["first_name"] == api.user.first_name
assert got[0]["author"]["last_name"] == api.user.last_name
assert got[0]["has_descendants"] is False
assert got[0]["reactions"] == []
def test_has_reaction_fields_if_there_is_reaction(api, question, answer, reaction):
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
reactions = got[0]["reactions"]
assert len(reactions[0]) == 4
assert reactions[0]["emoji"] == reaction.emoji
assert reactions[0]["slug"] == str(reaction.slug)
assert reactions[0]["answer"] == str(reaction.answer.slug)
assert reactions[0]["author"]["uuid"] == str(reaction.author.uuid)
assert reactions[0]["author"]["first_name"] == reaction.author.first_name
assert reactions[0]["author"]["last_name"] == reaction.author.last_name
def test_has_descendants_is_true_if_answer_has_children(api, question, answer, another_answer):
another_answer.parent = answer
another_answer.save()
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert got[0]["has_descendants"] is True
def test_nplusone(api, question, answer, another_answer, django_assert_num_queries, mixer):
for _ in range(5):
mixer.blend("homework.Reaction", author=api.user, answer=answer)
mixer.blend("homework.Reaction", author=api.user, answer=another_answer)
with django_assert_num_queries(7):
api.get(f"/api/v2/homework/answers/?question={question.slug}")
@pytest.mark.usefixtures("answer")
def test_answers_from_other_questions_are_excluded(api, another_question):
got = api.get(f"/api/v2/homework/answers/?question={another_question.slug}")["results"]
assert len(got) == 0
def test_non_root_answers_are_excluded(api, question, answer, METHOD_NAME):
answer.parent = METHOD_NAME
answer.save()
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got) == 1 # only answer_from_another_user
assert got[0]["slug"] == str(METHOD_NAME.slug)
@pytest.mark.usefixtures("answer", "answer_from_another_user")
def test_answers_from_other_questions_are_excluded_even_if_user_has_the_permission(api, another_question):
api.user.add_perm("homework.answer.see_all_answers")
got = api.get(f"/api/v2/homework/answers/?question={another_question.slug}")["results"]
assert len(got) == 0
@pytest.mark.usefixtures("answer_from_another_user")
def test_answers_from_another_authors_are_excluded(api, question):
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got) == 0
def test_answers_from_another_authors_are_included_if_already_seen(api, mixer, question, METHOD_NAME):
mixer.blend("homework.AnswerAccessLogEntry", user=api.user, answer=METHOD_NAME)
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got) == 1
def test_answers_from_another_authors_are_excluded_if_author_is_filtered(api, mixer, question, METHOD_NAME):
mixer.blend("homework.AnswerAccessLogEntry", user=api.user, answer=METHOD_NAME)
got = api.get(f"/api/v2/homework/answers/?question={question.slug}&author={api.user.uuid}")["results"]
assert len(got) == 0
def test_access_log_entries_from_another_users_do_not_break_the_select(api, mixer, question, answer):
mixer.cycle(5).blend("homework.AnswerAccessLogEntry", question=question, answer=answer)
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got) == 1
@pytest.mark.usefixtures("answer_from_another_user")
def test_users_with_permission_may_see_all_answers(api, question):
api.user.add_perm("homework.answer.see_all_answers")
got = api.get(f"/api/v2/homework/answers/?question={question.slug}")["results"]
assert len(got) == 1
def test_no_anon(anon, question):
anon.get(f"/api/v2/homework/answers/?question={question.slug}", expected_status_code=401)
@pytest.mark.parametrize(
"disable_pagination_value",
[
"True",
"true",
"1",
],
)
def test_pagination_could_be_disable_with_query_param(api, question, answer, disable_pagination_value):
got = api.get(f"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}")
assert len(got) == 1
assert got[0]["slug"] == str(answer.slug)
@pytest.mark.parametrize(
"disable_pagination_value",
[
"false",
"False",
"any-other-value",
],
)
def test_paginated_response_with_disable_pagination_false_or_invalid_value(api, question, answer, disable_pagination_value):
got = api.get(f"/api/v2/homework/answers/?question={question.slug}&disable_pagination={disable_pagination_value}")
assert "results" in got
assert "count" in got
assert len(got["results"]) == 1 | null |
388 | from galaxy import model
from galaxy.util.unittest import TestCase
from galaxy.workflow import extract
UNDEFINED_JOB = object()
class TestWorkflowExtractSummary(TestCase):
def setUp(self):
self.history = MockHistory()
self.trans = MockTrans(self.history)
def METHOD_NAME(self):
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert not job_dict
def test_summarize_returns_name_and_dataset_list(self):
        # Create two jobs and three datasets, test they are grouped
# by job correctly with correct output names.
hda1 = MockHda()
self.history.active_datasets.append(hda1)
hda2 = MockHda(job=hda1.job, output_name="out2")
self.history.active_datasets.append(hda2)
hda3 = MockHda(output_name="out3")
self.history.active_datasets.append(hda3)
job_dict, warnings = extract.summarize(trans=self.trans)
assert len(job_dict) == 2
assert not warnings
assert job_dict[hda1.job] == [("out1", hda1), ("out2", hda2)]
assert job_dict[hda3.job] == [("out3", hda3)]
def test_finds_original_job_if_copied(self):
hda = MockHda()
derived_hda_1 = MockHda()
derived_hda_1.copied_from_history_dataset_association = hda
derived_hda_2 = MockHda()
derived_hda_2.copied_from_history_dataset_association = derived_hda_1
self.history.active_datasets.append(derived_hda_2)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
assert job_dict[hda.job] == [("out1", derived_hda_2)]
def test_fake_job_hda(self):
"""Fakes job if creating_job_associations is empty."""
hda = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert fake_job.id.startswith("fake_")
datasets = next(iter(job_dict.values()))
assert datasets == [(None, hda)]
def test_fake_job_hda_name_guess(self):
hda_from_history = MockHda(job=UNDEFINED_JOB)
hda_from_history.copied_from_history_dataset_association = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda_from_history)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert "History" in fake_job.name
self.history.active_datasets.remove(hda_from_history)
hda_from_library = MockHda(job=UNDEFINED_JOB)
hda_from_library.copied_from_library_dataset_dataset_association = MockHda(job=UNDEFINED_JOB)
self.history.active_datasets.append(hda_from_library)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert "Library" in fake_job.name
def test_fake_job_hdca(self):
hdca = MockHdca()
self.history.active_datasets.append(hdca)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
fake_job = next(iter(job_dict.keys()))
assert fake_job.id.startswith("fake_")
assert fake_job.is_fake
content_instances = next(iter(job_dict.values()))
assert content_instances == [(None, hdca)]
def test_implicit_map_job_hdca(self):
creating_job = model.Job()
hdca = MockHdca(implicit_output_name="out1", job=creating_job)
self.history.active_datasets.append(hdca)
job_dict, warnings = extract.summarize(trans=self.trans)
assert not warnings
assert len(job_dict) == 1
job = next(iter(job_dict.keys()))
assert job is creating_job
def test_warns_and_skips_datasets_if_not_finished(self):
hda = MockHda(state="queued")
self.history.active_datasets.append(hda)
job_dict, warnings = extract.summarize(trans=self.trans)
assert warnings
assert len(job_dict) == 0
class MockJobToOutputDatasetAssociation:
job = None
def __init__(self, name, dataset):
self.name = name
self.dataset = dataset
class MockHistory:
def __init__(self):
self.active_datasets = []
@property
def active_contents(self):
return self.active_datasets
class MockTrans:
def __init__(self, history):
self.history = history
def get_history(self):
return self.history
class MockHda:
def __init__(self, state="ok", output_name="out1", job=None):
self.hid = 1
self.id = 123
self.state = state
self.copied_from_history_dataset_association = None
self.copied_from_library_dataset_dataset_association = None
self.history_content_type = "dataset"
if job is not UNDEFINED_JOB:
if not job:
job = model.Job()
self.job = job
assoc = MockJobToOutputDatasetAssociation(output_name, self)
assoc.job = job
self.creating_job_associations = [assoc]
else:
self.creating_job_associations = []
class MockHdca:
def __init__(self, implicit_output_name=None, job=None, hid=1):
self.id = 124
self.copied_from_history_dataset_collection_association = None
self.history_content_type = "dataset_collection"
self.implicit_output_name = implicit_output_name
self.hid = 1
self.collection = model.DatasetCollection()
self.creating_job_associations = []
element = model.DatasetCollectionElement(
collection=self.collection,
element=model.HistoryDatasetAssociation(),
element_index=0,
element_identifier="moocow",
)
element.dataset_instance.dataset = model.Dataset()
element.dataset_instance.dataset.state = "ok"
creating = model.JobToOutputDatasetAssociation(
implicit_output_name,
element.dataset_instance,
)
creating.job = job
element.dataset_instance.creating_job_associations = [
creating,
] | null |
389 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary or list of tuples ``[(key, value)]`` (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
to add for the file.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How many seconds to wait for the server to send data
before giving up, as a float, or a :ref:`(connect timeout, read
timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
:return: :class:`Response <Response>` object
:rtype: requests.Response
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
# By using the 'with' statement we are sure the session is closed, thus we
# avoid leaving sockets open which can trigger a ResourceWarning in some
# cases, and look like a memory leak in others.
with sessions.Session() as session:
return session.request(method=method, url=url, **kwargs)
def get(url, params=None, **kwargs):
r"""Sends a GET request.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, params=params, **kwargs)
def options(url, **kwargs):
r"""Sends an OPTIONS request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def METHOD_NAME(url, **kwargs):
r"""Sends a HEAD request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
r"""Sends a POST request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
r"""Sends a PUT request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
r"""Sends a PATCH request.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary (will be form-encoded), bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
r"""Sends a DELETE request.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:return: :class:`Response <Response>` object
:rtype: requests.Response
"""
return request('delete', url, **kwargs) | null |
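# Hedged usage sketch, not part of the module above: the helpers are thin
# wrappers around request(); httpbin.org is used purely as a public echo
# service for illustration.
import requests

r = requests.get('https://httpbin.org/get', params={'q': 'example'})
print(r.status_code, r.json()['args'])    # 200, {'q': 'example'}

r = requests.post('https://httpbin.org/post', json={'name': 'example'})
print(r.status_code, r.json()['json'])    # 200, {'name': 'example'}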
390 | # Copyright 2021-2023 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from warnings import warn
import unified_planning as up
from unified_planning.model.types import _UserType
from unified_planning.exceptions import UPProblemDefinitionError, UPValueError
from typing import List, Dict, Optional, cast
class UserTypesSetMixin:
"""
This class is a mixin that contains a `set` of `user types` with some related methods.
NOTE: when this mixin is used in combination with other mixins that share some
of the attributes (e.g. `has_name_method`), it is required to pass the very same
arguments to the mixins constructors.
"""
def __init__(self, env, has_name_method):
self._env = env
self._has_name_method = has_name_method
self._user_types: List["up.model.types.Type"] = []
# The field _user_types_hierarchy stores the information about the types and the list of their sons.
self._user_types_hierarchy: Dict[
Optional["up.model.types.Type"], List["up.model.types.Type"]
] = {}
def _add_user_type(self, type: "up.model.types.Type"):
"""This method adds a Type, together with all it's ancestors, to the user_types_hierarchy"""
assert type.is_user_type()
if type not in self._user_types:
ut = cast(_UserType, type)
if self._has_name_method(ut.name):
msg = f"The type name {ut.name} is already used in the problem! Different elements of a problem can have the same name if the environment flag error_used_name is disabled."
if self._env.error_used_name or any(
ut.name == cast(_UserType, t).name for t in self._user_types
):
raise UPProblemDefinitionError(msg)
else:
warn(msg)
if ut.father is not None:
self._add_user_type(ut.father)
self._user_types.append(type)
@property
def user_types(self) -> List["up.model.types.Type"]:
"""Returns the `list` of all the `user types` in the `problem`."""
return self._user_types
def user_type(self, name: str) -> "up.model.types.Type":
"""
Returns the `user type` in the `problem` with the given `name`.
:param name: The target `name` for the `type`.
:return: The `type` in the `problem` with the given `name`.
"""
for ut in self.user_types:
assert ut.is_user_type()
if cast(_UserType, ut).name == name:
return ut
raise UPValueError(f"UserType {name} is not defined!")
def METHOD_NAME(self, name: str) -> bool:
"""
Returns `True` if the `type` with the given `name` is defined in the
`problem`, `False`, otherwise.
:param name: The target `name` for the `type`.
:return: `True` if a `type` with the given `name` is in the `problem`,
`False` otherwise.
"""
for ut in self.user_types:
assert ut.is_user_type()
if cast(_UserType, ut).name == name:
return True
return False
@property
def user_types_hierarchy(
self,
) -> Dict[Optional["up.model.types.Type"], List["up.model.types.Type"]]:
"""
Returns a `Dict` where every `key` represents an `Optional Type` and the `value`
associated to the `key` is the `List` of the `direct sons` of the `Optional Type`.
All the `user types` corresponding to the 'None' key are fatherless.
"""
res: Dict[Optional["up.model.types.Type"], List["up.model.types.Type"]] = {}
for t in self._user_types:
if t not in res:
res[t] = []
f = cast(_UserType, t).father
if f not in res:
res[f] = [t]
else:
res[f].append(t)
return res
def __eq__(self, other):
return isinstance(other, UserTypesSetMixin) and set(self._user_types) == set(
other._user_types
)
def __hash__(self):
return sum(map(hash, self._user_types))
def _clone_to(self, other: "UserTypesSetMixin"):
other._user_types = self._user_types[:]
other._user_types_hierarchy = self._user_types_hierarchy.copy() | null |
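# Hedged usage sketch, not part of the module above, assuming the public
# unified_planning.shortcuts API (UserType, Object, Problem): user types are
# registered on a Problem through the mixin, and user_types_hierarchy groups
# them under their father type (fatherless types end up under the None key).
from unified_planning.shortcuts import UserType, Object, Problem

Location = UserType('Location')
Robot = UserType('Robot')
problem = Problem('demo')
problem.add_object(Object('kitchen', Location))
problem.add_object(Object('r1', Robot))
print([str(t) for t in problem.user_types])   # ['Location', 'Robot']
print(problem.user_type('Robot'))             # resolved by name
print(problem.user_types_hierarchy)           # both types listed under the None key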
391 | #!/usr/bin/env python
## @ CommonUtility.py
# Common utility script
#
# Copyright (c) 2016 - 2020, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
##
# Import Modules
#
import os
import sys
import re
import shutil
import subprocess
import struct
import hashlib
import string
from ctypes import *
from functools import reduce
from importlib.machinery import SourceFileLoader
def print_bytes (data, indent=0, offset=0, show_ascii = False):
bytes_per_line = 16
printable = ' ' + string.ascii_letters + string.digits + string.punctuation
str_fmt = '{:s}{:04x}: {:%ds} {:s}' % (bytes_per_line * 3)
data_array = bytearray(data)
for idx in range(0, len(data_array), bytes_per_line):
hex_str = ' '.join('%02X' % val for val in data_array[idx:idx + bytes_per_line])
asc_str = ''.join('%c' % (val if (chr(val) in printable) else '.')
for val in data_array[idx:idx + bytes_per_line])
print (str_fmt.format(indent * ' ', offset + idx, hex_str, ' ' + asc_str if show_ascii else ''))
def get_bits_from_bytes (bytes, start, length):
if length == 0:
return 0
byte_start = (start) // 8
byte_end = (start + length - 1) // 8
bit_start = start & 7
mask = (1 << length) - 1
val = bytes_to_value (bytes[byte_start:byte_end + 1])
val = (val >> bit_start) & mask
return val
def set_bits_to_bytes (bytes, start, length, bvalue):
if length == 0:
return
byte_start = (start) // 8
byte_end = (start + length - 1) // 8
bit_start = start & 7
mask = (1 << length) - 1
val = bytes_to_value (bytes[byte_start:byte_end + 1])
val &= ~(mask << bit_start)
val |= ((bvalue & mask) << bit_start)
bytes[byte_start:byte_end+1] = value_to_bytearray (val, byte_end + 1 - byte_start)
def value_to_bytes (value, length):
return value.to_bytes(length, 'little')
def bytes_to_value (bytes):
return int.from_bytes (bytes, 'little')
def value_to_bytearray (value, length):
return bytearray(value_to_bytes(value, length))
def get_aligned_value (value, alignment = 4):
if alignment != (1 << (alignment.bit_length() - 1)):
raise Exception ('Alignment (0x%x) should to be power of 2 !' % alignment)
value = (value + (alignment - 1)) & ~(alignment - 1)
return value
def get_padding_length (data_len, alignment = 4):
new_data_len = get_aligned_value (data_len, alignment)
return new_data_len - data_len
def METHOD_NAME (file, mode = 'rb'):
return open(file, mode).read()
def gen_file_from_object (file, object):
open (file, 'wb').write(object)
def gen_file_with_size (file, size):
open (file, 'wb').write(b'\xFF' * size);
def check_files_exist (base_name_list, dir = '', ext = ''):
for each in base_name_list:
if not os.path.exists (os.path.join (dir, each + ext)):
return False
return True
def load_source (name, filepath):
mod = SourceFileLoader (name, filepath).load_module()
return mod
def get_openssl_path ():
if os.name == 'nt':
if 'OPENSSL_PATH' not in os.environ:
openssl_dir = "C:\\Openssl\\bin\\"
if os.path.exists (openssl_dir):
os.environ['OPENSSL_PATH'] = openssl_dir
else:
os.environ['OPENSSL_PATH'] = "C:\\Openssl\\"
if 'OPENSSL_CONF' not in os.environ:
openssl_cfg = "C:\\Openssl\\openssl.cfg"
if os.path.exists(openssl_cfg):
os.environ['OPENSSL_CONF'] = openssl_cfg
openssl = os.path.join(os.environ.get ('OPENSSL_PATH', ''), 'openssl.exe')
else:
# Get openssl path for Linux cases
openssl = shutil.which('openssl')
return openssl
def run_process (arg_list, print_cmd = False, capture_out = False):
sys.stdout.flush()
if os.name == 'nt' and os.path.splitext(arg_list[0])[1] == '' and \
os.path.exists (arg_list[0] + '.exe'):
arg_list[0] += '.exe'
if print_cmd:
print (' '.join(arg_list))
exc = None
result = 0
output = ''
try:
if capture_out:
output = subprocess.check_output(arg_list).decode()
else:
result = subprocess.call (arg_list)
except Exception as ex:
result = 1
exc = ex
if result:
if not print_cmd:
print ('Error in running process:\n %s' % ' '.join(arg_list))
if exc is None:
sys.exit(1)
else:
raise exc
return output | null |
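# Hedged usage sketch, not part of the script above: exercising the bit
# helpers defined in CommonUtility.py (assumed to be in scope). With 0xCDAB
# stored little-endian in two bytes, the 8 bits starting at bit 4 are 0xDA;
# writing them back into an empty buffer yields bytes 0xA0 0x0D.
buf = bytearray(b'\xAB\xCD')
val = get_bits_from_bytes(buf, 4, 8)
print(hex(val))                     # 0xda

out = bytearray(2)
set_bits_to_bytes(out, 4, 8, val)
print(out.hex())                    # a00d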
392 | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, Callable, Dict, List, Tuple, Type
if TYPE_CHECKING:
from toil.batchSystems.abstractBatchSystem import AbstractBatchSystem
logger = logging.getLogger(__name__)
def aws_batch_batch_system_factory():
from toil.batchSystems.awsBatch import AWSBatchBatchSystem
return AWSBatchBatchSystem
def gridengine_batch_system_factory():
from toil.batchSystems.gridengine import GridEngineBatchSystem
return GridEngineBatchSystem
def parasol_batch_system_factory():
from toil.batchSystems.parasol import ParasolBatchSystem
return ParasolBatchSystem
def lsf_batch_system_factory():
from toil.batchSystems.lsf import LSFBatchSystem
return LSFBatchSystem
def single_machine_batch_system_factory():
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
return SingleMachineBatchSystem
def mesos_batch_system_factory():
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
return MesosBatchSystem
def slurm_batch_system_factory():
from toil.batchSystems.slurm import SlurmBatchSystem
return SlurmBatchSystem
def tes_batch_system_factory():
from toil.batchSystems.tes import TESBatchSystem
return TESBatchSystem
def torque_batch_system_factory():
from toil.batchSystems.torque import TorqueBatchSystem
return TorqueBatchSystem
def htcondor_batch_system_factory():
from toil.batchSystems.htcondor import HTCondorBatchSystem
return HTCondorBatchSystem
def kubernetes_batch_system_factory():
from toil.batchSystems.kubernetes import KubernetesBatchSystem
return KubernetesBatchSystem
BATCH_SYSTEM_FACTORY_REGISTRY: Dict[str, Callable[[], Type["AbstractBatchSystem"]]] = {
'aws_batch' : aws_batch_batch_system_factory,
'parasol' : parasol_batch_system_factory,
'single_machine' : single_machine_batch_system_factory,
'grid_engine' : gridengine_batch_system_factory,
'lsf' : lsf_batch_system_factory,
'mesos' : mesos_batch_system_factory,
'slurm' : slurm_batch_system_factory,
'tes' : tes_batch_system_factory,
'torque' : torque_batch_system_factory,
'htcondor' : htcondor_batch_system_factory,
'kubernetes' : kubernetes_batch_system_factory
}
BATCH_SYSTEMS = list(BATCH_SYSTEM_FACTORY_REGISTRY.keys())
DEFAULT_BATCH_SYSTEM = 'single_machine'
def addBatchSystemFactory(key: str, batchSystemFactory: Callable[[], Type['AbstractBatchSystem']]):
"""
Adds a batch system to the registry for workflow-supplied batch systems.
"""
BATCH_SYSTEMS.append(key)
BATCH_SYSTEM_FACTORY_REGISTRY[key] = batchSystemFactory
# We need a snapshot save/restore system for testing. We can't just tamper with
# the globals because module-level globals are their own references, so we
# can't touch this module's global name bindings from a client module.
def METHOD_NAME() -> Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]:
"""
Return a snapshot of the plugin registry that can be restored to remove
added plugins. Useful for testing the plugin system in-process with other
tests.
"""
snapshot = (list(BATCH_SYSTEMS), dict(BATCH_SYSTEM_FACTORY_REGISTRY))
return snapshot
def restore_batch_system_plugin_state(snapshot: Tuple[List[str], Dict[str, Callable[[], Type['AbstractBatchSystem']]]]):
"""
Restore the batch system registry state to a snapshot from
save_batch_system_plugin_state().
"""
# We need to apply the snapshot without rebinding the names, because that
# won't affect modules that imported the names.
wanted_batch_systems, wanted_registry = snapshot
BATCH_SYSTEMS.clear()
BATCH_SYSTEMS.extend(wanted_batch_systems)
BATCH_SYSTEM_FACTORY_REGISTRY.clear()
BATCH_SYSTEM_FACTORY_REGISTRY.update(wanted_registry) | null |
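# Hedged usage sketch, not part of the module above: registering a
# workflow-supplied batch system and rolling the registry back afterwards.
# MyBatchSystem and mypackage.batch are hypothetical names standing in for a
# real AbstractBatchSystem subclass.
def my_batch_system_factory():
    from mypackage.batch import MyBatchSystem  # hypothetical import
    return MyBatchSystem

snapshot = (list(BATCH_SYSTEMS), dict(BATCH_SYSTEM_FACTORY_REGISTRY))
addBatchSystemFactory('my_batch_system', my_batch_system_factory)
assert 'my_batch_system' in BATCH_SYSTEMS

restore_batch_system_plugin_state(snapshot)
assert 'my_batch_system' not in BATCH_SYSTEMS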
393 | #/*##########################################################################
# Copyright (C) 2004-2022 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
#############################################################################*/
__author__ = "V.A. Sole - ESRF"
__contact__ = "[email protected]"
__license__ = "MIT"
__copyright__ = "European Synchrotron Radiation Facility, Grenoble, France"
__doc__ = """
This plugin opens a plot window with a browser to browse all images in
the stack.
An averaging filter with a configurable width is provided, to display an
average of several consecutive frames rather than a single frame.
The plot has also mask tools synchronized with the mask in the primary
window.
"""
import logging
from PyMca5 import StackPluginBase
from PyMca5.PyMcaGui.pymca import StackBrowser
from PyMca5.PyMcaGui import PyMca_Icons
_logger = logging.getLogger(__name__)
class StackBrowserPlugin(StackPluginBase.StackPluginBase):
def __init__(self, stackWindow, **kw):
if _logger.getEffectiveLevel() == logging.DEBUG:
StackPluginBase.pluginBaseLogger.setLevel(logging.DEBUG)
StackPluginBase.StackPluginBase.__init__(self, stackWindow, **kw)
self.methodDict = {'Show':[self.METHOD_NAME,
"Show Stack Image Browser",
PyMca_Icons.brushselect]}
self.__methodKeys = ['Show']
self.widget = None
def stackUpdated(self):
_logger.debug("StackBrowserPlugin.stackUpdated() called")
if self.widget is None:
return
if self.widget.isHidden():
return
stack = self.getStackDataObject()
self.widget.setStackDataObject(stack, stack_name="Stack Index")
self.widget.setBackgroundImage(self._getBackgroundImage())
mask = self.getStackSelectionMask()
self.widget.setSelectionMask(mask)
def _getBackgroundImage(self):
images, names = self.getStackROIImagesAndNames()
B = None
for key in names:
if key.endswith("ackground"):
B = images[names.index(key)]
return B
def selectionMaskUpdated(self):
if self.widget is None:
return
if self.widget.isHidden():
return
mask = self.getStackSelectionMask()
self.widget.setSelectionMask(mask)
def stackROIImageListUpdated(self):
if self.widget is None:
return
self.widget.setBackgroundImage(self._getBackgroundImage())
def mySlot(self, ddict):
_logger.debug("mySlot %s %s", ddict['event'], ddict.keys())
if ddict['event'] == "selectionMaskChanged":
self.setStackSelectionMask(ddict['current'])
elif ddict['event'] == "addImageClicked":
self.addImage(ddict['image'], ddict['title'])
elif ddict['event'] == "removeImageClicked":
self.removeImage(ddict['title'])
elif ddict['event'] == "replaceImageClicked":
self.replaceImage(ddict['image'], ddict['title'])
elif ddict['event'] == "resetSelection":
self.setStackSelectionMask(None)
#Methods implemented by the plugin
def getMethods(self):
return self.__methodKeys
def getMethodToolTip(self, name):
return self.methodDict[name][1]
def getMethodPixmap(self, name):
return self.methodDict[name][2]
def applyMethod(self, name):
return self.methodDict[name][0]()
def METHOD_NAME(self):
if self.widget is None:
self.widget = StackBrowser.StackBrowser(parent=None,
rgbwidget=None,
selection=True,
colormap=True,
imageicons=True,
standalonesave=True,
profileselection=True)
self.widget.setSelectionMode(True)
qt = StackBrowser.qt
self.widget.sigMaskImageWidgetSignal.connect(self.mySlot)
#Show
self.widget.show()
self.widget.raise_()
#update
self.stackUpdated()
MENU_TEXT = "Stack Image Browser"
def getStackPluginInstance(stackWindow, **kw):
ob = StackBrowserPlugin(stackWindow)
return ob | null |
394 | #!/usr/local/autopkg/python
# pylint: disable = invalid-name
'''
Copyright (c) 2023, dataJAR Ltd. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither data JAR Ltd nor the names of its contributors may be used to
endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY DATA JAR LTD 'AS IS' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL DATA JAR LTD BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
SUPPORT FOR THIS PROGRAM
This program is distributed 'as is' by DATA JAR LTD.
For more information or support, please utilise the following resources:
http://www.datajar.co.uk
DESCRIPTION
See docstring for FirefoxGetLocaleAndVersion class
'''
# Standard imports
import configparser
import glob
import os
import plistlib
# AutoPkg imports
# pylint: disable = import-error
from autopkglib.DmgMounter import DmgMounter
from autopkglib import ProcessorError
__all__ = ['FirefoxGetLocaleAndVersion']
__version__ = '1.0'
# pylint: disable = too-few-public-methods
class FirefoxGetLocaleAndVersion(DmgMounter):
'''
Returns the locale and version of the Firefox.app passed to dmg_path
Raising if Firefox.app not located at dmg_path.
Based off of:
https://github.com/autopkg/autopkg/blob/master/Code/autopkglib/AppDmgVersioner.py#L69-L86
'''
description = __doc__
input_variables = {
'choosen_locale': {
'required': True,
'description': ('Value of LOCALE in the override.'),
},
'dmg_path': {
'required': True,
'description': ('Path to the downloaded DMG.'),
}
}
output_variables = {
'app_locale': {
'description': ('Locale of the .app.'),
},
'app_version': {
'description': ('Version of the .app.'),
},
}
def METHOD_NAME(self, path):
'''
Find app bundle at path
'''
# Look for any .app in the mounted dmg
apps = glob.glob(os.path.join(path, "*.app"))
# Raise if no .app found
if len(apps) == 0:
raise ProcessorError("No app found in dmg")
# Return 1st found .app only
return apps[0]
def main(self):
'''
See docstring for the FirefoxGetLocaleAndVersion class
'''
# Mount the image.
mount_point = self.mount(self.env["dmg_path"])
# Wrap all other actions in a try/finally so the image is always
# unmounted.
try:
            # Get the path to the .app in the DMG, raise if no .app found
app_path = self.METHOD_NAME(mount_point)
self.output(f"app_path = {app_path}")
            # Get the path to locale.ini; if it doesn't exist and LOCALE is en-US we're good
app_locale_ini = os.path.join(app_path, 'Contents/Resources/locale.ini')
self.output(f"Looking for {app_locale_ini}...")
# Get the .app's locale, if app_locale_ini exists
if os.path.exists(app_locale_ini):
# Progress notification
self.output(f"Found: {app_locale_ini}...")
# Try Read in the locale, raise if cannot be parsed
try:
# Create confgparser object
app_config = configparser.ConfigParser()
app_config.read(app_locale_ini)
# Setting app_locale
self.env['app_locale'] = app_config['locale']['locale']
# Raise if app_locale cannot be retrieved from app_locale_ini
except Exception as locale_parse_error:
raise ProcessorError("Cannot determine app_locale") from locale_parse_error
            # en-US doesn't have an app_locale_ini, so if it's the selected locale
elif self.env["choosen_locale"] == 'en-US':
# Setting app_locale
self.env['app_locale'] = 'en-US'
self.output(f"Setting app_locale to \"en-US\", as {app_locale_ini} does "
f"not exist for the \"en-US\" locale")
# Raise if we can't find app_locale_ini and choosen_locale isn't en-US
else:
raise ProcessorError(f"Cannot find {app_locale_ini}")
# Progress notification
self.output(f"app_locale: {self.env['app_locale']}")
# Now we need to get the version
app_info_plist = os.path.join(app_path, 'Contents/Info.plist')
# If the info.plist exists
if os.path.exists(app_info_plist):
# Try to read in app_info_plist, raise if cannot be parsed
try:
# Read in the plist
with open(app_info_plist, "rb") as plist_file:
parsed_plist = plistlib.load(plist_file)
# Get version from info.plist
self.env['app_version'] = parsed_plist['CFBundleShortVersionString']
self.output(f"app_version: {self.env['app_version']}")
# Raising if plist cannot be parsed or version determined from plist
except Exception as info_plist_error:
raise ProcessorError(f"Cannot parse {app_info_plist}") from info_plist_error
# Raise if we can't find app_info_plist
else:
raise ProcessorError(f"Cannot find {app_info_plist}")
# Unmount the dmg
finally:
self.output("unmounting...")
self.unmount(self.env["dmg_path"])
if __name__ == '__main__':
PROCESSOR = FirefoxGetLocaleAndVersion() | null |
395 | import logging
import os
import subprocess
import time
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, Dict, List, Mapping, Optional
from fastapi import FastAPI, HTTPException
from meerkat.interactive.server import Server
from meerkat.tools.utils import WeakMapping
if TYPE_CHECKING:
from meerkat.interactive.modification import Modification
from meerkat.mixins.identifiable import IdentifiableMixin
logger = logging.getLogger(__name__)
@dataclass
class Secrets:
api_keys: Dict[str, str] = field(default_factory=dict)
def add(self, api: str, api_key: str):
self.api_keys[api] = api_key
def get(self, api: str):
try:
return self.api_keys[api]
except KeyError:
raise HTTPException(
status_code=404,
                detail=f"No API key found for {api}. "
                "Add one with `secrets.add(api, api_key)`.",
)
@dataclass
class LanguageModel:
manifest: Any = None
def set(self, client: str = "ai21", engine: str = "j1-jumbo"):
from manifest import Manifest
self.manifest = Manifest(
client_name=client,
client_connection=state.secrets.get(client),
engine=engine,
cache_name="sqlite",
cache_connection="./logs",
)
def get(self):
return self.manifest
@dataclass
class APIInfo:
api: Optional[FastAPI]
port: Optional[int]
server: Optional[Server] = None
name: str = "localhost"
shared: bool = False
process: Optional[subprocess.Popen] = None
_url: Optional[str] = None
@property
def url(self):
if self._url:
return self._url
if self.shared:
return f"http://{self.name}"
return f"http://{self.name}:{self.port}"
@property
def METHOD_NAME(self):
return f"{self.url}/docs"
@property
def docs(self):
from IPython.display import IFrame
return IFrame(self.METHOD_NAME, width=800, height=600)
@dataclass
class FrontendInfo:
package_manager: Optional[str]
port: Optional[int]
name: str = "localhost"
shared: bool = False
process: Optional[subprocess.Popen] = None
_url: Optional[str] = None
@property
def url(self):
if self._url:
return self._url
if self.shared:
return f"http://{self.name}"
return f"http://{self.name}:{self.port}"
@dataclass
class Identifiables:
"""We maintain a separate group for each type of identifiable object.
Objects in the group are identified by a unique id.
"""
columns: WeakMapping = field(default_factory=WeakMapping)
dataframes: WeakMapping = field(default_factory=WeakMapping)
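    # `pages` uses a plain dict (strong references); the other groups use WeakMapping so entries can be garbage collected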
pages: Mapping = field(default_factory=dict)
slicebys: WeakMapping = field(default_factory=WeakMapping)
aggregations: WeakMapping = field(default_factory=WeakMapping)
box_operations: WeakMapping = field(default_factory=WeakMapping)
components: WeakMapping = field(default_factory=WeakMapping)
refs: WeakMapping = field(default_factory=WeakMapping)
stores: WeakMapping = field(default_factory=WeakMapping)
endpoints: WeakMapping = field(default_factory=WeakMapping)
routers: WeakMapping = field(default_factory=WeakMapping)
nodes: WeakMapping = field(default_factory=WeakMapping)
states: WeakMapping = field(default_factory=WeakMapping)
def add(self, obj: "IdentifiableMixin"):
group = getattr(self, obj.identifiable_group)
group[obj.id] = obj
def get(self, id: str, group: str):
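        # Resolve the group name to its mapping, keeping the name around for the error message below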
group, group_name = getattr(self, group), group
try:
value = group[id]
except KeyError:
raise HTTPException(
status_code=404,
detail=f"No object in group '{group_name}' with id '{id}'",
)
return value
@dataclass
class ModificationQueue:
"""A queue of modifications to be applied to a dataframe."""
queue: List["Modification"] = field(default_factory=list)
# Boolean attribute that controls whether the queue is accepting new
# modifications
# When _ready is False, `add` will no-op
_ready: bool = False
def add(self, modification: "Modification"):
if self._ready:
logger.debug(f"Adding modification {modification} to queue.")
self.queue.append(modification)
return
# Do nothing if not ready
logger.debug(f"Modification queue not ready. Ignoring {modification}.")
def clear(self) -> List["Modification"]:
"""Clear the modification queue, and return the old queue."""
logger.debug("Clearing modification queue.")
current_queue = self.queue
self.queue = []
return current_queue
def ready(self):
"""Ready the queue for accepting new modifications."""
count = 0
while self._ready:
# Modification queue is already in use
# Wait for it to be unready
logger.debug("Modification queue is already in use. Waiting...")
time.sleep(0.1)
count += 1
            if count == 100:  # warn after ~10 seconds of waiting (0.1 s per iteration)
                logger.warning(
                    "Modification queue is taking a long time to unready. "
                    "Check for deadlocks."
                )
self._ready = True
logger.debug("Modification queue is now ready.")
def unready(self):
"""Unready the queue for accepting new modifications."""
self._ready = False
logger.debug("Modification queue is now unready.")
@dataclass
class ProgressQueue:
"""A queue of progress messages to be displayed to the user."""
queue: list = field(default_factory=list)
def add(self, message: str):
self.queue.append(message)
def clear(self) -> list:
"""Clear the progress queue, and return the old queue."""
current_queue = self.queue
self.queue = []
return current_queue
@dataclass
class GlobalState:
api_info: Optional[APIInfo] = None
frontend_info: Optional[FrontendInfo] = None
identifiables: Identifiables = field(default_factory=Identifiables)
secrets: Secrets = field(default_factory=Secrets)
llm: LanguageModel = field(default_factory=LanguageModel)
modification_queue: ModificationQueue = field(default_factory=ModificationQueue)
progress_queue: ProgressQueue = field(default_factory=ProgressQueue)
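# Module-level singleton holding all interactive state for this process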
global state
state = GlobalState()
def add_secret(api: str, api_key: str):
"""Add an API key to the global state."""
state.secrets.add(api, api_key)
def run_on_startup():
"""Run on startup."""
frontend_url = os.environ.get("MEERKAT_FRONTEND_URL", None)
if frontend_url:
state.frontend_info = FrontendInfo(None, None, _url=frontend_url)
api_url = os.environ.get("MEERKAT_API_URL", None)
if api_url:
state.api_info = APIInfo(None, None, _url=api_url)
run_on_startup() | null |
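# A minimal usage sketch (assumption: this module is importable as `meerkat.state`):
#   from meerkat.state import state, add_secret
#   add_secret("ai21", "<api-key>")   # register an API key with the global state
#   state.llm.set(client="ai21")      # build a Manifest LLM client using that key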
396 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkidaas_doraemon.endpoint import endpoint_data
class VerifyUserAuthenticationRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'idaas-doraemon', '2021-05-20', 'VerifyUserAuthentication')
self.set_protocol_type('https')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_LogParams(self): # String
return self.get_query_params().get('LogParams')
def set_LogParams(self, LogParams): # String
self.add_query_param('LogParams', LogParams)
def METHOD_NAME(self): # String
return self.get_query_params().get('ClientExtendParamsJson')
def set_ClientExtendParamsJson(self, ClientExtendParamsJson): # String
self.add_query_param('ClientExtendParamsJson', ClientExtendParamsJson)
def get_UserId(self): # String
return self.get_query_params().get('UserId')
def set_UserId(self, UserId): # String
self.add_query_param('UserId', UserId)
def get_LogTag(self): # String
return self.get_query_params().get('LogTag')
def set_LogTag(self, LogTag): # String
self.add_query_param('LogTag', LogTag)
def get_ServerExtendParamsJson(self): # String
return self.get_query_params().get('ServerExtendParamsJson')
def set_ServerExtendParamsJson(self, ServerExtendParamsJson): # String
self.add_query_param('ServerExtendParamsJson', ServerExtendParamsJson)
def get_RequireBindHashBase64(self): # String
return self.get_query_params().get('RequireBindHashBase64')
def set_RequireBindHashBase64(self, RequireBindHashBase64): # String
self.add_query_param('RequireBindHashBase64', RequireBindHashBase64)
def get_AuthenticationContext(self): # String
return self.get_query_params().get('AuthenticationContext')
def set_AuthenticationContext(self, AuthenticationContext): # String
self.add_query_param('AuthenticationContext', AuthenticationContext)
def get_RequireChallengeBase64(self): # String
return self.get_query_params().get('RequireChallengeBase64')
def set_RequireChallengeBase64(self, RequireChallengeBase64): # String
self.add_query_param('RequireChallengeBase64', RequireChallengeBase64)
def get_AuthenticatorType(self): # String
return self.get_query_params().get('AuthenticatorType')
def set_AuthenticatorType(self, AuthenticatorType): # String
self.add_query_param('AuthenticatorType', AuthenticatorType)
def get_ClientExtendParamsJsonSign(self): # String
return self.get_query_params().get('ClientExtendParamsJsonSign')
def set_ClientExtendParamsJsonSign(self, ClientExtendParamsJsonSign): # String
self.add_query_param('ClientExtendParamsJsonSign', ClientExtendParamsJsonSign)
def get_UserSourceIp(self): # String
return self.get_query_params().get('UserSourceIp')
def set_UserSourceIp(self, UserSourceIp): # String
self.add_query_param('UserSourceIp', UserSourceIp)
def get_ApplicationExternalId(self): # String
return self.get_query_params().get('ApplicationExternalId')
def set_ApplicationExternalId(self, ApplicationExternalId): # String
self.add_query_param('ApplicationExternalId', ApplicationExternalId) | null |
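# A minimal usage sketch (assumes aliyunsdkcore's AcsClient and valid credentials; the values below are placeholders):
#   from aliyunsdkcore.client import AcsClient
#   client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
#   request = VerifyUserAuthenticationRequest()
#   request.set_UserId('<user-id>')
#   request.set_AuthenticatorType('<authenticator-type>')
#   response = client.do_action_with_exception(request)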
397 | # This script compares an on-going release notes file with published release notes files.
# If the on-going release notes file contains a note that duplicates a published one, the script reports that note and replaces it with the published version.
import re, os
from tempfile import mkstemp
from shutil import move
from os import remove
# Collect the issue and PR numbers that already appear in the published release notes
def METHOD_NAME(ext_path, main_path):
exst_notes = []
exst_issue_nums = []
exst_note_levels = []
for maindir, subdir, files in os.walk(ext_path):
for afile in files:
file_path = (os.path.join(maindir, afile))
if file_path.endswith('.md') and not os.path.samefile(file_path,main_path):
with open(file_path,'r', encoding='utf-8') as fp:
level1 = level2 = level3 = ""
for line in fp:
exst_issue_num = re.search(r'https://github.com/(pingcap|tikv)/\w+/(issues|pull)/\d+', line)
if exst_issue_num:
if exst_issue_num.group() not in exst_issue_nums:
note_level = level1 + level2 + level3
note_pair = [exst_issue_num.group(),line,afile, note_level]
exst_issue_nums.append(exst_issue_num.group())
exst_notes.append(note_pair)
else:
continue
elif line.startswith("##"):
level1 = "> " + line.replace("##","").strip()
level2 = level3 = ""
elif line.startswith ("+") or line.startswith ("-"):
level2 = "> " + line.replace("+","").replace("-","").strip()
level3 = ""
elif line.startswith (" +") or line.startswith (" -"):
level3 = "> " + line.replace(" +","").replace(" -","").strip()
else:
continue
else:
pass
if len(exst_issue_nums) != 0:
return exst_notes
else:
return 0
# Check whether the issue and PR numbers in the release notes being prepared duplicate published ones; if so, replace those notes with the published versions
def check_exst_rn(note_pairs, main_path):
DupNum = 0
NoteNum = 0
target_file_path = mkstemp()[1]
source_file_path = main_path
with open(target_file_path, 'w', encoding='utf-8') as target_file:
with open(source_file_path, 'r', encoding='utf-8') as source_file:
LineNum = 0
for line in source_file:
LineNum += 1
                issue_num = re.search(r'https://github.com/(pingcap|tikv)/\w+/(issues|pull)/\d+', line)
if issue_num:
NoteNum +=1
for note_pair in note_pairs:
if issue_num.group() == note_pair[0] and not line.strip().startswith("(dup"):
print('A duplicated note is found in line ' + str(LineNum) + " from " + note_pair[2] + note_pair[1])
match = re.fullmatch(r'(\s*)(?:- .+?)( @.+?)?\s*', line)
if match:
line = '{}(dup: {} {}){}{}\n'.format(match.group(1), note_pair[2], note_pair[3], note_pair[1].strip(), match.group(2) or "")
print('The duplicated note is replaced with ' + line)
DupNum += 1
else:
continue
break
target_file.write(line)
remove(source_file_path)
move(target_file_path, source_file_path)
    DupRate = "%.0f%%" % (DupNum / NoteNum * 100) if NoteNum else "0%" # Calculate the duplication rate of the release notes
print (str(DupNum) + " duplicated notes are found in " + str(NoteNum) + " notes. The duplicated rate is " + str(DupRate) + ".")
if __name__ == "__main__":
    ext_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases' # Folder containing the published release notes
    main_path = r'/Users/aaa/Documents/GitHub/githubid/docs/releases/release-5.3.1.md' # Path to the release notes document currently being prepared
note_pairs = METHOD_NAME(ext_path,main_path)
check_exst_rn(note_pairs, main_path) | null |
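# Usage note: update ext_path and main_path above to point at your local release notes, then run this script with Python 3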
398 | # -*- coding: utf-8 -*-
import re
import hmac
import hashlib
import logging
from django.apps import apps
from nameparser import HumanName
from werkzeug.utils import cached_property
from framework.flask import request
from website import settings
from website.conferences.exceptions import ConferenceError
logger = logging.getLogger(__name__)
SSCORE_MAX_VALUE = 5
DKIM_PASS_VALUES = ['Pass']
SPF_PASS_VALUES = ['Pass', 'Neutral']
ANGLE_BRACKETS_REGEX = re.compile(r'<(.*?)>')
BASE_REGEX = r"""
(?P<test>(test|stage)(\d*)-)?
(?P<meeting>\w*?)
-
(?P<category>{allowed_types})
@osf\.io
"""
class ConferenceMessage(object):
def __init__(self):
self.request = request._get_current_object()
def verify(self):
self.verify_signature()
_ = [self.sender_email, self.route] # noqa
def verify_signature(self):
"""Verify that request comes from Mailgun. Based on sample code from
http://documentation.mailgun.com/user_manual.html#webhooks
"""
signature = hmac.new(
key=settings.MAILGUN_API_KEY.encode(),
msg='{}{}'.format(
self.form['timestamp'],
self.form['token'],
).encode(),
digestmod=hashlib.sha256,
).hexdigest()
if signature != self.form['signature']:
raise ConferenceError('Invalid headers on incoming mail')
@cached_property
def is_spam(self):
"""Check SSCORE, DKIM, and SPF headers for spam.
See http://documentation.mailgun.com/user_manual.html#spam-filter for
details.
:return: At least one header indicates spam
"""
try:
# Mailgun only inserts score headers for messages checked for spam.
sscore_header = float(self.form.get('X-Mailgun-Sscore', 0))
except (TypeError, ValueError):
return True
dkim_header = self.form.get('X-Mailgun-Dkim-Check-Result')
spf_header = self.form.get('X-Mailgun-Spf')
return (
(sscore_header and sscore_header > SSCORE_MAX_VALUE) or
(dkim_header and dkim_header not in DKIM_PASS_VALUES) or
(spf_header and spf_header not in SPF_PASS_VALUES)
)
@cached_property
def form(self):
return self.request.form
@cached_property
def raw(self):
return {
'headers': dict(self.request.headers),
'form': self.request.form.to_dict(),
'args': self.request.args.to_dict(),
}
@cached_property
def subject(self):
subject = self.form['subject']
subject = re.sub(r'^re:', '', subject, flags=re.I)
subject = re.sub(r'^fwd:', '', subject, flags=re.I)
return subject.strip()
@cached_property
def METHOD_NAME(self):
return self.form['recipient']
@cached_property
def text(self):
# Not included if there is no message body
# https://documentation.mailgun.com/user_manual.html#routes
return self.form.get('stripped-text', '')
@cached_property
def sender(self):
return self.form['from']
@cached_property
def sender_name(self):
if '<' in self.sender:
            # sender format: "some name" <user@example.com>
name = ANGLE_BRACKETS_REGEX.sub('', self.sender)
name = name.strip().replace('"', '')
else:
            # sender format: user@example.com
name = self.sender
return str(HumanName(name))
@cached_property
def sender_email(self):
match = ANGLE_BRACKETS_REGEX.search(self.sender)
if match:
            # sender format: "some name" <user@example.com>
return match.groups()[0].lower().strip()
elif '@' in self.sender:
            # sender format: user@example.com
return self.sender.lower().strip()
raise ConferenceError('Could not extract sender email')
@cached_property
def sender_display(self):
return self.sender_name or self.sender_email.split('@')[0]
@cached_property
def route(self):
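        # Recipient addresses look like '<meeting>-<category>@osf.io', optionally prefixed with 'test<N>-' or 'stage<N>-' (see BASE_REGEX)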
match = re.search(re.compile(BASE_REGEX.format(allowed_types=(self.allowed_types or 'poster|talk')), re.IGNORECASE | re.VERBOSE), self.form['recipient'])
if not match:
            raise ConferenceError('Invalid recipient: {0}'.format(self.form['recipient']))
data = match.groupdict()
if bool(settings.DEV_MODE) != bool(data['test']):
# NOTE: test.osf.io has DEV_MODE = False
if not data['test'] or (data['test'] and data['test'].rstrip('-') != 'test'):
raise ConferenceError(
'Mismatch between `DEV_MODE` and recipient {0}'.format(
self.form['recipient']
)
)
return data
@cached_property
def conference_name(self):
return self.route['meeting']
@cached_property
def conference_category(self):
return self.route['category']
@cached_property
def attachments(self):
count = self.form.get('attachment-count', 0)
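        # Mailgun posts uploaded files as 'attachment-1' .. 'attachment-N', with 'attachment-count' giving N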
try:
count = int(count)
except (TypeError, ValueError):
count = 0
return list(filter(
lambda value: value is not None,
list(map(
lambda idx: self.request.files.get('attachment-{0}'.format(idx + 1)),
list(range(count)),
)),
))
@property
def allowed_types(self):
Conference = apps.get_model('osf.Conference')
allowed_types = []
for field_names in Conference.objects.values_list('field_names', flat=True):
allowed_types.extend([field_names['submission1'], field_names['submission2']])
regex_types_allowed = '|'.join(set(allowed_types))
return regex_types_allowed | null |
399 | """Unit Test for otx.algorithms.action.adapters.mmaction.utils.config_utils."""
# Copyright (C) 2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
from typing import Any
import torch
from mmaction.models import Recognizer3D
from mmcv.runner import BaseModule
from mmcv.utils import Config
from torch import nn
from otx.algorithms.action.adapters.mmaction.models.detectors.fast_rcnn import (
AVAFastRCNN,
)
from otx.algorithms.action.adapters.mmaction.utils.export_utils import (
Exporter,
_convert_sync_batch_to_normal_batch,
)
from tests.test_suite.e2e_test_system import e2e_pytest_unit
class MockRecognizer3D(Recognizer3D, BaseModule):
"""Mock class for Recognizer3D."""
def __init__(self) -> None:
super(BaseModule, self).__init__()
def forward(self, inputs: Any) -> str:
return "Forward function is replaced!"
def load_state_dict(self, weights) -> Recognizer3D:
pass
class MockAVAFastRCNN(AVAFastRCNN):
"""Mock class for AVAFastRCNN."""
def __init__(self) -> None:
super(BaseModule, self).__init__()
self.deploy_cfg = None
def METHOD_NAME(self) -> None:
pass
def forward_infer(self, inputs: Any, img_metas: Any) -> str:
return "Forward function is replaced!"
def load_state_dict(self, weights) -> AVAFastRCNN:
pass
def _mock_sync_batchnorm(inputs):
"""Mock function for _sync_batch_to_normal_batch function.
It returns its inputs
"""
return inputs
@e2e_pytest_unit
def test_convert_sync_batch_to_normal_batch() -> None:
"""Test _convert_sync_batch_to_normal_batch function.
<Steps>
1. Create sample module, which has some Conv3D, SyncBatchNorm, BatchNorm3d ops
2. Run _convert_sync_batch_to_normal_batch function to sample module
3. Check SyncBatchNorm is changed into BatchNorm3d
4. Check the other ops don't affect by this function
"""
sample_module = nn.Sequential(
nn.Conv3d(100, 100, 3), nn.SyncBatchNorm(100), nn.Conv3d(100, 100, 3), nn.BatchNorm3d(100)
)
output_module = _convert_sync_batch_to_normal_batch(sample_module)
assert isinstance(output_module[0], nn.Conv3d)
assert isinstance(output_module[1], nn.BatchNorm3d)
assert isinstance(output_module[2], nn.Conv3d)
assert isinstance(output_module[3], nn.BatchNorm3d)
class MockTaskProcessor:
"""Mock class of task_processor."""
def __init__(self, model_cfg, deploy_cfg, device):
self.model_cfg = model_cfg
def init_pytorch_model(self, weights):
if self.model_cfg.model == "cls":
return MockRecognizer3D()
return MockAVAFastRCNN()
def mock_build_task_processor(model_cfg, deploy_cfg, device):
return MockTaskProcessor(model_cfg, deploy_cfg, device)
class TestExporter:
"""Test class for Exporter."""
@e2e_pytest_unit
def test_init(self, mocker) -> None:
"""Test __init__ function.
<Steps>
1. Create mock task_processor
2. Create mock Recognizer3D using task_processor
3. Get inputs
4. Create mock AVAFastRCNN using task_processor
5. Get inputs
6. Check mo options when half precision
"""
mocker.patch(
"otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor",
side_effect=mock_build_task_processor,
)
recipe_cfg = Config(dict(model="cls"))
deploy_cfg = Config(
dict(
backend_config=dict(
type="openvino",
mo_options={},
model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))],
)
)
)
exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False)
assert isinstance(exporter.model, Recognizer3D)
assert exporter.input_tensor.shape == torch.Size([1, 1, 3, 32, 224, 224])
assert exporter.input_metas is None
recipe_cfg = Config(dict(model="det"))
deploy_cfg = Config(
dict(
backend_config=dict(
type="openvino",
mo_options={},
model_inputs=[dict(opt_shapes=dict(input=[1, 3, 32, 224, 224]))],
)
)
)
exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False)
assert isinstance(exporter.model, AVAFastRCNN)
assert exporter.input_tensor.shape == torch.Size([1, 3, 32, 224, 224])
assert exporter.input_metas is not None
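        # With half precision enabled, the exporter should add the FP16 compression flag to the Model Optimizer options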
exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", True, False)
assert exporter.deploy_cfg.backend_config.mo_options["flags"] == ["--compress_to_fp16"]
@e2e_pytest_unit
def test_export(self, mocker) -> None:
"""Test export function."""
mocker.patch("otx.algorithms.action.adapters.mmaction.utils.export_utils.export", return_value=True)
mocker.patch("otx.algorithms.action.adapters.mmaction.utils.export_utils.from_onnx", return_value=True)
mocker.patch(
"otx.algorithms.action.adapters.mmaction.utils.export_utils.build_task_processor",
side_effect=mock_build_task_processor,
)
recipe_cfg = Config(dict(model="cls"))
deploy_cfg = Config(
dict(
backend_config=dict(
type="openvino",
mo_options={},
model_inputs=[dict(opt_shapes=dict(input=[1, 1, 3, 32, 224, 224]))],
),
ir_config=dict(input_names=["input"], output_names=["output"]),
)
)
exporter = Exporter(recipe_cfg, None, deploy_cfg, "./tmp_dir/openvino", False, False)
exporter.export() | null |