id (int64, 0-300k) | label (string, lengths 1-74, nullable) | text (string, lengths 4k-8k) |
---|---|---|
300 | export sources | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.microsoft import is_msvc_static_runtime, is_msvc
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rmdir, rm
from conan.tools.build import check_min_cppstd
from conan.tools.scm import Version
from conan.tools.cmake import CMake, CMakeDeps, CMakeToolchain, cmake_layout
import os
required_conan_version = ">=1.53.0"
class FTXUIConan(ConanFile):
name = "ftxui"
description = "C++ Functional Terminal User Interface."
license = "MIT"
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://github.com/ArthurSonzogni/FTXUI"
topics = ("ncurses", "terminal", "screen", "tui")
package_type = "library"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
@property
def _min_cppstd(self):
return 17
@property
def _compilers_minimum_version(self):
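# Minimum compiler releases this recipe assumes provide usable C++17 support;
# validate() below rejects anything older. The mapping reflects the recipe's
# policy rather than an exhaustive compatibility matrix.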
return {
"gcc": "8",
"clang": "7",
"apple-clang": "12",
"Visual Studio": "16",
"msvc": "192",
}
def METHOD_NAME(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
cmake_layout(self, src_folder="src")
def validate(self):
if self.settings.compiler.cppstd:
check_min_cppstd(self, self._min_cppstd)
minimum_version = self._compilers_minimum_version.get(str(self.settings.compiler), False)
if minimum_version and Version(self.settings.compiler.version) < minimum_version:
raise ConanInvalidConfiguration(
f"{self.ref} requires C++{self._min_cppstd}, which your compiler does not support."
)
if is_msvc(self) and self.options.shared and is_msvc_static_runtime(self):
raise ConanInvalidConfiguration("shared with static runtime not supported")
def source(self):
get(self, **self.conan_data["sources"][self.version], strip_root=True)
def generate(self):
# BUILD_SHARED_LIBS and POSITION_INDEPENDENT_CODE are automatically parsed when self.options.shared or self.options.fPIC exist
tc = CMakeToolchain(self)
tc.variables["CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS"] = True
tc.variables["FTXUI_BUILD_DOCS"] = False
tc.variables["FTXUI_BUILD_EXAMPLES"] = False
tc.generate()
tc = CMakeDeps(self)
tc.generate()
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build()
def package(self):
copy(self, pattern="LICENSE", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
if Version(self.version) >= "4.1.0":
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
if Version(self.version) >= "4.1.1":
rm(self, "ftxui.pc", os.path.join(self.package_folder, "lib"), )
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "ftxui")
if Version(self.version) >= "4.1.0":
self.cpp_info.set_property("pkg_config_name", "ftxui")
self.cpp_info.components["ftxui-dom"].set_property("cmake_target_name", "ftxui::dom")
self.cpp_info.components["ftxui-dom"].libs = ["ftxui-dom"]
self.cpp_info.components["ftxui-dom"].requires = ["ftxui-screen"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["ftxui-dom"].system_libs.append("m")
self.cpp_info.components["ftxui-screen"].set_property("cmake_target_name", "ftxui::screen")
self.cpp_info.components["ftxui-screen"].libs = ["ftxui-screen"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["ftxui-screen"].system_libs.append("m")
self.cpp_info.components["ftxui-component"].set_property("cmake_target_name", "ftxui::component")
self.cpp_info.components["ftxui-component"].libs = ["ftxui-component"]
self.cpp_info.components["ftxui-component"].requires = ["ftxui-dom"]
if self.settings.os in ["Linux", "FreeBSD"]:
self.cpp_info.components["ftxui-component"].system_libs.append("pthread")
# TODO: to remove in conan v2 once cmake_find_package* generators removed
self.cpp_info.components["ftxui-dom"].names["cmake_find_package"] = "dom"
self.cpp_info.components["ftxui-dom"].names["cmake_find_package_multi"] = "dom"
self.cpp_info.components["ftxui-screen"].names["cmake_find_package"] = "screen"
self.cpp_info.components["ftxui-screen"].names["cmake_find_package_multi"] = "screen"
self.cpp_info.components["ftxui-component"].names["cmake_find_package"] = "component"
self.cpp_info.components["ftxui-component"].names["cmake_find_package_multi"] = "component" |
301 | mma schedule | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-outside-toplevel, unused-variable
"""Common utility functions in TVM tir"""
def METHOD_NAME(
workload,
k_inner,
in_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mma_intrin,
mma_fill_intrin,
mma_store_intrin,
shared_scope="shared",
):
"""Create a tensorized schedule for GEMM with MMA intrinsics."""
import tvm # pylint: disable=import-outside-toplevel
ir_module = tvm.IRModule({"main": workload})
sch = tvm.tir.Schedule(ir_module)
block = sch.get_block("C")
i, j, k = sch.get_loops(block)
i, i_tc = sch.split(i, factors=[None, 16])
j, j_tc = sch.split(j, factors=[None, 16])
k, k_tc = sch.split(k, factors=[None, k_inner])
sch.reorder(i, j, k, i_tc, j_tc, k_tc)
block_inner = sch.blockize(i_tc)
block_outer, block_inner = block_inner, block
num_ty = i_factors[2] * j_factors[2]
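# num_ty is the extent of threadIdx.y: j2 and i2 (the third split factors) are
# fused and bound to threadIdx.y below, and fetch_to_shared splits its copy loop
# by the same value.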
i0, i1, i2, i3, i4 = sch.split(i, factors=i_factors)
j0, j1, j2, j3, j4 = sch.split(j, factors=j_factors)
k0, k1, k2 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3, k2, i4, j4)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0)
vector_size = 16 if in_dtype == "int8" else 8
warp_size = 32
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vector_size])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
offset = 8 if in_dtype == "float16" else 16
sch.storage_align(block_read, 0, axis=-2, factor=32, offset=offset)
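# Pad each row of the shared-memory tile (factor 32 plus a dtype-dependent
# offset); presumably this avoids bank conflicts when the warp-level loads read
# the tile, though the original gives no explicit rationale.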
return block_read
fetch_to_shared(block_outer, 0, 2)
fetch_to_shared(block_outer, 1, 2)
A_warp = sch.cache_read(block_outer, 0, "warp")
B_warp = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_warp, k1)
sch.compute_at(B_warp, k1)
C_warp = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(C_warp, thread_idy)
ii, jj = sch.get_loops(C_warp)[-2:]
io, ii = sch.split(ii, factors=[None, 16])
jo, ji = sch.split(jj, factors=[None, 16])
sch.reorder(io, jo, ii, ji)
sch.decompose_reduction(block_outer, sch.get_loops(block_outer)[3])
block_init_c = sch.get_block("C_init")
def tile_wmma_fragment(block_read, height, width):
i, j = sch.get_loops(block_read)[-2:]
i0, i1 = sch.split(i, factors=[None, height])
j0, j1 = sch.split(j, factors=[None, width])
sch.reorder(i0, j0, i1, j1)
return i1
loop_a = tile_wmma_fragment(A_warp, 16, k_inner)
if b_transposed:
loop_b = tile_wmma_fragment(B_warp, 16, k_inner)
else:
loop_b = tile_wmma_fragment(B_warp, k_inner, 16)
sch.transform_layout(A_warp, ("write", 0), index_map_A)
sch.transform_layout(B_warp, ("write", 0), index_map_B)
sch.transform_layout(C_warp, ("read", 0), index_map_C)
sch.tensorize(loop_a, ldmatrix_a_intrin)
sch.tensorize(loop_b, ldmatrix_b_intrin)
sch.tensorize(sch.get_loops(block_inner)[-3], mma_intrin)
sch.tensorize(sch.get_loops(block_init_c)[-2], mma_fill_intrin)
sch.tensorize(sch.get_loops(C_warp)[-2], mma_store_intrin)
return sch
def mfma_schedule(
workload,
k_inner,
in_dtype,
b_transposed,
i_factors,
j_factors,
k_factors,
index_map_A,
index_map_B,
index_map_C,
ldmatrix_a_intrin,
ldmatrix_b_intrin,
mfma_intrin,
mfma_fill_intrin,
mfma_store_intrin,
shared_scope="shared",
):
"""Create a tensorized schedule for GEMM with MFMA intrinsics."""
import tvm
ir_module = tvm.IRModule({"main": workload})
sch = tvm.tir.Schedule(ir_module)
wmma_m = 16
wmma_n = 16
wmma_k = k_inner
warp_size = 64
block = sch.get_block("C")
i, j, k = sch.get_loops(block)
i, i_tc = sch.split(i, factors=[None, wmma_m])
j, j_tc = sch.split(j, factors=[None, wmma_n])
k, k_tc = sch.split(k, factors=[None, wmma_k])
sch.reorder(i, j, k, i_tc, j_tc, k_tc)
block_inner = sch.blockize(i_tc)
block_outer, block_inner = block_inner, block
num_ty = i_factors[2] * j_factors[2]
i0, i1, i2, i3, i4 = sch.split(i, factors=i_factors)
j0, j1, j2, j3, j4 = sch.split(j, factors=j_factors)
k0, k1, k2 = sch.split(k, k_factors)
sch.reorder(i0, j0, i1, j1, j2, i2, k0, k1, i3, j3, k2, i4, j4)
block_idx = sch.fuse(i0, j0)
block_idy = sch.fuse(i1, j1)
thread_idy = sch.fuse(j2, i2)
sch.bind(block_idx, "blockIdx.x")
sch.bind(block_idy, "blockIdx.y")
sch.bind(thread_idy, "threadIdx.y")
def fetch_to_shared(block, idx, ndim):
block_read = sch.cache_read(block, idx, shared_scope)
sch.compute_at(block_read, k0)
vector_size = 16 if in_dtype == "int8" else 8
fused = sch.fuse(*sch.get_loops(block_read)[-ndim:])
_, f_1, f_2, f_3 = sch.split(fused, factors=[None, num_ty, warp_size, vector_size])
sch.bind(f_2, "threadIdx.x")
sch.bind(f_1, "threadIdx.y")
sch.vectorize(f_3)
return block_read
fetch_to_shared(block_outer, 0, 2)
fetch_to_shared(block_outer, 1, 2)
A_warp = sch.cache_read(block_outer, 0, "warp")
B_warp = sch.cache_read(block_outer, 1, "warp")
sch.compute_at(A_warp, k1)
sch.compute_at(B_warp, k1)
C_warp = sch.cache_write(block_outer, 0, "warp")
sch.reverse_compute_at(C_warp, thread_idy)
ii, jj = sch.get_loops(C_warp)[-2:]
io, ii = sch.split(ii, factors=[None, 16])
jo, ji = sch.split(jj, factors=[None, 16])
sch.reorder(io, jo, ii, ji)
sch.decompose_reduction(block_outer, sch.get_loops(block_outer)[3])
block_init_c = sch.get_block("C_init")
def tile_wmma_fragment(block_read, height, width):
i, j = sch.get_loops(block_read)[-2:]
i0, i1 = sch.split(i, factors=[None, height])
j0, j1 = sch.split(j, factors=[None, width])
sch.reorder(i0, j0, i1, j1)
return i1
loop_a = tile_wmma_fragment(A_warp, 16, k_inner)
if b_transposed:
loop_b = tile_wmma_fragment(B_warp, 16, k_inner)
else:
loop_b = tile_wmma_fragment(B_warp, k_inner, 16)
sch.transform_layout(A_warp, ("write", 0), index_map_A)
sch.transform_layout(B_warp, ("write", 0), index_map_B)
sch.transform_layout(C_warp, ("read", 0), index_map_C)
sch.tensorize(loop_a, ldmatrix_a_intrin)
sch.tensorize(loop_b, ldmatrix_b_intrin)
sch.tensorize(sch.get_loops(block_inner)[-3], mfma_intrin)
sch.tensorize(sch.get_loops(block_init_c)[-2], mfma_fill_intrin)
sch.tensorize(sch.get_loops(C_warp)[-2], mfma_store_intrin)
return sch |
302 | test pgbouncer certgenerator with custom registry secret | import jmespath
import pytest
from tests.chart_tests.helm_template_generator import render_chart
from .. import supported_k8s_versions
expected_rbac = {
"apiGroups": [""],
"resources": ["secrets"],
"verbs": ["get", "watch", "list", "create", "patch", "delete"],
}
@pytest.mark.parametrize("kube_version", supported_k8s_versions)
class TestPgbouncersslFeature:
def test_pgbouncer_certgenerator_defaults(self, kube_version):
"""Test pgbouncer cert generator defaults."""
docs = render_chart(
kube_version=kube_version,
values={},
show_only="templates/generate-ssl.yaml",
)
assert len(docs) == 0
def test_pgbouncer_certgenerator_with_sslmode_enabled(self, kube_version):
"""Test pgbouncer certgenerator sslmode opts result."""
docs = render_chart(
kube_version=kube_version,
values={"airflow": {"pgbouncer": {"enabled": True, "sslmode": "require"}}},
show_only="templates/generate-ssl.yaml",
)
assert len(docs) == 4
assert "ServiceAccount" == jmespath.search("kind", docs[0])
assert "Role" == jmespath.search("kind", docs[1])
assert expected_rbac in docs[1]["rules"]
assert "RoleBinding" == jmespath.search("kind", docs[2])
assert docs[3]["spec"]["template"]["spec"]["affinity"] == {}
def METHOD_NAME(self, kube_version):
"""Test pgbouncer certgenerator sslmode opts result."""
docs = render_chart(
kube_version=kube_version,
values={
"airflow": {
"registry": {"secretName": "gscsecret"},
"pgbouncer": {"enabled": True, "sslmode": "require"},
}
},
show_only="templates/generate-ssl.yaml",
)
assert len(docs) == 4
assert [{"name": "gscsecret"}] == docs[3]["spec"]["template"]["spec"][
"imagePullSecrets"
]
def test_pgbouncer_certgenerator_pgbouncerssl_extraannotations(self, kube_version):
"""Test that certgenerator.extraAnnotations correctly inserts the annotations."""
extraAnnotations = {"test": "test"}
docs = render_chart(
kube_version=kube_version,
values={
"airflow": {
"pgbouncer": {
"enabled": True,
"sslmode": "require",
}
},
"certgenerator": {
"extraAnnotations": extraAnnotations,
},
},
show_only="templates/generate-ssl.yaml",
)
assert len(docs) == 4
assert (
docs[3]["spec"]["template"]["metadata"]["annotations"] == extraAnnotations
)
def test_pgbouncer_certgenerator_pgbouncerssl_affinity(self, kube_version):
"""Test that certgenerator.affinity correctly inserts the affinity."""
affinity = {
"nodeAffinity": {
"requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [
{
"matchExpressions": [
{
"key": "foo",
"operator": "In",
"values": ["bar", "baz"],
}
]
}
]
}
}
}
docs = render_chart(
kube_version=kube_version,
values={
"airflow": {
"pgbouncer": {
"enabled": True,
"sslmode": "require",
}
},
"certgenerator": {
"affinity": affinity,
},
},
show_only="templates/generate-ssl.yaml",
)
assert len(docs) == 4
assert docs[3]["spec"]["template"]["spec"]["affinity"] == affinity |
303 | test parameter import lines | import os
import unittest
from checkov.cloudformation.context_parser import ContextParser
from checkov.cloudformation.runner import Runner
from checkov.runner_filter import RunnerFilter
from checkov.cloudformation.parser import parse
class TestCfnYaml(unittest.TestCase):
def test_skip_parsing(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files = f'{current_dir}/skip.yaml'
report = Runner().run(None, files=[test_files], runner_filter=RunnerFilter())
summary = report.get_summary()
self.assertEqual(summary['passed'], 1)
self.assertEqual(summary['failed'], 3)
self.assertEqual(summary['skipped'], 1)
self.assertEqual(summary['parsing_errors'], 0)
def test_code_line_extraction(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
# the test data that we'll evaluate against
# line ranges are 1-based
# mapping is file name, to resource index, to resource details
# checking the resource index helps make sure that we are testing what we think we are testing
files = [f'{current_dir}/cfn_newline_at_end.yaml', f'{current_dir}/cfn_nonewline_at_end.yaml']
resource_properties_mapping = {
files[0]: {
0: {
'name': 'MyDB',
'line_range': [2, 9]
},
1: {
'name': 'MyBucket',
'line_range': [10, 13]
}
},
files[1]: {
0: {
'name': 'MyDB',
'line_range': [2, 9]
},
1: {
'name': 'MyBucket',
'line_range': [11, 14]
}
}
}
for file in files:
cfn_dict, cfn_str = parse(file)
cf_context_parser = ContextParser(file, cfn_dict, cfn_str)
for index, (resource_name, resource) in enumerate(cfn_dict['Resources'].items()):
# this filters out __startline__ and __endline__ markers
resource_id = cf_context_parser.extract_cf_resource_id(resource, resource_name)
if resource_id:
# make sure we are checking the right resource
self.assertEqual(resource_name, resource_properties_mapping[file][index]['name'])
entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
self.assertEqual(entity_lines_range[0], entity_code_lines[0][0])
self.assertEqual(entity_lines_range[1], entity_code_lines[-1][0])
self.assertEqual(entity_lines_range, resource_properties_mapping[file][index]['line_range'])
def test_trim_lines(self):
# trim from front
test1 = [
(0, '\n'),
(1, ''),
(2, ' here is text'),
(3, 'more text')
]
self.assertEqual(ContextParser.trim_lines(test1), test1[2:4])
# trim from back
test2 = [
(0, ' here is text'),
(1, 'more text'),
(2, '\n'),
(3, ''),
]
self.assertEqual(ContextParser.trim_lines(test2), test2[0:2])
# trim from both
test3 = [
(0, '\n'),
(1, ''),
(2, ' here is text'),
(3, 'more text'),
(4, '\n'),
(5, ''),
]
self.assertEqual(ContextParser.trim_lines(test3), test3[2:4])
# trim nothing
test4 = [
(2, ' here is text'),
(3, 'more text'),
]
self.assertEqual(ContextParser.trim_lines(test4), test4)
# trim everything
test5 = [
(2, ''),
(3, '\n'),
]
self.assertEqual(ContextParser.trim_lines(test5), [])
def METHOD_NAME(self):
# check that when a parameter is imported into a resource, the line numbers of the resource are preserved
current_dir = os.path.dirname(os.path.realpath(__file__))
file = f'{current_dir}/cfn_with_ref.yaml'
definitions, definitions_raw = parse(file)
cf_context_parser = ContextParser(file, definitions, definitions_raw)
resource = definitions['Resources']['ElasticsearchDomain']
entity_lines_range, entity_code_lines = cf_context_parser.extract_cf_resource_code_lines(resource)
self.assertEqual(entity_lines_range[0], 10)
self.assertEqual(entity_lines_range[1], 20)
def test_parsing_error(self):
current_dir = os.path.dirname(os.path.realpath(__file__))
test_files = ["cfn_bad_name.yaml", "cfn_with_ref_bad.yaml", "cfn_bad_iam.yaml"]
report = Runner().run(None, files=[f'{current_dir}/{f}' for f in test_files], runner_filter=RunnerFilter())
summary = report.get_summary()
self.assertEqual(summary['passed'], 6)
self.assertEqual(summary['failed'], 0)
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 2)
if __name__ == '__main__':
unittest.main() |
304 | test transformed identify | # This file is part of the MapProxy project.
# Copyright (C) 2010 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from io import BytesIO
import pytest
from mapproxy.request.wms import WMS111FeatureInfoRequest
from mapproxy.test.image import is_png, create_tmp_image
from mapproxy.test.http import mock_httpd
from mapproxy.test.system import SysTest
@pytest.fixture(scope="module")
def config_file():
return "arcgis.yaml"
transp = create_tmp_image((512, 512), mode="RGBA", color=(0, 0, 0, 0))
class TestArcgisSource(SysTest):
def setup(self):
self.common_fi_req = WMS111FeatureInfoRequest(
url="/service?",
param=dict(
x="10",
y="20",
width="200",
height="200",
layers="app2_with_layers_fi_layer",
format="image/png",
query_layers="app2_with_layers_fi_layer",
styles="",
bbox="1000,400,2000,1400",
srs="EPSG:3857",
info_format="application/json",
),
)
def test_get_tile(self, app):
expected_req = [
(
{
"path": "/arcgis/rest/services/ExampleLayer/ImageServer/exportImage?f=image&format=png&imageSR=900913&bboxSR=900913&bbox=-20037508.342789244,-20037508.342789244,20037508.342789244,20037508.342789244&size=512,512"
},
{"body": transp, "headers": {"content-type": "image/png"}},
)
]
with mock_httpd(
("localhost", 42423), expected_req, bbox_aware_query_comparator=True
):
resp = app.get("/tms/1.0.0/app2_layer/0/0/1.png")
assert resp.content_type == "image/png"
assert resp.content_length == len(resp.body)
data = BytesIO(resp.body)
assert is_png(data)
def test_get_tile_with_layer(self, app):
expected_req = [
(
{
"path": "/arcgis/rest/services/ExampleLayer/MapServer/export?f=image&format=png&layers=show:0,1&imageSR=900913&bboxSR=900913&bbox=-20037508.342789244,-20037508.342789244,20037508.342789244,20037508.342789244&size=512,512"
},
{"body": transp, "headers": {"content-type": "image/png"}},
)
]
with mock_httpd(
("localhost", 42423), expected_req, bbox_aware_query_comparator=True
):
resp = app.get("/tms/1.0.0/app2_with_layers_layer/0/0/1.png")
assert resp.content_type == "image/png"
assert resp.content_length == len(resp.body)
data = BytesIO(resp.body)
assert is_png(data)
def test_get_tile_from_missing_arcgis_layer(self, app):
expected_req = [
(
{
"path": "/arcgis/rest/services/NonExistentLayer/ImageServer/exportImage?f=image&format=png&imageSR=900913&bboxSR=900913&bbox=-20037508.342789244,-20037508.342789244,20037508.342789244,20037508.342789244&size=512,512"
},
{"body": b"", "status": 400},
)
]
with mock_httpd(
("localhost", 42423), expected_req, bbox_aware_query_comparator=True
):
resp = app.get("/tms/1.0.0/app2_wrong_url_layer/0/0/1.png", status=500)
assert resp.status_code == 500
def test_identify(self, app):
expected_req = [
(
{
"path": "/arcgis/rest/services/ExampleLayer/MapServer/identify?f=json&"
"geometry=1050.000000,1300.000000&returnGeometry=true&imageDisplay=200,200,96"
"&mapExtent=1000.0,400.0,2000.0,1400.0&layers=show:1,2,3"
"&tolerance=10&geometryType=esriGeometryPoint&sr=3857"
},
{
"body": b'{"results": []}',
"headers": {"content-type": "application/json"},
},
)
]
with mock_httpd(
("localhost", 42423), expected_req, bbox_aware_query_comparator=True
):
resp = app.get(self.common_fi_req)
assert resp.content_type == "application/json"
assert resp.content_length == len(resp.body)
assert resp.body == b'{"results": []}'
def METHOD_NAME(self, app):
expected_req = [
(
{
"path": "/arcgis/rest/services/ExampleLayer/MapServer/identify?f=json&"
"geometry=573295.377585,6927820.884193&returnGeometry=true&imageDisplay=200,321,96"
"&mapExtent=556597.453966,6446275.84102,890555.926346,6982997.92039&layers=show:1,2,3"
"&tolerance=10&geometryType=esriGeometryPoint&sr=3857"
},
{
"body": b'{"results": []}',
"headers": {"content-type": "application/json"},
},
)
]
with mock_httpd(("localhost", 42423), expected_req):
self.common_fi_req.params.bbox = "5,50,8,53"
self.common_fi_req.params.srs = "EPSG:4326"
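# The request now comes in as EPSG:4326, so MapProxy must transform the bbox and
# click point to EPSG:3857 before querying ArcGIS; that is why expected_req above
# contains projected coordinates rather than the raw 5,50,8,53 values.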
resp = app.get(self.common_fi_req)
assert resp.content_type == "application/json"
assert resp.content_length == len(resp.body)
assert resp.body == b'{"results": []}' |
305 | test equivalence forecast | import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
import pytest
import statsmodels.datasets
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.base.prediction import PredictionResults
from statsmodels.tsa.deterministic import Fourier
from statsmodels.tsa.exponential_smoothing.ets import ETSModel
from statsmodels.tsa.forecasting.stl import STLForecast
from statsmodels.tsa.seasonal import STL, DecomposeResult
from statsmodels.tsa.statespace.exponential_smoothing import (
ExponentialSmoothing,
)
@pytest.fixture(scope="module")
def data(request):
rs = np.random.RandomState(987654321)
err = rs.standard_normal(500)
index = pd.date_range("1980-1-1", freq="M", periods=500)
fourier = Fourier(12, 1)
terms = fourier.in_sample(index)
det = np.squeeze(np.asarray(terms @ np.array([[2], [1]])))
for i in range(1, 500):
err[i] += 0.9 * err[i - 1] + det[i]
return pd.Series(err, index=index)
def test_smoke(data):
stlf = STLForecast(data, ARIMA, model_kwargs={"order": (2, 0, 0)})
res = stlf.fit(fit_kwargs={})
res.forecast(37)
assert isinstance(res.summary().as_text(), str)
assert isinstance(res.stl, STL)
assert isinstance(res.result, DecomposeResult)
assert isinstance(res.model, ARIMA)
assert hasattr(res.model_result, "forecast")
@pytest.mark.matplotlib
def test_sharex(data):
stlf = STLForecast(data, ARIMA, model_kwargs={"order": (2, 0, 0)})
res = stlf.fit(fit_kwargs={})
plt = res.result.plot()
grouper_view = plt.axes[0].get_shared_x_axes()
sibs = grouper_view.get_siblings(plt.axes[1])
assert len(sibs) == 4
MODELS = [
(ARIMA, {"order": (2, 0, 0), "trend": "c"}),
(ExponentialSmoothing, {"trend": True}),
(AutoReg, {"lags": 2, "old_names": False}),
(ETSModel, {}),
]
MODELS = MODELS[-1:]
IDS = [str(c[0]).split(".")[-1][:-2] for c in MODELS]
@pytest.mark.parametrize("config", MODELS, ids=IDS)
@pytest.mark.parametrize("horizon", [1, 7, 23])
def METHOD_NAME(data, config, horizon):
model, kwargs = config
stl = STL(data)
stl_fit = stl.fit()
resids = data - stl_fit.seasonal
mod = model(resids, **kwargs)
fit_kwarg = {}
if model is ETSModel:
fit_kwarg["disp"] = False
res = mod.fit(**fit_kwarg)
stlf = STLForecast(data, model, model_kwargs=kwargs).fit(
fit_kwargs=fit_kwarg
)
seasonal = np.asarray(stl_fit.seasonal)[-12:]
seasonal = np.tile(seasonal, 1 + horizon // 12)
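# Repeat the last seasonal cycle to cover the horizon, then add it back onto the
# forecast of the deseasonalized residuals; this hand-rolls what STLForecast is
# expected to do internally, so the two forecasts below should agree.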
fcast = res.forecast(horizon) + seasonal[:horizon]
actual = stlf.forecast(horizon)
assert_allclose(actual, fcast, rtol=1e-4)
if not hasattr(res, "get_prediction"):
return
pred = stlf.get_prediction(data.shape[0], data.shape[0] + horizon - 1)
assert isinstance(pred, PredictionResults)
assert_allclose(pred.predicted_mean, fcast, rtol=1e-4)
half = data.shape[0] // 2
stlf.get_prediction(half, data.shape[0] + horizon - 1)
stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=True)
stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=half // 2)
if hasattr(data, "index"):
loc = data.index[half + half // 2]
a = stlf.get_prediction(
half, data.shape[0] + horizon - 1, dynamic=loc.strftime("%Y-%m-%d")
)
b = stlf.get_prediction(
half, data.shape[0] + horizon - 1, dynamic=loc.to_pydatetime()
)
c = stlf.get_prediction(half, data.shape[0] + horizon - 1, dynamic=loc)
assert_allclose(a.predicted_mean, b.predicted_mean, rtol=1e-4)
assert_allclose(a.predicted_mean, c.predicted_mean, rtol=1e-4)
def test_exceptions(data):
class BadModel:
def __init__(self, *args, **kwargs):
pass
with pytest.raises(AttributeError, match="model must expose"):
STLForecast(data, BadModel)
class NoForecast(BadModel):
def fit(self, *args, **kwargs):
return BadModel()
with pytest.raises(AttributeError, match="The model's result"):
STLForecast(data, NoForecast).fit()
class BadResult:
def forecast(self, *args, **kwargs):
pass
class FakeModel(BadModel):
def fit(self, *args, **kwargs):
return BadResult()
with pytest.raises(AttributeError, match="The model result does not"):
STLForecast(data, FakeModel).fit().summary()
class BadResultSummary(BadResult):
def summary(self, *args, **kwargs):
return object()
class FakeModelSummary(BadModel):
def fit(self, *args, **kwargs):
return BadResultSummary()
with pytest.raises(TypeError, match="The model result's summary"):
STLForecast(data, FakeModelSummary).fit().summary()
@pytest.fixture(scope="function")
def sunspots():
df = statsmodels.datasets.sunspots.load_pandas().data
df.index = np.arange(df.shape[0])
return df.iloc[:, 0]
def test_get_prediction(sunspots):
# GH7309
stlf_model = STLForecast(
sunspots, model=ARIMA, model_kwargs={"order": (2, 2, 0)}, period=11
)
stlf_res = stlf_model.fit()
pred = stlf_res.get_prediction()
assert pred.predicted_mean.shape == (309,)
assert pred.var_pred_mean.shape == (309,)
@pytest.mark.parametrize("not_implemented", [True, False])
def test_no_var_pred(sunspots, not_implemented):
class DummyPred:
def __init__(self, predicted_mean, row_labels):
self.predicted_mean = predicted_mean
self.row_labels = row_labels
def f():
raise NotImplementedError
if not_implemented:
self.forecast = property(f)
class DummyRes:
def __init__(self, res):
self._res = res
def forecast(self, *args, **kwargs):
return self._res.forecast(*args, **kwargs)
def get_prediction(self, *args, **kwargs):
pred = self._res.get_prediction(*args, **kwargs)
return DummyPred(pred.predicted_mean, pred.row_labels)
class DummyMod:
def __init__(self, y):
self._mod = ARIMA(y)
def fit(self, *args, **kwargs):
res = self._mod.fit(*args, **kwargs)
return DummyRes(res)
stl_mod = STLForecast(sunspots, model=DummyMod, period=11)
stl_res = stl_mod.fit()
with pytest.warns(UserWarning, match="The variance of"):
pred = stl_res.get_prediction()
assert np.all(np.isnan(pred.var_pred_mean)) |
306 | prefetch | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class ConcatDataset(FairseqDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
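# Hypothetical illustration (values not from the source): datasets of lengths
# [3, 5] with sample_ratios [2, 1] give cumulative sizes [6, 11], i.e. dataset 0
# is up-sampled 2x and owns indices 0-5, while dataset 1 owns indices 6-10.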
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
def collater(self, samples, **extra_args):
# For now only supports datasets with same underlying collater implementations
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
Returns indices sorted by length. So less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
indices = np.arange(len(self))
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def METHOD_NAME(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.METHOD_NAME([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch) |
307 | run | # -*- coding: utf-8 -*-
# Copyright (c) 2022, Jonathan Lung <[email protected]>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: bitwarden
author:
- Jonathan Lung (@lungj) <[email protected]>
requirements:
- bw (command line utility)
- be logged into bitwarden
- bitwarden vault unlocked
- E(BW_SESSION) environment variable set
short_description: Retrieve secrets from Bitwarden
version_added: 5.4.0
description:
- Retrieve secrets from Bitwarden.
options:
_terms:
description: Key(s) to fetch values for from login info.
required: true
type: list
elements: str
search:
description: Field to retrieve, for example V(name) or V(id).
type: str
default: name
version_added: 5.7.0
field:
description: Field to fetch. Leave unset to fetch whole response.
type: str
collection_id:
description: Collection ID to filter results by collection. Leave unset to skip filtering.
type: str
version_added: 6.3.0
"""
EXAMPLES = """
- name: "Get 'password' from Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
- name: "Get full Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test') }}
- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
"""
RETURN = """
_raw:
description: List of requested field or JSON object of list of matches.
type: list
elements: raw
"""
from subprocess import Popen, PIPE
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.ajson import AnsibleJSONDecoder
from ansible.plugins.lookup import LookupBase
class BitwardenException(AnsibleError):
pass
class Bitwarden(object):
def __init__(self, path='bw'):
self._cli_path = path
@property
def cli_path(self):
return self._cli_path
@property
def unlocked(self):
out, err = self.METHOD_NAME(['status'], stdin="")
decoded = AnsibleJSONDecoder().raw_decode(out)[0]
return decoded['status'] == 'unlocked'
def METHOD_NAME(self, args, stdin=None, expected_rc=0):
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(to_bytes(stdin))
rc = p.wait()
if rc != expected_rc:
raise BitwardenException(err)
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
def _get_matches(self, search_value, search_field, collection_id):
"""Return matching records whose search_field is equal to key.
"""
# Prepare set of params for Bitwarden CLI
params = ['list', 'items', '--search', search_value]
if collection_id:
params.extend(['--collectionid', collection_id])
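# Roughly the CLI invocation being assembled here:
#   bw list items --search <search_value> [--collectionid <collection_id>]
# The JSON output is then decoded and filtered to exact matches on search_field.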
out, err = self.METHOD_NAME(params)
# This includes things that matched in different fields.
initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
# Filter to only include results from the right field.
return [item for item in initial_matches if item[search_field] == search_value]
def get_field(self, field, search_value, search_field="name", collection_id=None):
"""Return a list of the specified field for records whose search_field match search_value
and filtered by collection if collection has been provided.
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
if not field:
return matches
field_matches = []
for match in matches:
# if there are no custom fields, then `match` has no key 'fields'
if 'fields' in match:
custom_field_found = False
for custom_field in match['fields']:
if field == custom_field['name']:
field_matches.append(custom_field['value'])
custom_field_found = True
break
if custom_field_found:
continue
if 'login' in match and field in match['login']:
field_matches.append(match['login'][field])
continue
if field in match:
field_matches.append(match[field])
continue
if matches and not field_matches:
raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
return field_matches
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
field = self.get_option('field')
search_field = self.get_option('search')
collection_id = self.get_option('collection_id')
if not _bitwarden.unlocked:
raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
return [_bitwarden.get_field(field, term, search_field, collection_id) for term in terms]
_bitwarden = Bitwarden() |
308 | compute panel | from __future__ import annotations
import typing
from copy import copy
import numpy as np
import pandas as pd
from ..exceptions import PlotnineError
from ..utils import groupby_apply, pivot_apply
from .position_dodge import position_dodge
if typing.TYPE_CHECKING:
from plotnine.typing import IntArray
class position_dodge2(position_dodge):
"""
Dodge overlaps and place objects side-by-side
This is an enhanced version of
:class:`~plotnine.positions.position_dodge` that can deal
with rectangular overlaps that do not share a lower x border.
Parameters
----------
width: float
Dodging width, when different to the width of the
individual elements. This is useful when you want
to align narrow geoms with wider geoms
preserve: str in ``['total', 'single']``
Should dodging preserve the total width of all elements
at a position, or the width of a single element?
padding : float
Padding between elements at the same position.
Elements are shrunk by this proportion to allow space
between them (Default: 0.1)
reverse : bool
Reverse the default ordering of the groups. This is
useful if you're rotating both the plot and legend.
(Default: False)
"""
REQUIRED_AES = {"x"}
def __init__(
self, width=None, preserve="total", padding=0.1, reverse=False
):
self.params = {
"width": width,
"preserve": preserve,
"padding": padding,
"reverse": reverse,
}
def setup_params(self, data):
if (
("xmin" not in data)
and ("xmax" not in data)
and (self.params["width"] is None)
):
msg = "Width not defined. " "Set with `position_dodge2(width = ?)`"
raise PlotnineError(msg)
params = copy(self.params)
if params["preserve"] == "total":
params["n"] = None
elif "x" in data:
def max_x_values(gdf):
n = gdf["x"].value_counts().max()
return pd.DataFrame({"n": [n]})
res = groupby_apply(data, "PANEL", max_x_values)
params["n"] = res["n"].max()
else:
def _find_x_overlaps(gdf):
return pd.DataFrame({"n": find_x_overlaps(gdf)})
# interval geoms
res = groupby_apply(data, "PANEL", _find_x_overlaps)
params["n"] = res["n"].max()
return params
@classmethod
def METHOD_NAME(cls, data, scales, params):
return cls.collide2(data, params=params)
@staticmethod
def strategy(data, params):
padding = params["padding"]
n = params["n"]
if not all(col in data.columns for col in ["xmin", "xmax"]):
data["xmin"] = data["x"]
data["xmax"] = data["x"]
# Groups of boxes that share the same position
data["xid"] = find_x_overlaps(data)
# Find newx using xid, i.e. the center of each group of
# overlapping elements. for boxes, bars, etc. this should
# be the same as original x, but for arbitrary rects it
# may not be
res1 = pivot_apply(data, "xmin", "xid", np.min)
res2 = pivot_apply(data, "xmax", "xid", np.max)
data["newx"] = (res1 + res2)[data["xid"].to_numpy()].to_numpy() / 2
if n is None:
# If n is None, preserve total widths of elements at
# each position by dividing widths by the number of
# elements at that position
n = data["xid"].value_counts().to_numpy()
n = n[data.loc[:, "xid"] - 1]
data["new_width"] = (data["xmax"] - data["xmin"]) / n
else:
data["new_width"] = (data["xmax"] - data["xmin"]) / n
# Find the total width of each group of elements
def sum_new_width(gdf):
return pd.DataFrame(
{
"size": [gdf["new_width"].sum()],
"newx": gdf["newx"].iloc[0],
}
)
group_sizes = groupby_apply(data, "newx", sum_new_width)
# Starting xmin for each group of elements
starts = group_sizes["newx"] - (group_sizes["size"] / 2)
# Set the elements in place
for i, start in enumerate(starts, start=1):
bool_idx = data["xid"] == i
divisions = np.cumsum(
np.hstack([start, data.loc[bool_idx, "new_width"]])
)
data.loc[bool_idx, "xmin"] = divisions[:-1]
data.loc[bool_idx, "xmax"] = divisions[1:]
# x values get moved to between xmin and xmax
data["x"] = (data["xmin"] + data["xmax"]) / 2
# Shrink elements to add space between them
if data["xid"].duplicated().any():
pad_width = data["new_width"] * (1 - padding)
data["xmin"] = data["x"] - pad_width / 2
data["xmax"] = data["x"] + pad_width / 2
data = data.drop(columns=["xid", "newx", "new_width"], errors="ignore")
return data
def find_x_overlaps(df: pd.DataFrame) -> IntArray:
"""
Find overlapping regions along the x axis
"""
n = len(df)
overlaps = np.zeros(n, dtype=int)
overlaps[0] = 1
counter = 1
for i in range(1, n):
if df["xmin"].iloc[i] >= df["xmax"].iloc[i - 1]:
counter += 1
overlaps[i] = counter
return overlaps |
309 | with dsn | import pytest
import datetime as dt
from typing import Any, Dict
import psycopg
from psycopg.conninfo import conninfo_to_dict
from . import dbapi20
from . import dbapi20_tpc
@pytest.fixture(scope="class")
def METHOD_NAME(request, session_dsn):
request.cls.connect_args = (session_dsn,)
@pytest.mark.usefixtures("with_dsn")
class PsycopgTests(dbapi20.DatabaseAPI20Test):
driver = psycopg
# connect_args = () # set by the fixture
connect_kw_args: Dict[str, Any] = {}
def test_nextset(self):
# tested elsewhere
pass
def test_setoutputsize(self):
# no-op
pass
@pytest.mark.usefixtures("tpc")
@pytest.mark.usefixtures("with_dsn")
class PsycopgTPCTests(dbapi20_tpc.TwoPhaseCommitTests):
driver = psycopg
connect_args = () # set by the fixture
def connect(self):
return psycopg.connect(*self.connect_args)
# Shut up warnings
PsycopgTests.failUnless = PsycopgTests.assertTrue
PsycopgTPCTests.assertEquals = PsycopgTPCTests.assertEqual
@pytest.mark.parametrize(
"typename, singleton",
[
("bytea", "BINARY"),
("date", "DATETIME"),
("timestamp without time zone", "DATETIME"),
("timestamp with time zone", "DATETIME"),
("time without time zone", "DATETIME"),
("time with time zone", "DATETIME"),
("interval", "DATETIME"),
("integer", "NUMBER"),
("smallint", "NUMBER"),
("bigint", "NUMBER"),
("real", "NUMBER"),
("double precision", "NUMBER"),
("numeric", "NUMBER"),
("decimal", "NUMBER"),
("oid", "ROWID"),
("varchar", "STRING"),
("char", "STRING"),
("text", "STRING"),
],
)
def test_singletons(conn, typename, singleton):
singleton = getattr(psycopg, singleton)
cur = conn.cursor()
cur.execute(f"select null::{typename}")
oid = cur.description[0].type_code
assert singleton == oid
assert oid == singleton
assert singleton != oid + 10000
assert oid + 10000 != singleton
@pytest.mark.parametrize(
"ticks, want",
[
(0, "1970-01-01T00:00:00.000000+0000"),
(1273173119.99992, "2010-05-06T14:11:59.999920-0500"),
],
)
def test_timestamp_from_ticks(ticks, want):
s = psycopg.TimestampFromTicks(ticks)
want = dt.datetime.strptime(want, "%Y-%m-%dT%H:%M:%S.%f%z")
assert s == want
@pytest.mark.parametrize(
"ticks, want",
[
(0, "1970-01-01"),
# Returned date is local
(1273173119.99992, ["2010-05-06", "2010-05-07"]),
],
)
def test_date_from_ticks(ticks, want):
s = psycopg.DateFromTicks(ticks)
if isinstance(want, str):
want = [want]
want = [dt.datetime.strptime(w, "%Y-%m-%d").date() for w in want]
assert s in want
@pytest.mark.parametrize(
"ticks, want",
[(0, "00:00:00.000000"), (1273173119.99992, "00:11:59.999920")],
)
def test_time_from_ticks(ticks, want):
s = psycopg.TimeFromTicks(ticks)
want = dt.datetime.strptime(want, "%H:%M:%S.%f").time()
assert s.replace(hour=0) == want
@pytest.mark.parametrize(
"args, kwargs, want",
[
((), {}, ""),
(("",), {}, ""),
(("host=foo user=bar",), {}, "host=foo user=bar"),
(("host=foo",), {"user": "baz"}, "host=foo user=baz"),
(
("host=foo port=5432",),
{"host": "qux", "user": "joe"},
"host=qux user=joe port=5432",
),
(("host=foo",), {"user": None}, "host=foo"),
],
)
def test_connect_args(monkeypatch, pgconn, args, kwargs, want):
the_conninfo: str
def fake_connect(conninfo):
nonlocal the_conninfo
the_conninfo = conninfo
return pgconn
yield
monkeypatch.setattr(psycopg.connection, "connect", fake_connect)
conn = psycopg.connect(*args, **kwargs)
assert conninfo_to_dict(the_conninfo) == conninfo_to_dict(want)
conn.close()
@pytest.mark.parametrize(
"args, kwargs, exctype",
[
(("host=foo", "host=bar"), {}, TypeError),
(("", ""), {}, TypeError),
((), {"nosuchparam": 42}, psycopg.ProgrammingError),
],
)
def test_connect_badargs(monkeypatch, pgconn, args, kwargs, exctype):
def fake_connect(conninfo):
return pgconn
yield
with pytest.raises(exctype):
psycopg.connect(*args, **kwargs) |
310 | test unary op | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Test the propagation of info on Quantity during operations."""
import copy
import numpy as np
from astropy import units as u
def assert_info_equal(a, b, ignore=set()):
a_info = a.info
b_info = b.info
for attr in (a_info.attr_names | b_info.attr_names) - ignore:
if attr == "unit":
assert a_info.unit.is_equivalent(b_info.unit)
else:
assert getattr(a_info, attr, None) == getattr(b_info, attr, None)
def assert_no_info(a):
assert "info" not in a.__dict__
class TestQuantityInfo:
@classmethod
def setup_class(self):
self.q = u.Quantity(np.arange(1.0, 5.0), "m/s")
self.q.info.name = "v"
self.q.info.description = "air speed of an African swallow"
def test_copy(self):
q_copy1 = self.q.copy()
assert_info_equal(q_copy1, self.q)
q_copy2 = copy.copy(self.q)
assert_info_equal(q_copy2, self.q)
q_copy3 = copy.deepcopy(self.q)
assert_info_equal(q_copy3, self.q)
def test_slice(self):
q_slice = self.q[1:3]
assert_info_equal(q_slice, self.q)
q_take = self.q.take([0, 1])
assert_info_equal(q_take, self.q)
def test_item(self):
# Scalars do not get info set (like for Column); TODO: is this OK?
q1 = self.q[1]
assert_no_info(q1)
q_item = self.q.item(1)
assert_no_info(q_item)
def test_iter(self):
# Scalars do not get info set.
for q in self.q:
assert_no_info(q)
for q in iter(self.q):
assert_no_info(q)
def test_change_to_equivalent_unit(self):
q1 = self.q.to(u.km / u.hr)
assert_info_equal(q1, self.q)
q2 = self.q.si
assert_info_equal(q2, self.q)
q3 = self.q.cgs
assert_info_equal(q3, self.q)
q4 = self.q.decompose()
assert_info_equal(q4, self.q)
def test_reshape(self):
q = self.q.reshape(-1, 1, 2)
assert_info_equal(q, self.q)
q2 = q.squeeze()
assert_info_equal(q2, self.q)
def test_insert(self):
q = self.q.copy()
q.insert(1, 1 * u.cm / u.hr)
assert_info_equal(q, self.q)
def METHOD_NAME(self):
q = -self.q
assert_no_info(q)
def test_binary_op(self):
q = self.q + self.q
assert_no_info(q)
def test_unit_change(self):
q = self.q * u.s
assert_no_info(q)
q2 = u.s / self.q
assert_no_info(q2)
def test_inplace_unit_change(self):
# Not sure if it is logical to keep info here!
q = self.q.copy()
q *= u.s
assert_info_equal(q, self.q, ignore={"unit"})
class TestStructuredQuantity:
@classmethod
def setup_class(self):
value = np.array([(1.0, 2.0), (3.0, 4.0)], dtype=[("p", "f8"), ("v", "f8")])
self.q = u.Quantity(value, "m, m/s")
self.q.info.name = "pv"
self.q.info.description = "Location and speed"
def test_keying(self):
q_p = self.q["p"]
assert_no_info(q_p)
def test_slicing(self):
q = self.q[:1]
assert_info_equal(q, self.q)
def test_item(self):
# Scalars do not get info set.
q = self.q[1]
assert_no_info(q)
class TestQuantitySubclass:
"""Regression test for gh-14514: _new_view should __array_finalize__.
But info should be propagated only for slicing, etc.
"""
@classmethod
def setup_class(self):
class MyQuantity(u.Quantity):
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
if hasattr(obj, "swallow"):
self.swallow = obj.swallow
self.my_q = MyQuantity([10.0, 20.0], u.m / u.s)
self.my_q.swallow = "African"
self.my_q_w_info = self.my_q.copy()
self.my_q_w_info.info.name = "swallow"
def test_setup(self):
assert_no_info(self.my_q)
assert self.my_q_w_info.swallow == self.my_q.swallow
assert self.my_q_w_info.info.name == "swallow"
def test_slice(self):
slc1 = self.my_q[:1]
assert slc1.swallow == self.my_q.swallow
assert_no_info(slc1)
slc2 = self.my_q_w_info[1:]
assert slc2.swallow == self.my_q.swallow
assert_info_equal(slc2, self.my_q_w_info)
def test_op(self):
square1 = self.my_q**2
assert square1.swallow == self.my_q.swallow
assert_no_info(square1)
square2 = self.my_q_w_info**2
assert square2.swallow == self.my_q.swallow
assert_no_info(square2) |
311 | init repo | #!/usr/bin/env python
"""Script to commit the doc build outputs into the github-pages repo.
Use:
gh-pages.py [tag]
If no tag is given, the current output of 'git describe' is used. If given,
that is how the resulting directory will be named.
In practice, you should use either actual clean tags from a current build or
something like 'current' as a stable URL for the most current version of the docs."""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import shutil
import sys
from os import chdir as cd
from os.path import join as pjoin
from subprocess import Popen, PIPE, CalledProcessError, check_call
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
pages_dir = 'gh-pages'
html_dir = 'build/html'
pdf_dir = 'build/latex'
pages_repo = '[email protected]:jupyter/qtconsole.git'
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def sh(cmd):
"""Execute command in a subshell, return status code."""
return check_call(cmd, shell=True)
def sh2(cmd):
"""Execute command in a subshell, return stdout.
Stderr is unbuffered from the subshell."""
p = Popen(cmd, stdout=PIPE, shell=True)
out = p.communicate()[0]
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip()
def sh3(cmd):
"""Execute command in a subshell, return stdout, stderr
If anything appears in stderr, print it out to sys.stderr"""
p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True)
out, err = p.communicate()
retcode = p.returncode
if retcode:
raise CalledProcessError(retcode, cmd)
else:
return out.rstrip(), err.rstrip()
def METHOD_NAME(path):
"""clone the gh-pages repo if we haven't already."""
sh("git clone %s %s"%(pages_repo, path))
here = os.getcwd()
cd(path)
sh('git checkout gh-pages')
cd(here)
#-----------------------------------------------------------------------------
# Script starts
#-----------------------------------------------------------------------------
if __name__ == '__main__':
# The tag can be given as a positional argument
try:
tag = sys.argv[1]
except IndexError:
tag = "dev"
startdir = os.getcwd()
if not os.path.exists(pages_dir):
# init the repo
METHOD_NAME(pages_dir)
else:
# ensure up-to-date before operating
cd(pages_dir)
sh('git checkout gh-pages')
sh('git pull')
cd(startdir)
dest = pjoin(pages_dir, tag)
# don't `make html` here, because gh-pages already depends on html in Makefile
# sh('make html')
if tag != 'dev':
# only build pdf for non-dev targets
#sh2('make pdf')
pass
# This is pretty unforgiving: we unconditionally nuke the destination
# directory, and then copy the html tree in there
shutil.rmtree(dest, ignore_errors=True)
shutil.copytree(html_dir, dest)
if tag != 'dev':
#shutil.copy(pjoin(pdf_dir, 'ipython.pdf'), pjoin(dest, 'ipython.pdf'))
pass
try:
cd(pages_dir)
branch = sh2('git rev-parse --abbrev-ref HEAD').strip().decode('ascii', 'replace')
if branch != 'gh-pages':
e = 'On %r, git branch is %r, MUST be "gh-pages"' % (pages_dir,
branch)
raise RuntimeError(e)
sh('git add -A %s' % tag)
sh('git commit -m"Updated doc release: %s"' % tag)
print()
print('Most recent 3 commits:')
sys.stdout.flush()
# Need 3 commits in the repo before this will work
#sh('git --no-pager log --oneline HEAD~3..')
finally:
cd(startdir)
print()
print('Now verify the build in: %r' % dest)
print("If everything looks good, 'git push'") |
312 | test debounced long lasting | import asyncio
import vaex.jupyter
import time
import pytest
import sys
if sys.version_info >= (3, 10):
import pytest
pytest.skip("Skipping tests on python 3.10 or higher", allow_module_level=True)
@pytest.mark.asyncio
async def test_await_promise(df_trimmed):
df = df_trimmed
execute_time = 0
@vaex.jupyter.debounced(delay_seconds=0.01)
async def execute():
nonlocal execute_time
print("EXECUTE!!")
execute_time = time.time()
await df.execute_async()
assert vaex.jupyter.utils.get_ioloop() is not None
count_promise = df.count(df.x, delay=True)
time_before_execute = time.time()
execute()
count = await count_promise
assert execute_time > time_before_execute
assert count == df.count(df.x)
def test_debounce_method(df_trimmed):
class Foo:
def __init__(self, count):
self.count = count
@vaex.jupyter.debounced(delay_seconds=0.01)
def run(self):
self.count += 1
return self.count
async def run():
a = Foo(1)
b = Foo(10)
af = a.run()
bf = b.run()
assert a.run.obj is a
assert b.run.obj is b
assert af is not bf
assert await af == 2
assert await bf == 11
asyncio.run(run())
@pytest.mark.parametrize("reentrant", [False, True])
def test_debounced_reentrant(reentrant):
value = 0
@vaex.jupyter.debounced(delay_seconds=0.01, reentrant=reentrant)
async def execute():
nonlocal value
local_value = value
await asyncio.sleep(0.02)
# if reentrant (and invoked withing 0.02 seconds) both calls reference to 0, and return 0
# if non-reentrant, the next call will always see the new value
value = local_value + 1
return {value}
async def run():
fa = execute()
fb = execute()
# we wait beyond the debounce time
# so that the function is still executing (sleep)
await asyncio.sleep(0.015)
# but already call the next
fc = execute()
a = await fa
b = await fb
c = await fc
if reentrant:
assert a is b
assert a == {1}
assert c.issubset({1, 2}) # c can actually be called at the same time
assert a is not c
else:
assert a is b
assert a == {1}
assert c == {2}
assert a is not c
asyncio.run(run())
def test_debounced_non_reentrant_hammer():
running = 0
@vaex.jupyter.debounced(delay_seconds=0.001, reentrant=False)
async def execute():
nonlocal running
assert not running
running = True
await asyncio.sleep(0.001)
running = False
raise "bla"
async def run():
for i in range(10000):
future = execute()
await asyncio.sleep(0.001/4)
try:
await future
except:
pass
asyncio.run(run())
def METHOD_NAME():
calls = 0
@vaex.jupyter.debounced(delay_seconds=0.01)
async def execute():
nonlocal calls
await asyncio.sleep(0.05)
calls += 1
return {calls}
async def run():
fa = execute()
fb = execute()
# we wait beyond the debounce time
# so that the function is still executing (sleep)
await asyncio.sleep(0.02)
# but already call the next
fc = execute()
a = await fa
b = await fb
c = await fc
assert a is b
assert a == {1}
assert c == {2}
assert fa is fb
assert a is not c
asyncio.run(run())
@pytest.mark.parametrize("as_coroutine", [False, True])
@pytest.mark.parametrize("as_method", [False, True])
def test_debounced_await(df_trimmed, as_coroutine, as_method, flush_guard,):
calls = 0
if as_method:
class Foo:
if as_coroutine:
@vaex.jupyter.debounced(delay_seconds=0.01)
async def foo(self):
nonlocal calls
calls += 1
return {'calls': calls}
@vaex.jupyter.debounced(delay_seconds=0.01)
async def foo_error(self):
nonlocal calls
calls += 1
raise RuntimeError("foo")
else:
@vaex.jupyter.debounced(delay_seconds=0.01)
def foo(self):
nonlocal calls
calls += 1
return {'calls': calls}
@vaex.jupyter.debounced(delay_seconds=0.01)
def foo_error(self):
nonlocal calls
calls += 1
raise RuntimeError("foo")
foo2 = Foo() # noqa
foo1 = Foo()
foo = foo1.foo
foo_error = foo1.foo_error
other_foo = foo2
else:
if as_coroutine:
@vaex.jupyter.debounced(delay_seconds=0.01)
async def foo():
nonlocal calls
calls += 1
return {'calls': calls}
@vaex.jupyter.debounced(delay_seconds=0.01)
async def foo_error():
nonlocal calls
calls += 1
raise RuntimeError("foo")
else:
@vaex.jupyter.debounced(delay_seconds=0.01)
def foo():
nonlocal calls
calls += 1
return {'calls': calls}
@vaex.jupyter.debounced(delay_seconds=0.01)
def foo_error():
nonlocal calls
calls += 1
raise RuntimeError("foo")
async def run():
nonlocal calls
assert calls == 0
if as_method:
calls -= 1 # we're gonna call it twice, so we correct
future1 = foo()
future2 = foo()
if as_method:
bla1 = other_foo.foo()
bla2 = other_foo.foo()
result1 = await future1
result2 = await future2
if as_method:
await bla1
await bla2
assert calls == 1
assert result1 is result2
# await asyncio.sleep(0.02)
if as_method:
await bla1
await bla2
calls = 1
future1b = foo()
future2b = foo()
result1b = await future1b
result2b = await future2b
assert calls == 2
assert result1b is result2b
assert result1 is not result1b
future1 = foo_error()
future2 = foo_error()
with pytest.raises(RuntimeError) as e1:
result1 = await future1
assert str(e1.value) == 'foo'
with pytest.raises(RuntimeError) as e2:
result2 = await future2
assert calls == 3
assert e1.value is e2.value
# await asyncio.sleep(0.02)
future1b = foo_error()
future2b = foo_error()
with pytest.raises(RuntimeError) as e1b:
result1b = await future1b
# assert str(e1.value) == 'foo'
# with pytest.raises(RuntimeError) as e2b:
# result2b = await future2b
# assert calls == 4
# assert e1b.value is e2b.value
# assert e1.value is not e1b.value
asyncio.run(run()) |
313 | set default minor | #!/usr/bin/env python3
import argparse
import functools
import re
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Callable, Match, Pattern
SUCCESS_CHAR = '\u2714'
FAIL_CHAR = '\u2716'
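# Matches the VERSION tuple in cvat/__init__.py, e.g. VERSION = (2, 5, 0, 'alpha', 0) (numbers illustrative)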
CVAT_VERSION_PATTERN = re.compile(r'VERSION\s*=\s*\((\d+),\s*(\d*),\s*(\d+),\s*[\',\"](\w+)[\',\"],\s*(\d+)\)')
REPO_ROOT_DIR = Path(__file__).resolve().parents[1]
CVAT_INIT_PY_REL_PATH = 'cvat/__init__.py'
CVAT_INIT_PY_PATH = REPO_ROOT_DIR / CVAT_INIT_PY_REL_PATH
@dataclass()
class Version:
major: int = 0
minor: int = 0
patch: int = 0
prerelease: str = ''
prerelease_number: int = 0
def __str__(self) -> str:
return f'{self.major}.{self.minor}.{self.patch}-{self.prerelease}.{self.prerelease_number}'
def cvat_repr(self):
return f"({self.major}, {self.minor}, {self.patch}, '{self.prerelease}', {self.prerelease_number})"
def compose_repr(self):
if self.prerelease != 'final':
return 'dev'
return f'v{self.major}.{self.minor}.{self.patch}'
def increment_prerelease_number(self) -> None:
self.prerelease_number += 1
def increment_prerelease(self) -> None:
flow = ('alpha', 'beta', 'rc', 'final')
idx = flow.index(self.prerelease)
if idx == len(flow) - 1:
raise ValueError(f"Cannot increment current '{self.prerelease}' prerelease version")
self.prerelease = flow[idx + 1]
self._set_default_prerelease_number()
def set_prerelease(self, value: str) -> None:
values = ('alpha', 'beta', 'rc', 'final')
if value not in values:
            raise ValueError(f'{value} is not valid, must be one of {values}')
self.prerelease = value
self._set_default_prerelease_number()
def increment_patch(self) -> None:
self.patch += 1
self._set_default_prerelease()
def increment_minor(self) -> None:
self.minor += 1
self._set_default_patch()
def increment_major(self) -> None:
self.major += 1
self.METHOD_NAME()
def _set_default_prerelease_number(self) -> None:
self.prerelease_number = 0
def _set_default_prerelease(self) -> None:
self.prerelease = 'alpha'
self._set_default_prerelease_number()
def _set_default_patch(self) -> None:
self.patch = 0
self._set_default_prerelease()
def METHOD_NAME(self) -> None:
self.minor = 0
self._set_default_patch()
@dataclass(frozen=True)
class ReplacementRule:
rel_path: str
pattern: Pattern[str]
replacement: Callable[[Version, Match[str]], str]
def apply(self, new_version: Version, *, verify_only: bool) -> bool:
path = REPO_ROOT_DIR / self.rel_path
text = path.read_text()
new_text, num_replacements = self.pattern.subn(
functools.partial(self.replacement, new_version), text)
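        # functools.partial binds new_version, so subn can call the replacement with just the match object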
if not num_replacements:
print(f'{FAIL_CHAR} {self.rel_path}: failed to match version pattern.')
return False
if text == new_text:
if verify_only:
print(f'{SUCCESS_CHAR} {self.rel_path}: verified.')
else:
print(f'{SUCCESS_CHAR} {self.rel_path}: no need to update.')
else:
if verify_only:
print(f'{FAIL_CHAR} {self.rel_path}: verification failed.')
return False
else:
path.write_text(new_text)
print(f'{SUCCESS_CHAR} {self.rel_path}: updated.')
return True
REPLACEMENT_RULES = [
ReplacementRule(CVAT_INIT_PY_REL_PATH, CVAT_VERSION_PATTERN,
lambda v, m: f'VERSION = {v.cvat_repr()}'),
ReplacementRule('docker-compose.yml',
re.compile(r'(\$\{CVAT_VERSION:-)([\w.]+)(\})'),
lambda v, m: m[1] + v.compose_repr() + m[3]),
ReplacementRule('helm-chart/values.yaml',
re.compile(r'(^ image: cvat/(?:ui|server)\n tag: )([\w.]+)', re.M),
lambda v, m: m[1] + v.compose_repr()),
ReplacementRule('cvat-sdk/gen/generate.sh',
re.compile(r'^VERSION="[\d.]+"$', re.M),
lambda v, m: f'VERSION="{v.major}.{v.minor}.{v.patch}"'),
ReplacementRule('cvat/schema.yml',
re.compile(r"^ version: [\d.]+$", re.M),
lambda v, m: f' version: {v.major}.{v.minor}.{v.patch}'),
ReplacementRule('cvat-cli/src/cvat_cli/version.py',
re.compile(r'^VERSION = "[\d.]+"$', re.M),
lambda v, m: f'VERSION = "{v.major}.{v.minor}.{v.patch}"'),
ReplacementRule('cvat-cli/requirements/base.txt',
re.compile(r'^cvat-sdk~=[\d.]+$', re.M),
lambda v, m: f'cvat-sdk~={v.major}.{v.minor}.{v.patch}'),
]
def get_current_version() -> Version:
version_text = CVAT_INIT_PY_PATH.read_text()
match = re.search(CVAT_VERSION_PATTERN, version_text)
if not match:
raise RuntimeError(f'Failed to find version in {CVAT_INIT_PY_PATH}')
return Version(int(match[1]), int(match[2]), int(match[3]), match[4], int(match[5]))
def main() -> None:
parser = argparse.ArgumentParser(description='Bump CVAT version')
action_group = parser.add_mutually_exclusive_group(required=True)
action_group.add_argument('--major', action='store_true',
help='Increment the existing major version by 1')
action_group.add_argument('--minor', action='store_true',
help='Increment the existing minor version by 1')
action_group.add_argument('--patch', action='store_true',
help='Increment the existing patch version by 1')
action_group.add_argument('--prerelease', nargs='?', const='increment',
        help='''Increment the prerelease version: alpha->beta->rc->final.
        It is also possible to pass a value explicitly''')
action_group.add_argument('--prerelease_number', action='store_true',
help='Increment prerelease number by 1')
action_group.add_argument('--current', '--show-current',
action='store_true', help='Display current version')
action_group.add_argument('--verify-current',
action='store_true', help='Check that all version numbers are consistent')
args = parser.parse_args()
version = get_current_version()
verify_only = False
if args.current:
print(version)
return
elif args.verify_current:
verify_only = True
elif args.prerelease_number:
version.increment_prerelease_number()
elif args.prerelease:
if args.prerelease == 'increment':
version.increment_prerelease()
else:
version.set_prerelease(args.prerelease)
elif args.patch:
version.increment_patch()
elif args.minor:
version.increment_minor()
elif args.major:
version.increment_major()
else:
assert False, "Unreachable code"
if verify_only:
print(f'Verifying that version is {version}...')
else:
print(f'Bumping version to {version}...')
print()
success = True
for rule in REPLACEMENT_RULES:
if not rule.apply(version, verify_only=verify_only):
success = False
if not success:
if verify_only:
sys.exit("\nFailed to verify one or more files!")
else:
sys.exit("\nFailed to update one or more files!")
if __name__ == '__main__':
main() |
314 | on text filter change | import os
import sys
from qtpy import QtWidgets, QtCore
import qtawesome
from openpype import style
from openpype.client import get_projects
from openpype.pipeline import legacy_io
from openpype.tools.utils.delegates import VersionDelegate
from openpype.tools.utils.lib import (
qt_app_context,
preserve_expanded_rows,
preserve_selection,
FamilyConfigCache
)
from .model import (
InventoryModel,
FilterProxyModel
)
from .view import SceneInventoryView
module = sys.modules[__name__]
module.window = None
class SceneInventoryWindow(QtWidgets.QDialog):
"""Scene Inventory window"""
def __init__(self, parent=None):
super(SceneInventoryWindow, self).__init__(parent)
if not parent:
self.setWindowFlags(
self.windowFlags() | QtCore.Qt.WindowStaysOnTopHint
)
project_name = os.getenv("AVALON_PROJECT") or "<Project not set>"
self.setWindowTitle("Scene Inventory 1.0 - {}".format(project_name))
self.setObjectName("SceneInventory")
self.resize(1100, 480)
# region control
filter_label = QtWidgets.QLabel("Search", self)
text_filter = QtWidgets.QLineEdit(self)
outdated_only_checkbox = QtWidgets.QCheckBox(
"Filter to outdated", self
)
outdated_only_checkbox.setToolTip("Show outdated files only")
outdated_only_checkbox.setChecked(False)
icon = qtawesome.icon("fa.arrow-up", color="white")
update_all_button = QtWidgets.QPushButton(self)
update_all_button.setToolTip("Update all outdated to latest version")
update_all_button.setIcon(icon)
icon = qtawesome.icon("fa.refresh", color="white")
refresh_button = QtWidgets.QPushButton(self)
refresh_button.setToolTip("Refresh")
refresh_button.setIcon(icon)
control_layout = QtWidgets.QHBoxLayout()
control_layout.addWidget(filter_label)
control_layout.addWidget(text_filter)
control_layout.addWidget(outdated_only_checkbox)
control_layout.addWidget(update_all_button)
control_layout.addWidget(refresh_button)
# endregion control
family_config_cache = FamilyConfigCache(legacy_io)
model = InventoryModel(family_config_cache)
proxy = FilterProxyModel()
proxy.setSourceModel(model)
proxy.setDynamicSortFilter(True)
proxy.setFilterCaseSensitivity(QtCore.Qt.CaseInsensitive)
view = SceneInventoryView(self)
view.setModel(proxy)
# set some nice default widths for the view
view.setColumnWidth(0, 250) # name
view.setColumnWidth(1, 55) # version
view.setColumnWidth(2, 55) # count
view.setColumnWidth(3, 150) # family
view.setColumnWidth(4, 120) # group
view.setColumnWidth(5, 150) # loader
# apply delegates
version_delegate = VersionDelegate(legacy_io, self)
column = model.Columns.index("version")
view.setItemDelegateForColumn(column, version_delegate)
layout = QtWidgets.QVBoxLayout(self)
layout.addLayout(control_layout)
layout.addWidget(view)
# signals
text_filter.textChanged.connect(self.METHOD_NAME)
outdated_only_checkbox.stateChanged.connect(
self._on_outdated_state_change
)
view.hierarchy_view_changed.connect(
self._on_hierarchy_view_change
)
view.data_changed.connect(self._on_refresh_request)
refresh_button.clicked.connect(self._on_refresh_request)
update_all_button.clicked.connect(self._on_update_all)
self._update_all_button = update_all_button
self._outdated_only_checkbox = outdated_only_checkbox
self._view = view
self._model = model
self._proxy = proxy
self._version_delegate = version_delegate
self._family_config_cache = family_config_cache
self._first_show = True
family_config_cache.refresh()
def showEvent(self, event):
super(SceneInventoryWindow, self).showEvent(event)
if self._first_show:
self._first_show = False
self.setStyleSheet(style.load_stylesheet())
def keyPressEvent(self, event):
"""Custom keyPressEvent.
Override keyPressEvent to do nothing so that Maya's panels won't
take focus when pressing "SHIFT" whilst mouse is over viewport or
outliner. This way users don't accidentally perform Maya commands
whilst trying to name an instance.
"""
def _on_refresh_request(self):
"""Signal callback to trigger 'refresh' without any arguments."""
self.refresh()
def refresh(self, items=None):
with preserve_expanded_rows(
tree_view=self._view,
role=self._model.UniqueRole
):
with preserve_selection(
tree_view=self._view,
role=self._model.UniqueRole,
current_index=False
):
kwargs = {"items": items}
# TODO do not touch view's inner attribute
if self._view._hierarchy_view:
kwargs["selected"] = self._view._selected
self._model.refresh(**kwargs)
def _on_hierarchy_view_change(self, enabled):
self._proxy.set_hierarchy_view(enabled)
self._model.set_hierarchy_view(enabled)
def METHOD_NAME(self, text_filter):
if hasattr(self._proxy, "setFilterRegExp"):
self._proxy.setFilterRegExp(text_filter)
else:
self._proxy.setFilterRegularExpression(text_filter)
def _on_outdated_state_change(self):
self._proxy.set_filter_outdated(
self._outdated_only_checkbox.isChecked()
)
def _on_update_all(self):
self._view.update_all()
def show(root=None, debug=False, parent=None, items=None):
"""Display Scene Inventory GUI
Arguments:
debug (bool, optional): Run in debug-mode,
defaults to False
        parent (QtCore.QObject, optional): When provided, parent the interface
            to this QObject.
        items (list of dict, optional): Items to inject for standalone
            testing.
"""
try:
module.window.close()
del module.window
except (RuntimeError, AttributeError):
pass
if debug is True:
legacy_io.install()
if not os.environ.get("AVALON_PROJECT"):
any_project = next(
project for project in get_projects()
)
project_name = any_project["name"]
else:
project_name = os.environ.get("AVALON_PROJECT")
legacy_io.Session["AVALON_PROJECT"] = project_name
with qt_app_context():
window = SceneInventoryWindow(parent)
window.show()
window.refresh(items=items)
module.window = window
# Pull window to the front.
module.window.raise_()
module.window.activateWindow() |
315 | test forward methods without rewrap | import os
import pathlib
import pytest
import trio
from trio._file_io import AsyncIOWrapper
from trio._path import AsyncAutoWrapperType as Type
@pytest.fixture
def path(tmpdir):
p = str(tmpdir.join("test"))
return trio.Path(p)
def method_pair(path, method_name):
path = pathlib.Path(path)
async_path = trio.Path(path)
return getattr(path, method_name), getattr(async_path, method_name)
async def test_open_is_async_context_manager(path):
async with await path.open("w") as f:
assert isinstance(f, AsyncIOWrapper)
assert f.closed
async def test_magic():
path = trio.Path("test")
assert str(path) == "test"
assert bytes(path) == b"test"
cls_pairs = [
(trio.Path, pathlib.Path),
(pathlib.Path, trio.Path),
(trio.Path, trio.Path),
]
@pytest.mark.parametrize("cls_a,cls_b", cls_pairs)
async def test_cmp_magic(cls_a, cls_b):
a, b = cls_a(""), cls_b("")
assert a == b
assert not a != b
a, b = cls_a("a"), cls_b("b")
assert a < b
assert b > a
# this is intentionally testing equivalence with none, due to the
# other=sentinel logic in _forward_magic
assert not a == None # noqa
assert not b == None # noqa
# upstream python3.8 bug: we should also test (pathlib.Path, trio.Path), but
# __*div__ does not properly raise NotImplementedError like the other comparison
# magic, so trio.Path's implementation does not get dispatched
cls_pairs = [
(trio.Path, pathlib.Path),
(trio.Path, trio.Path),
(trio.Path, str),
(str, trio.Path),
]
@pytest.mark.parametrize("cls_a,cls_b", cls_pairs)
async def test_div_magic(cls_a, cls_b):
a, b = cls_a("a"), cls_b("b")
result = a / b
assert isinstance(result, trio.Path)
assert str(result) == os.path.join("a", "b")
@pytest.mark.parametrize(
"cls_a,cls_b", [(trio.Path, pathlib.Path), (trio.Path, trio.Path)]
)
@pytest.mark.parametrize("path", ["foo", "foo/bar/baz", "./foo"])
async def test_hash_magic(cls_a, cls_b, path):
a, b = cls_a(path), cls_b(path)
assert hash(a) == hash(b)
async def test_forwarded_properties(path):
# use `name` as a representative of forwarded properties
assert "name" in dir(path)
assert path.name == "test"
async def test_async_method_signature(path):
# use `resolve` as a representative of wrapped methods
assert path.resolve.__name__ == "resolve"
assert path.resolve.__qualname__ == "Path.resolve"
assert "pathlib.Path.resolve" in path.resolve.__doc__
@pytest.mark.parametrize("method_name", ["is_dir", "is_file"])
async def test_compare_async_stat_methods(method_name):
method, async_method = method_pair(".", method_name)
result = method()
async_result = await async_method()
assert result == async_result
async def test_invalid_name_not_wrapped(path):
with pytest.raises(AttributeError):
getattr(path, "invalid_fake_attr")
@pytest.mark.parametrize("method_name", ["absolute", "resolve"])
async def test_async_methods_rewrap(method_name):
method, async_method = method_pair(".", method_name)
result = method()
async_result = await async_method()
assert isinstance(async_result, trio.Path)
assert str(result) == str(async_result)
async def test_forward_methods_rewrap(path, tmpdir):
with_name = path.with_name("foo")
with_suffix = path.with_suffix(".py")
assert isinstance(with_name, trio.Path)
assert with_name == tmpdir.join("foo")
assert isinstance(with_suffix, trio.Path)
assert with_suffix == tmpdir.join("test.py")
async def test_forward_properties_rewrap(path):
assert isinstance(path.parent, trio.Path)
async def METHOD_NAME(path, tmpdir):
path = await path.parent.resolve()
assert path.as_uri().startswith("file:///")
async def test_repr():
path = trio.Path(".")
assert repr(path) == "trio.Path('.')"
class MockWrapped:
unsupported = "unsupported"
_private = "private"
class MockWrapper:
_forwards = MockWrapped
_wraps = MockWrapped
async def test_type_forwards_unsupported():
with pytest.raises(TypeError):
Type.generate_forwards(MockWrapper, {})
async def test_type_wraps_unsupported():
with pytest.raises(TypeError):
Type.generate_wraps(MockWrapper, {})
async def test_type_forwards_private():
Type.generate_forwards(MockWrapper, {"unsupported": None})
assert not hasattr(MockWrapper, "_private")
async def test_type_wraps_private():
Type.generate_wraps(MockWrapper, {"unsupported": None})
assert not hasattr(MockWrapper, "_private")
@pytest.mark.parametrize("meth", [trio.Path.__init__, trio.Path.joinpath])
async def test_path_wraps_path(path, meth):
wrapped = await path.absolute()
result = meth(path, wrapped)
if result is None:
result = path
assert wrapped == result
async def test_path_nonpath():
with pytest.raises(TypeError):
trio.Path(1)
async def test_open_file_can_open_path(path):
async with await trio.open_file(path, "w") as f:
assert f.name == os.fspath(path)
async def test_globmethods(path):
# Populate a directory tree
await path.mkdir()
await (path / "foo").mkdir()
await (path / "foo" / "_bar.txt").write_bytes(b"")
await (path / "bar.txt").write_bytes(b"")
await (path / "bar.dat").write_bytes(b"")
# Path.glob
for _pattern, _results in {
"*.txt": {"bar.txt"},
"**/*.txt": {"_bar.txt", "bar.txt"},
}.items():
entries = set()
for entry in await path.glob(_pattern):
assert isinstance(entry, trio.Path)
entries.add(entry.name)
assert entries == _results
# Path.rglob
entries = set()
for entry in await path.rglob("*.txt"):
assert isinstance(entry, trio.Path)
entries.add(entry.name)
assert entries == {"_bar.txt", "bar.txt"}
async def test_iterdir(path):
# Populate a directory
await path.mkdir()
await (path / "foo").mkdir()
await (path / "bar.txt").write_bytes(b"")
entries = set()
for entry in await path.iterdir():
assert isinstance(entry, trio.Path)
entries.add(entry.name)
assert entries == {"bar.txt", "foo"}
async def test_classmethods():
assert isinstance(await trio.Path.home(), trio.Path)
# pathlib.Path has only two classmethods
assert str(await trio.Path.home()) == os.path.expanduser("~")
assert str(await trio.Path.cwd()) == os.getcwd()
# Wrapped method has docstring
assert trio.Path.home.__doc__ |
316 | tear down class | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from functools import wraps
from os.path import dirname
from git import Repo
from git.exc import InvalidGitRepositoryError
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_pet import __version__ as qiita_pet_lib_version
from qiita_db.sql_connection import TRN
from qiita_db.environment_manager import reset_test_database
REDIS_QIITA_GIT_SHA_KEY = 'qiita-git-sha'
def is_test_environment():
"""Checks if Qiita is running in a test environment
Returns
-------
bool
Whether Qiita is running in a test environment or not
Notes
-----
Qiita is running in a test environment if:
- It is connected to a test database, AND
- The config file indicates that this is a test environment
"""
# Check that we are not in a production environment
with TRN:
TRN.add("SELECT test FROM settings")
test_db = TRN.execute_fetchflatten()[0]
return qiita_config.test_environment and test_db
def qiita_test_checker(test=False):
"""Decorator that allows the execution of all methods in a test class only
and only if Qiita is set up to work in a test environment.
Parameters
----------
test : bool, optional
        If True, force a RuntimeError to be raised even in a test environment
Raises
------
RuntimeError
If Qiita is set up to work in a production environment
"""
def class_modifier(cls):
if not is_test_environment() or test:
raise RuntimeError("Working in a production environment. Not "
"executing the tests to keep the production "
"database safe.")
# Now, we decorate the setup and teardown functions
class DecoratedClass(cls):
def setUp(self):
super(DecoratedClass, self).setUp()
@classmethod
@reset_test_database
def METHOD_NAME(cls):
pass
return DecoratedClass
return class_modifier
def execute_as_transaction(func):
"""Decorator to make a method execute inside a transaction"""
@wraps(func)
def wrapper(*args, **kwargs):
from qiita_db.sql_connection import TRN
with TRN:
return func(*args, **kwargs)
return wrapper
def update_redis_qiita_sha_version():
# the actual repo is the abspath of the current file without
# qiita_core
git_repo_path = dirname(dirname(__file__))
try:
repo = Repo(git_repo_path)
sha = repo.active_branch.commit.hexsha
repo.__del__()
except (InvalidGitRepositoryError, TypeError):
sha = ''
r_client.set(REDIS_QIITA_GIT_SHA_KEY, sha)
def get_qiita_version():
"""Returns the Qiita version and Git sha if present
Returns
    -------
tuple (version, sha)
The Qiita version and SHA. SHA can be an empty string.
"""
sha = r_client.get(REDIS_QIITA_GIT_SHA_KEY)
if sha is None:
sha = ''
return (qiita_pet_lib_version, sha)
def get_release_info(study_status='public'):
"""Returns the studies and the archive release details
Parameters
----------
study_status : str, optional
The study status to search for. Note that this should always be set
to 'public' but having this exposed helps with testing. The other
options are 'private' and 'sandbox'
Returns
    -------
((str, str, str), (str, str, str))
        The BIOM metadata release and the archive release, each as (MD5, filepath, timestamp)
"""
portal = qiita_config.portal
md5sum = r_client.get('%s:release:%s:md5sum' % (portal, study_status))
filepath = r_client.get('%s:release:%s:filepath' % (portal, study_status))
timestamp = r_client.get('%s:release:%s:time' % (portal, study_status))
# replacing None values for empty strings as the text is displayed nicely
# in the GUI
md5sum = '' if md5sum is None else md5sum.decode('ascii')
filepath = '' if filepath is None else filepath.decode('ascii')
timestamp = '' if timestamp is None else timestamp.decode('ascii')
biom_metadata_release = ((md5sum, filepath, timestamp))
md5sum = r_client.get('release-archive:md5sum')
filepath = r_client.get('release-archive:filepath')
timestamp = r_client.get('release-archive:time')
# replacing None values for empty strings as the text is displayed nicely
# in the GUI
md5sum = '' if md5sum is None else md5sum.decode('ascii')
filepath = '' if filepath is None else filepath.decode('ascii')
timestamp = '' if timestamp is None else timestamp.decode('ascii')
archive_release = ((md5sum, filepath, timestamp))
return (biom_metadata_release, archive_release) |
317 | test invalid alias name | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from typing import ContextManager
import pytest
from pants.option.alias import CliAlias, CliAliasCycleError, CliAliasInvalidError
from pants.option.scope import ScopeInfo
from pants.testutil.pytest_util import no_exception
from pants.util.frozendict import FrozenDict
def test_maybe_nothing() -> None:
cli_alias = CliAlias()
assert cli_alias.maybe_expand("arg") is None
@pytest.mark.parametrize(
"alias, expanded",
[
("--arg1", ("--arg1",)),
("--arg1 --arg2", ("--arg1", "--arg2")),
("--arg=value --option", ("--arg=value", "--option")),
("--arg=value --option flag", ("--arg=value", "--option", "flag")),
("--arg 'quoted value'", ("--arg", "quoted value")),
],
)
def test_maybe_expand_alias(alias: str, expanded: tuple[str, ...] | None) -> None:
cli_alias = CliAlias.from_dict(
{
"alias": alias,
}
)
assert cli_alias.maybe_expand("alias") == expanded
cli_alias = CliAlias.from_dict(
{
"--alias": alias,
}
)
assert cli_alias.maybe_expand("--alias") == expanded
@pytest.mark.parametrize(
"args, expanded",
[
(
("some", "alias", "target"),
("some", "--flag", "goal", "target"),
),
(
# Don't touch pass through args.
("some", "--", "alias", "target"),
("some", "--", "alias", "target"),
),
],
)
def test_expand_args(args: tuple[str, ...], expanded: tuple[str, ...]) -> None:
cli_alias = CliAlias.from_dict(
{
"alias": "--flag goal",
}
)
assert cli_alias.expand_args(args) == expanded
@pytest.mark.parametrize(
"args, expanded",
[
(
("some", "--alias", "target"),
("some", "--flag", "goal", "target"),
),
(
# Don't touch pass through args.
("some", "--", "--alias", "target"),
("some", "--", "--alias", "target"),
),
],
)
def test_expand_args_flag(args: tuple[str, ...], expanded: tuple[str, ...]) -> None:
cli_alias = CliAlias.from_dict(
{
"--alias": "--flag goal",
}
)
assert cli_alias.expand_args(args) == expanded
def test_no_expand_when_no_aliases() -> None:
args = ("./pants",)
cli_alias = CliAlias()
assert cli_alias.expand_args(args) is args
@pytest.mark.parametrize(
"alias, definitions",
[
(
{
"basic": "goal",
"nested": "--option=advanced basic",
},
{
"basic": ("goal",),
"nested": (
"--option=advanced",
"goal",
),
},
),
(
{
"multi-nested": "deep nested",
"basic": "goal",
"nested": "--option=advanced basic",
},
{
"multi-nested": ("deep", "--option=advanced", "goal"),
"basic": ("goal",),
"nested": (
"--option=advanced",
"goal",
),
},
),
(
{
"cycle": "other-alias",
"other-alias": "cycle",
},
pytest.raises(
CliAliasCycleError,
match=(
r"CLI alias cycle detected in `\[cli\]\.alias` option:\n"
+ r"other-alias -> cycle -> other-alias"
),
),
),
(
{
"cycle": "--other-alias",
"--other-alias": "cycle",
},
pytest.raises(
CliAliasCycleError,
match=(
r"CLI alias cycle detected in `\[cli\]\.alias` option:\n"
+ r"--other-alias -> cycle -> --other-alias"
),
),
),
(
{
"--cycle": "--other-alias",
"--other-alias": "--cycle",
},
pytest.raises(
CliAliasCycleError,
match=(
r"CLI alias cycle detected in `\[cli\]\.alias` option:\n"
+ r"--other-alias -> --cycle -> --other-alias"
),
),
),
],
)
def test_nested_alias(alias, definitions: dict | ContextManager) -> None:
expect: ContextManager = no_exception() if isinstance(definitions, dict) else definitions
with expect:
cli_alias = CliAlias.from_dict(alias)
if isinstance(definitions, dict):
assert cli_alias.definitions == FrozenDict(definitions)
@pytest.mark.parametrize(
"alias",
[
# Check that we do not allow any alias that may resemble a valid option/spec.
"dir/spec",
"file.name",
"target:name",
"-o",
"-option",
],
)
def METHOD_NAME(alias: str) -> None:
with pytest.raises(
CliAliasInvalidError, match=(f"Invalid alias in `\\[cli\\]\\.alias` option: {alias!r}\\.")
):
CliAlias.from_dict({alias: ""})
def test_banned_alias_names() -> None:
cli_alias = CliAlias.from_dict({"fmt": "--cleverness format"})
with pytest.raises(
CliAliasInvalidError,
match=(
r"Invalid alias in `\[cli\]\.alias` option: 'fmt'\. This is already a registered goal\."
),
):
cli_alias.check_name_conflicts({"fmt": ScopeInfo("fmt", is_goal=True)}, {})
@pytest.mark.parametrize(
"alias, info, expected",
[
(
{"--keep-sandboxes": "--foobar"},
{"": "--keep-sandboxes"},
pytest.raises(
CliAliasInvalidError,
match=(
r"Invalid flag-like alias in `\[cli\]\.alias` option: '--keep-sandboxes'\. This is already a registered flag in the 'global' scope\."
),
),
),
(
{"--changed-since": "--foobar"},
{"changed": "--changed-since"},
pytest.raises(
CliAliasInvalidError,
match=(
r"Invalid flag-like alias in `\[cli\]\.alias` option: '--changed-since'\. This is already a registered flag in the 'changed' scope\."
),
),
),
],
)
def test_banned_alias_flag_names(alias, info, expected) -> None:
cli_alias = CliAlias.from_dict(alias)
with expected:
cli_alias.check_name_conflicts({}, info) |
318 | get line cap | from typing import Any, Optional
class Context:
def __init__(self, target: Any) -> None: ...
def get_target(self): ...
def save(self) -> None: ...
def restore(self) -> None: ...
def __enter__(self): ...
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: ...
def push_group(self) -> None: ...
def push_group_with_content(self, content: Any) -> None: ...
def pop_group(self): ...
def pop_group_to_source(self) -> None: ...
def get_group_target(self): ...
def set_source_rgba(
self, red: float, green: float, blue: float, alpha: float = ...
) -> None: ...
def set_source_rgb(self, red: float, green: float, blue: float) -> None: ...
def set_source_surface(self, surface: Any, x: int = ..., y: int = ...) -> None: ...
def set_source(self, source: Any) -> None: ...
def get_source(self): ...
def set_antialias(self, antialias: Any) -> None: ...
def get_antialias(self): ...
def set_dash(self, dashes: Any, offset: int = ...) -> None: ...
def get_dash(self): ...
def get_dash_count(self): ...
def set_fill_rule(self, fill_rule: Any) -> None: ...
def get_fill_rule(self): ...
def set_line_cap(self, line_cap: Any) -> None: ...
def METHOD_NAME(self): ...
def set_line_join(self, line_join: Any) -> None: ...
def get_line_join(self): ...
def set_line_width(self, width: Any) -> None: ...
def get_line_width(self): ...
def set_miter_limit(self, limit: Any) -> None: ...
def get_miter_limit(self): ...
def set_operator(self, operator: Any) -> None: ...
def get_operator(self): ...
def set_tolerance(self, tolerance: Any) -> None: ...
def get_tolerance(self): ...
def translate(self, tx: Any, ty: Any) -> None: ...
def scale(self, sx: Any, sy: Optional[Any] = ...) -> None: ...
def rotate(self, radians: Any) -> None: ...
def transform(self, matrix: Any) -> None: ...
def set_matrix(self, matrix: Any) -> None: ...
def get_matrix(self): ...
def identity_matrix(self) -> None: ...
def user_to_device(self, x: Any, y: Any): ...
def user_to_device_distance(self, dx: Any, dy: Any): ...
def device_to_user(self, x: Any, y: Any): ...
def device_to_user_distance(self, dx: Any, dy: Any): ...
def has_current_point(self): ...
def get_current_point(self): ...
def new_path(self) -> None: ...
def new_sub_path(self) -> None: ...
def move_to(self, x: Any, y: Any) -> None: ...
def rel_move_to(self, dx: Any, dy: Any) -> None: ...
def line_to(self, x: Any, y: Any) -> None: ...
def rel_line_to(self, dx: Any, dy: Any) -> None: ...
def rectangle(self, x: Any, y: Any, width: Any, height: Any) -> None: ...
def arc(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def arc_negative(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def curve_to(self, x1: Any, y1: Any, x2: Any, y2: Any, x3: Any, y3: Any) -> None: ...
def rel_curve_to(
self, dx1: Any, dy1: Any, dx2: Any, dy2: Any, dx3: Any, dy3: Any
) -> None: ...
def text_path(self, text: Any) -> None: ...
def glyph_path(self, glyphs: Any) -> None: ...
def close_path(self) -> None: ...
def copy_path(self): ...
def copy_path_flat(self): ...
def append_path(self, path: Any) -> None: ...
def path_extents(self): ...
def paint(self) -> None: ...
def paint_with_alpha(self, alpha: Any) -> None: ...
def mask(self, pattern: Any) -> None: ...
def mask_surface(self, surface: Any, surface_x: int = ..., surface_y: int = ...) -> None: ...
def fill(self) -> None: ...
def fill_preserve(self) -> None: ...
def fill_extents(self): ...
def in_fill(self, x: Any, y: Any): ...
def stroke(self) -> None: ...
def stroke_preserve(self) -> None: ...
def stroke_extents(self): ...
def in_stroke(self, x: Any, y: Any): ...
def clip(self) -> None: ...
def clip_preserve(self) -> None: ...
def clip_extents(self): ...
def copy_clip_rectangle_list(self): ...
def in_clip(self, x: Any, y: Any): ...
def reset_clip(self) -> None: ...
def select_font_face(
self, family: str = ..., slant: Any = ..., weight: Any = ...
) -> None: ...
def set_font_face(self, font_face: Any) -> None: ...
def get_font_face(self): ...
def set_font_size(self, size: Any) -> None: ...
def set_font_matrix(self, matrix: Any) -> None: ...
def get_font_matrix(self): ...
def set_font_options(self, font_options: Any) -> None: ...
def get_font_options(self): ...
def set_scaled_font(self, scaled_font: Any) -> None: ...
def get_scaled_font(self): ...
def font_extents(self): ...
def text_extents(self, text: Any): ...
def glyph_extents(self, glyphs: Any): ...
def show_text(self, text: Any) -> None: ...
def show_glyphs(self, glyphs: Any) -> None: ...
def show_text_glyphs(
self, text: Any, glyphs: Any, clusters: Any, cluster_flags: int = ...
) -> None: ...
def show_page(self) -> None: ...
def copy_page(self) -> None: ...
def tag_begin(self, tag_name: Any, attributes: Optional[Any] = ...) -> None: ...
def tag_end(self, tag_name: Any) -> None: ... |
319 | test fit decay | """
Tests for tedana.decay
"""
import os.path as op
import numpy as np
import pytest
from tedana import combine
from tedana import decay as me
from tedana import io, utils
from tedana.tests.utils import get_test_data_path
@pytest.fixture(scope="module")
def testdata1():
tes = np.array([14.5, 38.5, 62.5])
in_files = [op.join(get_test_data_path(), "echo{0}.nii.gz".format(i + 1)) for i in range(3)]
data, _ = io.load_data(in_files, n_echos=len(tes))
mask, adaptive_mask = utils.make_adaptive_mask(data, getsum=True)
fittype = "loglin"
data_dict = {
"data": data,
"tes": tes,
"mask": mask,
"adaptive_mask": adaptive_mask,
"fittype": fittype,
}
return data_dict
def METHOD_NAME(testdata1):
"""
fit_decay should return data in (samples,) shape.
"""
t2sv, s0v, t2svG, s0vG = me.fit_decay(
testdata1["data"],
testdata1["tes"],
testdata1["mask"],
testdata1["adaptive_mask"],
testdata1["fittype"],
)
assert t2sv.ndim == 1
assert s0v.ndim == 1
assert t2svG.ndim == 1
assert s0vG.ndim == 1
def test_fit_decay_ts(testdata1):
"""
fit_decay_ts should return data in samples x time shape.
"""
t2sv, s0v, t2svG, s0vG = me.fit_decay_ts(
testdata1["data"],
testdata1["tes"],
testdata1["mask"],
testdata1["adaptive_mask"],
testdata1["fittype"],
)
assert t2sv.ndim == 2
assert s0v.ndim == 2
assert t2svG.ndim == 2
assert s0vG.ndim == 2
def test__apply_t2s_floor():
"""
_apply_t2s_floor applies a floor to T2* values to prevent a ZeroDivisionError during
optimal combination.
"""
n_voxels, n_echos, n_trs = 100, 5, 25
echo_times = np.array([2, 23, 54, 75, 96])
me_data = np.random.random((n_voxels, n_echos, n_trs))
t2s = np.random.random((n_voxels)) * 1000
t2s[t2s < 1] = 1 # Crop at 1 ms to be safe
t2s[0] = 0.001
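    # One voxel is deliberately pushed below the 1 ms floor so that the uncorrected map
    # triggers the ZeroDivisionError demonstrated below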
# First establish a failure
with pytest.raises(ZeroDivisionError):
_ = combine._combine_t2s(me_data, echo_times[None, :], t2s[:, None])
# Now correct the T2* map and get a successful result.
t2s_corrected = me._apply_t2s_floor(t2s, echo_times)
assert t2s_corrected[0] != t2s[0] # First value should be corrected
assert np.array_equal(t2s_corrected[1:], t2s[1:]) # No other values should be corrected
combined = combine._combine_t2s(me_data, echo_times[None, :], t2s_corrected[:, None])
assert np.all(combined != 0)
# SMOKE TESTS
def test_smoke_fit_decay():
"""
test_smoke_fit_decay tests that the function fit_decay returns reasonable
objects with semi-random inputs in the correct format.
A mask with at least some "good" voxels and an adaptive mask where all
good voxels have at least two good echoes are generated to ensure that
the decay-fitting function has valid voxels on which to run.
"""
n_samples = 100
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
tes = np.random.random((n_echos)).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
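    # Every in-mask voxel gets at least 2 good echoes, matching the requirement described in the docstring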
fittype = "loglin"
t2s_limited, s0_limited, t2s_full, s0_full = me.fit_decay(
data, tes, mask, adaptive_mask, fittype
)
assert t2s_limited is not None
assert s0_limited is not None
assert t2s_full is not None
assert s0_full is not None
def test_smoke_fit_decay_curvefit():
"""
    test_smoke_fit_decay_curvefit tests that the function fit_decay returns
    reasonable objects with random inputs in the correct format when using the
    direct monoexponential approach
"""
n_samples = 100
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
tes = np.random.random((n_echos)).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
fittype = "curvefit"
t2s_limited, s0_limited, t2s_full, s0_full = me.fit_decay(
data, tes, mask, adaptive_mask, fittype
)
assert t2s_limited is not None
assert s0_limited is not None
assert t2s_full is not None
assert s0_full is not None
def test_smoke_fit_decay_ts():
"""
test_smoke_fit_decay_ts tests that the function fit_decay_ts returns reasonable
objects with random inputs in the correct format
"""
n_samples = 100
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
tes = np.random.random((n_echos)).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
fittype = "loglin"
t2s_limited_ts, s0_limited_ts, t2s_full_ts, s0_full_ts = me.fit_decay_ts(
data, tes, mask, adaptive_mask, fittype
)
assert t2s_limited_ts is not None
assert s0_limited_ts is not None
assert t2s_full_ts is not None
assert s0_full_ts is not None
def test_smoke_fit_decay_curvefit_ts():
"""
    test_smoke_fit_decay_curvefit_ts tests that the function fit_decay_ts returns
    reasonable objects with random inputs in the correct format when using the
    direct monoexponential approach
"""
n_samples = 100
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
tes = np.random.random((n_echos)).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
fittype = "curvefit"
t2s_limited_ts, s0_limited_ts, t2s_full_ts, s0_full_ts = me.fit_decay_ts(
data, tes, mask, adaptive_mask, fittype
)
assert t2s_limited_ts is not None
assert s0_limited_ts is not None
assert t2s_full_ts is not None
assert s0_full_ts is not None
# TODO: BREAK AND UNIT TESTS |
320 | clear | #!/usr/bin/python
'''* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
* Mupen64plus - code_convert.c *
* Mupen64Plus homepage: http://code.google.com/p/mupen64plus/ *
* Copyright (C) 2010 Rhett Osborne *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program; if not, write to the *
* Free Software Foundation, Inc., *
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
Usage:
python code_convert.py > ../data/mupencheat.txt < ../data/mupen64plus.cht
'''
from sys import stdin
class cheat:
def __init__(self):
self.n=""
self.d=""
self.c=[]
self.v=0
self.hb='00'
def add(self, l):
if(self.n == ""):
return
l.append(" cn %s"%(self.n))
if(self.d != ""): l.append(" cd %s"%(self.d))
for code in self.c:
l.append(" "+code)
def METHOD_NAME(self):
self.n=""
self.d=""
self.c=[]
self.v=0
l=[]
cCount=0
_cs = []
for i in range(225):
_cs.append(cheat())
cs = _cs[:]
def print_l():
global l, cs
for cheat in cs:
cheat.add(l)
for line in l:
print line.replace("\x00", "")
l=[]
cCount=0
for i in range(225):
cs[i].METHOD_NAME()
lines = stdin.read().split("\n")
for line in lines:
if len(line) < 2: continue
elif(line[:2] == "//" and line != "//----" and line != "//---" ):
l.append(line)
elif len(line) < 4: continue
elif(line[0] == '[' and line[-1] == ']' and len(line) > 23):
print_l()
l.append("\ncrc %s" % line[1:-1])
elif(line[:5] == "Name="):
l.append("gn %s" % (line[5:]))
elif(line[:5] == "Cheat"):
t = line[5:].split('=')[0]
if (len(t)>1 and t[-2] == '_'):
n = int(t[:-2])
if(t[-1] == 'N'):
cs[n].d = line.split("=")[1]
else:
for option in line.split("=")[1].split("$")[1:]:
if(len(option) < 4):
                        break
if(option[-1]==','): end =-1
else: end = None
if(option[2] == " "):
cs[n].c[cs[n].v] += "%s%s:\"%s\""%(cs[n].hb,option[:2],option[3:end].replace("\"", "\\\""))
else:
cs[n].c[cs[n].v] += "%s:\"%s\""%(option[:4],option[5:end].replace("\"", "\\\""))
cs[n].c[cs[n].v]+=','
cs[n].c[cs[n].v] = cs[n].c[cs[n].v][:-1]
else:
n = int(t)
cn = line.split('"')
cs[n].c = cn[2][1:].split(',')
cs[n].n = cn[1];
i=0
for cheat in cs[n].c:
if(cheat[-1] == '?'):
if(cheat[-2:] == '??' and cheat[-4:-2] != '??'):
cs[n].hb = cheat[-4:-2]
else:
cs[n].hb = '00'
cs[n].c[i] = cheat[:9] + "???? ";
cs[n].v=i
i+=1
if(n > cCount):
cCount = n
elif(line != "//----" and line != "//---" ):
l.append("//%s" %line) |
321 | exit query | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=all
from .ScheduleQueryConditionListener import ScheduleQueryConditionListener
from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import ConditionFailingPeriods
op_conversion = {
'=': 'Equal',
'!=': 'NotEqual',
'>': 'GreaterThan',
'>=': 'GreaterThanOrEqual',
'<': 'LessThan',
'<=': 'LessThanOrEqual'
}
agg_conversion = {
'avg': 'Average',
'min': 'Minimum',
'max': 'Maximum',
'total': 'Total',
'count': 'Count'
}
dim_op_conversion = {
'includes': 'Include',
'excludes': 'Exclude'
}
# This class defines a complete listener for a parse tree produced by MetricAlertConditionParser.
class ScheduleQueryConditionValidator(ScheduleQueryConditionListener):
def __init__(self):
super(ScheduleQueryConditionValidator, self).__init__()
self.parameters = {}
self._dimension_index = 0
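        # Tracks which entry of self.parameters['dimensions'] the enter/exit callbacks are currently filling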
# Exit a parse tree produced by MetricAlertConditionParser#aggregation.
def exitAggregation(self, ctx):
aggregation = agg_conversion[ctx.getText().strip()]
self.parameters['time_aggregation'] = aggregation
# Exit a parse tree produced by MetricAlertConditionParser#metric.
def exitMetric(self, ctx):
self.parameters['metric_measure_column'] = ctx.getText().strip()
# Exit a parse tree produced by MetricAlertConditionParser#operator.
def exitOperator(self, ctx):
operator = op_conversion[ctx.getText().strip()]
self.parameters['operator'] = operator
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitThreshold(self, ctx):
self.parameters['threshold'] = ctx.getText().strip()
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def METHOD_NAME(self, ctx):
query = ctx.getText().strip()
query = query.replace("\\\"", "\"")
query = query.replace("\\\'", "\'")
self.parameters['query'] = query
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitResource_id(self, ctx):
self.parameters['resource_id_column'] = ctx.getText().strip()
# Enter a parse tree produced by MetricAlertConditionParser#dimensions.
def enterFalling_period(self, ctx):
self.parameters['failing_periods'] = ConditionFailingPeriods()
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitMin_times(self, ctx):
self.parameters['failing_periods'].min_failing_periods_to_alert = int(float(ctx.getText().strip()))
# Exit a parse tree produced by MetricAlertConditionParser#threshold.
def exitEvaluation_period(self, ctx):
self.parameters['failing_periods'].number_of_evaluation_periods = int(float(ctx.getText().strip()))
# Enter a parse tree produced by MetricAlertConditionParser#dimensions.
def enterDimensions(self, ctx):
self.parameters['dimensions'] = []
# Enter a parse tree produced by MetricAlertConditionParser#dimension.
def enterDimension(self, ctx):
self.parameters['dimensions'].append({})
# Exit a parse tree produced by MetricAlertConditionParser#dimension.
def exitDimension(self, ctx):
self._dimension_index = self._dimension_index + 1
# Exit a parse tree produced by MetricAlertConditionParser#dname.
def exitDim_name(self, ctx):
self.parameters['dimensions'][self._dimension_index]['name'] = ctx.getText().strip()
# Exit a parse tree produced by MetricAlertConditionParser#dop.
def exitDim_operator(self, ctx):
op_text = ctx.getText().strip()
self.parameters['dimensions'][self._dimension_index]['operator'] = dim_op_conversion[op_text.lower()]
# Exit a parse tree produced by MetricAlertConditionParser#dvalues.
def exitDim_values(self, ctx):
dvalues = ctx.getText().strip().split(' ')
self.parameters['dimensions'][self._dimension_index]['values'] = [x for x in dvalues if x not in ['', 'or']]
def result(self):
from azext_scheduled_query.vendored_sdks.azure_mgmt_scheduled_query.models import Condition, Dimension
dim_params = self.parameters.get('dimensions', [])
dimensions = []
for dim in dim_params:
dimensions.append(Dimension(**dim))
self.parameters['dimensions'] = dimensions
return Condition(**self.parameters) |
322 | edge l2 error | import numpy as np
from .GaussLobattoQuadrature import GaussLobattoQuadrature
from .GaussLegendreQuadrature import GaussLegendreQuadrature
class PolygonMeshIntegralAlg():
def __init__(self, mesh, q, cellmeasure=None, cellbarycenter=None):
self.mesh = mesh
self.integrator = mesh.integrator(q)
self.cellintegrator = self.integrator
self.cellbarycenter = cellbarycenter if cellbarycenter is not None \
else mesh.entity_barycenter('cell')
self.cellmeasure = cellmeasure if cellmeasure is not None \
else mesh.entity_measure('cell')
self.edgemeasure = mesh.entity_measure('edge')
self.edgebarycenter = mesh.entity_barycenter('edge')
self.edgeintegrator = GaussLegendreQuadrature(q)
self.facemeasure = self.edgemeasure
self.facebarycenter = self.edgebarycenter
self.faceintegrator = self.edgeintegrator
def triangle_measure(self, tri):
v1 = tri[1] - tri[0]
v2 = tri[2] - tri[0]
area = np.cross(v1, v2)/2
return area
def edge_integral(self, u, q=None, index=None):
"""
Note:
            The edgetype parameter should be removed; the function name means integration over each entity individually.
"""
mesh = self.mesh
NE = mesh.number_of_edges()
node = mesh.entity('node')
edge = mesh.entity('edge')
qf = self.edgeintegrator
bcs, ws = qf.quadpts, qf.weights
index = index or np.s_[:]
ps = mesh.edge_bc_to_point(bcs, index=index)
        val = u(ps) # TODO: defaults to spatial coordinates here; is there a barycentric-coordinate form?
e = np.einsum('q, qe..., e->e...', ws, val, self.edgemeasure[index])
return e
def face_integral(self, u, q=None, index=None):
"""
"""
return self.edge_integral(u, facetype, q, index)
def cell_integral(self, u, q=None):
"""
TODO:
            introduce a power parameter
"""
return self.integral(u, celltype=True, q=q)
def integral(self, u, celltype=False, q=None):
mesh = self.mesh
node = mesh.node
bc = self.cellbarycenter
edge = mesh.entity('edge')
edge2cell = mesh.ds.edge_to_cell()
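        # edge2cell[:, 0] and edge2cell[:, 1] are the cells on either side of each edge;
        # boundary edges reference the same cell on both sides (see the isInEdge check below)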
NC = mesh.number_of_cells()
qf = self.cellintegrator if q is None else self.mesh.integrator(q)
bcs, ws = qf.quadpts, qf.weights
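        # Triangulate each cell by connecting its barycenter to every edge and integrate over these sub-triangles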
tri = [bc[edge2cell[:, 0]], node[edge[:, 0]], node[edge[:, 1]]]
a = self.triangle_measure(tri)
pp = np.einsum('ij, jkm->ikm', bcs, tri, optimize=True)
val = u(pp, edge2cell[:, 0])
shape = (NC, ) + val.shape[2:]
e = np.zeros(shape, dtype=np.float64)
ee = np.einsum('i, ij..., j->j...', ws, val, a, optimize=True)
np.add.at(e, edge2cell[:, 0], ee)
isInEdge = (edge2cell[:, 0] != edge2cell[:, 1])
if np.sum(isInEdge) > 0:
tri = [
bc[edge2cell[isInEdge, 1]],
node[edge[isInEdge, 1]],
node[edge[isInEdge, 0]]
]
a = self.triangle_measure(tri)
pp = np.einsum('ij, jkm->ikm', bcs, tri, optimize=True)
val = u(pp, edge2cell[isInEdge, 1])
ee = np.einsum('i, ij..., j->j...', ws, val, a, optimize=True)
np.add.at(e, edge2cell[isInEdge, 1], ee)
if celltype is True:
return e
else:
return e.sum(axis=0)
def fun_integral(self, f, celltype=False, q=None):
def u(x, index):
return f(x)
return self.integral(u, celltype=celltype, q=q)
def error(self, u, v, celltype=False, power=2, q=None):
"""
Notes
-----
        Given two functions, compute the difference between them; by default the
        L2 difference is computed (power=2). power can be any p.
        TODO
        ----
        1. Consider the infinity-norm case
"""
def efun(x, index):
return np.abs(u(x) - v(x, index))**power
e = self.integral(efun, celltype=celltype, q=q)
if isinstance(e, np.ndarray):
n = len(e.shape) - 1
if n > 0:
for i in range(n):
e = e.sum(axis=-1)
if celltype == False:
e = np.power(np.sum(e), 1/power)
else:
e = np.power(np.sum(e, axis=tuple(range(1, len(e.shape)))), 1/power)
return e
def L1_error(self, u, uh, celltype=False, q=None):
def f(x, index):
return np.abs(u(x) - uh(x, index))
e = self.integral(f, celltype=celltype, q=q)
return e
def L2_error(self, u, uh, celltype=False, q=None):
#TODO: deal with u is a discrete Function
def f(x, index):
return (u(x) - uh(x, index))**2
e = self.integral(f, celltype=celltype, q=q)
if isinstance(e, np.ndarray):
n = len(e.shape) - 1
if n > 0:
for i in range(n):
e = e.sum(axis=-1)
if celltype is False:
e = e.sum()
return np.sqrt(e)
def L2_error_1(self, u, uh, celltype=False, q=None):
def f(x, index):
return (u(x, index) - uh(x, index))**2
e = self.integral(f, celltype=celltype, q=q)
if isinstance(e, np.ndarray):
n = len(e.shape) - 1
if n > 0:
for i in range(n):
e = e.sum(axis=-1)
if celltype is False:
e = e.sum()
return np.sqrt(e)
def METHOD_NAME(self, u, uh, celltype=False, q=None):
mesh = self.mesh
NE = mesh.number_of_edges()
node = mesh.entity('node')
edge = mesh.entity('edge')
p = uh.space.p
qf = self.edgeintegrator if q is None else GaussLegendreQuadrature(p + 3)
bcs, ws = qf.quadpts, qf.weights
ps = np.einsum('ij, kjm->ikm', bcs, node[edge])
val = u(ps) - uh.edge_value(bcs)
e = np.sqrt(np.sum(
np.einsum(
'i, ij..., ij..., j->...', ws, val, val, self.edgemeasure
)/NE)
)
return e
def Lp_error(self, u, uh, p, celltype=False, q=None):
def f(x, index):
return np.abs(u(x) - uh(x, index))**p
e = self.integral(f, celltype=celltype, q=q)
return e**(1/p) |
323 | test csp custom | import requests
from PySide6 import QtTest
from .gui_base_test import GuiBaseTest
class TestWebsite(GuiBaseTest):
# Shared test methods
def view_website(self, tab):
"""Test that we can download the share"""
url = f"http://127.0.0.1:{tab.app.port}/"
r = requests.get(url)
QtTest.QTest.qWait(500, self.gui.qtapp)
self.assertTrue("This is a test website hosted by OnionShare" in r.text)
def check_csp_header(self, tab):
"""Test that the CSP header is present when enabled or vice versa"""
url = f"http://127.0.0.1:{tab.app.port}/"
r = requests.get(url)
QtTest.QTest.qWait(500, self.gui.qtapp)
if tab.settings.get("website", "disable_csp"):
self.assertFalse("Content-Security-Policy" in r.headers)
elif tab.settings.get("website", "custom_csp"):
self.assertEqual(tab.settings.get("website", "custom_csp"), r.headers["Content-Security-Policy"])
else:
self.assertEqual("default-src 'self'; frame-ancestors 'none'; form-action 'self'; base-uri 'self'; img-src 'self' data:;", r.headers["Content-Security-Policy"])
def run_all_website_mode_setup_tests(self, tab):
"""Tests in website mode prior to starting a share"""
tab.get_mode().server_status.file_selection.file_list.add_file(
self.tmpfile_index_html
)
for filename in self.tmpfiles:
tab.get_mode().server_status.file_selection.file_list.add_file(filename)
self.file_selection_widget_has_files(tab, 11)
self.history_is_not_visible(tab)
self.click_toggle_history(tab)
self.history_is_visible(tab)
def run_all_website_mode_started_tests(self, tab, startup_time=500):
"""Tests in website mode after starting a share"""
self.server_working_on_start_button_pressed(tab)
self.server_status_indicator_says_starting(tab)
self.add_remove_buttons_hidden(tab)
self.server_is_started(tab, startup_time)
self.web_server_is_running(tab)
self.url_description_shown(tab)
self.url_instructions_shown(tab)
self.url_shown(tab)
self.have_copy_url_button(tab)
self.have_show_url_qr_code_button(tab)
self.client_auth_instructions_shown(tab)
self.private_key_shown(tab)
self.have_show_client_auth_qr_code_button(tab)
self.server_status_indicator_says_started(tab)
def run_all_website_mode_download_tests(self, tab):
"""Tests in website mode after viewing the site"""
self.run_all_website_mode_setup_tests(tab)
self.run_all_website_mode_started_tests(tab, startup_time=500)
self.view_website(tab)
self.check_csp_header(tab)
self.history_widgets_present(tab)
self.server_is_stopped(tab)
self.web_server_is_stopped(tab)
self.server_status_indicator_says_closed(tab)
self.add_button_visible(tab)
# Tests
def test_website(self):
"""
Test website mode
"""
tab = self.new_website_tab()
self.run_all_website_mode_download_tests(tab)
self.close_all_tabs()
def test_csp_disabled(self):
"""
Test disabling CSP
"""
tab = self.new_website_tab()
tab.get_mode().disable_csp_checkbox.click()
self.assertFalse(tab.get_mode().custom_csp_checkbox.isEnabled())
self.run_all_website_mode_download_tests(tab)
self.close_all_tabs()
def METHOD_NAME(self):
"""
Test a custom CSP
"""
tab = self.new_website_tab()
tab.get_mode().custom_csp_checkbox.click()
self.assertFalse(tab.get_mode().disable_csp_checkbox.isEnabled())
tab.settings.set("website", "custom_csp", "default-src 'self'")
self.run_all_website_mode_download_tests(tab)
self.close_all_tabs()
def test_405_page_returned_for_invalid_methods(self):
"""
Our custom 405 page should return for invalid methods
"""
tab = self.new_website_tab()
tab.get_mode().mode_settings_widget.public_checkbox.click()
self.run_all_common_setup_tests()
self.run_all_website_mode_setup_tests(tab)
self.run_all_website_mode_started_tests(tab)
url = f"http://127.0.0.1:{tab.app.port}/"
self.hit_405(url, expected_resp="OnionShare: 405 Method Not Allowed", data = {'foo':'bar'}, methods = ["put", "post", "delete", "options"])
self.close_all_tabs() |
324 | categorical | import torch
import torch.nn.functional as tnn
from keras_core.backend.config import floatx
from keras_core.backend.torch.core import convert_to_tensor
from keras_core.backend.torch.core import get_device
from keras_core.backend.torch.core import to_torch_dtype
from keras_core.random.seed_generator import SeedGenerator
from keras_core.random.seed_generator import draw_seed
from keras_core.random.seed_generator import make_default_seed
def torch_seed_generator(seed):
first_seed, second_seed = draw_seed(seed)
device = get_device()
if device == "meta":
# Generator is not supported by the meta device.
return None
generator = torch.Generator(device=get_device())
generator.manual_seed(int(first_seed + second_seed))
return generator
def normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.normal(
mean, stddev, size=shape, dtype=dtype, device=get_device()
)
generator = torch_seed_generator(seed)
return torch.normal(
mean,
stddev,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def METHOD_NAME(logits, num_samples, dtype="int32", seed=None):
logits = convert_to_tensor(logits)
dtype = to_torch_dtype(dtype)
probs = torch.softmax(logits, dim=-1)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.multinomial(
probs,
num_samples,
replacement=True,
).type(dtype)
generator = torch_seed_generator(seed)
return torch.multinomial(
probs,
num_samples,
replacement=True,
generator=generator,
).type(dtype)
def uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None):
dtype = dtype or floatx()
dtype = to_torch_dtype(dtype)
requested_shape = shape
if len(requested_shape) == 0:
shape = (1,)
# Do not use generator during symbolic execution.
if get_device() == "meta":
rand_tensor = torch.rand(size=shape, dtype=dtype, device=get_device())
else:
generator = torch_seed_generator(seed)
rand_tensor = torch.rand(
size=shape, generator=generator, dtype=dtype, device=get_device()
)
output = (maxval - minval) * rand_tensor + minval
if len(requested_shape) == 0:
return output[0]
return output
def randint(shape, minval, maxval, dtype="int32", seed=None):
dtype = to_torch_dtype(dtype)
# Do not use generator during symbolic execution.
if get_device() == "meta":
return torch.randint(
low=minval,
high=maxval,
size=shape,
dtype=dtype,
device=get_device(),
)
generator = torch_seed_generator(seed)
return torch.randint(
low=minval,
high=maxval,
size=shape,
generator=generator,
dtype=dtype,
device=get_device(),
)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
# Take a larger standard normal dist, discard values outside 2 * stddev
# Offset by mean and stddev
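    # Four standard-normal candidates are drawn per output element; the boolean
    # max below picks the index of a candidate inside (-2, 2), which is gathered,
    # scaled by stddev and shifted by mean.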
x = normal(tuple(shape) + (4,), mean=0, stddev=1, dtype=dtype, seed=seed)
valid = (x > -2) & (x < 2)
indexes = valid.max(-1, keepdim=True)[1]
trunc_x = torch.empty(shape, device=get_device())
trunc_x.data.copy_(x.gather(-1, indexes).squeeze(-1))
trunc_x.data.mul_(stddev).add_(mean)
return trunc_x
def _get_concrete_noise_shape(inputs, noise_shape):
if noise_shape is None:
return inputs.shape
concrete_inputs_shape = inputs.shape
concrete_noise_shape = []
for i, value in enumerate(noise_shape):
concrete_noise_shape.append(
concrete_inputs_shape[i] if value is None else value
)
return concrete_noise_shape
def dropout(inputs, rate, noise_shape=None, seed=None):
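    # The explicit-mask path below is only taken when the caller asks for a
    # concrete seed or a custom noise_shape; otherwise the fast built-in
    # dropout at the bottom is used.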
if (
seed is not None
and not (isinstance(seed, SeedGenerator) and seed._initial_seed is None)
or noise_shape is not None
):
keep_prob = 1.0 - rate
noise_shape = _get_concrete_noise_shape(inputs, noise_shape)
keep_prob_matrix = torch.full(
noise_shape, keep_prob, device=get_device()
)
generator = torch_seed_generator(seed)
# Do not use generator during symbolic execution.
if get_device() == "meta":
mask = torch.bernoulli(keep_prob_matrix)
else:
mask = torch.bernoulli(keep_prob_matrix, generator=generator)
mask = mask.bool()
mask = torch.broadcast_to(mask, inputs.shape)
return torch.where(
mask,
inputs / keep_prob,
torch.zeros_like(inputs, dtype=inputs.dtype),
)
    # Fast path, unseeded (torch does not support seeding its built-in dropout).
# Using the above implementation is possible, but much slower.
return torch.nn.functional.dropout(
inputs, p=rate, training=True, inplace=False
) |
325 | get code pull request id | from typing import Callable, Dict, Optional
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.exceptions import UnsupportedWebhookEventTypeError
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_int, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.lib.webhooks.git import (
TOPIC_WITH_BRANCH_TEMPLATE,
TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE,
get_pull_request_event_message,
get_push_commits_event_message,
)
from zerver.models import UserProfile
def get_code_pull_request_updated_body(payload: WildValue) -> str:
return get_pull_request_event_message(
user_name=get_code_pull_request_user_name(payload),
action="updated",
url=get_code_pull_request_url(payload),
number=METHOD_NAME(payload),
message=payload["detailedMessage"]["markdown"].tame(check_string),
title=get_code_pull_request_title(payload),
)
def get_code_pull_request_merged_body(payload: WildValue) -> str:
return get_pull_request_event_message(
user_name=get_code_pull_request_user_name(payload),
action="merged",
url=get_code_pull_request_url(payload),
number=METHOD_NAME(payload),
target_branch=payload["resource"]["sourceRefName"]
.tame(check_string)
.replace("refs/heads/", ""),
base_branch=payload["resource"]["targetRefName"]
.tame(check_string)
.replace("refs/heads/", ""),
title=get_code_pull_request_title(payload),
)
def get_code_pull_request_opened_body(payload: WildValue) -> str:
if payload["resource"].get("description"):
description = payload["resource"]["description"].tame(check_string)
else:
description = None
return get_pull_request_event_message(
user_name=get_code_pull_request_user_name(payload),
action="created",
url=get_code_pull_request_url(payload),
number=METHOD_NAME(payload),
target_branch=payload["resource"]["sourceRefName"]
.tame(check_string)
.replace("refs/heads/", ""),
base_branch=payload["resource"]["targetRefName"]
.tame(check_string)
.replace("refs/heads/", ""),
message=description,
title=get_code_pull_request_title(payload),
)
def get_code_push_commits_body(payload: WildValue) -> str:
compare_url = "{}/branchCompare?baseVersion=GC{}&targetVersion=GC{}&_a=files".format(
get_code_repository_url(payload),
payload["resource"]["refUpdates"][0]["oldObjectId"].tame(check_string),
payload["resource"]["refUpdates"][0]["newObjectId"].tame(check_string),
)
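    # compare_url points at Azure DevOps' branch-compare view between the old and
    # new commit ids of the pushed ref.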
commits_data = [
{
"name": commit["author"]["name"].tame(check_string),
"sha": commit["commitId"].tame(check_string),
"url": "{}/commit/{}".format(
get_code_repository_url(payload), commit["commitId"].tame(check_string)
),
"message": commit["comment"].tame(check_string),
}
for commit in payload["resource"].get("commits", [])
]
return get_push_commits_event_message(
get_code_push_user_name(payload),
compare_url,
get_code_push_branch_name(payload),
commits_data,
)
def get_code_push_user_name(payload: WildValue) -> str:
return payload["resource"]["pushedBy"]["displayName"].tame(check_string)
def get_code_push_branch_name(payload: WildValue) -> str:
return (
payload["resource"]["refUpdates"][0]["name"].tame(check_string).replace("refs/heads/", "")
)
def get_code_repository_name(payload: WildValue) -> str:
return payload["resource"]["repository"]["name"].tame(check_string)
def get_code_repository_url(payload: WildValue) -> str:
return payload["resource"]["repository"]["remoteUrl"].tame(check_string)
def METHOD_NAME(payload: WildValue) -> int:
return payload["resource"]["pullRequestId"].tame(check_int)
def get_code_pull_request_title(payload: WildValue) -> str:
return payload["resource"]["title"].tame(check_string)
def get_code_pull_request_url(payload: WildValue) -> str:
return payload["resource"]["_links"]["web"]["href"].tame(check_string)
def get_code_pull_request_user_name(payload: WildValue) -> str:
return payload["resource"]["createdBy"]["displayName"].tame(check_string)
def get_topic_based_on_event(payload: WildValue, event: str) -> str:
if event == "git.push":
return TOPIC_WITH_BRANCH_TEMPLATE.format(
repo=get_code_repository_name(payload), branch=get_code_push_branch_name(payload)
)
elif "pullrequest" in event:
return TOPIC_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
repo=get_code_repository_name(payload),
type="PR",
id=METHOD_NAME(payload),
title=get_code_pull_request_title(payload),
)
return get_code_repository_name(payload) # nocoverage
def get_event_name(payload: WildValue, branches: Optional[str]) -> Optional[str]:
event_name = payload["eventType"].tame(check_string)
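    # When a branches filter is configured, silently ignore pushes to other branches.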
if event_name == "git.push" and branches is not None:
branch = get_code_push_branch_name(payload)
if branches.find(branch) == -1:
return None
if event_name == "git.pullrequest.merged":
status = payload["resource"]["status"].tame(check_string)
merge_status = payload["resource"]["mergeStatus"].tame(check_string)
        # Azure DevOps sends webhook messages whenever a merge is attempted: when there
        # is a merge conflict after a PR is created, or when there is no conflict and the
        # PR is updated. We're only interested in the case where the PR actually merged
        # successfully.
if status != "completed" or merge_status != "succeeded":
return None
if event_name in EVENT_FUNCTION_MAPPER:
return event_name
raise UnsupportedWebhookEventTypeError(event_name)
EVENT_FUNCTION_MAPPER: Dict[str, Callable[[WildValue], str]] = {
"git.push": get_code_push_commits_body,
"git.pullrequest.created": get_code_pull_request_opened_body,
"git.pullrequest.merged": get_code_pull_request_merged_body,
"git.pullrequest.updated": get_code_pull_request_updated_body,
}
ALL_EVENT_TYPES = list(EVENT_FUNCTION_MAPPER.keys())
@webhook_view("AzureDevOps", all_event_types=ALL_EVENT_TYPES)
@has_request_variables
def api_azuredevops_webhook(
request: HttpRequest,
user_profile: UserProfile,
payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
branches: Optional[str] = REQ(default=None),
) -> HttpResponse:
event = get_event_name(payload, branches)
if event is None:
return json_success(request)
topic = get_topic_based_on_event(payload, event)
body_function = EVENT_FUNCTION_MAPPER[event]
body = body_function(payload)
check_send_webhook_message(request, user_profile, topic, body)
return json_success(request) |
326 | build get request | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def METHOD_NAME(**kwargs: Any) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop("template_url", "/workspace")
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class WorkspaceOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.ArtifactsClient`'s
:attr:`workspace` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def get(self, **kwargs: Any) -> _models.Workspace:
"""Get Workspace.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Workspace or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.Workspace
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2020-12-01"))
cls: ClsType[_models.Workspace] = kwargs.pop("cls", None)
request = METHOD_NAME(
api_version=api_version,
template_url=self.get.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, "str", skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize("Workspace", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {"url": "/workspace"} |
327 | find | """Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from . import monkey
__metaclass__ = type
__all__ = [
'setup', 'Distribution', 'Command', 'Extension', 'Require',
'SetuptoolsDeprecationWarning',
'find_packages'
]
if PY3:
__all__.append('find_namespace_packages')
__version__ = setuptools.version.__version__
bootstrap_install_from = None
# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ['lib2to3.fixes']
class PackageFinder:
"""
Generate a list of all Python packages found within a directory
"""
@classmethod
def METHOD_NAME(cls, where='.', exclude=(), include=('*',)):
"""Return a list all Python packages found within directory 'where'
'where' is the root directory which will be searched for packages. It
should be supplied as a "cross-platform" (i.e. URL-style) path; it will
be converted to the appropriate local path syntax.
'exclude' is a sequence of package names to exclude; '*' can be used
as a wildcard in the names, such that 'foo.*' will exclude all
subpackages of 'foo' (but not 'foo' itself).
'include' is a sequence of package names to include. If it's
specified, only the named packages will be included. If it's not
specified, all found packages will be included. 'include' can contain
shell style wildcard patterns just like 'exclude'.
"""
return list(cls._find_packages_iter(
convert_path(where),
cls._build_filter('ez_setup', '*__pycache__', *exclude),
cls._build_filter(*include)))
@classmethod
def _find_packages_iter(cls, where, exclude, include):
"""
All the packages found in 'where' that pass the 'include' filter, but
not the 'exclude' filter.
"""
for root, dirs, files in os.walk(where, followlinks=True):
# Copy dirs to iterate over it, then empty dirs.
all_dirs = dirs[:]
dirs[:] = []
for dir in all_dirs:
full_path = os.path.join(root, dir)
rel_path = os.path.relpath(full_path, where)
package = rel_path.replace(os.path.sep, '.')
# Skip directory trees that are not valid packages
if ('.' in dir or not cls._looks_like_package(full_path)):
continue
# Should this package be included?
if include(package) and not exclude(package):
yield package
# Keep searching subdirectories, as there may be more packages
# down there, even if the parent was excluded.
dirs.append(dir)
@staticmethod
def _looks_like_package(path):
"""Does a directory look like a package?"""
return os.path.isfile(os.path.join(path, '__init__.py'))
@staticmethod
def _build_filter(*patterns):
"""
Given a list of patterns, return a callable that will be true only if
the input matches at least one of the patterns.
"""
return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
@staticmethod
def _looks_like_package(path):
return True
find_packages = PackageFinder.METHOD_NAME
if PY3:
find_namespace_packages = PEP420PackageFinder.METHOD_NAME
def _install_setup_requires(attrs):
# Note: do not use `setuptools.Distribution` directly, as
# our PEP 517 backend patch `distutils.core.Distribution`.
dist = distutils.core.Distribution(dict(
(k, v) for k, v in attrs.items()
if k in ('dependency_links', 'setup_requires')
))
# Honor setup.cfg's options.
dist.parse_config_files(ignore_option_errors=True)
if dist.setup_requires:
dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
# Make sure we have any requirements needed to interpret 'attrs'.
_install_setup_requires(attrs)
return distutils.core.setup(**attrs)
setup.__doc__ = distutils.core.setup.__doc__
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
__doc__ = _Command.__doc__
command_consumes_arguments = False
def __init__(self, dist, **kw):
"""
Construct the command for dist, updating
vars(self) with any keyword parameters.
"""
_Command.__init__(self, dist)
vars(self).update(kw)
def _ensure_stringlike(self, option, what, default=None):
val = getattr(self, option)
if val is None:
setattr(self, option, default)
return default
elif not isinstance(val, string_types):
raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
% (option, what, val))
return val
def ensure_string_list(self, option):
r"""Ensure that 'option' is a list of strings. If 'option' is
currently a string, we split it either on /,\s*/ or /\s+/, so
"foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
["foo", "bar", "baz"].
"""
val = getattr(self, option)
if val is None:
return
elif isinstance(val, string_types):
setattr(self, option, re.split(r',\s*|\s+', val))
else:
if isinstance(val, list):
ok = all(isinstance(v, string_types) for v in val)
else:
ok = False
if not ok:
raise DistutilsOptionError(
"'%s' must be a list of strings (got %r)"
% (option, val))
def reinitialize_command(self, command, reinit_subcommands=0, **kw):
cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
vars(cmd).update(kw)
return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
"""
Find all files under 'dir' and return the list of full filenames.
Unless dir is '.', return full filenames with dir prepended.
"""
files = _find_all_simple(dir)
if dir == os.curdir:
make_rel = functools.partial(os.path.relpath, start=dir)
files = map(make_rel, files)
return list(files)
class sic(str):
"""Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
# Apply monkey patches
monkey.patch_all() |
328 | say error | """
Utility functions for building OpenVR Advanced Settings.
"""
import os
import sys
import argparse
import subprocess
import string
def get_version_string():
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + "/../compile_version_string.txt", "r") as file:
contents = file.readline().strip()
return contents
#Const globals
VERSION_STRING = get_version_string()
EXIT_CODE_SUCCESS = 0
EXIT_CODE_FAILURE_BUILD_APP = 1
EXIT_CODE_FAILURE_BUILD_LOCATION = 2
QT_LOC_VAR_NAME = "QT_LOC"
VS_LOC_VAR_NAME = "VS_LOC"
JOM_LOC_VAR_NAME = "JOM_LOC"
ZIP_LOC_VAR_NAME = "ZIP_LOC"
NSIS_LOC_VAR_NAME = "NSIS_LOC"
LLVM_LOC_VAR_NAME = "LLVM_LOC"
QT_LOC_DEFAULT = r"C:\Qt\5.12.2\msvc2017_64\bin\""
VS_LOC_DEFAULT = r"C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat"
JOM_LOC_DEFAULT = r"C:\Qt\Tools\QtCreator\bin\jom.exe"
ZIP_LOC_DEFAULT = r"C:\Program Files\7-Zip\7z.exe"
NSIS_LOC_DEFAULT = r"C:\Program Files (x86)\NSIS\""
LLVM_LOC_DEFAULT = r"C:\Program Files\LLVM\bin\""
BUILD_CLANG_VAR_NAME = "BUILD_CLANG"
BUILD_VSPROJ_VAR_NAME = "BUILD_VSPROJ"
BUILD_DEBUG_VAR_NAME = "BUILD_DEBUG"
PLATFORM_TARGET = "AMD64"
#Mutable globals
ORIGINAL_DIR = ""
PROJECT_DIR = ""
OUTPUT_DIR = r"\\bin\\win64\\AdvancedSettings"
DEPLOY_DIR = ""
current_activity = ""
bat_file_contents = ""
def set_current_activity(activity: str):
"""
Wrapper around the current_activity global var to make it usable in other files.
"""
global current_activity
current_activity = activity
def say(message: str):
"""
Prints a message to the console prepended by the current activity, like:
BUILD: Building things.
DEPLOY: Deploying things.
"""
global current_activity
print(current_activity + ": " + message)
def METHOD_NAME(message: str):
"""
Prints an error message to the console.
    Not currently different from say, but kept separate in case somebody wants to
    do something fancy, such as outputting to stderr, instead.
"""
global current_activity
print(current_activity + ": " + message)
def is_env_var_set(env_var_name: str):
"""
Returns whether or not an environment variable is set.
"""
var = os.getenv(env_var_name)
if var is None:
return False
return True
def add_line_to_run_bat_file(line: str):
"""
Adds a line ended by a newline to the batch file.
"""
global bat_file_contents
bat_file_contents += line + "\n"
def add_error_handling_line_to_bat_file():
"""
Adds an error handling line to the batch file.
This is because some applications don't cause the script to error out
even if they exit with an error.
"""
global bat_file_contents
bat_file_contents += "IF ERRORLEVEL 1 EXIT /B " + str(EXIT_CODE_FAILURE_BUILD_APP) + "\n"
def get_required_env_var_path(env_var_name: str, default_env_var_value: str):
"""
Easy function for getting *_LOC values, complete with testing for path validity and
outputting status to console.
"""
if not is_env_var_set(env_var_name):
say(f"{env_var_name} not defined. Using default value.")
#Sanitize quotes because we'll add them later and they might mess up
#how we put together strings.
path = default_env_var_value.replace('"', '')
else:
path = os.getenv(env_var_name).replace('"', '')
say(f"{env_var_name} set to '{path}'")
if not os.path.exists(path):
METHOD_NAME(f"{env_var_name} does not exist. Exiting")
exit_on_error(EXIT_CODE_FAILURE_BUILD_LOCATION)
say(f"{env_var_name} exists.")
return path
def find_qt_path():
"""
Returns the location of the Qt bin directory.
Precedence goes to the QT_LOC_VAR_NAME environment variable.
    If it is set and the path exists, it will be used; if it is set but the path
    doesn't exist, likely install locations will be searched.
"""
if is_env_var_set(QT_LOC_VAR_NAME):
say(f"{QT_LOC_VAR_NAME} is defined. Attempting to find first.")
qt_path = os.getenv(QT_LOC_VAR_NAME).replace('"', '')
say(f"{QT_LOC_VAR_NAME} is set to '{qt_path}'.")
if os.path.exists(qt_path):
say(f"'{qt_path}' exists. Using this path.")
return qt_path
else:
say(f"'{qt_path}' does not exist, attempting to traverse filesystem.")
qt_path = traverse_and_find_qt_path()
if qt_path == "NOTFOUND":
say("Qt path could not be find by traversal. Exiting.")
exit_on_error(EXIT_CODE_FAILURE_BUILD_LOCATION)
return qt_path
def traverse_and_find_qt_path():
"""
    Searches all drives (A-Z) and Qt 5 minor versions 12 through 16 to find
    the Qt install location.
"""
qt_folder_name = r":\\Qt\\"
qt_visual_studio_name = r"\\msvc2017_64\bin\\"
qt_major_version = 5
qt_minor_versions = [12, 13, 14, 15, 16]
qt_patch_versions = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
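    # A successful hit points at the msvc2017_64\bin folder of a Qt 5.1x install
    # on some drive, e.g. C:\Qt\5.12.2\msvc2017_64\bin.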
# We search the A and B drive letters even though it's
# unlikely for the install to be there
windows_drive_letters = string.ascii_uppercase
for drive in windows_drive_letters:
# Go in reverse because we want the highest versions first
for minor in reversed(qt_minor_versions):
for patch in reversed(qt_patch_versions):
version = str(qt_major_version) + "." + str(minor) + "." + str(patch)
path = str(drive) + qt_folder_name + version + qt_visual_studio_name
if os.path.exists(path):
say("Found QT path: '" + path + "'")
return path
say("Did not find any valid QT path.")
return "NOTFOUND"
def create_batch_file():
"""
    Creates a batch file in the project's build_scripts\win directory. Must be called by an outside batch file.
"""
file = open(get_project_dir() + "\\build_scripts\\win\\current_build.bat", "w+")
file.write(bat_file_contents)
file.close()
def exit_on_error(error_number):
"""Wrapper around sys.exit."""
sys.exit(error_number)
def set_dirs():
"""
Sets up the directories.
We know that the project dir will always be two dirs up from the dir the file is in.
"""
global ORIGINAL_DIR
global PROJECT_DIR
global DEPLOY_DIR
ORIGINAL_DIR = os.path.dirname(__file__)
PROJECT_DIR = os.path.abspath(os.path.join(ORIGINAL_DIR, r"..\.."))
    DEPLOY_DIR = PROJECT_DIR + OUTPUT_DIR
print(ORIGINAL_DIR)
def get_project_dir():
return PROJECT_DIR
def get_original_dir():
return ORIGINAL_DIR
def get_deploy_dir():
return DEPLOY_DIR
|
329 | add | from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Callable, Optional
import pytest
import ue.hierarchy
from ark.discovery import initialise_hierarchy
from ark.types import DCSC_CLS
from automate.ark import ArkSteamManager
from config import HIERARCHY_FILENAME, ConfigFile, get_global_config
from ue.asset import ExportTableItem, UAsset
from ue.loader import AssetLoader, CacheManager, ModResolver
TEST_PGD_PKG = '/Game/Mods/1821554891/PrimalGameData_BP_PurloviaTEST'
TEST_PGD_CLS = TEST_PGD_PKG + '.PrimalGameData_BP_PurloviaTEST_C'
TROODON_CHR = '/Game/PrimalEarth/Dinos/Troodon/Troodon_Character_BP.Troodon_Character_BP_C'
DODO_CHR = '/Game/PrimalEarth/Dinos/Dodo/Dodo_Character_BP.Dodo_Character_BP_C'
DODO_AB_CHR = '/Game/PrimalEarth/Dinos/Dodo/Dodo_Character_BP_Aberrant.Dodo_Character_BP_Aberrant_C'
DEINO_CHR = '/Game/PrimalEarth/Dinos/Raptor/Uberraptor/Deinonychus_Character_BP.Deinonychus_Character_BP_C'
X_DRAGON_CHR = '/Game/Genesis/Dinos/BiomeVariants/Volcano_Dragon/Volcanic_Dragon_Character_BP.Volcanic_Dragon_Character_BP_C'
DRAGON_BOSS_CHR = '/Game/PrimalEarth/Dinos/Dragon/Dragon_Character_BP_Boss.Dragon_Character_BP_Boss_C'
PTM_DCSC_CONFLICT_CHR = '/Game/Mods/1821554891/Dinos/PTM_DCSC_Conflict.PTM_DCSC_Conflict_C'
@pytest.fixture(name='tempdir', scope='function')
def fixture_tempdir():
'''
Fixture to create a temporary directory for testing, removing it automatically once done.
'''
with TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture(name='config', scope='module')
def fixture_config() -> ConfigFile:
config = get_global_config()
assert '1821554891' in config.mods, "PurloviaTEST must be in config to run these tests"
config.settings.SkipGit = True
config.settings.SkipInstall = True
return config
@pytest.fixture(name='arkman', scope='module')
def fixture_arkman(config: ConfigFile) -> ArkSteamManager:
arkman = ArkSteamManager(config=config)
return arkman
@pytest.fixture(name='loader', scope='module')
def fixture_loader(arkman: ArkSteamManager) -> AssetLoader:
loader = arkman.getLoader()
return loader
@pytest.fixture(name='hierarchy', scope='module')
def fixture_hierarchy(arkman: ArkSteamManager):
initialise_hierarchy(arkman)
@pytest.fixture(name='internal_hierarchy', scope='module')
def fixture_internal_hierarchy():
ue.hierarchy.tree.clear()
ue.hierarchy.load_internal_hierarchy(HIERARCHY_FILENAME)
@pytest.fixture(name='dodos', scope='module')
def fixture_dodos(loader: AssetLoader, internal_hierarchy): # pylint: disable=unused-argument
# Scan the Dodo directory
ue.hierarchy.explore_path('/Game/PrimalEarth/Dinos/Dodo', loader, set())
@pytest.fixture(name='troodon', scope='module')
def fixture_troodon(loader: AssetLoader, internal_hierarchy): # pylint: disable=unused-argument
    # Scan the Troodon asset
ue.hierarchy.explore_asset(TROODON_CHR, loader)
@pytest.fixture(name='test_hierarchy', scope='module')
def fixture_test_hierarchy(loader: AssetLoader, internal_hierarchy): # pylint: disable=unused-argument
# Scan the test mod's directory
ue.hierarchy.explore_path('/Game/Mods/1821554891/', loader, set())
@pytest.fixture(name='ark_types', scope='module')
def fixture_ark_types(loader: AssetLoader, internal_hierarchy): # pylint: disable=unused-argument
# Scan just a few Ark core types
ue.hierarchy.explore_asset(DCSC_CLS, loader)
@pytest.fixture(name='scan_and_load', scope='module')
def fixture_scan_and_load(loader: AssetLoader, ark_types): # pylint: disable=unused-argument
def _scan_and_load(cls_name: str):
cls = loader.load_class(cls_name)
ue.hierarchy.explore_asset(cls.asset.assetname, loader)
return cls
return _scan_and_load
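# Tests can request this fixture and call it directly, e.g. scan_and_load(DODO_CHR),
# to load a class export and ensure its asset has been explored into the hierarchy.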
ScanLoadFn = Callable[[str], ExportTableItem]
class MockModResolver(ModResolver):
def get_name_from_id(self, modid: str) -> Optional[str]:
raise NotImplementedError
def get_id_from_name(self, modname: str) -> Optional[str]:
raise NotImplementedError
class MockCacheManager(CacheManager):
def lookup(self, name) -> Optional[UAsset]:
raise NotImplementedError
def METHOD_NAME(self, name: str, asset: UAsset):
raise NotImplementedError
def remove(self, name: str):
raise NotImplementedError
def wipe(self, prefix: str = ''):
raise NotImplementedError
def get_count(self):
raise NotImplementedError |
330 | private link service connection state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, id=None, name=None, private_endpoint=None, METHOD_NAME=None, provisioning_state=None, system_data=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", METHOD_NAME)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def METHOD_NAME(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
METHOD_NAME=self.METHOD_NAME,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
scope_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Gets a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['scopeName'] = scope_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:kubernetesconfiguration/v20220402preview:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
METHOD_NAME=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Gets a private endpoint connection.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str scope_name: The name of the Azure Arc PrivateLinkScope resource.
"""
... |
331 | test init | import pathlib
import subprocess
import sys
import unittest
import os
import numpy
import pytest
from cupy.cuda import nccl
from cupy import testing
from cupyx.distributed import init_process_group
from cupyx.distributed._nccl_comm import _mpi_available
nccl_available = nccl.available
def _run_test(test_name, dtype=None):
    # subprocess is required so that it does not interfere with the cupy module
    # imported at the top of this file
runner_path = pathlib.Path(__file__).parent / 'comm_runner.py'
args = [sys.executable, runner_path, test_name, 'store']
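    # The runner receives the test name, the coordination backend selector
    # ('store' here, 'mpi' in the variant below) and, optionally, a numpy dtype character.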
if dtype is not None:
args.append(numpy.dtype(dtype).char)
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdoutdata, stderrdata = proc.communicate()
assert stderrdata.decode() == ''
assert proc.returncode == 0
def _run_test_with_mpi(test_name, dtype=None):
    # subprocess is required so that it does not interfere with the cupy module
    # imported at the top of this file
runner_path = pathlib.Path(__file__).parent / 'comm_runner.py'
args = ['mpiexec', '-n', '2', '--allow-run-as-root',
sys.executable, runner_path, test_name, 'mpi']
if dtype is not None:
args.append(numpy.dtype(dtype).char)
proc = subprocess.Popen(
args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=os.environ
)
stdoutdata, stderrdata = proc.communicate()
assert stderrdata.decode() == ''
assert proc.returncode == 0
@pytest.mark.skipif(not nccl_available, reason='nccl is not installed')
@testing.multi_gpu(2)
class TestNCCLBackend:
def _run_test(self, test, dtype):
_run_test(test, dtype)
@testing.for_all_dtypes(no_bool=True)
def test_broadcast(self, dtype):
self._run_test('broadcast', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_reduce(self, dtype):
self._run_test('reduce', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_all_reduce(self, dtype):
self._run_test('all_reduce', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_reduce_scatter(self, dtype):
self._run_test('reduce_scatter', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_all_gather(self, dtype):
self._run_test('all_gather', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_send_and_recv(self, dtype):
self._run_test('send_and_recv', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_send_recv(self, dtype):
self._run_test('send_recv', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_scatter(self, dtype):
self._run_test('scatter', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_gather(self, dtype):
self._run_test('gather', dtype)
@testing.for_all_dtypes(no_bool=True)
def test_all_to_all(self, dtype):
self._run_test('all_to_all', dtype)
def test_barrier(self):
self._run_test('barrier', None)
@pytest.mark.skipif(not _mpi_available, reason='mpi is not installed')
@testing.multi_gpu(2)
class TestNCCLBackendWithMPI(TestNCCLBackend):
def _run_test(self, test, dtype):
_run_test_with_mpi(test, dtype)
@pytest.mark.skipif(not nccl_available, reason='nccl is not installed')
@testing.multi_gpu(2)
class TestNCCLBackendSparse:
def _run_test(self, test, dtype):
_run_test(test, dtype)
@testing.for_dtypes('fdFD')
def test_send_and_recv(self, dtype):
self._run_test('sparse_send_and_recv', dtype)
@testing.for_dtypes('fdFD')
def test_broadcast(self, dtype):
self._run_test('sparse_broadcast', dtype)
@testing.for_dtypes('fdFD')
def test_reduce(self, dtype):
self._run_test('sparse_reduce', dtype)
@testing.for_dtypes('fdFD')
def test_all_reduce(self, dtype):
self._run_test('sparse_all_reduce', dtype)
@testing.for_dtypes('fdFD')
def test_scatter(self, dtype):
self._run_test('sparse_scatter', dtype)
@testing.for_dtypes('fdFD')
def test_gather(self, dtype):
self._run_test('sparse_gather', dtype)
@testing.for_dtypes('fdFD')
def test_all_gather(self, dtype):
self._run_test('sparse_all_gather', dtype)
@testing.for_dtypes('fdFD')
def test_all_to_all(self, dtype):
self._run_test('sparse_all_to_all', dtype)
@testing.for_dtypes('fdFD')
def test_reduce_scatter(self, dtype):
self._run_test('sparse_reduce_scatter', dtype)
@testing.for_dtypes('fdFD')
def test_send_recv(self, dtype):
self._run_test('sparse_send_recv', dtype)
@pytest.mark.skipif(not _mpi_available, reason='mpi is not installed')
@testing.multi_gpu(2)
class TestNCCLBackendSparseWithMPI(TestNCCLBackendSparse):
def _run_test(self, test, dtype):
_run_test_with_mpi(test, dtype)
@pytest.mark.skipif(not nccl_available, reason='nccl is not installed')
class TestInitDistributed(unittest.TestCase):
@testing.multi_gpu(2)
def METHOD_NAME(self):
_run_test('init')
def test_invalid_backend(self):
with pytest.raises(ValueError):
init_process_group(1, 0, backend='mpi')
def test_invalid_n_devices(self):
with pytest.raises(ValueError):
init_process_group(0, 0)
with pytest.raises(ValueError):
init_process_group(-1, 0)
def test_invalid_rank(self):
with pytest.raises(ValueError):
init_process_group(2, -1)
with pytest.raises(ValueError):
init_process_group(2, 3) |
332 | init generation dict |
# Copyright 2016-2022 The FEAGI Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Drives the evolutionary process.
"""
import os
import random
import json
import traceback
import logging
from inf import runtime_data
from inf.initialize import id_gen
from api import message_processor
logger = logging.getLogger(__name__)
def init_autopilot_folders():
if not os.path.isdir('./evo/autopilot/generations'):
os.mkdir('./evo/autopilot/generations')
if not os.path.isdir('./evo/autopilot/embodiments'):
os.mkdir('./evo/autopilot/embodiments')
if not os.path.isdir('./evo/autopilot/environments'):
os.mkdir('./evo/autopilot/environments')
def process_manager():
pass
def load_new_genome():
genome_file_name = pick_a_random_genome()
try:
with open("./evo/autopilot/brains/" + genome_file_name, "r") as data_file:
genome_str = json.load(data_file)
            # todo: refactor the genome handling so it doesn't have to go through the message processor for evo purposes
api_message = {"genome": genome_str}
message_processor.api_message_processor(api_message)
except Exception:
print("Error while loading genome file\n", traceback.print_exc())
def pick_a_random_genome():
genome_file_name = random.choice(os.listdir("./evo/autopilot/brains"))
# todo: validate the choice
return genome_file_name
def save_genome():
# save genome to brain folder
# add an entry to generation dict
pass
def log_generation():
pass
def set_default(obj):
if isinstance(obj, set):
return list(obj)
raise TypeError
def METHOD_NAME():
"""
Generation dictionary holds details about the composition of every single generation that has gone through the
evolutionary system.
Template example:
{
"generation_id_1": {
"genome_id" : genome_id,
"robot_id" : robot_id,
"environment_id": env_id
},
"generation_id_2": {
"genome_id" : genome_id,
"robot_id" : robot_id,
"environment_id": env_id
},
...,
}
"""
init_autopilot_folders()
runtime_data.current_generation_dict_id = id_gen(signature='_C') # C for generation collection
with open('./evo/autopilot/generations/' + runtime_data.current_generation_dict_id + '.json', "w") as data_file:
data = {}
data_file.seek(0) # rewind
data_file.write(json.dumps(data, indent=3, default=set_default))
data_file.truncate()
runtime_data.generation_dict = {}
def update_generation_dict(genome_id=runtime_data.genome_id,
robot_id=runtime_data.robot_id,
env_id=runtime_data.environment_id,
fitness=None):
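    # Note: these default values are evaluated once, at import time, so callers
    # should pass explicit arguments if runtime_data has changed since then.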
print(">>> brain_run_id", runtime_data.brain_run_id)
runtime_data.generation_dict[runtime_data.brain_run_id] = {
'genome_id': genome_id,
'robot_id': robot_id,
'environment_id': env_id,
'fitness': fitness
}
update_generation_dict_file()
def update_generation_dict_file():
"""
Saves the updated version of the generation_dict from memory to disk
"""
with open('./evo/autopilot/generations/' + runtime_data.current_generation_dict_id + '.json', "w+") as data_file:
data = runtime_data.generation_dict
data_file.seek(0) # rewind
data_file.write(json.dumps(data, indent=3, default=set_default))
data_file.truncate() |
333 | tf shape to list | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tensorflow as tf
from distutils.version import LooseVersion
from google.protobuf import text_format
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import dtypes as tf_dtypes
from tensorflow.python.framework import tensor_util
from tf_nndct.graph import dtypes as nndct_dtypes
from tf_nndct.utils.convert_to_constants import convert_variables_to_constants_v2 as convert_to_constants
from tf_nndct.utils import generic_utils
def node_name_parts_from_input(input_name):
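  # e.g. "^foo/bar:1" -> ("^", "foo/bar", ":1"); "foo/bar" -> ("", "foo/bar", "")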
prefix = ''
node_name = ''
suffix = ''
if input_name.startswith('^'):
prefix = '^'
input_name = input_name[1:]
input_parts = input_name.split(':')
if len(input_parts) < 2:
suffix = ''
else:
suffix = ':' + input_parts[1]
node_name = input_parts[0]
return prefix, node_name, suffix
def node_name_from_input(input_name):
"""Strips off ports and other decorations to get the underlying node name."""
prefix, node_name, suffix = node_name_parts_from_input(input_name)
return node_name
def canonical_output_name(input_name):
prefix, node_name, suffix = node_name_parts_from_input(input_name)
if not suffix:
suffix = ':0'
return ''.join([prefix, node_name, suffix])
def dtype_to_tf_string(dtype):
if type(dtype) == nndct_dtypes.DType:
tf_dtype = nndct_dtypes.to_tf(dtype)
elif type(dtype) == tf_dtypes.DType:
tf_dtype = dtype
return ".".join(["tf", tf_dtypes._TYPE_TO_STRING[tf_dtype]])
def parse_tf_tensor(tensor):
"""Parse data from given `tensor`."""
if not isinstance(tensor, tensor_pb2.TensorProto):
raise TypeError("TensorProto required, but given {}".format(type(tensor)))
return tensor_util.MakeNdarray(tensor)
def values_from_tf_const(node_def):
"""Extracts the values from a const NodeDef as a numpy ndarray.
Args:
node_def: Const NodeDef that has the values we want to access.
Returns:
Numpy ndarray containing the values.
Raises:
ValueError: If the node isn't a Const.
"""
if node_def.op != "Const":
raise ValueError("Node '%s' should be a Const op." % node_def.name)
input_tensor = node_def.attr["value"].tensor
tensor_value = tensor_util.MakeNdarray(input_tensor)
return tensor_value
def parse_attr_proto(attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for key, value in attr_proto.items():
attrs[key] = get_attr_proto_value(value)
return attrs
def get_attr_proto_value(attr_value):
"""Returns the value of the attr of this buf with the given `name`.
Args:
attr_value: attrvalue protobuf.
Returns:
The value of the attr, as a Python object.
Raises:
ValueError: If this op does not have an attr with the given `name`.
"""
fields = ["s", "i", "f", "b", "type", "shape", "tensor", "func"]
x = attr_value
ret = []
# Treat an empty oneof value as an empty list.
if not x.WhichOneof("value"):
return ret
if x.HasField("list"):
for f in fields:
if getattr(x.list, f):
if f == "type":
ret += [tf_dtypes.as_dtype(x) for x in list(getattr(x.list, f))]
else:
ret += list(getattr(x.list, f))
else:
for f in fields:
if x.HasField(f):
if f == "type":
ret = tf_dtypes.as_dtype(getattr(x, f))
else:
ret = getattr(x, f)
return ret
def METHOD_NAME(shape):
"""Get shape from tensorflow attr 'shape'."""
dims = None
try:
if not shape.unknown_rank:
dims = [int(d.size) for d in shape.dim]
except: # pylint: disable=bare-except
pass
return dims
def tf_tensor_shape(tensor):
shape = []
try:
shape = tensor.get_shape().as_list()
except Exception: # pylint: disable=broad-except
shape = None
return shape
def write_proto(path, message, as_text=False):
dir_name = os.path.dirname(path)
generic_utils.mkdir_if_not_exist(dir_name)
if dir_name:
os.makedirs(dir_name, exist_ok=True)
if as_text:
with open(path, "w") as f:
f.write(text_format.MessageToString(message))
else:
with open(path, "wb") as f:
f.write(message.SerializeToString())
def write_text_proto(path, message):
write_proto(path, message, as_text=True)
def write_binary_proto(path, message):
write_proto(path, message, as_text=False)
def tf_version():
return tf.__version__
def is_tf_version_equal(version: str):
return tf_version() == LooseVersion(version)
def is_tf_version_greater_than(version: str):
return tf_version() > LooseVersion(version)
def is_tf_version_greater_equal(version: str):
return tf_version() >= LooseVersion(version)
def is_tf_version_less_than(version: str):
return tf_version() < LooseVersion(version)
def is_tf_version_less_equal(version: str):
return tf_version() <= LooseVersion(version)
def is_tf_concat(op):
return op.type in ("Concat", "ConcatV2", "ConcatV3")
def is_tf_const(op):
return op.type in ["Const", "ConstV2"]
def is_tf_identity(op):
return op.type == "Identity" or op.type == "IdentityN"
def is_tf_placeholder(op):
return op.type == "Placeholder"
def is_tf_biasadd(op):
return op.type == "BiasAdd" |
334 | test playlist load non extm3u | # This file is part of beets.
# Copyright 2022, J0J0 Todos.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Testsuite for the M3UFile class."""
from os import path
from tempfile import mkdtemp
from shutil import rmtree
import unittest
import sys
from beets.util import bytestring_path
from beets.util.m3u import M3UFile, EmptyPlaylistError
from test._common import RSRC
class M3UFileTest(unittest.TestCase):
"""Tests the M3UFile class."""
def test_playlist_write_empty(self):
"""Test whether saving an empty playlist file raises an error."""
tempdir = bytestring_path(mkdtemp())
the_playlist_file = path.join(tempdir, b'playlist.m3u8')
m3ufile = M3UFile(the_playlist_file)
with self.assertRaises(EmptyPlaylistError):
m3ufile.write()
rmtree(tempdir)
def test_playlist_write(self):
"""Test saving ascii paths to a playlist file."""
tempdir = bytestring_path(mkdtemp())
the_playlist_file = path.join(tempdir, b'playlist.m3u')
m3ufile = M3UFile(the_playlist_file)
m3ufile.set_contents([
bytestring_path('/This/is/a/path/to_a_file.mp3'),
bytestring_path('/This/is/another/path/to_a_file.mp3')
])
m3ufile.write()
self.assertTrue(path.exists(the_playlist_file))
rmtree(tempdir)
def test_playlist_write_unicode(self):
"""Test saving unicode paths to a playlist file."""
tempdir = bytestring_path(mkdtemp())
the_playlist_file = path.join(tempdir, b'playlist.m3u8')
m3ufile = M3UFile(the_playlist_file)
m3ufile.set_contents([
bytestring_path('/This/is/å/path/to_a_file.mp3'),
bytestring_path('/This/is/another/path/tö_a_file.mp3')
])
m3ufile.write()
self.assertTrue(path.exists(the_playlist_file))
rmtree(tempdir)
@unittest.skipUnless(sys.platform == 'win32', 'win32')
def test_playlist_write_and_read_unicode_windows(self):
"""Test saving unicode paths to a playlist file on Windows."""
tempdir = bytestring_path(mkdtemp())
the_playlist_file = path.join(tempdir,
b'playlist_write_and_read_windows.m3u8')
m3ufile = M3UFile(the_playlist_file)
m3ufile.set_contents([
bytestring_path(r"x:\This\is\å\path\to_a_file.mp3"),
bytestring_path(r"x:\This\is\another\path\tö_a_file.mp3")
])
m3ufile.write()
self.assertTrue(path.exists(the_playlist_file))
m3ufile_read = M3UFile(the_playlist_file)
m3ufile_read.load()
self.assertEqual(
m3ufile.media_list[0],
bytestring_path(
path.join('x:\\', 'This', 'is', 'å', 'path', 'to_a_file.mp3'))
)
self.assertEqual(
m3ufile.media_list[1],
bytestring_path(r"x:\This\is\another\path\tö_a_file.mp3"),
bytestring_path(path.join(
'x:\\', 'This', 'is', 'another', 'path', 'tö_a_file.mp3'))
)
rmtree(tempdir)
@unittest.skipIf(sys.platform == 'win32', 'win32')
def test_playlist_load_ascii(self):
"""Test loading ascii paths from a playlist file."""
the_playlist_file = path.join(RSRC, b'playlist.m3u')
m3ufile = M3UFile(the_playlist_file)
m3ufile.load()
self.assertEqual(m3ufile.media_list[0],
bytestring_path('/This/is/a/path/to_a_file.mp3'))
@unittest.skipIf(sys.platform == 'win32', 'win32')
def test_playlist_load_unicode(self):
"""Test loading unicode paths from a playlist file."""
the_playlist_file = path.join(RSRC, b'playlist.m3u8')
m3ufile = M3UFile(the_playlist_file)
m3ufile.load()
self.assertEqual(m3ufile.media_list[0],
bytestring_path('/This/is/å/path/to_a_file.mp3'))
@unittest.skipUnless(sys.platform == 'win32', 'win32')
def test_playlist_load_unicode_windows(self):
"""Test loading unicode paths from a playlist file."""
the_playlist_file = path.join(RSRC, b'playlist_windows.m3u8')
winpath = bytestring_path(path.join(
'x:\\', 'This', 'is', 'å', 'path', 'to_a_file.mp3'))
m3ufile = M3UFile(the_playlist_file)
m3ufile.load()
self.assertEqual(
m3ufile.media_list[0],
winpath
)
def test_playlist_load_extm3u(self):
"""Test loading a playlist with an #EXTM3U header."""
the_playlist_file = path.join(RSRC, b'playlist.m3u')
m3ufile = M3UFile(the_playlist_file)
m3ufile.load()
self.assertTrue(m3ufile.extm3u)
def METHOD_NAME(self):
"""Test loading a playlist without an #EXTM3U header."""
the_playlist_file = path.join(RSRC, b'playlist_non_ext.m3u')
m3ufile = M3UFile(the_playlist_file)
m3ufile.load()
self.assertFalse(m3ufile.extm3u)
def suite():
"""This testsuite's main function."""
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite') |
335 | print usage | #!/usr/bin/env python2
##############################################################
# distrib_stats.py #
##############################################################
# Python script for generating statistics based on the #
# contents of the distrib log files. #
##############################################################
import ftplib
import getopt
import os
import socket
import stat
import string
import sys
import time
import shutil
from datetime import datetime
###############################################################
# Local classes #
###############################################################
class Record:
def __init__(self):
self.total_bytes = 0
self.num_successes = 0
self.num_errors = 0
def add(self, num_bytes, success_flag):
self.total_bytes += num_bytes
if success_flag == "success":
self.num_successes += 1
elif success_flag == "error":
self.num_errors += 1
else:
print "*** unknown success/error flag: " + success_flag
print " treating record as an error"
self.num_errors += 1
return self
def get_total_bytes(self):
return self.total_bytes
def get_total_errors(self):
return self.num_errors
def get_total_msgs(self):
return self.num_successes + self.num_errors
def println(self):
print "bytes = %d, success = %d, errors = %d" % \
(self.total_bytes, self.num_successes, self.num_errors)
def print_stats(self, key):
num_records = self.num_successes + self.num_errors
percent_error = float(self.num_errors) / float(num_records)
bytes_per_record = float(self.total_bytes) / float(num_records)
print "%-30s %20d %11d %20.5f %10d (%f%%)" % \
(key, self.total_bytes, num_records, bytes_per_record, self.num_errors, percent_error * 100.0)
###############################################################
# Local subroutines #
###############################################################
###############################################################
# print_usage(): Print the usage for this script
#
def METHOD_NAME():
print "This script generates statistics from a list of distrib"
print "log files. Run on a single file to get daily statistics"
print "or on a list of files to get statistics for any time period."
print "Usage: ", prog_name, " < distrib file list >"
return
###############################################################
# print_stats(): Print the statistics for the given dictionary.
#
def print_stats(dictionary):
# Sort the keys for better output
keys = dictionary.keys()
keys.sort()
# Print the statistics for each record. Along the way,
# accumulate total statistics
total_bytes = 0
total_msgs = 0
total_errors = 0
print " total bytes num msgs bytes/msg errors"
print " ----------- -------- --------- ------"
for key in keys:
dictionary[key].print_stats(key)
total_bytes += dictionary[key].get_total_bytes()
total_msgs += dictionary[key].get_total_msgs()
total_errors += dictionary[key].get_total_errors()
# Now print the total statistics
percent_error = (float(total_errors) / float(total_msgs)) * 100.0
bytes_per_msg = float(total_bytes) / float(total_msgs)
print
print "Total %20d %11d %20.5f %10d (%f%%)" % \
(total_bytes, total_msgs, bytes_per_msg, total_errors, percent_error)
return
###############################################################
# Main program #
###############################################################
if __name__ == "__main__":
# Retrieve the program name from the command line.
prog_name = os.path.basename(sys.argv[0])
# Initialize the statistics records
dir_records = {}
host_records = {}
# The command line consists of a list of files to process.
# Process each file in turn.
for file in sys.argv[1:]:
print "*** Processing " + file
# Open the file
input_file = open(file, 'r')
# Process each of the lines in the file
for line in input_file:
# Split the line into tokens. We expect the following
# tokens:
# 0 - distrib time
# 1 - success/error
# 2 - over (overwrite???)
# 3 - destination host
# 4 - num bytes
# 5 - data time
# 6 - destination directory
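            # A line is therefore expected to look roughly like this
            # (hypothetical example; field values are made up for illustration):
            #   2023/01/15 12:34:56, success, over, data-host1, 123456, 2023/01/15 12:30:00, /data/incoming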
tokens = str.split(line, ",")
if len(tokens) != 7:
print "Invalid line found: " + line
print "Skipping"
                continue
# Remove the whitespace around each of the tokens and
# give them mnemonic names
distrib_time = str.strip(tokens[0])
success_flag = str.strip(tokens[1])
over_flag = str.strip(tokens[2])
dest_host = str.strip(tokens[3])
num_bytes = str.strip(tokens[4])
data_time = str.strip(tokens[5])
dest_dir = str.strip(tokens[6])
# Update the dest dir record
if dest_dir in dir_records:
dir_record = dir_records[dest_dir]
else:
dir_record = Record()
dir_record.add(int(num_bytes), success_flag)
dir_records[dest_dir] = dir_record
# Update the dest host record
if dest_host in host_records:
host_record = host_records[dest_host]
else:
host_record = Record()
host_record.add(int(num_bytes), success_flag)
host_records[dest_host] = host_record
# Close the file
input_file.close()
# All of the files have been processed, so now we can calculate the
# statistics. Start with the destination dir statistics.
print
print
print "Destination Directory Statistics"
print "================================"
print_stats(dir_records)
# Now the destination host statistics
print
print
print "Destination Host Statistics"
print "==========================="
print_stats(host_records)
|
336 | unbind client function | # Setup logging ---------------
from GangaCore.Utility.logging import getLogger
log = getLogger()
class MonitoringClient(object):
def __init__(self, monitoringService, serviceName='Main'):
self.__MC = {}
self.subscribe(serviceName, monitoringService)
def subscribe(self, serviceName, monitoringService):
if self.isSubscribed(serviceName):
log.debug(
"The %s service already exists. Please unsubscribe first." % serviceName)
return False
else:
log.debug("Subscribing to the %s service." % serviceName)
try:
self.__MC[serviceName] = monitoringService
except Exception as msg:
log.debug(msg)
return False
return True
def unsubscribe(self, serviceName='Main'):
if self.isSubscribed(serviceName):
del self.__MC[serviceName]
return True
return False
def isSubscribed(self, serviceName='Main'):
_s = serviceName in self.__MC
if not _s:
log.debug("Service %s does not exist." % serviceName)
return _s
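    # Illustrative usage sketch (the service object and callback names below are
    # hypothetical; only the methods of this class are taken from the code above):
    #   client = MonitoringClient(main_monitoring_service)
    #   client.subscribe('Extra', extra_monitoring_service)
    #   if client.isSubscribed('Extra'):
    #       client.bindClientFunction(my_func, my_hook, serviceName='Extra')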
def allStop(self):
for service in self.__MC.keys():
self.__MC[service].stop()
def stop(self, serviceName='Main'):
"""Stops the disconnects from server and stops monitoring mechanism is required."""
if self.isSubscribed(serviceName):
self.__MC[serviceName].stop()
def pause(self, enableMonitoring=False, serviceName='Main'):
if self.isSubscribed(serviceName):
if enableMonitoring:
self.__MC[serviceName].enableMonitoring()
else:
self.__MC[serviceName].disableMonitoring()
def update(self, serviceName='Main'):
self.__MC[serviceName].updateJobs()
def combinedUpdate(self):
for service in self.__MC:
self.update(serviceName=service)
def _getRegistry(self, serviceName='Main'):
return self.__MC[serviceName].registry
def _getUpdateJobStatusFunction(self, serviceName='Main'):
return self.__MC[serviceName].updateJobStatus
def makeUpdateJobStatusFunction(self, func, serviceName='Main'):
return self.__MC[serviceName].makeUpdateJobStatusFunction(func)
# Client callback functions
def bindClientFunction(self, func, hookFunc, serviceName='Main'):
if self.isSubscribed(serviceName):
self.__MC[serviceName].setClientCallback(func, hookFunc)
def METHOD_NAME(self, func, serviceName='Main'):
if self.isSubscribed(serviceName):
try:
self.__MC[serviceName].removeClientCallback(func)
except Exception as msg:
log.debug("unbindClientFunction() failed on %s: %s." %
(serviceName, msg))
# Monitoring loop hook functions
def bindMLFunction(self, hookFunc, argDict, enabled=True, serviceName='Main'):
if self.isSubscribed(serviceName):
self.__MC[serviceName].setCallbackHook(hookFunc, argDict, enabled)
def unbindMLFunction(self, hookFunc, serviceName='Main'):
if self.isSubscribed(serviceName):
try:
self.__MC[serviceName].removeCallbackHook(hookFunc)
except Exception as msg:
log.debug("unbindClientFunction() failed on %s: %s." %
(serviceName, msg))
# Monitor filters. NOT IN USE YET.
def addFilter(self, mcFilterName, mcFilter, serviceName='Main'):
if self.isSubscribed(serviceName):
self.__MC[serviceName].addFilter(mcFilterName, mcFilter)
def removeFilter(self, mcFilterName, serviceName='Main'):
if self.isSubscribed(serviceName):
self.__MC[serviceName].removeFilter(mcFilterName)
def enableFilter(self, mcFilterName, enabled, serviceName='Main'):
if self.isSubscribed(serviceName):
self.__MC[serviceName].enableFilter(mcFilterName, enabled)
# set attribute value on service
# def setMCAttribute( self, attributeName, attributeValue, serviceName = 'Main' ):
# if self.isSubscribed( serviceName ) and attributeName in [ 'gridProxy' ]:
# setattr( self.__MC[ serviceName ], attributeName, attributeValue )
def getMCAttribute(self, attributeName, serviceName='Main'):
# and attributeName in [ 'gridProxy' ]:
if self.isSubscribed(serviceName):
return getattr(self.__MC[serviceName], attributeName) |
337 | connections | import enum
from _typeshed import Incomplete
from typing import Any, NamedTuple
from psutil._common import (
NIC_DUPLEX_FULL as NIC_DUPLEX_FULL,
NIC_DUPLEX_HALF as NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN as NIC_DUPLEX_UNKNOWN,
AccessDenied as AccessDenied,
NoSuchProcess as NoSuchProcess,
ZombieProcess as ZombieProcess,
isfile_strict as isfile_strict,
parse_environ_block as parse_environ_block,
path_exists_strict as path_exists_strict,
supports_ipv6 as supports_ipv6,
usage_percent as usage_percent,
)
from psutil._compat import PY3 as PY3
__extra__all__: Any
POWER_SUPPLY_PATH: str
HAS_PROC_SMAPS: bool
HAS_PROC_SMAPS_ROLLUP: bool
HAS_PROC_IO_PRIORITY: Any
HAS_CPU_AFFINITY: Any
CLOCK_TICKS: Any
PAGESIZE: Any
BOOT_TIME: Any
LITTLE_ENDIAN: Any
DISK_SECTOR_SIZE: int
AF_LINK: Any
AddressFamily: Any
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
class IOPriority(enum.IntEnum):
IOPRIO_CLASS_NONE: int
IOPRIO_CLASS_RT: int
IOPRIO_CLASS_BE: int
IOPRIO_CLASS_IDLE: int
PROC_STATUSES: Any
TCP_STATUSES: Any
class svmem(NamedTuple):
total: int
available: int
percent: float
used: int
free: int
active: int
inactive: int
buffers: int
cached: int
shared: int
slab: int
class sdiskio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_time: Any
write_time: Any
read_merged_count: Any
write_merged_count: Any
busy_time: Any
class popenfile(NamedTuple):
path: Any
fd: Any
position: Any
mode: Any
flags: Any
class pmem(NamedTuple):
rss: Any
vms: Any
shared: Any
text: Any
lib: Any
data: Any
dirty: Any
class pfullmem(NamedTuple):
rss: Incomplete
vms: Incomplete
shared: Incomplete
text: Incomplete
lib: Incomplete
data: Incomplete
dirty: Incomplete
uss: Incomplete
pss: Incomplete
swap: Incomplete
class pmmap_grouped(NamedTuple):
path: Any
rss: Any
size: Any
pss: Any
shared_clean: Any
shared_dirty: Any
private_clean: Any
private_dirty: Any
referenced: Any
anonymous: Any
swap: Any
pmmap_ext: Any
class pio(NamedTuple):
read_count: Any
write_count: Any
read_bytes: Any
write_bytes: Any
read_chars: Any
write_chars: Any
class pcputimes(NamedTuple):
user: Any
system: Any
children_user: Any
children_system: Any
iowait: Any
def readlink(path): ...
def file_flags_to_mode(flags): ...
def is_storage_device(name): ...
def set_scputimes_ntuple(procfs_path) -> None: ...
scputimes: Any
prlimit: Any
def calculate_avail_vmem(mems): ...
def virtual_memory() -> svmem: ...
def swap_memory(): ...
def cpu_times(): ...
def per_cpu_times(): ...
def cpu_count_logical(): ...
def cpu_count_cores() -> int | None: ...
def cpu_stats(): ...
def cpu_freq(): ...
net_if_addrs: Any
class _Ipv6UnsupportedError(Exception): ...
class Connections:
tmap: Any
def __init__(self) -> None: ...
def get_proc_inodes(self, pid): ...
def get_all_inodes(self): ...
@staticmethod
def decode_address(addr, family): ...
@staticmethod
def process_inet(file, family, type_, inodes, filter_pid: Incomplete | None = ...) -> None: ...
@staticmethod
def process_unix(file, family, inodes, filter_pid: Incomplete | None = ...) -> None: ...
def retrieve(self, kind, pid: Incomplete | None = ...): ...
def net_connections(kind: str = ...): ...
def net_io_counters(): ...
def net_if_stats(): ...
disk_usage: Any
def disk_io_counters(perdisk: bool = ...): ...
class RootFsDeviceFinder:
major: Incomplete
minor: Incomplete
def __init__(self) -> None: ...
def ask_proc_partitions(self): ...
def ask_sys_dev_block(self): ...
def ask_sys_class_block(self): ...
def find(self): ...
def disk_partitions(all: bool = ...): ...
def sensors_temperatures(): ...
def sensors_fans(): ...
def sensors_battery(): ...
def users(): ...
def boot_time(): ...
def pids(): ...
def pid_exists(pid): ...
def ppid_map(): ...
def wrap_exceptions(fun): ...
class Process:
pid: Any
def __init__(self, pid) -> None: ...
def oneshot_enter(self) -> None: ...
def oneshot_exit(self) -> None: ...
def name(self): ...
def exe(self): ...
def cmdline(self): ...
def environ(self): ...
def terminal(self): ...
def io_counters(self) -> pio: ...
def cpu_times(self): ...
def cpu_num(self): ...
def wait(self, timeout: Incomplete | None = ...): ...
def create_time(self): ...
def memory_info(self): ...
def memory_full_info(self): ...
def memory_maps(self): ...
def cwd(self): ...
def num_ctx_switches(self, _ctxsw_re=...): ...
def num_threads(self, _num_threads_re=...): ...
def threads(self): ...
def nice_get(self): ...
def nice_set(self, value): ...
def cpu_affinity_get(self): ...
def cpu_affinity_set(self, cpus) -> None: ...
def ionice_get(self): ...
def ionice_set(self, ioclass, value): ...
def rlimit(self, resource_, limits: Incomplete | None = ...): ...
def status(self): ...
def open_files(self): ...
def METHOD_NAME(self, kind: str = ...): ...
def num_fds(self): ...
def ppid(self): ...
def uids(self, _uids_re=...): ...
def gids(self, _gids_re=...): ... |
338 | feed | ###############################################################################
# Queue and SimpleQueue implementation for loky
#
# authors: Thomas Moreau, Olivier Grisel
#
# based on multiprocessing/queues.py (16/02/2017)
# * Add some custom reducers for the Queues/SimpleQueue to tweak the
# pickling process. (overload Queue._feed/SimpleQueue.put)
#
import os
import sys
import errno
import weakref
import threading
from multiprocessing import util
from multiprocessing.queues import (
Full,
Queue as mp_Queue,
SimpleQueue as mp_SimpleQueue,
_sentinel,
)
from multiprocessing.context import assert_spawning
from .reduction import dumps
__all__ = ["Queue", "SimpleQueue", "Full"]
class Queue(mp_Queue):
def __init__(self, maxsize=0, reducers=None, ctx=None):
super().__init__(maxsize=maxsize, ctx=ctx)
self._reducers = reducers
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (
self._ignore_epipe,
self._maxsize,
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
self._sem,
self._opid,
)
def __setstate__(self, state):
(
self._ignore_epipe,
self._maxsize,
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
self._sem,
self._opid,
) = state
if sys.version_info >= (3, 9):
self._reset()
else:
self._after_fork()
# Overload _start_thread to correctly call our custom _feed
def _start_thread(self):
util.debug("Queue._start_thread()")
# Start thread which transfers data from buffer to pipe
self._buffer.clear()
self._thread = threading.Thread(
target=Queue.METHOD_NAME,
args=(
self._buffer,
self._notempty,
self._send_bytes,
self._wlock,
self._writer.close,
self._reducers,
self._ignore_epipe,
self._on_queue_feeder_error,
self._sem,
),
name="QueueFeederThread",
)
self._thread.daemon = True
util.debug("doing self._thread.start()")
self._thread.start()
util.debug("... done self._thread.start()")
# On process exit we will wait for data to be flushed to pipe.
#
# However, if this process created the queue then all
# processes which use the queue will be descendants of this
# process. Therefore waiting for the queue to be flushed
# is pointless once all the child processes have been joined.
created_by_this_process = self._opid == os.getpid()
if not self._joincancelled and not created_by_this_process:
self._jointhread = util.Finalize(
self._thread,
Queue._finalize_join,
[weakref.ref(self._thread)],
exitpriority=-5,
)
# Send sentinel to the thread queue object when garbage collected
self._close = util.Finalize(
self,
Queue._finalize_close,
[self._buffer, self._notempty],
exitpriority=10,
)
# Overload the _feed methods to use our custom pickling strategy.
@staticmethod
def METHOD_NAME(
buffer,
notempty,
send_bytes,
writelock,
close,
reducers,
ignore_epipe,
onerror,
queue_sem,
):
util.debug("starting thread to feed data to pipe")
nacquire = notempty.acquire
nrelease = notempty.release
nwait = notempty.wait
bpopleft = buffer.popleft
sentinel = _sentinel
if sys.platform != "win32":
wacquire = writelock.acquire
wrelease = writelock.release
else:
wacquire = None
while True:
try:
nacquire()
try:
if not buffer:
nwait()
finally:
nrelease()
try:
while True:
obj = bpopleft()
if obj is sentinel:
util.debug("feeder thread got sentinel -- exiting")
close()
return
# serialize the data before acquiring the lock
obj_ = dumps(obj, reducers=reducers)
if wacquire is None:
send_bytes(obj_)
else:
wacquire()
try:
send_bytes(obj_)
finally:
wrelease()
# Remove references early to avoid leaking memory
del obj, obj_
except IndexError:
pass
except BaseException as e:
if ignore_epipe and getattr(e, "errno", 0) == errno.EPIPE:
return
# Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to clean up.
if util.is_exiting():
util.info(f"error in queue thread: {e}")
return
else:
queue_sem.release()
onerror(e, obj)
def _on_queue_feeder_error(self, e, obj):
"""
Private API hook called when feeding data in the background thread
raises an exception. For overriding by concurrent.futures.
"""
import traceback
traceback.print_exc()
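# Illustrative sketch (not part of the original module): a Queue is normally
# created through a multiprocessing context, optionally with a mapping of
# custom reducers that dumps() applies when pickling queued objects. The
# reducer signature below is an assumption made for illustration only.
#
#   from multiprocessing import get_context
#
#   def reduce_my_type(obj):
#       return (MyType, (obj.value,))      # copyreg-style rebuild tuple
#
#   ctx = get_context("spawn")
#   q = Queue(maxsize=16, reducers={MyType: reduce_my_type}, ctx=ctx)
#   q.put(MyType(42))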
class SimpleQueue(mp_SimpleQueue):
def __init__(self, reducers=None, ctx=None):
super().__init__(ctx=ctx)
        # Add possibility to use custom reducers
self._reducers = reducers
def close(self):
self._reader.close()
self._writer.close()
# Use custom queue set/get state to be able to reduce the custom reducers
def __getstate__(self):
assert_spawning(self)
return (
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
)
def __setstate__(self, state):
(
self._reader,
self._writer,
self._reducers,
self._rlock,
self._wlock,
) = state
# Overload put to use our customizable reducer
def put(self, obj):
# serialize the data before acquiring the lock
obj = dumps(obj, reducers=self._reducers)
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self._writer.send_bytes(obj)
else:
with self._wlock:
self._writer.send_bytes(obj) |
339 | need rerun | from typing import (
Dict, Optional, Type, TYPE_CHECKING
)
from typing_extensions import Literal
import numpy as np
from libertem.io.dataset.base import DataSet
from libertem.udf.base import UDF, UDFResultDict
# Base classes for results moved to common for MIT licensing, refs #1031
from libertem.common.analysis import AnalysisResult, AnalysisResultSet
if TYPE_CHECKING:
from libertem.analysis.helper import GeneratorHelper
from libertem.web.rpc import ProcedureProtocol
import numpy.typing as nt
class Analysis:
"""
Abstract base class for Analysis classes.
An Analysis is the interface between a UDF and the Web API, and handles
visualization of partial and full results.
Passing an instance of an :class:`Analysis` sub-class to
:meth:`libertem.api.Context.run` will generate an :class:`AnalysisResultSet`.
The content of this result set is governed by the specific implementation of
the :code:`Analysis` sub-class.
.. versionadded:: 0.3.0
.. versionchanged:: 0.7.0
Removed deprecated methods :code:`get_results` and :code:`get_job`
"""
TYPE: Literal["UDF"] = "UDF"
registry: Dict[str, "Type[Analysis]"] = {}
def __init__(self, dataset: DataSet, parameters: Dict):
self.dataset = dataset
def __init_subclass__(cls, id_=None, **kwargs):
# override id_ with your own id
# Used to register the subclass
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
super().__init_subclass__(**kwargs)
if id_ is not None:
cls.registry[id_] = cls
@classmethod
def get_analysis_by_type(cls, id_: str) -> Type["Analysis"]:
return cls.registry[id_]
@classmethod
def get_template_helper(cls) -> Type["GeneratorHelper"]:
raise NotImplementedError()
@classmethod
def get_rpc_definitions(cls) -> Dict[str, Type["ProcedureProtocol"]]:
return {}
async def controller(self, cancel_id, executor, job_is_cancelled, send_results):
raise NotImplementedError()
def get_udf_results(
self, udf_results: UDFResultDict, roi: Optional[np.ndarray],
damage: "nt.ArrayLike",
) -> AnalysisResultSet:
"""
        Convert UDF results to an :code:`AnalysisResultSet`,
including visualizations.
Parameters
----------
udf_results
raw results from the UDF
roi : numpy.ndarray or None
Boolean array of the navigation dimension
Returns
-------
list of AnalysisResult
one or more annotated results
"""
raise NotImplementedError()
def get_udf(self) -> UDF:
"""
Set TYPE='UDF' on the class and implement this method to run a UDF
from this analysis
"""
raise NotImplementedError()
def get_roi(self) -> Optional[np.ndarray]:
"""
Get the region of interest the UDF should be run on. For example,
the parameters could describe some geometry, which this method should
convert to a boolean array. See also: :func:`libertem.analysis.getroi.get_roi`
Returns
-------
numpy.ndarray or None
region of interest for which we want to run our analysis
"""
raise NotImplementedError()
def get_complex_results(self, job_result, key_prefix, title, desc, damage, default_lin=True):
raise NotImplementedError()
def get_parameters(self, parameters: Dict) -> Dict:
"""
Get analysis parameters. Override to set defaults
"""
raise NotImplementedError()
def METHOD_NAME(self, old_params: Dict, new_params: Dict) -> bool:
"""
Determine if the analysis needs to be re-run on the data. If not,
we can just call `get_udf_results` again, for example if the parameters
only change the visualization.
Parameters
----------
old_params : Dict
new_params : Dict
Returns
-------
bool
True iff the parameter change needs to cause a re-run on the data
"""
return True
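# Illustrative registration sketch (hypothetical subclass, not part of this
# module): passing `id_` at class-creation time stores the class in
# `Analysis.registry`, so it can later be retrieved with
# `Analysis.get_analysis_by_type("sum")`.
#
#   class SumAnalysis(Analysis, id_="sum"):
#       def get_udf(self):
#           ...  # wiring of the concrete UDF is omitted in this sketch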
class BaseAnalysis(Analysis):
def __init__(self, dataset, parameters):
super().__init__(dataset, parameters)
self.parameters = self.get_parameters(parameters)
self.parameters.update(parameters)
if self.TYPE == 'JOB':
raise RuntimeError("Job support was removed in 0.7")
def get_roi(self):
return None
def get_complex_results(
self, job_result, key_prefix, title, desc, damage, default_lin=True):
from libertem.viz import visualize_simple, CMAP_CIRCULAR_DEFAULT
damage = damage & np.isfinite(job_result)
magn = np.abs(job_result)
angle = np.angle(job_result)
wheel = CMAP_CIRCULAR_DEFAULT.rgb_from_vector(
(job_result.real, job_result.imag, 0),
vmax=np.max(magn[damage])
)
return [
            # for compatibility, the magnitude has key=key_prefix
AnalysisResult(
raw_data=magn,
visualized=visualize_simple(magn, damage=damage),
key=key_prefix if default_lin else f'{key_prefix}_lin',
title="%s [magn]" % title,
desc="%s [magn]" % desc,
),
AnalysisResult(
raw_data=magn,
visualized=visualize_simple(magn, logarithmic=True, damage=damage),
key=f'{key_prefix}_log' if default_lin else key_prefix,
title="%s [log(magn)]" % title,
desc="%s [log(magn)]" % desc,
),
AnalysisResult(
raw_data=job_result.real,
visualized=visualize_simple(job_result.real, damage=damage),
key="%s_real" % key_prefix,
title="%s [real]" % title,
desc="%s [real]" % desc,
),
AnalysisResult(
raw_data=job_result.imag,
visualized=visualize_simple(job_result.imag, damage=damage),
key="%s_imag" % key_prefix,
title="%s [imag]" % title,
desc="%s [imag]" % desc,
),
AnalysisResult(
raw_data=angle,
visualized=visualize_simple(angle, damage=damage),
key="%s_angle" % key_prefix,
title="%s [angle]" % title,
desc="%s [angle]" % desc,
),
AnalysisResult(
raw_data=job_result,
visualized=wheel,
key="%s_complex" % key_prefix,
title="%s [complex]" % title,
desc="%s [complex]" % desc,
),
]
def get_parameters(self, parameters: Dict):
"""
Get analysis parameters. Override to set defaults
"""
return parameters
__all__ = ['AnalysisResult', 'AnalysisResultSet', 'Analysis', 'BaseAnalysis'] |
340 | get value | import os
import sys
import argparse
import re
class style():
RED = '\033[31m'
GREEN = '\033[32m'
RESET = '\033[0m'
resultRegex = r"init time = (.*)s, run time = (.*)s"
def getTimesFromFile( filePath ):
"""
Return the init time and run time from a GEOSX standard output file.
Arguments:
filePath: The path of the output file to parse.
"""
with open( filePath, "r" ) as file:
for line in file:
matches = re.search( resultRegex, line )
if matches is not None:
return float( matches.groups()[ 0 ] ), float( matches.groups()[ 1 ] )
raise Exception( "Could not get times from {}".format( filePath ) )
def getTimesFromFolder( folder ):
"""
Return a dictionary containing the init and run times of each run in the benchmark folder.
Arguments:
folder: The top level directory the benchmarks were run in.
"""
results = {}
for outerFile in os.listdir( folder ):
xmlName = outerFile
outerFile = os.path.join( folder, outerFile )
if os.path.isdir( outerFile ):
for innerFile in os.listdir( outerFile ):
problemName = innerFile
innerFile = os.path.join( outerFile, innerFile )
if os.path.isdir( innerFile ):
outputFile = os.path.join( innerFile, "output.txt" );
if not os.path.exists( outputFile ) or not os.path.isfile( outputFile ):
raise ValueError( "{} does not exist or is not a file.".format( outputFile ) )
init, run = getTimesFromFile( outputFile )
results[ ( xmlName, problemName ) ] = init, run
return results
def joinResults( results, baselineResults ):
"""
Return a dictionary containing both the results and baseline results.
Arguments:
results: The dictionary of benchmark results.
baselineResults: The dictionary of baseline benchmark results.
"""
joined = {}
for key in results:
joined[ key ] = [ results[ key ][ 0 ], results[ key ][ 1 ], float( "nan" ), float( "nan" ) ]
for key in baselineResults:
if key in joined:
joined[ key ][ 2 ] = baselineResults[ key ][ 0 ]
joined[ key ][ 3 ] = baselineResults[ key ][ 1 ]
else:
joined[ key ] = [ float( "nan" ), float( "nan" ), baselineResults[ key ][ 0 ], baselineResults[ key ][ 1 ] ]
joinedList = []
for key in joined:
item = []
item += key
item += joined[ key ]
joinedList.append( item )
    return sorted( joinedList )
def METHOD_NAME( x ):
"""
If x is a tuple return the first entry, else return x.
Arguments:
x: The object to get the value of.
"""
if isinstance( x, tuple ):
return x[ 0 ]
else:
return x
def getColor( x ):
"""
If x is a tuple return the second entry, which should be an ANSI color code. Else return the default color.
Arguments:
x: The object to get the color of.
"""
if isinstance( x, tuple ):
return x[ 1 ]
else:
return style.RESET
def printTable( table ):
"""
Print a table in a nice format, with optional coloring.
Arguments:
        table: A list of rows to print. Each row should be of the same length. The entries in each row
should either be a string or a tuple of a string and ANSI color code.
"""
col_width = [ max( len( METHOD_NAME( x ) ) for x in col ) for col in zip( *table ) ]
print( "| " + " | ".join( "{:{}}".format( METHOD_NAME( x ), col_width[ i ] ) for i, x in enumerate( table[ 0 ] ) ) + " |" )
print( "|" + "|".join( "-" * width + "--" for width in col_width ) + "|" )
for line in table[ 1: ]:
print( "| " + " | ".join( "{}{:{}}{}".format( getColor( x ), METHOD_NAME( x ), col_width[ i ], style.RESET ) for i, x in enumerate( line ) ) + " |" )
print( "|" + "|".join( "-" * width + "--" for width in col_width ) + "|" )
def generateTable( results, baselineResults ):
"""
Print a table containing the speed up of the results over the baseline results.
Arguments:
results: The dictionary of benchmark results.
baselineResults: The dictionary of baseline benchmark results.
"""
lines = [ ( "XML Name", "Problem Name", "init speed up", "run speed up" ) ]
joined = joinResults( results, baselineResults )
for result in joined:
xmlName = result[ 0 ]
problemName = result[ 1 ]
initTime = result[ 2 ]
runTime = result[ 3 ]
baseInitTime = result[ 4 ]
baseRunTime = result[ 5 ]
lines.append( ( xmlName, problemName,
"{:.2f}x".format( baseInitTime / initTime ),
"{:.2f}x".format( baseRunTime / runTime ) ) )
printTable( lines )
def main():
""" Parse the command line arguments and compare the benchmarks. """
parser = argparse.ArgumentParser()
parser.add_argument( "toCompareDir", help="The directory where the new benchmarks were run." )
parser.add_argument( "baselineDir", help="The directory where the baseline benchmarks were run." )
args = parser.parse_args()
toCompareDir = os.path.abspath( args.toCompareDir )
if not os.path.isdir( toCompareDir ):
raise ValueError( "toCompareDir is not a directory!" )
baselineDir = os.path.abspath( args.baselineDir )
if not os.path.isdir( baselineDir ):
raise ValueError( "baselineDir is not a directory!" )
results = getTimesFromFolder( toCompareDir )
baselineResults = getTimesFromFolder( baselineDir )
generateTable( results, baselineResults )
return 0
if __name__ == "__main__" and not sys.flags.interactive:
sys.exit(main()) |
341 | has too small nets | #
# Copyright (C) 2012 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Builds the prefix matrix."""
from collections import namedtuple
import logging
import math
import IPy
from django.urls import reverse
from nav.metrics.templates import metric_path_for_prefix
from nav.metrics.graphs import get_simple_graph_url
from nav.report import metaIP, IPtools, IPtree
_logger = logging.getLogger(__name__)
class Cell(object):
"""Represents a table cell in subnet matrix"""
def __init__(self, **kwargs):
self.prefixid = kwargs.get('prefixid', '')
self.colspan = kwargs.get('colspan', 1)
self.rowspan = kwargs.get('rowspan', 1)
self.content = kwargs.get('content', ' ')
self.is_empty = kwargs.get('is_empty', False)
self.netaddr = kwargs.get('netaddr')
self.dataurl = kwargs.get('dataurl')
self.link = kwargs.get('link')
Link = namedtuple('Link', ('href', 'text', 'title'))
class Matrix(object):
"""This class is "abstract" and should not be instansiated directly.
Superclass with usefull methods for IP matrices.
Direct known subclasses:
nav.report.matrixIPv6
nav.report.matrixIPv4
"""
Node = namedtuple('Node', 'net subnets')
def __init__(self, start_net, end_net=None, bits_in_matrix=3):
if end_net is None:
end_net = IPtools.getLastSubnet(start_net)
self.start_net = start_net
self.end_net = end_net
self.bits_in_matrix = bits_in_matrix
self.tree = IPtree.build_tree(
start_net, end_net, bits_in_matrix=bits_in_matrix, add_missing_nets=True
)
self.tree_nets = self.extract_tree_nets()
self.matrix_nets = self.extract_matrix_nets()
self.heading_colspan = 1
self.nodes = None
self.num_columns = None
self.column_headings = None
def build(self):
"""Builds the datastructure for the template to render
Must be overriden and implemented by subclasses
"""
raise NotImplementedError('Must be implemented in subclass')
def METHOD_NAME(self, net):
"""
Returns True if argument ``net'' has too many small subnets for the
matrix.
"""
for net in IPtree.get_subtree(self.tree, net):
if net.prefixlen() > self.end_net.prefixlen():
return True
return False
def extract_matrix_nets(self):
"""These should be shown as horizontal rows in the matrix."""
return IPtree.extract_subtrees_with_prefix_length(
self.tree, self.end_net.prefixlen() - self.bits_in_matrix
)
def extract_tree_nets(self):
"""These should be listed vertically in the leftmost column."""
return IPtree.remove_subnets_with_prefixlength(
self.tree, self.end_net.prefixlen() - self.bits_in_matrix + 1
)
def _colspan(self, ip):
return min(
self.num_columns,
int(math.pow(2, self.end_net.prefixlen() - ip.prefixlen())),
)
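    # Illustrative example of the computation above: with an end_net of /26, a
    # /24 cell spans min(self.num_columns, 2 ** (26 - 24)), i.e. at most 4 columns.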
def _get_row_size(self):
"""Gets the prefixlength for a row"""
return self.end_net.prefixlen() - self.bits_in_matrix
def _create_data_row(self, subnet):
"""Create a data row containing a list of cells
:rtype: list[Cell]
"""
if self.METHOD_NAME(subnet):
return [self._create_too_small_subnets_cell()]
elif self.matrix_nets[subnet]:
# this subnet is divided into parts
host_nybbles_map = IPtools.getLastbitsIpMap(
list(self.matrix_nets[subnet].keys())
)
return self._add_child_nets(host_nybbles_map)
else:
# this subnet spans the whole row
meta = metaIP.MetaIP(subnet)
return [self._create_cell(subnet, meta)]
def _add_child_nets(self, host_nybbles_map):
next_header_idx = -1
cells = []
for i in self.column_headings:
if self.column_headings.index(i) < next_header_idx:
continue
key = i.lower()
if key in host_nybbles_map:
ip = host_nybbles_map[key]
meta = metaIP.MetaIP(ip)
matrix_cell = self._create_cell(ip, meta, key=key)
next_header_idx = self.column_headings.index(i) + int(self._colspan(ip))
else:
matrix_cell = Cell(is_empty=True)
cells.append(matrix_cell)
return cells
def _create_cell(self, ip, meta, rowspan=1, key=0):
"""Creates a table cell based on ip"""
return Cell(
prefixid=meta.prefixid,
colspan=self._colspan(ip),
rowspan=rowspan,
content=self._get_content(key, ip),
dataurl=self._get_prefix_url(ip),
netaddr=ip,
)
@staticmethod
def _create_empty_cell():
return Cell(colspan=80, color=None, is_empty=True)
def _create_index_cell(self, subnet, link=True):
"""Creates the cell for the first column in the matrix
This cell typically displays the subnet
:param link: If the cell should contain a link to subnet or not
"""
if link:
return Cell(link=self._netlink(subnet))
else:
return Cell(content=metaIP.MetaIP(subnet).getTreeNet())
def _create_too_small_subnets_cell(self):
return Cell(
colspan=self.num_columns,
color=self._get_color('large'),
link=self._get_too_small_net_link(),
)
def _add_large_subnet(self, subnet, matrix_row):
"""Adds correct rowspan to cell for large nets"""
meta = metaIP.MetaIP(subnet)
rowspan = 2 ** (self._get_row_size() - subnet.prefixlen())
matrix_row.append(self._create_cell(subnet, meta, rowspan=rowspan))
# Return the number of extra rows that need to be made
return rowspan - 1
def _create_extra_rows(self, num_extra_rows, subnet):
extra_nets = []
row_net = IPy.IP('{}/{}'.format(subnet.net(), self._get_row_size()))
for _ in range(num_extra_rows):
row_net = IPtools.get_next_subnet(row_net)
extra_nets.append([self._create_index_cell(row_net, link=False)])
return extra_nets
@staticmethod
def _get_content(key, ip):
raise NotImplementedError
@staticmethod
def _netlink(ip, append_term_and_prefix=False):
raise NotImplementedError
def _get_too_small_net_link(self):
"""Creates a link to the next drill down net"""
link = reverse('report-matrix-scope', args=[self.end_net])
return Link(link, 'Too many small nets', 'Go to matrix for smaller prefix')
@staticmethod
def _get_color(nettype):
"""Gets the css-class name added to the cell based on usage"""
if nettype == 'static' or nettype == 'scope' or nettype == 'reserved':
return 'subnet_other'
elif nettype == 'large':
return 'subnet_large'
@staticmethod
def _get_prefix_url(prefix):
return get_simple_graph_url(
[metric_path_for_prefix(prefix.strCompressed(), 'ip_count')], format='json'
) |
342 | generate dataloader | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BaseDataloder of all dataloaders."""
from abc import abstractmethod
class BaseDataLoader:
"""Base class for all DataLoaders.
_generate_dataloader is needed to create a dataloader object
from the general params like batch_size and sampler. The dynamic batching is just to
generate a new dataloader by setting batch_size and last_batch.
"""
def __init__(
self,
dataset,
batch_size=1,
last_batch="rollover",
collate_fn=None,
sampler=None,
batch_sampler=None,
num_workers=0,
pin_memory=False,
shuffle=False,
distributed=False,
):
"""Initialize BaseDataLoader.
Args:
dataset (object): dataset from which to load the data
batch_size (int, optional): number of samples per batch. Defaults to 1.
last_batch (str, optional): whether to drop the last batch if it is incomplete.
Support ['rollover', 'discard'], rollover means False, discard means True.
Defaults to 'rollover'.
collate_fn (callable, optional): merge data with outer dimension batch size. Defaults to None.
sampler (Sampler, optional): Sampler object to sample data. Defaults to None.
batch_sampler (BatchSampler, optional): BatchSampler object to generate batch of indices. Defaults to None.
num_workers (int, optional): number of subprocesses to use for data loading. Defaults to 0.
pin_memory (bool, optional): whether to copy data into pinned memory before returning. Defaults to False.
shuffle (bool, optional): whether to shuffle data. Defaults to False.
distributed (bool, optional): whether the dataloader is distributed. Defaults to False.
"""
self.dataset = dataset
self.collate_fn = collate_fn
self.sampler = sampler
self.batch_sampler = batch_sampler
self.num_workers = num_workers
self.pin_memory = pin_memory
self._batch_size = batch_size
self.shuffle = shuffle
self.distributed = distributed
self.last_batch = last_batch
self.drop_last = False if last_batch == "rollover" else True
self.dataloader = self.METHOD_NAME(
self.dataset,
batch_size=batch_size,
last_batch=last_batch,
collate_fn=collate_fn,
sampler=sampler,
batch_sampler=batch_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
shuffle=shuffle,
distributed=distributed,
)
def batch(self, batch_size, last_batch=None):
"""Set batch size for dataloader.
Args:
batch_size (int): number of samples per batch.
last_batch (str, optional): whether to drop the last batch if it is incomplete.
Support ['rollover', 'discard'], rollover means False, discard means True.
Defaults to None.
"""
self._batch_size = batch_size
if last_batch is not None:
self.last_batch = last_batch
self.dataloader = self.METHOD_NAME(
self.dataset,
batch_size,
self.last_batch,
self.collate_fn,
self.sampler,
self.batch_sampler,
self.num_workers,
self.pin_memory,
self.shuffle,
self.distributed,
)
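    # Illustrative usage sketch (assumes a hypothetical concrete subclass, since
    # dataloader construction itself is left abstract here):
    #   loader = MyDataLoader(dataset, batch_size=1)
    #   loader.batch(32, last_batch="discard")  # rebuilds the underlying dataloader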
@property
def batch_size(self):
"""Get dataloader's batch_size.
Returns:
int: batch_size
"""
return self._batch_size
def __iter__(self):
"""Yield data in iterative order.
Returns:
iterator: iterator for dataloder
"""
return iter(self.dataloader)
@abstractmethod
def METHOD_NAME(
self,
dataset,
batch_size,
last_batch,
collate_fn,
sampler,
batch_sampler,
num_workers,
pin_memory,
shuffle,
distributed,
):
raise NotImplementedError |
343 | on night death message | from __future__ import annotations
import random
import re
from typing import Optional, Union
from src import config
from src import users
from src.cats import Wolf
from src.containers import UserSet, UserDict
from src.decorators import command
from src.dispatcher import MessageDispatcher
from src.events import Event, event_listener
from src.functions import get_players, get_all_players, get_target, get_main_role
from src.gamestate import GameState
from src.messages import messages
from src.status import try_misdirection, try_exchange, add_protection, add_dying
from src.users import User
GUARDED: UserDict[users.User, users.User] = UserDict()
LASTGUARDED: UserDict[users.User, users.User] = UserDict()
PASSED = UserSet()
@command("guard", chan=False, pm=True, playing=True, silenced=True, phases=("night",), roles=("guardian angel",))
def guard(wrapper: MessageDispatcher, message: str):
"""Guard a player, preventing them from being killed that night."""
if wrapper.source in GUARDED:
wrapper.pm(messages["already_protecting"])
return
var = wrapper.game_state
target = get_target(wrapper, re.split(" +", message)[0], allow_self=config.Main.get("gameplay.safes.guard_self"), not_self_message="cannot_guard_self")
if not target:
return
if LASTGUARDED.get(wrapper.source) is target:
wrapper.pm(messages["guardian_target_another"].format(target))
return
target = try_misdirection(var, wrapper.source, target)
if try_exchange(var, wrapper.source, target):
return
add_protection(var, target, wrapper.source, "guardian angel")
PASSED.discard(wrapper.source)
GUARDED[wrapper.source] = target
LASTGUARDED[wrapper.source] = target
if wrapper.source is target:
wrapper.pm(messages["guardian_guard_self"])
else:
wrapper.pm(messages["protecting_target"].format(target))
target.send(messages["target_protected"])
@command("pass", chan=False, pm=True, playing=True, phases=("night",), roles=("guardian angel",))
def pass_cmd(wrapper: MessageDispatcher, message: str):
"""Decline to use your special power for that night."""
if wrapper.source in GUARDED:
wrapper.pm(messages["already_protecting"])
return
PASSED.add(wrapper.source)
wrapper.pm(messages["guardian_no_protect"])
@event_listener("del_player")
def on_del_player(evt: Event, var: GameState, player: User, all_roles: set[str], death_triggers: bool):
if var.current_phase == "night" and player in GUARDED:
GUARDED[player].send(messages["protector_disappeared"])
for dictvar in (GUARDED, LASTGUARDED):
for k, v in list(dictvar.items()):
if player in (k, v):
del dictvar[k]
PASSED.discard(player)
@event_listener("new_role")
def on_new_role(evt: Event, var: GameState, player: User, old_role: Optional[str]):
if old_role == "guardian angel" and evt.data["role"] != "guardian angel":
if player in GUARDED:
guarded = GUARDED.pop(player)
guarded.send(messages["protector_disappeared"])
del LASTGUARDED[:player:]
@event_listener("chk_nightdone")
def on_chk_nightdone(evt: Event, var: GameState):
evt.data["acted"].extend(GUARDED)
evt.data["acted"].extend(PASSED)
evt.data["nightroles"].extend(get_players(var, ("guardian angel",)))
@event_listener("resolve_killer_tag")
def on_resolve_killer_tag(evt: Event, var: GameState, victim: User, tag: str):
if tag == "@angel":
# GA is attacked by the wolf they (mistakenly?) guarded
evt.data["attacker"] = GUARDED[victim]
evt.data["role"] = get_main_role(var, GUARDED[victim])
evt.data["try_lycanthropy"] = True
@event_listener("night_kills")
def on_night_kills(evt: Event, var: GameState):
chance = config.Main.get("gameplay.safes.angel_dies")
if chance == 0:
return
evt.data["kill_priorities"]["@angel"] = 10
wolves = get_players(var, Wolf)
for angel in get_all_players(var, ("guardian angel",)):
if GUARDED.get(angel) in wolves and random.random() * 100 < chance:
evt.data["victims"].add(angel)
evt.data["killers"][angel].append("@angel")
@event_listener("night_death_message")
def METHOD_NAME(evt: Event, var: GameState, victim: User, killer: Union[User, str]):
if killer == "@angel":
evt.data["key"] = "protected_wolf" if var.role_reveal == "on" else "protected_wolf_no_reveal"
evt.data["args"] = [victim, "guardian angel"]
@event_listener("transition_night_begin")
def on_transition_night_begin(evt: Event, var: GameState):
# needs to be here in order to allow protections to work during the daytime
# (right now they don't due to other reasons, but that may change)
GUARDED.clear()
@event_listener("send_role")
def on_send_role(evt: Event, var: GameState):
ps = get_players(var)
for gangel in get_all_players(var, ("guardian angel",)):
pl = ps[:]
random.shuffle(pl)
if gangel in LASTGUARDED:
if LASTGUARDED[gangel] in pl:
pl.remove(LASTGUARDED[gangel])
chance = config.Main.get("gameplay.safes.angel_dies")
gangel.send(messages["guardian_angel_notify"])
if var.next_phase != "night":
return
if chance > 0:
gangel.send(messages["bodyguard_death_chance"].format(chance))
if config.Main.get("gameplay.safes.guard_self"):
gangel.send(messages["guardian_self_notification"])
else:
pl.remove(gangel)
gangel.send(messages["players_list"].format(pl))
@event_listener("player_protected")
def on_player_protected(evt: Event, var: GameState, target: User, attacker: User, attacker_role: str, protector: User, protector_role: str, reason: str):
if protector_role == "guardian angel":
evt.data["messages"].append(messages[reason + "_angel"].format(attacker, target))
@event_listener("remove_protection")
def on_remove_protection(evt: Event, var: GameState, target: User, attacker: User, attacker_role: str, protector: User, protector_role: str, reason: str):
if attacker_role == "fallen angel" and protector_role == "guardian angel":
evt.data["remove"] = True
if protector is not target:
protector.send(messages[reason + "_success"].format(target))
target.send(messages[reason + "_deprotect"])
if (random.random() * 100) < config.Main.get("gameplay.safes.fallen_kills"):
add_dying(var, protector, killer_role="fallen angel", reason=reason)
@event_listener("begin_day")
def on_begin_day(evt: Event, var: GameState):
PASSED.clear()
# clear out LASTGUARDED for people that didn't guard last night
for g in list(LASTGUARDED.keys()):
if g not in GUARDED:
del LASTGUARDED[g]
@event_listener("reset")
def on_reset(evt: Event, var: GameState):
GUARDED.clear()
LASTGUARDED.clear()
PASSED.clear()
@event_listener("get_role_metadata")
def on_get_role_metadata(evt: Event, var: Optional[GameState], kind: str):
if kind == "role_categories":
evt.data["guardian angel"] = {"Village", "Safe", "Nocturnal"}
elif kind == "lycanthropy_role":
evt.data["guardian angel"] = {"role": "fallen angel", "prefix": "fallen_angel", "secondary_roles": {"assassin"}} |
344 | layout | from conan import ConanFile
from conan.tools.cmake import CMake, CMakeToolchain, CMakeDeps, cmake_layout
from conan.errors import ConanInvalidConfiguration
from conan.tools.files import apply_conandata_patches, export_conandata_patches, get, copy, rm, rmdir
import os
required_conan_version = ">=1.52.0"
class CgnsConan(ConanFile):
name = "cgns"
description = "Standard for data associated with the numerical solution " \
"of fluid dynamics equations."
topics = "data", "cfd", "fluids"
homepage = "http://cgns.org/"
license = "Zlib"
url = "https://github.com/conan-io/conan-center-index"
settings = "os", "compiler", "build_type", "arch"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_hdf5": [True, False],
"parallel": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
"with_hdf5": True,
"parallel": False,
}
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
try:
del self.options.fPIC
except Exception:
pass
try:
del self.settings.compiler.libcxx
except Exception:
pass
try:
del self.settings.compiler.cppstd
except Exception:
pass
def METHOD_NAME(self):
cmake_layout(self, src_folder="src")
def requirements(self):
if self.options.with_hdf5:
self.requires("hdf5/1.14.0")
def validate(self):
if self.info.options.parallel and not (self.info.options.with_hdf5 and self.dependencies["hdf5"].options.parallel):
raise ConanInvalidConfiguration("The option 'parallel' requires HDF5 with parallel=True")
if self.info.options.parallel and self.info.options.with_hdf5 and self.dependencies["hdf5"].options.enable_cxx:
raise ConanInvalidConfiguration("The option 'parallel' requires HDF5 with enable_cxx=False")
def source(self):
get(self, **self.conan_data["sources"][self.version], destination=self.source_folder, strip_root=True)
def generate(self):
cmake = CMakeDeps(self)
cmake.generate()
tc = CMakeToolchain(self)
tc.variables["CGNS_ENABLE_TESTS"] = False
tc.variables["CGNS_BUILD_TESTING"] = False
tc.variables["CGNS_ENABLE_FORTRAN"] = False
tc.variables["CGNS_ENABLE_HDF5"] = self.options.with_hdf5
tc.variables["CGNS_BUILD_SHARED"] = self.options.shared
tc.variables["CGNS_USE_SHARED"] = self.options.shared
tc.variables["CGNS_ENABLE_PARALLEL"] = self.options.parallel
tc.variables["CGNS_BUILD_CGNSTOOLS"] = False
tc.generate()
# Other flags, seen in appveyor.yml in source code, not currently managed.
# CGNS_ENABLE_LFS:BOOL=OFF --- note in code: needed on 32 bit systems
# CGNS_ENABLE_SCOPING:BOOL=OFF --- disabled in VTK's bundle
# HDF5_NEED_ZLIB:BOOL=ON -- should be dealt with by cmake auto dependency management or something?
def build(self):
apply_conandata_patches(self)
cmake = CMake(self)
cmake.configure()
cmake.build(target="cgns_shared" if self.options.shared else "cgns_static")
def package(self):
copy(self, "license.txt", dst=os.path.join(self.package_folder, "licenses"), src=self.source_folder)
cmake = CMake(self)
cmake.install()
rm(self, "cgnsBuild.defs", os.path.join(self.package_folder, "include"))
rmdir(self, os.path.join(self.package_folder, "lib", "cmake"))
def package_info(self):
self.cpp_info.set_property("cmake_file_name", "CGNS")
self.cpp_info.set_property("cmake_target_name", "CGNS::CGNS")
if self.options.shared:
self.cpp_info.components["cgns_shared"].set_property("cmake_target_name", "CGNS::cgns_shared")
self.cpp_info.components["cgns_shared"].libs = ["cgnsdll" if self.settings.os == "Windows" else "cgns"]
self.cpp_info.components["cgns_shared"].libdirs = ["lib"]
if self.options.with_hdf5:
self.cpp_info.components["cgns_shared"].requires = ["hdf5::hdf5"]
if self.settings.os == "Windows":
# we could instead define USE_DLL but it's too generic
self.cpp_info.components["cgns_shared"].defines = ["CGNSDLL=__declspec(dllimport)"]
else:
self.cpp_info.components["cgns_static"].set_property("cmake_target_name", "CGNS::cgns_static")
self.cpp_info.components["cgns_static"].libs = ["cgns"]
self.cpp_info.components["cgns_static"].libdirs = ["lib"]
if self.options.with_hdf5:
self.cpp_info.components["cgns_static"].requires = ["hdf5::hdf5"]
# TODO: to remove in conan v2 once cmake_find_package_* generators removed
self.cpp_info.names["cmake_find_package"] = "CGNS"
self.cpp_info.names["cmake_find_package_multi"] = "CGNS" |
345 | header parameters | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"adp account wait",
)
class Wait(AAZWaitCommand):
"""Place the CLI in a waiting state until a condition is met.
"""
_aaz_info = {
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.autonomousdevelopmentplatform/accounts/{}", "2022-09-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.account_name = AAZStrArg(
options=["-n", "--name", "--account-name"],
help="The name of the ADP account",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*",
max_length=50,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.AccountsGet(ctx=self.ctx)()
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=False)
return result
class AccountsGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AutonomousDevelopmentPlatform/accounts/{accountName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"accountName", self.ctx.args.account_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-09-01-preview",
required=True,
),
}
return parameters
@property
def METHOD_NAME(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"client_flatten": True},
)
_schema_on_200.system_data = AAZObjectType(
serialized_name="systemData",
flags={"read_only": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.account_id = AAZStrType(
serialized_name="accountId",
flags={"read_only": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
system_data = cls._schema_on_200.system_data
system_data.created_at = AAZStrType(
serialized_name="createdAt",
flags={"read_only": True},
)
system_data.created_by = AAZStrType(
serialized_name="createdBy",
flags={"read_only": True},
)
system_data.created_by_type = AAZStrType(
serialized_name="createdByType",
flags={"read_only": True},
)
system_data.last_modified_at = AAZStrType(
serialized_name="lastModifiedAt",
flags={"read_only": True},
)
system_data.last_modified_by = AAZStrType(
serialized_name="lastModifiedBy",
flags={"read_only": True},
)
system_data.last_modified_by_type = AAZStrType(
serialized_name="lastModifiedByType",
flags={"read_only": True},
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
__all__ = ["Wait"] |
346 | test block splitter with iter | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2010-2023 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Test related to code in openquake/utils/general.py
"""
import unittest.mock as mock
import unittest
import numpy
from operator import attrgetter
from collections import namedtuple
from openquake.baselib.general import (
block_splitter, split_in_blocks, assert_close, rmsdiff,
deprecated, DeprecationWarning, cached_property,
compress, decompress, random_choice)
class BlockSplitterTestCase(unittest.TestCase):
"""Tests for :func:`openquake.baselib.general.block_splitter`."""
DATA = range(10)
def test_block_splitter(self):
expected = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9],
]
actual = [x for x in block_splitter(self.DATA, 3)]
self.assertEqual(expected, actual)
def test_block_splitter_block_size_eq_data_len(self):
expected = [self.DATA]
actual = [x for x in block_splitter(self.DATA, 10)]
self.assertEqual(expected, actual)
def test_block_splitter_block_size_gt_data_len(self):
expected = [self.DATA]
actual = [x for x in block_splitter(self.DATA, 11)]
self.assertEqual(expected, actual)
def test_block_splitter_zero_block_size(self):
gen = block_splitter(self.DATA, 0)
with self.assertRaises(ValueError):
next(gen)
def test_block_splitter_block_size_lt_zero(self):
gen = block_splitter(self.DATA, -1)
with self.assertRaises(ValueError):
next(gen)
def test_block_splitter_with_generator(self):
# Test the block with a data set of unknown length
# (such as a generator)
data = range(10)
expected = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9],
]
actual = [x for x in block_splitter(data, 3)]
self.assertEqual(expected, actual)
def METHOD_NAME(self):
# Test the block with a data set of unknown length
data = range(10)
expected = [
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9],
]
actual = [x for x in block_splitter(data, 3)]
self.assertEqual(expected, actual)
def test_split_with_weight(self):
weights = dict([('a', 11), ('b', 10), ('c', 100), ('d', 15), ('e', 20),
('f', 5), ('g', 30), ('h', 17), ('i', 25)])
blocks = list(block_splitter('abcdefghi', 50, weights.get))
self.assertEqual(repr(blocks), "[<WeightedSequence ['a', 'b'], weight=21>, <WeightedSequence ['c'], weight=100>, <WeightedSequence ['d', 'e', 'f'], weight=40>, <WeightedSequence ['g', 'h'], weight=47>, <WeightedSequence ['i'], weight=25>]")
def test_split_in_blocks(self):
weights = dict([('a', 11), ('b', 10), ('c', 100), ('d', 15), ('e', 20),
('f', 5), ('g', 30), ('h', 17), ('i', 25)])
blocks = list(split_in_blocks('abcdefghi', 1, weights.get))
self.assertEqual(len(blocks), 1)
blocks = list(split_in_blocks('abcdefghi', 2, weights.get))
self.assertEqual(len(blocks), 3)
self.assertEqual(repr(blocks), "[<WeightedSequence ['f', 'b', 'a', 'd', 'h', 'e', 'i'], weight=103>, <WeightedSequence ['g'], weight=30>, <WeightedSequence ['c'], weight=100>]")
def test_split_with_kind(self):
Source = namedtuple('Source', 'typology, weight')
s1 = Source('point', 1)
s2 = Source('point', 1)
s3 = Source('area', 2)
s4 = Source('area', 4)
s5 = Source('area', 4)
blocks = list(
block_splitter([s1, s2, s3, s4, s5], max_weight=6,
weight=attrgetter('weight'),
key=attrgetter('typology')))
self.assertEqual(list(map(len, blocks)), [2, 2, 1])
self.assertEqual([b.weight for b in blocks], [2, 6, 4])
blocks = list(
split_in_blocks([s1, s2, s3, s4, s5], hint=6,
weight=attrgetter('weight'),
key=attrgetter('typology')))
self.assertEqual(list(map(len, blocks)), [1, 1, 1, 2])
self.assertEqual([b.weight for b in blocks], [2, 4, 4, 2])
class AssertCloseTestCase(unittest.TestCase):
def test_different(self):
a = [1, 2]
b = [1, 2, 3]
        with self.assertRaises(AssertionError): # different lengths
assert_close(a, b)
with self.assertRaises(AssertionError): # different floats
assert_close([1, 2, 3.1], b)
with self.assertRaises(AssertionError): # None and float
assert_close([1, 2, None], b)
with self.assertRaises(AssertionError): # nested dicts
gmf1 = {'a': {'PGA': [0.1, 0.2], 'SA(0.1)': [0.3, 0.4]}}
gmf2 = {'a': {'PGA': [0.1, 0.2], 'SA(0.1)': [0.3, 0.41]}}
assert_close(gmf1, gmf2)
class C(object):
pass
c1 = C()
c2 = C()
c2.a = 1
with self.assertRaises(AssertionError): # different attributes
assert_close(c1, c2)
class DeprecatedTestCase(unittest.TestCase):
def test(self):
@deprecated(msg='Use dummy_new instead.')
def dummy():
pass
# check that a DeprecationWarning is printed
with mock.patch('warnings.warn') as warn:
dummy()
warning_msg, warning_type = warn.call_args[0]
self.assertIs(warning_type, DeprecationWarning)
self.assertIn(
'general_test.dummy has been deprecated. Use dummy_new instead.',
warning_msg)
# check that at the second call the warning is not printed
with mock.patch('warnings.warn') as warn:
dummy()
self.assertIsNone(warn.call_args)
class CachedPropertyTestCase(unittest.TestCase):
@cached_property
def one(self):
self.ncalls += 1
return 1
def test(self):
self.ncalls = 0
assert 'one' not in vars(self)
self.assertEqual(self.one, 1)
assert 'one' in vars(self)
self.assertEqual(self.__dict__['one'], 1)
self.assertEqual(self.ncalls, 1)
self.__dict__['one'] = 2
self.assertEqual(self.one, 2)
self.assertEqual(self.ncalls, 1)
def double(calc_id, val):
print((calc_id, val * 2))
class CompressTestCase(unittest.TestCase):
def test(self):
a = dict(a=numpy.array([9999.]))
self.assertEqual(a, decompress(compress(a)))
class RmsDiffTestCase(unittest.TestCase):
def test(self):
a = numpy.array([[.1, .2, .3],
[1.1, 1.2, 1.3]])
b = numpy.array([[.11, .21, .31],
[1.1, 1.21, 1.31]])
rms, index = rmsdiff(a, b)
print(rms, index)
class RandomChoiceTestCase(unittest.TestCase):
def test_advance(self):
chars = numpy.array(list('ABCDEFGHIJK'))
ch1 = random_choice(chars, 1_000_000, 0)
ch2 = random_choice(chars, 2_000_000, 1_000_000)
ch3 = random_choice(chars, 3_000_000, 3_000_000)
ch_tot = numpy.concatenate([ch1, ch2, ch3])
ch6 = random_choice(chars, 6_000_000, 0)
numpy.testing.assert_equal(ch_tot, ch6) |
347 | wrap mitmproxy | #!/usr/bin/env python
#
# Helper tool to enable/disable OS X proxy and wrap mitmproxy
#
# Get usage information with:
#
# mitmproxywrapper.py -h
#
import argparse
import contextlib
import os
import re
import signal
import socketserver
import subprocess
import sys
class Wrapper:
def __init__(self, port, use_mitmweb, extra_arguments=None):
self.port = port
self.use_mitmweb = use_mitmweb
self.extra_arguments = extra_arguments
def run_networksetup_command(self, *arguments):
return subprocess.check_output(
["sudo", "networksetup"] + list(arguments)
).decode()
def proxy_state_for_service(self, service):
state = self.run_networksetup_command("-getwebproxy", service).splitlines()
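        # Each output line is assumed to look like "Enabled: Yes" or
        # "Server: 127.0.0.1"; the regex below turns them into a key/value dict.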
return dict([re.findall(r"([^:]+): (.*)", line)[0] for line in state])
def enable_proxy_for_service(self, service):
print(f"Enabling proxy on {service}...")
for subcommand in ["-setwebproxy", "-setsecurewebproxy"]:
self.run_networksetup_command(
subcommand, service, "127.0.0.1", str(self.port)
)
def disable_proxy_for_service(self, service):
print(f"Disabling proxy on {service}...")
for subcommand in ["-setwebproxystate", "-setsecurewebproxystate"]:
self.run_networksetup_command(subcommand, service, "Off")
def interface_name_to_service_name_map(self):
order = self.run_networksetup_command("-listnetworkserviceorder")
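        # `networksetup -listnetworkserviceorder` typically prints entries such as
        #   (1) Wi-Fi
        #   (Hardware Port: Wi-Fi, Device: en0)
        # (assumed format); the regex captures each service name and its device.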
mapping = re.findall(
r"\(\d+\)\s(.*)$\n\(.*Device: (.+)\)$", order, re.MULTILINE
)
return {b: a for (a, b) in mapping}
def run_command_with_input(self, command, input):
popen = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
(stdout, stderr) = popen.communicate(input.encode())
return stdout.decode()
def primary_interace_name(self):
scutil_script = "get State:/Network/Global/IPv4\nd.show\n"
stdout = self.run_command_with_input("/usr/sbin/scutil", scutil_script)
(interface,) = re.findall(r"PrimaryInterface\s*:\s*(.+)", stdout)
return interface
def primary_service_name(self):
return self.interface_name_to_service_name_map()[self.primary_interace_name()]
def proxy_enabled_for_service(self, service):
return self.proxy_state_for_service(service)["Enabled"] == "Yes"
def toggle_proxy(self):
new_state = not self.proxy_enabled_for_service(self.primary_service_name())
for service_name in self.connected_service_names():
if self.proxy_enabled_for_service(service_name) and not new_state:
self.disable_proxy_for_service(service_name)
elif not self.proxy_enabled_for_service(service_name) and new_state:
self.enable_proxy_for_service(service_name)
def connected_service_names(self):
scutil_script = "list\n"
stdout = self.run_command_with_input("/usr/sbin/scutil", scutil_script)
service_ids = re.findall(r"State:/Network/Service/(.+)/IPv4", stdout)
service_names = []
for service_id in service_ids:
scutil_script = f"show Setup:/Network/Service/{service_id}\n"
stdout = self.run_command_with_input("/usr/sbin/scutil", scutil_script)
(service_name,) = re.findall(r"UserDefinedName\s*:\s*(.+)", stdout)
service_names.append(service_name)
return service_names
def METHOD_NAME(self):
with self.wrap_proxy():
cmd = ["mitmweb" if self.use_mitmweb else "mitmproxy", "-p", str(self.port)]
if self.extra_arguments:
cmd.extend(self.extra_arguments)
subprocess.check_call(cmd)
def wrap_honeyproxy(self):
with self.wrap_proxy():
popen = subprocess.Popen("honeyproxy.sh")
try:
popen.wait()
except KeyboardInterrupt:
popen.terminate()
@contextlib.contextmanager
def wrap_proxy(self):
connected_service_names = self.connected_service_names()
for service_name in connected_service_names:
if not self.proxy_enabled_for_service(service_name):
self.enable_proxy_for_service(service_name)
yield
for service_name in connected_service_names:
if self.proxy_enabled_for_service(service_name):
self.disable_proxy_for_service(service_name)
@classmethod
def ensure_superuser(cls):
if os.getuid() != 0:
print("Relaunching with sudo...")
os.execv("/usr/bin/sudo", ["/usr/bin/sudo"] + sys.argv)
@classmethod
def main(cls):
parser = argparse.ArgumentParser(
description="Helper tool for OS X proxy configuration and mitmproxy.",
epilog="Any additional arguments will be passed on unchanged to mitmproxy/mitmweb.",
)
parser.add_argument(
"-t",
"--toggle",
action="store_true",
help="just toggle the proxy configuration",
)
# parser.add_argument('--honeyproxy', action='store_true', help='run honeyproxy instead of mitmproxy')
parser.add_argument(
"-p",
"--port",
type=int,
help="override the default port of 8080",
default=8080,
)
parser.add_argument(
"-P",
"--port-random",
action="store_true",
help="choose a random unused port",
)
parser.add_argument(
"-w",
"--web",
action="store_true",
help="web interface: run mitmweb instead of mitmproxy",
)
args, extra_arguments = parser.parse_known_args()
port = args.port
# Allocate a random unused port, and hope no other process steals it before mitmproxy/mitmweb uses it.
# Passing the allocated socket to mitmproxy/mitmweb would be nicer of course.
if args.port_random:
with socketserver.TCPServer(("localhost", 0), None) as s:
port = s.server_address[1]
print(f"Using random port {port}...")
wrapper = cls(port=port, use_mitmweb=args.web, extra_arguments=extra_arguments)
def handler(signum, frame):
print("Cleaning up proxy settings...")
wrapper.toggle_proxy()
signal.signal(signal.SIGINT, handler)
if args.toggle:
wrapper.toggle_proxy()
# elif args.honeyproxy:
# wrapper.wrap_honeyproxy()
else:
wrapper.METHOD_NAME()
if __name__ == "__main__":
Wrapper.ensure_superuser()
Wrapper.main() |
348 | test creates new stats model if not | # coding: utf-8
#
# Copyright 2021 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for jobs.batch_jobs.user_stats_computation_jobs."""
from __future__ import annotations
import datetime
from core import feconf
from core.jobs import job_test_utils
from core.jobs.batch_jobs import user_stats_computation_jobs
from core.jobs.types import job_run_result
from core.platform import models
from typing import Final, Type
MYPY = False
if MYPY:
from mypy_imports import user_models
(user_models,) = models.Registry.import_models([models.Names.USER])
class CollectWeeklyDashboardStatsJobTests(job_test_utils.JobTestBase):
JOB_CLASS: Type[
user_stats_computation_jobs.CollectWeeklyDashboardStatsJob
] = user_stats_computation_jobs.CollectWeeklyDashboardStatsJob
VALID_USER_ID_1: Final = 'uid_%s' % (
'a' * feconf.USER_ID_RANDOM_PART_LENGTH
)
VALID_USER_ID_2: Final = 'uid_%s' % (
'b' * feconf.USER_ID_RANDOM_PART_LENGTH
)
def setUp(self) -> None:
super().setUp()
self.formated_datetime = datetime.datetime.utcnow().strftime(
feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT)
def test_empty_storage(self) -> None:
self.assert_job_output_is_empty()
def test_updates_existing_stats_model_when_no_values_are_provided(
self
) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
)
self.put_multi([user_settings_model, user_stats_model])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(
new_user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
)
def test_fails_when_existing_stats_has_wrong_schema_version(self) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
schema_version=0
)
self.put_multi([user_settings_model, user_stats_model])
with self.assertRaisesRegex(
Exception,
'Sorry, we can only process v1-v%d dashboard stats schemas at '
'present.' % feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION
):
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(new_user_stats_model.weekly_creator_stats_list, [])
def test_updates_existing_stats_model_when_values_are_provided(
self
) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_stats_model = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1,
num_ratings=10,
average_ratings=4.5,
total_plays=22,
)
self.put_multi([user_settings_model, user_stats_model])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1')
])
new_user_stats_model = (
user_models.UserStatsModel.get(self.VALID_USER_ID_1))
# Ruling out the possibility of None for mypy type checking.
assert new_user_stats_model is not None
self.assertEqual(
new_user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 10,
'average_ratings': 4.5,
'total_plays': 22
}
}]
)
def METHOD_NAME(self) -> None:
user_settings_model = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_settings_model.update_timestamps()
user_settings_model.put()
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='NEW MODELS SUCCESS: 1')
])
user_stats_model = user_models.UserStatsModel.get(self.VALID_USER_ID_1)
# Ruling out the possibility of None for mypy type checking.
assert user_stats_model is not None
self.assertEqual(
user_stats_model.weekly_creator_stats_list,
[{
self.formated_datetime: {
'num_ratings': 0,
'average_ratings': None,
'total_plays': 0
}
}]
)
def test_handles_multiple_models(self) -> None:
user_settings_model_1 = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_1, email='[email protected]')
user_settings_model_2 = self.create_model(
user_models.UserSettingsModel,
id=self.VALID_USER_ID_2, email='[email protected]')
user_stats_model_1 = self.create_model(
user_models.UserStatsModel,
id=self.VALID_USER_ID_1)
self.put_multi([
user_settings_model_1, user_settings_model_2, user_stats_model_1])
self.assert_job_output_is([
job_run_result.JobRunResult(stdout='OLD MODELS SUCCESS: 1'),
job_run_result.JobRunResult(stdout='NEW MODELS SUCCESS: 1')
])
user_stats_model = user_models.UserStatsModel.get(self.VALID_USER_ID_2)
self.assertIsNotNone(user_stats_model) |
349 | create response | from boto.compat import http_client
from tests.compat import mock, unittest
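# Illustrative usage sketch: a subclass sets `connection_class` to the boto
# connection under test, queues a canned response with
# `set_http_response(200, body=...)`, calls the service method, and then
# inspects the captured request via `assert_request_parameters(...)`.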
class AWSMockServiceTestCase(unittest.TestCase):
"""Base class for mocking aws services."""
# This param is used by the unittest module to display a full
# diff when assert*Equal methods produce an error message.
maxDiff = None
connection_class = None
def setUp(self):
self.https_connection = mock.Mock(spec=http_client.HTTPSConnection)
self.https_connection.debuglevel = 0
self.https_connection_factory = (
mock.Mock(return_value=self.https_connection), ())
self.service_connection = self.create_service_connection(
https_connection_factory=self.https_connection_factory,
aws_access_key_id='aws_access_key_id',
aws_secret_access_key='aws_secret_access_key')
self.initialize_service_connection()
def initialize_service_connection(self):
self.actual_request = None
self.original_mexe = self.service_connection._mexe
self.service_connection._mexe = self._mexe_spy
self.proxy = None
self.use_proxy = False
def create_service_connection(self, **kwargs):
if self.connection_class is None:
raise ValueError("The connection_class class attribute must be "
"set to a non-None value.")
return self.connection_class(**kwargs)
def _mexe_spy(self, request, *args, **kwargs):
self.actual_request = request
return self.original_mexe(request, *args, **kwargs)
def METHOD_NAME(self, status_code, reason='', header=[], body=None):
if body is None:
body = self.default_body()
response = mock.Mock(spec=http_client.HTTPResponse)
response.status = status_code
response.read.return_value = body
response.reason = reason
response.getheaders.return_value = header
response.msg = dict(header)
def overwrite_header(arg, default=None):
header_dict = dict(header)
if arg in header_dict:
return header_dict[arg]
else:
return default
response.getheader.side_effect = overwrite_header
return response
def assert_request_parameters(self, params, ignore_params_values=None):
"""Verify the actual parameters sent to the service API."""
request_params = self.actual_request.params.copy()
if ignore_params_values is not None:
for param in ignore_params_values:
try:
del request_params[param]
except KeyError:
pass
self.assertDictEqual(request_params, params)
def set_http_response(self, status_code, reason='', header=[], body=None):
http_response = self.METHOD_NAME(status_code, reason, header, body)
self.https_connection.getresponse.return_value = http_response
def default_body(self):
return ''
class MockServiceWithConfigTestCase(AWSMockServiceTestCase):
def setUp(self):
super(MockServiceWithConfigTestCase, self).setUp()
self.environ = {}
self.config = {}
self.config_patch = mock.patch('boto.provider.config.get',
self.get_config)
self.has_config_patch = mock.patch('boto.provider.config.has_option',
self.has_config)
self.environ_patch = mock.patch('os.environ', self.environ)
self.config_patch.start()
self.has_config_patch.start()
self.environ_patch.start()
def tearDown(self):
self.config_patch.stop()
self.has_config_patch.stop()
self.environ_patch.stop()
def has_config(self, section_name, key):
try:
self.config[section_name][key]
return True
except KeyError:
return False
def get_config(self, section_name, key, default=None):
try:
return self.config[section_name][key]
except KeyError:
return None |
350 | submit ftp creation | #!/usr/local/CyberCP/bin/python
import os,sys
sys.path.append('/usr/local/CyberCP')
import django
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "CyberCP.settings")
django.setup()
from plogical import mysqlUtilities as sql
import subprocess
from plogical import CyberCPLogFileWriter as logging
import os
import shlex
import argparse
from websiteFunctions.models import Websites, ChildDomains
from loginSystem.models import Administrator
import pwd
import grp
import hashlib
from ftp.models import Users
from datetime import datetime
from plogical.processUtilities import ProcessUtilities
class FTPUtilities:
@staticmethod
def createNewFTPAccount(udb,upass,username,password,path):
try:
cmd = []
cmd.append("chown")
cmd.append("-R")
cmd.append("ftpuser:2001")
cmd.append(path)
res = subprocess.call(cmd)
if res == 1:
print("Permissions not changed.")
else:
print("User permissions setted.")
query = "INSERT INTO ftp_ftpuser (userid,passwd,homedir) VALUES ('" + username + "'" +","+"'"+password+"'"+","+"'"+path+"'"+");"
print(query)
sql.mysqlUtilities.SendQuery(udb,upass, "ftp", query)
except BaseException as msg:
logging.CyberCPLogFileWriter.writeToFile(
str(msg) + " [createNewFTPAccount]")
return 0
return 1
@staticmethod
def changePermissions(directory):
try:
command = "sudo chmod -R 775 " + directory
cmd = shlex.split(command)
res = subprocess.call(cmd)
if res == 1:
print("Permissions not changed.")
return 0
else:
print("User permissions setted.")
command = "sudo chown -R lscpd:cyberpanel " + directory
cmd = shlex.split(command)
res = subprocess.call(cmd)
if res == 1:
return 0
else:
return 1
except BaseException as msg:
logging.CyberCPLogFileWriter.writeToFile(
str(msg) + " [createNewFTPAccount]")
return 0
return 1
@staticmethod
def ftpFunctions(path,externalApp):
try:
command = 'mkdir %s' % (path)
ProcessUtilities.executioner(command, externalApp)
return 1,'None'
except BaseException as msg:
logging.CyberCPLogFileWriter.writeToFile(
str(msg) + " [ftpFunctions]")
return 0, str(msg)
@staticmethod
def METHOD_NAME(domainName, userName, password, path, owner, api = None):
try:
## need to get gid and uid
try:
website = ChildDomains.objects.get(domain=domainName)
externalApp = website.master.externalApp
except:
website = Websites.objects.get(domain=domainName)
externalApp = website.externalApp
uid = pwd.getpwnam(externalApp).pw_uid
gid = grp.getgrnam(externalApp).gr_gid
## gid , uid ends
path = path.lstrip("/")
if path != 'None':
path = "/home/" + domainName + "/" + path
## Security Check
if path.find("..") > -1:
raise BaseException("Specified path must be inside virtual host home!")
result = FTPUtilities.ftpFunctions(path, externalApp)
if result[0] == 1:
pass
else:
raise BaseException(result[1])
else:
path = "/home/" + domainName
if os.path.islink(path):
print("0, %s file is symlinked." % (path))
return 0
ProcessUtilities.decideDistro()
if ProcessUtilities.ubuntu22Check == 1:
from crypt import crypt, METHOD_SHA512
FTPPass = crypt(password, METHOD_SHA512)
else:
hash = hashlib.md5()
hash.update(password.encode('utf-8'))
FTPPass = hash.hexdigest()
admin = Administrator.objects.get(userName=owner)
if api == '0':
userName = admin.userName + "_" + userName
if website.package.ftpAccounts == 0:
user = Users(domain=website, user=userName, password=FTPPass, uid=uid, gid=gid,
dir=path,
quotasize=website.package.diskSpace,
status="1",
ulbandwidth=500000,
dlbandwidth=500000,
date=datetime.now())
user.save()
elif website.users_set.all().count() < website.package.ftpAccounts:
user = Users(domain=website, user=userName, password=FTPPass, uid=uid, gid=gid,
dir=path, quotasize=website.package.diskSpace,
status="1",
ulbandwidth=500000,
dlbandwidth=500000,
date=datetime.now())
user.save()
else:
raise BaseException("Exceeded maximum amount of FTP accounts allowed for the package.")
print("1,None")
return 1,'None'
except BaseException as msg:
logging.CyberCPLogFileWriter.writeToFile(str(msg) + " [submitFTPCreation]")
print("0,"+str(msg))
return 0, str(msg)
@staticmethod
def submitFTPDeletion(ftpUsername):
try:
ftp = Users.objects.get(user=ftpUsername)
ftp.delete()
return 1,'None'
except BaseException as msg:
return 0, str(msg)
@staticmethod
def changeFTPPassword(userName, password):
try:
ProcessUtilities.decideDistro()
if ProcessUtilities.ubuntu22Check == 1:
from crypt import crypt, METHOD_SHA512
FTPPass = crypt(password, METHOD_SHA512)
else:
hash = hashlib.md5()
hash.update(password.encode('utf-8'))
FTPPass = hash.hexdigest()
ftp = Users.objects.get(user=userName)
ftp.password = FTPPass
ftp.save()
return 1, None
except BaseException as msg:
return 0,str(msg)
@staticmethod
def getFTPRecords(virtualHostName):
try:
website = Websites.objects.get(domain=virtualHostName)
return website.users_set.all()
except:
## There does not exist a zone for this domain.
pass
def main():
parser = argparse.ArgumentParser(description='CyberPanel Installer')
parser.add_argument('function', help='Specific a function to call!')
parser.add_argument('--domainName', help='Domain to create FTP for!')
parser.add_argument('--userName', help='Username for FTP Account')
parser.add_argument('--password', help='Password for FTP Account')
parser.add_argument('--owner', help='FTP Account owner.')
parser.add_argument('--path', help='Path to ftp directory!')
parser.add_argument('--api', help='API Check!')
args = parser.parse_args()
if args.function == "submitFTPCreation":
FTPUtilities.METHOD_NAME(args.domainName,args.userName, args.password, args.path, args.owner, args.api)
if __name__ == "__main__":
    main() |
351 | test sanitized render extensions | # -*- encoding: utf-8 -*-
import unittest
import textwrap
from isso import config
from isso.utils import html
class TestHTML(unittest.TestCase):
def test_markdown(self):
convert = html.Markdown(extensions=())
examples = [
("*Ohai!*", "<p><em>Ohai!</em></p>"),
("<em>Hi</em>", "<p><em>Hi</em></p>"),
("http://example.org/", '<p>http://example.org/</p>')]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_markdown_extensions(self):
convert = html.Markdown(extensions=("strikethrough", "superscript"))
examples = [
("~~strike~~ through", "<p><del>strike</del> through</p>"),
("sup^(script)", "<p>sup<sup>script</sup></p>")]
for (input, expected) in examples:
self.assertEqual(convert(input), expected)
def test_github_flavoured_markdown(self):
convert = html.Markdown(extensions=("fenced-code", ))
# without lang
_in = textwrap.dedent("""\
Hello, World
```
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code>#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
self.assertEqual(convert(_in), _out)
# w/ lang
_in = textwrap.dedent("""\
Hello, World
```python
#!/usr/bin/env python
print("Hello, World")""")
_out = textwrap.dedent("""\
<p>Hello, World</p>
<pre><code class="python">#!/usr/bin/env python
print("Hello, World")
</code></pre>""")
        self.assertEqual(convert(_in), _out)
def test_sanitizer(self):
sanitizer = html.Sanitizer(elements=[], attributes=[])
examples = [
('Look: <img src="..." />', 'Look: '),
('<a href="http://example.org/">Ha</a>',
['<a href="http://example.org/" rel="nofollow noopener">Ha</a>',
'<a rel="nofollow noopener" href="http://example.org/">Ha</a>']),
('<a href="sms:+1234567890">Ha</a>', '<a>Ha</a>'),
('<p style="visibility: hidden;">Test</p>', '<p>Test</p>'),
('<script>alert("Onoe")</script>', 'alert("Onoe")')]
for (input, expected) in examples:
if isinstance(expected, list):
self.assertIn(sanitizer.sanitize(input), expected)
else:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_sanitizer_extensions(self):
sanitizer = html.Sanitizer(elements=["img"], attributes=["src"])
examples = [
('<img src="cat.gif" />', '<img src="cat.gif">'),
('<script src="doge.js"></script>', '')]
for (input, expected) in examples:
self.assertEqual(sanitizer.sanitize(input), expected)
def test_render(self):
conf = config.new({
"markup": {
"options": "autolink",
"flags": "",
"allowed-elements": "",
"allowed-attributes": ""
}
})
renderer = html.Markup(conf.section("markup")).render
self.assertIn(renderer("http://example.org/ and sms:+1234567890"),
['<p><a href="http://example.org/" rel="nofollow noopener">http://example.org/</a> and sms:+1234567890</p>',
'<p><a rel="nofollow noopener" href="http://example.org/">http://example.org/</a> and sms:+1234567890</p>'])
def METHOD_NAME(self):
"""Options should be normalized from both dashed-case or snake_case (legacy)"""
conf = config.new({
"markup": {
"options": "no_intra_emphasis", # Deliberately snake_case
"flags": "",
"allowed-elements": "",
"allowed-attributes": ""
}
})
renderer = html.Markup(conf.section("markup")).render
self.assertEqual(renderer("foo_bar_baz"), '<p>foo_bar_baz</p>')
conf.set("markup", "options", "no-intra-emphasis") # dashed-case
renderer = html.Markup(conf.section("markup")).render
self.assertEqual(renderer("foo_bar_baz"), '<p>foo_bar_baz</p>')
def test_code_blocks(self):
convert = html.Markdown(extensions=('fenced-code',))
examples = [
("```\nThis is a code-fence. <hello>\n```", "<p><pre><code>This is a code-fence. <hello>\n</code></pre></p>"),
("```c++\nThis is a code-fence. <hello>\n```", "<p><pre><code class=\"c++\">This is a code-fence. <hello>\n</code></pre></p>"),
(" This is a four-character indent. <hello>", "<p><pre><code>This is a four-character indent. <hello>\n</code></pre></p>")]
for (input, expected) in examples:
self.assertEqual(convert(input), expected) |
352 | test flag v | """
Name: r.what test
Purpose: Tests r.what and its flags/options.
Author: Sunveer Singh, Google Code-in 2018
Copyright: (C) 2018 by Sunveer Singh and the GRASS Development Team
Licence: This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
"""
from grass.gunittest.case import TestCase
from grass.gunittest.main import test
from grass.gunittest.gmodules import SimpleModule
class Testrr(TestCase):
input = "elevation"
coordinates = (633614.08, 224125.12, 632972.36, 225382.87)
points = "comm_colleges"
@classmethod
def setUpClass(cls):
cls.use_temp_region()
cls.runModule("g.region", raster=cls.input, flags="p")
@classmethod
def tearDownClass(cls):
cls.del_temp_region()
def test_flag_n(self):
"""Testing output with flag n"""
string = """1|145096.8591495|154534.264883875||*
2|616341.4371495|146049.750883875||*
3|410595.7191495|174301.828883875||*
4|734153.6871495|169168.437883875||*
"""
r_what = SimpleModule(
"r.what", map=self.input, coordinates=self.coordinates, flags="n"
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
def test_flag_f(self):
"""Testing output with flag f"""
string = """5|706338.2501495|54889.417883875||*
6|758009.7501495|112019.898883875||*
7|754002.7501495|200902.234883875||*
8|704771.7501495|183364.484883875||*"""
r_what = SimpleModule(
"r.what", map=self.input, coordinates=self.coordinates, flags="f"
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
def test_flag_r(self):
"""Testing output with flag r"""
string = """9|399187.0631495|220018.859883875||*
10|685098.9371495|33282.089883875||*
11|577750.8131495|257153.109883875||*
12|794095.5621495|199742.671883875||*"""
r_what = SimpleModule(
"r.what", map=self.input, coordinates=self.coordinates, flags="r"
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
def test_flag_i(self):
"""Testing output with flag i"""
string = """13|634688.2501495|100629.616883875||*
14|287638.7811495|207582.624883875||*
15|366218.5321495|222940.625883875||*
16|385212.4371495|236593.109883875||*"""
r_what = SimpleModule(
"r.what", map=self.input, coordinates=self.coordinates, flags="i"
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
def test_flag_c(self):
"""Testing output with flag c"""
string = """17|628137.4371495|63995.550883875||*
18|782600.5631495|152698.890883875||*
19|502813.9381495|235232.577883875||*
20|705922.6251495|136589.359883875||*"""
r_what = SimpleModule(
"r.what", map=self.input, coordinates=self.coordinates, flags="c"
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
def METHOD_NAME(self):
"""Testing output with flag v"""
string = """21|620397.8131495|246847.640883875||*
22|738465.3751495|237233.983883875||*
23|708944.7501495|247632.296883875||*
24|526666.6871495|249780.312883875||*"""
r_what = SimpleModule(
"r.what",
map=self.input,
coordinates=self.coordinates,
flags="v",
points=self.points,
)
r_what.outputs.stdout = string
self.assertLooksLike(reference=string, actual=r_what.outputs.stdout)
if __name__ == "__main__":
from grass.gunittest.main import test
test() |
353 | release level | """
The MIT License (MIT)
Copyright (c) 2015-2021 Rapptz
Copyright (c) 2021-present Pycord Development
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import datetime
import re
import warnings
from importlib.metadata import PackageNotFoundError, version
from ._typed_dict import TypedDict
__all__ = ("__version__", "VersionInfo", "version_info")
from typing import Literal, NamedTuple
from .utils import deprecated
try:
__version__ = version("py-cord")
except PackageNotFoundError:
# Package is not installed
try:
from setuptools_scm import get_version # type: ignore[import]
__version__ = get_version()
except ImportError:
# setuptools_scm is not installed
__version__ = "0.0.0"
warnings.warn(
(
"Package is not installed, and setuptools_scm is not installed. "
f"As a fallback, {__name__}.__version__ will be set to {__version__}"
),
RuntimeWarning,
stacklevel=2,
)
class AdvancedVersionInfo(TypedDict):
serial: int
build: int | None
commit: str | None
date: datetime.date | None
class VersionInfo(NamedTuple):
major: int
minor: int
micro: int
releaselevel: Literal["alpha", "beta", "candidate", "final"]
# We can't set instance attributes on a NamedTuple, so we have to use a
# global variable to store the advanced version info.
@property
def advanced(self) -> AdvancedVersionInfo:
return _advanced
@advanced.setter
def advanced(self, value: object) -> None:
global _advanced
_advanced = value
@property
@deprecated("releaselevel", "2.4")
def METHOD_NAME(self) -> Literal["alpha", "beta", "candidate", "final"]:
return self.releaselevel
@property
@deprecated('.advanced["serial"]', "2.4")
def serial(self) -> int:
return self.advanced["serial"]
@property
@deprecated('.advanced["build"]', "2.4")
def build(self) -> int | None:
return self.advanced["build"]
@property
@deprecated('.advanced["commit"]', "2.4")
def commit(self) -> str | None:
return self.advanced["commit"]
@property
@deprecated('.advanced["date"]', "2.4")
def date(self) -> datetime.date | None:
return self.advanced["date"]
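# The pattern below targets setuptools_scm-style version strings; illustrative
# examples that should match include "2.5.0", "2.5.0rc1" and
# "2.5.0.dev3+g1a2b3c4d.d20240101" (hypothetical version numbers).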
version_regex = re.compile(
r"^(?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.(?P<patch>\d+))?"
r"(?:(?P<level>rc|a|b)(?P<serial>\d+))?"
r"(?:\.dev(?P<build>\d+))?"
r"(?:\+(?:(?:g(?P<commit>[a-fA-F0-9]{4,40})(?:\.d(?P<date>\d{4}\d{2}\d{2})|))|d(?P<date1>\d{4}\d{2}\d{2})))?$"
)
version_match = version_regex.match(__version__)
if version_match is None:
raise RuntimeError(f"Invalid version string: {__version__}")
raw_info = version_match.groupdict()
level_info: Literal["alpha", "beta", "candidate", "final"]
if raw_info["level"] == "a":
level_info = "alpha"
elif raw_info["level"] == "b":
level_info = "beta"
elif raw_info["level"] == "rc":
level_info = "candidate"
elif raw_info["level"] is None:
level_info = "final"
else:
raise RuntimeError("Invalid release level")
if (raw_date := raw_info["date"] or raw_info["date1"]) is not None:
date_info = datetime.date(
int(raw_date[:4]),
int(raw_date[4:6]),
int(raw_date[6:]),
)
else:
date_info = None
version_info: VersionInfo = VersionInfo(
major=int(raw_info["major"] or 0) or None,
minor=int(raw_info["minor"] or 0) or None,
micro=int(raw_info["patch"] or 0) or None,
releaselevel=level_info,
)
_advanced = AdvancedVersionInfo(
serial=raw_info["serial"],
build=int(raw_info["build"] or 0) or None,
commit=raw_info["commit"],
date=date_info,
) |
354 | clear all alru caches | # The MIT License
#
# Copyright (c) 2018 aio-libs team https://github.com/aio-libs/
# Copyright (c) 2017 Ocean S. A. https://ocean.io/
# Copyright (c) 2016-2017 WikiBusiness Corporation http://wikibusiness.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import asyncio
import os
import weakref
from collections import OrderedDict
from functools import _CacheInfo, _make_key, partial, wraps
__version__ = "1.0.2"
__all__ = ("alru_cache", "clear_all_alru_caches")
_is_ci = (os.environ.get("CI") or "0").lower() in ("1", "true")
_all_wrapped = weakref.WeakSet()
def METHOD_NAME():
for wrapped in _all_wrapped:
wrapped.cache_clear()
def unpartial(fn):
while hasattr(fn, "func"):
fn = fn.func
return fn
def _done_callback(fut, task):
if task.cancelled():
fut.cancel()
return
exc = task.exception()
if exc is not None:
fut.set_exception(exc)
return
fut.set_result(task.result())
def _cache_invalidate(wrapped, typed, *args, **kwargs):
key = _make_key(args, kwargs, typed)
exists = key in wrapped._cache
if exists:
wrapped._cache.pop(key)
return exists
def _cache_clear(wrapped):
wrapped.hits = wrapped.misses = 0
wrapped._cache = OrderedDict()
wrapped.tasks = set()
def _open(wrapped):
if not wrapped.closed:
raise RuntimeError("alru_cache is not closed")
was_closed = (
wrapped.hits == wrapped.misses == len(wrapped.tasks) == len(wrapped._cache) == 0
)
if not was_closed:
raise RuntimeError("alru_cache was not closed correctly")
wrapped.closed = False
def _close(wrapped, *, cancel=False, return_exceptions=True):
if wrapped.closed:
raise RuntimeError("alru_cache is closed")
wrapped.closed = True
if cancel:
for task in wrapped.tasks:
            if not task.done(): # not sure if this is possible
task.cancel()
return _wait_closed(wrapped, return_exceptions=return_exceptions)
async def _wait_closed(wrapped, *, return_exceptions):
wait_closed = asyncio.gather(*wrapped.tasks, return_exceptions=return_exceptions)
wait_closed.add_done_callback(partial(_close_waited, wrapped))
ret = await wait_closed
# hack to get _close_waited callback to be executed
await asyncio.sleep(0)
return ret
def _close_waited(wrapped, _):
wrapped.cache_clear()
def _cache_info(wrapped, maxsize):
return _CacheInfo(
wrapped.hits,
wrapped.misses,
maxsize,
len(wrapped._cache),
)
def __cache_touch(wrapped, key):
try:
wrapped._cache.move_to_end(key)
    except KeyError: # not sure if this is possible
pass
def _cache_hit(wrapped, key):
wrapped.hits += 1
__cache_touch(wrapped, key)
def _cache_miss(wrapped, key):
wrapped.misses += 1
__cache_touch(wrapped, key)
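# Illustrative usage sketch (names are hypothetical):
#
#     @alru_cache(maxsize=32)
#     async def fetch_user(user_id):
#         return await backend.get(user_id)
#
# Later: `fetch_user.cache_info()`, `fetch_user.invalidate(42)`, and
# `await fetch_user.close()` during shutdown.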
def alru_cache(
fn=None,
maxsize=128,
typed=False,
*,
cache_exceptions=True,
):
def wrapper(fn):
_origin = unpartial(fn)
if not asyncio.iscoroutinefunction(_origin):
raise RuntimeError("Coroutine function is required, got {}".format(fn))
# functools.partialmethod support
if hasattr(fn, "_make_unbound_method"):
fn = fn._make_unbound_method()
@wraps(fn)
async def wrapped(*fn_args, **fn_kwargs):
if wrapped.closed:
raise RuntimeError("alru_cache is closed for {}".format(wrapped))
loop = asyncio.get_event_loop()
key = _make_key(fn_args, fn_kwargs, typed)
fut = wrapped._cache.get(key)
if fut is not None:
if not fut.done():
_cache_hit(wrapped, key)
return await asyncio.shield(fut)
exc = fut._exception
if exc is None or cache_exceptions:
_cache_hit(wrapped, key)
return fut.result()
# exception here and cache_exceptions == False
wrapped._cache.pop(key)
fut = loop.create_future()
task = loop.create_task(fn(*fn_args, **fn_kwargs))
task.add_done_callback(partial(_done_callback, fut))
wrapped.tasks.add(task)
task.add_done_callback(wrapped.tasks.remove)
wrapped._cache[key] = fut
if maxsize is not None and len(wrapped._cache) > maxsize:
wrapped._cache.popitem(last=False)
_cache_miss(wrapped, key)
return await asyncio.shield(fut)
_cache_clear(wrapped)
wrapped._origin = _origin
wrapped.closed = False
wrapped.cache_info = partial(_cache_info, wrapped, maxsize)
wrapped.cache_clear = partial(_cache_clear, wrapped)
wrapped.invalidate = partial(_cache_invalidate, wrapped, typed)
wrapped.close = partial(_close, wrapped)
wrapped.open = partial(_open, wrapped)
if _is_ci:
_all_wrapped.add(wrapped)
return wrapped
if fn is None:
return wrapper
if callable(fn) or hasattr(fn, "_make_unbound_method"):
return wrapper(fn)
raise NotImplementedError("{} decorating is not supported".format(fn)) |
355 | compute chunksize | # Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""Contains utility functions for frame partitioning."""
import re
from typing import Hashable, List
import contextlib
import numpy as np
import pandas
from modin.config import MinPartitionSize, NPartitions
from math import ceil
@contextlib.contextmanager
def _nullcontext(dummy_value=None): # noqa: PR01
"""
Act as a replacement for contextlib.nullcontext missing in older Python.
Notes
-----
contextlib.nullcontext is only available from Python 3.7.
"""
yield dummy_value
def METHOD_NAME(axis_len, num_splits, min_block_size=None):
"""
Compute the number of elements (rows/columns) to include in each partition.
Chunksize is defined the same for both axes.
Parameters
----------
axis_len : int
Element count in an axis.
num_splits : int
The number of splits.
min_block_size : int, optional
Minimum number of rows/columns in a single split.
If not specified, the value is assumed equal to ``MinPartitionSize``.
Returns
-------
int
Integer number of rows/columns to split the DataFrame will be returned.
"""
if min_block_size is None:
min_block_size = MinPartitionSize.get()
assert min_block_size > 0, "`min_block_size` should be > 0"
chunksize = axis_len // num_splits
if axis_len % num_splits:
chunksize += 1
# chunksize shouldn't be less than `min_block_size` to avoid a
# large amount of small partitions.
return max(chunksize, min_block_size)
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None):
"""
Split pandas DataFrame evenly based on the provided number of splits.
Parameters
----------
axis : {0, 1}
Axis to split across. 0 means index axis when 1 means column axis.
num_splits : int
Number of splits to separate the DataFrame into.
This parameter is ignored if `length_list` is specified.
result : pandas.DataFrame
DataFrame to split.
length_list : list of ints, optional
List of slice lengths to split DataFrame into. This is used to
return the DataFrame to its original partitioning schema.
Returns
-------
list of pandas.DataFrames
        Split dataframe represented by a list of frames.
"""
if num_splits == 1:
return [result]
if length_list is None:
length_list = get_length_list(result.shape[axis], num_splits)
# Inserting the first "zero" to properly compute cumsum indexing slices
length_list = np.insert(length_list, obj=0, values=[0])
sums = np.cumsum(length_list)
axis = 0 if isinstance(result, pandas.Series) else axis
# We do this to restore block partitioning
if axis == 0:
chunked = [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]
else:
chunked = [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)]
return [
# Sliced MultiIndex still stores all encoded values of the original index, explicitly
# asking it to drop unused values in order to save memory.
chunk.set_axis(chunk.axes[axis].remove_unused_levels(), axis=axis, copy=False)
if isinstance(chunk.axes[axis], pandas.MultiIndex)
else chunk
for chunk in chunked
]
def get_length_list(axis_len: int, num_splits: int) -> list:
"""
Compute partitions lengths along the axis with the specified number of splits.
Parameters
----------
axis_len : int
Element count in an axis.
num_splits : int
Number of splits along the axis.
Returns
-------
list of ints
List of integer lengths of partitions.
"""
chunksize = METHOD_NAME(axis_len, num_splits)
return [
chunksize
if (i + 1) * chunksize <= axis_len
else max(0, axis_len - i * chunksize)
for i in range(num_splits)
]
def length_fn_pandas(df):
"""
Compute number of rows of passed `pandas.DataFrame`.
Parameters
----------
df : pandas.DataFrame
Returns
-------
int
"""
assert isinstance(df, pandas.DataFrame)
return len(df) if len(df) > 0 else 0
def width_fn_pandas(df):
"""
Compute number of columns of passed `pandas.DataFrame`.
Parameters
----------
df : pandas.DataFrame
Returns
-------
int
"""
assert isinstance(df, pandas.DataFrame)
return len(df.columns) if len(df.columns) > 0 else 0
def get_group_names(regex: "re.Pattern") -> "List[Hashable]":
"""
Get named groups from compiled regex.
Unnamed groups are numbered.
Parameters
----------
regex : compiled regex
Returns
-------
list of column labels
"""
names = {v: k for k, v in regex.groupindex.items()}
return [names.get(1 + i, i) for i in range(regex.groups)]
def merge_partitioning(left, right, axis=1):
"""
Get the number of splits across the `axis` for the two dataframes being concatenated.
Parameters
----------
left : PandasDataframe
right : PandasDataframe
axis : int, default: 1
Returns
-------
int
"""
lshape = left._row_lengths_cache if axis == 0 else left._column_widths_cache
rshape = right._row_lengths_cache if axis == 0 else right._column_widths_cache
if lshape is not None and rshape is not None:
res_shape = sum(lshape) + sum(rshape)
chunk_size = METHOD_NAME(axis_len=res_shape, num_splits=NPartitions.get())
return ceil(res_shape / chunk_size)
else:
lsplits = left._partitions.shape[axis]
rsplits = right._partitions.shape[axis]
return min(lsplits + rsplits, NPartitions.get()) |
356 | gram matrix | r"""
Weyl Lie Conformal Algebra
Given a commutative ring `R`, a free `R`-module `M` and a
non-degenerate, skew-symmetric, bilinear pairing
`\langle \cdot,\cdot\rangle: M \otimes_R M \rightarrow R`. The *Weyl*
Lie conformal algebra associated to this datum is the free
`R[T]`-module generated by `M` plus a central vector `K`. The
non-vanishing `\lambda`-brackets are given by:
.. MATH::
[v_\lambda w] = \langle v, w\rangle K.
This is not an H-graded Lie conformal algebra. The choice of a
Lagrangian decomposition `M = L \oplus L^*` determines an H-graded
structure. For this H-graded Lie conformal algebra see the
:mod:`Bosonic Ghosts Lie conformal algebra<sage.algebras.\
lie_conformal_algebras.bosonic_ghosts_lie_conformal_algebra>`
AUTHORS:
- Reimundo Heluani (2019-08-09): Initial implementation.
"""
# *****************************************************************************
# Copyright (C) 2019 Reimundo Heluani <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
from .lie_conformal_algebra_with_structure_coefs import \
LieConformalAlgebraWithStructureCoefficients
from sage.matrix.special import identity_matrix
from sage.structure.indexed_generators import standardize_names_index_set
class WeylLieConformalAlgebra(LieConformalAlgebraWithStructureCoefficients):
r"""
The Weyl Lie conformal algebra.
INPUT:
- ``R`` -- a commutative ring; the base ring of this Lie
conformal algebra.
- ``ngens``: an even positive Integer (default `2`); The number
of non-central generators of this Lie conformal algebra.
- ``gram_matrix``: a matrix (default: ``None``); A non-singular
skew-symmetric square matrix with coefficients in `R`.
- ``names`` -- a list or tuple of ``str``; alternative names
for the generators
- ``index_set`` -- an enumerated set; alternative indexing set
for the generators
OUTPUT:
The Weyl Lie conformal algebra with generators
`\alpha_i`, `i=1,...,ngens` and `\lambda`-brackets
.. MATH::
[{\alpha_i}_{\lambda} \alpha_j] = M_{ij} K,
where `M` is the ``gram_matrix`` above.
.. NOTE::
The returned Lie conformal algebra is not `H`-graded. For
a related `H`-graded Lie conformal algebra see
:class:`BosonicGhostsLieConformalAlgebra<sage.algebras.\
lie_conformal_algebras.bosonic_ghosts_lie_conformal_algebra\
.BosonicGhostsLieConformalAlgebra>`.
EXAMPLES::
sage: lie_conformal_algebras.Weyl(QQ)
The Weyl Lie conformal algebra with generators (alpha0, alpha1, K) over Rational Field
sage: R = lie_conformal_algebras.Weyl(QQbar, gram_matrix=Matrix(QQ,[[0,1],[-1,0]]), names = ('a','b'))
sage: R.inject_variables()
Defining a, b, K
sage: a.bracket(b)
{0: K}
sage: b.bracket(a)
{0: -K}
sage: R = lie_conformal_algebras.Weyl(QQbar, ngens=4)
sage: R.gram_matrix()
[ 0 0| 1 0]
[ 0 0| 0 1]
[-----+-----]
[-1 0| 0 0]
[ 0 -1| 0 0]
sage: R.inject_variables()
Defining alpha0, alpha1, alpha2, alpha3, K
sage: alpha0.bracket(alpha2)
{0: K}
sage: R = lie_conformal_algebras.Weyl(QQ); R.category()
Category of finitely generated Lie conformal algebras with basis over Rational Field
sage: R in LieConformalAlgebras(QQ).Graded()
False
sage: R.inject_variables()
Defining alpha0, alpha1, K
sage: alpha0.degree()
Traceback (most recent call last):
...
AttributeError: 'WeylLieConformalAlgebra_with_category.element_class' object has no attribute 'degree'
TESTS::
sage: lie_conformal_algebras.Weyl(ZZ, gram_matrix=identity_matrix(ZZ,3))
Traceback (most recent call last):
...
ValueError: The gram_matrix should be a non degenerate skew-symmetric 3 x 3 matrix, got [1 0 0]
[0 1 0]
[0 0 1]
"""
def __init__(self, R, ngens=None, METHOD_NAME=None, names=None,
index_set=None):
"""
Initialize self.
TESTS::
sage: V = lie_conformal_algebras.Weyl(QQ)
sage: TestSuite(V).run()
"""
from sage.matrix.matrix_space import MatrixSpace
if ngens:
from sage.rings.integer_ring import ZZ
if not (ngens in ZZ and not ngens % 2):
raise ValueError("ngens needs to be an even positive Integer, "
f"got {ngens}")
if METHOD_NAME is not None:
if ngens is None:
ngens = METHOD_NAME.dimensions()[0]
try:
assert (METHOD_NAME in MatrixSpace(R, ngens, ngens))
except AssertionError:
raise ValueError("The gram_matrix should be a skew-symmetric "
"{0} x {0} matrix, got {1}".format(ngens, METHOD_NAME))
if (not METHOD_NAME.is_skew_symmetric() or
METHOD_NAME.is_singular()):
raise ValueError("The gram_matrix should be a non degenerate "
"skew-symmetric {0} x {0} matrix, got {1}"
.format(ngens, METHOD_NAME))
elif METHOD_NAME is None:
if ngens is None:
ngens = 2
A = identity_matrix(R, ngens // 2)
from sage.matrix.special import block_matrix
METHOD_NAME = block_matrix([[R.zero(), A], [-A, R.zero()]])
latex_names = None
if (names is None) and (index_set is None):
names = 'alpha'
latex_names = tuple(r'\alpha_{%d}' % i
for i in range(ngens)) + ('K',)
names, index_set = standardize_names_index_set(names=names,
index_set=index_set,
ngens=ngens)
weyldict = {(i, j): {0: {('K', 0): METHOD_NAME[index_set.rank(i),
index_set.rank(j)]}}
for i in index_set for j in index_set}
super().__init__(R, weyldict, names=names,
latex_names=latex_names,
index_set=index_set,
central_elements=('K',))
self._gram_matrix = METHOD_NAME
def _repr_(self):
"""
The name of this Lie conformal algebra.
EXAMPLES::
sage: R = lie_conformal_algebras.Weyl(ZZ); R
The Weyl Lie conformal algebra with generators (alpha0, alpha1, K) over Integer Ring
"""
return "The Weyl Lie conformal algebra with generators {} over {}"\
.format(self.gens(), self.base_ring())
def METHOD_NAME(self):
r"""
The Gram matrix that specifies the `\lambda`-brackets of the
generators.
EXAMPLES::
sage: R = lie_conformal_algebras.Weyl(QQbar, ngens=4)
sage: R.gram_matrix()
[ 0 0| 1 0]
[ 0 0| 0 1]
[-----+-----]
[-1 0| 0 0]
[ 0 -1| 0 0]
"""
return self._gram_matrix |
357 | make graph match | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple graph matching functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.graph_editor import select
from tensorflow.python.framework import ops as tf_ops
__all__ = [
"op_type",
"OpMatcher",
]
def METHOD_NAME(graph_match):
"""Convert to a OpMatcher instance."""
if graph_match is None:
return None
if not isinstance(graph_match, OpMatcher):
graph_match = OpMatcher(graph_match)
return graph_match
def op_type(op_types, op=None):
"""Check if an op is of the given type.
Args:
op_types: tuple of strings containing the types to check against.
For instance: ("Add", "Const")
op: the operation to check (or None).
Returns:
if op is not None, return True if the op is of the correct type.
if op is None, return a lambda function which does the type checking.
"""
if isinstance(op_types, string_types):
    op_types = (op_types,)
if op is None:
return lambda op: op.node_def.op in op_types
else:
return op.node_def.op in op_types
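# Illustrative usage sketch (op names are hypothetical):
#   matcher = OpMatcher("^foo/add$").input_ops(op_type("Const"), True)
#   matcher(some_op)  # True iff the name matches and both inputs check out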
class OpMatcher(object):
"""Graph match class."""
def __init__(self, positive_filter):
"""Graph match constructor."""
self.positive_filters = []
self.input_op_matches = None
self.control_input_op_matches = None
self.output_op_matches = None
positive_filter = self._finalize_positive_filter(positive_filter)
self.positive_filters.append(positive_filter)
def _finalize_positive_filter(self, elem):
"""Convert to a filter function."""
if select.can_be_regex(elem):
regex_ = select.make_regex(elem)
return lambda op, regex=regex_: regex.search(op.name) is not None
elif isinstance(elem, tf_ops.Operation):
return lambda op, match_op=elem: op is match_op
elif callable(elem):
return elem
elif elem is True:
return lambda op: True
else:
raise ValueError("Cannot finalize the positive filter: {}".format(elem))
def __call__(self, op):
"""Evaluate if the op matches or not."""
if not isinstance(op, tf_ops.Operation):
raise TypeError("Expect tf.Operation, got: {}".format(type(op)))
for positive_filter in self.positive_filters:
if not positive_filter(op):
return False
if self.input_op_matches is not None:
if len(op.inputs) != len(self.input_op_matches):
return False
for input_t, input_op_match in zip(op.inputs, self.input_op_matches):
if input_op_match is None:
continue
if not input_op_match(input_t.op):
return False
if self.control_input_op_matches is not None:
if len(op.control_inputs) != len(self.control_input_op_matches):
return False
for cinput_op, cinput_op_match in zip(op.control_inputs,
self.control_input_op_matches):
if cinput_op_match is None:
continue
if not cinput_op_match(cinput_op):
return False
if self.output_op_matches is not None:
if len(op.outputs) != len(self.output_op_matches):
return False
for output_t, output_op_matches in zip(op.outputs,
self.output_op_matches):
if output_op_matches is None:
continue
if len(output_t.consumers()) != len(output_op_matches):
return False
for consumer_op, consumer_op_match in zip(output_t.consumers(),
output_op_matches):
if consumer_op_match is None:
continue
if not consumer_op_match(consumer_op):
return False
return True
def input_ops(self, *args):
"""Add input matches."""
if self.input_op_matches is not None:
raise ValueError("input_op_matches is already set.")
self.input_op_matches = []
for input_match in args:
self.input_op_matches.append(METHOD_NAME(input_match))
return self
def control_input_ops(self, *args):
"""Add input matches."""
if self.control_input_op_matches is not None:
raise ValueError("control_input_op_matches is already set.")
self.control_input_op_matches = []
for input_match in args:
self.control_input_op_matches.append(METHOD_NAME(input_match))
return self
def output_ops(self, *args):
"""Add output matches."""
if self.output_op_matches is not None:
raise ValueError("output_op_matches is already set.")
self.output_op_matches = []
for consumer_op_matches in args:
if consumer_op_matches is None:
self.output_op_matches.append(None)
if not isinstance(consumer_op_matches, list):
consumer_op_matches = [consumer_op_matches]
consumer_op_matches = [METHOD_NAME(consumer_op_match)
for consumer_op_match in consumer_op_matches]
self.output_op_matches.append(consumer_op_matches)
return self |
358 | test confusion matrix multiclass subset labels | #
# Copyright (c) 2019-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import dask.array as da
from cuml.dask.metrics import confusion_matrix
from cuml.testing.utils import stress_param, generate_random_labels
from sklearn.metrics import confusion_matrix as sk_confusion_matrix
import pytest
from cuml.internals.safe_imports import gpu_only_import
from itertools import chain, permutations
from cuml.internals.safe_imports import cpu_only_import
np = cpu_only_import("numpy")
cp = gpu_only_import("cupy")
@pytest.mark.mg
@pytest.mark.parametrize("chunks", ["auto", 2, 1])
def test_confusion_matrix(client, chunks):
y_true = da.from_array(cp.array([2, 0, 2, 2, 0, 1]), chunks=chunks)
y_pred = da.from_array(cp.array([0, 0, 2, 2, 0, 2]), chunks=chunks)
cm = confusion_matrix(y_true, y_pred)
ref = cp.array([[2, 0, 0], [0, 0, 1], [1, 0, 2]])
cp.testing.assert_array_equal(cm, ref)
@pytest.mark.mg
@pytest.mark.parametrize("chunks", ["auto", 2, 1])
def test_confusion_matrix_binary(client, chunks):
y_true = da.from_array(cp.array([0, 1, 0, 1]), chunks=chunks)
y_pred = da.from_array(cp.array([1, 1, 1, 0]), chunks=chunks)
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
ref = cp.array([0, 2, 1, 1])
cp.testing.assert_array_equal(ref, cp.array([tn, fp, fn, tp]))
@pytest.mark.mg
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("problem_type", ["binary", "multiclass"])
def test_confusion_matrix_random(n_samples, dtype, problem_type, client):
upper_range = 2 if problem_type == "binary" else 1000
y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
lambda rng: rng.randint(0, upper_range, n_samples).astype(dtype),
as_cupy=True,
)
y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)
cm = confusion_matrix(y_true, y_pred)
ref = sk_confusion_matrix(np_y_true, np_y_pred)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.mg
@pytest.mark.parametrize(
"normalize, expected_results",
[
("true", 0.333333333),
("pred", 0.333333333),
("all", 0.1111111111),
(None, 2),
],
)
def test_confusion_matrix_normalize(normalize, expected_results, client):
y_test = da.from_array(cp.array([0, 1, 2] * 6))
y_pred = da.from_array(cp.array(list(chain(*permutations([0, 1, 2])))))
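# Note (derived from the test data, not from the implementation): y_pred runs through
# all 6 permutations of [0, 1, 2] against the repeated true labels, so every cell of the
# 3x3 confusion matrix equals 2. Normalizing by "true" or "pred" therefore yields 1/3
# everywhere and "all" yields 1/9, which is why a single scalar suffices as the expected result.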
cm = confusion_matrix(y_test, y_pred, normalize=normalize)
cp.testing.assert_allclose(cm, cp.array(expected_results))
@pytest.mark.mg
@pytest.mark.parametrize("labels", [(0, 1), (2, 1), (2, 1, 4, 7), (2, 20)])
def METHOD_NAME(labels, client):
y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
lambda rng: rng.randint(0, 3, 10).astype(np.int32), as_cupy=True
)
y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)
ref = sk_confusion_matrix(np_y_true, np_y_pred, labels=labels)
labels = cp.array(labels, dtype=np.int32)
cm = confusion_matrix(y_true, y_pred, labels=labels)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4)
@pytest.mark.mg
@pytest.mark.parametrize("n_samples", [50, 3000, stress_param(500000)])
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("weights_dtype", ["int", "float"])
def test_confusion_matrix_random_weights(
n_samples, dtype, weights_dtype, client
):
y_true, y_pred, np_y_true, np_y_pred = generate_random_labels(
lambda rng: rng.randint(0, 10, n_samples).astype(dtype), as_cupy=True
)
y_true, y_pred = da.from_array(y_true), da.from_array(y_pred)
if weights_dtype == "int":
sample_weight = np.random.RandomState(0).randint(0, 10, n_samples)
else:
sample_weight = np.random.RandomState(0).rand(n_samples)
ref = sk_confusion_matrix(
np_y_true, np_y_pred, sample_weight=sample_weight
)
sample_weight = cp.array(sample_weight)
sample_weight = da.from_array(sample_weight)
cm = confusion_matrix(y_true, y_pred, sample_weight=sample_weight)
cp.testing.assert_array_almost_equal(ref, cm, decimal=4) |
359 | test uniqueness data check warnings | import numpy as np
import pandas as pd
import pytest
from evalml.data_checks import (
DataCheckActionCode,
DataCheckActionOption,
DataCheckMessageCode,
DataCheckWarning,
UniquenessDataCheck,
)
uniqueness_data_check_name = UniquenessDataCheck.name
def test_uniqueness_data_check_init():
uniqueness_check = UniquenessDataCheck("regression")
assert uniqueness_check.threshold == 0.50
uniqueness_check = UniquenessDataCheck("regression", threshold=0.0)
assert uniqueness_check.threshold == 0
uniqueness_check = UniquenessDataCheck("regression", threshold=0.5)
assert uniqueness_check.threshold == 0.5
uniqueness_check = UniquenessDataCheck("regression", threshold=1.0)
assert uniqueness_check.threshold == 1.0
with pytest.raises(
ValueError,
match="threshold must be a float between 0 and 1, inclusive.",
):
UniquenessDataCheck("regression", threshold=-0.1)
with pytest.raises(
ValueError,
match="threshold must be a float between 0 and 1, inclusive.",
):
UniquenessDataCheck("regression", threshold=1.1)
def test_uniqueness_data_check_uniqueness_score():
uniqueness_score = UniquenessDataCheck.uniqueness_score
# Test uniqueness for a simple series.
# [0,1,2,0,1,2,0,1,2,0]
data = pd.Series([x % 3 for x in range(10)])
scores = uniqueness_score(data)
ans = 0.66
assert scores == ans
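# Worked check (an observation from the expected values, not from the implementation):
# the value frequencies are 4/10, 3/10 and 3/10, and the score matches a Gini-style
# impurity, 1 - (0.4**2 + 0.3**2 + 0.3**2) = 0.66.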
# Test uniqueness for the same series, repeated. Should be the same score.
# [0,1,2,0,1,2,0,1,2,0,0,1,2,0,1,2,0,1,2,0]
data = pd.Series([x % 3 for x in range(10)] * 2)
scores = uniqueness_score(data)
ans = 0.66
assert scores == ans
# Test uniqueness for a simple series with NaN.
# [0,1,2,0,1,2,0,1,2,0]
data = pd.Series([x % 3 for x in range(10)] + [np.nan])
scores = uniqueness_score(data)
ans = 0.66
assert scores == ans
# Test uniqueness in each column of a DataFrame
data = pd.DataFrame(
{
"most_unique": [float(x) for x in range(10)], # [0,1,2,3,4,5,6,7,8,9]
"more_unique": [x % 5 for x in range(10)], # [0,1,2,3,4,0,1,2,3,4]
"unique": [x % 3 for x in range(10)], # [0,1,2,0,1,2,0,1,2,0]
"less_unique": [x % 2 for x in range(10)], # [0,1,0,1,0,1,0,1,0,1]
"not_unique": [float(1) for x in range(10)],
},
) # [1,1,1,1,1,1,1,1,1,1]
scores = data.apply(uniqueness_score)
ans = pd.Series(
{
"most_unique": 0.90,
"more_unique": 0.80,
"unique": 0.66,
"less_unique": 0.50,
"not_unique": 0.00,
},
)
assert scores.round(7).equals(ans)
def METHOD_NAME():
data = pd.DataFrame(
{
"regression_unique_enough": [float(x) for x in range(100)],
"regression_not_unique_enough": [float(1) for x in range(100)],
},
)
uniqueness_check = UniquenessDataCheck(problem_type="regression")
assert uniqueness_check.validate(data) == [
DataCheckWarning(
message="Input columns 'regression_not_unique_enough' for regression problem type are not unique enough.",
data_check_name=uniqueness_data_check_name,
message_code=DataCheckMessageCode.NOT_UNIQUE_ENOUGH,
details={
"columns": ["regression_not_unique_enough"],
"uniqueness_score": {"regression_not_unique_enough": 0.0},
},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=uniqueness_data_check_name,
metadata={"columns": ["regression_not_unique_enough"]},
),
],
).to_dict(),
]
data = pd.DataFrame(
{
"multiclass_too_unique": ["Cats", "Are", "Absolutely", "The", "Best"] * 20,
"multiclass_not_too_unique": ["Cats", "Cats", "Best", "Best", "Best"] * 20,
},
)
uniqueness_check = UniquenessDataCheck(problem_type="multiclass")
assert uniqueness_check.validate(data) == [
DataCheckWarning(
message="Input columns 'multiclass_too_unique' for multiclass problem type are too unique.",
data_check_name=uniqueness_data_check_name,
message_code=DataCheckMessageCode.TOO_UNIQUE,
details={
"columns": ["multiclass_too_unique"],
"uniqueness_score": {"multiclass_too_unique": 0.7999999999999999},
},
action_options=[
DataCheckActionOption(
DataCheckActionCode.DROP_COL,
data_check_name=uniqueness_data_check_name,
metadata={"columns": ["multiclass_too_unique"]},
),
],
).to_dict(),
] |
360 | cache stampede | import functools
import math
import random
import time
from datetime import datetime
from dateutil.parser import isoparse
from django.core.cache import cache as django_cache
from django_redis.client import DefaultClient
from django_redis.client.default import _main_exceptions
DEFERRED_FLAG = "__DEFERRED"
def METHOD_NAME(expire, beta=1):
"""Cache decorator with cache stampede protection.
Based on http://www.vldb.org/pvldb/vol8/p886-vattani.pdf (research by
Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097) and the
Python implementation at
https://github.com/grantjenks/python-diskcache/blob/master/diskcache/recipes.py#L315
The cache stampede problem (also called dog-piling, cache miss storm,
or cache choking) is a situation that occurs when a popular cache item
expires, leading to multiple requests seeing a cache miss and
regenerating that same item at the same time.
This decorator implements cache stampede protection through
early recomputation. Early recomputation of function results will occur
probabilistically before expiration in a background thread of
execution.
IMPORTANT:
The decorated function must have the cache key as its first parameter.
:param float expire: seconds until arguments expire
:param int beta: the parameter beta can be set to a value greater than 1 to
favor earlier recomputations and further reduce stampedes but
the paper authors show that setting beta=1 works well in practice
:return: callable decorator
"""
def decorator(func):
def timer(*args, **kwargs):
"Time execution of `func` and return result and time delta."
start = time.time()
result = func(*args, **kwargs)
delta = time.time() - start
# The variable delta represents the time to recompute the value
# and is used to scale the probability distribution appropriately.
return result, delta
@functools.wraps(func)
def wrapper(*args, **kwargs):
key = args[0]
cached = django_cache.get(key)
if cached is not None:
metadata = cached["METADATA"]
if cached["CALCULATING"]:
return metadata
expire_time = cached["EXPIRE"]
now = time.time()
ttl = expire_time - now
delta = cached["DELTA"]
if (-delta * beta * math.log(random.random())) < ttl:
return metadata # Cache hit.
metadata, delta = timer(*args, **kwargs)
cached_info = {
"CALCULATING": False,
"METADATA": metadata,
"DELTA": delta,
"EXPIRE": time.time() + expire,
}
django_cache.set(key, cached_info, timeout=None)
return wrapper
return decorator
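# Illustrative usage sketch (an assumption for this document, not part of the original
# module); note the cache key must be the first positional argument of the wrapped function:
#
#     @METHOD_NAME(expire=300, beta=1)
#     def channel_size(cache_key, channel_id):
#         return compute_channel_size(channel_id)  # hypothetical expensive call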
def delete_cache_keys(key_pattern):
"""
Deletes all cache keys that match key_pattern, if found.
Note that not all cache backends support wildcards, or have a way to retrieve all keys.
In this case, this function will just check if the key specified by key_pattern exists,
and delete it if so.
:param key_pattern: A string with a key name, can contain wildcard (*) characters
:return: Number of keys deleted
"""
if hasattr(django_cache, "delete_pattern"):
return django_cache.delete_pattern(key_pattern)
if django_cache.has_key(key_pattern): # noqa: W601
django_cache.delete(key_pattern)
return 1
return 0
def delete_public_channel_cache_keys():
"""
Delete all caches related to the public channel caching.
"""
from contentcuration.views.base import PUBLIC_CHANNELS_CACHE_KEYS
delete_cache_keys("*get_public_channel_list*")
delete_cache_keys("*get_user_public_channels*")
django_cache.delete_many(list(PUBLIC_CHANNELS_CACHE_KEYS.values()))
def redis_retry(func):
"""
This decorator wraps a function using the lower level Redis client to mimic functionality
that occurs in the DefaultClient: certain Redis exceptions are caught and the call
is retried once.
@see django_redis.client.default.DefaultClient
"""
def redis_retry_func(*args, **kwargs):
try:
return func(*args, **kwargs)
except _main_exceptions:
# try one more time
return func(*args, **kwargs)
return redis_retry_func
FILE_MODIFIED = -1
class ResourceSizeCache:
"""
Helper class for managing Resource size cache.
If the django_cache is Redis, then we use the lower level Redis client to use
its hash commands, HSET and HGET, so that we can store lots of data in a performant way.
"""
def __init__(self, node, cache=None):
self.node = node
self.cache = cache or django_cache
@classmethod
def reset_modified_for_file(cls, file, modified=FILE_MODIFIED):
"""
:type file: contentcuration.models.File
:type modified: datetime|None|FILE_MODIFIED
"""
if not file.contentnode_id:
return
cache = ResourceSizeCache(file.contentnode.get_root())
cache.reset_modified(file.modified if modified == FILE_MODIFIED else modified)
@property
def redis_client(self):
"""
Gets the lower level Redis client, if the cache is a Redis cache
:rtype: redis.client.StrictRedis
"""
redis_client = None
cache_client = getattr(self.cache, 'client', None)
if isinstance(cache_client, DefaultClient):
redis_client = cache_client.get_client(write=True)
return redis_client
@property
def hash_key(self):
# only first four characters
return "resource_size:{}".format(self.node.pk[:4])
@property
def size_key(self):
return "{}:value".format(self.node.pk)
@property
def modified_key(self):
return "{}:modified".format(self.node.pk)
@redis_retry
def cache_get(self, key):
if self.redis_client is not None:
# notice use of special `HGET`
# See: https://redis.io/commands/hget
return self.redis_client.hget(self.hash_key, key)
return self.cache.get("{}:{}".format(self.hash_key, key))
@redis_retry
def cache_set(self, key, val):
if self.redis_client is not None:
# notice use of special `HSET` and `HDEL`
# See: https://redis.io/commands/hset
# See: https://redis.io/commands/hdel
if val is None:
return self.redis_client.hdel(self.hash_key, key)
return self.redis_client.hset(self.hash_key, key, val)
return self.cache.set("{}:{}".format(self.hash_key, key), val)
def get_size(self):
size = self.cache_get(self.size_key)
return int(size) if size else size
def get_modified(self):
modified = self.cache_get(self.modified_key)
return isoparse(modified) if modified is not None else modified
def set_size(self, size):
return self.cache_set(self.size_key, size)
def set_modified(self, modified):
return self.cache_set(self.modified_key, modified.isoformat() if isinstance(modified, datetime) else modified)
def reset_modified(self, modified):
"""
Sets the cached modified time to `modified` only if it is earlier than the currently cached value; clears the cached value when `modified` is not a datetime.
:param modified: A datetime or None
"""
if not isinstance(modified, datetime):
return self.set_modified(None)
current_modified = self.get_modified()
if current_modified and current_modified > modified:
return self.set_modified(modified) |
361 | spawn | from collections.abc import Callable
from typing import Any
from typing_extensions import TypeAlias
_Macro: TypeAlias = tuple[str] | tuple[str, str | None]
def gen_lib_options(
compiler: CCompiler, library_dirs: list[str], runtime_library_dirs: list[str], libraries: list[str]
) -> list[str]: ...
def gen_preprocess_options(macros: list[_Macro], include_dirs: list[str]) -> list[str]: ...
def get_default_compiler(osname: str | None = None, platform: str | None = None) -> str: ...
def new_compiler(
plat: str | None = None, compiler: str | None = None, verbose: int = 0, dry_run: int = 0, force: int = 0
) -> CCompiler: ...
def show_compilers() -> None: ...
class CCompiler:
dry_run: bool
force: bool
verbose: bool
output_dir: str | None
macros: list[_Macro]
include_dirs: list[str]
libraries: list[str]
library_dirs: list[str]
runtime_library_dirs: list[str]
objects: list[str]
def __init__(self, verbose: int = 0, dry_run: int = 0, force: int = 0) -> None: ...
def add_include_dir(self, dir: str) -> None: ...
def set_include_dirs(self, dirs: list[str]) -> None: ...
def add_library(self, libname: str) -> None: ...
def set_libraries(self, libnames: list[str]) -> None: ...
def add_library_dir(self, dir: str) -> None: ...
def set_library_dirs(self, dirs: list[str]) -> None: ...
def add_runtime_library_dir(self, dir: str) -> None: ...
def set_runtime_library_dirs(self, dirs: list[str]) -> None: ...
def define_macro(self, name: str, value: str | None = None) -> None: ...
def undefine_macro(self, name: str) -> None: ...
def add_link_object(self, object: str) -> None: ...
def set_link_objects(self, objects: list[str]) -> None: ...
def detect_language(self, sources: str | list[str]) -> str | None: ...
def find_library_file(self, dirs: list[str], lib: str, debug: bool = ...) -> str | None: ...
def has_function(
self,
funcname: str,
includes: list[str] | None = None,
include_dirs: list[str] | None = None,
libraries: list[str] | None = None,
library_dirs: list[str] | None = None,
) -> bool: ...
def library_dir_option(self, dir: str) -> str: ...
def library_option(self, lib: str) -> str: ...
def runtime_library_dir_option(self, dir: str) -> str: ...
def set_executables(self, **args: str) -> None: ...
def compile(
self,
sources: list[str],
output_dir: str | None = None,
macros: _Macro | None = None,
include_dirs: list[str] | None = None,
debug: bool = ...,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
depends: list[str] | None = None,
) -> list[str]: ...
def create_static_lib(
self,
objects: list[str],
output_libname: str,
output_dir: str | None = None,
debug: bool = ...,
target_lang: str | None = None,
) -> None: ...
def link(
self,
target_desc: str,
objects: list[str],
output_filename: str,
output_dir: str | None = None,
libraries: list[str] | None = None,
library_dirs: list[str] | None = None,
runtime_library_dirs: list[str] | None = None,
export_symbols: list[str] | None = None,
debug: bool = ...,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
build_temp: str | None = None,
target_lang: str | None = None,
) -> None: ...
def link_executable(
self,
objects: list[str],
output_progname: str,
output_dir: str | None = None,
libraries: list[str] | None = None,
library_dirs: list[str] | None = None,
runtime_library_dirs: list[str] | None = None,
debug: bool = ...,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
target_lang: str | None = None,
) -> None: ...
def link_shared_lib(
self,
objects: list[str],
output_libname: str,
output_dir: str | None = None,
libraries: list[str] | None = None,
library_dirs: list[str] | None = None,
runtime_library_dirs: list[str] | None = None,
export_symbols: list[str] | None = None,
debug: bool = ...,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
build_temp: str | None = None,
target_lang: str | None = None,
) -> None: ...
def link_shared_object(
self,
objects: list[str],
output_filename: str,
output_dir: str | None = None,
libraries: list[str] | None = None,
library_dirs: list[str] | None = None,
runtime_library_dirs: list[str] | None = None,
export_symbols: list[str] | None = None,
debug: bool = ...,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
build_temp: str | None = None,
target_lang: str | None = None,
) -> None: ...
def preprocess(
self,
source: str,
output_file: str | None = None,
macros: list[_Macro] | None = None,
include_dirs: list[str] | None = None,
extra_preargs: list[str] | None = None,
extra_postargs: list[str] | None = None,
) -> None: ...
def executable_filename(self, basename: str, strip_dir: int = 0, output_dir: str = "") -> str: ...
def library_filename(self, libname: str, lib_type: str = "static", strip_dir: int = 0, output_dir: str = "") -> str: ...
def object_filenames(self, source_filenames: list[str], strip_dir: int = 0, output_dir: str = "") -> list[str]: ...
def shared_object_filename(self, basename: str, strip_dir: int = 0, output_dir: str = "") -> str: ...
def execute(self, func: Callable[..., object], args: tuple[Any, ...], msg: str | None = None, level: int = 1) -> None: ...
def METHOD_NAME(self, cmd: list[str]) -> None: ...
def mkpath(self, name: str, mode: int = 0o777) -> None: ...
def move_file(self, src: str, dst: str) -> str: ...
def announce(self, msg: str, level: int = 1) -> None: ...
def warn(self, msg: str) -> None: ...
def debug_print(self, msg: str) -> None: ... |
362 | tear down | #
# Copyright (C) 2013-2022 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Integration test for exclusions
import unittest as ut
import unittest_decorators as utx
import espressomd
import espressomd.electrostatics
@utx.skipIfMissingFeatures(['EXCLUSIONS'])
class Exclusions(ut.TestCase):
system = espressomd.System(box_l=[1.0, 1.0, 1.0])
def setUp(self):
self.system.box_l = 3 * [10]
self.system.cell_system.skin = 0.4
self.system.time_step = 0.01
def METHOD_NAME(self):
self.system.electrostatics.clear()
self.system.part.clear()
def test_add_remove(self):
p0 = self.system.part.add(id=0, pos=[0, 0, 0])
self.system.part.add(id=1, pos=[0, 0, 0])
self.system.part.add(id=2, pos=[0, 0, 0])
p0.add_exclusion(1)
p0.add_exclusion(2)
self.assertEqual(list(p0.exclusions), [1, 2])
p0.delete_exclusion(1)
self.assertEqual(list(p0.exclusions), [2])
p0.delete_exclusion(2)
self.assertEqual(list(p0.exclusions), [])
def test_transfer(self):
p0 = self.system.part.add(id=0, pos=[0, 0, 0], v=[1., 1., 1])
self.system.part.add(id=1, pos=[0, 0, 0])
self.system.part.add(id=2, pos=[0, 0, 0])
self.system.part.add(id=3, pos=[0, 0, 0])
p0.exclusions = [1, 2, 3]
for _ in range(15):
self.system.integrator.run(100)
self.assertEqual(list(p0.exclusions), [1, 2, 3])
@utx.skipIfMissingFeatures(['LENNARD_JONES'])
def test_particle_property(self):
self.system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1., sigma=2., cutoff=1.5, shift=0.0)
p0 = self.system.part.add(id=0, pos=[0, 0, 0], type=0)
p1 = self.system.part.add(id=1, pos=[1, 0, 0], type=0)
pair_energy = self.system.analysis.energy()['total']
self.assertGreater(pair_energy, 0.)
pair_pressure = self.system.analysis.pressure()['total']
self.assertGreater(pair_pressure, 0.)
self.system.integrator.run(0)
pair_force = p0.f[0]
self.assertGreater(abs(pair_force), 0.)
self.assertAlmostEqual(p1.f[0], -pair_force, places=7)
p2 = self.system.part.add(id=2, pos=[2, 0, 0], type=0)
self.system.integrator.run(0)
self.assertAlmostEqual(self.system.analysis.energy()['total'],
2 * pair_energy)
self.assertAlmostEqual(self.system.analysis.pressure()['total'],
2 * pair_pressure)
self.assertAlmostEqual(p2.f[0], -pair_force, places=7)
p1.exclusions = [0, 2]
self.system.integrator.run(0)
self.assertAlmostEqual(self.system.analysis.energy()['total'], 0)
self.assertAlmostEqual(self.system.analysis.pressure()['total'], 0)
self.assertAlmostEqual(p0.f[0], 0, places=7)
self.assertAlmostEqual(p1.f[0], 0, places=7)
self.assertAlmostEqual(p2.f[0], 0, places=7)
p1.exclusions = [0]
self.assertAlmostEqual(
self.system.analysis.energy()['total'],
pair_energy)
self.assertAlmostEqual(self.system.analysis.pressure()['total'],
pair_pressure)
self.system.integrator.run(0)
self.assertAlmostEqual(p0.f[0], 0, places=7)
self.assertAlmostEqual(p1.f[0], pair_force, places=7)
self.assertAlmostEqual(p2.f[0], -pair_force, places=7)
p1.exclusions = []
self.assertAlmostEqual(self.system.analysis.energy()['total'],
2 * pair_energy)
self.assertAlmostEqual(self.system.analysis.pressure()['total'],
2 * pair_pressure)
self.system.integrator.run(0)
self.assertAlmostEqual(p0.f[0], pair_force, places=7)
self.assertAlmostEqual(p1.f[0], 0, places=7)
self.assertAlmostEqual(p2.f[0], -pair_force, places=7)
p1.exclusions = [0]
self.assertAlmostEqual(
self.system.analysis.energy()['total'],
pair_energy)
self.assertAlmostEqual(self.system.analysis.pressure()['total'],
pair_pressure)
self.system.integrator.run(0)
self.assertAlmostEqual(p0.f[0], 0, places=7)
self.assertAlmostEqual(p1.f[0], pair_force, places=7)
self.assertAlmostEqual(p2.f[0], -pair_force, places=7)
@utx.skipIfMissingFeatures(['P3M'])
def test_electrostatics_not_excluded(self):
p0 = self.system.part.add(id=0, pos=[0, 0, 0], type=0, q=+1.)
p1 = self.system.part.add(id=1, pos=[1, 0, 0], type=0, q=-1.)
# Small alpha means large short-range contribution
p3m = espressomd.electrostatics.P3M(
prefactor=1, r_cut=3.0, accuracy=1e-3, mesh=32, cao=7, alpha=0.1,
tune=False)
self.system.electrostatics.solver = p3m
# Only short-range part of the coulomb energy
pair_energy = self.system.analysis.energy()[('coulomb', 0)]
self.assertGreater(abs(pair_energy), 0.)
self.system.integrator.run(0)
pair_force = p0.f[0]
self.assertGreater(abs(pair_force), 0.)
self.assertAlmostEqual(p1.f[0], -pair_force, places=7)
pair_pressure = self.system.analysis.pressure()[('coulomb', 0)]
self.assertGreater(abs(pair_pressure), 0.)
p0.exclusions = [1]
# Force and energy should not be changed by the exclusion
self.system.integrator.run(0)
self.assertAlmostEqual(p0.f[0], pair_force, places=7)
self.assertAlmostEqual(p1.f[0], -pair_force, places=7)
self.assertAlmostEqual(self.system.analysis.energy()[('coulomb', 0)],
pair_energy, places=7)
self.assertAlmostEqual(self.system.analysis.pressure()[('coulomb', 0)],
pair_pressure, places=7)
if __name__ == "__main__":
ut.main() |
363 | delete account | """Routes for settings view."""
from dataclasses import field
from typing import Any
from flask import render_template, session, flash, Response
from flask import request
from jinja2 import TemplateNotFound
from timApp.admin.user_cli import do_soft_delete
from timApp.answer.answer_models import AnswerUpload
from timApp.answer.routes import hide_points, hide_points_modifier
from timApp.auth.accesshelper import verify_logged_in, verify_admin, verify_view_access
from timApp.auth.sessioninfo import get_current_user_object, clear_session
from timApp.document.docentry import DocEntry
from timApp.folder.folder import Folder
from timApp.item.block import Block, BlockType
from timApp.notification.notify import get_current_user_notifications
from timApp.timdb.sqa import db
from timApp.user.consentchange import ConsentChange
from timApp.user.preferences import Preferences
from timApp.user.settings.style_utils import is_style_doc
from timApp.user.user import User, Consent, get_owned_objects_query
from timApp.util.flask.requesthelper import get_option, RouteException, NotExist
from timApp.util.flask.responsehelper import json_response, ok_response
from timApp.util.flask.typedblueprint import TypedBlueprint
settings_page = TypedBlueprint("settings_page", __name__, url_prefix="/settings")
@settings_page.before_request
def verify_login() -> None:
verify_logged_in()
@settings_page.get("")
def show() -> str:
try:
limit = 50
return render_template(
"settings.jinja2",
notification_limit=limit,
notifications=get_current_user_notifications(limit=limit),
contacts=get_current_user_object().contacts,
)
except TemplateNotFound:
raise NotExist()
@settings_page.get("/get")
def get_settings() -> Response:
return json_response(get_current_user_object().get_prefs())
def verify_new_styles(curr_prefs: Preferences, new_prefs: Preferences) -> None:
new_style_doc_ids = set(new_prefs.style_doc_ids) - set(curr_prefs.style_doc_ids)
if not new_style_doc_ids:
return
new_style_docs: list[DocEntry] = DocEntry.query.filter(
DocEntry.id.in_(new_style_doc_ids)
).all()
if len(new_style_docs) != len(new_style_doc_ids):
raise NotExist("Some style docs could not be found")
for doc in new_style_docs:
if not is_style_doc(doc):
raise RouteException(f"Document {doc.path} is not a style document")
verify_view_access(doc)
@settings_page.post("/save")
def save_settings() -> Response:
user = get_current_user_object()
# Don't overwrite bookmarks. If the user has multiple tabs open, the latest bookmarks might get overwritten.
attrs_to_preserve = {"bookmarks"}
j = request.get_json(silent=True)
if not j or not isinstance(j, dict):
return json_response(user.get_prefs().to_json(with_style=True))
try:
curr_prefs = user.get_prefs()
for attr in attrs_to_preserve:
val = getattr(curr_prefs, attr)
j[attr] = val
new_prefs = Preferences.from_json(j)
verify_new_styles(curr_prefs, new_prefs)
user.set_prefs(new_prefs)
except TypeError as e:
raise RouteException(f"Invalid settings: {e}")
db.session.commit()
r = json_response(user.get_prefs().to_json(with_style=True))
if new_prefs.language:
r.set_cookie("lang", new_prefs.language)
return r
@settings_page.put("/save/lang")
def save_language_route(lang: str) -> Response:
u = get_current_user_object()
prefs = u.get_prefs()
prefs.language = lang
u.set_prefs(prefs)
db.session.commit()
r = ok_response()
r.set_cookie("lang", lang)
return r
@settings_page.get("/get/<name>")
def get_setting(name: str) -> Response:
prefs = get_current_user_object().get_prefs()
return json_response({name: getattr(prefs, name, None)})
def get_user_info(u: User, include_doc_content: bool = False) -> dict[str, Any]:
"""Returns all data associated with a user."""
block_query = get_owned_objects_query(u)
docs = DocEntry.query.filter(DocEntry.id.in_(block_query)).all()
folders = Folder.query.filter(Folder.id.in_(block_query)).all()
images = Block.query.filter(
Block.id.in_(block_query) & (Block.type_id == BlockType.Image.value)
).all()
files = Block.query.filter(
Block.id.in_(block_query) & (Block.type_id == BlockType.File.value)
).all()
answers = u.answers.all()
answer_uploads = AnswerUpload.query.filter(
AnswerUpload.answer_id.in_([a.id for a in answers])
).all()
answers_no_points = list(map(hide_points, answers))
answers_no_points = list(map(hide_points_modifier, answers_no_points))
for d in docs:
d.serialize_content = include_doc_content
annotations = u.annotations.all()
for ann in annotations:
for c in ann.comments:
if c.commenter.id != u.id:
c.commenter.anonymize = True
return {
"annotations": annotations,
"answers": answers_no_points,
"answer_uploads": answer_uploads,
"groups": u.groups,
"lectureanswers": u.lectureanswers.all(),
"notes": u.get_personal_group().notes.all(),
"owned_documents": docs,
"owned_folders": folders,
"owned_lectures": u.owned_lectures.all(),
"readparagraphs": u.get_personal_group().readparagraphs.all(),
"uploaded_images": images,
"uploaded_files": files,
"user": {
**u.to_json(contacts=True),
"given_name": u.given_name,
"last_name": u.last_name,
"prefs": u.prefs,
"origin": u.origin,
"consent": u.consent,
"created": u.created,
"modified": u.modified,
},
"velps": u.velps.all(),
}
def get_info_for(u: User) -> Response:
include_doc_content = get_option(request, "content", False)
return json_response(get_user_info(u, include_doc_content))
@settings_page.get("/info")
def get_info_current() -> Response:
return get_info_for(get_current_user_object())
@settings_page.get("/info/<username>")
def get_info_any(username: str) -> Response:
verify_admin()
u = User.get_by_name(username)
if not u:
raise NotExist("User not found")
return get_info_for(u)
@settings_page.post("/updateConsent")
def update_consent(consent: Consent = field(metadata={"by_value": True})) -> Response:
u = get_current_user_object()
if u.consent != consent:
u.consent = consent
u.consents.append(ConsentChange(consent=consent))
db.session.commit()
return ok_response()
@settings_page.post("/account/delete")
def METHOD_NAME() -> Response:
verify_logged_in()
u = get_current_user_object()
if not u.is_email_user:
raise RouteException(
"Only users registered via email can delete their account manually."
)
do_soft_delete(u)
db.session.commit()
clear_session()
flash("Your account has been deleted.")
return ok_response() |
364 | measure thread | import json
import cv2
import base64
import threading
import time
from datetime import datetime
from websocket_server import WebsocketServer
import os
# Graphical User Interface Class
class GUI:
# Initialization function
# The actual initialization
def __init__(self, host, car):
t = threading.Thread(target=self.run_server)
self.payload = {'image': ''}
self.left_payload = {'image_left': ''}
self.server = None
self.client = None
self.host = host
# Image variables
self.image_to_be_shown = None
self.image_to_be_shown_updated = False
self.image_show_lock = threading.Lock()
self.left_image_to_be_shown = None
self.left_image_to_be_shown_updated = False
self.left_image_show_lock = threading.Lock()
self.acknowledge = False
self.acknowledge_lock = threading.Lock()
# Take the console object to set the same websocket and client
self.car = car
t.start()
# Explicit initialization function
# Class method, so user can call it without instantiation
@classmethod
def initGUI(cls, host):
# self.payload = {'image': '', 'shape': []}
new_instance = cls(host)
return new_instance
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadImage(self):
self.image_show_lock.acquire()
image_to_be_shown_updated = self.image_to_be_shown_updated
image_to_be_shown = self.image_to_be_shown
self.image_show_lock.release()
image = image_to_be_shown
payload = {'image': '', 'shape': ''}
if not image_to_be_shown_updated:
return payload
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image'] = encoded_image.decode('utf-8')
payload['shape'] = shape
self.image_show_lock.acquire()
self.image_to_be_shown_updated = False
self.image_show_lock.release()
return payload
# Function to prepare image payload
# Encodes the image as a JSON string and sends through the WS
def payloadLeftImage(self):
self.left_image_show_lock.acquire()
left_image_to_be_shown_updated = self.left_image_to_be_shown_updated
left_image_to_be_shown = self.left_image_to_be_shown
self.left_image_show_lock.release()
image = left_image_to_be_shown
payload = {'image_left': '', 'shape': ''}
if not left_image_to_be_shown_updated:
return payload
shape = image.shape
frame = cv2.imencode('.JPEG', image)[1]
encoded_image = base64.b64encode(frame)
payload['image_left'] = encoded_image.decode('utf-8')
payload['shape'] = shape
self.left_image_show_lock.acquire()
self.left_image_to_be_shown_updated = False
self.left_image_show_lock.release()
return payload
# Function for student to call
def showImage(self, image):
self.image_show_lock.acquire()
self.image_to_be_shown = image
self.image_to_be_shown_updated = True
self.image_show_lock.release()
# Function for student to call
def showLeftImage(self, image):
self.left_image_show_lock.acquire()
self.left_image_to_be_shown = image
self.left_image_to_be_shown_updated = True
self.left_image_show_lock.release()
# Function to get the client
# Called when a new client is received
def get_client(self, client, server):
self.client = client
# Function to get value of Acknowledge
def get_acknowledge(self):
self.acknowledge_lock.acquire()
acknowledge = self.acknowledge
self.acknowledge_lock.release()
return acknowledge
# Function to set value of Acknowledge
def set_acknowledge(self, value):
self.acknowledge_lock.acquire()
self.acknowledge = value
self.acknowledge_lock.release()
# Update the gui
def update_gui(self):
# Payload Image Message
payload = self.payloadImage()
self.payload["image"] = json.dumps(payload)
message = "#gui" + json.dumps(self.payload)
self.server.send_message(self.client, message)
# Payload Left Image Message
left_payload = self.payloadLeftImage()
self.left_payload["image_left"] = json.dumps(left_payload)
message = "#gui" + json.dumps(self.left_payload)
self.server.send_message(self.client, message)
# Function to read the message from websocket
# Gets called when there is an incoming message from the client
def get_message(self, client, server, message):
# Acknowledge Message for GUI Thread
if message[:4] == "#ack":
self.set_acknowledge(True)
elif message[:4] == "#car":
self.car.start_car(int(message[4:5]))
elif message[:4] == "#stp":
self.car.stop_car()
elif message[:4] == "#rst":
self.car.reset_car()
# Activate the server
def run_server(self):
self.server = WebsocketServer(port=2303, host=self.host)
self.server.set_fn_new_client(self.get_client)
self.server.set_fn_message_received(self.get_message)
home_dir = os.path.expanduser('~')
logged = False
while not logged:
try:
f = open(f"{home_dir}/ws_gui.log", "w")
f.write("websocket_gui=ready")
f.close()
logged = True
except:
time.sleep(0.1)
self.server.run_forever()
# Function to reset
def reset_gui(self):
pass
# This class decouples the user thread
# and the GUI update thread
class ThreadGUI:
def __init__(self, gui):
self.gui = gui
# Time variables
self.ideal_cycle = 80
self.measured_cycle = 80
self.iteration_counter = 0
# Function to start the execution of threads
def start(self):
self.METHOD_NAME = threading.Thread(target=self.METHOD_NAME)
self.thread = threading.Thread(target=self.run)
self.METHOD_NAME.start()
self.thread.start()
print("GUI Thread Started!")
# The measuring thread to measure frequency
def METHOD_NAME(self):
while self.gui.client is None:
pass
previous_time = datetime.now()
while True:
# Sleep for 2 seconds
time.sleep(2)
# Measure the current time and subtract from previous time to get real time interval
current_time = datetime.now()
dt = current_time - previous_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
previous_time = current_time
# Get the time period
try:
# Guard against division by zero when no iterations have run yet
self.measured_cycle = ms / self.iteration_counter
except:
self.measured_cycle = 0
# Reset the counter
self.iteration_counter = 0
# The main thread of execution
def run(self):
while self.gui.client is None:
pass
while True:
start_time = datetime.now()
self.gui.update_gui()
acknowledge_message = self.gui.get_acknowledge()
while not acknowledge_message:
acknowledge_message = self.gui.get_acknowledge()
self.gui.set_acknowledge(False)
finish_time = datetime.now()
self.iteration_counter = self.iteration_counter + 1
dt = finish_time - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
if ms < self.ideal_cycle:
time.sleep((self.ideal_cycle-ms) / 1000.0) |
365 | salesforce dataset | from typing import Any, Dict, Generator
import pydash
import pytest
import requests
from sqlalchemy.orm import Session
from starlette.status import HTTP_204_NO_CONTENT, HTTP_404_NOT_FOUND
from fides.api.cryptography import cryptographic_util
from fides.api.db import session
from fides.api.models.connectionconfig import (
AccessLevel,
ConnectionConfig,
ConnectionType,
)
from fides.api.models.datasetconfig import DatasetConfig
from fides.api.models.sql_models import Dataset as CtlDataset
from fides.api.util.saas_util import (
load_config_with_replacement,
load_dataset_with_replacement,
)
from tests.ops.test_helpers.vault_client import get_secrets
secrets = get_secrets("salesforce")
@pytest.fixture(scope="session")
def salesforce_secrets(saas_config):
return {
"domain": pydash.get(saas_config, "salesforce.domain") or secrets["domain"],
"client_id": pydash.get(saas_config, "salesforce.client_id")
or secrets["client_id"],
"client_secret": pydash.get(saas_config, "salesforce.client_secret")
or secrets["client_secret"],
"redirect_uri": pydash.get(saas_config, "salesforce.redirect_uri")
or secrets["redirect_uri"],
"access_token": pydash.get(saas_config, "salesforce.access_token")
or secrets["access_token"],
"refresh_token": pydash.get(saas_config, "salesforce.refresh_token")
or secrets["refresh_token"],
}
@pytest.fixture(scope="session")
def salesforce_identity_email(saas_config):
return (
pydash.get(saas_config, "salesforce.identity_email")
or secrets["identity_email"]
)
@pytest.fixture(scope="session")
def salesforce_identity_phone_number(saas_config):
return (
pydash.get(saas_config, "salesforce.identity_phone_number")
or secrets["identity_phone_number"]
)
@pytest.fixture(scope="session")
def salesforce_erasure_identity_email():
return f"{cryptographic_util.generate_secure_random_string(13)}@email.com"
@pytest.fixture
def salesforce_config() -> Dict[str, Any]:
return load_config_with_replacement(
"data/saas/config/salesforce_config.yml",
"<instance_fides_key>",
"salesforce_instance",
)
@pytest.fixture
def METHOD_NAME() -> Dict[str, Any]:
return load_dataset_with_replacement(
"data/saas/dataset/salesforce_dataset.yml",
"<instance_fides_key>",
"salesforce_instance",
)[0]
@pytest.fixture(scope="function")
def salesforce_connection_config(
db: session,
salesforce_config,
salesforce_secrets,
) -> Generator:
fides_key = salesforce_config["fides_key"]
connection_config = ConnectionConfig.create(
db=db,
data={
"key": fides_key,
"name": fides_key,
"connection_type": ConnectionType.saas,
"access": AccessLevel.write,
"secrets": salesforce_secrets,
"saas_config": salesforce_config,
},
)
yield connection_config
connection_config.delete(db)
@pytest.fixture
def salesforce_dataset_config(
db: Session,
salesforce_connection_config: ConnectionConfig,
METHOD_NAME: Dict[str, Any],
) -> Generator:
fides_key = METHOD_NAME["fides_key"]
salesforce_connection_config.name = fides_key
salesforce_connection_config.key = fides_key
salesforce_connection_config.save(db=db)
ctl_dataset = CtlDataset.create_from_dataset_dict(db, METHOD_NAME)
dataset = DatasetConfig.create(
db=db,
data={
"connection_config_id": salesforce_connection_config.id,
"fides_key": fides_key,
"ctl_dataset_id": ctl_dataset.id,
},
)
yield dataset
dataset.delete(db=db)
ctl_dataset.delete(db=db)
@pytest.fixture(scope="function")
def salesforce_create_erasure_data(
salesforce_erasure_identity_email, salesforce_secrets
) -> Generator:
"""
Creates dynamic test data records (account, contact, lead, case, campaign and campaign member) for erasure tests.
Yields the created record IDs as these may be useful to have in test scenarios
"""
base_url = f"https://{salesforce_secrets['domain']}"
headers = {
"Authorization": f"Bearer {salesforce_secrets['access_token']}",
}
# Create account
account_data = {"name": "Ethyca Test"}
accounts_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/Account",
headers=headers,
json=account_data,
)
assert accounts_response.ok
account_id = accounts_response.json()["id"]
# Create contact
contact_data = {
"firstName": "Fidesops",
"lastName": "Test Contact",
"email": salesforce_erasure_identity_email,
"AccountId": account_id,
}
contacts_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/Contact",
headers=headers,
json=contact_data,
)
assert contacts_response.ok
contact_id = contacts_response.json()["id"]
# Create lead
lead_data = {
"firstName": "Fidesops",
"lastName": "Test Lead",
"email": salesforce_erasure_identity_email,
"Company": "Test Company",
}
leads_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/Lead",
headers=headers,
json=lead_data,
)
assert leads_response.ok
lead_id = leads_response.json()["id"]
# Create Case
case_data = {
"SuppliedEmail": salesforce_erasure_identity_email,
"SuppliedCompany": "Test Company",
"ContactId": contact_id,
}
cases_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/Case",
headers=headers,
json=case_data,
)
assert cases_response.ok
case_id = cases_response.json()["id"]
# Create Campaign Member
# We need to create a campaign for it first
campaign_data = {
"Description": "Test Description",
"Name": "Test Campaign",
}
campaigns_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/Campaign",
headers=headers,
json=campaign_data,
)
assert campaigns_response.ok
campaign_id = campaigns_response.json()["id"]
# Now creating campaign member for this campaign
campaign_member_data = {
"campaignId": campaign_id,
"contactId": contact_id,
"leadId": lead_id,
}
campaign_members_response = requests.post(
url=f"{base_url}/services/data/v54.0/sobjects/CampaignMember",
headers=headers,
json=campaign_member_data,
)
assert campaign_members_response.ok
campaign_member_id = campaign_members_response.json()["id"]
yield account_id, contact_id, case_id, lead_id, campaign_member_id
# cleanup data by doing a full deletion instead of just masking
case_response = requests.delete(
url=f"{base_url}/services/data/v54.0/sobjects/Case/{case_id}", headers=headers
)
assert case_response.status_code == HTTP_204_NO_CONTENT
case_response = requests.get(
url=f"{base_url}/services/data/v54.0/sobjects/Case/{case_id}", headers=headers
)
assert case_response.status_code == HTTP_404_NOT_FOUND
account_response = requests.delete(
url=f"{base_url}/services/data/v54.0/sobjects/Account/{account_id}",
headers=headers,
)
assert account_response.status_code == HTTP_204_NO_CONTENT
account_response = requests.get(
url=f"{base_url}/services/data/v54.0/sobjects/Account/{account_id}",
headers=headers,
)
assert account_response.status_code == HTTP_404_NOT_FOUND |
366 | ensure movement not obstructed by module | """Labware movement command handling."""
from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from opentrons_shared_data.gripper.constants import IDLE_STATE_GRIP_FORCE
from opentrons.hardware_control import HardwareControlAPI
from opentrons.hardware_control.types import OT3Mount, Axis
from opentrons.motion_planning import get_gripper_labware_movement_waypoints
from opentrons.protocol_engine.state import StateStore
from opentrons.protocol_engine.resources.ot3_validation import ensure_ot3_hardware
from .thermocycler_movement_flagger import ThermocyclerMovementFlagger
from .heater_shaker_movement_flagger import HeaterShakerMovementFlagger
from .thermocycler_plate_lifter import ThermocyclerPlateLifter
from ..errors import (
GripperNotAttachedError,
LabwareMovementNotAllowedError,
ThermocyclerNotOpenError,
HeaterShakerLabwareLatchNotOpenError,
)
from ..types import (
OnLabwareLocation,
LabwareLocation,
LabwareMovementOffsetData,
OnDeckLabwareLocation,
)
if TYPE_CHECKING:
from opentrons.protocol_engine.execution import EquipmentHandler, MovementHandler
# TODO (spp, 2022-10-20): name this GripperMovementHandler if it doesn't handle
# any non-gripper implementations
class LabwareMovementHandler:
"""Implementation logic for labware movement."""
_hardware_api: HardwareControlAPI
_state_store: StateStore
_movement: MovementHandler
_equipment: EquipmentHandler
def __init__(
self,
hardware_api: HardwareControlAPI,
state_store: StateStore,
equipment: EquipmentHandler,
movement: MovementHandler,
thermocycler_plate_lifter: Optional[ThermocyclerPlateLifter] = None,
thermocycler_movement_flagger: Optional[ThermocyclerMovementFlagger] = None,
heater_shaker_movement_flagger: Optional[HeaterShakerMovementFlagger] = None,
) -> None:
"""Initialize a LabwareMovementHandler instance."""
self._hardware_api = hardware_api
self._state_store = state_store
self._thermocycler_plate_lifter = (
thermocycler_plate_lifter
or ThermocyclerPlateLifter(
state_store=self._state_store,
equipment=equipment,
movement=movement,
)
)
self._tc_movement_flagger = (
thermocycler_movement_flagger
or ThermocyclerMovementFlagger(
state_store=self._state_store, hardware_api=self._hardware_api
)
)
self._hs_movement_flagger = (
heater_shaker_movement_flagger
or HeaterShakerMovementFlagger(
state_store=self._state_store, hardware_api=self._hardware_api
)
)
async def move_labware_with_gripper(
self,
labware_id: str,
current_location: OnDeckLabwareLocation,
new_location: OnDeckLabwareLocation,
user_offset_data: LabwareMovementOffsetData,
) -> None:
"""Move a loaded labware from one location to another using gripper."""
use_virtual_gripper = self._state_store.config.use_virtual_gripper
if use_virtual_gripper:
return
ot3api = ensure_ot3_hardware(
hardware_api=self._hardware_api,
error_msg="Gripper is only available on Opentrons Flex",
)
if not ot3api.has_gripper():
raise GripperNotAttachedError(
"No gripper found for performing labware movements."
)
gripper_mount = OT3Mount.GRIPPER
# Retract all mounts
await ot3api.home(axes=[Axis.Z_L, Axis.Z_R, Axis.Z_G])
gripper_homed_position = await ot3api.gantry_position(mount=gripper_mount)
async with self._thermocycler_plate_lifter.lift_plate_for_labware_movement(
labware_location=current_location
):
final_offsets = (
self._state_store.geometry.get_final_labware_movement_offset_vectors(
from_location=current_location,
to_location=new_location,
additional_offset_vector=user_offset_data,
)
)
from_labware_center = self._state_store.geometry.get_labware_grip_point(
labware_id=labware_id, location=current_location
)
to_labware_center = self._state_store.geometry.get_labware_grip_point(
labware_id=labware_id, location=new_location
)
movement_waypoints = get_gripper_labware_movement_waypoints(
from_labware_center=from_labware_center,
to_labware_center=to_labware_center,
gripper_home_z=gripper_homed_position.z,
offset_data=final_offsets,
)
labware_grip_force = self._state_store.labware.get_grip_force(labware_id)
for waypoint_data in movement_waypoints:
if waypoint_data.jaw_open:
await ot3api.ungrip()
else:
await ot3api.grip(force_newtons=labware_grip_force)
await ot3api.move_to(
mount=gripper_mount, abs_position=waypoint_data.position
)
# Keep the gripper in idly gripped position to avoid colliding with
# things like the thermocycler latches
await ot3api.grip(force_newtons=IDLE_STATE_GRIP_FORCE, stay_engaged=False)
async def METHOD_NAME(
self, labware_id: str, new_location: LabwareLocation
) -> None:
"""Ensure that the labware movement is not obstructed by a parent module.
Raises: LabwareMovementNotAllowedError if either current location or
new location is a module that is in a state that prevents the labware from
being moved (either manually or using gripper).
"""
current_parent = self._state_store.labware.get_parent_location(
labware_id=labware_id
)
if isinstance(new_location, OnLabwareLocation):
new_location = self._state_store.labware.get_parent_location(
labware_id=new_location.labwareId
)
for parent in (current_parent, new_location):
try:
await self._tc_movement_flagger.raise_if_labware_in_non_open_thermocycler(
labware_parent=parent
)
await self._hs_movement_flagger.raise_if_labware_latched_on_heater_shaker(
labware_parent=parent
)
except ThermocyclerNotOpenError:
raise LabwareMovementNotAllowedError(
"Cannot move labware to or from a Thermocycler with its lid closed."
)
except HeaterShakerLabwareLatchNotOpenError:
raise LabwareMovementNotAllowedError(
"Cannot move labware to or from a Heater-Shaker"
" with its labware latch closed."
) |
367 | get whit monday | from _typeshed import Incomplete
from collections.abc import Generator
from typing import ClassVar
MON: Incomplete
TUE: Incomplete
WED: Incomplete
THU: Incomplete
FRI: Incomplete
SAT: Incomplete
SUN: Incomplete
ISO_MON: Incomplete
ISO_TUE: Incomplete
ISO_WED: Incomplete
ISO_THU: Incomplete
ISO_FRI: Incomplete
ISO_SAT: Incomplete
ISO_SUN: Incomplete
def cleaned_date(day, keep_datetime: bool = False): ...
def daterange(start, end) -> Generator[Incomplete, None, None]: ...
class ChristianMixin:
EASTER_METHOD: Incomplete
include_epiphany: ClassVar[bool]
include_clean_monday: ClassVar[bool]
include_annunciation: ClassVar[bool]
include_fat_tuesday: ClassVar[bool]
fat_tuesday_label: ClassVar[str | None]
include_ash_wednesday: ClassVar[bool]
ash_wednesday_label: ClassVar[str]
include_palm_sunday: ClassVar[bool]
include_holy_thursday: ClassVar[bool]
holy_thursday_label: ClassVar[str]
include_good_friday: ClassVar[bool]
good_friday_label: ClassVar[str]
include_easter_monday: ClassVar[bool]
include_easter_saturday: ClassVar[bool]
easter_saturday_label: ClassVar[str]
include_easter_sunday: ClassVar[bool]
include_all_saints: ClassVar[bool]
include_immaculate_conception: ClassVar[bool]
immaculate_conception_label: ClassVar[str]
include_christmas: ClassVar[bool]
christmas_day_label: ClassVar[str]
include_christmas_eve: ClassVar[bool]
include_ascension: ClassVar[bool]
include_assumption: ClassVar[bool]
include_whit_sunday: ClassVar[bool]
whit_sunday_label: ClassVar[str]
include_whit_monday: ClassVar[bool]
whit_monday_label: ClassVar[str]
include_corpus_christi: ClassVar[bool]
include_boxing_day: ClassVar[bool]
boxing_day_label: ClassVar[str]
include_all_souls: ClassVar[bool]
def get_fat_tuesday(self, year): ...
def get_ash_wednesday(self, year): ...
def get_palm_sunday(self, year): ...
def get_holy_thursday(self, year): ...
def get_good_friday(self, year): ...
def get_clean_monday(self, year): ...
def get_easter_saturday(self, year): ...
def get_easter_sunday(self, year): ...
def get_easter_monday(self, year): ...
def get_ascension_thursday(self, year): ...
def METHOD_NAME(self, year): ...
def get_whit_sunday(self, year): ...
def get_corpus_christi(self, year): ...
def shift_christmas_boxing_days(self, year): ...
def get_variable_days(self, year): ...
class WesternMixin(ChristianMixin):
EASTER_METHOD: Incomplete
WEEKEND_DAYS: Incomplete
class OrthodoxMixin(ChristianMixin):
EASTER_METHOD: Incomplete
WEEKEND_DAYS: Incomplete
include_orthodox_christmas: ClassVar[bool]
orthodox_christmas_day_label: ClassVar[str]
def get_fixed_holidays(self, year): ...
class LunarMixin:
@staticmethod
def lunar(year, month, day): ...
class ChineseNewYearMixin(LunarMixin):
include_chinese_new_year_eve: ClassVar[bool]
chinese_new_year_eve_label: ClassVar[str]
include_chinese_new_year: ClassVar[bool]
chinese_new_year_label: ClassVar[str]
include_chinese_second_day: ClassVar[bool]
chinese_second_day_label: ClassVar[str]
include_chinese_third_day: ClassVar[bool]
chinese_third_day_label: ClassVar[str]
shift_sunday_holidays: ClassVar[bool]
shift_start_cny_sunday: ClassVar[bool]
def get_chinese_new_year(self, year): ...
def get_variable_days(self, year): ...
def get_shifted_holidays(self, dates) -> Generator[Incomplete, None, None]: ...
def get_calendar_holidays(self, year): ...
class CalverterMixin:
conversion_method: Incomplete
ISLAMIC_HOLIDAYS: Incomplete
def __init__(self, *args, **kwargs) -> None: ...
def converted(self, year): ...
def calverted_years(self, year): ...
def get_islamic_holidays(self): ...
def get_delta_islamic_holidays(self, year) -> None: ...
def get_variable_days(self, year): ...
class IslamicMixin(CalverterMixin):
WEEKEND_DAYS: Incomplete
conversion_method: Incomplete
include_prophet_birthday: ClassVar[bool]
include_day_after_prophet_birthday: ClassVar[bool]
include_start_ramadan: ClassVar[bool]
include_eid_al_fitr: ClassVar[bool]
length_eid_al_fitr: int
eid_al_fitr_label: ClassVar[str]
include_eid_al_adha: ClassVar[bool]
eid_al_adha_label: ClassVar[str]
length_eid_al_adha: int
include_day_of_sacrifice: ClassVar[bool]
day_of_sacrifice_label: ClassVar[str]
include_islamic_new_year: ClassVar[bool]
include_laylat_al_qadr: ClassVar[bool]
include_nuzul_al_quran: ClassVar[bool]
def get_islamic_holidays(self): ...
class CoreCalendar:
FIXED_HOLIDAYS: Incomplete
WEEKEND_DAYS: Incomplete
def __init__(self) -> None: ...
def name(cls): ...
def get_fixed_holidays(self, year): ...
def get_variable_days(self, year): ...
def get_calendar_holidays(self, year): ...
def holidays(self, year: Incomplete | None = None): ...
def get_holiday_label(self, day): ...
def holidays_set(self, year: Incomplete | None = None): ...
def get_weekend_days(self): ...
def is_working_day(self, day, extra_working_days: Incomplete | None = None, extra_holidays: Incomplete | None = None): ...
def is_holiday(self, day, extra_holidays: Incomplete | None = None): ...
def add_working_days(
self,
day,
delta,
extra_working_days: Incomplete | None = None,
extra_holidays: Incomplete | None = None,
keep_datetime: bool = False,
): ...
def sub_working_days(
self,
day,
delta,
extra_working_days: Incomplete | None = None,
extra_holidays: Incomplete | None = None,
keep_datetime: bool = False,
): ...
def find_following_working_day(self, day): ...
@staticmethod
def get_nth_weekday_in_month(year, month, weekday, n: int = 1, start: Incomplete | None = None): ...
@staticmethod
def get_last_weekday_in_month(year, month, weekday): ...
@staticmethod
def get_iso_week_date(year, week_nb, weekday=1): ...
@staticmethod
def get_first_weekday_after(day, weekday): ...
def get_working_days_delta(
self,
start,
end,
include_start: bool = False,
extra_working_days: Incomplete | None = None,
extra_holidays: Incomplete | None = None,
): ...
def export_to_ical(self, period=[2000, 2030], target_path: Incomplete | None = None): ...
class Calendar(CoreCalendar):
include_new_years_day: ClassVar[bool]
include_new_years_eve: ClassVar[bool]
shift_new_years_day: ClassVar[bool]
include_labour_day: ClassVar[bool]
labour_day_label: ClassVar[str]
def __init__(self, **kwargs) -> None: ...
def get_fixed_holidays(self, year): ...
def get_variable_days(self, year): ...
class WesternCalendar(WesternMixin, Calendar): ...
class OrthodoxCalendar(OrthodoxMixin, Calendar): ...
class ChineseNewYearCalendar(ChineseNewYearMixin, Calendar):
WEEKEND_DAYS: Incomplete
class IslamicCalendar(IslamicMixin, Calendar): ...
class IslamoWesternCalendar(IslamicMixin, WesternMixin, Calendar):
FIXED_HOLIDAYS: Incomplete |
368 | test multiport floodping | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Harsha Thyagaraja <[email protected]>
import os
from avocado import Test
from avocado.utils.software_manager.manager import SoftwareManager
from avocado.utils import process
from avocado.utils import distro
from avocado.utils.network.interfaces import NetworkInterface
from avocado.utils.network.hosts import LocalHost, RemoteHost
class MultiportStress(Test):
'''
To perform IO stress on multiple ports on a NIC adapter
'''
def setUp(self):
'''
To check and install dependencies for the test
'''
self.host_interfaces = []
interfaces = os.listdir('/sys/class/net')
self.local = LocalHost()
devices = self.params.get("host_interfaces", default=None)
for device in devices.split(" "):
if device in interfaces:
self.host_interfaces.append(device)
elif self.local.validate_mac_addr(device) and device in self.local.get_all_hwaddr():
self.host_interfaces.append(self.local.get_interface_by_hwaddr(device).name)
else:
self.host_interfaces = None
self.cancel("Please check the network device")
smm = SoftwareManager()
if distro.detect().name == 'Ubuntu':
pkg = 'iputils-ping'
else:
pkg = 'iputils'
if not smm.check_installed(pkg) and not smm.install(pkg):
self.cancel("Package %s is needed to test" % pkg)
self.peer_ips = self.params.get("peer_ips",
default="").split(" ")
self.peer_public_ip = self.params.get("peer_public_ip", default="")
self.count = self.params.get("count", default="1000")
self.ipaddr = self.params.get("host_ips", default="").split(" ")
self.netmask = self.params.get("netmask", default="")
for ipaddr, interface in zip(self.ipaddr, self.host_interfaces):
networkinterface = NetworkInterface(interface, self.local)
try:
networkinterface.add_ipaddr(ipaddr, self.netmask)
networkinterface.save(ipaddr, self.netmask)
except Exception:
networkinterface.save(ipaddr, self.netmask)
networkinterface.bring_up()
self.peer_user = self.params.get("peer_user", default="root")
self.peer_password = self.params.get("peer_password", '*',
default="None")
self.mtu = self.params.get("mtu", default=1500)
self.remotehost = RemoteHost(self.peer_ips[0], self.peer_user,
password=self.peer_password)
self.remotehost_public = RemoteHost(self.peer_public_ip,
self.peer_user,
password=self.peer_password)
for peer_ip in self.peer_ips:
peer_interface = self.remotehost.get_interface_by_ipaddr(
peer_ip).name
peer_networkinterface = NetworkInterface(peer_interface,
self.remotehost)
if peer_networkinterface.set_mtu(self.mtu) is not None:
self.cancel("Failed to set mtu in peer")
for host_interface in self.host_interfaces:
self.networkinterface = NetworkInterface(
host_interface, self.local)
if self.networkinterface.set_mtu(self.mtu) is not None:
self.cancel("Failed to set mtu in host")
def multiport_ping(self, ping_option):
'''
        Ping multiple peers in parallel
'''
parallel_procs = []
for host, peer in zip(self.host_interfaces, self.peer_ips):
self.log.info('Starting Ping test')
cmd = "ping -I %s %s -c %s %s" % (host, peer, self.count,
ping_option)
obj = process.SubProcess(cmd, verbose=False, shell=True)
obj.start()
parallel_procs.append(obj)
self.log.info('Wait for background processes to finish'
' before proceeding')
for proc in parallel_procs:
proc.wait()
errors = []
for proc in parallel_procs:
out_buf = proc.get_stdout()
out_buf += proc.get_stderr()
for val in out_buf.decode("utf-8").splitlines():
if 'packet loss' in val and ', 0% packet loss,' not in val:
errors.append(out_buf)
break
if errors:
self.fail(b"\n".join(errors))
def test_multiport_ping(self):
self.multiport_ping('')
def METHOD_NAME(self):
self.multiport_ping('-f')
def tearDown(self):
'''
unset ip for host interface
'''
if self.host_interfaces:
for host_interface in self.host_interfaces:
networkinterface = NetworkInterface(host_interface, self.local)
if networkinterface.set_mtu("1500") is not None:
self.cancel("Failed to set mtu in host")
for peer_ip in self.peer_ips:
peer_interface = self.remotehost.get_interface_by_ipaddr(
peer_ip).name
try:
peer_networkinterface = NetworkInterface(peer_interface,
self.remotehost)
peer_networkinterface.set_mtu("1500")
except Exception:
peer_public_networkinterface = NetworkInterface(peer_interface,
self.remotehost_public)
peer_public_networkinterface.set_mtu("1500")
for ipaddr, interface in zip(self.ipaddr, self.host_interfaces):
networkinterface = NetworkInterface(interface, self.local)
networkinterface.remove_ipaddr(ipaddr, self.netmask)
try:
networkinterface.restore_from_backup()
except Exception:
self.log.info(
"backup file not availbale, could not restore file.")
self.remotehost.remote_session.quit()
self.remotehost_public.remote_session.quit() |
369 | test save blob s3 | # This file is part of the Open Data Cube, see https://opendatacube.org for more information
#
# Copyright (c) 2015-2023 ODC Contributors
# SPDX-License-Identifier: Apache-2.0
import pytest
import moto
from pathlib import Path
import dask
import dask.delayed
from datacube.utils.io import slurp
from datacube.utils.dask import (
start_local_dask,
get_total_available_memory,
compute_memory_per_worker,
compute_tasks,
pmap,
partition_map,
save_blob_to_file,
save_blob_to_s3,
_save_blob_to_file,
_save_blob_to_s3,
)
from datacube.utils.aws import (
s3_url_parse,
s3_fetch,
s3_client,
)
def test_compute_tasks():
try:
client = start_local_dask(threads_per_worker=1,
dashboard_address=None)
tasks = (dask.delayed(x) for x in range(100))
xx = [x for x in compute_tasks(tasks, client)]
assert xx == [x for x in range(100)]
finally:
client.close()
del client
def test_start_local_dask_dashboard_link(monkeypatch):
monkeypatch.setenv('JUPYTERHUB_SERVICE_PREFIX', 'user/test/')
try:
client = start_local_dask()
assert client.dashboard_link.startswith('user/test/proxy/')
finally:
client.close()
del client
def test_partition_map():
tasks = partition_map(10, str, range(101))
tt = [t for t in tasks]
assert len(tt) == 11
lump = tt[0].compute()
assert len(lump) == 10
assert lump == [str(x) for x in range(10)]
lump = tt[-1].compute()
assert len(lump) == 1
def test_pmap():
try:
client = start_local_dask(threads_per_worker=1,
dashboard_address=None)
xx_it = pmap(str, range(101), client=client)
xx = [x for x in xx_it]
assert xx == [str(x) for x in range(101)]
finally:
client.close()
del client
@pytest.mark.parametrize("blob", [
"some utf8 string",
b"raw bytes",
])
def test_save_blob_file_direct(tmpdir, blob):
tmpdir = Path(str(tmpdir))
fname = str(tmpdir/"file.txt")
mode = "rt" if isinstance(blob, str) else "rb"
assert _save_blob_to_file(blob, fname) == (fname, True)
assert slurp(fname, mode=mode) == blob
fname = str(tmpdir/"missing"/"file.txt")
assert _save_blob_to_file(blob, fname) == (fname, False)
@pytest.mark.parametrize("blob", [
"some utf8 string",
b"raw bytes",
])
def test_save_blob_file(tmpdir, blob, dask_client):
tmpdir = Path(str(tmpdir))
fname = str(tmpdir/"file.txt")
dask_blob = dask.delayed(blob)
mode = "rt" if isinstance(blob, str) else "rb"
rr = save_blob_to_file(dask_blob, fname)
assert dask_client.compute(rr).result() == (fname, True)
assert slurp(fname, mode=mode) == blob
fname = str(tmpdir/"missing"/"file.txt")
rr = save_blob_to_file(dask_blob, fname)
assert dask_client.compute(rr).result() == (fname, False)
@pytest.mark.parametrize("blob", [
"some utf8 string",
b"raw bytes",
])
def test_save_blob_s3_direct(blob, monkeypatch):
region_name = "us-west-2"
blob2 = blob + blob
url = "s3://bucket/file.txt"
url2 = "s3://bucket/file-2.txt"
bucket, _ = s3_url_parse(url)
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "fake-key-id")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "fake-secret")
with moto.mock_s3():
s3 = s3_client(region_name=region_name)
s3.create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': "fake-region"})
assert _save_blob_to_s3(blob, url, region_name=region_name) == (url, True)
assert _save_blob_to_s3(blob2, url2, region_name=region_name) == (url2, True)
bb1 = s3_fetch(url, s3=s3)
bb2 = s3_fetch(url2, s3=s3)
if isinstance(blob, str):
bb1 = bb1.decode("utf8")
bb2 = bb2.decode("utf8")
assert bb1 == blob
assert bb2 == blob2
assert _save_blob_to_s3("", "s3://not-a-bucket/f.txt") == ("s3://not-a-bucket/f.txt", False)
@pytest.mark.parametrize("blob", [
"some utf8 string",
b"raw bytes",
])
def METHOD_NAME(blob, monkeypatch, dask_client):
region_name = "us-west-2"
blob2 = blob + blob
dask_blob = dask.delayed(blob)
dask_blob2 = dask.delayed(blob2)
url = "s3://bucket/file.txt"
url2 = "s3://bucket/file-2.txt"
bucket, _ = s3_url_parse(url)
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "fake-key-id")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "fake-secret")
with moto.mock_s3():
s3 = s3_client(region_name=region_name)
s3.create_bucket(Bucket=bucket, CreateBucketConfiguration={'LocationConstraint': "fake-region"})
rr = save_blob_to_s3(dask_blob, url, region_name=region_name)
assert rr.compute() == (url, True)
rr = save_blob_to_s3(dask_blob2, url2, region_name=region_name)
assert dask_client.compute(rr).result() == (url2, True)
bb1 = s3_fetch(url, s3=s3)
bb2 = s3_fetch(url2, s3=s3)
if isinstance(blob, str):
bb1 = bb1.decode("utf8")
bb2 = bb2.decode("utf8")
assert bb1 == blob
assert bb2 == blob2
def test_memory_functions(monkeypatch):
gig = 10**9
total_mem = get_total_available_memory()
default_safety = min(500*(1 << 20), total_mem//2)
assert total_mem - compute_memory_per_worker() == default_safety
assert total_mem - compute_memory_per_worker(2)*2 == default_safety
assert compute_memory_per_worker(mem_safety_margin=1) == total_mem - 1
assert compute_memory_per_worker(memory_limit='4G') == 4*gig
assert compute_memory_per_worker(2, memory_limit='4G') == 2*gig
assert compute_memory_per_worker(memory_limit='4G',
mem_safety_margin='1G') == 3*gig
total_mem = 1*gig
monkeypatch.setenv('MEM_LIMIT', str(total_mem))
assert get_total_available_memory() == 1*gig
assert compute_memory_per_worker(mem_safety_margin=1) == total_mem - 1 |
370 | cancel | #
# An Introduction to Tkinter
# tkSimpleDialog.py
#
# Copyright (c) 1997 by Fredrik Lundh
#
# [email protected]
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# dialog base class
'''Dialog boxes
This module handles dialog boxes. It contains the following
public symbols:
Dialog -- a base class for dialogs
askinteger -- get an integer from the user
askfloat -- get a float from the user
askstring -- get a string from the user
'''
from Tkinter import *
class Dialog(Toplevel):
'''Class to open dialogs.
This class is intended as a base class for custom dialogs
'''
def __init__(self, parent, title = None):
'''Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
'''
Toplevel.__init__(self, parent)
self.withdraw() # remain invisible for now
# If the master is not viewable, don't
# make the child transient, or else it
# would be opened withdrawn
if parent.winfo_viewable():
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.METHOD_NAME)
if self.parent is not None:
self.geometry("+%d+%d" % (parent.winfo_rootx()+50,
parent.winfo_rooty()+50))
        self.deiconify() # become visible now
self.initial_focus.focus_set()
# wait for window to appear on screen before calling grab_set
self.wait_visibility()
self.grab_set()
self.wait_window(self)
def destroy(self):
'''Destroy the window'''
self.initial_focus = None
Toplevel.destroy(self)
#
# construction hooks
def body(self, master):
'''create dialog body.
return widget that should have initial focus.
This method should be overridden, and is called
by the __init__ method.
'''
pass
def buttonbox(self):
'''add standard button box.
override if you do not want the standard buttons
'''
box = Frame(self)
w = Button(box, text="OK", width=10, command=self.ok, default=ACTIVE)
w.pack(side=LEFT, padx=5, pady=5)
w = Button(box, text="Cancel", width=10, command=self.METHOD_NAME)
w.pack(side=LEFT, padx=5, pady=5)
self.bind("<Return>", self.ok)
self.bind("<Escape>", self.METHOD_NAME)
box.pack()
#
# standard button semantics
def ok(self, event=None):
if not self.validate():
self.initial_focus.focus_set() # put focus back
return
self.withdraw()
self.update_idletasks()
try:
self.apply()
finally:
self.METHOD_NAME()
def METHOD_NAME(self, event=None):
# put focus back to the parent window
if self.parent is not None:
self.parent.focus_set()
self.destroy()
#
# command hooks
def validate(self):
'''validate the data
This method is called automatically to validate the data before the
dialog is destroyed. By default, it always validates OK.
'''
return 1 # override
def apply(self):
'''process the data
This method is called automatically to process the data, *after*
the dialog is destroyed. By default, it does nothing.
'''
pass # override
# --------------------------------------------------------------------
# convenience dialogues
class _QueryDialog(Dialog):
def __init__(self, title, prompt,
initialvalue=None,
minvalue = None, maxvalue = None,
parent = None):
if not parent:
import Tkinter
parent = Tkinter._default_root
self.prompt = prompt
self.minvalue = minvalue
self.maxvalue = maxvalue
self.initialvalue = initialvalue
Dialog.__init__(self, parent, title)
def destroy(self):
self.entry = None
Dialog.destroy(self)
def body(self, master):
w = Label(master, text=self.prompt, justify=LEFT)
w.grid(row=0, padx=5, sticky=W)
self.entry = Entry(master, name="entry")
self.entry.grid(row=1, padx=5, sticky=W+E)
if self.initialvalue is not None:
self.entry.insert(0, self.initialvalue)
self.entry.select_range(0, END)
return self.entry
def validate(self):
import tkMessageBox
try:
result = self.getresult()
except ValueError:
tkMessageBox.showwarning(
"Illegal value",
self.errormessage + "\nPlease try again",
parent = self
)
return 0
if self.minvalue is not None and result < self.minvalue:
tkMessageBox.showwarning(
"Too small",
"The allowed minimum value is %s. "
"Please try again." % self.minvalue,
parent = self
)
return 0
if self.maxvalue is not None and result > self.maxvalue:
tkMessageBox.showwarning(
"Too large",
"The allowed maximum value is %s. "
"Please try again." % self.maxvalue,
parent = self
)
return 0
self.result = result
return 1
class _QueryInteger(_QueryDialog):
errormessage = "Not an integer."
def getresult(self):
return int(self.entry.get())
def askinteger(title, prompt, **kw):
'''get an integer from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is an integer
'''
d = _QueryInteger(title, prompt, **kw)
return d.result
class _QueryFloat(_QueryDialog):
errormessage = "Not a floating point value."
def getresult(self):
return float(self.entry.get())
def askfloat(title, prompt, **kw):
'''get a float from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a float
'''
d = _QueryFloat(title, prompt, **kw)
return d.result
class _QueryString(_QueryDialog):
def __init__(self, *args, **kw):
if "show" in kw:
self.__show = kw["show"]
del kw["show"]
else:
self.__show = None
_QueryDialog.__init__(self, *args, **kw)
def body(self, master):
entry = _QueryDialog.body(self, master)
if self.__show is not None:
entry.configure(show=self.__show)
return entry
def getresult(self):
return self.entry.get()
def askstring(title, prompt, **kw):
'''get a string from the user
Arguments:
title -- the dialog title
prompt -- the label text
**kw -- see SimpleDialog class
Return value is a string
'''
d = _QueryString(title, prompt, **kw)
return d.result
if __name__ == "__main__":
root = Tk()
root.update()
print askinteger("Spam", "Egg count", initialvalue=12*12)
print askfloat("Spam", "Egg weight\n(in tons)", minvalue=1, maxvalue=100)
print askstring("Spam", "Egg label") |
371 | test notifications api returns 403 error if | from django.urls import reverse
from ....notifications.models import Notification
def METHOD_NAME(db, client):
response = client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 403
def test_notifications_api_returns_empty_list_if_user_has_no_notifications(user_client):
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
assert response.json() == {
"results": [],
"hasNext": False,
"hasPrevious": False,
"firstCursor": None,
"lastCursor": None,
"unreadNotifications": None,
}
def test_notifications_api_returns_list_with_all_user_notifications(user, user_client):
read_notification = Notification.objects.create(
user=user, verb="TEST", is_read=True
)
notification = Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
response_json = response.json()
assert [result["id"] for result in response_json["results"]] == [
notification.id,
read_notification.id,
]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_returns_list_with_read_user_notifications(user, user_client):
read_notification = Notification.objects.create(
user=user, verb="TEST", is_read=True
)
Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications") + "?filter=read")
assert response.status_code == 200
response_json = response.json()
assert [result["id"] for result in response_json["results"]] == [
read_notification.id
]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_returns_list_with_unread_user_notifications(
user, user_client
):
Notification.objects.create(user=user, verb="TEST", is_read=True)
notification = Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications") + "?filter=unread")
assert response.status_code == 200
response_json = response.json()
assert [result["id"] for result in response_json["results"]] == [notification.id]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_returns_list_with_notification_by_actor(
user, other_user, user_client
):
notification = Notification.objects.create(
user=user,
actor=other_user,
actor_name=other_user.username,
verb="TEST",
is_read=False,
)
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
response_json = response.json()
assert [result["id"] for result in response_json["results"]] == [notification.id]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_excludes_other_users_notifications(
user, other_user, user_client
):
Notification.objects.create(user=other_user, verb="TEST", is_read=True)
notification = Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
response_json = response.json()
assert [result["id"] for result in response_json["results"]] == [notification.id]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_supports_limiting_results_count(user, user_client):
Notification.objects.create(user=user, verb="TEST", is_read=False)
Notification.objects.create(user=user, verb="TEST", is_read=False)
Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications") + "?limit=2")
assert response.status_code == 200
response_json = response.json()
assert len(response_json["results"]) == 2
assert response_json["hasNext"]
assert not response_json["hasPrevious"]
def test_notifications_api_returns_400_error_if_too_many_results_are_requested(
user, user_client
):
response = user_client.get(reverse("misago:apiv2:notifications") + "?limit=2000")
assert response.status_code == 400
def test_notifications_api_clears_unread_notifications_if_user_has_no_notifications(
user, user_client
):
user.unread_notifications = 10
user.save()
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
response_json = response.json()
assert not response_json["results"]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
assert response_json["unreadNotifications"] is None
user.refresh_from_db()
assert user.unread_notifications == 0
def test_notifications_api_clears_unread_notifications_if_unread_list_is_empty(
user, user_client
):
user.unread_notifications = 10
user.save()
response = user_client.get(reverse("misago:apiv2:notifications") + "?filter=unread")
assert response.status_code == 200
response_json = response.json()
assert not response_json["results"]
assert not response_json["hasNext"]
assert not response_json["hasPrevious"]
assert response_json["unreadNotifications"] is None
user.refresh_from_db()
assert user.unread_notifications == 0
def test_notifications_api_recounts_unread_notifications_if_only_page_has_unread_items(
user, user_client
):
user.unread_notifications = 0
user.save()
Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(reverse("misago:apiv2:notifications"))
assert response.status_code == 200
response_json = response.json()
assert response_json["results"]
assert response_json["unreadNotifications"] == "1"
user.refresh_from_db()
assert user.unread_notifications == 1
def test_notifications_api_recounts_unread_notifications_if_unread_list_has_items(
user, user_client
):
user.unread_notifications = 0
user.save()
notification = Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(
reverse("misago:apiv2:notifications")
+ f"?filter=unread&before={notification.id - 1}"
)
assert response.status_code == 200
response_json = response.json()
assert response_json["results"]
assert response_json["unreadNotifications"] == "1"
user.refresh_from_db()
assert user.unread_notifications == 1
def test_notifications_api_recounts_unread_notifications_if_user_has_new_notifications(
user, user_client
):
user.unread_notifications = 0
user.save()
notification = Notification.objects.create(user=user, verb="TEST", is_read=False)
response = user_client.get(
reverse("misago:apiv2:notifications") + f"?before={notification.id - 1}"
)
assert response.status_code == 200
response_json = response.json()
assert response_json["results"]
assert response_json["unreadNotifications"] == "1"
user.refresh_from_db()
assert user.unread_notifications == 1 |
372 | test nested dictionaries with difference | # -*- coding: utf-8 -*-
# 2018.07.26 --- use DictComparison instead of GcpRequest
#
# Copyright (c) 2016, Tom Melendez <[email protected]>
#
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.community.general.tests.unit.compat import unittest
from ansible_collections.community.general.plugins.module_utils.hwc_utils import are_different_dicts
class HwcDictComparisonTestCase(unittest.TestCase):
def test_simple_no_difference(self):
value1 = {
'foo': 'bar',
'test': 'original'
}
self.assertFalse(are_different_dicts(value1, value1))
def test_simple_different(self):
value1 = {
'foo': 'bar',
'test': 'original'
}
value2 = {
'foo': 'bar',
'test': 'different'
}
value3 = {
'test': 'original'
}
self.assertTrue(are_different_dicts(value1, value2))
self.assertTrue(are_different_dicts(value1, value3))
self.assertTrue(are_different_dicts(value2, value3))
def test_nested_dictionaries_no_difference(self):
value1 = {
'foo': {
'quiet': {
'tree': 'test'
},
'bar': 'baz'
},
'test': 'original'
}
self.assertFalse(are_different_dicts(value1, value1))
def METHOD_NAME(self):
value1 = {
'foo': {
'quiet': {
'tree': 'test'
},
'bar': 'baz'
},
'test': 'original'
}
value2 = {
'foo': {
'quiet': {
'tree': 'baz'
},
'bar': 'hello'
},
'test': 'original'
}
value3 = {
'foo': {
'quiet': {
'tree': 'test'
},
'bar': 'baz'
}
}
self.assertTrue(are_different_dicts(value1, value2))
self.assertTrue(are_different_dicts(value1, value3))
self.assertTrue(are_different_dicts(value2, value3))
def test_arrays_strings_no_difference(self):
value1 = {
'foo': [
'baz',
'bar'
]
}
self.assertFalse(are_different_dicts(value1, value1))
def test_arrays_strings_with_difference(self):
value1 = {
'foo': [
'baz',
'bar',
]
}
value2 = {
'foo': [
'baz',
'hello'
]
}
value3 = {
'foo': [
'bar',
]
}
self.assertTrue(are_different_dicts(value1, value2))
self.assertTrue(are_different_dicts(value1, value3))
self.assertTrue(are_different_dicts(value2, value3))
def test_arrays_dicts_with_no_difference(self):
value1 = {
'foo': [
{
'test': 'value',
'foo': 'bar'
},
{
'different': 'dict'
}
]
}
self.assertFalse(are_different_dicts(value1, value1))
def test_arrays_dicts_with_difference(self):
value1 = {
'foo': [
{
'test': 'value',
'foo': 'bar'
},
{
'different': 'dict'
}
]
}
value2 = {
'foo': [
{
'test': 'value2',
'foo': 'bar2'
},
]
}
value3 = {
'foo': [
{
'test': 'value',
'foo': 'bar'
}
]
}
self.assertTrue(are_different_dicts(value1, value2))
self.assertTrue(are_different_dicts(value1, value3))
self.assertTrue(are_different_dicts(value2, value3)) |
373 | delete | from typing import List, Optional
from pydantic.error_wrappers import ErrorWrapper, ValidationError
from sqlalchemy.sql.expression import true
from dispatch.exceptions import NotFoundError
from dispatch.project import service as project_service
from .models import (
CasePriority,
CasePriorityCreate,
CasePriorityRead,
CasePriorityUpdate,
)
def get(*, db_session, case_priority_id: int) -> Optional[CasePriority]:
"""Returns a case priority based on the given priority id."""
return db_session.query(CasePriority).filter(CasePriority.id == case_priority_id).one_or_none()
def get_default(*, db_session, project_id: int):
"""Returns the default case priority."""
return (
db_session.query(CasePriority)
.filter(CasePriority.default == true())
.filter(CasePriority.project_id == project_id)
.one_or_none()
)
def get_default_or_raise(*, db_session, project_id: int) -> CasePriority:
"""Returns the default case priority or raises a ValidationError if one doesn't exist."""
case_priority = get_default(db_session=db_session, project_id=project_id)
if not case_priority:
raise ValidationError(
[
ErrorWrapper(
NotFoundError(msg="No default case priority defined."),
loc="case_priority",
)
],
model=CasePriorityRead,
)
return case_priority
def get_by_name(*, db_session, project_id: int, name: str) -> Optional[CasePriority]:
"""Returns a case priority based on the given priority name."""
return (
db_session.query(CasePriority)
.filter(CasePriority.name == name)
.filter(CasePriority.project_id == project_id)
.one_or_none()
)
def get_by_name_or_raise(
*, db_session, project_id: int, case_priority_in=CasePriorityRead
) -> CasePriority:
"""Returns the case priority specified or raises ValidationError."""
case_priority = get_by_name(
db_session=db_session, project_id=project_id, name=case_priority_in.name
)
if not case_priority:
raise ValidationError(
[
ErrorWrapper(
NotFoundError(
msg="Case priority not found.",
case_priority=case_priority_in.name,
),
loc="case_priority",
)
],
model=CasePriorityRead,
)
return case_priority
def get_by_name_or_default(
*, db_session, project_id: int, case_priority_in=CasePriorityRead
) -> CasePriority:
"""Returns a case priority based on a name or the default if not specified."""
if case_priority_in:
if case_priority_in.name:
return get_by_name_or_raise(
db_session=db_session,
project_id=project_id,
case_priority_in=case_priority_in,
)
return get_default_or_raise(db_session=db_session, project_id=project_id)
def get_all(*, db_session, project_id: int = None) -> List[Optional[CasePriority]]:
"""Returns all case priorities."""
if project_id:
return db_session.query(CasePriority).filter(CasePriority.project_id == project_id)
return db_session.query(CasePriority)
def get_all_enabled(*, db_session, project_id: int = None) -> List[Optional[CasePriority]]:
"""Returns all enabled case priorities."""
if project_id:
return (
db_session.query(CasePriority)
.filter(CasePriority.project_id == project_id)
.filter(CasePriority.enabled == true())
)
return db_session.query(CasePriority).filter(CasePriority.enabled == true())
def create(*, db_session, case_priority_in: CasePriorityCreate) -> CasePriority:
"""Creates a case priority."""
project = project_service.get_by_name_or_raise(
db_session=db_session, project_in=case_priority_in.project
)
case_priority = CasePriority(
**case_priority_in.dict(exclude={"project", "color"}), project=project
)
if case_priority_in.color:
case_priority.color = case_priority_in.color.as_hex()
db_session.add(case_priority)
db_session.commit()
return case_priority
def update(
*, db_session, case_priority: CasePriority, case_priority_in: CasePriorityUpdate
) -> CasePriority:
"""Updates a case priority."""
case_priority_data = case_priority.dict()
update_data = case_priority_in.dict(skip_defaults=True, exclude={"project", "color"})
for field in case_priority_data:
if field in update_data:
setattr(case_priority, field, update_data[field])
if case_priority_in.color:
case_priority.color = case_priority_in.color.as_hex()
db_session.commit()
return case_priority
def METHOD_NAME(*, db_session, case_priority_id: int):
"""Deletes a case priority."""
db_session.query(CasePriority).filter(CasePriority.id == case_priority_id).METHOD_NAME()
db_session.commit() |
374 | cli | import re
from genie.libs.parser.utils.common import Common
from genie.metaparser import MetaParser
from genie.metaparser.util.schemaengine import (And, Any, Default, Optional,
Or, Schema, Use)
# ======================================================
# Parser for 'show stack-power load-shedding '
# ======================================================
class ShowStackPowerLoadSheddingSchema(MetaParser):
"""Schema for show stack-power load-shedding """
schema = {
'power_stack': {
Any(): {
'power_name': str,
'stack_mode': str,
'stack_topology': str,
'stack_pwr': int,
'total_pwr': int,
'rsvd_pwr': int,
'alloc_pwr': int,
'sw_avail_num': int,
'num_ps': int,
},
},
'priority': {
Any(): {
'sw': int,
'power_name': str,
'stack_priority': str,
'consumd_sw': int,
'consumd_hi': float,
'consumd_lo': float,
'alloc_hi': float,
'alloc_lo': float,
},
},
'totals': {
'consumd_sw': int,
'consumd_hi': float,
'consumd_lo': float,
'alloc_hi': float,
'alloc_lo': float,
},
}
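# Usage sketch (illustrative only, not part of the parser module): with a
# connected pyATS/Genie device object -- the name "device" below is assumed --
# this parser is normally selected through its cli_command, e.g.
#   parsed = device.parse('show stack-power load-shedding')
#   parsed['totals']['consumd_sw']   # summed consumed power reported per switch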
class ShowStackPowerLoadShedding(ShowStackPowerLoadSheddingSchema):
"""Parser for show stack-power load-shedding"""
cli_command = 'show stack-power load-shedding'
def METHOD_NAME(self, output=None):
if output is None:
output = self.device.execute(self.cli_command)
# Powerstack-6 SP-PS Stndaln 1100 0 505 595 1 1
p1 = re.compile(r"^(?P<power_name>\S+)\s+(?P<stack_mode>\S+)\s+(?P<stack_topology>\w+)\s+(?P<stack_pwr>\d+)\s+(?P<total_pwr>\d+)\s+(?P<rsvd_pwr>\d+)\s+(?P<alloc_pwr>\d+)\s+(?P<sw_avail_num>\d+)\s+(?P<num_ps>\d+)$")
# 1 Powerstack-1 2-11-20 108 0.0 0.0 0.0 0.0
p2 = re.compile(r"^(?P<sw>\d+)\s+(?P<power_name>\S+)\s+(?P<stack_priority>\d+\s*-\s*\d+\s*-\s*\d+)\s+(?P<consumd_sw>\d+)\s+(?P<consumd_hi>\S+)\s+(?P<consumd_lo>\S+)\s+(?P<alloc_hi>\S+)\s+(?P<alloc_lo>\S+)$")
# 1109 0.0 0.0 0.0 0.0
p3 = re.compile(r"^Totals:\s+(?P<consumd_sw>\d+)\s+(?P<consumd_hi>\S+)\s+(?P<consumd_lo>\S+)\s+(?P<alloc_hi>\S+)\s+(?P<alloc_lo>\S+)$")
ret_dict = {}
for line in output.splitlines():
line = line.strip()
# Powerstack-6 SP-PS Stndaln 1100 0 505 595 1 1
m = p1.match(line)
if m:
dict_val = m.groupdict()
power_name_var = dict_val['power_name']
power_stack = ret_dict.setdefault('power_stack', {})
power_name_dict = ret_dict['power_stack'].setdefault(power_name_var, {})
power_name_dict['power_name'] = dict_val['power_name']
power_name_dict['stack_mode'] = dict_val['stack_mode']
power_name_dict['stack_topology'] = dict_val['stack_topology']
power_name_dict['stack_pwr'] = int(dict_val['stack_pwr'])
power_name_dict['total_pwr'] = int(dict_val['total_pwr'])
power_name_dict['rsvd_pwr'] = int(dict_val['rsvd_pwr'])
power_name_dict['alloc_pwr'] = int(dict_val['alloc_pwr'])
power_name_dict['sw_avail_num'] = int(dict_val['sw_avail_num'])
power_name_dict['num_ps'] = int(dict_val['num_ps'])
continue
# 1 Powerstack-1 2-11-20 108 0.0 0.0 0.0 0.0
m = p2.match(line)
if m:
dict_val = m.groupdict()
sw_var = dict_val['sw']
priority = ret_dict.setdefault('priority', {})
sw_dict = ret_dict['priority'].setdefault(sw_var, {})
sw_dict['sw'] = int(dict_val['sw'])
sw_dict['power_name'] = dict_val['power_name']
sw_dict['stack_priority'] = dict_val['stack_priority']
sw_dict['consumd_sw'] = int(dict_val['consumd_sw'])
sw_dict['consumd_hi'] = float(dict_val['consumd_hi'])
sw_dict['consumd_lo'] = float(dict_val['consumd_lo'])
sw_dict['alloc_hi'] = float(dict_val['alloc_hi'])
sw_dict['alloc_lo'] = float(dict_val['alloc_lo'])
continue
# 1109 0.0 0.0 0.0 0.0
m = p3.match(line)
if m:
dict_val = m.groupdict()
totals_dict = ret_dict.setdefault('totals', {})
totals_dict['consumd_sw'] = int(dict_val['consumd_sw'])
totals_dict['consumd_hi'] = float(dict_val['consumd_hi'])
totals_dict['consumd_lo'] = float(dict_val['consumd_lo'])
totals_dict['alloc_hi'] = float(dict_val['alloc_hi'])
totals_dict['alloc_lo'] = float(dict_val['alloc_lo'])
continue
return ret_dict |
375 | filter supported wikis | #!/usr/bin/env python
# coding=utf-8
import copy
import os
import json
import codecs
import requests
from jinja2 import Environment, FileSystemLoader
CHINESE_WIKI_LANG = "zh"
SIMPLIFIED_CHINESE_LANG = "zh-hans"
TRADITIONAL_CHINESE_LANG = "zh-hant"
# T114042
NORWEGIAN_BOKMAL_WIKI_LANG = "no"
NORWEGIAN_BOKMAL_LANG = "nb"
# Wikis that cause problems and hence we pretend
# do not exist.
# - "got" -> Gothic runes wiki. The name of got in got
# contains characters outside the Unicode BMP. Android
# hard crashes on these. Let's ignore these fellas
# for now.
# - "mo" -> Moldovan, which automatically redirects to Romanian (ro),
# which already exists in our list.
OSTRICH_WIKIS = [u"got", "mo"]
# Represents a single wiki, along with arbitrary properties of that wiki
# Simple data container object
class Wiki(object):
def __init__(self, lang):
self.lang = lang
self.props = {}
# Represents a list of wikis plus their properties.
# Encapsulates rendering code as well
class WikiList(object):
def __init__(self, wikis):
self.wikis = wikis
self.template_env = Environment(loader=FileSystemLoader(
os.path.join(os.path.dirname(os.path.realpath(__file__)), u"templates")
))
def render(self, template, class_name, **kwargs):
data = {
u"class_name": class_name,
u"wikis": self.wikis
}
data.update(kwargs)
rendered = self.template_env.get_template(template).render(**data)
out = codecs.open(u"../src/main/java/org/wikipedia/staticdata/" + class_name + u".java", u"w", u"utf-8")
out.write(rendered)
out.close()
def build_wiki(lang, english_name, local_name):
wiki = Wiki(lang)
wiki.props["english_name"] = english_name
wiki.props["local_name"] = local_name
return wiki
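# Small illustrative example (not in the original script): build_wiki() packs
# the language code plus display names into a Wiki whose props dict later
# feeds the Jinja templates, e.g.
#   w = build_wiki("eo", "Esperanto", "Esperanto")
#   w.lang                    -> "eo"
#   w.props["english_name"]   -> "Esperanto"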
def list_from_sitematrix():
QUERY_SITEMATRIX = 'https://www.mediawiki.org/w/api.php?action=sitematrix' \
'&format=json&formatversion=2&smtype=language&smstate=all'
print(u"Fetching languages...")
data = json.loads(requests.get(QUERY_SITEMATRIX).text)
wikis = []
for key, value in data[u"sitematrix"].items():
if type(value) is not dict:
continue
site_list = value[u"site"]
if type(site_list) is not list:
continue
wikipedia_url = ""
for site in site_list:
if "wikipedia.org" in site[u"url"] and u"closed" not in site:
wikipedia_url = site[u"url"]
if len(wikipedia_url) == 0:
continue
wikis.append(build_wiki(value[u"code"], value[u"localname"], value[u"name"]))
return wikis
# Remove unsupported wikis.
def METHOD_NAME(wikis):
return [wiki for wiki in wikis if wiki.lang not in OSTRICH_WIKIS]
# Apply manual tweaks to the list of wikis before they're populated.
def preprocess_wikis(wikis):
# Add TestWiki.
wikis.append(build_wiki(lang="test", english_name="Test", local_name="Test"))
return wikis
# Apply manual tweaks to the list of wikis after they're populated.
def postprocess_wikis(wiki_list):
# Add Simplified and Traditional Chinese dialects.
chineseWiki = next((wiki for wiki in wiki_list.wikis if wiki.lang == CHINESE_WIKI_LANG), None)
chineseWikiIndex = wiki_list.wikis.index(chineseWiki)
simplifiedWiki = copy.deepcopy(chineseWiki)
simplifiedWiki.lang = SIMPLIFIED_CHINESE_LANG
simplifiedWiki.props["english_name"] = "Simplified Chinese"
simplifiedWiki.props["local_name"] = "简体中文"
wiki_list.wikis.insert(chineseWikiIndex + 1, simplifiedWiki)
traditionalWiki = copy.deepcopy(chineseWiki)
traditionalWiki.lang = TRADITIONAL_CHINESE_LANG
traditionalWiki.props["english_name"] = "Traditional Chinese"
traditionalWiki.props["local_name"] = "繁體中文"
wiki_list.wikis.insert(chineseWikiIndex + 2, traditionalWiki)
bokmalWiki = next((wiki for wiki in wiki_list.wikis if wiki.lang == NORWEGIAN_BOKMAL_WIKI_LANG), None)
bokmalWiki.lang = NORWEGIAN_BOKMAL_LANG
return wiki_list
# Populate the aliases for "Special:" and "File:" in all wikis
def populate_aliases(wikis):
for wiki in wikis.wikis:
print(u"Fetching Special Page and File alias for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=namespaces"
data = json.loads(requests.get(url).text)
# according to https://www.mediawiki.org/wiki/Manual:Namespace
# -1 seems to be the ID for Special Pages
wiki.props[u"special_alias"] = data[u"query"][u"namespaces"][u"-1"][u"*"]
# 6 is the ID for File pages
wiki.props[u"file_alias"] = data[u"query"][u"namespaces"][u"6"][u"*"]
return wikis
# Populates data on names of main page in each wiki
def populate_main_pages(wikis):
for wiki in wikis.wikis:
print(u"Fetching Main Page for %s" % wiki.lang)
url = u"https://%s.wikipedia.org/w/api.php" % wiki.lang + \
u"?action=query&meta=siteinfo&format=json&siprop=general"
data = json.loads(requests.get(url).text)
wiki.props[u"main_page_name"] = data[u"query"][u"general"][u"mainpage"]
return wikis
# Returns a function that renders a particular template when passed
# a WikiList object
def render_template(template, filename, **kwargs):
def _actual_render(wikis):
wikis.render(template, filename, **kwargs)
return wikis
return _actual_render
# Kinda like reduce(), but special cases first function
def chain(*funcs):
res = funcs[0]()
for func in funcs[1:]:
res = func(res)
chain(
list_from_sitematrix,
METHOD_NAME,
preprocess_wikis,
WikiList,
populate_aliases,
populate_main_pages,
postprocess_wikis,
render_template(u"basichash.java.jinja", u"SpecialAliasData", key=u"special_alias"),
render_template(u"basichash.java.jinja", u"FileAliasData", key=u"file_alias"),
render_template(u"basichash.java.jinja", u"MainPageNameData", key=u"main_page_name"),
) |
376 | wait for | #
# Copyright © 2022 Josep Maria Viñolas Auquer
#
# This file is part of IsardVDI.
#
# IsardVDI is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# IsardVDI is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with IsardVDI. If not, see <https://www.gnu.org/licenses/>.
#
# SPDX-License-Identifier: AGPL-3.0-or-later
import json
import logging
import os
import time
import traceback
from datetime import datetime, timedelta
import requests
from jose import jwt
def header_auth():
token = jwt.encode(
{
"exp": datetime.utcnow() + timedelta(seconds=20),
"kid": "isardvdi",
"data": {
"role_id": "admin",
"category_id": "*",
},
},
os.environ["API_ISARDVDI_SECRET"],
algorithm="HS256",
)
return {"Authorization": "Bearer " + token}
def is_ip(ip):
try:
parts = ip.split(".")
if len(parts) != 4:
return False
for x in parts:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
except:
return False
return True
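# Illustrative notes (not part of the original module) on how is_ip() behaves;
# it is used below to skip TLS certificate verification when the API server is
# addressed by a bare IPv4 address rather than a hostname:
#   is_ip("10.0.0.1")   -> True
#   is_ip("isard-api")  -> False  (not four dot-separated numeric parts)
#   is_ip("300.1.1.1")  -> False  (octet out of range)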
container_base_path = {
"isard-api": "/api/v3",
"isard-scheduler": "/scheduler",
}
class ApiRest:
def __init__(self, service="isard-api", base_url=None):
if base_url:
self.base_url = base_url
self.verify_cert = False if base_url.startswith("http://") else True
else:
if service == "isard-api":
actual_server = os.environ.get("API_DOMAIN")
if service == "isard-scheduler":
actual_server = "isard-scheduler"
if actual_server:
if actual_server == "localhost" or actual_server.startswith("isard-"):
self.base_url = (
"http://"
+ actual_server
+ ":5000"
+ container_base_path[service]
)
self.verify_cert = False
else:
self.base_url = (
"https://" + actual_server + container_base_path[service]
)
self.verify_cert = False if is_ip(actual_server) else True
else:
self.base_url = (
"http://" + service + ":5000" + container_base_path[service]
)
self.verify_cert = False
self.service = service
logging.debug(
"Api base url for service " + service + " set to " + self.base_url
)
def METHOD_NAME(self, max_retries=-1, timeout=1):
while max_retries:
try:
logging.info(
"Check connection to "
+ self.service
+ " container at "
+ self.base_url
)
self.get()
max_retries = 0
except:
logging.error(
"Unable to reach " + self.service + " container at " + self.base_url
)
time.sleep(timeout)
if max_retries >= 0:
max_retries -= 1
def get(self, url="", timeout=5):
resp = requests.get(
self.base_url + url,
headers=header_auth(),
verify=self.verify_cert,
timeout=timeout,
)
resp.raise_for_status()
return json.loads(resp.text)
def post(self, url, data={}):
resp = requests.post(
self.base_url + url,
json=data,
headers=header_auth(),
verify=self.verify_cert,
)
resp.raise_for_status()
return json.loads(resp.text)
def put(self, url, data={}):
resp = requests.put(
self.base_url + url,
json=data,
headers=header_auth(),
verify=self.verify_cert,
)
resp.raise_for_status()
return json.loads(resp.text)
def delete(self, url, data={}):
resp = requests.delete(
self.base_url + url,
json=data,
headers=header_auth(),
verify=self.verify_cert,
)
resp.raise_for_status()
return json.loads(resp.text) |
377 | create chunked tasks signatures | """Loads and instantiates Celery, registers our tasks, and performs any other
necessary Celery-related setup. Also provides Celery-related utility methods,
in particular exposing a shortcut to the @task decorator.
Please note that this module should not import model-related code because
Django may not be properly set-up during import time (e.g if this module
is directly being run/imported by Celery)
"""
import datetime
from django.core.cache import cache
from celery import Celery, group
from celery.signals import task_failure, task_postrun, task_prerun
from django_statsd.clients import statsd
from kombu import serialization
from post_request_task.task import PostRequestTask
import olympia.core.logger
log = olympia.core.logger.getLogger('z.task')
class AMOTask(PostRequestTask):
"""A custom celery Task base class that inherits from `PostRequestTask`
to delay tasks and adds a special hack to still perform a serialization
roundtrip in eager mode, to mimic what happens in production in tests.
The serialization is applied both to apply_async() and apply() to work
around the fact that celery groups have their own apply_async() method that
directly calls apply() on each task in eager mode.
Note that we should never somehow be using eager mode with actual workers,
that would cause them to try to serialize data that has already been
serialized...
"""
abstract = True
def _serialize_args_and_kwargs_for_eager_mode(
self, args=None, kwargs=None, **options
):
producer = options.get('producer')
with app.producer_or_acquire(producer) as eager_producer:
serializer = options.get('serializer', eager_producer.serializer)
body = args, kwargs
content_type, content_encoding, data = serialization.dumps(body, serializer)
args, kwargs = serialization.loads(data, content_type, content_encoding)
return args, kwargs
def apply_async(self, args=None, kwargs=None, **options):
if app.conf.task_always_eager:
args, kwargs = self._serialize_args_and_kwargs_for_eager_mode(
args=args, kwargs=kwargs, **options
)
return super().apply_async(args=args, kwargs=kwargs, **options)
def apply(self, args=None, kwargs=None, **options):
if app.conf.task_always_eager:
args, kwargs = self._serialize_args_and_kwargs_for_eager_mode(
args=args, kwargs=kwargs, **options
)
return super().apply(args=args, kwargs=kwargs, **options)
app = Celery('olympia', task_cls=AMOTask)
task = app.task
app.config_from_object('django.conf:settings', namespace='CELERY')
app.autodiscover_tasks()
@task_failure.connect
def process_failure_signal(
exception, traceback, sender, task_id, signal, args, kwargs, einfo, **kw
):
"""Catch any task failure signals from within our worker processes and log
them as exceptions, so they appear in Sentry and ordinary logging
output."""
exc_info = (type(exception), exception, traceback)
log.error(
'Celery TASK exception: {0.__name__}: {1}'.format(*exc_info),
exc_info=exc_info,
extra={
'data': {
'task_id': task_id,
'sender': sender,
'args': args,
'kwargs': kwargs,
}
},
)
@task_prerun.connect
def start_task_timer(task_id, task, **kw):
timer = TaskTimer()
log.info(
'starting task timer; id={id}; name={name}; '
'current_dt={current_dt}'.format(
id=task_id, name=task.name, current_dt=timer.current_datetime
)
)
# Cache start time for one hour. This will allow us to catch crazy long
# tasks. Currently, stats indexing tasks run around 20-30 min.
expiration = 60 * 60
cache_key = timer.cache_key(task_id)
cache.set(cache_key, timer.current_epoch_ms, expiration)
@task_postrun.connect
def track_task_run_time(task_id, task, **kw):
timer = TaskTimer()
start_time = cache.get(timer.cache_key(task_id))
if start_time is None:
log.info(
'could not track task run time; id={id}; name={name}; '
'current_dt={current_dt}'.format(
id=task_id, name=task.name, current_dt=timer.current_datetime
)
)
else:
run_time = timer.current_epoch_ms - start_time
log.info(
'tracking task run time; id={id}; name={name}; '
'run_time={run_time}; current_dt={current_dt}'.format(
id=task_id,
name=task.name,
current_dt=timer.current_datetime,
run_time=run_time,
)
)
statsd.timing(f'tasks.{task.name}', run_time)
cache.delete(timer.cache_key(task_id))
class TaskTimer:
def __init__(self):
from olympia.amo.utils import utc_millesecs_from_epoch
self.current_datetime = datetime.datetime.now()
self.current_epoch_ms = utc_millesecs_from_epoch(self.current_datetime)
def cache_key(self, task_id):
return f'task_start_time.{task_id}'
def METHOD_NAME(
task, items, chunk_size, task_args=None, task_kwargs=None
):
"""
Splits a task depending on a list of items into a bunch of tasks of the
specified chunk_size, passing a chunked queryset and optional additional
arguments to each.
Return the group of task signatures without executing it."""
from olympia.amo.utils import chunked
if task_args is None:
task_args = ()
if task_kwargs is None:
task_kwargs = {}
tasks = [
task.si(chunk, *task_args, **task_kwargs)
for chunk in chunked(items, chunk_size)
]
log.info('Created a group of %s tasks for task "%s".', len(tasks), str(task.name))
return group(tasks) |
378 | test file not found in home | import netrc, os, unittest, sys, tempfile, textwrap
from test import support
from test.support import os_helper
class NetrcTestCase(unittest.TestCase):
def make_nrc(self, test_data):
test_data = textwrap.dedent(test_data)
mode = 'w'
if sys.platform != 'cygwin':
mode += 't'
temp_fd, temp_filename = tempfile.mkstemp()
with os.fdopen(temp_fd, mode=mode, encoding="utf-8") as fp:
fp.write(test_data)
self.addCleanup(os.unlink, temp_filename)
return netrc.netrc(temp_filename)
def test_default(self):
nrc = self.make_nrc("""\
machine host1.domain.com login log1 password pass1 account acct1
default login log2 password pass2
""")
self.assertEqual(nrc.hosts['host1.domain.com'],
('log1', 'acct1', 'pass1'))
self.assertEqual(nrc.hosts['default'], ('log2', None, 'pass2'))
nrc2 = self.make_nrc(nrc.__repr__())
self.assertEqual(nrc.hosts, nrc2.hosts)
def test_macros(self):
nrc = self.make_nrc("""\
macdef macro1
line1
line2
macdef macro2
line3
line4
""")
self.assertEqual(nrc.macros, {'macro1': ['line1\n', 'line2\n'],
'macro2': ['line3\n', 'line4\n']})
def _test_passwords(self, nrc, passwd):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['host.domain.com'], ('log', 'acct', passwd))
def test_password_with_leading_hash(self):
self._test_passwords("""\
machine host.domain.com login log password #pass account acct
""", '#pass')
def test_password_with_trailing_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pass# account acct
""", 'pass#')
def test_password_with_internal_hash(self):
self._test_passwords("""\
machine host.domain.com login log password pa#ss account acct
""", 'pa#ss')
def _test_comment(self, nrc, passwd='pass'):
nrc = self.make_nrc(nrc)
self.assertEqual(nrc.hosts['foo.domain.com'], ('bar', None, passwd))
self.assertEqual(nrc.hosts['bar.domain.com'], ('foo', None, 'pass'))
def test_comment_before_machine_line(self):
self._test_comment("""\
# comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_no_space(self):
self._test_comment("""\
#comment
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_before_machine_line_hash_only(self):
self._test_comment("""\
#
machine foo.domain.com login bar password pass
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line(self):
self._test_comment("""\
machine foo.domain.com login bar password pass # comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_no_space(self):
self._test_comment("""\
machine foo.domain.com login bar password pass #comment
machine bar.domain.com login foo password pass
""")
def test_comment_at_end_of_machine_line_pass_has_hash(self):
self._test_comment("""\
machine foo.domain.com login bar password #pass #comment
machine bar.domain.com login foo password pass
""", '#pass')
@unittest.skipUnless(os.name == 'posix', 'POSIX only test')
def test_security(self):
# This test is incomplete since we are normally not run as root and
# therefore can't test the file ownership being wrong.
with os_helper.temp_cwd(None) as d:
fn = os.path.join(d, '.netrc')
with open(fn, 'wt') as f:
f.write("""\
machine foo.domain.com login bar password pass
default login foo password pass
""")
with os_helper.EnvironmentVarGuard() as environ:
environ.set('HOME', d)
os.chmod(fn, 0o600)
nrc = netrc.netrc()
self.assertEqual(nrc.hosts['foo.domain.com'],
('bar', None, 'pass'))
os.chmod(fn, 0o622)
self.assertRaises(netrc.NetrcParseError, netrc.netrc)
def METHOD_NAME(self):
with os_helper.temp_cwd(None) as d:
with os_helper.EnvironmentVarGuard() as environ:
environ.set('HOME', d)
self.assertRaises(FileNotFoundError, netrc.netrc)
def test_file_not_found_explicit(self):
self.assertRaises(FileNotFoundError, netrc.netrc,
file='unlikely_netrc')
def test_home_not_set(self):
with os_helper.temp_cwd(None) as fake_home:
fake_netrc_path = os.path.join(fake_home, '.netrc')
with open(fake_netrc_path, 'w') as f:
f.write('machine foo.domain.com login bar password pass')
os.chmod(fake_netrc_path, 0o600)
orig_expanduser = os.path.expanduser
called = []
def fake_expanduser(s):
called.append(s)
with os_helper.EnvironmentVarGuard() as environ:
environ.set('HOME', fake_home)
environ.set('USERPROFILE', fake_home)
result = orig_expanduser(s)
return result
with support.swap_attr(os.path, 'expanduser', fake_expanduser):
nrc = netrc.netrc()
login, account, password = nrc.authenticators('foo.domain.com')
self.assertEqual(login, 'bar')
self.assertTrue(called)
if __name__ == "__main__":
unittest.main() |
379 | lock | """
Control concurrency of steps within state execution using zookeeper
===================================================================
:depends: kazoo
:configuration: See :py:mod:`salt.modules.zookeeper` for setup instructions.
This module allows you to "wrap" a state's execution with concurrency control.
This is useful to protect against all hosts executing highstate simultaneously
if your services don't all HUP restart. The common way of protecting against this
is to run in batch mode, but that doesn't protect from another person running
the same batch command (and thereby having 2x the number of nodes deploying at once).
This module will block while acquiring a slot, meaning that however the command gets
called it will coordinate with zookeeper to ensure that no more than max_concurrency
steps are executing with a single path.
.. code-block:: yaml
acquire_lock:
zk_concurrency.lock:
        - name: /trafficserver
- zk_hosts: 'zookeeper:2181'
- max_concurrency: 4
- prereq:
- service: trafficserver
trafficserver:
service.running:
- watch:
- file: /etc/trafficserver/records.config
/etc/trafficserver/records.config:
file.managed:
- source: salt://records.config
release_lock:
zk_concurrency.unlock:
- name: /trafficserver
- require:
- service: trafficserver
This example would allow the file state to change, but would limit the
concurrency of the trafficserver service restart to 4.
"""
# TODO: use depends decorator to make these per function deps, instead of all or nothing
REQUIRED_FUNCS = (
"zk_concurrency.lock",
"zk_concurrency.unlock",
"zk_concurrency.party_members",
)
__virtualname__ = "zk_concurrency"
def __virtual__():
if not all(func in __salt__ for func in REQUIRED_FUNCS):
return (False, "zk_concurrency module could not be loaded")
return __virtualname__
def METHOD_NAME(
name,
zk_hosts=None,
identifier=None,
max_concurrency=1,
timeout=None,
ephemeral_lease=False,
profile=None,
scheme=None,
username=None,
password=None,
default_acl=None,
):
"""
Block state execution until you are able to get the lock (or hit the timeout)
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
conn_kwargs = {
"profile": profile,
"scheme": scheme,
"username": username,
"password": password,
"default_acl": default_acl,
}
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Attempt to acquire lock"
return ret
if identifier is None:
identifier = __grains__["id"]
locked = __salt__["zk_concurrency.lock"](
name,
zk_hosts,
identifier=identifier,
max_concurrency=max_concurrency,
timeout=timeout,
ephemeral_lease=ephemeral_lease,
**conn_kwargs
)
if locked:
ret["result"] = True
ret["comment"] = "lock acquired"
else:
ret["comment"] = "Unable to acquire lock"
return ret
def unlock(
name,
zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example)
identifier=None,
max_concurrency=1,
ephemeral_lease=False,
profile=None,
scheme=None,
username=None,
password=None,
default_acl=None,
):
"""
Remove lease from semaphore.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
conn_kwargs = {
"profile": profile,
"scheme": scheme,
"username": username,
"password": password,
"default_acl": default_acl,
}
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Released lock if it is here"
return ret
if identifier is None:
identifier = __grains__["id"]
unlocked = __salt__["zk_concurrency.unlock"](
name,
zk_hosts=zk_hosts,
identifier=identifier,
max_concurrency=max_concurrency,
ephemeral_lease=ephemeral_lease,
**conn_kwargs
)
if unlocked:
ret["result"] = True
else:
ret["comment"] = "Unable to find lease for path {}".format(name)
return ret
def min_party(
name,
zk_hosts,
min_nodes,
blocking=False,
profile=None,
scheme=None,
username=None,
password=None,
default_acl=None,
):
"""
Ensure that there are `min_nodes` in the party at `name`, optionally blocking if not available.
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
conn_kwargs = {
"profile": profile,
"scheme": scheme,
"username": username,
"password": password,
"default_acl": default_acl,
}
if __opts__["test"]:
ret["result"] = None
ret["comment"] = "Attempt to ensure min_party"
return ret
nodes = __salt__["zk_concurrency.party_members"](
name, zk_hosts, min_nodes, blocking=blocking, **conn_kwargs
)
if not isinstance(nodes, list):
raise Exception(
"Error from zk_concurrency.party_members, return was not a list: {}".format(
nodes
)
)
num_nodes = len(nodes)
if num_nodes >= min_nodes or blocking:
ret["result"] = None if __opts__["test"] else True
if not blocking:
ret["comment"] = "Currently {} nodes, which is >= {}".format(
num_nodes, min_nodes
)
else:
ret["comment"] = (
"Blocked until {} nodes were available. Unblocked after {} nodes became"
" available".format(min_nodes, num_nodes)
)
else:
ret["result"] = False
ret["comment"] = "Currently {} nodes, which is < {}".format(
num_nodes, min_nodes
)
return ret |
380 | test using half static synapse singles | # Copyright (c) 2017 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyNN.spiNNaker as sim
from spinnaker_testbase import BaseTestCase
WEIGHT = 5
DELAY = 2
class TestFromListConnector(BaseTestCase):
# NO unittest_setup() as sim.setup is called
def check_weights(
self, projection, aslist, w_index, d_index, sources, destinations):
from_pro = projection.get(["weight", "delay"], "list")
aslist.sort()
as_index = 0
for (source, dest, weight, delay) in from_pro:
from_as = aslist[as_index]
while from_as[0] >= sources:
as_index += 1
from_as = aslist[as_index]
while from_as[1] >= destinations:
as_index += 1
from_as = aslist[as_index]
self.assertEqual(from_as[0], source)
self.assertEqual(from_as[1], dest)
if w_index:
self.assertAlmostEqual(from_as[w_index], weight, 4)
else:
self.assertEqual(WEIGHT, weight)
if d_index:
self.assertAlmostEqual(from_as[d_index], delay, 4)
else:
self.assertEqual(DELAY, delay)
as_index += 1
while as_index < len(aslist):
from_as = aslist[as_index]
assert from_as[0] >= sources or from_as[1] >= destinations
as_index += 1
def check_other_connect(
self, aslist, column_names=None, w_index=2, d_index=3, sources=6,
destinations=8):
sim.setup(1.0)
pop1 = sim.Population(sources, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(destinations, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=WEIGHT, delay=DELAY)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector(
aslist, column_names=column_names),
synapse_type=synapse_type)
sim.run(0)
self.check_weights(
projection, aslist, w_index, d_index, sources, destinations)
sim.end()
def test_simple(self):
as_list = [
(0, 0, 0.1, 10),
(3, 0, 0.2, 11),
(2, 3, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_list_too_big(self):
as_list = [
(0, 0, 0.1, 10),
(13, 0, 0.2, 11),
(2, 13, 0.3, 12),
(5, 1, 0.4, 13),
(0, 1, 0.5, 14),
]
self.check_other_connect(as_list)
def test_no_delays(self):
as_list = [
(0, 0, 0.1),
(3, 0, 0.2),
(2, 3, 0.3),
(5, 1, 0.4),
(0, 1, 0.5),
]
self.check_other_connect(
as_list, column_names=["weight"], d_index=None)
def test_no_weight(self):
as_list = [
(0, 0, 10),
(3, 0, 11),
(2, 3, 12),
(5, 1, 13),
(0, 1, 14),
]
self.check_other_connect(
as_list, column_names=["delay"], d_index=2, w_index=None)
def test_invert(self):
as_list = [
(0, 0, 10, 0.1),
(3, 0, 11, 0.2),
(2, 3, 12, 0.3),
(5, 1, 13, 0.4),
(0, 1, 14, 0.5),
]
self.check_other_connect(
as_list, column_names=["delay", "weight"], w_index=3, d_index=2)
def test_big(self):
sources = 200
destinations = 300
aslist = []
for s in range(sources):
for d in range(destinations):
aslist.append((s, d, 5, 2))
self.check_other_connect(
aslist, column_names=None, w_index=2, d_index=3, sources=sources,
destinations=destinations)
def test_get_before_run(self):
sim.setup(1.0)
pop1 = sim.Population(3, sim.IF_curr_exp(), label="pop1")
pop2 = sim.Population(3, sim.IF_curr_exp(), label="pop2")
synapse_type = sim.StaticSynapse(weight=5, delay=1)
projection = sim.Projection(
pop1, pop2, sim.FromListConnector([[0, 0]]),
synapse_type=synapse_type)
weights = projection.get(["weight"], "list")
sim.run(0)
self.assertEqual(1, len(weights))
sim.end()
def test_using_static_synapse_singles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=0.7, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.7, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def METHOD_NAME(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0, 0.7), (1, 1, 0.3)]
conn = sim.Projection(input, pop, sim.FromListConnector(
as_list, column_names=["weight"]),
sim.StaticSynapse(weight=0.6, delay=3))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
sim.end()
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 3)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
def test_using_static_synapse_doubles(self):
sim.setup(timestep=1.0)
input = sim.Population(2, sim.SpikeSourceArray([0]), label="input")
pop = sim.Population(2, sim.IF_curr_exp(), label="pop")
as_list = [(0, 0), (1, 1)]
conn = sim.Projection(input, pop, sim.FromListConnector(as_list),
sim.StaticSynapse(weight=[0.7, 0.3],
delay=[3, 33]))
sim.run(1)
weights = conn.get(['weight', 'delay'], 'list')
target = [(0, 0, 0.7, 3), (1, 1, 0.3, 33)]
for i in range(2):
for j in range(2):
self.assertAlmostEqual(weights[i][j], target[i][j], places=3)
sim.end() |
381 | stop | """
daemontools service module. This module creates a daemontools-type service
watcher.
This module is compatible with the :mod:`service <salt.states.service>` states,
so it can be used to maintain services using the ``provider`` argument:
.. code-block:: yaml
myservice:
service.running:
- provider: daemontools
"""
import logging
import os
import os.path
import re
import salt.utils.path
from salt.exceptions import CommandExecutionError
# Function alias to not shadow built-ins.
__func_alias__ = {"reload_": "reload"}
log = logging.getLogger(__name__)
__virtualname__ = "daemontools"
VALID_SERVICE_DIRS = [
"/service",
"/var/service",
"/etc/service",
]
SERVICE_DIR = None
for service_dir in VALID_SERVICE_DIRS:
if os.path.exists(service_dir):
SERVICE_DIR = service_dir
break
def __virtual__():
# Ensure that daemontools is installed properly.
BINS = frozenset(("svc", "supervise", "svok"))
if all(salt.utils.path.which(b) for b in BINS) and SERVICE_DIR:
return __virtualname__
return (False, "Missing dependency: {}".format(BINS))
def _service_path(name):
"""
build service path
"""
if not SERVICE_DIR:
raise CommandExecutionError("Could not find service directory.")
return "{}/{}".format(SERVICE_DIR, name)
# -- states.service compatible args
def start(name):
"""
Starts service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.start <service name>
"""
__salt__["file.remove"]("{}/down".format(_service_path(name)))
cmd = "svc -u {}".format(_service_path(name))
return not __salt__["cmd.retcode"](cmd, python_shell=False)
# -- states.service compatible args
def METHOD_NAME(name):
"""
Stops service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.stop <service name>
"""
__salt__["file.touch"]("{}/down".format(_service_path(name)))
cmd = "svc -d {}".format(_service_path(name))
return not __salt__["cmd.retcode"](cmd, python_shell=False)
def term(name):
"""
Send a TERM to service via daemontools
CLI Example:
.. code-block:: bash
salt '*' daemontools.term <service name>
"""
cmd = "svc -t {}".format(_service_path(name))
return not __salt__["cmd.retcode"](cmd, python_shell=False)
# -- states.service compatible
def reload_(name):
"""
Wrapper for term()
CLI Example:
.. code-block:: bash
salt '*' daemontools.reload <service name>
"""
term(name)
# -- states.service compatible
def restart(name):
"""
Restart service via daemontools. This will stop/start service
CLI Example:
.. code-block:: bash
salt '*' daemontools.restart <service name>
"""
ret = "restart False"
if METHOD_NAME(name) and start(name):
ret = "restart True"
return ret
# -- states.service compatible
def full_restart(name):
"""
Calls daemontools.restart() function
CLI Example:
.. code-block:: bash
salt '*' daemontools.full_restart <service name>
"""
restart(name)
# -- states.service compatible
def status(name, sig=None):
"""
Return the status for a service via daemontools, return pid if running
CLI Example:
.. code-block:: bash
salt '*' daemontools.status <service name>
"""
cmd = "svstat {}".format(_service_path(name))
out = __salt__["cmd.run_stdout"](cmd, python_shell=False)
try:
pid = re.search(r"\(pid (\d+)\)", out).group(1)
except AttributeError:
pid = ""
return pid
def available(name):
"""
Returns ``True`` if the specified service is available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' daemontools.available foo
"""
return name in get_all()
def missing(name):
"""
The inverse of daemontools.available.
Returns ``True`` if the specified service is not available, otherwise returns
``False``.
CLI Example:
.. code-block:: bash
salt '*' daemontools.missing foo
"""
return name not in get_all()
def get_all():
"""
Return a list of all available services
CLI Example:
.. code-block:: bash
salt '*' daemontools.get_all
"""
if not SERVICE_DIR:
raise CommandExecutionError("Could not find service directory.")
    # List all daemontools services in the service directory
return sorted(os.listdir(SERVICE_DIR))
def enabled(name, **kwargs):
"""
Return True if the named service is enabled, false otherwise
A service is considered enabled if in your service directory:
    - an executable ./run file exists
- a file named "down" does not exist
.. versionadded:: 2015.5.7
name
Service name
CLI Example:
.. code-block:: bash
salt '*' daemontools.enabled <service name>
"""
if not available(name):
log.error("Service %s not found", name)
return False
run_file = os.path.join(SERVICE_DIR, name, "run")
down_file = os.path.join(SERVICE_DIR, name, "down")
return (
os.path.isfile(run_file)
and os.access(run_file, os.X_OK)
and not os.path.isfile(down_file)
)
def disabled(name):
"""
    Return True if the named service is disabled, False otherwise
.. versionadded:: 2015.5.6
CLI Example:
.. code-block:: bash
salt '*' daemontools.disabled <service name>
"""
return not enabled(name) |
382 | define simple | import logging
from collections import defaultdict
from typing import Callable, Mapping, Sequence
from dagster import DagsterEvent, job, op
from dagster._core.definitions.graph_definition import GraphDefinition
from dagster._core.definitions.job_definition import JobDefinition
from dagster._core.definitions.node_definition import NodeDefinition
from dagster._core.events import DagsterEventType
from dagster._core.events.log import EventLogEntry, construct_event_logger
from dagster._loggers import colored_console_logger
from dagster._serdes import deserialize_value
def get_loggers(event_callback):
return {
"callback": construct_event_logger(event_callback),
"console": colored_console_logger,
}
def single_dagster_event(
events: Mapping[DagsterEventType, Sequence[DagsterEvent]], event_type: DagsterEventType
) -> DagsterEvent:
assert event_type in events
return events[event_type][0]
def define_event_logging_job(
name: str,
node_defs: Sequence[NodeDefinition],
event_callback: Callable[[EventLogEntry], None],
deps=None,
) -> JobDefinition:
return JobDefinition(
graph_def=GraphDefinition(
name=name,
node_defs=node_defs,
dependencies=deps,
),
logger_defs=get_loggers(event_callback),
)
def test_empty_job():
events = defaultdict(list)
def _event_callback(record):
assert isinstance(record, EventLogEntry)
if record.is_dagster_event:
events[record.dagster_event.event_type].append(record)
job_def = JobDefinition(
graph_def=GraphDefinition(
name="empty_job",
node_defs=[],
),
logger_defs=get_loggers(_event_callback),
)
result = job_def.execute_in_process({"loggers": {"callback": {}, "console": {}}})
assert result.success
assert events
assert single_dagster_event(events, DagsterEventType.PIPELINE_START).job_name == "empty_job"
assert single_dagster_event(events, DagsterEventType.PIPELINE_SUCCESS).job_name == "empty_job"
def test_single_op_job_success():
events = defaultdict(list)
@op
def op_one():
return 1
def _event_callback(record):
if record.is_dagster_event:
events[record.dagster_event.event_type].append(record)
job_def = JobDefinition(
graph_def=GraphDefinition(
name="single_op_job",
node_defs=[op_one],
),
logger_defs=get_loggers(_event_callback),
tags={"foo": "bar"},
)
result = job_def.execute_in_process({"loggers": {"callback": {}}})
assert result.success
assert events
start_event = single_dagster_event(events, DagsterEventType.STEP_START)
assert start_event.job_name == "single_op_job"
assert start_event.dagster_event.node_name == "op_one"
# persisted logging tags contain pipeline_name but not pipeline_tags
assert start_event.dagster_event.logging_tags["job_name"] == "single_op_job"
assert "pipeline_tags" not in start_event.dagster_event.logging_tags
output_event = single_dagster_event(events, DagsterEventType.STEP_OUTPUT)
assert output_event
assert output_event.dagster_event.step_output_data.output_name == "result"
success_event = single_dagster_event(events, DagsterEventType.STEP_SUCCESS)
assert success_event.job_name == "single_op_job"
assert success_event.dagster_event.node_name == "op_one"
assert isinstance(success_event.dagster_event.step_success_data.duration_ms, float)
assert success_event.dagster_event.step_success_data.duration_ms > 0.0
def test_single_op_job_failure():
events = defaultdict(list)
@op
def op_one():
raise Exception("nope")
def _event_callback(record):
if record.is_dagster_event:
events[record.dagster_event.event_type].append(record)
single_op_job = JobDefinition(
graph_def=GraphDefinition(
name="single_op_job",
node_defs=[op_one],
),
logger_defs=get_loggers(_event_callback),
)
result = single_op_job.execute_in_process({"loggers": {"callback": {}}}, raise_on_error=False)
assert not result.success
start_event = single_dagster_event(events, DagsterEventType.STEP_START)
assert start_event.job_name == "single_op_job"
assert start_event.dagster_event.node_name == "op_one"
assert start_event.level == logging.DEBUG
failure_event = single_dagster_event(events, DagsterEventType.STEP_FAILURE)
assert failure_event.job_name == "single_op_job"
assert failure_event.dagster_event.node_name == "op_one"
assert failure_event.level == logging.ERROR
def METHOD_NAME():
@op
def yes():
return "yes"
@job
def simple():
yes()
return simple
# Generated by printing out an existing serialized event and modifying the event type and
# event_specific_data to types that don't exist yet, to simulate the case where an old
# client deserializes events written from a newer Dagster version
SERIALIZED_EVENT_FROM_THE_FUTURE_WITH_EVENT_SPECIFIC_DATA = (
'{"__class__": "DagsterEvent", "event_specific_data": {"__class__": "FutureEventData", "foo":'
' null, "bar": null, "baz": null, "metadata_entries": [{"__class__": "EventMetadataEntry",'
' "description": null, "entry_data": {"__class__": "TextMetadataEntryData", "text": "999"},'
' "label": "pid"}]}, "event_type_value": "EVENT_TYPE_FROM_THE_FUTURE", "logging_tags": {},'
' "message": "howdy", "pid": null, "pipeline_name": "nonce", "solid_handle": null,'
' "step_handle": null, "step_key": "future_step", "step_kind_value": null}'
)
SERIALIZED_EVENT_FROM_THE_FUTURE_WITHOUT_EVENT_SPECIFIC_DATA = (
'{"__class__": "DagsterEvent", "event_specific_data": null, "event_type_value":'
' "EVENT_TYPE_FROM_THE_FUTURE", "logging_tags": {}, "message": "howdy", "pid": null,'
' "pipeline_name": "nonce", "solid_handle": null, "step_handle": null, "step_key":'
' "future_step", "step_kind_value": null}'
)
def test_event_forward_compat_with_event_specific_data():
result = deserialize_value(
SERIALIZED_EVENT_FROM_THE_FUTURE_WITH_EVENT_SPECIFIC_DATA, DagsterEvent
)
assert (
result.message
== "Could not deserialize event of type EVENT_TYPE_FROM_THE_FUTURE. This event may have"
' been written by a newer version of Dagster. Original message: "howdy"'
)
assert result.event_type_value == DagsterEventType.ENGINE_EVENT.value
assert result.job_name == "nonce"
assert result.step_key == "future_step"
assert (
'Attempted to deserialize class "FutureEventData" which is not in the whitelist.'
in result.event_specific_data.error.message
)
def test_event_forward_compat_without_event_specific_data():
result = deserialize_value(
SERIALIZED_EVENT_FROM_THE_FUTURE_WITHOUT_EVENT_SPECIFIC_DATA, DagsterEvent
)
assert (
result.message
== "Could not deserialize event of type EVENT_TYPE_FROM_THE_FUTURE. This event may have"
' been written by a newer version of Dagster. Original message: "howdy"'
)
assert result.event_type_value == DagsterEventType.ENGINE_EVENT.value
assert result.job_name == "nonce"
assert result.step_key == "future_step"
assert (
"'EVENT_TYPE_FROM_THE_FUTURE' is not a valid DagsterEventType"
in result.event_specific_data.error.message
) |
383 | seconds from string | #!/usr/bin/env python
from __future__ import print_function
import sys
import time
import calendar
""" Converts between a 64bit timestamp and a human readable string
usage: ./convertTime.py [-l] time1 [time2 ...]
- "-l" to use local time
- "time" is either a 64bit timestamp or a string formatted "DD/MM/YYYY HH:MM:SS"
"""
def pack(high,low):
"""pack high,low 32bit unsigned int to one unsigned 64bit long long
Note:the print value of result number may appear signed, if the sign bit is used.
"""
h=high<<32
return (h|low)
def METHOD_NAME(t, localTime = True):
"""convert from a string in the format output from timeStamptoDate to a 32bit seconds from the epoch.
If the time is UTC, the boolean value localTime must be set to False.
The format accepted is \"DD/MM/YYYY HH:MM:SS\". The year must be the full number.
"""
# time string, format -> time structure
timeStruct = time.strptime(t, "%d/%m/%Y %H:%M:%S")
if localTime:
# time structure -> timestamp float -> timestamp int
return int(time.mktime(timeStruct))
else:
# time structrue -> timestamp int
return calendar.timegm(timeStruct)
def packFromString(s, localTime = True):
"""pack from a string in the format output from timeStamptoDate to a 64bit timestamp.
If the time is UTC, the boolean value localTime must be set to False.
The format accepted is \"DD/MM/YYYY HH:MM:SS\" . The year must be the full number.
"""
return pack(METHOD_NAME(s, localTime), 0)
def unpack(i):
"""unpack 64bit unsigned long long into 2 32bit unsigned int, return tuple (high,low)
"""
high=i>>32
low=i&0xFFFFFFFF
return(high,low)
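# Worked example (illustrative, not part of the original script): the seconds
# value lives in the high 32 bits, so pack() and unpack() are inverses, e.g.
#   unpack(pack(1500000000, 0)) == (1500000000, 0)
# and timeStamptoDate(pack(1500000000, 0), localTime=False) should give
# "14/07/2017 02:40:00" (1500000000 seconds after the epoch, in UTC).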
def addZeros(time):
"""Adds a zero to the start of a single digit number"""
timeString = str(time)
if len(timeString) < 2:
return ("0"+timeString)
return timeString
def getMonth(s):
months = { 'Jan':1, 'Feb':2, 'Mar':3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul':7, 'Aug':8, 'Sep':9, 'Oct':10, 'Nov':11, 'Dec':12 }
return months[s]
def timeStamptoDate(i, localTime = True):
"""convert 64bit timestamp to local date in string format.
If the time is UTC, the boolean value localTime must be set to False.
    The returned string is formatted \"DD/MM/YYYY HH:MM:SS\". The year is the full number.
"""
#GBenelli Add a try: except: to handle the stop time of the last IOV "end of time"
try:
if localTime:
# 64bit timestamp -> 32bit timestamp(high) -> timestamp string (local)
date=time.ctime(unpack(i)[0])
else:
# 64bit timestamp -> 32bit timestamp(high) -> time tuple -> timestamp string (UTC)
date=time.asctime(time.gmtime(unpack(i)[0]))
# change date to "DD/MM/YYYY HH:MM:SS" format
date = date.split()
date[1] = getMonth(date[1])
date = addZeros(date[2]) +'/'+ addZeros(date[1]) +'/'+ date[4] +' '+ date[3]
except:
#Handle the case of last IOV (or any IOV) timestamp being "out of range" by returning -1 instead of the date...
print("Could not unpack time stamp %s, unpacked to %s!"%(i,unpack(i)[0]))
date=-1
return date
def printUsage():
    print('usage: ./convertTime.py [-l] time1 [time2 ...]')
    print('    - "time" is either a 64bit timestamp or a string formatted "DD/MM/YYYY HH:MM:SS"')
    print('    - "-l" to use local time instead of UTC')
def main(time, localTime=True):
# convert 64bit timestamp to time string
if time.isdigit():
time = long(time)
return timeStamptoDate(time, localTime)
# convert time string to 64bit timestamp
else:
return packFromString(time, localTime)
if __name__ == "__main__":
args = sys.argv[:]
if len(args) < 2 :
printUsage()
sys.exit(1)
args = args[1:]
if args[0]=='-h' or args[0]=='--help':
printUsage()
sys.exit(0)
useUTC = True
if args[0]=='-l' or args[0]=='--localtime':
useUTC = False
args=args[1:]
for time0 in args:
time1 = main(time0, not useUTC)
print(time0, '->', time1)
|
384 | test bounds of child objects | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of Image Engine Design Inc nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import IECoreScene
import GafferScene
import GafferSceneTest
class DeleteFacesTest( GafferSceneTest.SceneTestCase ) :
def makeRectangleFromTwoSquaresScene( self ) :
verticesPerFace = IECore.IntVectorData( [4, 4] )
vertexIds = IECore.IntVectorData( [0, 1, 4, 3, 1, 2, 5, 4] )
p = IECore.V3fVectorData( [imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 2, 0, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 2, 1, 0 )] )
deleteData = IECore.IntVectorData( [0, 1] )
mesh = IECoreScene.MeshPrimitive( verticesPerFace, vertexIds, "linear", p )
mesh["deleteFaces"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, deleteData )
mesh["uniform"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Uniform, IECore.IntVectorData( [10, 11] ) )
mesh["vertex"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.Vertex, IECore.IntVectorData( [100, 101, 102, 103, 104, 105] ) )
mesh["faceVarying"] = IECoreScene.PrimitiveVariable( IECoreScene.PrimitiveVariable.Interpolation.FaceVarying, IECore.IntVectorData( [20, 21, 22, 23, 24, 25, 26, 27] ) )
self.assertTrue(mesh.arePrimitiveVariablesValid())
objectToScene = GafferScene.ObjectToScene()
objectToScene["object"].setValue( mesh )
return objectToScene
def testCanDeleteFaces( self ) :
rectangleScene = self.makeRectangleFromTwoSquaresScene()
deleteFaces = GafferScene.DeleteFaces()
deleteFaces["in"].setInput( rectangleScene["out"] )
pathFilter = GafferScene.PathFilter( "PathFilter" )
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/object' ] ) )
deleteFaces["filter"].setInput( pathFilter["out"] )
faceDeletedObject = deleteFaces["out"].object( "/object" )
self.assertEqual( faceDeletedObject.verticesPerFace, IECore.IntVectorData([4]) )
self.assertEqual( faceDeletedObject.vertexIds, IECore.IntVectorData([0, 1, 3, 2]) )
self.assertEqual( faceDeletedObject.numFaces(), 1 )
self.assertEqual( faceDeletedObject["P"].data, IECore.V3fVectorData( [imath.V3f( 0, 0, 0 ), imath.V3f( 1, 0, 0 ), imath.V3f( 0, 1, 0 ), imath.V3f( 1, 1, 0 )], IECore.GeometricData.Interpretation.Point) )
# verify the primvars are correct
self.assertEqual( faceDeletedObject["uniform"].data, IECore.IntVectorData([10]) )
self.assertEqual( faceDeletedObject["vertex"].data, IECore.IntVectorData([100, 101, 103, 104]) )
self.assertEqual( faceDeletedObject["faceVarying"].data, IECore.IntVectorData([20, 21, 22, 23]) )
# invert
# ======
deleteFaces["invert"].setValue( True )
faceDeletedObject = deleteFaces["out"].object( "/object" )
self.assertEqual( faceDeletedObject.verticesPerFace, IECore.IntVectorData([4]) )
self.assertEqual( faceDeletedObject.vertexIds, IECore.IntVectorData([0, 1, 3, 2]) )
self.assertEqual( faceDeletedObject.numFaces(), 1 )
self.assertEqual( faceDeletedObject["P"].data,
IECore.V3fVectorData( [imath.V3f( 1, 0, 0 ), imath.V3f( 2, 0, 0 ), imath.V3f( 1, 1, 0 ), imath.V3f( 2, 1, 0 )],
IECore.GeometricData.Interpretation.Point ) )
# verify the primvars are correct
self.assertEqual( faceDeletedObject["uniform"].data, IECore.IntVectorData([11]) )
self.assertEqual( faceDeletedObject["vertex"].data, IECore.IntVectorData([101, 102, 104, 105]) )
self.assertEqual( faceDeletedObject["faceVarying"].data, IECore.IntVectorData([24, 25, 26, 27]) )
def testDeletingFacesUpdatesBounds( self ) :
rectangleScene = self.makeRectangleFromTwoSquaresScene()
expectedOriginalBound = rectangleScene["out"].bound( "/object" )
self.assertEqual(expectedOriginalBound, imath.Box3f( imath.V3f( 0, 0, 0 ), imath.V3f( 2, 1, 0 ) ) )
deleteFaces = GafferScene.DeleteFaces()
deleteFaces["in"].setInput( rectangleScene["out"] )
pathFilter = GafferScene.PathFilter( "PathFilter" )
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/object' ] ) )
deleteFaces["filter"].setInput( pathFilter["out"] )
actualFaceDeletedBounds = deleteFaces["out"].bound( "/object" )
expectedBoundingBox = imath.Box3f( imath.V3f( 0, 0, 0 ), imath.V3f( 1, 1, 0 ) )
self.assertEqual( actualFaceDeletedBounds, expectedBoundingBox )
def METHOD_NAME( self ) :
rectangle = self.makeRectangleFromTwoSquaresScene()
sphere = GafferScene.Sphere()
sphere["radius"].setValue( 10 ) # Totally encloses the rectangle
parent = GafferScene.Parent()
parent["in"].setInput( rectangle["out"] )
parent["parent"].setValue( "/object" )
parent["children"][0].setInput( sphere["out"] )
self.assertSceneValid( parent["out"] )
pathFilter = GafferScene.PathFilter( "PathFilter" )
pathFilter["paths"].setValue( IECore.StringVectorData( [ "/object" ] ) )
deleteFaces = GafferScene.DeleteFaces()
deleteFaces["in"].setInput( parent["out"] )
deleteFaces["filter"].setInput( pathFilter["out"] )
# The sphere should not have been modified
self.assertEqual( deleteFaces["out"].object( "/object/sphere" ), parent["out"].object( "/object/sphere" ) )
# And the bounding boxes should still enclose all the objects,
# including the sphere.
self.assertSceneValid( deleteFaces["out"] )
def testIgnoreMissing( self ) :
rectangle = self.makeRectangleFromTwoSquaresScene()
deleteFaces = GafferScene.DeleteFaces()
deleteFaces["in"].setInput( rectangle["out"] )
pathFilter = GafferScene.PathFilter( "PathFilter" )
pathFilter["paths"].setValue( IECore.StringVectorData( [ '/object' ] ) )
deleteFaces["filter"].setInput( pathFilter["out"] )
self.assertNotEqual( deleteFaces["in"].object( "/object" ), deleteFaces["out"].object( "/object" ) )
deleteFaces["faces"].setValue( "doesNotExist" )
self.assertRaises( RuntimeError, deleteFaces["out"].object, "/object" )
deleteFaces["ignoreMissingVariable"].setValue( True )
self.assertEqual( deleteFaces["in"].object( "/object" ), deleteFaces["out"].object( "/object" ) )
if __name__ == "__main__":
unittest.main() |
385 | test filter single label dataset | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import tensorflow as tf
from tensorflow_federated.python.simulation.datasets import dataset_utils
from tensorflow_federated.python.simulation.datasets import from_tensor_slices_client_data
class DatasetUtilsTest(tf.test.TestCase):
def test_deterministic_dataset_mixture(self):
a = tf.data.Dataset.range(5)
b = tf.data.Dataset.range(5).map(lambda x: x + 5)
mixture1 = dataset_utils.build_dataset_mixture(
a, b, a_probability=0.5, seed=0
)
mixture2 = dataset_utils.build_dataset_mixture(
a, b, a_probability=0.5, seed=0
)
examples1 = [self.evaluate(x) for x in mixture1]
examples2 = [self.evaluate(x) for x in mixture2]
self.assertAllEqual(examples1, examples2)
def test_deterministic_dataset_mixture_distribution(self):
# Create a dataset of infinite eights.
a = tf.data.Dataset.from_tensor_slices([8]).repeat(None)
# Create a normal sampling of integers around mean=5
b = tf.data.Dataset.from_tensor_slices(
tf.cast(tf.random.normal(shape=[1000], mean=5, stddev=2.0), tf.int32)
)
# Create a mixture of 1000 integers (bounded by the size of `b` since `a` is
# infinite).
mixture1 = dataset_utils.build_dataset_mixture(
a, b, a_probability=0.8, seed=0
)
mixture2 = dataset_utils.build_dataset_mixture(
a, b, a_probability=0.8, seed=0
)
mixture3 = dataset_utils.build_dataset_mixture(
a, b, a_probability=0.8, seed=1
)
counts1 = collections.Counter(self.evaluate(x) for x in mixture1)
counts2 = collections.Counter(self.evaluate(x) for x in mixture2)
counts3 = collections.Counter(self.evaluate(x) for x in mixture3)
self.assertEqual(counts1, counts2)
self.assertNotEqual(counts1, counts3)
def test_non_deterministic_dataset_mixture_different(self):
num_examples = 100
a = tf.data.Dataset.from_tensor_slices([0] * num_examples)
b = tf.data.Dataset.from_tensor_slices([1] * num_examples)
mixture_1 = dataset_utils.build_dataset_mixture(a, b, a_probability=0.5)
mixture_2 = dataset_utils.build_dataset_mixture(a, b, a_probability=0.5)
# The mixtures should produce different samples.
self.assertNotEqual(
self.evaluate(list(iter(mixture_1))),
self.evaluate(list(iter(mixture_2))),
)
def METHOD_NAME(self):
    # Create a uniform sampling of integers in [0, 9).
d = tf.data.Dataset.from_tensor_slices(
{
'label': tf.random.uniform(
shape=[1000], minval=0, maxval=9, dtype=tf.int32
),
}
)
filtered_d = dataset_utils.build_single_label_dataset(
d, label_key='label', desired_label=6
)
filtered_examples = [self.evaluate(x) for x in filtered_d]
# Expect close to 1000 / 10 = 100 examples.
self.assertLen(filtered_examples, 103)
self.assertTrue(all(x['label'] == 6 for x in filtered_d))
def test_build_synthethic_iid_client_data(self):
# Create a fake, very non-IID ClientData.
client_datasets = collections.OrderedDict(a=[1] * 3, b=[2] * 5, c=[3] * 7)
non_iid_client_data = from_tensor_slices_client_data.TestClientData(
client_datasets
)
iid_client_data_iter = iter(
dataset_utils.build_synthethic_iid_datasets(
non_iid_client_data, client_dataset_size=5
)
)
num_synthethic_clients = 3
run_results = []
for _ in range(5):
actual_iid_client_datasets = []
for _ in range(num_synthethic_clients):
dataset = next(iid_client_data_iter)
actual_iid_client_datasets.append([self.evaluate(x) for x in dataset])
# We expect 3 datasets: 15 examples in the global dataset, synthetic
# non-iid configured for 5 examples per client.
self.assertEqual([5, 5, 5], [len(d) for d in actual_iid_client_datasets])
run_results.append(actual_iid_client_datasets)
# Assert no run is the same. The chance that two runs are the same is far
# less than 1 in a million, flakes should be imperceptible.
for i, run_a in enumerate(run_results[:-1]):
for run_b in run_results[i + 1 :]:
self.assertNotEqual(run_a, run_b, msg=str(run_results))
if __name__ == '__main__':
tf.test.main() |
386 | read text pair | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
from paddlenlp.datasets import MapDataset
def create_dataloader(dataset, mode="train", batch_size=1, batchify_fn=None, trans_fn=None):
if trans_fn:
dataset = dataset.map(trans_fn)
shuffle = True if mode == "train" else False
if mode == "train":
batch_sampler = paddle.io.DistributedBatchSampler(dataset, batch_size=batch_size, shuffle=shuffle)
else:
batch_sampler = paddle.io.BatchSampler(dataset, batch_size=batch_size, shuffle=shuffle)
return paddle.io.DataLoader(dataset=dataset, batch_sampler=batch_sampler, collate_fn=batchify_fn, return_list=True)
def METHOD_NAME(data_path):
"""Reads data."""
with open(data_path, "r", encoding="utf-8") as f:
for line in f:
data = line.rstrip().split("\t")
if len(data) != 3:
continue
yield {"query": data[0], "title": data[1]}
def convert_pointwise_example(example, tokenizer, max_seq_length=512, is_test=False):
query, title = example["query"], example["title"]
encoded_inputs = tokenizer(text=query, text_pair=title, max_seq_len=max_seq_length)
input_ids = encoded_inputs["input_ids"]
token_type_ids = encoded_inputs["token_type_ids"]
if not is_test:
label = np.array([example["label"]], dtype="int64")
return input_ids, token_type_ids, label
else:
return input_ids, token_type_ids
def convert_pairwise_example(example, tokenizer, max_seq_length=512, phase="train"):
if phase == "train":
query, pos_title, neg_title = example["query"], example["title"], example["neg_title"]
pos_inputs = tokenizer(text=query, text_pair=pos_title, max_seq_len=max_seq_length)
neg_inputs = tokenizer(text=query, text_pair=neg_title, max_seq_len=max_seq_length)
pos_input_ids = pos_inputs["input_ids"]
pos_token_type_ids = pos_inputs["token_type_ids"]
neg_input_ids = neg_inputs["input_ids"]
neg_token_type_ids = neg_inputs["token_type_ids"]
return (pos_input_ids, pos_token_type_ids, neg_input_ids, neg_token_type_ids)
else:
query, title = example["query"], example["title"]
inputs = tokenizer(text=query, text_pair=title, max_seq_len=max_seq_length)
input_ids = inputs["input_ids"]
token_type_ids = inputs["token_type_ids"]
if phase == "eval":
return input_ids, token_type_ids, example["label"]
elif phase == "predict":
return input_ids, token_type_ids
else:
raise ValueError("not supported phase:{}".format(phase))
def gen_pair(dataset, pool_size=100):
"""
    Generate triplets randomly based on the dataset
    Args:
        dataset: A `MapDataset` or `IterDataset` or a tuple of those.
        Each example is composed of 2 texts: example["query"], example["title"]
        pool_size: the number of examples from which a negative title is randomly sampled
    Return:
        dataset: A `MapDataset` or `IterDataset` or a tuple of those.
        Each example is composed of 3 texts: example["query"], example["title"], example["neg_title"]
"""
if len(dataset) < pool_size:
pool_size = len(dataset)
new_examples = []
pool = []
tmp_examples = []
for example in dataset:
label = example["label"]
# Filter negative example
if label == 0:
continue
tmp_examples.append(example)
pool.append(example["title"])
if len(pool) >= pool_size:
np.random.shuffle(pool)
for idx, example in enumerate(tmp_examples):
example["neg_title"] = pool[idx]
new_examples.append(example)
tmp_examples = []
pool = []
else:
continue
return MapDataset(new_examples) |
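# Illustrative sketch (assumption, not from the original file): given positive
# examples such as {"query": "q1", "title": "t1", "label": 1}, gen_pair fills a
# shuffled pool of titles and attaches one per example as "neg_title", e.g.
# {"query": "q1", "title": "t1", "label": 1, "neg_title": "t7"}, then wraps the
# results in a MapDataset.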
387 | log file | import abc
import time
import shutil
import psutil
import datetime
import threading
import subprocess
from ..hands import *
class BaseService(object):
def __init__(self, **kwargs):
self.name = kwargs['name']
self._process = None
self.STOP_TIMEOUT = 10
self.max_retry = 0
self.retry = 3
self.LOG_KEEP_DAYS = 7
self.EXIT_EVENT = threading.Event()
@property
@abc.abstractmethod
def cmd(self):
return []
@property
@abc.abstractmethod
def cwd(self):
return ''
@property
def is_running(self):
if self.pid == 0:
return False
try:
os.kill(self.pid, 0)
except (OSError, ProcessLookupError):
return False
else:
return True
def show_status(self):
if self.is_running:
msg = f'{self.name} is running: {self.pid}.'
else:
msg = f'{self.name} is stopped.'
            if DEBUG:
                msg = '\033[31m{} is stopped.\033[0m\nYou can manually start it to find the error: \n' \
                      ' $ cd {}\n' \
                      ' $ {}'.format(self.name, self.cwd, ' '.join(self.cmd))
print(msg)
# -- log --
@property
def log_filename(self):
return f'{self.name}.log'
@property
def log_filepath(self):
return os.path.join(LOG_DIR, self.log_filename)
@property
def METHOD_NAME(self):
return open(self.log_filepath, 'a')
@property
def log_dir(self):
return os.path.dirname(self.log_filepath)
# -- end log --
# -- pid --
@property
def pid_filepath(self):
return os.path.join(TMP_DIR, f'{self.name}.pid')
@property
def pid(self):
if not os.path.isfile(self.pid_filepath):
return 0
with open(self.pid_filepath) as f:
try:
pid = int(f.read().strip())
except ValueError:
pid = 0
return pid
def write_pid(self):
with open(self.pid_filepath, 'w') as f:
f.write(str(self.process.pid))
def remove_pid(self):
if os.path.isfile(self.pid_filepath):
os.unlink(self.pid_filepath)
# -- end pid --
# -- process --
@property
def process(self):
if not self._process:
try:
self._process = psutil.Process(self.pid)
except:
pass
return self._process
# -- end process --
# -- action --
def open_subprocess(self):
kwargs = {'cwd': self.cwd, 'stderr': self.METHOD_NAME, 'stdout': self.METHOD_NAME}
self._process = subprocess.Popen(self.cmd, **kwargs)
def start(self):
if self.is_running:
self.show_status()
return
self.remove_pid()
self.open_subprocess()
self.write_pid()
self.start_other()
def start_other(self):
pass
def stop(self, force=False):
if not self.is_running:
self.show_status()
# self.remove_pid()
return
print(f'Stop service: {self.name}', end='')
sig = 9 if force else 15
os.kill(self.pid, sig)
if self.process is None:
print("\033[31m No process found\033[0m")
return
try:
self.process.wait(1)
except:
pass
for i in range(self.STOP_TIMEOUT):
if i == self.STOP_TIMEOUT - 1:
print("\033[31m Error\033[0m")
if not self.is_running:
print("\033[32m Ok\033[0m")
self.remove_pid()
break
            else:
                time.sleep(1)
def watch(self):
self._check()
if not self.is_running:
self._restart()
self._rotate_log()
def _check(self):
now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
print(f"{now} Check service status: {self.name} -> ", end='')
if self.process:
try:
                self.process.wait(1)  # without wait(), the child process may never be reaped
except:
pass
if self.is_running:
print(f'running at {self.pid}')
else:
print(f'stopped at {self.pid}')
def _restart(self):
if self.retry > self.max_retry:
logging.info("Service start failed, exit: {}".format(self.name))
self.EXIT_EVENT.set()
return
self.retry += 1
logging.info(f'> Find {self.name} stopped, retry {self.retry}, {self.pid}')
self.start()
def _rotate_log(self):
now = datetime.datetime.now()
_time = now.strftime('%H:%M')
if _time != '23:59':
return
backup_date = now.strftime('%Y-%m-%d')
backup_log_dir = os.path.join(self.log_dir, backup_date)
if not os.path.exists(backup_log_dir):
os.mkdir(backup_log_dir)
backup_log_path = os.path.join(backup_log_dir, self.log_filename)
if os.path.isfile(self.log_filepath) and not os.path.isfile(backup_log_path):
logging.info(f'Rotate log file: {self.log_filepath} => {backup_log_path}')
shutil.copy(self.log_filepath, backup_log_path)
with open(self.log_filepath, 'w') as f:
pass
to_delete_date = now - datetime.timedelta(days=self.LOG_KEEP_DAYS)
to_delete_dir = os.path.join(LOG_DIR, to_delete_date.strftime('%Y-%m-%d'))
if os.path.exists(to_delete_dir):
logging.info(f'Remove old log: {to_delete_dir}')
shutil.rmtree(to_delete_dir, ignore_errors=True)
# -- end action -- |
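# Hedged usage sketch (illustrative only, not part of the original module): a
# concrete service only needs to supply `cmd` and `cwd`; the command and path
# below are placeholders, not real project settings.
class ExampleSleepService(BaseService):
    @property
    def cmd(self):
        # Placeholder command line; a real subclass returns its daemon command.
        return ['sleep', '3600']
    @property
    def cwd(self):
        return '/tmp'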
388 | assert xranges equivalent | # Python test set -- built-in functions
import test.test_support, unittest
import sys
import pickle
import itertools
import warnings
warnings.filterwarnings("ignore", "integer argument expected",
DeprecationWarning, "unittest")
# pure Python implementations (3 args only), for comparison
def pyrange(start, stop, step):
if (start - stop) // step < 0:
# replace stop with next element in the sequence of integers
# that are congruent to start modulo step.
stop += (start - stop) % step
while start != stop:
yield start
start += step
def pyrange_reversed(start, stop, step):
stop += (start - stop) % step
return pyrange(stop - step, start - step, -step)
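# Worked example (for reference, not part of the original test): with start=0,
# stop=7, step=3 the stop is first bumped to 9 (the next value congruent to 0
# modulo 3), so pyrange(0, 7, 3) yields 0, 3, 6, the same values as
# xrange(0, 7, 3).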
class XrangeTest(unittest.TestCase):
def assert_iterators_equal(self, xs, ys, test_id, limit=None):
# check that an iterator xs matches the expected results ys,
# up to a given limit.
if limit is not None:
xs = itertools.islice(xs, limit)
ys = itertools.islice(ys, limit)
sentinel = object()
pairs = itertools.izip_longest(xs, ys, fillvalue=sentinel)
for i, (x, y) in enumerate(pairs):
if x == y:
continue
elif x == sentinel:
self.fail('{}: iterator ended unexpectedly '
'at position {}; expected {}'.format(test_id, i, y))
elif y == sentinel:
self.fail('{}: unexpected excess element {} at '
'position {}'.format(test_id, x, i))
else:
self.fail('{}: wrong element at position {};'
'expected {}, got {}'.format(test_id, i, y, x))
def METHOD_NAME(self, x, y):
# Check that two xrange objects are equivalent, in the sense of the
# associated sequences being the same. We want to use this for large
# xrange objects, so instead of converting to lists and comparing
# directly we do a number of indirect checks.
if len(x) != len(y):
self.fail('{} and {} have different '
'lengths: {} and {} '.format(x, y, len(x), len(y)))
if len(x) >= 1:
if x[0] != y[0]:
self.fail('{} and {} have different initial '
'elements: {} and {} '.format(x, y, x[0], y[0]))
if x[-1] != y[-1]:
self.fail('{} and {} have different final '
'elements: {} and {} '.format(x, y, x[-1], y[-1]))
if len(x) >= 2:
x_step = x[1] - x[0]
y_step = y[1] - y[0]
if x_step != y_step:
self.fail('{} and {} have different step: '
'{} and {} '.format(x, y, x_step, y_step))
def test_xrange(self):
self.assertEqual(list(xrange(3)), [0, 1, 2])
self.assertEqual(list(xrange(1, 5)), [1, 2, 3, 4])
self.assertEqual(list(xrange(0)), [])
self.assertEqual(list(xrange(-3)), [])
self.assertEqual(list(xrange(1, 10, 3)), [1, 4, 7])
self.assertEqual(list(xrange(5, -5, -3)), [5, 2, -1, -4])
a = 10
b = 100
c = 50
self.assertEqual(list(xrange(a, a+2)), [a, a+1])
self.assertEqual(list(xrange(a+2, a, -1L)), [a+2, a+1])
self.assertEqual(list(xrange(a+4, a, -2)), [a+4, a+2])
seq = list(xrange(a, b, c))
self.assertIn(a, seq)
self.assertNotIn(b, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(b, a, -c))
self.assertIn(b, seq)
self.assertNotIn(a, seq)
self.assertEqual(len(seq), 2)
seq = list(xrange(-a, -b, -c))
self.assertIn(-a, seq)
self.assertNotIn(-b, seq)
self.assertEqual(len(seq), 2)
self.assertRaises(TypeError, xrange)
self.assertRaises(TypeError, xrange, 1, 2, 3, 4)
self.assertRaises(ValueError, xrange, 1, 2, 0)
self.assertRaises(OverflowError, xrange, 10**100, 10**101, 10**101)
self.assertRaises(TypeError, xrange, 0, "spam")
self.assertRaises(TypeError, xrange, 0, 42, "spam")
self.assertEqual(len(xrange(0, sys.maxint, sys.maxint-1)), 2)
self.assertRaises(OverflowError, xrange, -sys.maxint, sys.maxint)
self.assertRaises(OverflowError, xrange, 0, 2*sys.maxint)
r = xrange(-sys.maxint, sys.maxint, 2)
self.assertEqual(len(r), sys.maxint)
self.assertRaises(OverflowError, xrange, -sys.maxint-1, sys.maxint, 2)
def test_pickling(self):
testcases = [(13,), (0, 11), (-22, 10), (20, 3, -1),
(13, 21, 3), (-2, 2, 2)]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in testcases:
r = xrange(*t)
self.assertEqual(list(pickle.loads(pickle.dumps(r, proto))),
list(r))
M = min(sys.maxint, sys.maxsize)
large_testcases = testcases + [
(0, M, 1),
(M, 0, -1),
(0, M, M - 1),
(M // 2, M, 1),
(0, -M, -1),
(0, -M, 1 - M),
(-M, M, 2),
(-M, M, 1024),
(-M, M, 10585),
(M, -M, -2),
(M, -M, -1024),
(M, -M, -10585),
]
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for t in large_testcases:
r = xrange(*t)
r_out = pickle.loads(pickle.dumps(r, proto))
self.METHOD_NAME(r_out, r)
def test_repr(self):
# Check that repr of an xrange is a valid representation
# of that xrange.
# Valid xranges have at most min(sys.maxint, sys.maxsize) elements.
M = min(sys.maxint, sys.maxsize)
testcases = [
(13,),
(0, 11),
(-22, 10),
(20, 3, -1),
(13, 21, 3),
(-2, 2, 2),
(0, M, 1),
(M, 0, -1),
(0, M, M - 1),
(M // 2, M, 1),
(0, -M, -1),
(0, -M, 1 - M),
(-M, M, 2),
(-M, M, 1024),
(-M, M, 10585),
(M, -M, -2),
(M, -M, -1024),
(M, -M, -10585),
]
for t in testcases:
r = xrange(*t)
r_out = eval(repr(r))
self.METHOD_NAME(r, r_out)
def test_range_iterators(self):
# see issue 7298
limits = [base + jiggle
for M in (2**32, 2**64)
for base in (-M, -M//2, 0, M//2, M)
for jiggle in (-2, -1, 0, 1, 2)]
test_ranges = [(start, end, step)
for start in limits
for end in limits
for step in (-2**63, -2**31, -2, -1, 1, 2)]
for start, end, step in test_ranges:
try:
iter1 = xrange(start, end, step)
except OverflowError:
pass
else:
iter2 = pyrange(start, end, step)
test_id = "xrange({}, {}, {})".format(start, end, step)
# check first 100 entries
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
try:
iter1 = reversed(xrange(start, end, step))
except OverflowError:
pass
else:
iter2 = pyrange_reversed(start, end, step)
test_id = "reversed(xrange({}, {}, {}))".format(start, end, step)
self.assert_iterators_equal(iter1, iter2, test_id, limit=100)
def test_main():
test.test_support.run_unittest(XrangeTest)
if __name__ == "__main__":
test_main() |
389 | get iterator for map style | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Callable, Generic, Iterable, List, Optional, TypeVar
from nncf.common.utils.api_marker import api
DataItem = TypeVar("DataItem")
ModelInput = TypeVar("ModelInput")
@api(canonical_alias="nncf.Dataset")
class Dataset(Generic[DataItem, ModelInput]):
"""
Wrapper for passing custom user datasets into NNCF algorithms.
This class defines the interface by which compression algorithms
retrieve data items from the passed data source object. These data items are used
for different purposes, for example, model inference and model validation, based
on the choice of the exact compression algorithm.
    If the data item returned from the data source per iteration cannot be used directly
    as input for model inference, the transformation function is used to extract the
    model's input from this data item. For example, in supervised learning, the data item
    usually contains both examples and labels, so the transformation function should
    extract the examples from the data item.
:param data_source: The iterable object serving as the source of data items.
:param transform_func: The function that is used to extract the model's input
from the data item. The data item here is the data item that is returned from
the data source per iteration. This function should be passed when
the data item cannot be directly used as model's input. If this is not specified, then the data item
will be passed into the model as-is.
"""
def __init__(
self, data_source: Iterable[DataItem], transform_func: Optional[Callable[[DataItem], ModelInput]] = None
):
self._data_source = data_source
self._transform_func = transform_func
def get_data(self, indices: Optional[List[int]] = None) -> Iterable[DataItem]:
"""
Returns the iterable object that contains selected data items from the data source as-is.
:param indices: The zero-based indices of data items that should be selected from
the data source. The indices should be sorted in ascending order. If indices are
not passed all data items are selected from the data source.
:return: The iterable object that contains selected data items from the data source as-is.
"""
return DataProvider(self._data_source, None, indices)
def get_inference_data(self, indices: Optional[List[int]] = None) -> Iterable[ModelInput]:
"""
Returns the iterable object that contains selected data items from the data source, for which
the transformation function was applied. The item, which was returned per iteration from this
iterable, can be used as the model's input for model inference.
:param indices: The zero-based indices of data items that should be selected from
the data source. The indices should be sorted in ascending order. If indices are
not passed all data items are selected from the data source.
:return: The iterable object that contains selected data items from the data source, for which
the transformation function was applied.
"""
return DataProvider(self._data_source, self._transform_func, indices)
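# Hedged usage sketch (illustrative, not part of the NNCF sources): with a data
# source that yields (input, label) pairs, the transform function drops the
# label so that get_inference_data() yields only model inputs, e.g.
#   calibration_dataset = Dataset(val_loader, lambda item: item[0])
# where `val_loader` is a hypothetical iterable of (input, label) pairs.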
class DataProvider(Generic[DataItem, ModelInput]):
def __init__(
self,
data_source: Iterable[DataItem],
transform_func: Callable[[DataItem], ModelInput],
indices: Optional[List[int]] = None,
):
self._data_source = data_source
if transform_func is None:
transform_func = lambda x: x
self._transform_func = transform_func
self._indices = indices
def __iter__(self):
if self._indices is None:
return map(self._transform_func, self._data_source)
if hasattr(self._data_source, "__getitem__"):
return DataProvider.METHOD_NAME(self._data_source, self._transform_func, self._indices)
return DataProvider._get_iterator_for_iter(self._data_source, self._transform_func, sorted(self._indices))
@staticmethod
def METHOD_NAME(
data_source: Iterable[DataItem], transform_func: Callable[[DataItem], ModelInput], indices: List[int]
):
for index in indices:
yield transform_func(data_source[index])
@staticmethod
def _get_iterator_for_iter(
data_source: Iterable[DataItem], transform_func: Callable[[DataItem], ModelInput], indices: List[int]
):
pos = 0
num_indices = len(indices)
for idx, data_item in enumerate(data_source):
if pos == num_indices:
# All specified data items were selected.
break
if idx == indices[pos]:
pos = pos + 1
yield transform_func(data_item) |
390 | test invalid sharing | from common import *
from trezor.crypto import slip39, random
from slip39_vectors import vectors
def combinations(iterable, r):
# Taken from https://docs.python.org/3.7/library/itertools.html#itertools.combinations
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = list(range(r))
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1
for j in range(i+1, r):
indices[j] = indices[j-1] + 1
yield tuple(pool[i] for i in indices)
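# For reference (not part of the original test): combinations(range(4), 2)
# yields (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3), matching
# itertools.combinations.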
class TestCryptoSlip39(unittest.TestCase):
EMS = b"ABCDEFGHIJKLMNOP"
def test_basic_sharing_random(self):
ems = random.bytes(32)
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(1, [(3, 5)], identifier, 1, ems)
mnemonics = mnemonics[0]
self.assertEqual(slip39.recover_ems(mnemonics[:3]), slip39.recover_ems(mnemonics[2:]))
def test_basic_sharing_fixed(self):
generated_identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(1, [(3, 5)], generated_identifier, 1, self.EMS)
mnemonics = mnemonics[0]
identifier, exponent, ems = slip39.recover_ems(mnemonics[:3])
self.assertEqual(ems, self.EMS)
self.assertEqual(generated_identifier, identifier)
self.assertEqual(slip39.recover_ems(mnemonics[1:4])[2], ems)
with self.assertRaises(slip39.MnemonicError):
slip39.recover_ems(mnemonics[1:3])
def test_iteration_exponent(self):
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(1, [(3, 5)], identifier, 1, self.EMS)
mnemonics = mnemonics[0]
identifier, exponent, ems = slip39.recover_ems(mnemonics[1:4])
self.assertEqual(ems, self.EMS)
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(1, [(3, 5)], identifier, 2, self.EMS)
mnemonics = mnemonics[0]
identifier, exponent, ems = slip39.recover_ems(mnemonics[1:4])
self.assertEqual(ems, self.EMS)
def test_group_sharing(self):
group_threshold = 2
group_sizes = (5, 3, 5, 1)
member_thresholds = (3, 2, 2, 1)
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(
group_threshold, list(zip(member_thresholds, group_sizes)), identifier, 1, self.EMS
)
# Test all valid combinations of mnemonics.
for groups in combinations(zip(mnemonics, member_thresholds), group_threshold):
for group1_subset in combinations(groups[0][0], groups[0][1]):
for group2_subset in combinations(groups[1][0], groups[1][1]):
mnemonic_subset = list(group1_subset + group2_subset)
random.shuffle(mnemonic_subset)
identifier, exponent, ems = slip39.recover_ems(mnemonic_subset)
self.assertEqual(ems, self.EMS)
# Minimal sets of mnemonics.
identifier, exponent, ems = slip39.recover_ems([mnemonics[2][0], mnemonics[2][2], mnemonics[3][0]])
self.assertEqual(ems, self.EMS)
self.assertEqual(slip39.recover_ems([mnemonics[2][3], mnemonics[3][0], mnemonics[2][4]])[2], ems)
# One complete group and one incomplete group out of two groups required.
with self.assertRaises(slip39.MnemonicError):
slip39.recover_ems(mnemonics[0][2:] + [mnemonics[1][0]])
# One group of two required.
with self.assertRaises(slip39.MnemonicError):
slip39.recover_ems(mnemonics[0][1:4])
def test_group_sharing_threshold_1(self):
group_threshold = 1
group_sizes = (5, 3, 5, 1)
member_thresholds = (3, 2, 2, 1)
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(
group_threshold, list(zip(member_thresholds, group_sizes)), identifier, 1, self.EMS
)
# Test all valid combinations of mnemonics.
for group, threshold in zip(mnemonics, member_thresholds):
for group_subset in combinations(group, threshold):
mnemonic_subset = list(group_subset)
random.shuffle(mnemonic_subset)
identifier, exponent, ems = slip39.recover_ems(mnemonic_subset)
self.assertEqual(ems, self.EMS)
def test_all_groups_exist(self):
for group_threshold in (1, 2, 5):
identifier = slip39.generate_random_identifier()
mnemonics = slip39.split_ems(
group_threshold, [(3, 5), (1, 1), (2, 3), (2, 5), (3, 5)], identifier, 1, self.EMS
)
self.assertEqual(len(mnemonics), 5)
self.assertEqual(len(sum(mnemonics, [])), 19)
def METHOD_NAME(self):
identifier = slip39.generate_random_identifier()
# Group threshold exceeds number of groups.
with self.assertRaises(ValueError):
slip39.split_ems(3, [(3, 5), (2, 5)], identifier, 1, self.EMS)
# Invalid group threshold.
with self.assertRaises(ValueError):
slip39.split_ems(0, [(3, 5), (2, 5)], identifier, 1, self.EMS)
# Member threshold exceeds number of members.
with self.assertRaises(ValueError):
slip39.split_ems(2, [(3, 2), (2, 5)], identifier, 1, self.EMS)
# Invalid member threshold.
with self.assertRaises(ValueError):
slip39.split_ems(2, [(0, 2), (2, 5)], identifier, 1, self.EMS)
# Group with multiple members and threshold 1.
with self.assertRaises(ValueError):
slip39.split_ems(2, [(3, 5), (1, 3), (2, 5)], identifier, 1, self.EMS)
def test_vectors(self):
for mnemonics, secret in vectors:
if secret:
identifier, exponent, ems = slip39.recover_ems(mnemonics)
self.assertEqual(slip39.decrypt(ems, b"TREZOR", exponent, identifier), unhexlify(secret))
else:
with self.assertRaises(slip39.MnemonicError):
slip39.recover_ems(mnemonics)
if __name__ == '__main__':
unittest.main() |
391 | python2relay | # type: ignore
import tvm
from tvm import relay
from calyx.py_ast import CompVar, Stdlib, CompInst, Cell, Invoke, CompPort
from calyx.utils import bits_needed
from typing import List
from dataclasses import dataclass
# Mapping from the tensor dimensions to the
# corresponding Calyx primitive.
NumDimsToCell = {
0: Stdlib().register,
1: Stdlib().seq_mem_d1,
2: Stdlib().seq_mem_d2,
3: Stdlib().seq_mem_d3,
4: Stdlib().seq_mem_d4,
}
@dataclass
class DahliaFuncDef:
"""Necessary information to compute a Dahlia
function definition."""
function_id: str
component_name: str
dest: CompVar
args: List[CompVar]
attributes: tvm.ir.Attrs
data_type: str
component: CompInst
def get_dims(c: CompInst):
"""Mapping from memory to number of dimensions."""
id = c.id
id2dimensions = {
"std_reg": 0,
"seq_mem_d1": 1,
"seq_mem_d2": 2,
"seq_mem_d3": 3,
"seq_mem_d4": 4,
}
assert id in id2dimensions, f"{id} not supported."
return id2dimensions[id]
def get_dimension_sizes(c: CompInst) -> List[int]:
"""Given a cell `c`, returns the corresponding
memory sizes.
Example:
std_mem_d1(32, 8, 3) returns [8]."""
dims = get_dims(c)
return [c.args[i] for i in range(1, dims + 1)]
def get_addr_ports(c: CompInst):
"""Returns a list of (address, index size)
for each address port in the component
instance."""
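# A d-dimensional memory cell is instantiated as (width, size_0..size_{d-1},
# idx_size_0..idx_size_{d-1}), so the index size for address port i sits at
# argument position d + 1 + i (see get_memory below).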
dims = get_dims(c)
addresses = range(0, dims)
indices = range(dims + 1, dims << 1 + 1)
return [(f"addr{i}", c.args[n]) for (i, n) in zip(addresses, indices)]
def emit_invoke_control(
decl: CompVar, dest: Cell, args: List[Cell], old_args=[], old_dest=None
) -> Invoke:
"""Returns the Invoke control."""
ref_cells = []
inputs = []
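# Registers and constants are connected through input ports; any other cell
# (i.e. a memory) is passed to the invoked component by reference.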
def add_arg(cell):
comp = cell.comp
param = f"{cell.id.name}"
arg = CompVar(cell.id.name)
# If this is a constant or a register, connect the ports
if any(p in comp.id for p in ["reg", "const"]):
inputs.append((f"{param}", CompPort(arg, "out")))
else:
ref_cells.append((param, arg))
# This function is similar to add_arg, but handles the case where we are
# "reusing" a Dahlia function (which will later become a Calyx component)
# and therefore need to use the same parameter names as the previous invoke.
def add_arg2(arg_cell, param_cell):
assert (
arg_cell.comp == param_cell.comp
), "arg cell and param cell must be same component"
comp = arg_cell.comp
param = f"{param_cell.id.name}"
arg = CompVar(arg_cell.id.name)
# If this is a constant or a register, connect the ports
if any(p in comp.id for p in ["reg", "const"]):
inputs.append((f"{param}", CompPort(arg, "out")))
else:
ref_cells.append((param, arg))
if len(old_args) == 0:
for cell in args:
add_arg(cell)
add_arg(dest)
else:
# Case for when we are "reusing" a Dahlia function/Calyx component and
# therefore need to make sure we're using the previous parameter names.
assert len(old_args) == len(
args
), "we are reusing a dahlia function but the args are different lengths"
assert old_dest is not None, "if using old_args must provide an old_dest too"
for (cell1, cell2) in zip(args, old_args):
add_arg2(cell1, cell2)
add_arg2(dest, old_dest)
return Invoke(decl, inputs, [], ref_cells)
def get_dahlia_data_type(relay_type) -> str:
"""Gets the Dahlia data type from the given Relay type.
It maps the types in the following manner:
Relay | Dahlia
--------|-------------------------------
int | (`bit`, width)
float | (`fix`, (width, width // 2))
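For example, int32 maps to `bit<32>` and float32 to `fix<32, 16>`.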
"""
width = get_bitwidth(relay_type)
if "int" in relay_type.dtype:
return f"bit<{width}>"
if "float" in relay_type.dtype:
return f"fix<{width}, {width // 2}>"
assert 0, f"{relay_type} is not supported."
def get_bitwidth(relay_type) -> int:
"""Gets the bitwidth from a Relay type."""
dtype = relay_type.dtype
assert "int" in dtype or "float" in dtype, f"{relay_type} not supported."
return int("".join(filter(str.isdigit, dtype)))
def get_memory(name: str, type: tvm.ir.Type) -> Cell:
"""Returns a Calyx memory for a given TVM type.
For non-Tensor types, a register is returned.
Otherwise, a memory with the corresponding dimension size
is returned, if it exists in Calyx."""
dims = type.concrete_shape
# Bitwidth, along with sizes and index sizes (if it is a Tensor).
args = [get_bitwidth(type)] + [d for d in dims] + [bits_needed(d) for d in dims]
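# e.g. a 2-D tensor yields [bitwidth, size_0, size_1, bits_needed(size_0), bits_needed(size_1)].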
num_dims = len(dims)
assert num_dims in NumDimsToCell, f"Memory of size {num_dims} not supported."
return Cell(CompVar(name), NumDimsToCell[num_dims](*args), is_external=True)
def METHOD_NAME(func) -> str:
"""Used to lower Relay IR from the
TVM Python library."""
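# ToANormalForm rewrites the module so that every intermediate expression is
# bound to its own variable (A-normal form) before lowering.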
seq = tvm.transform.Sequential(
[
relay.transform.SimplifyExpr(),
relay.transform.SimplifyInference(),
relay.transform.ToANormalForm(),
]
)
mod_opt = tvm.IRModule.from_expr(func)
mod_opt = seq(mod_opt)
return mod_opt["main"] |
392 | do kill | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""Module to test RabbitMQ."""
import asyncio
import plumpy
import pytest
from aiida.engine import ProcessState
from aiida.manage import get_manager
from aiida.orm import Int
from tests.utils import processes as test_processes
@pytest.mark.requires_rmq
class TestProcessControl:
"""Test AiiDA's RabbitMQ functionalities."""
TIMEOUT = 2.
@pytest.fixture(autouse=True)
def init_profile(self): # pylint: disable=unused-argument
"""Initialize the profile."""
# pylint: disable=attribute-defined-outside-init
# The coroutines defined in the test case should run in the runner's loop,
# and processes need to be submitted via runner.submit rather than the `submit`
# imported from aiida.engine, since the latter would create its own loop
manager = get_manager()
self.runner = manager.get_runner()
def test_submit_simple(self):
"""Launch the process."""
async def do_submit():
calc_node = self.runner.submit(test_processes.DummyProcess)
await self.wait_for_process(calc_node)
assert calc_node.is_finished_ok
assert calc_node.process_state.value == plumpy.ProcessState.FINISHED.value
self.runner.loop.run_until_complete(do_submit())
def test_launch_with_inputs(self):
"""Test launch with inputs."""
async def do_launch():
term_a = Int(5)
term_b = Int(10)
calc_node = self.runner.submit(test_processes.AddProcess, a=term_a, b=term_b)
await self.wait_for_process(calc_node)
assert calc_node.is_finished_ok
assert calc_node.process_state.value == plumpy.ProcessState.FINISHED.value
self.runner.loop.run_until_complete(do_launch())
def test_submit_bad_input(self):
with pytest.raises(ValueError):
self.runner.submit(test_processes.AddProcess, a=Int(5))
def test_exception_process(self):
"""Test process exception."""
async def do_exception():
calc_node = self.runner.submit(test_processes.ExceptionProcess)
await self.wait_for_process(calc_node)
assert not calc_node.is_finished_ok
assert calc_node.process_state.value == plumpy.ProcessState.EXCEPTED.value
self.runner.loop.run_until_complete(do_exception())
def test_pause(self):
"""Testing sending a pause message to the process."""
controller = get_manager().get_process_controller()
async def do_pause():
calc_node = self.runner.submit(test_processes.WaitProcess)
while calc_node.process_state != ProcessState.WAITING:
await asyncio.sleep(0.1)
assert not calc_node.paused
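# Controller actions go through RabbitMQ: the returned future resolves to another
# future carrying the final result, hence the two awaits below.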
pause_future = controller.pause_process(calc_node.pk)
future = await with_timeout(asyncio.wrap_future(pause_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert result
assert calc_node.paused
kill_message = 'Sorry, you have to go mate'
kill_future = controller.kill_process(calc_node.pk, msg=kill_message)
future = await with_timeout(asyncio.wrap_future(kill_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert result
self.runner.loop.run_until_complete(do_pause())
def test_pause_play(self):
"""Test sending a pause and then a play message."""
controller = get_manager().get_process_controller()
async def do_pause_play():
calc_node = self.runner.submit(test_processes.WaitProcess)
assert not calc_node.paused
while calc_node.process_state != ProcessState.WAITING:
await asyncio.sleep(0.1)
pause_message = 'Take a seat'
pause_future = controller.pause_process(calc_node.pk, msg=pause_message)
future = await with_timeout(asyncio.wrap_future(pause_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert calc_node.paused
assert calc_node.process_status == pause_message
play_future = controller.play_process(calc_node.pk)
future = await with_timeout(asyncio.wrap_future(play_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert result
assert not calc_node.paused
assert calc_node.process_status is None
kill_message = 'Sorry, you have to go mate'
kill_future = controller.kill_process(calc_node.pk, msg=kill_message)
future = await with_timeout(asyncio.wrap_future(kill_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert result
self.runner.loop.run_until_complete(do_pause_play())
def test_kill(self):
"""Test sending a kill message."""
controller = get_manager().get_process_controller()
async def METHOD_NAME():
calc_node = self.runner.submit(test_processes.WaitProcess)
assert not calc_node.is_killed
while calc_node.process_state != ProcessState.WAITING:
await asyncio.sleep(0.1)
kill_message = 'Sorry, you have to go mate'
kill_future = controller.kill_process(calc_node.pk, msg=kill_message)
future = await with_timeout(asyncio.wrap_future(kill_future))
result = await self.wait_future(asyncio.wrap_future(future))
assert result
await self.wait_for_process(calc_node)
assert calc_node.is_killed
assert calc_node.process_status == kill_message
self.runner.loop.run_until_complete(METHOD_NAME())
async def wait_for_process(self, calc_node, timeout=2.):
future = self.runner.get_process_future(calc_node.pk)
result = await with_timeout(future, timeout)
return result
@staticmethod
async def wait_future(future, timeout=2.):
result = await with_timeout(future, timeout)
return result
async def with_timeout(what, timeout=5.0):
result = await asyncio.wait_for(what, timeout)
return result |
393 | test enrich repo labels | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2023 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Alvaro del Castillo <[email protected]>
# Valerio Cosentino <[email protected]>
#
import logging
import unittest
from base import TestBaseBackend
from grimoire_elk.enriched.utils import REPO_LABELS
class TestRedmine(TestBaseBackend):
"""Test Redmine backend"""
connector = "redmine"
ocean_index = "test_" + connector
enrich_index = "test_" + connector + "_enrich"
def test_has_identites(self):
"""Test value of has_identities method"""
enrich_backend = self.connectors[self.connector][2]()
self.assertTrue(enrich_backend.has_identities())
def test_items_to_raw(self):
"""Test whether JSON items are properly inserted into ES"""
result = self._test_items_to_raw()
self.assertEqual(result['items'], 3)
self.assertEqual(result['raw'], 3)
def test_raw_to_enrich(self):
"""Test whether the raw index is properly enriched"""
result = self._test_raw_to_enrich()
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
enrich_backend = self.connectors[self.connector][2]()
for i in self.items:
ei = enrich_backend.get_rich_item(i)
self.assertIn('metadata__gelk_version', ei)
self.assertIn('metadata__gelk_backend_name', ei)
self.assertIn('metadata__enriched_on', ei)
if 'description' in ei:
self.assertIn('description_analyzed', ei)
if 'subject' in ei:
self.assertIn('subject_analyzed', ei)
def METHOD_NAME(self):
"""Test whether the field REPO_LABELS is present in the enriched items"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
self.assertIn(REPO_LABELS, eitem)
def test_raw_to_enrich_sorting_hat(self):
"""Test enrich with SortingHat"""
result = self._test_raw_to_enrich(sortinghat=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
enrich_backend = self.connectors[self.connector][2]()
url = self.es_con + "/" + self.enrich_index + "/_search"
response = enrich_backend.requests.get(url, verify=False).json()
for hit in response['hits']['hits']:
source = hit['_source']
if 'author_uuid' in source:
self.assertIn('author_domain', source)
self.assertIn('author_gender', source)
self.assertIn('author_gender_acc', source)
self.assertIn('author_org_name', source)
self.assertIn('author_bot', source)
self.assertIn('author_multi_org_names', source)
def test_raw_to_enrich_projects(self):
"""Test enrich with Projects"""
result = self._test_raw_to_enrich(projects=True)
self.assertEqual(result['raw'], 3)
self.assertEqual(result['enrich'], 3)
enrich_backend = self.connectors[self.connector][2](json_projects_map="data/projects-release.json",
db_user=self.db_user,
db_password=self.db_password)
for i in self.items:
ei = enrich_backend.get_rich_item(i)
self.assertIn('project', ei)
self.assertIn('project_1', ei)
def test_copy_raw_fields(self):
"""Test copied raw fields"""
self._test_raw_to_enrich()
enrich_backend = self.connectors[self.connector][2]()
for item in self.items:
eitem = enrich_backend.get_rich_item(item)
for attribute in enrich_backend.RAW_FIELDS_COPY:
if attribute in item:
self.assertEqual(item[attribute], eitem[attribute])
else:
self.assertIsNone(eitem[attribute])
def test_refresh_identities(self):
"""Test refresh identities"""
result = self._test_refresh_identities()
# ... ?
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("requests").setLevel(logging.WARNING)
unittest.main(warnings='ignore') |
394 | test attach detach rar activation reject | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
import unittest
import s1ap_types
from integ_tests.s1aptests import s1ap_wrapper
from integ_tests.s1aptests.s1ap_utils import SessionManagerUtil
from lte.protos.policydb_pb2 import FlowMatch
class TestAttachDetachRarActivationReject(unittest.TestCase):
"""Integration Test: TestAttachDetachRarActivationReject"""
def setUp(self):
"""Initialize before test case execution"""
self._s1ap_wrapper = s1ap_wrapper.TestWrapper()
self._sessionManager_util = SessionManagerUtil()
def tearDown(self):
"""Cleanup after test case execution"""
self._s1ap_wrapper.cleanup()
def METHOD_NAME(self):
"""Attach/detach + RAR + dedicated bearer activation reject test
with a single UE"""
num_ues = 1
self._s1ap_wrapper.configUEDevice(num_ues)
req = self._s1ap_wrapper.ue_req
print(
"********************** Running End to End attach for UE id",
req.ue_id,
)
# Now actually complete the attach
self._s1ap_wrapper._s1_util.attach(
req.ue_id,
s1ap_types.tfwCmd.UE_END_TO_END_ATTACH_REQUEST,
s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND,
s1ap_types.ueAttachAccept_t,
)
# Wait for EMM Information from MME
self._s1ap_wrapper._s1_util.receive_emm_info()
# UL Flow description #1
ul_flow1 = {
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.UPLINK, # Direction
}
# DL Flow description #1
dl_flow1 = {
"ip_proto": FlowMatch.IPPROTO_TCP, # Protocol Type
"direction": FlowMatch.DOWNLINK, # Direction
}
# Flow list to be configured
flow_list = [
ul_flow1,
dl_flow1,
]
# QoS
qos = {
"qci": 5, # qci value [1 to 9]
"priority": 15, # Range [0-255]
"max_req_bw_ul": 10000000, # MAX bw Uplink
"max_req_bw_dl": 15000000, # MAX bw Downlink
"gbr_ul": 1000000, # GBR Uplink
"gbr_dl": 2000000, # GBR Downlink
"arp_prio": 15, # ARP priority
"pre_cap": 1, # pre-emption capability
"pre_vul": 1, # pre-emption vulnerability
}
policy_id = "ims-voice"
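# Sending the RAR should make the network push a Dedicated Bearer Activation
# Request towards the UE, which this test then rejects.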
time.sleep(5)
print(
"********************** Sending RAR for IMSI",
"".join([str(i) for i in req.imsi]),
)
self._sessionManager_util.send_ReAuthRequest(
"IMSI" + "".join([str(i) for i in req.imsi]),
policy_id,
flow_list,
qos,
)
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
act_ded_ber_ctxt_req = response.cast(
s1ap_types.UeActDedBearCtxtReq_t,
)
print(
"********************** Received Activate Dedicated Bearer Request",
)
time.sleep(15)
# The T3485 timer expires in 8 seconds, leading to re-transmission of
# the Dedicated Bearer Activation Request message.
# Handling re-transmitted Dedicated Bearer Activation Request
response = self._s1ap_wrapper.s1_util.get_response()
assert response.msg_type == s1ap_types.tfwCmd.UE_ACT_DED_BER_REQ.value
print(
"********************** Ignoring re-transmitted Dedicated Bearer "
"Activation Request",
)
print(
"********************** Sending Activate Dedicated Bearer Reject",
)
# Send Bearer Activation Reject
ded_bearer_rej = s1ap_types.UeActDedBearCtxtRej_t()
ded_bearer_rej.ue_Id = req.ue_id
ded_bearer_rej.bearerId = act_ded_ber_ctxt_req.bearerId
self._s1ap_wrapper._s1_util.issue_cmd(
s1ap_types.tfwCmd.UE_ACT_DED_BER_REJ,
ded_bearer_rej,
)
time.sleep(15)
print(
"********************** Running UE detach for UE id ",
req.ue_id,
)
# Now detach the UE
self._s1ap_wrapper.s1_util.detach(
req.ue_id,
s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value,
)
if __name__ == "__main__":
unittest.main() |
395 | get firewall rule output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetFirewallRuleResult',
'AwaitableGetFirewallRuleResult',
'get_firewall_rule',
'get_firewall_rule_output',
]
@pulumi.output_type
class GetFirewallRuleResult:
"""
Represents a server firewall rule.
"""
def __init__(__self__, end_ip_address=None, id=None, name=None, start_ip_address=None, type=None):
if end_ip_address and not isinstance(end_ip_address, str):
raise TypeError("Expected argument 'end_ip_address' to be a str")
pulumi.set(__self__, "end_ip_address", end_ip_address)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if start_ip_address and not isinstance(start_ip_address, str):
raise TypeError("Expected argument 'start_ip_address' to be a str")
pulumi.set(__self__, "start_ip_address", start_ip_address)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="endIpAddress")
def end_ip_address(self) -> str:
"""
The end IP address of the server firewall rule. Must be IPv4 format.
"""
return pulumi.get(self, "end_ip_address")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="startIpAddress")
def start_ip_address(self) -> str:
"""
The start IP address of the server firewall rule. Must be IPv4 format.
"""
return pulumi.get(self, "start_ip_address")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetFirewallRuleResult(GetFirewallRuleResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetFirewallRuleResult(
end_ip_address=self.end_ip_address,
id=self.id,
name=self.name,
start_ip_address=self.start_ip_address,
type=self.type)
def get_firewall_rule(firewall_rule_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallRuleResult:
"""
Gets information about a server firewall rule.
:param str firewall_rule_name: The name of the server firewall rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['firewallRuleName'] = firewall_rule_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:dbformariadb/v20180601:getFirewallRule', __args__, opts=opts, typ=GetFirewallRuleResult).value
return AwaitableGetFirewallRuleResult(
end_ip_address=pulumi.get(__ret__, 'end_ip_address'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
start_ip_address=pulumi.get(__ret__, 'start_ip_address'),
type=pulumi.get(__ret__, 'type'))
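# lift_output_func wraps the plain invoke above so the same lookup can be called
# with pulumi Output values; the Ellipsis body below is generated boilerplate.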
@_utilities.lift_output_func(get_firewall_rule)
def METHOD_NAME(firewall_rule_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetFirewallRuleResult]:
"""
Gets information about a server firewall rule.
:param str firewall_rule_name: The name of the server firewall rule.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str server_name: The name of the server.
"""
... |
396 | dblinks | # Copyright 2001 by Tarjei Mikkelsen. All rights reserved.
# Copyright 2007 by Michiel de Hoon. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
"""Code to work with the KEGG Ligand/Compound database.
Functions:
- parse - Returns an iterator giving Record objects.
Classes:
- Record - A representation of a KEGG Ligand/Compound.
"""
from Bio.KEGG import _default_wrap, _struct_wrap, _wrap_kegg, _write_kegg
# Set up line wrapping rules (see Bio.KEGG._wrap_kegg)
name_wrap = [0, "", (" ", "$", 1, 1), ("-", "$", 1, 1)]
id_wrap = _default_wrap
struct_wrap = _struct_wrap
class Record:
"""Holds info from a KEGG Ligand/Compound record.
Attributes:
- entry The entry identifier.
- name A list of the compound names.
- formula The chemical formula for the compound
- mass The molecular weight for the compound
- pathway A list of 3-tuples: ('PATH', pathway id, pathway)
- enzyme A list of the EC numbers.
- structures A list of 2-tuples: (database, list of struct ids)
- dblinks A list of 2-tuples: (database, list of link ids)
"""
def __init__(self):
"""Initialize as new record."""
self.entry = ""
self.name = []
self.formula = ""
self.mass = ""
self.pathway = []
self.enzyme = []
self.structures = []
self.dblinks = []
def __str__(self):
"""Return a string representation of this Record."""
return (
self._entry()
+ self._name()
+ self._formula()
+ self._mass()
+ self._pathway()
+ self._enzyme()
+ self._structures()
+ self.METHOD_NAME()
+ "///"
)
def _entry(self):
return _write_kegg("ENTRY", [self.entry])
def _name(self):
return _write_kegg(
"NAME", [_wrap_kegg(line, wrap_rule=name_wrap) for line in self.name]
)
def _formula(self):
return _write_kegg("FORMULA", [self.formula])
def _mass(self):
return _write_kegg("MASS", [self.mass])
def _pathway(self):
s = []
for entry in self.pathway:
s.append(entry[0] + " " + entry[1])
return _write_kegg(
"PATHWAY", [_wrap_kegg(line, wrap_rule=id_wrap(16)) for line in s]
)
def _enzyme(self):
return _write_kegg(
"ENZYME", [_wrap_kegg(line, wrap_rule=name_wrap) for line in self.enzyme]
)
def _structures(self):
s = []
for entry in self.structures:
s.append(entry[0] + ": " + " ".join(entry[1]) + " ")
return _write_kegg(
"STRUCTURES", [_wrap_kegg(line, wrap_rule=struct_wrap(5)) for line in s]
)
def METHOD_NAME(self):
s = []
for entry in self.dblinks:
s.append(entry[0] + ": " + " ".join(entry[1]))
return _write_kegg(
"DBLINKS", [_wrap_kegg(line, wrap_rule=id_wrap(9)) for line in s]
)
def parse(handle):
"""Parse a KEGG Ligand/Compound file, returning Record objects.
This is an iterator function, typically used in a for loop. For
example, using one of the example KEGG files in the Biopython
test suite,
>>> with open("KEGG/compound.sample") as handle:
... for record in parse(handle):
... print("%s %s" % (record.entry, record.name[0]))
...
C00023 Iron
C00017 Protein
C00099 beta-Alanine
C00294 Inosine
C00298 Trypsin
C00348 all-trans-Undecaprenyl phosphate
C00349 2-Methyl-3-oxopropanoate
C01386 NH2Mec
"""
record = Record()
for line in handle:
if line[:3] == "///":
yield record
record = Record()
continue
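# KEGG flat files reserve the first 12 characters for the keyword column;
# continuation lines leave it blank and reuse the previous keyword.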
if line[:12] != " ":
keyword = line[:12]
data = line[12:].strip()
if keyword == "ENTRY ":
words = data.split()
record.entry = words[0]
elif keyword == "NAME ":
data = data.strip(";")
record.name.append(data)
elif keyword == "ENZYME ":
while data:
column = data[:16]
data = data[16:]
enzyme = column.strip()
record.enzyme.append(enzyme)
elif keyword == "PATHWAY ":
map, name = data.split(" ")
pathway = ("PATH", map, name)
record.pathway.append(pathway)
elif keyword == "FORMULA ":
record.formula = data
elif keyword in ("MASS ", "EXACT_MASS "):
record.mass = data
elif keyword == "DBLINKS ":
if ":" in data:
key, values = data.split(":")
values = values.split()
row = (key, values)
record.dblinks.append(row)
else:
row = record.dblinks[-1]
key, values = row
values.extend(data.split())
row = key, values
record.dblinks[-1] = row
if __name__ == "__main__":
from Bio._utils import run_doctest
run_doctest() |
397 | instance queue pattern | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import copy
import logging
import os
import re
import pytest
from six.moves import range
from datadog_checks.dev import docker_run
from datadog_checks.dev.conditions import CheckDockerLogs, WaitFor
from datadog_checks.dev.utils import ON_WINDOWS
from . import common
log = logging.getLogger(__file__)
@pytest.fixture(scope='session')
def get_check():
# Late import to ignore missing library for e2e
from datadog_checks.ibm_mq import IbmMqCheck
yield lambda instance: IbmMqCheck('ibm_mq', {}, [instance])
@pytest.fixture
def instance():
return copy.deepcopy(common.INSTANCE)
@pytest.fixture
def instance_ssl():
return copy.deepcopy(common.INSTANCE_SSL)
@pytest.fixture
def instance_with_connection_name():
return copy.deepcopy(common.INSTANCE_WITH_CONNECTION_NAME)
@pytest.fixture
def METHOD_NAME():
return copy.deepcopy(common.INSTANCE_QUEUE_PATTERN)
@pytest.fixture
def instance_queue_regex():
return copy.deepcopy(common.INSTANCE_QUEUE_REGEX)
@pytest.fixture
def instance_collect_all():
return copy.deepcopy(common.INSTANCE_COLLECT_ALL)
@pytest.fixture
def instance_queue_regex_tag():
return copy.deepcopy(common.INSTANCE_QUEUE_REGEX_TAG)
@pytest.fixture
def instance_ssl_dummy(instance):
instance['ssl_auth'] = 'yes'
instance['ssl_cipher_spec'] = 'TLS_RSA_WITH_AES_256_CBC_SHA256'
instance['ssl_key_repository_location'] = '/dummy'
return instance
@pytest.fixture
def seed_data():
publish()
consume()
def publish():
# Late import to not require it for e2e
import pymqi
conn_info = "%s(%s)" % (common.HOST, common.PORT)
qmgr = pymqi.connect(common.QUEUE_MANAGER, common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
queue = pymqi.Queue(qmgr, common.QUEUE)
for i in range(10):
try:
message = 'Hello from Python! Message {}'.format(i)
log.info("sending message: %s", message)
queue.put(message.encode())
except Exception as e:
log.info("exception publishing: %s", e)
queue.close()
qmgr.disconnect()
return
queue.close()
qmgr.disconnect()
def consume():
# Late import to not require it for e2e
import pymqi
conn_info = "%s(%s)" % (common.HOST, common.PORT)
qmgr = pymqi.connect(common.QUEUE_MANAGER, common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
queue = pymqi.Queue(qmgr, common.QUEUE)
for _ in range(10):
try:
message = queue.get()
print("got a new message: {}".format(message))
except Exception as e:
if not re.search("MQRC_NO_MSG_AVAILABLE", e.errorAsString()):
print(e)
queue.close()
qmgr.disconnect()
return
else:
pass
queue.close()
qmgr.disconnect()
def prepare_queue_manager():
import pymqi
conn_info = '{0}({1})'.format(common.HOST, common.PORT)
qm_name = common.QUEUE_MANAGER.lower()
qmgr = pymqi.QueueManager(None)
qmgr.connectTCPClient(common.QUEUE_MANAGER, pymqi.CD(), common.CHANNEL, conn_info, common.USERNAME, common.PASSWORD)
pcf = pymqi.PCFExecute(qmgr, response_wait_interval=5000)
attrs = [
pymqi.CFST(
Parameter=pymqi.CMQC.MQCA_SSL_KEY_REPOSITORY,
String=pymqi.ensure_bytes('/etc/mqm/pki/keys/{}'.format(qm_name)),
),
pymqi.CFST(Parameter=pymqi.CMQC.MQCA_CERT_LABEL, String=pymqi.ensure_bytes(qm_name)),
]
pcf.MQCMD_CHANGE_Q_MGR(attrs)
tls_channel_name = pymqi.ensure_bytes(common.CHANNEL_SSL)
cypher_spec = pymqi.ensure_bytes(common.SSL_CYPHER_SPEC)
client_dn = pymqi.ensure_bytes('CN={}'.format(common.SSL_CLIENT_LABEL))
certificate_label_qmgr = pymqi.ensure_bytes(qm_name)
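# Define (or replace) a TLS-enabled SVRCONN channel: the cipher spec and the expected
# peer DN are pinned, while client certificate authentication stays optional.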
attrs = [
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_CHANNEL_TYPE, Value=pymqi.CMQC.MQCHT_SVRCONN),
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_SSL_CIPHER_SPEC, String=cypher_spec),
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_SSL_PEER_NAME, String=client_dn),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_SSL_CLIENT_AUTH, Value=pymqi.CMQXC.MQSCA_OPTIONAL),
pymqi.CFST(Parameter=pymqi.CMQC.MQCA_CERT_LABEL, String=certificate_label_qmgr),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_REPLACE, Value=pymqi.CMQCFC.MQRP_YES),
]
pcf.MQCMD_CREATE_CHANNEL(attrs)
attrs = [
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_CHLAUTH_TYPE, Value=pymqi.CMQCFC.MQCAUT_USERMAP),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_ACTION, Value=pymqi.CMQCFC.MQACT_REPLACE),
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CLIENT_USER_ID, String=pymqi.ensure_bytes(common.USERNAME)),
pymqi.CFIN(Parameter=pymqi.CMQC.MQIA_CHECK_CLIENT_BINDING, Value=pymqi.CMQCFC.MQCHK_REQUIRED_ADMIN),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_USER_SOURCE, Value=pymqi.CMQC.MQUSRC_MAP),
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_MCA_USER_ID, String=b'mqm'),
]
pcf.MQCMD_SET_CHLAUTH_REC(attrs)
attrs = [
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_CHANNEL_NAME, String=pymqi.ensure_bytes(tls_channel_name)),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_CHLAUTH_TYPE, Value=pymqi.CMQCFC.MQCAUT_BLOCKUSER),
pymqi.CFST(Parameter=pymqi.CMQCFC.MQCACH_MCA_USER_ID_LIST, String=b'nobody'),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACH_WARNING, Value=pymqi.CMQC.MQWARN_NO),
pymqi.CFIN(Parameter=pymqi.CMQCFC.MQIACF_ACTION, Value=pymqi.CMQCFC.MQACT_REPLACE),
]
pcf.MQCMD_SET_CHLAUTH_REC(attrs)
pcf.disconnect()
qmgr.disconnect()
@pytest.fixture(scope='session')
def dd_environment():
if common.MQ_VERSION == 9:
log_pattern = "AMQ5026I: The listener 'DEV.LISTENER.TCP' has started. ProcessId"
elif common.MQ_VERSION == 8:
log_pattern = r".*QMNAME\({}\)\s*STATUS\(Running\).*".format(common.QUEUE_MANAGER)
else:
raise RuntimeError('Invalid version: {}'.format(common.MQ_VERSION))
e2e_meta = copy.deepcopy(common.E2E_METADATA)
e2e_meta.setdefault('docker_volumes', [])
e2e_meta['docker_volumes'].append("{}:/opt/pki/keys".format(os.path.join(common.HERE, 'keys')))
conditions = [CheckDockerLogs('ibm_mq1', log_pattern)]
if not ON_WINDOWS:
conditions.append(WaitFor(prepare_queue_manager))
with docker_run(compose_file=common.COMPOSE_FILE_PATH, build=True, conditions=conditions, sleep=10, attempts=2):
yield common.INSTANCE, e2e_meta |
398 | is nve | #!/usr/bin/env python3
#
# Copyright (c) 2018-2021 NVIDIA CORPORATION & AFFILIATES.
# Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This utility set the power mode of a given module.
"""
import sys
import errno
from python_sdk_api.sx_api import *
DEVICE_ID = 1
SWITCH_ID = 0
SX_PORT_ATTR_ARR_SIZE = 64
PORT_TYPE_CPU = 4
PORT_TYPE_NVE = 8
PORT_TYPE_OFFSET = 28
PORT_TYPE_MASK = 0xF0000000
NVE_MASK = PORT_TYPE_MASK & (PORT_TYPE_NVE << PORT_TYPE_OFFSET)
CPU_MASK = PORT_TYPE_MASK & (PORT_TYPE_CPU << PORT_TYPE_OFFSET)
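# The port type is encoded in the top four bits (offset 28) of a logical port ID;
# these masks are used below to skip NVE and CPU ports when walking the port list.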
def METHOD_NAME(port):
return (port & NVE_MASK) != 0
def is_cpu(port):
return (port & CPU_MASK) != 0
def is_port_admin_status_up(log_port):
oper_state_p = new_sx_port_oper_state_t_p()
admin_state_p = new_sx_port_admin_state_t_p()
module_state_p = new_sx_port_module_state_t_p()
rc = sx_api_port_state_get(handle, log_port, oper_state_p, admin_state_p, module_state_p)
assert rc == SX_STATUS_SUCCESS, "sx_api_port_state_get failed, rc = %d" % rc
admin_state = sx_port_admin_state_t_p_value(admin_state_p)
if admin_state == SX_PORT_ADMIN_STATUS_UP:
return True
else:
return False
def set_port_admin_status_by_log_port(handle, log_port, admin_status):
rc = sx_api_port_state_set(handle, log_port, admin_status)
assert rc == SX_STATUS_SUCCESS, "sx_api_port_state_set failed, rc = %d" % rc
# Get all the ports related to the SFP; if a port's admin status is up, add it to the list
def get_log_ports(handle, sfp_module):
port_attributes_list = new_sx_port_attributes_t_arr(SX_PORT_ATTR_ARR_SIZE)
port_cnt_p = new_uint32_t_p()
uint32_t_p_assign(port_cnt_p, SX_PORT_ATTR_ARR_SIZE)
rc = sx_api_port_device_get(handle, DEVICE_ID, SWITCH_ID, port_attributes_list, port_cnt_p)
assert rc == SX_STATUS_SUCCESS, "sx_api_port_device_get failed, rc = %d" % rc
port_cnt = uint32_t_p_value(port_cnt_p)
log_port_list = []
for i in range(0, port_cnt):
port_attributes = sx_port_attributes_t_arr_getitem(port_attributes_list, i)
if not METHOD_NAME(int(port_attributes.log_port)) \
and not is_cpu(int(port_attributes.log_port)) \
and port_attributes.port_mapping.module_port == sfp_module \
and is_port_admin_status_up(port_attributes.log_port):
log_port_list.append(port_attributes.log_port)
return log_port_list
def mgmt_phy_mod_pwr_attr_set(handle, module_id, power_attr_type, admin_pwr_mode):
sx_mgmt_phy_mod_pwr_attr = sx_mgmt_phy_mod_pwr_attr_t()
sx_mgmt_phy_mod_pwr_mode_attr = sx_mgmt_phy_mod_pwr_mode_attr_t()
sx_mgmt_phy_mod_pwr_attr.power_attr_type = power_attr_type
sx_mgmt_phy_mod_pwr_mode_attr.admin_pwr_mode_e = admin_pwr_mode
sx_mgmt_phy_mod_pwr_attr.pwr_mode_attr = sx_mgmt_phy_mod_pwr_mode_attr
sx_mgmt_phy_mod_pwr_attr_p = new_sx_mgmt_phy_mod_pwr_attr_t_p()
sx_mgmt_phy_mod_pwr_attr_t_p_assign(sx_mgmt_phy_mod_pwr_attr_p, sx_mgmt_phy_mod_pwr_attr)
try:
rc = sx_mgmt_phy_mod_pwr_attr_set(handle, SX_ACCESS_CMD_SET, module_id, sx_mgmt_phy_mod_pwr_attr_p)
assert SX_STATUS_SUCCESS == rc, "sx_mgmt_phy_mod_pwr_attr_set failed"
finally:
delete_sx_mgmt_phy_mod_pwr_attr_t_p(sx_mgmt_phy_mod_pwr_attr_p)
def mgmt_phy_mod_pwr_attr_get(handle, module_id, power_attr_type):
sx_mgmt_phy_mod_pwr_attr_p = new_sx_mgmt_phy_mod_pwr_attr_t_p()
sx_mgmt_phy_mod_pwr_attr = sx_mgmt_phy_mod_pwr_attr_t()
sx_mgmt_phy_mod_pwr_attr.power_attr_type = power_attr_type
sx_mgmt_phy_mod_pwr_attr_t_p_assign(sx_mgmt_phy_mod_pwr_attr_p, sx_mgmt_phy_mod_pwr_attr)
try:
rc = sx_mgmt_phy_mod_pwr_attr_get(handle, module_id, sx_mgmt_phy_mod_pwr_attr_p)
assert SX_STATUS_SUCCESS == rc, "sx_mgmt_phy_mod_pwr_attr_get failed"
sx_mgmt_phy_mod_pwr_attr = sx_mgmt_phy_mod_pwr_attr_t_p_value(sx_mgmt_phy_mod_pwr_attr_p)
pwr_mode_attr = sx_mgmt_phy_mod_pwr_attr.pwr_mode_attr
return pwr_mode_attr.admin_pwr_mode_e, pwr_mode_attr.oper_pwr_mode_e
finally:
delete_sx_mgmt_phy_mod_pwr_attr_t_p(sx_mgmt_phy_mod_pwr_attr_p)
def pwr_attr_set(handle, module_id, ports, attr_type, power_mode):
# Check if the module already works in the same mode
admin_pwr_mode, oper_pwr_mode = mgmt_phy_mod_pwr_attr_get(handle, module_id, attr_type)
if (power_mode == SX_MGMT_PHY_MOD_PWR_MODE_LOW_E and oper_pwr_mode == SX_MGMT_PHY_MOD_PWR_MODE_LOW_E) \
or (power_mode == SX_MGMT_PHY_MOD_PWR_MODE_AUTO_E and admin_pwr_mode == SX_MGMT_PHY_MOD_PWR_MODE_AUTO_E):
return
try:
# Bring the port down
for port in ports:
set_port_admin_status_by_log_port(handle, port, SX_PORT_ADMIN_STATUS_DOWN)
# Set the desired power mode
mgmt_phy_mod_pwr_attr_set(handle, module_id, attr_type, power_mode)
# Bring the port up
finally:
for port in ports:
set_port_admin_status_by_log_port(handle, port, SX_PORT_ADMIN_STATUS_UP)
def set_lpmode(handle, cmd, module_id):
# Construct the port module map.
log_port_list = get_log_ports(handle, module_id)
if cmd == "enable":
pwr_attr_set(handle, module_id, log_port_list,
SX_MGMT_PHY_MOD_PWR_ATTR_PWR_MODE_E, SX_MGMT_PHY_MOD_PWR_MODE_LOW_E)
print("Enabled low power mode for module [%d]" % module_id)
elif cmd == "disable":
pwr_attr_set(handle, module_id, log_port_list,
SX_MGMT_PHY_MOD_PWR_ATTR_PWR_MODE_E, SX_MGMT_PHY_MOD_PWR_MODE_AUTO_E)
print("Disabled low power mode for module [%d]" % module_id)
else:
print("Error: Invalid command")
sys.exit(0)
if len(sys.argv) < 3:
print("SFP module number or LPM setting is missing.")
print("Usage: sfplpmset.py <SFP module> <on|off>")
sys.exit(errno.EINVAL)
cmd = None
lpm_enable = None
if sys.argv[2] == 'on':
lpm_enable = True
cmd = 'enable'
elif sys.argv[2] == 'off':
lpm_enable = False
cmd = 'disable'
else:
print("Unrecognized LPM parameter. Please use <on> or <off> values")
sys.exit(errno.EINVAL)
# Get SFP module
sfp_module = int(sys.argv[1]) - 1
print("[+] opening sdk")
rc, handle = sx_api_open(None)
if (rc != SX_STATUS_SUCCESS):
print("Failed to open api handle.\nPlease check that SDK is running.")
sys.exit(errno.EACCES)
# Set low power mode
set_lpmode(handle, cmd, sfp_module)
sx_api_close(handle) |
399 | test no context lines | # Copyright 2010-2023 The pygit2 contributors
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2,
# as published by the Free Software Foundation.
#
# In addition to the permissions in the GNU General Public License,
# the authors give you unlimited permission to link the compiled
# version of this file into combinations with other programs,
# and to distribute those combinations without any restriction
# coming from the use of this file. (The General Public License
# restrictions do apply in other respects; for example, they cover
# modification of the file, and distribution when not linked into
# a combined executable.)
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
import pygit2
import pytest
BLOB_OLD_SHA = 'a520c24d85fbfc815d385957eed41406ca5a860b'
BLOB_NEW_SHA = '3b18e512dba79e4c8300dd08aeb37f8e728b8dad'
BLOB_OLD_CONTENT = b"""hello world
hola mundo
bonjour le monde
"""
BLOB_NEW_CONTENT = b'foo bar\n'
BLOB_OLD_PATH = 'a/file'
BLOB_NEW_PATH = 'b/file'
BLOB_PATCH2 = """diff --git a/a/file b/b/file
index a520c24..3b18e51 100644
--- a/a/file
+++ b/b/file
@@ -1,3 +1 @@
hello world
-hola mundo
-bonjour le monde
"""
BLOB_PATCH = """diff --git a/a/file b/b/file
index a520c24..d675fa4 100644
--- a/a/file
+++ b/b/file
@@ -1,3 +1 @@
-hello world
-hola mundo
-bonjour le monde
+foo bar
"""
BLOB_PATCH_ADDED = """diff --git a/a/file b/b/file
new file mode 100644
index 0000000..d675fa4
--- /dev/null
+++ b/b/file
@@ -0,0 +1 @@
+foo bar
"""
BLOB_PATCH_DELETED = """diff --git a/a/file b/b/file
deleted file mode 100644
index a520c24..0000000
--- a/a/file
+++ /dev/null
@@ -1,3 +0,0 @@
-hello world
-hola mundo
-bonjour le monde
"""
def test_patch_create_from_buffers():
patch = pygit2.Patch.create_from(
BLOB_OLD_CONTENT,
BLOB_NEW_CONTENT,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH
def test_patch_create_from_blobs(testrepo):
old_blob = testrepo[BLOB_OLD_SHA]
new_blob = testrepo[BLOB_NEW_SHA]
patch = pygit2.Patch.create_from(
old_blob,
new_blob,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH2
def test_patch_create_from_blob_buffer(testrepo):
old_blob = testrepo[BLOB_OLD_SHA]
patch = pygit2.Patch.create_from(
old_blob,
BLOB_NEW_CONTENT,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH
def test_patch_create_from_blob_buffer_add(testrepo):
patch = pygit2.Patch.create_from(
None,
BLOB_NEW_CONTENT,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH_ADDED
def test_patch_create_from_blob_buffer_delete(testrepo):
old_blob = testrepo[BLOB_OLD_SHA]
patch = pygit2.Patch.create_from(
old_blob,
None,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH_DELETED
def test_patch_create_from_bad_old_type_arg(testrepo):
with pytest.raises(TypeError):
pygit2.Patch.create_from(testrepo, BLOB_NEW_CONTENT)
def test_patch_create_from_bad_new_type_arg(testrepo):
with pytest.raises(TypeError):
pygit2.Patch.create_from(None, testrepo)
def test_context_lines(testrepo):
old_blob = testrepo[BLOB_OLD_SHA]
new_blob = testrepo[BLOB_NEW_SHA]
patch = pygit2.Patch.create_from(
old_blob,
new_blob,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
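# Context lines in a unified diff are prefixed with a single space, so counting
# them distinguishes the default diff from one generated with context_lines=0.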
context_count = len(
[line for line in patch.text.splitlines() if line.startswith(" ")]
)
assert context_count != 0
def METHOD_NAME(testrepo):
old_blob = testrepo[BLOB_OLD_SHA]
new_blob = testrepo[BLOB_NEW_SHA]
patch = pygit2.Patch.create_from(
old_blob,
new_blob,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
context_lines=0,
)
context_count = len(
[line for line in patch.text.splitlines() if line.startswith(" ")]
)
assert context_count == 0
def test_patch_create_blob_blobs(testrepo):
old_blob = testrepo[testrepo.create_blob(BLOB_OLD_CONTENT)]
new_blob = testrepo[testrepo.create_blob(BLOB_NEW_CONTENT)]
patch = pygit2.Patch.create_from(
old_blob,
new_blob,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH
def test_patch_create_blob_buffer(testrepo):
blob = testrepo[testrepo.create_blob(BLOB_OLD_CONTENT)]
patch = pygit2.Patch.create_from(
blob,
BLOB_NEW_CONTENT,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH
def test_patch_create_blob_delete(testrepo):
blob = testrepo[testrepo.create_blob(BLOB_OLD_CONTENT)]
patch = pygit2.Patch.create_from(
blob,
None,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH_DELETED
def test_patch_create_blob_add(testrepo):
blob = testrepo[testrepo.create_blob(BLOB_NEW_CONTENT)]
patch = pygit2.Patch.create_from(
None,
blob,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
assert patch.text == BLOB_PATCH_ADDED
def test_patch_delete_blob(testrepo):
blob = testrepo[BLOB_OLD_SHA]
patch = pygit2.Patch.create_from(
blob,
None,
old_as_path=BLOB_OLD_PATH,
new_as_path=BLOB_NEW_PATH,
)
# Make sure that even after deleting the blob the patch still has the
# necessary references to generate its patch
del blob
assert patch.text == BLOB_PATCH_DELETED
def test_patch_multi_blob(testrepo):
blob = testrepo[BLOB_OLD_SHA]
patch = pygit2.Patch.create_from(
blob,
None
)
patch_text = patch.text
blob = testrepo[BLOB_OLD_SHA]
patch2 = pygit2.Patch.create_from(
blob,
None
)
patch_text2 = patch.text
assert patch_text == patch_text2
assert patch_text == patch.text
assert patch_text2 == patch2.text
assert patch.text == patch2.text |