Dataset schema (one row per source file):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
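For orientation, here is a minimal sketch of reading rows with this schema via the Hugging Face `datasets` library; the dataset id `user/python-code-dataset` is a hypothetical placeholder, since the dump does not name the dataset:

```python
# Minimal sketch (assumes a hypothetical Hub id; the dump does not name the dataset).
from datasets import load_dataset

ds = load_dataset("user/python-code-dataset", split="train", streaming=True)
row = next(iter(ds))  # one record with the columns listed above
print(row["hexsha"], row["size"], row["max_stars_repo_path"])
print(row["content"][:200])  # first 200 characters of the stored source file
```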
Row 1 metadata: hexsha 6a7ef877f9a75af565239a4f498da3558863fc35 | size 7,766 | ext py | lang Python
Repo path: tensorflow/contrib/data/python/kernel_tests/optimization/map_and_filter_fusion_test.py | repo: Smokrow/tensorflow | head hexsha: debd66dae1c9a49d36ea006c97facf06b4ac25cb | licenses: ["Apache-2.0"] (identical across the stars/issues/forks field groups)
Stars: 1 (2018-09-08T08:26:31.000Z to 2018-09-08T08:26:31.000Z) | issues: null | forks: 1 (2020-02-15T14:34:36.000Z to 2020-02-15T14:34:36.000Z)
Content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the MapAndFilterFusion optimization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.data.python.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class MapAndFilterFusionTest(test.TestCase, parameterized.TestCase):
@staticmethod
def map_functions():
identity = lambda x: x
increment = lambda x: x + 1
def increment_and_square(x):
y = x + 1
return y * y
functions = [identity, increment, increment_and_square]
tests = []
for i, fun1 in enumerate(functions):
for j, fun2 in enumerate(functions):
tests.append((
"Test{}{}".format(i, j),
[fun1, fun2],
))
for k, fun3 in enumerate(functions):
tests.append((
"Test{}{}{}".format(i, j, k),
[fun1, fun2, fun3],
))
swap = lambda x, n: (n, x)
tests.append((
"Swap1",
[lambda x: (x, 42), swap],
))
tests.append((
"Swap2",
[lambda x: (x, 42), swap, swap],
))
return tuple(tests)
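  # At class-body evaluation time map_functions is a staticmethod object, not a
  # plain function, so the decorators below call __func__ to unwrap it before
  # invoking it to build the parameter list.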
@parameterized.named_parameters(*map_functions.__func__())
def testMapFusion(self, functions):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Prefetch"]))
for function in functions:
dataset = dataset.map(function)
dataset = dataset.prefetch(0).apply(optimization.optimize(["map_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
result = sess.run(get_next)
r = x
for function in functions:
if isinstance(r, tuple):
r = function(*r) # Pass tuple as multiple arguments.
else:
r = function(r)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@staticmethod
def map_and_filter_functions():
identity = lambda x: x
increment = lambda x: x + 1
minus_five = lambda x: x - 5
def increment_and_square(x):
y = x + 1
return y * y
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
    is_even = lambda x: math_ops.equal(x % 2, 0)  # renamed: keeps even values
    greater = lambda x: math_ops.greater(x + 5, 0)
    functions = [identity, increment, minus_five, increment_and_square]
    filters = [take_all, is_zero, is_even, greater]
tests = []
for x, fun in enumerate(functions):
for y, predicate in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), fun, predicate))
# Multi output
tests.append(("Multi1", lambda x: (x, x),
lambda x, y: constant_op.constant(True)))
tests.append(
("Multi2", lambda x: (x, 2),
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)))
return tuple(tests)
@parameterized.named_parameters(*map_and_filter_functions.__func__())
def testMapFilterFusion(self, function, predicate):
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map",
"FilterByLastComponent"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
def _testMapAndFilter(self, dataset, function, predicate):
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(10):
r = function(x)
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if sess.run(b):
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testAdditionalInputs(self):
a = constant_op.constant(3, dtype=dtypes.int64)
b = constant_op.constant(4, dtype=dtypes.int64)
some_tensor = math_ops.mul(a, b)
function = lambda x: x * x
def predicate(y):
return math_ops.less(math_ops.cast(y, dtypes.int64), some_tensor)
# We are currently not supporting functions with additional inputs.
dataset = dataset_ops.Dataset.range(10).apply(
optimization.assert_next(
["Map", "Filter"])).map(function).filter(predicate).apply(
optimization.optimize(["map_and_filter_fusion"]))
self._testMapAndFilter(dataset, function, predicate)
@staticmethod
def filter_functions():
take_all = lambda x: constant_op.constant(True)
is_zero = lambda x: math_ops.equal(x, 0)
greater = lambda x: math_ops.greater(x + 5, 0)
tests = []
filters = [take_all, is_zero, greater]
identity = lambda x: x
for x, predicate_1 in enumerate(filters):
for y, predicate_2 in enumerate(filters):
tests.append(("Mixed{}{}".format(x, y), identity,
[predicate_1, predicate_2]))
for z, predicate_3 in enumerate(filters):
tests.append(("Mixed{}{}{}".format(x, y, z), identity,
[predicate_1, predicate_2, predicate_3]))
take_all_multiple = lambda x, y: constant_op.constant(True)
# Multi output
tests.append(("Multi1", lambda x: (x, x),
[take_all_multiple, take_all_multiple]))
tests.append(("Multi2", lambda x: (x, 2), [
take_all_multiple,
lambda x, y: math_ops.equal(x * math_ops.cast(y, dtypes.int64), 0)
]))
return tuple(tests)
@parameterized.named_parameters(*filter_functions.__func__())
def testFilterFusion(self, map_function, predicates):
dataset = dataset_ops.Dataset.range(5).apply(
optimization.assert_next(["Map", "Filter",
"Prefetch"])).map(map_function)
for predicate in predicates:
dataset = dataset.filter(predicate)
dataset = dataset.prefetch(0).apply(
optimization.optimize(["filter_fusion"]))
iterator = dataset.make_one_shot_iterator()
get_next = iterator.get_next()
with self.test_session() as sess:
for x in range(5):
r = map_function(x)
filtered = False
for predicate in predicates:
if isinstance(r, tuple):
b = predicate(*r) # Pass tuple as multiple arguments.
else:
b = predicate(r)
if not sess.run(b):
filtered = True
break
if not filtered:
result = sess.run(get_next)
self.assertAllEqual(r, result)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
Row 1 stats: avg_line_length 34.515556 | max_line_length 80 | alphanum_fraction 0.638424 | count_classes 6,484 (score 0.834921) | count_generators 0 (score 0) | count_decorators 5,181 (score 0.667139) | count_async_functions 0 (score 0) | count_documentation 1,192 (score 0.15349)
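Row 1 above exercises TensorFlow's map/filter fusion optimizations by replaying each function and predicate chain in plain Python against the optimized pipeline's output. A minimal pure-Python sketch of the invariant those tests check (illustrative stand-ins, not the tf.data implementation):

```python
# Fusing map(fn) followed by filter(pred) must keep exactly the elements the
# unfused chain keeps: the fused form computes (fn(x), pred(fn(x))) in one
# pass and then drops the boolean flag.
def map_then_filter(xs, fn, pred):
    return [y for y in (fn(x) for x in xs) if pred(y)]

def fused(xs, fn, pred):
    tagged = ((fn(x), pred(fn(x))) for x in xs)
    return [y for y, keep in tagged if keep]

fn, pred = lambda x: x + 1, lambda y: y % 2 == 0
assert map_then_filter(range(10), fn, pred) == fused(range(10), fn, pred)
```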
Row 2 metadata: hexsha 6a7f52df743becc0516c5282308cd0c5db04737d | size 16,979 | ext py | lang Python
Repo path: meerk40t/lihuiyu/lihuiyuemulator.py | repo: jpirnay/meerk40t | head hexsha: 10d4e41a8c5e2bb95a504904273699e115822b9b | licenses: ["MIT"] (identical across the stars/issues/forks field groups)
Stars: null | issues: null | forks: null
Content:
from meerk40t.core.cutcode import CutCode, RawCut
from meerk40t.core.parameters import Parameters
from meerk40t.core.units import UNITS_PER_MIL
from meerk40t.kernel import Module
from meerk40t.numpath import Numpath
from meerk40t.svgelements import Color
class LihuiyuEmulator(Module):
def __init__(self, context, path):
Module.__init__(self, context, path)
self.context.setting(bool, "fix_speeds", False)
self.parser = LihuiyuParser()
self.parser.fix_speeds = self.context.fix_speeds
self.parser.channel = self.context.channel("lhy")
def pos(p):
if p is None:
return
x0, y0, x1, y1 = p
self.context.signal("emulator;position", (x0, y0, x1, y1))
self.parser.position = pos
def __repr__(self):
return "LihuiyuEmulator(%s)" % self.name
def initialize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.watch(self.parser.write_packet)
def finalize(self, *args, **kwargs):
context = self.context
active = self.context.root.active
send = context.channel("%s/usb_send" % active)
send.unwatch(self.parser.write_packet)
class LihuiyuParser:
"""
LihuiyuParser parses LHYMicro-GL code with a state diagram. This should accurately reconstruct the values.
When the position is changed it calls a self.position() function if one exists.
"""
def __init__(self):
self.channel = None
self.position = None
self.board = "M2"
self.header_skipped = False
self.count_lines = 0
self.count_flag = 0
self.settings = Parameters({"speed": 20.0, "power": 1000.0})
self.speed_code = None
self.x = 0.0
self.y = 0.0
self.number_value = ""
self.distance_x = 0
self.distance_y = 0
self.filename = ""
self.laser = 0
self.left = False
self.top = False
self.x_on = False
self.y_on = False
self.small_jump = False
self.returning_compact = True
self.returning_finished = False
self.mode = None
self.raster_step = 0
self.paused_state = False
self.compact_state = False
self.finish_state = False
self.horizontal_major = False
self.fix_speeds = False
self.number_consumer = {}
def parse(self, data, elements):
self.path = Numpath()
def position(p):
if p is None:
return
from_x, from_y, to_x, to_y = p
if self.program_mode:
if self.laser:
self.path.line(complex(from_x, from_y), complex(to_x, to_y))
self.position = position
self.write(data)
self.path.uscale(UNITS_PER_MIL)
elements.elem_branch.add(
type="elem numpath",
path=self.path,
stroke=Color("black"),
**self.settings.settings,
)
elements.signal("refresh_scene", 0)
@property
def program_mode(self):
return self.compact_state
@property
def default_mode(self):
return not self.compact_state
@property
def raster_mode(self):
return self.settings.get("raster_step", 0) != 0
def new_file(self):
self.header_skipped = False
self.count_flag = 0
self.count_lines = 0
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
def header_write(self, data):
"""
Write data to the emulator including the header. This is intended for saved .egv files which include a default
header.
"""
if self.header_skipped:
self.write(data)
else:
data = LihuiyuParser.remove_header(data)
self.write(data)
def write_packet(self, packet):
self.write(packet[1:31])
def write(self, data):
for b in data:
self.process(b, chr(b))
def distance_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
self.append_distance(int(self.number_value))
self.number_value = ""
def speedcode_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_b2_consumer
def speedcode_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode B2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_accel_consumer
def speedcode_accel_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def speedcode_mult_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Accel = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b1_consumer
def speedcode_dratio_b1_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b1 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_dratio_b2_consumer
def speedcode_dratio_b2_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Speedcode Dratio b2 = %s" % self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def raster_step_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 3:
if self.channel:
self.channel("Raster Step = %s" % self.number_value)
self.raster_step = int(self.number_value)
self.number_value = ""
self.number_consumer = self.distance_consumer
def mode_consumer(self, c):
self.number_value += c
if len(self.number_value) >= 1:
if self.channel:
self.channel("Set Mode = %s" % self.number_value)
self.mode = int(self.number_value)
self.number_value = ""
self.number_consumer = self.speedcode_mult_consumer
def append_distance(self, amount):
if self.x_on:
self.distance_x += amount
if self.y_on:
self.distance_y += amount
def execute_distance(self):
if self.distance_x != 0 or self.distance_y != 0:
dx = self.distance_x
dy = self.distance_y
if self.left:
dx = -dx
if self.top:
dy = -dy
self.distance_x = 0
self.distance_y = 0
ox = self.x
oy = self.y
self.x += dx
self.y += dy
if self.position:
self.position((ox, oy, self.x, self.y))
if self.channel:
self.channel("Moving (%d %d) now at %d %d" % (dx, dy, self.x, self.y))
def process(self, b, c):
if c == "I":
self.finish_state = False
self.compact_state = False
self.paused_state = False
self.distance_x = 0
self.distance_y = 0
        if self.finish_state:  # In the finished state, all commands are black-holed.
return
if ord("0") <= b <= ord("9"):
self.number_consumer(c)
return
else:
self.number_consumer = self.distance_consumer
self.number_value = ""
if self.compact_state:
# Every command in compact state executes distances.
self.execute_distance()
if c == "|":
self.append_distance(25)
self.small_jump = True
elif ord("a") <= b <= ord("y"):
self.append_distance(b + 1 - ord("a"))
self.small_jump = False
elif c == "z":
self.append_distance(26 if self.small_jump else 255)
self.small_jump = False
elif c == "B": # Move to Right.
if self.left and self.horizontal_major:
# Was T switched to B with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = False
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Right")
elif c == "T": # Move to Left
if not self.left and self.horizontal_major:
                # Was B, switched to T with horizontal rastering.
if self.raster_step:
self.distance_y += self.raster_step
self.left = True
self.x_on = True
self.y_on = False
if self.channel:
self.channel("Left")
elif c == "R": # Move to Bottom
if self.top and not self.horizontal_major:
# Was L switched to R with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = False
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Bottom")
elif c == "L": # Move to Top
if not self.top and not self.horizontal_major:
# Was R switched to L with vertical rastering.
if self.raster_step:
self.distance_x += self.raster_step
self.top = True
self.x_on = False
self.y_on = True
if self.channel:
self.channel("Top")
elif c == "U":
self.laser = 0
if self.channel:
self.channel("Laser Off")
elif c == "D":
self.laser = 1
if self.channel:
self.channel("Laser On")
elif c == "F":
if self.channel:
self.channel("Finish")
self.returning_compact = False
self.returning_finished = True
elif c == "@":
if self.channel:
self.channel("Reset")
self.returning_finished = False
self.returning_compact = False
elif c in "C":
if self.channel:
self.channel("Speedcode")
self.speed_code = ""
elif c in "V":
self.raster_step = None
if self.channel:
self.channel("Velocity")
self.number_consumer = self.speedcode_b1_consumer
elif c in "G":
if self.channel:
self.channel("Step Value")
self.number_consumer = self.raster_step_consumer
elif c == "S":
if self.channel:
self.channel("Mode Set")
self.laser = 0
self.execute_distance()
self.mode = None
self.number_consumer = self.mode_consumer
elif c == "E":
if self.channel:
self.channel("Execute State")
if self.mode is None:
if self.returning_compact:
self.compact_state = True
if self.returning_finished:
self.finish_state = True
if self.horizontal_major:
self.left = not self.left
self.x_on = True
self.y_on = False
if self.raster_step:
self.distance_y += self.raster_step
else:
# vertical major
self.top = not self.top
self.x_on = False
self.y_on = True
if self.raster_step:
self.distance_x += self.raster_step
elif self.mode == 0:
# Homes then moves position.
pass
elif self.mode == 1:
self.compact_state = True
self.horizontal_major = self.x_on
if self.channel:
self.channel("Setting Axis: h=" + str(self.x_on))
elif self.mode == 2:
# Rail unlocked.
self.compact_state = True
self.returning_finished = False
self.returning_compact = True
self.laser = 0
elif c == "P":
if self.channel:
self.channel("Pause")
self.laser = 0
if self.paused_state:
# Home sequence triggered by 2 P commands in the same packet.
# This should resume if not located within the same packet.
if self.position:
self.position((self.x, self.y, 0, 0))
self.x = 0
self.y = 0
self.distance_y = 0
self.distance_x = 0
self.finish_state = True
self.paused_state = False
else:
self.execute_distance() # distance is executed by a P command
self.paused_state = True
elif c == "N":
if self.channel:
self.channel("N")
self.execute_distance() # distance is executed by an N command.
self.laser = 0
self.compact_state = False
if self.position:
self.position(None)
elif c == "M":
self.x_on = True
self.y_on = True
if self.channel:
a = "Top" if self.top else "Bottom"
b = "Left" if self.left else "Right"
self.channel("Diagonal %s %s" % (a, b))
class EGVBlob:
def __init__(self, data: bytearray, name=None):
self.name = name
self.data = data
self.operation = "blob"
self._cutcode = None
self._cut = None
def __repr__(self):
return "EGV(%s, %d bytes)" % (self.name, len(self.data))
def as_cutobjects(self):
parser = LihuiyuParser()
self._cutcode = CutCode()
self._cut = RawCut()
def new_cut():
if self._cut is not None and len(self._cut):
self._cutcode.append(self._cut)
self._cut = RawCut()
self._cut.settings = dict(parser.settings)
def position(p):
if p is None or self._cut is None:
new_cut()
return
from_x, from_y, to_x, to_y = p
if parser.program_mode:
if len(self._cut.plot) == 0:
self._cut.plot_append(int(from_x), int(from_y), parser.laser)
self._cut.plot_append(int(to_x), int(to_y), parser.laser)
else:
new_cut()
parser.position = position
parser.header_write(self.data)
cutcode = self._cutcode
self._cut = None
self._cutcode = None
return cutcode
def generate(self):
yield "blob", "egv", LihuiyuParser.remove_header(self.data)
class EgvLoader:
@staticmethod
def remove_header(data):
count_lines = 0
count_flag = 0
for i in range(len(data)):
b = data[i]
c = chr(b)
if c == "\n":
count_lines += 1
elif c == "%":
count_flag += 1
if count_lines >= 3 and count_flag >= 5:
return data[i:]
@staticmethod
def load_types():
yield "Engrave Files", ("egv",), "application/x-egv"
@staticmethod
def load(kernel, elements_modifier, pathname, **kwargs):
import os
basename = os.path.basename(pathname)
with open(pathname, "rb") as f:
op_branch = elements_modifier.get(type="branch ops")
op_branch.add(
data=bytearray(EgvLoader.remove_header(f.read())),
data_type="egv",
type="blob",
name=basename,
)
return True
Row 2 stats: avg_line_length 32.777992 | max_line_length 118 | alphanum_fraction 0.52777 | count_classes 16,712 (score 0.984275) | count_generators 165 (score 0.009718) | count_decorators 1,547 (score 0.091113) | count_async_functions 0 (score 0) | count_documentation 1,703 (score 0.1003)
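Row 2's LihuiyuParser can be driven directly with raw LHYMicro-GL bytes; a minimal sketch, assuming meerk40t is importable and using an illustrative (not verified) command string:

```python
# Minimal sketch: feed LHYMicro-GL bytes to the parser and observe movement.
# The command bytes below are illustrative only, not a verified LHYMicro-GL program.
from meerk40t.lihuiyu.lihuiyuemulator import LihuiyuParser

parser = LihuiyuParser()
parser.channel = print  # route parser debug messages to stdout
parser.position = lambda p: p and print("moved:", p)  # (x0, y0, x1, y1)
parser.write(b"IBzzzzRzzS1E")
print("final position:", parser.x, parser.y)
```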
Row 3 metadata: hexsha 6a7f701b1440f625bfec8817f0a39a899231c69f | size 105,704 | ext py | lang Python
Repo path: tencentcloud/dbbrain/v20210527/models.py | repo: lleiyyang/tencentcloud-sdk-python | head hexsha: e6e6a4ce89286673b2322ae92d3c2fbf8665aa0b | licenses: ["Apache-2.0"] (identical across the stars/issues/forks field groups)
Stars: 465 (2018-04-27T09:54:59.000Z to 2022-03-29T02:18:01.000Z) | issues: 91 (2018-04-27T09:48:11.000Z to 2022-03-12T08:04:04.000Z) | forks: 232 (2018-05-02T08:02:46.000Z to 2022-03-30T08:02:48.000Z)
Content:
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from tencentcloud.common.abstract_model import AbstractModel
class AddUserContactRequest(AbstractModel):
"""AddUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Name: 联系人姓名,由中英文、数字、空格、!@#$%^&*()_+-=()组成,不能以下划线开头,长度在20以内。
:type Name: str
:param ContactInfo: 邮箱地址,支持大小写字母、数字、下划线及@字符, 不能以下划线开头,邮箱地址不可重复。
:type ContactInfo: str
:param Product: 服务产品类型,固定值:"mysql"。
:type Product: str
"""
self.Name = None
self.ContactInfo = None
self.Product = None
def _deserialize(self, params):
self.Name = params.get("Name")
self.ContactInfo = params.get("ContactInfo")
self.Product = params.get("Product")
        # Warn when the caller supplies fields this model does not define.
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class AddUserContactResponse(AbstractModel):
"""AddUserContact返回参数结构体
"""
def __init__(self):
r"""
:param Id: 添加成功的联系人id。
:type Id: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Id = None
self.RequestId = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.RequestId = params.get("RequestId")
class ContactItem(AbstractModel):
"""联系人contact描述。
"""
def __init__(self):
r"""
:param Id: 联系人id。
:type Id: int
:param Name: 联系人姓名。
:type Name: str
:param Mail: 联系人绑定的邮箱。
:type Mail: str
"""
self.Id = None
self.Name = None
self.Mail = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.Mail = params.get("Mail")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportTaskRequest(AbstractModel):
"""CreateDBDiagReportTask请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 开始时间,如“2020-11-08T14:00:00+08:00”。
:type StartTime: str
:param EndTime: 结束时间,如“2020-11-09T14:00:00+08:00”。
:type EndTime: str
:param SendMailFlag: 是否发送邮件: 0 - 否,1 - 是。
:type SendMailFlag: int
:param ContactPerson: 接收邮件的联系人ID数组。
:type ContactPerson: list of int
:param ContactGroup: 接收邮件的联系组ID数组。
:type ContactGroup: list of int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认值为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SendMailFlag = None
self.ContactPerson = None
self.ContactGroup = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SendMailFlag = params.get("SendMailFlag")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportTaskResponse(AbstractModel):
"""CreateDBDiagReportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务的请求 ID,可使用此 ID 查询异步任务的执行结果。
注意:此字段可能返回 null,表示取不到有效值。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class CreateDBDiagReportUrlRequest(AbstractModel):
"""CreateDBDiagReportUrl请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param AsyncRequestId: 健康报告相应的任务ID,可通过DescribeDBDiagReportTasks查询。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateDBDiagReportUrlResponse(AbstractModel):
"""CreateDBDiagReportUrl返回参数结构体
"""
def __init__(self):
r"""
:param ReportUrl: 健康报告浏览地址。
:type ReportUrl: str
:param ExpireTime: 健康报告浏览地址到期时间戳(秒)。
:type ExpireTime: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ReportUrl = None
self.ExpireTime = None
self.RequestId = None
def _deserialize(self, params):
self.ReportUrl = params.get("ReportUrl")
self.ExpireTime = params.get("ExpireTime")
self.RequestId = params.get("RequestId")
class CreateMailProfileRequest(AbstractModel):
"""CreateMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
:type ProfileLevel: str
:param ProfileName: 配置名称,需要保持唯一性,数据库巡检邮件配置名称自拟;定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param BindInstanceIds: 配置绑定的实例ID,当配置级别为"Instance"时需要传入且只能为一个实例;当配置级别为“User”时,此参数不填。
:type BindInstanceIds: list of str
"""
self.ProfileInfo = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileType = None
self.Product = None
self.BindInstanceIds = None
def _deserialize(self, params):
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.BindInstanceIds = params.get("BindInstanceIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateMailProfileResponse(AbstractModel):
"""CreateMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSchedulerMailProfileRequest(AbstractModel):
"""CreateSchedulerMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param WeekConfiguration: 取值范围1-7,分别代表周一至周日。
:type WeekConfiguration: list of int
:param ProfileInfo: 邮件配置内容。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
:param ProfileName: 配置名称,需要保持唯一性,定期生成邮件配置命名格式:"scheduler_" + {instanceId},如"schduler_cdb-test"。
:type ProfileName: str
:param BindInstanceId: 配置订阅的实例ID。
:type BindInstanceId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.WeekConfiguration = None
self.ProfileInfo = None
self.ProfileName = None
self.BindInstanceId = None
self.Product = None
def _deserialize(self, params):
self.WeekConfiguration = params.get("WeekConfiguration")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
self.ProfileName = params.get("ProfileName")
self.BindInstanceId = params.get("BindInstanceId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateSchedulerMailProfileResponse(AbstractModel):
"""CreateSchedulerMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class CreateSecurityAuditLogExportTaskRequest(AbstractModel):
"""CreateSecurityAuditLogExportTask请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param StartTime: 导出日志开始时间,例如2020-12-28 00:00:00。
:type StartTime: str
:param EndTime: 导出日志结束时间,例如2020-12-28 01:00:00。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param DangerLevels: 日志风险等级列表,支持值包括:0 无风险;1 低风险;2 中风险;3 高风险。
:type DangerLevels: list of int
"""
self.SecAuditGroupId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.DangerLevels = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.DangerLevels = params.get("DangerLevels")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class CreateSecurityAuditLogExportTaskResponse(AbstractModel):
"""CreateSecurityAuditLogExportTask返回参数结构体
"""
def __init__(self):
r"""
:param AsyncRequestId: 日志导出任务Id。
:type AsyncRequestId: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.AsyncRequestId = None
self.RequestId = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.RequestId = params.get("RequestId")
class DeleteSecurityAuditLogExportTasksRequest(AbstractModel):
"""DeleteSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestIds: 日志导出任务Id列表,接口会忽略不存在或已删除的任务Id。
:type AsyncRequestIds: list of int non-negative
:param Product: 服务产品类型,支持值: "mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestIds = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DeleteSecurityAuditLogExportTasksResponse(AbstractModel):
"""DeleteSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class DescribeAllUserContactRequest(AbstractModel):
"""DescribeAllUserContact请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系人名数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeAllUserContactResponse(AbstractModel):
"""DescribeAllUserContact返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 联系人的总数量。
:type TotalCount: int
:param Contacts: 联系人的信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Contacts: list of ContactItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Contacts = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Contacts") is not None:
self.Contacts = []
for item in params.get("Contacts"):
obj = ContactItem()
obj._deserialize(item)
self.Contacts.append(obj)
self.RequestId = params.get("RequestId")
class DescribeAllUserGroupRequest(AbstractModel):
"""DescribeAllUserGroup请求参数结构体
"""
def __init__(self):
r"""
:param Product: 服务产品类型,固定值:mysql。
:type Product: str
:param Names: 联系组名称数组,支持模糊搜索。
:type Names: list of str
"""
self.Product = None
self.Names = None
def _deserialize(self, params):
self.Product = params.get("Product")
self.Names = params.get("Names")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeAllUserGroupResponse(AbstractModel):
"""DescribeAllUserGroup返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 组总数。
:type TotalCount: int
:param Groups: 组信息。
注意:此字段可能返回 null,表示取不到有效值。
:type Groups: list of GroupItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Groups = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Groups") is not None:
self.Groups = []
for item in params.get("Groups"):
obj = GroupItem()
obj._deserialize(item)
self.Groups.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagEventRequest(AbstractModel):
"""DescribeDBDiagEvent请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param EventId: 事件 ID 。通过“获取实例诊断历史DescribeDBDiagHistory”获取。
:type EventId: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.EventId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.EventId = params.get("EventId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBDiagEventResponse(AbstractModel):
"""DescribeDBDiagEvent返回参数结构体
"""
def __init__(self):
r"""
:param DiagItem: 诊断项。
:type DiagItem: str
:param DiagType: 诊断类型。
:type DiagType: str
:param EventId: 事件 ID 。
:type EventId: int
:param Explanation: 诊断事件详情,若无附加解释信息则输出为空。
:type Explanation: str
:param Outline: 诊断概要。
:type Outline: str
:param Problem: 诊断出的问题。
:type Problem: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param StartTime: 开始时间
:type StartTime: str
:param Suggestions: 诊断建议,若无建议则输出为空。
:type Suggestions: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param EndTime: 结束时间。
:type EndTime: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.DiagItem = None
self.DiagType = None
self.EventId = None
self.Explanation = None
self.Outline = None
self.Problem = None
self.Severity = None
self.StartTime = None
self.Suggestions = None
self.Metric = None
self.EndTime = None
self.RequestId = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.DiagType = params.get("DiagType")
self.EventId = params.get("EventId")
self.Explanation = params.get("Explanation")
self.Outline = params.get("Outline")
self.Problem = params.get("Problem")
self.Severity = params.get("Severity")
self.StartTime = params.get("StartTime")
self.Suggestions = params.get("Suggestions")
self.Metric = params.get("Metric")
self.EndTime = params.get("EndTime")
self.RequestId = params.get("RequestId")
class DescribeDBDiagHistoryRequest(AbstractModel):
"""DescribeDBDiagHistory请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-11 12:13:14”,结束时间与开始时间的间隔最大可为2天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBDiagHistoryResponse(AbstractModel):
"""DescribeDBDiagHistory返回参数结构体
"""
def __init__(self):
r"""
:param Events: 事件描述。
:type Events: list of DiagHistoryEventItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Events = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = DiagHistoryEventItem()
obj._deserialize(item)
self.Events.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBDiagReportTasksRequest(AbstractModel):
"""DescribeDBDiagReportTasks请求参数结构体
"""
def __init__(self):
r"""
:param StartTime: 第一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 最后一个任务的开始时间,用于范围查询,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param InstanceIds: 实例ID数组,用于筛选指定实例的任务列表。
:type InstanceIds: list of str
:param Sources: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Sources: list of str
:param HealthLevels: 报告的健康等级,支持的取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK" - 危险;"HIGH_RISK" - 高危。
:type HealthLevels: str
:param TaskStatuses: 任务的状态,支持的取值包括:"created" - 新建;"chosen" - 待执行; "running" - 执行中;"failed" - 失败;"finished" - 已完成。
:type TaskStatuses: str
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.StartTime = None
self.EndTime = None
self.InstanceIds = None
self.Sources = None
self.HealthLevels = None
self.TaskStatuses = None
self.Offset = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.InstanceIds = params.get("InstanceIds")
self.Sources = params.get("Sources")
self.HealthLevels = params.get("HealthLevels")
self.TaskStatuses = params.get("TaskStatuses")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBDiagReportTasksResponse(AbstractModel):
"""DescribeDBDiagReportTasks返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 任务总数目。
:type TotalCount: int
:param Tasks: 任务列表。
:type Tasks: list of HealthReportTask
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Tasks = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = HealthReportTask()
obj._deserialize(item)
self.Tasks.append(obj)
self.RequestId = params.get("RequestId")
class DescribeDBSpaceStatusRequest(AbstractModel):
"""DescribeDBSpaceStatus请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param RangeDays: 时间段天数,截止日期为当日,默认为7天。
:type RangeDays: int
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.RangeDays = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.RangeDays = params.get("RangeDays")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDBSpaceStatusResponse(AbstractModel):
"""DescribeDBSpaceStatus返回参数结构体
"""
def __init__(self):
r"""
:param Growth: 磁盘增长量(MB)。
:type Growth: int
:param Remain: 磁盘剩余(MB)。
:type Remain: int
:param Total: 磁盘总量(MB)。
:type Total: int
:param AvailableDays: 预计可用天数。
:type AvailableDays: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Growth = None
self.Remain = None
self.Total = None
self.AvailableDays = None
self.RequestId = None
def _deserialize(self, params):
self.Growth = params.get("Growth")
self.Remain = params.get("Remain")
self.Total = params.get("Total")
self.AvailableDays = params.get("AvailableDays")
self.RequestId = params.get("RequestId")
class DescribeDiagDBInstancesRequest(AbstractModel):
"""DescribeDiagDBInstances请求参数结构体
"""
def __init__(self):
r"""
:param IsSupported: 是否是DBbrain支持的实例,固定传 true。
:type IsSupported: bool
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页参数,偏移量。
:type Offset: int
:param Limit: 分页参数,分页值,最大值为100。
:type Limit: int
:param InstanceNames: 根据实例名称条件查询。
:type InstanceNames: list of str
:param InstanceIds: 根据实例ID条件查询。
:type InstanceIds: list of str
:param Regions: 根据地域条件查询。
:type Regions: list of str
"""
self.IsSupported = None
self.Product = None
self.Offset = None
self.Limit = None
self.InstanceNames = None
self.InstanceIds = None
self.Regions = None
def _deserialize(self, params):
self.IsSupported = params.get("IsSupported")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.InstanceNames = params.get("InstanceNames")
self.InstanceIds = params.get("InstanceIds")
self.Regions = params.get("Regions")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeDiagDBInstancesResponse(AbstractModel):
"""DescribeDiagDBInstances返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 实例总数。
:type TotalCount: int
:param DbScanStatus: 全实例巡检状态:0:开启全实例巡检;1:未开启全实例巡检。
:type DbScanStatus: int
:param Items: 实例相关信息。
:type Items: list of InstanceInfo
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.DbScanStatus = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
self.DbScanStatus = params.get("DbScanStatus")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = InstanceInfo()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeHealthScoreRequest(AbstractModel):
"""DescribeHealthScore请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 需要获取健康得分的实例ID。
:type InstanceId: str
:param Time: 获取健康得分的时间,时间格式如:2019-09-10 12:13:14。
:type Time: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Time = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Time = params.get("Time")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeHealthScoreResponse(AbstractModel):
"""DescribeHealthScore返回参数结构体
"""
def __init__(self):
r"""
:param Data: 健康得分以及异常扣分项。
:type Data: :class:`tencentcloud.dbbrain.v20210527.models.HealthScoreInfo`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Data = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Data") is not None:
self.Data = HealthScoreInfo()
self.Data._deserialize(params.get("Data"))
self.RequestId = params.get("RequestId")
class DescribeMailProfileRequest(AbstractModel):
"""DescribeMailProfile请求参数结构体
"""
def __init__(self):
r"""
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
:type ProfileType: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Offset: 分页偏移量。
:type Offset: int
:param Limit: 分页单位,最大支持50。
:type Limit: int
:param ProfileName: 根据邮件配置名称查询,定期发送的邮件配置名称遵循:"scheduler_"+{instanceId}的规则。
:type ProfileName: str
"""
self.ProfileType = None
self.Product = None
self.Offset = None
self.Limit = None
self.ProfileName = None
def _deserialize(self, params):
self.ProfileType = params.get("ProfileType")
self.Product = params.get("Product")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
self.ProfileName = params.get("ProfileName")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeMailProfileResponse(AbstractModel):
"""DescribeMailProfile返回参数结构体
"""
def __init__(self):
r"""
:param ProfileList: 邮件配置详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileList: list of UserProfile
:param TotalCount: 邮件模版总数。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProfileList = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProfileList") is not None:
self.ProfileList = []
for item in params.get("ProfileList"):
obj = UserProfile()
obj._deserialize(item)
self.ProfileList.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeMySqlProcessListRequest(AbstractModel):
"""DescribeMySqlProcessList请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param ID: 线程的ID,用于筛选线程列表。
:type ID: int
:param User: 线程的操作账号名,用于筛选线程列表。
:type User: str
:param Host: 线程的操作主机地址,用于筛选线程列表。
:type Host: str
:param DB: 线程的操作数据库,用于筛选线程列表。
:type DB: str
:param State: 线程的操作状态,用于筛选线程列表。
:type State: str
:param Command: 线程的执行类型,用于筛选线程列表。
:type Command: str
:param Time: 线程的操作时长最小值,单位秒,用于筛选操作时长大于该值的线程列表。
:type Time: int
:param Info: 线程的操作语句,用于筛选线程列表。
:type Info: str
:param Limit: 返回数量,默认20。
:type Limit: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
self.Limit = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
self.Limit = params.get("Limit")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeMySqlProcessListResponse(AbstractModel):
"""DescribeMySqlProcessList返回参数结构体
"""
def __init__(self):
r"""
:param ProcessList: 实时线程列表。
:type ProcessList: list of MySqlProcess
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.ProcessList = None
self.RequestId = None
def _deserialize(self, params):
if params.get("ProcessList") is not None:
self.ProcessList = []
for item in params.get("ProcessList"):
obj = MySqlProcess()
obj._deserialize(item)
self.ProcessList.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogDownloadUrlsRequest(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
"""
self.SecAuditGroupId = None
self.AsyncRequestId = None
self.Product = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.AsyncRequestId = params.get("AsyncRequestId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSecurityAuditLogDownloadUrlsResponse(AbstractModel):
"""DescribeSecurityAuditLogDownloadUrls返回参数结构体
"""
def __init__(self):
r"""
:param Urls: 导出结果的COS链接列表。当结果集很大时,可能会切分为多个url下载。
:type Urls: list of str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Urls = None
self.RequestId = None
def _deserialize(self, params):
self.Urls = params.get("Urls")
self.RequestId = params.get("RequestId")
class DescribeSecurityAuditLogExportTasksRequest(AbstractModel):
"""DescribeSecurityAuditLogExportTasks请求参数结构体
"""
def __init__(self):
r"""
:param SecAuditGroupId: 安全审计组Id。
:type SecAuditGroupId: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL。
:type Product: str
:param AsyncRequestIds: 日志导出任务Id列表。
:type AsyncRequestIds: list of int non-negative
:param Offset: 偏移量,默认0。
:type Offset: int
:param Limit: 返回数量,默认20,最大值为100。
:type Limit: int
"""
self.SecAuditGroupId = None
self.Product = None
self.AsyncRequestIds = None
self.Offset = None
self.Limit = None
def _deserialize(self, params):
self.SecAuditGroupId = params.get("SecAuditGroupId")
self.Product = params.get("Product")
self.AsyncRequestIds = params.get("AsyncRequestIds")
self.Offset = params.get("Offset")
self.Limit = params.get("Limit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSecurityAuditLogExportTasksResponse(AbstractModel):
"""DescribeSecurityAuditLogExportTasks返回参数结构体
"""
def __init__(self):
r"""
:param Tasks: 安全审计日志导出任务列表。
:type Tasks: list of SecLogExportTaskInfo
:param TotalCount: 安全审计日志导出任务总数。
:type TotalCount: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Tasks = None
self.TotalCount = None
self.RequestId = None
def _deserialize(self, params):
if params.get("Tasks") is not None:
self.Tasks = []
for item in params.get("Tasks"):
obj = SecLogExportTaskInfo()
obj._deserialize(item)
self.Tasks.append(obj)
self.TotalCount = params.get("TotalCount")
self.RequestId = params.get("RequestId")
class DescribeSlowLogTimeSeriesStatsRequest(AbstractModel):
"""DescribeSlowLogTimeSeriesStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 结束时间,如“2019-09-10 12:13:14”,结束时间与开始时间的间隔最大可为7天。
:type EndTime: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogTimeSeriesStatsResponse(AbstractModel):
"""DescribeSlowLogTimeSeriesStats返回参数结构体
"""
def __init__(self):
r"""
:param Period: 柱间单位时间间隔,单位为秒。
:type Period: int
:param TimeSeries: 单位时间间隔内慢日志数量统计。
:type TimeSeries: list of TimeSlice
:param SeriesData: 单位时间间隔内的实例 cpu 利用率监控数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Period = None
self.TimeSeries = None
self.SeriesData = None
self.RequestId = None
def _deserialize(self, params):
self.Period = params.get("Period")
if params.get("TimeSeries") is not None:
self.TimeSeries = []
for item in params.get("TimeSeries"):
obj = TimeSlice()
obj._deserialize(item)
self.TimeSeries.append(obj)
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
self.RequestId = params.get("RequestId")
class DescribeSlowLogTopSqlsRequest(AbstractModel):
"""DescribeSlowLogTopSqls请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param StartTime: 开始时间,如“2019-09-10 12:13:14”。
:type StartTime: str
:param EndTime: 截止时间,如“2019-09-11 10:13:14”,截止时间与开始时间的间隔小于7天。
:type EndTime: str
:param SortBy: 排序键,目前支持 QueryTime,ExecTimes,RowsSent,LockTime以及RowsExamined 等排序键,默认为QueryTime。
:type SortBy: str
:param OrderBy: 排序方式,支持ASC(升序)以及DESC(降序),默认为DESC。
:type OrderBy: str
:param Limit: 返回数量,默认为20,最大值为100。
:type Limit: int
:param Offset: 偏移量,默认为0。
:type Offset: int
:param SchemaList: 数据库名称数组。
:type SchemaList: list of SchemaItem
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.SortBy = None
self.OrderBy = None
self.Limit = None
self.Offset = None
self.SchemaList = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.SortBy = params.get("SortBy")
self.OrderBy = params.get("OrderBy")
self.Limit = params.get("Limit")
self.Offset = params.get("Offset")
if params.get("SchemaList") is not None:
self.SchemaList = []
for item in params.get("SchemaList"):
obj = SchemaItem()
obj._deserialize(item)
self.SchemaList.append(obj)
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogTopSqlsResponse(AbstractModel):
"""DescribeSlowLogTopSqls返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 符合条件的记录总数。
:type TotalCount: int
:param Rows: 慢日志 top sql 列表
:type Rows: list of SlowLogTopSqlItem
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Rows = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Rows") is not None:
self.Rows = []
for item in params.get("Rows"):
obj = SlowLogTopSqlItem()
obj._deserialize(item)
self.Rows.append(obj)
self.RequestId = params.get("RequestId")
class DescribeSlowLogUserHostStatsRequest(AbstractModel):
"""DescribeSlowLogUserHostStats请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param StartTime: 查询范围的开始时间,时间格式如:2019-09-10 12:13:14。
:type StartTime: str
:param EndTime: 查询范围的结束时间,时间格式如:2019-09-10 12:13:14。
:type EndTime: str
:param Product: 服务产品类型,支持值:"mysql" - 云数据库 MySQL;"cynosdb" - 云数据库 TDSQL-C for MySQL,默认为"mysql"。
:type Product: str
:param Md5: SOL模板的MD5值
:type Md5: str
"""
self.InstanceId = None
self.StartTime = None
self.EndTime = None
self.Product = None
self.Md5 = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Product = params.get("Product")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeSlowLogUserHostStatsResponse(AbstractModel):
"""DescribeSlowLogUserHostStats返回参数结构体
"""
def __init__(self):
r"""
:param TotalCount: 来源地址数目。
:type TotalCount: int
:param Items: 各来源地址的慢日志占比详情列表。
:type Items: list of SlowLogHost
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TotalCount = None
self.Items = None
self.RequestId = None
def _deserialize(self, params):
self.TotalCount = params.get("TotalCount")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = SlowLogHost()
obj._deserialize(item)
self.Items.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemaTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemaTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceSchemaTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemaTimeSeries: 返回的Top库空间统计信息的时序数据列表。
:type TopSpaceSchemaTimeSeries: list of SchemaSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemaTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemaTimeSeries") is not None:
self.TopSpaceSchemaTimeSeries = []
for item in params.get("TopSpaceSchemaTimeSeries"):
obj = SchemaSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceSchemaTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceSchemasRequest(AbstractModel):
"""DescribeTopSpaceSchemas请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top库数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top库所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceSchemasResponse(AbstractModel):
"""DescribeTopSpaceSchemas返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceSchemas: 返回的Top库空间统计信息列表。
:type TopSpaceSchemas: list of SchemaSpaceData
:param Timestamp: 采集库空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceSchemas = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceSchemas") is not None:
self.TopSpaceSchemas = []
for item in params.get("TopSpaceSchemas"):
obj = SchemaSpaceData()
obj._deserialize(item)
self.TopSpaceSchemas.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTableTimeSeriesRequest(AbstractModel):
"""DescribeTopSpaceTableTimeSeries请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize,默认为 PhysicalFileSize。
:type SortBy: str
:param StartDate: 开始日期,如“2021-01-01”,最早为当日的前第29天,默认为截止日期的前第6天。
:type StartDate: str
:param EndDate: 截止日期,如“2021-01-01”,最早为当日的前第29天,默认为当日。
:type EndDate: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.StartDate = None
self.EndDate = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.StartDate = params.get("StartDate")
self.EndDate = params.get("EndDate")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTableTimeSeriesResponse(AbstractModel):
"""DescribeTopSpaceTableTimeSeries返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTableTimeSeries: 返回的Top表空间统计信息的时序数据列表。
:type TopSpaceTableTimeSeries: list of TableSpaceTimeSeries
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTableTimeSeries = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTableTimeSeries") is not None:
self.TopSpaceTableTimeSeries = []
for item in params.get("TopSpaceTableTimeSeries"):
obj = TableSpaceTimeSeries()
obj._deserialize(item)
self.TopSpaceTableTimeSeries.append(obj)
self.RequestId = params.get("RequestId")
class DescribeTopSpaceTablesRequest(AbstractModel):
"""DescribeTopSpaceTables请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Limit: 返回的Top表数量,最大值为100,默认为20。
:type Limit: int
:param SortBy: 筛选Top表所用的排序字段,可选字段包含DataLength、IndexLength、TotalLength、DataFree、FragRatio、TableRows、PhysicalFileSize(仅云数据库 MySQL实例支持),云数据库 MySQL实例默认为 PhysicalFileSize,其他产品实例默认为TotalLength。
:type SortBy: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Limit = None
self.SortBy = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Limit = params.get("Limit")
self.SortBy = params.get("SortBy")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeTopSpaceTablesResponse(AbstractModel):
"""DescribeTopSpaceTables返回参数结构体
"""
def __init__(self):
r"""
:param TopSpaceTables: 返回的Top表空间统计信息列表。
:type TopSpaceTables: list of TableSpaceData
:param Timestamp: 采集表空间数据的时间戳(秒)。
:type Timestamp: int
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.TopSpaceTables = None
self.Timestamp = None
self.RequestId = None
def _deserialize(self, params):
if params.get("TopSpaceTables") is not None:
self.TopSpaceTables = []
for item in params.get("TopSpaceTables"):
obj = TableSpaceData()
obj._deserialize(item)
self.TopSpaceTables.append(obj)
self.Timestamp = params.get("Timestamp")
self.RequestId = params.get("RequestId")
class DescribeUserSqlAdviceRequest(AbstractModel):
"""DescribeUserSqlAdvice请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
"""
self.InstanceId = None
self.SqlText = None
self.Schema = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class DescribeUserSqlAdviceResponse(AbstractModel):
"""DescribeUserSqlAdvice返回参数结构体
"""
def __init__(self):
r"""
:param Advices: SQL优化建议,可解析为JSON数组,无需优化时输出为空。
:type Advices: str
:param Comments: SQL优化建议备注,可解析为String数组,无需优化时输出为空。
:type Comments: str
:param SqlText: SQL语句。
:type SqlText: str
:param Schema: 库名。
:type Schema: str
:param Tables: 相关表的DDL信息,可解析为JSON数组。
:type Tables: str
:param SqlPlan: SQL执行计划,可解析为JSON,无需优化时输出为空。
:type SqlPlan: str
:param Cost: SQL优化后的成本节约详情,可解析为JSON,无需优化时输出为空。
:type Cost: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Advices = None
self.Comments = None
self.SqlText = None
self.Schema = None
self.Tables = None
self.SqlPlan = None
self.Cost = None
self.RequestId = None
def _deserialize(self, params):
self.Advices = params.get("Advices")
self.Comments = params.get("Comments")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.Tables = params.get("Tables")
self.SqlPlan = params.get("SqlPlan")
self.Cost = params.get("Cost")
self.RequestId = params.get("RequestId")
class DiagHistoryEventItem(AbstractModel):
"""实例诊断历史事件
"""
def __init__(self):
r"""
:param DiagType: 诊断类型。
:type DiagType: str
:param EndTime: 结束时间。
:type EndTime: str
:param StartTime: 开始时间。
:type StartTime: str
:param EventId: 事件唯一ID 。
:type EventId: int
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param Outline: 诊断概要。
:type Outline: str
:param DiagItem: 诊断项说明。
:type DiagItem: str
:param InstanceId: 实例 ID 。
:type InstanceId: str
:param Metric: 保留字段。
注意:此字段可能返回 null,表示取不到有效值。
:type Metric: str
:param Region: 地域。
:type Region: str
"""
self.DiagType = None
self.EndTime = None
self.StartTime = None
self.EventId = None
self.Severity = None
self.Outline = None
self.DiagItem = None
self.InstanceId = None
self.Metric = None
self.Region = None
def _deserialize(self, params):
self.DiagType = params.get("DiagType")
self.EndTime = params.get("EndTime")
self.StartTime = params.get("StartTime")
self.EventId = params.get("EventId")
self.Severity = params.get("Severity")
self.Outline = params.get("Outline")
self.DiagItem = params.get("DiagItem")
self.InstanceId = params.get("InstanceId")
self.Metric = params.get("Metric")
self.Region = params.get("Region")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class EventInfo(AbstractModel):
"""异常事件信息。
"""
def __init__(self):
r"""
:param EventId: 事件 ID 。
:type EventId: int
:param DiagType: 诊断类型。
:type DiagType: str
:param StartTime: 开始时间。
:type StartTime: str
:param EndTime: 结束时间。
:type EndTime: str
:param Outline: 概要。
:type Outline: str
:param Severity: 严重程度。严重程度分为5级,按影响程度从高至低分别为:1:致命,2:严重,3:告警,4:提示,5:健康。
:type Severity: int
:param ScoreLost: 扣分。
:type ScoreLost: int
:param Metric: 保留字段。
:type Metric: str
:param Count: 告警数目。
:type Count: int
"""
self.EventId = None
self.DiagType = None
self.StartTime = None
self.EndTime = None
self.Outline = None
self.Severity = None
self.ScoreLost = None
self.Metric = None
self.Count = None
def _deserialize(self, params):
self.EventId = params.get("EventId")
self.DiagType = params.get("DiagType")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.Outline = params.get("Outline")
self.Severity = params.get("Severity")
self.ScoreLost = params.get("ScoreLost")
self.Metric = params.get("Metric")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class GroupItem(AbstractModel):
"""描述组信息。
"""
def __init__(self):
r"""
:param Id: 组id。
:type Id: int
:param Name: 组名称。
:type Name: str
:param MemberCount: 组成员数量。
:type MemberCount: int
"""
self.Id = None
self.Name = None
self.MemberCount = None
def _deserialize(self, params):
self.Id = params.get("Id")
self.Name = params.get("Name")
self.MemberCount = params.get("MemberCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthReportTask(AbstractModel):
"""健康报告任务详情。
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务请求 ID。
:type AsyncRequestId: int
:param Source: 任务的触发来源,支持的取值包括:"DAILY_INSPECTION" - 实例巡检;"SCHEDULED" - 定时生成;"MANUAL" - 手动触发。
:type Source: str
:param Progress: 任务完成进度,单位%。
:type Progress: int
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param StartTime: 任务开始执行时间。
:type StartTime: str
:param EndTime: 任务完成执行时间。
:type EndTime: str
:param InstanceInfo: 任务所属实例的基础信息。
:type InstanceInfo: :class:`tencentcloud.dbbrain.v20210527.models.InstanceBasicInfo`
:param HealthStatus: 健康报告中的健康信息。
:type HealthStatus: :class:`tencentcloud.dbbrain.v20210527.models.HealthStatus`
"""
self.AsyncRequestId = None
self.Source = None
self.Progress = None
self.CreateTime = None
self.StartTime = None
self.EndTime = None
self.InstanceInfo = None
self.HealthStatus = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.Source = params.get("Source")
self.Progress = params.get("Progress")
self.CreateTime = params.get("CreateTime")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
if params.get("InstanceInfo") is not None:
self.InstanceInfo = InstanceBasicInfo()
self.InstanceInfo._deserialize(params.get("InstanceInfo"))
if params.get("HealthStatus") is not None:
self.HealthStatus = HealthStatus()
self.HealthStatus._deserialize(params.get("HealthStatus"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthScoreInfo(AbstractModel):
"""获取健康得分返回的详情。
"""
def __init__(self):
r"""
:param IssueTypes: 异常详情。
:type IssueTypes: list of IssueTypeInfo
:param EventsTotalCount: 异常事件总数。
:type EventsTotalCount: int
:param HealthScore: 健康得分。
:type HealthScore: int
:param HealthLevel: 健康等级, 如:"HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"。
:type HealthLevel: str
"""
self.IssueTypes = None
self.EventsTotalCount = None
self.HealthScore = None
self.HealthLevel = None
def _deserialize(self, params):
if params.get("IssueTypes") is not None:
self.IssueTypes = []
for item in params.get("IssueTypes"):
obj = IssueTypeInfo()
obj._deserialize(item)
self.IssueTypes.append(obj)
self.EventsTotalCount = params.get("EventsTotalCount")
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class HealthStatus(AbstractModel):
"""实例健康详情。
"""
def __init__(self):
r"""
:param HealthScore: 健康分数,满分100。
:type HealthScore: int
:param HealthLevel: 健康等级,取值包括:"HEALTH" - 健康;"SUB_HEALTH" - 亚健康;"RISK"- 危险;"HIGH_RISK" - 高危。
:type HealthLevel: str
:param ScoreLost: 总扣分分数。
:type ScoreLost: int
:param ScoreDetails: 扣分详情。
注意:此字段可能返回 null,表示取不到有效值。
:type ScoreDetails: list of ScoreDetail
"""
self.HealthScore = None
self.HealthLevel = None
self.ScoreLost = None
self.ScoreDetails = None
def _deserialize(self, params):
self.HealthScore = params.get("HealthScore")
self.HealthLevel = params.get("HealthLevel")
self.ScoreLost = params.get("ScoreLost")
if params.get("ScoreDetails") is not None:
self.ScoreDetails = []
for item in params.get("ScoreDetails"):
obj = ScoreDetail()
obj._deserialize(item)
self.ScoreDetails.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceBasicInfo(AbstractModel):
"""实例基础信息。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Vip: 实例内网IP。
:type Vip: str
:param Vport: 实例内网Port。
:type Vport: int
:param Product: 实例产品。
:type Product: str
:param EngineVersion: 实例引擎版本。
:type EngineVersion: str
"""
self.InstanceId = None
self.InstanceName = None
self.Vip = None
self.Vport = None
self.Product = None
self.EngineVersion = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Product = params.get("Product")
self.EngineVersion = params.get("EngineVersion")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceConfs(AbstractModel):
"""实例配置。
"""
def __init__(self):
r"""
:param DailyInspection: 数据库巡检开关, Yes/No。
:type DailyInspection: str
:param OverviewDisplay: 实例概览开关,Yes/No。
:type OverviewDisplay: str
"""
self.DailyInspection = None
self.OverviewDisplay = None
def _deserialize(self, params):
self.DailyInspection = params.get("DailyInspection")
self.OverviewDisplay = params.get("OverviewDisplay")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class InstanceInfo(AbstractModel):
"""查询实例列表,返回实例的相关信息的对象。
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param InstanceName: 实例名称。
:type InstanceName: str
:param Region: 实例所属地域。
:type Region: str
:param HealthScore: 健康得分。
:type HealthScore: int
:param Product: 所属产品。
:type Product: str
:param EventCount: 异常事件数量。
:type EventCount: int
:param InstanceType: 实例类型:1:MASTER;2:DR,3:RO,4:SDR。
:type InstanceType: int
:param Cpu: 核心数。
:type Cpu: int
:param Memory: 内存,单位MB。
:type Memory: int
:param Volume: 硬盘存储,单位GB。
:type Volume: int
:param EngineVersion: 数据库版本。
:type EngineVersion: str
:param Vip: 内网地址。
:type Vip: str
:param Vport: 内网端口。
:type Vport: int
:param Source: 接入来源。
:type Source: str
:param GroupId: 分组ID。
:type GroupId: str
:param GroupName: 分组组名。
:type GroupName: str
:param Status: 实例状态:0:发货中;1:运行正常;4:销毁中;5:隔离中。
:type Status: int
:param UniqSubnetId: 子网统一ID。
:type UniqSubnetId: str
:param DeployMode: cdb类型。
:type DeployMode: str
:param InitFlag: cdb实例初始化标志:0:未初始化;1:已初始化。
:type InitFlag: int
:param TaskStatus: 任务状态。
:type TaskStatus: int
:param UniqVpcId: 私有网络统一ID。
:type UniqVpcId: str
:param InstanceConf: 实例巡检/概览的状态。
:type InstanceConf: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param DeadlineTime: 资源到期时间。
:type DeadlineTime: str
:param IsSupported: 是否是DBbrain支持的实例。
:type IsSupported: bool
:param SecAuditStatus: 实例安全审计日志开启状态:ON: 安全审计开启;OFF: 未开启安全审计。
:type SecAuditStatus: str
:param AuditPolicyStatus: 实例审计日志开启状态,ALL_AUDIT: 开启全审计;RULE_AUDIT: 开启规则审计;UNBOUND: 未开启审计。
:type AuditPolicyStatus: str
:param AuditRunningStatus: 实例审计日志运行状态:normal: 运行中; paused: 欠费暂停。
:type AuditRunningStatus: str
"""
self.InstanceId = None
self.InstanceName = None
self.Region = None
self.HealthScore = None
self.Product = None
self.EventCount = None
self.InstanceType = None
self.Cpu = None
self.Memory = None
self.Volume = None
self.EngineVersion = None
self.Vip = None
self.Vport = None
self.Source = None
self.GroupId = None
self.GroupName = None
self.Status = None
self.UniqSubnetId = None
self.DeployMode = None
self.InitFlag = None
self.TaskStatus = None
self.UniqVpcId = None
self.InstanceConf = None
self.DeadlineTime = None
self.IsSupported = None
self.SecAuditStatus = None
self.AuditPolicyStatus = None
self.AuditRunningStatus = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.InstanceName = params.get("InstanceName")
self.Region = params.get("Region")
self.HealthScore = params.get("HealthScore")
self.Product = params.get("Product")
self.EventCount = params.get("EventCount")
self.InstanceType = params.get("InstanceType")
self.Cpu = params.get("Cpu")
self.Memory = params.get("Memory")
self.Volume = params.get("Volume")
self.EngineVersion = params.get("EngineVersion")
self.Vip = params.get("Vip")
self.Vport = params.get("Vport")
self.Source = params.get("Source")
self.GroupId = params.get("GroupId")
self.GroupName = params.get("GroupName")
self.Status = params.get("Status")
self.UniqSubnetId = params.get("UniqSubnetId")
self.DeployMode = params.get("DeployMode")
self.InitFlag = params.get("InitFlag")
self.TaskStatus = params.get("TaskStatus")
self.UniqVpcId = params.get("UniqVpcId")
if params.get("InstanceConf") is not None:
self.InstanceConf = InstanceConfs()
self.InstanceConf._deserialize(params.get("InstanceConf"))
self.DeadlineTime = params.get("DeadlineTime")
self.IsSupported = params.get("IsSupported")
self.SecAuditStatus = params.get("SecAuditStatus")
self.AuditPolicyStatus = params.get("AuditPolicyStatus")
self.AuditRunningStatus = params.get("AuditRunningStatus")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class IssueTypeInfo(AbstractModel):
"""指标信息。
"""
def __init__(self):
r"""
:param IssueType: 指标分类:AVAILABILITY:可用性,MAINTAINABILITY:可维护性,PERFORMANCE,性能,RELIABILITY可靠性。
:type IssueType: str
:param Events: 异常事件。
:type Events: list of EventInfo
:param TotalCount: 异常事件总数。
:type TotalCount: int
"""
self.IssueType = None
self.Events = None
self.TotalCount = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
if params.get("Events") is not None:
self.Events = []
for item in params.get("Events"):
obj = EventInfo()
obj._deserialize(item)
self.Events.append(obj)
self.TotalCount = params.get("TotalCount")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsRequest(AbstractModel):
"""KillMySqlThreads请求参数结构体
"""
def __init__(self):
r"""
:param InstanceId: 实例ID。
:type InstanceId: str
:param Stage: kill会话任务的阶段,取值包括:"Prepare"-准备阶段,"Commit"-提交阶段。
:type Stage: str
:param Threads: 需要kill的sql会话ID列表,此参数用于Prepare阶段。
:type Threads: list of int
:param SqlExecId: 执行ID,此参数用于Commit阶段。
:type SqlExecId: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL,默认为"mysql"。
:type Product: str
"""
self.InstanceId = None
self.Stage = None
self.Threads = None
self.SqlExecId = None
self.Product = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
self.Stage = params.get("Stage")
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.Product = params.get("Product")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class KillMySqlThreadsResponse(AbstractModel):
"""KillMySqlThreads返回参数结构体
"""
def __init__(self):
r"""
:param Threads: kill完成的sql会话ID列表。
:type Threads: list of int
:param SqlExecId: 执行ID, Prepare阶段的任务输出,用于Commit阶段中指定执行kill操作的会话ID。
注意:此字段可能返回 null,表示取不到有效值。
:type SqlExecId: str
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.Threads = None
self.SqlExecId = None
self.RequestId = None
def _deserialize(self, params):
self.Threads = params.get("Threads")
self.SqlExecId = params.get("SqlExecId")
self.RequestId = params.get("RequestId")
class MailConfiguration(AbstractModel):
"""邮件发送配置
"""
def __init__(self):
r"""
:param SendMail: 是否开启邮件发送: 0, 否; 1, 是。
:type SendMail: int
:param Region: 地域配置, 如["ap-guangzhou", "ap-shanghai"]。巡检的邮件发送模版,配置需要发送巡检邮件的地域;订阅的邮件发送模版,配置当前订阅实例的所属地域。
:type Region: list of str
:param HealthStatus: 发送指定的健康等级的报告, 如["HEALTH", "SUB_HEALTH", "RISK", "HIGH_RISK"]。
:type HealthStatus: list of str
:param ContactPerson: 联系人id, 联系人/联系组不能都为空。
:type ContactPerson: list of int
:param ContactGroup: 联系组id, 联系人/联系组不能都为空。
:type ContactGroup: list of int
"""
self.SendMail = None
self.Region = None
self.HealthStatus = None
self.ContactPerson = None
self.ContactGroup = None
def _deserialize(self, params):
self.SendMail = params.get("SendMail")
self.Region = params.get("Region")
self.HealthStatus = params.get("HealthStatus")
self.ContactPerson = params.get("ContactPerson")
self.ContactGroup = params.get("ContactGroup")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfRequest(AbstractModel):
"""ModifyDiagDBInstanceConf请求参数结构体
"""
def __init__(self):
r"""
:param InstanceConfs: 实例配置,包括巡检、概览开关等。
:type InstanceConfs: :class:`tencentcloud.dbbrain.v20210527.models.InstanceConfs`
:param Regions: 生效实例地域,取值为"All",代表全地域。
:type Regions: str
:param Product: 服务产品类型,支持值包括: "mysql" - 云数据库 MySQL, "cynosdb" - 云数据库 CynosDB for MySQL。
:type Product: str
:param InstanceIds: 指定更改巡检状态的实例ID。
:type InstanceIds: list of str
"""
self.InstanceConfs = None
self.Regions = None
self.Product = None
self.InstanceIds = None
def _deserialize(self, params):
if params.get("InstanceConfs") is not None:
self.InstanceConfs = InstanceConfs()
self.InstanceConfs._deserialize(params.get("InstanceConfs"))
self.Regions = params.get("Regions")
self.Product = params.get("Product")
self.InstanceIds = params.get("InstanceIds")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ModifyDiagDBInstanceConfResponse(AbstractModel):
"""ModifyDiagDBInstanceConf返回参数结构体
"""
def __init__(self):
r"""
:param RequestId: 唯一请求 ID,每次请求都会返回。定位问题时需要提供该次请求的 RequestId。
:type RequestId: str
"""
self.RequestId = None
def _deserialize(self, params):
self.RequestId = params.get("RequestId")
class MonitorFloatMetric(AbstractModel):
"""监控数据(浮点型)
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorFloatMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据(浮点型)
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorFloatMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorFloatMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetric(AbstractModel):
"""监控数据
"""
def __init__(self):
r"""
:param Metric: 指标名称。
:type Metric: str
:param Unit: 指标单位。
:type Unit: str
:param Values: 指标值。
注意:此字段可能返回 null,表示取不到有效值。
:type Values: list of float
"""
self.Metric = None
self.Unit = None
self.Values = None
def _deserialize(self, params):
self.Metric = params.get("Metric")
self.Unit = params.get("Unit")
self.Values = params.get("Values")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MonitorMetricSeriesData(AbstractModel):
"""单位时间间隔内的监控指标数据
"""
def __init__(self):
r"""
:param Series: 监控指标。
:type Series: list of MonitorMetric
:param Timestamp: 监控指标对应的时间戳。
:type Timestamp: list of int
"""
self.Series = None
self.Timestamp = None
def _deserialize(self, params):
if params.get("Series") is not None:
self.Series = []
for item in params.get("Series"):
obj = MonitorMetric()
obj._deserialize(item)
self.Series.append(obj)
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class MySqlProcess(AbstractModel):
"""关系型数据库线程
"""
def __init__(self):
r"""
:param ID: 线程ID。
:type ID: str
:param User: 线程的操作账号名。
:type User: str
:param Host: 线程的操作主机地址。
:type Host: str
:param DB: 线程的操作数据库。
:type DB: str
:param State: 线程的操作状态。
:type State: str
:param Command: 线程的执行类型。
:type Command: str
:param Time: 线程的操作时长,单位秒。
:type Time: str
:param Info: 线程的操作语句。
:type Info: str
"""
self.ID = None
self.User = None
self.Host = None
self.DB = None
self.State = None
self.Command = None
self.Time = None
self.Info = None
def _deserialize(self, params):
self.ID = params.get("ID")
self.User = params.get("User")
self.Host = params.get("Host")
self.DB = params.get("DB")
self.State = params.get("State")
self.Command = params.get("Command")
self.Time = params.get("Time")
self.Info = params.get("Info")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ProfileInfo(AbstractModel):
"""用户配置的信息
"""
def __init__(self):
r"""
:param Language: 语言, 如"zh"。
:type Language: str
:param MailConfiguration: 邮件模板的内容。
:type MailConfiguration: :class:`tencentcloud.dbbrain.v20210527.models.MailConfiguration`
"""
self.Language = None
self.MailConfiguration = None
def _deserialize(self, params):
self.Language = params.get("Language")
if params.get("MailConfiguration") is not None:
self.MailConfiguration = MailConfiguration()
self.MailConfiguration._deserialize(params.get("MailConfiguration"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaItem(AbstractModel):
"""SchemaItem数组
"""
def __init__(self):
r"""
:param Schema: 数据库名称
:type Schema: str
"""
self.Schema = None
def _deserialize(self, params):
self.Schema = params.get("Schema")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceData(AbstractModel):
"""库空间统计数据。
"""
def __init__(self):
r"""
:param TableSchema: 库名。
:type TableSchema: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 库中所有表对应的独立物理文件大小加和(MB)。
注意:此字段可能返回 null,表示取不到有效值。
:type PhysicalFileSize: float
"""
self.TableSchema = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SchemaSpaceTimeSeries(AbstractModel):
"""库空间时序数据
"""
def __init__(self):
r"""
:param TableSchema: 库名
:type TableSchema: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorMetricSeriesData`
"""
self.TableSchema = None
self.SeriesData = None
def _deserialize(self, params):
self.TableSchema = params.get("TableSchema")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreDetail(AbstractModel):
"""扣分详情。
"""
def __init__(self):
r"""
:param IssueType: 扣分项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param ScoreLost: 扣分总分。
:type ScoreLost: int
:param ScoreLostMax: 扣分总分上限。
:type ScoreLostMax: int
:param Items: 扣分项列表。
注意:此字段可能返回 null,表示取不到有效值。
:type Items: list of ScoreItem
"""
self.IssueType = None
self.ScoreLost = None
self.ScoreLostMax = None
self.Items = None
def _deserialize(self, params):
self.IssueType = params.get("IssueType")
self.ScoreLost = params.get("ScoreLost")
self.ScoreLostMax = params.get("ScoreLostMax")
if params.get("Items") is not None:
self.Items = []
for item in params.get("Items"):
obj = ScoreItem()
obj._deserialize(item)
self.Items.append(obj)
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class ScoreItem(AbstractModel):
"""诊断扣分项。
"""
def __init__(self):
r"""
:param DiagItem: 异常诊断项名称。
:type DiagItem: str
:param IssueType: 诊断项分类,取值包括:可用性、可维护性、性能及可靠性。
:type IssueType: str
:param TopSeverity: 健康等级,取值包括:信息、提示、告警、严重、致命。
:type TopSeverity: str
:param Count: 该异常诊断项出现次数。
:type Count: int
:param ScoreLost: 扣分分数。
:type ScoreLost: int
"""
self.DiagItem = None
self.IssueType = None
self.TopSeverity = None
self.Count = None
self.ScoreLost = None
def _deserialize(self, params):
self.DiagItem = params.get("DiagItem")
self.IssueType = params.get("IssueType")
self.TopSeverity = params.get("TopSeverity")
self.Count = params.get("Count")
self.ScoreLost = params.get("ScoreLost")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SecLogExportTaskInfo(AbstractModel):
"""安全审计日志导出任务信息
"""
def __init__(self):
r"""
:param AsyncRequestId: 异步任务Id。
:type AsyncRequestId: int
:param StartTime: 任务开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type StartTime: str
:param EndTime: 任务结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type EndTime: str
:param CreateTime: 任务创建时间。
:type CreateTime: str
:param Status: 任务状态。
:type Status: str
:param Progress: 任务执行进度。
:type Progress: int
:param LogStartTime: 导出日志开始时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogStartTime: str
:param LogEndTime: 导出日志结束时间。
注意:此字段可能返回 null,表示取不到有效值。
:type LogEndTime: str
:param TotalSize: 日志文件总大小,单位KB。
注意:此字段可能返回 null,表示取不到有效值。
:type TotalSize: int
:param DangerLevels: 风险等级列表。0 无风险;1 低风险;2 中风险;3 高风险。
注意:此字段可能返回 null,表示取不到有效值。
:type DangerLevels: list of int non-negative
"""
self.AsyncRequestId = None
self.StartTime = None
self.EndTime = None
self.CreateTime = None
self.Status = None
self.Progress = None
self.LogStartTime = None
self.LogEndTime = None
self.TotalSize = None
self.DangerLevels = None
def _deserialize(self, params):
self.AsyncRequestId = params.get("AsyncRequestId")
self.StartTime = params.get("StartTime")
self.EndTime = params.get("EndTime")
self.CreateTime = params.get("CreateTime")
self.Status = params.get("Status")
self.Progress = params.get("Progress")
self.LogStartTime = params.get("LogStartTime")
self.LogEndTime = params.get("LogEndTime")
self.TotalSize = params.get("TotalSize")
self.DangerLevels = params.get("DangerLevels")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogHost(AbstractModel):
"""慢日志来源地址详情。
"""
def __init__(self):
r"""
:param UserHost: 来源地址。
:type UserHost: str
:param Ratio: 该来源地址的慢日志数目占总数目的比例,单位%。
:type Ratio: float
:param Count: 该来源地址的慢日志数目。
:type Count: int
"""
self.UserHost = None
self.Ratio = None
self.Count = None
def _deserialize(self, params):
self.UserHost = params.get("UserHost")
self.Ratio = params.get("Ratio")
self.Count = params.get("Count")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class SlowLogTopSqlItem(AbstractModel):
"""慢日志TopSql
"""
def __init__(self):
r"""
:param LockTime: sql总锁等待时间,单位秒
:type LockTime: float
:param LockTimeMax: 最大锁等待时间,单位秒
:type LockTimeMax: float
:param LockTimeMin: 最小锁等待时间,单位秒
:type LockTimeMin: float
:param RowsExamined: 总扫描行数
:type RowsExamined: int
:param RowsExaminedMax: 最大扫描行数
:type RowsExaminedMax: int
:param RowsExaminedMin: 最小扫描行数
:type RowsExaminedMin: int
:param QueryTime: 总耗时,单位秒
:type QueryTime: float
:param QueryTimeMax: 最大执行时间,单位秒
:type QueryTimeMax: float
:param QueryTimeMin: 最小执行时间,单位秒
:type QueryTimeMin: float
:param RowsSent: 总返回行数
:type RowsSent: int
:param RowsSentMax: 最大返回行数
:type RowsSentMax: int
:param RowsSentMin: 最小返回行数
:type RowsSentMin: int
:param ExecTimes: 执行次数
:type ExecTimes: int
:param SqlTemplate: sql模板
:type SqlTemplate: str
:param SqlText: 带参数SQL(随机)
:type SqlText: str
:param Schema: 数据库名
:type Schema: str
:param QueryTimeRatio: 总耗时占比,单位%
:type QueryTimeRatio: float
:param LockTimeRatio: sql总锁等待时间占比,单位%
:type LockTimeRatio: float
:param RowsExaminedRatio: 总扫描行数占比,单位%
:type RowsExaminedRatio: float
:param RowsSentRatio: 总返回行数占比,单位%
:type RowsSentRatio: float
:param QueryTimeAvg: 平均执行时间,单位秒
:type QueryTimeAvg: float
:param RowsSentAvg: 平均返回行数
:type RowsSentAvg: float
:param LockTimeAvg: 平均锁等待时间,单位秒
:type LockTimeAvg: float
:param RowsExaminedAvg: 平均扫描行数
:type RowsExaminedAvg: float
:param Md5: SOL模板的MD5值
:type Md5: str
"""
self.LockTime = None
self.LockTimeMax = None
self.LockTimeMin = None
self.RowsExamined = None
self.RowsExaminedMax = None
self.RowsExaminedMin = None
self.QueryTime = None
self.QueryTimeMax = None
self.QueryTimeMin = None
self.RowsSent = None
self.RowsSentMax = None
self.RowsSentMin = None
self.ExecTimes = None
self.SqlTemplate = None
self.SqlText = None
self.Schema = None
self.QueryTimeRatio = None
self.LockTimeRatio = None
self.RowsExaminedRatio = None
self.RowsSentRatio = None
self.QueryTimeAvg = None
self.RowsSentAvg = None
self.LockTimeAvg = None
self.RowsExaminedAvg = None
self.Md5 = None
def _deserialize(self, params):
self.LockTime = params.get("LockTime")
self.LockTimeMax = params.get("LockTimeMax")
self.LockTimeMin = params.get("LockTimeMin")
self.RowsExamined = params.get("RowsExamined")
self.RowsExaminedMax = params.get("RowsExaminedMax")
self.RowsExaminedMin = params.get("RowsExaminedMin")
self.QueryTime = params.get("QueryTime")
self.QueryTimeMax = params.get("QueryTimeMax")
self.QueryTimeMin = params.get("QueryTimeMin")
self.RowsSent = params.get("RowsSent")
self.RowsSentMax = params.get("RowsSentMax")
self.RowsSentMin = params.get("RowsSentMin")
self.ExecTimes = params.get("ExecTimes")
self.SqlTemplate = params.get("SqlTemplate")
self.SqlText = params.get("SqlText")
self.Schema = params.get("Schema")
self.QueryTimeRatio = params.get("QueryTimeRatio")
self.LockTimeRatio = params.get("LockTimeRatio")
self.RowsExaminedRatio = params.get("RowsExaminedRatio")
self.RowsSentRatio = params.get("RowsSentRatio")
self.QueryTimeAvg = params.get("QueryTimeAvg")
self.RowsSentAvg = params.get("RowsSentAvg")
self.LockTimeAvg = params.get("LockTimeAvg")
self.RowsExaminedAvg = params.get("RowsExaminedAvg")
self.Md5 = params.get("Md5")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceData(AbstractModel):
"""库表空间统计数据。
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param DataLength: 数据空间(MB)。
:type DataLength: float
:param IndexLength: 索引空间(MB)。
:type IndexLength: float
:param DataFree: 碎片空间(MB)。
:type DataFree: float
:param TotalLength: 总使用空间(MB)。
:type TotalLength: float
:param FragRatio: 碎片率(%)。
:type FragRatio: float
:param TableRows: 行数。
:type TableRows: int
:param PhysicalFileSize: 表对应的独立物理文件大小(MB)。
:type PhysicalFileSize: float
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.DataLength = None
self.IndexLength = None
self.DataFree = None
self.TotalLength = None
self.FragRatio = None
self.TableRows = None
self.PhysicalFileSize = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
self.DataLength = params.get("DataLength")
self.IndexLength = params.get("IndexLength")
self.DataFree = params.get("DataFree")
self.TotalLength = params.get("TotalLength")
self.FragRatio = params.get("FragRatio")
self.TableRows = params.get("TableRows")
self.PhysicalFileSize = params.get("PhysicalFileSize")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TableSpaceTimeSeries(AbstractModel):
"""库表空间时序数据
"""
def __init__(self):
r"""
:param TableName: 表名。
:type TableName: str
:param TableSchema: 库名。
:type TableSchema: str
:param Engine: 库表的存储引擎。
:type Engine: str
:param SeriesData: 单位时间间隔内的空间指标数据。
:type SeriesData: :class:`tencentcloud.dbbrain.v20210527.models.MonitorFloatMetricSeriesData`
"""
self.TableName = None
self.TableSchema = None
self.Engine = None
self.SeriesData = None
def _deserialize(self, params):
self.TableName = params.get("TableName")
self.TableSchema = params.get("TableSchema")
self.Engine = params.get("Engine")
if params.get("SeriesData") is not None:
self.SeriesData = MonitorFloatMetricSeriesData()
self.SeriesData._deserialize(params.get("SeriesData"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class TimeSlice(AbstractModel):
"""单位时间间隔内的慢日志统计
"""
def __init__(self):
r"""
:param Count: 总数
:type Count: int
:param Timestamp: 统计开始时间
:type Timestamp: int
"""
self.Count = None
self.Timestamp = None
def _deserialize(self, params):
self.Count = params.get("Count")
self.Timestamp = params.get("Timestamp")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
class UserProfile(AbstractModel):
"""用户配置的相关信息,包括邮件配置。
"""
def __init__(self):
r"""
:param ProfileId: 配置的id。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileId: str
:param ProfileType: 配置类型,支持值包括:"dbScan_mail_configuration" - 数据库巡检邮件配置,"scheduler_mail_configuration" - 定期生成邮件配置。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileType: str
:param ProfileLevel: 配置级别,支持值包括:"User" - 用户级别,"Instance" - 实例级别,其中数据库巡检邮件配置为用户级别,定期生成邮件配置为实例级别。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileLevel: str
:param ProfileName: 配置名称。
注意:此字段可能返回 null,表示取不到有效值。
:type ProfileName: str
:param ProfileInfo: 配置详情。
:type ProfileInfo: :class:`tencentcloud.dbbrain.v20210527.models.ProfileInfo`
"""
self.ProfileId = None
self.ProfileType = None
self.ProfileLevel = None
self.ProfileName = None
self.ProfileInfo = None
def _deserialize(self, params):
self.ProfileId = params.get("ProfileId")
self.ProfileType = params.get("ProfileType")
self.ProfileLevel = params.get("ProfileLevel")
self.ProfileName = params.get("ProfileName")
if params.get("ProfileInfo") is not None:
self.ProfileInfo = ProfileInfo()
self.ProfileInfo._deserialize(params.get("ProfileInfo"))
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set))
| 31.310427 | 195 | 0.597149 | 117,814 | 0.98723 | 0 | 0 | 0 | 0 | 0 | 0 | 59,733 | 0.500536 |
6a7fd9c2a4520acac2ad0d4b073014e3ffeaa218 | 20,152 | py | Python | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 2,027 | 2019-11-12T18:05:48.000Z | 2022-03-31T22:25:04.000Z | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 496 | 2019-11-12T18:13:37.000Z | 2022-03-31T10:43:45.000Z | oauth/provider.py | giuseppe/quay | a1b7e4b51974edfe86f66788621011eef2667e6a | [
"Apache-2.0"
] | 249 | 2019-11-12T18:02:27.000Z | 2022-03-22T12:19:19.000Z | # Ported to Python 3
# Originally from https://github.com/DeprecatedCode/oauth2lib/blob/d161b010f8a596826050a09e5e94d59443cc12d9/oauth2lib/provider.py
import json
import logging
from requests import Response
from io import StringIO
try:
from werkzeug.exceptions import Unauthorized
except ImportError:
Unauthorized = Exception
from oauth import utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
logger = logging.getLogger(__name__)
logger.exception(exc)
def _make_response(self, body="", headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
res = Response()
res.status_code = status_code
if headers is not None:
res.headers.update(headers)
res.raw = StringIO(body)
return res
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {"error": err, "response_type": None, "client_id": None, "redirect_uri": None}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers["Content-Type"] = "application/json;charset=UTF-8"
response_headers["Cache-Control"] = "no-store"
response_headers["Pragma"] = "no-cache"
return self._make_response(json.dumps(data), response_headers, status_code)
def _make_json_error_response(self, err):
"""Return a JSON-encoded response object representing the error.
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
return self._make_json_response({"error": err}, status_code=400)
def _invalid_redirect_uri_response(self):
"""What to return when the redirect_uri parameter is missing.
:rtype: requests.Response
"""
return self._make_json_error_response("invalid_request")
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
validate_client_id(self, client_id)
# Return True or False
validate_client_secret(self, client_id, client_secret)
# Return True or False
validate_scope(self, client_id, scope)
# Return True or False
validate_redirect_uri(self, client_id, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, client_id, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
    Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return "Bearer"
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def get_authorization_code(self, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
# Ensure proper response_type
if response_type != "code":
err = "unsupported_response_type"
return self._make_redirect_error_response(redirect_uri, err)
# Check redirect URI
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
if not is_valid_redirect_uri:
return self._invalid_redirect_uri_response()
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_access = self.validate_access()
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
# Return proper error responses on invalid conditions
if not is_valid_client_id:
err = "unauthorized_client"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_access:
err = "access_denied"
return self._make_redirect_error_response(redirect_uri, err)
if not is_valid_scope:
err = "invalid_scope"
return self._make_redirect_error_response(redirect_uri, err)
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(client_id=client_id, code=code, scope=scope)
# Return redirection response
params.update(
{"code": code, "response_type": None, "client_id": None, "redirect_uri": None}
)
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={"Location": redirect}, status_code=302)
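    # For example (illustrative values only), a request such as
    #   GET /authorize?response_type=code&client_id=example
    #       &redirect_uri=https://client.example/cb&state=xyz
    # produces a 302 whose Location carries the generated code and echoes
    # any extra query parameters:
    #   Location: https://client.example/cb?code=<generated>&state=xyz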
def refresh_token(self, grant_type, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param grant_type: Desired grant type. Must be "refresh_token".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "refresh_token":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_refresh_token = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
if not is_valid_refresh_token:
return self._make_json_error_response("invalid_grant")
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_token(self, grant_type, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param grant_type: Desired grant type. Must be "authorization_code".
:type grant_type: str
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
# Ensure proper grant_type
if grant_type != "authorization_code":
return self._make_json_error_response("unsupported_grant_type")
# Check conditions
is_valid_client_id = self.validate_client_id(client_id)
is_valid_client_secret = self.validate_client_secret(client_id, client_secret)
is_valid_redirect_uri = self.validate_redirect_uri(client_id, redirect_uri)
scope = params.get("scope", "")
is_valid_scope = self.validate_scope(client_id, scope)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
# Return proper error responses on invalid conditions
if not (is_valid_client_id and is_valid_client_secret):
return self._make_json_error_response("invalid_client")
if not is_valid_grant or not is_valid_redirect_uri:
return self._make_json_error_response("invalid_grant")
if not is_valid_scope:
return self._make_json_error_response("invalid_scope")
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(
client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data,
)
# Return json response
return self._make_json_response(
{
"access_token": access_token,
"token_type": token_type,
"expires_in": expires_in,
"refresh_token": refresh_token,
}
)
def get_authorization_code_from_uri(self, uri):
"""Get authorization code response from a URI. This method will
ignore the domain and path of the request, instead
automatically parsing the query string parameters.
:param uri: URI to parse for authorization information.
:type uri: str
:rtype: requests.Response
"""
params = utils.url_query_params(uri)
try:
if "response_type" not in params:
raise TypeError("Missing parameter response_type in URL query")
if "client_id" not in params:
raise TypeError("Missing parameter client_id in URL query")
if "redirect_uri" not in params:
raise TypeError("Missing parameter redirect_uri in URL query")
return self.get_authorization_code(**params)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
err = "invalid_request"
if "redirect_uri" in params:
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
else:
return self._invalid_redirect_uri_response()
        except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
err = "server_error"
u = params["redirect_uri"]
return self._make_redirect_error_response(u, err)
def get_token_from_post_data(self, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ["grant_type", "client_id", "client_secret"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
# Handle get token from refresh_token
if "refresh_token" in data:
return self.refresh_token(**data)
# Handle get token from authorization code
for x in ["redirect_uri", "code"]:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {0}".format(x))
return self.get_token(**data)
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_json_error_response("invalid_request")
        except Exception as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_json_error_response("server_error")
def validate_client_id(self, client_id):
raise NotImplementedError("Subclasses must implement " "validate_client_id.")
def validate_client_secret(self, client_id, client_secret):
raise NotImplementedError("Subclasses must implement " "validate_client_secret.")
def validate_redirect_uri(self, client_id, redirect_uri):
raise NotImplementedError("Subclasses must implement " "validate_redirect_uri.")
def validate_scope(self, client_id, scope):
raise NotImplementedError("Subclasses must implement " "validate_scope.")
def validate_access(self):
raise NotImplementedError("Subclasses must implement " "validate_access.")
def from_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "from_authorization_code.")
def from_refresh_token(self, client_id, refresh_token, scope):
raise NotImplementedError("Subclasses must implement " "from_refresh_token.")
def persist_authorization_code(self, client_id, code, scope):
raise NotImplementedError("Subclasses must implement " "persist_authorization_code.")
def persist_token_information(
self, client_id, scope, access_token, token_type, expires_in, refresh_token, data
):
raise NotImplementedError("Subclasses must implement " "persist_token_information.")
def discard_authorization_code(self, client_id, code):
raise NotImplementedError("Subclasses must implement " "discard_authorization_code.")
def discard_refresh_token(self, client_id, refresh_token):
raise NotImplementedError("Subclasses must implement " "discard_refresh_token.")
class OAuthError(Unauthorized):
"""OAuth error, including the OAuth error reason."""
def __init__(self, reason, *args, **kwargs):
self.reason = reason
super(OAuthError, self).__init__(*args, **kwargs)
class ResourceAuthorization(object):
"""A class containing an OAuth 2.0 authorization."""
is_oauth = False
is_valid = None
token = None
client_id = None
expires_in = None
error = None
def raise_error_if_invalid(self):
if not self.is_valid:
raise OAuthError(self.error, "OAuth authorization error")
class ResourceProvider(Provider):
"""OAuth 2.0 resource provider. This class provides an interface
to validate an incoming request and authenticate resource access.
Certain methods MUST be overridden in a subclass, thus this
class cannot be directly used as a resource provider.
These are the methods that must be implemented in a subclass:
get_authorization_header(self)
# Return header string for key "Authorization" or None
validate_access_token(self, access_token, authorization)
# Set is_valid=True, client_id, and expires_in attributes
# on authorization if authorization was successful.
# Return value is ignored
"""
@property
def authorization_class(self):
return ResourceAuthorization
def get_authorization(self):
"""Get authorization object representing status of authentication."""
auth = self.authorization_class()
header = self.get_authorization_header()
        if not header or not header.split():  # missing or blank header -> not authenticated
return auth
header = header.split()
if len(header) > 1 and header[0] == "Bearer":
auth.is_oauth = True
access_token = header[1]
self.validate_access_token(access_token, auth)
if not auth.is_valid:
auth.error = "access_denied"
return auth
def get_authorization_header(self):
raise NotImplementedError("Subclasses must implement " "get_authorization_header.")
def validate_access_token(self, access_token, authorization):
        raise NotImplementedError("Subclasses must implement validate_access_token.")
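# Example (illustrative sketch, not part of the original module): a minimal
# ResourceProvider subclass. `REQUEST_HEADERS` and `TOKEN_STORE` are hypothetical
# stand-ins for a real request object and token database.
#
# class InMemoryResourceProvider(ResourceProvider):
#     def get_authorization_header(self):
#         return REQUEST_HEADERS.get("Authorization")  # hypothetical request accessor
#
#     def validate_access_token(self, access_token, authorization):
#         record = TOKEN_STORE.get(access_token)  # hypothetical token lookup
#         if record is not None:
#             authorization.is_valid = True
#             authorization.client_id = record["client_id"]
#             authorization.expires_in = record["expires_in"]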
| 35.730496 | 129 | 0.654575 | 19,775 | 0.981292 | 0 | 0 | 554 | 0.027491 | 0 | 0 | 9,337 | 0.463329 |
6a811562ddff805b40048018c138048e412a8c98 | 773 | py | Python | main.py | TomHacker/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 10 | 2019-04-08T06:46:35.000Z | 2019-10-31T11:10:32.000Z | main.py | HandsomeBrotherShuaiLi/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 3 | 2020-06-02T01:24:18.000Z | 2021-05-20T04:53:26.000Z | main.py | HandsomeBrotherShuaiLi/ImageCluster | c4262e08a61c50b6d850ba29bc4d56d21c789aa9 | [
"Apache-2.0"
] | 1 | 2019-05-23T11:08:04.000Z | 2019-05-23T11:08:04.000Z | from model import ImageCluster
m = ImageCluster(
    base_model='vgg16',  # feature-map extractor model
    resorted_img_folder='resorted_data',  # output folder for the clustered images
    cluster_algo='kmeans',  # clustering algorithm
    base_img_folder='data',
    maxK=150,  # ImageCluster evaluates every k in range(2, maxK + 1), here 2..150
)
# Calculate the feature maps:
# m.get_feature_map(
#     resize_shape=(224, 224)  # (w, h) tuple; all input images are resized to this shape
# )
# Cluster the feature maps:
# m.imagecluster()
# Inspect the clustering metrics to pick the best k for this dataset (the author
# suggests 21), then call resorted_img to label the images under per-cluster folders.
m.resorted_img(
    selected_k_num=100  # an int number in range [2, maxK]
)
| 36.809524 | 95 | 0.750323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.745149 |
6a8199a221f44d9fef4df3ccc6d623b0243a377c | 1,058 | py | Python | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 253 | 2021-08-17T17:42:25.000Z | 2022-03-25T07:59:41.000Z | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 161 | 2021-08-17T16:28:08.000Z | 2022-03-27T02:36:45.000Z | tests/dummies.py | arvindmuralie77/gradsflow | d6ec5bc517dcf714cd4ecb91a7f702dce6bded3f | [
"Apache-2.0"
] | 35 | 2021-08-23T16:26:15.000Z | 2022-03-26T17:08:15.000Z | # Copyright (c) 2021 GradsFlow. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from gradsflow.models import Model
class DummyModel(Model):
def __init__(self):
learner = torch.nn.Linear(1, 4)
super().__init__(learner)
def backward(self, loss: torch.Tensor):
return None
def train_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
def val_step(self, batch):
return {"loss": torch.as_tensor(1), "metrics": {"accuracy": 1}}
| 32.060606 | 75 | 0.697543 | 397 | 0.375236 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.609641 |
6a82018dc0f7662911572e4ff805c96d468e9254 | 2,330 | py | Python | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 1 | 2020-06-21T11:18:52.000Z | 2020-06-21T11:18:52.000Z | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 644 | 2019-08-25T10:19:56.000Z | 2020-12-23T09:41:04.000Z | JumpscaleCore/tools/executor/ExecutorSerial.py | gneumann333/jumpscaleX_core | 777d249fa3668c6e802c2f765f4b82fb39c3e5fa | [
"Apache-2.0"
] | 11 | 2019-08-29T21:38:50.000Z | 2020-06-21T11:18:55.000Z | from Jumpscale import j
JSBASE = j.baseclasses.object
from .ExecutorBase import *
import serial
class ExecutorSerial(ExecutorBase):
"""
This executor is primary made to communicate with devices (routers, switch, ...) over
console cable but you can use underlaying method to communicate with any serial device.
Please note that default mode attempt to recognize a device with cisco like commands.
"""
def __init__(self, device, baudrate=9600, type="serial", parity="N", stopbits=1, bytesize=8, timeout=1):
ExecutorBase.__init__(self, checkok=False)
self.device = device
self.baudrate = baudrate
self.type = type
self.parity = parity
self.stopbits = stopbits
self.bytesize = bytesize
self.timeout = timeout
self._id = None
self._log_info("Initialized")
self.reconnect()
self.fetch()
def reconnect(self):
self.console = serial.Serial(
port=self.device,
baudrate=self.baudrate,
parity=self.parity,
stopbits=self.stopbits,
bytesize=self.bytesize,
timeout=self.timeout,
)
return True
@property
def id(self):
if self._id is None:
self._id = "serial.%s" % (self.device)
return self._id
def execute(self, cmds, die=True, checkok=None, showout=True, timeout=0, env={}):
self._log_debug("Serial command: %s" % cmds)
if not cmds.endswith("\n"):
cmds += "\n"
self.send(cmds)
return 0, "", ""
def send(self, data):
self.console.write(data.encode("utf-8"))
def fetch(self):
input = self.console.read_all()
return input.decode("utf-8")
def enter(self, command):
self.send(command)
self.send("\n")
def _execute_script(self, content="", die=True, showout=True, checkok=None):
raise j.exceptions.NotImplemented()
def upload(self, source, dest, dest_prefix="", recursive=True, createdir=True):
raise j.exceptions.NotImplemented()
def download(self, source, dest, source_prefix="", recursive=True):
raise j.exceptions.NotImplemented()
def __repr__(self):
return "Executor serial: %s" % (self.device)
__str__ = __repr__
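# Example usage (illustrative; the device path, baud rate and command are assumptions):
#
# executor = ExecutorSerial("/dev/ttyUSB0", baudrate=115200)
# executor.enter("show version")  # send a command followed by a newline
# output = executor.fetch()       # read back whatever the device answered
# print(executor)                 # -> "Executor serial: /dev/ttyUSB0"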
| 27.093023 | 108 | 0.615021 | 2,229 | 0.956652 | 0 | 0 | 131 | 0.056223 | 0 | 0 | 396 | 0.169957 |
6a836399736ccfbfdcec602215566bd6e9ae598c | 2,201 | py | Python | melisa/utils/snowflake.py | MelisaDev/melisa | 53fee10d8c1bf4dd716bc90096c16f096e11bfbf | [
"MIT"
] | 5 | 2022-03-11T19:51:28.000Z | 2022-03-13T16:28:58.000Z | melisa/utils/snowflake.py | jungledev1/melisa | 835e4b644e50b5038599ecbd1bfa510a0d3200e9 | [
"MIT"
] | 2 | 2022-03-19T18:09:39.000Z | 2022-03-23T12:18:49.000Z | melisa/utils/snowflake.py | jungledev1/melisa | 835e4b644e50b5038599ecbd1bfa510a0d3200e9 | [
"MIT"
] | 1 | 2022-03-23T07:30:04.000Z | 2022-03-23T07:30:04.000Z | # Copyright MelisaDev 2022 - Present
# Full MIT License can be found in `LICENSE.txt` at the project root.
from __future__ import annotations
class Snowflake(int):
"""
Discord utilizes Twitter's snowflake format for uniquely identifiable descriptors (IDs).
These IDs are guaranteed to be unique across all of Discord,
except in some unique scenarios in which child objects share their parent's ID.
Because Snowflake IDs are up to 64 bits in size (e.g. a uint64),
they are always returned as strings in the HTTP API
to prevent integer overflows in some languages.
See Gateway ETF/JSON for more information regarding Gateway encoding.
Read more here: https://discord.com/developers/docs/reference#snowflakes
"""
_MAX_VALUE: int = 9223372036854775807
_MIN_VALUE: int = 0
def __init__(self, _):
super().__init__()
if self < self._MIN_VALUE:
raise ValueError("snowflake value should be greater than or equal to 0.")
if self > self._MAX_VALUE:
raise ValueError(
"snowflake value should be less than or equal to 9223372036854775807."
)
@classmethod
def __factory__(cls, string: str) -> Snowflake:
return cls.from_string(string)
@classmethod
def from_string(cls, string: str):
"""Initialize a new Snowflake from a string.
Parameters
----------
string: :class:`str`
The snowflake as a string.
"""
return Snowflake(int(string))
@property
def timestamp(self) -> int:
"""
Milliseconds since Discord Epoch, the first second of 2015 or 1420070400000.
"""
return self >> 22
@property
def worker_id(self) -> int:
"""Internal worker ID"""
        return (self >> 17) % 32  # the worker ID is a 5-bit field
@property
def process_id(self) -> int:
"""Internal process ID"""
        return (self >> 12) % 32  # the process ID is a 5-bit field
@property
def increment(self) -> int:
"""For every ID that is generated on that process, this number is incremented"""
        return self % 4096  # the increment is a 12-bit field
@property
def unix(self) -> int:
return self.timestamp + 1420070400000
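# Example (illustrative), decomposing the sample snowflake from the Discord docs:
#
# sf = Snowflake.from_string("175928847299117063")
# sf.timestamp   # -> 41944705796 (ms since the Discord epoch)
# sf.unix        # -> 1462015105796 (ms since the Unix epoch)
# sf.worker_id   # -> 1
# sf.process_id  # -> 0
# sf.increment   # -> 7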
| 30.150685 | 92 | 0.63562 | 2,055 | 0.933667 | 0 | 0 | 994 | 0.451613 | 0 | 0 | 1,203 | 0.54657 |
6a83ea727e6668f4f022e77a641fbd9d212a22e3 | 8,749 | py | Python | feed/serializers/extensions.py | cul-it/arxiv-rss | 40c0e859528119cc8ba3700312cb8df095d95cdd | [
"MIT"
] | 4 | 2020-06-29T15:05:37.000Z | 2022-02-02T10:28:28.000Z | feed/serializers/extensions.py | arXiv/arxiv-feed | 82923d062e2524df94c22490cf936a988559ce66 | [
"MIT"
] | 12 | 2020-03-06T16:45:00.000Z | 2022-03-02T15:36:14.000Z | feed/serializers/extensions.py | cul-it/arxiv-rss | 40c0e859528119cc8ba3700312cb8df095d95cdd | [
"MIT"
] | 2 | 2020-12-06T16:30:06.000Z | 2021-11-05T12:29:08.000Z | """Classes derived from the Feedgen extension classes."""
from typing import Dict, List, Optional
from lxml import etree
from lxml.etree import Element
from flask import current_app
from feedgen.ext.base import BaseEntryExtension, BaseExtension
from feed.domain import Author, Media
class ArxivExtension(BaseExtension):
"""Extension of the Feedgen class to allow us to change its behavior."""
def extend_atom(self: BaseExtension, atom_feed: Element) -> Element:
"""Allow the extension to modify the initial feed tree for Atom.
Parameters
----------
atom_feed : Element
The feed's root element.
Returns
-------
atom_feed : Element
The feed's root element.
"""
return atom_feed
def extend_rss(self: BaseExtension, rss_feed: Element) -> Element:
"""Allow the extension to modify the initial feed tree for RSS.
Parameters
----------
rss_feed : Element
The feed's root element.
Returns
-------
rss_feed : Element
The feed's root element.
"""
return rss_feed
def extend_ns(self: BaseExtension) -> Dict[str, str]:
"""
Define the feed's namespaces.
Returns
-------
namespaces : Dict[str, str]
Definitions of the "arxiv" namespaces.
"""
return {
"arxiv": "http://arxiv.org/schemas/atom",
"content": "http://purl.org/rss/1.0/modules/content/",
"taxo": "http://purl.org/rss/1.0/modules/taxonomy/",
"syn": "http://purl.org/rss/1.0/modules/syndication/",
"admin": "http://webns.net/mvcb/",
"media": "http://search.yahoo.com/mrss",
}
class ArxivAtomExtension(BaseEntryExtension):
"""Atom only extension."""
def extend_ns(self: BaseExtension) -> Dict[str, str]:
"""
Define the feed's namespaces.
Returns
-------
namespaces : Dict[str, str]
Definitions of the "arxiv" namespaces.
"""
return {
"arxiv": "http://arxiv.org/schemas/atom",
}
class ArxivEntryExtension(BaseEntryExtension):
"""Extension of the Entry class to allow us to change its behavior."""
def __init__(self: BaseEntryExtension):
"""Initialize the member values to all be empty."""
self.__arxiv_authors: List[Author] = []
self.__arxiv_media: List[Media] = []
self.__arxiv_comment: Optional[str] = None
self.__arxiv_primary_category: Optional[str] = None
self.__arxiv_doi: Optional[dict] = None
self.__arxiv_affiliation: Optional[str] = None
self.__arxiv_journal_ref: Optional[str] = None
self.__arxiv_affiliations: Dict = {}
def __add_media(self, entry: Element) -> None:
for media in self.__arxiv_media:
group = etree.SubElement(
entry, "{http://search.yahoo.com/mrss}group"
)
title = etree.SubElement(
group, "{http://search.yahoo.com/mrss}title"
)
title.text = media.title
etree.SubElement(
group,
"{http://search.yahoo.com/mrss}content",
attrib={"url": media.url, "type": media.type},
)
def extend_atom(self, entry: Element) -> Element:
"""
Allow the extension to modify the entry element for Atom serialization.
Parameters
----------
entry : Element
The FeedEntry to modify.
Returns
-------
entry : Element
The modified entry.
"""
if self.__arxiv_comment:
comment_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}comment"
)
comment_element.text = self.__arxiv_comment
if self.__arxiv_primary_category:
etree.SubElement(
entry,
"{http://arxiv.org/schemas/atom}primary_category",
attrib=self.__arxiv_primary_category,
)
if self.__arxiv_journal_ref:
journal_ref_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}journal_ref"
)
journal_ref_element.text = self.__arxiv_journal_ref
if self.__arxiv_doi:
for doi in self.__arxiv_doi:
doi_element = etree.SubElement(
entry, "{http://arxiv.org/schemas/atom}doi"
)
doi_element.text = doi
# Check each of the entry's author nodes
for entry_child in entry:
if entry_child.tag == "author":
author = entry_child
for author_child in author:
# If the author's name is in the affiliation dictionary,
# add Elements for all of its affiliations.
if author_child.tag == "name":
name = author_child.text
affiliations = self.__arxiv_affiliations.get(name, [])
for affiliation in affiliations:
element = etree.SubElement(
author,
"{http://arxiv.org/schemas/atom}affiliation",
)
element.text = affiliation
self.__add_media(entry=entry)
return entry
def extend_rss(self, entry: Element) -> Element:
"""Allow the extension to modify the entry element for RSS.
Parameters
----------
entry : Element
The FeedEntry to modify.
Returns
-------
entry : Element
The modified entry.
"""
base_server: str = current_app.config["BASE_SERVER"]
for entry_child in entry:
if entry_child.tag == "description":
description = "<p>Authors: "
first = True
for author in self.__arxiv_authors:
if first:
first = False
else:
description += ", "
name = (
f"{author.last_name},"
f"+{author.initials.replace(' ', '+')}"
)
description += (
f'<a href="http://{base_server}/search/?query={name}&'
f'searchtype=author">{author.full_name}</a>'
)
description += f"</p><p>{entry_child.text}</p>"
entry_child.text = description
self.__add_media(entry=entry)
return entry
def author(self, author: Author) -> None:
"""Add an author value to this entry.
Parameters
----------
author : Author
Paper author.
"""
self.__arxiv_authors.append(author)
def media(self, media: Media) -> None:
"""Add a media item.
Parameters
----------
media: Dict[str, str]
Dictionary with url and type attributes.
"""
self.__arxiv_media.append(media)
def comment(self, text: str) -> None:
"""Assign the comment value to this entry.
Parameters
----------
text : str
The new comment text.
"""
self.__arxiv_comment = text
def primary_category(self, text: str) -> None:
"""Assign the primary_category value to this entry.
Parameters
----------
text : str
The new primary_category name.
"""
self.__arxiv_primary_category = text
def journal_ref(self, text: str) -> None:
"""Assign the journal_ref value to this entry.
Parameters
----------
text : str
The new journal_ref value.
"""
self.__arxiv_journal_ref = text
def doi(self, doi_list: Dict[str, str]) -> None:
"""Assign the set of DOI definitions for this entry.
Parameters
----------
doi_list : Dict[str, str]
A dictionary of DOI assignments.
"""
self.__arxiv_doi = doi_list
def affiliation(self, full_name: str, affiliations: List[str]) -> None:
"""Assign an affiliation for one author of this entry.
Parameters
----------
full_name : str
An author's full name.
affiliations : List[str]
The code for the author's affiliated institution.
"""
self.__arxiv_affiliations[full_name] = affiliations
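# Example (illustrative sketch): wiring these classes into feedgen. The call
# below follows feedgen's documented register_extension hook; the namespace
# name "arxiv" and the sample comment text are assumptions.
#
# from feedgen.feed import FeedGenerator
#
# fg = FeedGenerator()
# fg.register_extension("arxiv", ArxivExtension, ArxivEntryExtension, atom=True, rss=True)
# entry = fg.add_entry()
# entry.arxiv.comment("17 pages, 5 figures")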
| 30.590909 | 79 | 0.529318 | 8,455 | 0.966396 | 0 | 0 | 0 | 0 | 0 | 0 | 3,941 | 0.450451 |
6a849b7bcd124ad715f2ce345cebb1f79d3397f0 | 1,132 | py | Python | discovery-infra/test_infra/helper_classes/config/controller_config.py | lranjbar/assisted-test-infra | 89cd4e16744afa646af88975f8038ca1774bcfa4 | [
"Apache-2.0"
] | null | null | null | discovery-infra/test_infra/helper_classes/config/controller_config.py | lranjbar/assisted-test-infra | 89cd4e16744afa646af88975f8038ca1774bcfa4 | [
"Apache-2.0"
] | 30 | 2021-11-15T07:10:49.000Z | 2022-03-28T07:10:26.000Z | discovery-infra/test_infra/helper_classes/config/controller_config.py | lranjbar/assisted-test-infra | 89cd4e16744afa646af88975f8038ca1774bcfa4 | [
"Apache-2.0"
] | null | null | null | from abc import ABC
from pathlib import Path
from typing import Any
from dataclasses import dataclass
from test_infra import consts
from test_infra.utils.global_variables import GlobalVariables
from .base_config import _BaseConfig
global_variables = GlobalVariables()
@dataclass
class BaseNodeConfig(_BaseConfig, ABC):
platform: str = None
is_ipv6: bool = None
bootstrap_in_place: bool = None
private_ssh_key_path: Path = None
working_dir: str = consts.WORKING_DIR
master_memory: int = None
master_vcpu: int = None
masters_count: int = None
nodes_count: int = None
master_cpu_mode: str = None
master_disk: int = None # disk size in MB.
master_disk_size_gib: str = None # disk size in GB.
master_disk_count: int = None # number of disks to create
worker_memory: int = None
worker_vcpu: int = None
workers_count: int = None
worker_cpu_mode: str = None
worker_disk: int = None
worker_disk_count: int = None
network_mtu: int = None
@staticmethod
def get_default(key, default=None) -> Any:
        return getattr(global_variables, key, default)
| 26.325581 | 63 | 0.719965 | 847 | 0.748233 | 0 | 0 | 858 | 0.757951 | 0 | 0 | 63 | 0.055654 |
6a84b5159878bc48cef9594078edc989fb798f13 | 952 | py | Python | bitcoinpy/mempool.py | obulpathi/bitcoinpy | 8f41e0221f2ff2d35697b6d4e5397deb7de09c3d | [
"MIT"
] | 21 | 2016-01-03T14:52:07.000Z | 2021-08-09T18:05:08.000Z | bitcoinpy/mempool.py | obulpathi/bitcoinpy | 8f41e0221f2ff2d35697b6d4e5397deb7de09c3d | [
"MIT"
] | null | null | null | bitcoinpy/mempool.py | obulpathi/bitcoinpy | 8f41e0221f2ff2d35697b6d4e5397deb7de09c3d | [
"MIT"
] | 15 | 2015-02-07T20:08:11.000Z | 2019-10-03T04:45:45.000Z | # MemPool.py
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import logging
from lib.serialize import uint256_to_shortstr
class MemPool(object):
def __init__(self):
self.pool = {}
# setup logging
logging.basicConfig(level=logging.DEBUG)
self.logger = logging.getLogger(__name__)
    def add(self, tx):
        tx.calc_sha256()
        tx_hash = tx.sha256
        hashstr = uint256_to_shortstr(tx_hash)
        if tx_hash in self.pool:
            self.logger.info("MemPool.add(%s): already known", hashstr)
            return False
        if not tx.is_valid():
            self.logger.info("MemPool.add(%s): invalid TX", hashstr)
            return False
        self.pool[tx_hash] = tx
        self.logger.info("MemPool.add(%s), poolsz %d", hashstr, len(self.pool))
        return True
def remove(self, hash):
if hash not in self.pool:
return False
del self.pool[hash]
return True
def size(self):
return len(self.pool)
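# Example (illustrative sketch): `tx` is assumed to be a transaction object
# exposing calc_sha256(), sha256 and is_valid(), as required by add() above.
#
# pool = MemPool()
# if pool.add(tx):
#     print("accepted; mempool size =", pool.size())
# pool.remove(tx.sha256)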
| 23.8 | 74 | 0.703782 | 730 | 0.766807 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.268908 |
6a84b6d6b4154a68e4e0a9485928b636bf10d1b0 | 13,530 | py | Python | bashspy/parser.py | sarvi/bashspy | 088f2cdfd00d29b4c7a98ec311f6e6c382ba4749 | [
"MIT"
] | null | null | null | bashspy/parser.py | sarvi/bashspy | 088f2cdfd00d29b4c7a98ec311f6e6c382ba4749 | [
"MIT"
] | 1 | 2021-06-12T12:47:44.000Z | 2021-06-12T12:47:44.000Z | bashspy/parser.py | sarvi/bashspy | 088f2cdfd00d29b4c7a98ec311f6e6c382ba4749 | [
"MIT"
] | 1 | 2020-05-18T08:55:14.000Z | 2020-05-18T08:55:14.000Z | '''
Created on Jun 13, 2019
@author: sarvi
'''
from sly import Parser
from .lexer import BashLexer
class ASTCommands(list):
__slots__ = ('grouping')
def __init__(self, command, grouping=None):
self.append(command)
self.grouping = grouping
def __repr__(self):
        x = [str(i) for i in self]
if self.grouping:
x.insert(0, self.grouping[0])
x.append(self.grouping[1])
return '\n'.join(x)
class ASTCommand:
__slots__ = ('assignments', 'executable', 'arguments', 'redirections', 'pipetocmd')
def __init__(self, executable=None, assignments=None, arguments=None, redirections=None, pipetocmd=None):
self.executable = executable
self.assignments = assignments or list()
self.arguments = arguments or list()
self.redirections = redirections or list()
self.pipetocmd = pipetocmd
def __repr__(self):
if self.executable:
return ('%s %s %s %s %s' % (' '.join([str(i) for i in self.assignments]),
self.executable,
' '.join([str(i) for i in self.arguments]),
' '.join([str(i) for i in self.redirections]),
'| %s'%self.pipetocmd if self.pipetocmd else '')).strip()
else:
return ' '.join([str(i) for i in self.assignments])
class ASTAssignment:
__slots__ = ('variable', 'assignop', 'value')
def __init__(self, variable, assignop, value=None):
self.variable = variable
self.assignop = assignop
self.value = value
def __repr__(self):
return '%s%s%s'%(self.variable, self.assignop, self.value or '')
class ASTArgument:
__slots__ = ('option', 'value')
def __init__(self, option=None, value=None):
self.option = option
self.value = value
def __repr__(self):
return '%s=%s'%(self.option, self.value) if self.option and self.value else (self.option or self.value)
class ASTRedirection:
__slots__ = ('redirect', 'file')
def __init__(self, redirect, file):
self.redirect = redirect
self.file = file
def __repr__(self):
return '%s%s'%(self.redirect, self.file) if self.file else '%s'%(self.redirect)
class ASTTestCombination:
__slots__ = ('leftexpr', 'combination', 'rightexpr', 'test_command', 'group')
def __init__(self, combination, rightexpr, leftexpr=None, test_command=False, group=False):
self.combination = combination
self.rightexpr = rightexpr
self.leftexpr = leftexpr
self.test_command = test_command
self.group = group
def __repr__(self):
if self.leftexpr:
return '%s %s %s'%(self.leftexpr, self.combination, self.rightexpr)
elif self.combination:
return '%s %s'%(self.combination, self.rightexpr)
elif self.test_command:
return '[ %s ]'%(self.rightexpr)
elif self.group:
return '( %s )'%(self.rightexpr)
else:
return '%s'%(self.rightexpr)
class ASTTestCondition:
__slots__ = ('leftvalue', 'test', 'rightvalue')
def __init__(self, test, rightvalue, leftvalue=None):
self.test = test
self.leftvalue = leftvalue
self.rightvalue = rightvalue
def __repr__(self):
if self.test:
return '%s %s %s'%(self.leftvalue, self.test, self.rightvalue) if self.leftvalue else '%s %s'%(self.test, self.rightvalue)
else:
return '%s' % (self.rightvalue)
class ASTIfCommand:
__slots__ = ('test_commands', 'then_commands', 'else_commands')
def __init__(self, test_commands, then_commands, else_commands=None):
self.test_commands = test_commands
self.then_commands = then_commands
self.else_commands = else_commands
def __repr__(self):
if self.else_commands:
return 'if %s; then\n%s\nelse\n%s\nfi' % (self.test_commands, self.then_commands, self.else_commands)
else:
return 'if %s; then\n%s\nfi' % (self.test_commands, self.then_commands)
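# Illustrative sketch of the AST these classes describe (the exact shapes are
# assumptions based on the constructors above, not verified parser output):
#
#   if [ -f foo ]; then echo hi; fi
#
# would parse to roughly:
#
#   ASTIfCommand(
#       test_commands=ASTTestCombination(None, ASTTestCondition('-f', 'foo'),
#                                        test_command=True),
#       then_commands=ASTCommands(ASTCommand('echo',
#                                            arguments=[ASTArgument(value='hi')])),
#   )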
class BashParser(Parser):
# Get the token list from the lexer (required)
debugfile = 'parser.out'
tokens = BashLexer.tokens
precedence = (
# ('nonassoc', BOOL_NOT),
# ('nonassoc', BOOL_LESS, BOOL_GREATER, BOOL_EQ, BOOL_NEQ), # Nonassociative operators
('left', LIST_COMMANDS),
('left', AMPERSAND, CMDSEP, NEWLINE),
('left', BOOL_COMBINATION),
('left', BOOL_COMPARISON),
('right', BOOL_NOT),
# ('right', END_LINE)
)
# Grammar rules and actions
@_('compound_commands')
def program(self, p):
print('program(%s)' % (p.compound_commands))
return p.compound_commands
@_('compound_command',
'compound_command end_command',
'compound_command end_command compound_commands'
)
def compound_commands(self, p):
# print('simple_command(%s)' % (list(p)))
if getattr(p, 'compound_commands', None):
p.compound_commands.insert(0, p.compound_command)
return p.compound_commands
else:
return ASTCommands(p.compound_command)
@_(
'group_command',
'list_commands',
'if_command',
)
def compound_command(self, p):
return p[0]
@_(
'LBRACE NEWLINE compound_commands RBRACE',
'LBRACE compound_commands RBRACE',
'LPAREN compound_commands RPAREN',
)
def group_command(self, p):
if getattr(p, 'LBRACE', None):
p.compound_commands.grouping = '{}'
elif getattr(p, 'LPAREN', None):
p.compound_commands.grouping = '()'
return getattr(p, 'compound_commands', None)
@_('pipe_command %prec LIST_COMMANDS',
'pipe_command end_pipe',
'pipe_command end_pipe list_commands',
'pipe_command boolean_combination list_commands')
def list_commands(self, p):
if getattr(p, 'boolean_combination', None):
return ASTTestCombination(p.boolean_combination, p.list_commands, p.pipe_command)
elif getattr(p, 'list_commands', None):
p.list_commands.insert(0, p.pipe_command)
return p.list_commands
else:
return ASTCommands(p.pipe_command)
@_('NEWLINE', 'CMDSEP', 'AMPERSAND')
def end_pipe(self, p):
return None
@_('NEWLINE', 'CMDSEP')
def end_command(self, p):
return None
@_('IF list_commands THEN compound_commands FI',
'IF list_commands THEN NEWLINE compound_commands FI',
'IF list_commands THEN compound_commands ELSE compound_commands FI',
'IF list_commands THEN NEWLINE compound_commands ELSE NEWLINE compound_commands FI')
def if_command(self, p):
if getattr(p, 'ELSE', None):
return ASTIfCommand(p.list_commands, p.compound_commands0, p.compound_commands1)
else:
return ASTIfCommand(p.list_commands, p.compound_commands)
# @_( #'test_command',
# 'command_pipe',
# # 'test_command boolean_combination compound_command',
# # 'command_pipe boolean_combination compound_command'
# )
# def compound_command(self, p):
# if getattr(p, 'boolean_combination', None):
# return ASTTestCombination(p.boolean_combination, p.test_commands, p.test_command)
# else:
# return p.test_command
@_('time_command pipe_commands',
'time_command BOOL_NOT pipe_commands',
'pipe_commands',
'BOOL_NOT pipe_commands')
def pipe_command(self, p):
# print('simple_command(%s)' % (list(p)))
cmd = p.pipe_commands
if getattr(p, 'BOOL_NOT', None):
cmd = ASTTestCombination(p.BOOL_NOT, p.pipe_commands)
return cmd
@_('TIME',
'TIME TIME_OPTP')
def time_command(self, p):
cmd = ASTCommand(p.TIME)
if getattr(p, 'TIME_OPTP', None):
cmd.arguments = [p.TIME_OPTP]
return cmd
@_('simple_command',
'simple_command PIPE pipe_commands')
def pipe_commands(self, p):
# print('simple_command(%s)' % (list(p)))
if getattr(p, 'PIPE', None):
p.simple_command.pipetocmd = p.pipe_commands
return p.simple_command
@_('assignments',
'base_command',
'assignments base_command',
'base_command redirects',
'assignments base_command redirects')
def simple_command(self, p):
# print('simple_command(%s)' % (list(p)))
cmd = p.base_command if getattr(p, 'base_command', None) else ASTCommand()
if getattr(p, 'redirects', None):
cmd.redirections = p.redirects
if getattr(p, 'assignments', None):
cmd.assignments = p.assignments
return cmd
@_('redirect',
'redirect redirects')
def redirects(self, p):
return [p.redirect] if len(p)==1 else [p.redirect] + p.redirects
@_('REDIRECT',
'REDIRECT WORD')
def redirect(self, p):
# print('assignment(%s)' % (list(p)))
return ASTRedirection(p.REDIRECT, getattr(p, 'WORD', None))
@_('echo_command',
'exec_command',
'test_command')
def base_command(self, p):
if len(p)==2:
p[1].assignments = p.assignments.assignments
return p[1]
else:
return p[0]
@_('LBRACK test_expressions RBRACK',
'LDBRACK test_expressions RDBRACK')
def test_command(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.command_pipe)
elif getattr(p, 'command_pipe', None):
return ASTTestCombination(None, p.command_pipe)
else:
return ASTTestCombination(None, p.test_expressions, test_command=True)
@_('test_expression',
'LPAREN test_expressions RPAREN',
'BOOL_NOT test_expressions %prec BOOL_NOT',
'test_expressions boolean_combination test_expressions %prec BOOL_COMBINATION'
)
def test_expressions(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.test_expressions)
elif getattr(p, 'boolean_combination', None):
return ASTTestCombination(p.boolean_combination, p.test_expressions1, p.test_expressions0)
elif getattr(p, 'LPAREN', None):
return ASTTestCombination(None, p.test_expressions, group=True)
else:
return p.test_expression
@_('BOOL_OR', 'BOOL_AND')
def boolean_combination(self, p):
return p[0]
@_('value boolean_comparison value %prec BOOL_COMPARISON',
'OPTION value')
def test_expression(self, p):
if getattr(p, 'BOOL_NOT', None):
return ASTTestCombination(p.BOOL_NOT, p.test_expression)
elif getattr(p, 'LPAREN', None):
return ASTTestCombination(None, p.test_expressions, group=True)
elif getattr(p, 'OPTION', None):
return ASTTestCondition(p.boolean_comparison, p.value)
else:
return ASTTestCondition(p.boolean_comparison, p.value1, p.value0)
@_('OPTION', 'BOOL_EQ', 'BOOL_NEQ', 'BOOL_LESS', 'BOOL_GREATER', 'ASSIGN')
def boolean_comparison(self, p):
return p[0]
# @_(
# 'for_command',
# 'case_command',
# 'WHILE compound_list DO compound_list DONE',
# 'UNTIL compound_list DO compound_list DONE',
# 'select_command',
# 'if_command',
# 'subshell',
# 'group_command',
# 'arith_command'
# 'cond_command',
# 'arith_for_command'
# )
# def shell_command(self, p):
# print('assignments(%s)' % (list(p)))
# return list(p)
@_('ECHO ECHO_STRING')
def echo_command(self, p):
return ASTCommand(p[0], None, [p[1]])
@_('WORD',
'WORD arguments')
def exec_command(self, p):
return ASTCommand(p[0], None, getattr(p, 'arguments', None), getattr(p, 'redirects', None))
@_('argument',
'argument arguments')
def arguments(self, p):
return [p.argument] if len(p)==1 else [p.argument] + p.arguments
@_('OPTION ASSIGN', 'OPTION', 'arg_value')
def argument(self, p):
# print('assignment(%s)' % (list(p)))
return ASTArgument(getattr(p, 'OPTION', None), getattr(p, 'arg_value', None))
@_('value', 'WORD')
def arg_value(self, p):
# print('value(%s)' % (list(p)))
return p[0]
@_('assignment',
'assignment assignments')
def assignments(self, p):
return [p.assignment] if len(p) == 1 else [p.assignment] + p.assignments
@_('LET ID assignop value', 'ID assignop value', 'ID assignop')
def assignment(self, p):
# print('assignment(%s)' % (list(p)))
return ASTAssignment(p.ID, p.assignop, getattr(p, 'value', None))
@_('ASSIGN', 'ARITH_ASSIGN')
def assignop(self, p):
return p[0]
@_('QSTRING', 'DQSTRING', 'BTQUOTED', 'CMD_EXP', 'VAL_STRING', 'VAR_SUBST', 'VARIABLE')
def value(self, p):
# print('value(%s)' % (list(p)))
return p[0]
if __name__ == '__main__':
lexer = BashLexer()
parser = BashParser()
while True:
try:
text = input('Command:>')
result = parser.parse(lexer.tokenize(text))
print(result)
except EOFError:
break | 32.760291 | 135 | 0.602882 | 13,124 | 0.969993 | 0 | 0 | 7,448 | 0.55048 | 0 | 0 | 4,150 | 0.306726 |
6a84df704829c829063b750bd4cbc4f7f7261e8a | 1,555 | py | Python | Module03/pregnancy_wheel.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620 | 4f53e9290b456f582464c86d114c794c1448b995 | [
"MIT"
] | null | null | null | Module03/pregnancy_wheel.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620 | 4f53e9290b456f582464c86d114c794c1448b995 | [
"MIT"
] | null | null | null | Module03/pregnancy_wheel.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-charrison620 | 4f53e9290b456f582464c86d114c794c1448b995 | [
"MIT"
] | null | null | null | import datetime
def print_header():
print('----------------------------')
print(' Due Date APP ')
print('----------------------------')
print()
def get_lmp_from_patient():
print("When was the patient's last normal menstrual cycle? ")
    date_str = input('Format: [dd/mm/yyyy]: ')
    # e.g. '05/06/2018'
parts = date_str.split('/')
    if len(parts) != 3:
print('Bad date found', date_str)
return get_lmp_from_patient()
year = int(parts[2])
month = int(parts[1])
day = int(parts[0])
lmp = datetime.date(year, month, day)
#print(lmp)
return lmp
# Average human pregnancy length is 281 days from the last menstrual period.
def compute_days_between_dates(original_date, target_date):
this_year = datetime.date(target_date.year, original_date.month, original_date.day)
dt = this_year - target_date
return dt.days
def print_due_date_information(min_due_date, max_due_date, expected_due_date):
print('Your expected due date is ', expected_due_date.strftime('%a %b %d %Y'))
print('But it may be as early as ', min_due_date.strftime('%m/%d/%Y'))
print('But as late as ', max_due_date.strftime('%m/%d/%Y'))
def main():
print_header()
lmp_day = get_lmp_from_patient()
gest_length = datetime.timedelta(days = 281)
gest_std = datetime.timedelta(days = 13)
expected_due_date = lmp_day + gest_length
min_due_date = expected_due_date - gest_std
max_due_date = expected_due_date + gest_std
print_due_date_information(min_due_date, max_due_date, expected_due_date)
main() | 27.767857 | 87 | 0.65209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 348 | 0.223794 |
6a863c2235ddfde035ee8193728b86e170dd1480 | 1,471 | py | Python | costcalculator/forms.py | connor-c/Trip-Gas-Cost-Calculator | 6101093ffd48b6cb6c4f847b8c1f40351617750b | [
"MIT"
] | null | null | null | costcalculator/forms.py | connor-c/Trip-Gas-Cost-Calculator | 6101093ffd48b6cb6c4f847b8c1f40351617750b | [
"MIT"
] | 8 | 2020-02-11T23:59:35.000Z | 2022-02-10T07:16:43.000Z | costcalculator/forms.py | connor-c/Trip-Gas-Cost-Calculator | 6101093ffd48b6cb6c4f847b8c1f40351617750b | [
"MIT"
] | null | null | null | from django import forms
from django.core.validators import MinValueValidator, MinLengthValidator
class OriginForm(forms.Form):
origin_address = forms.CharField(validators=[MinLengthValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '123 Tech St, Silicon Valley, CA 00000'}))
class DestinationForm(forms.Form):
destination_address = forms.CharField(validators=[MinLengthValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '123 Tech St, Silicon Valley, CA 00000'}))
class GasPriceForm(forms.Form):
gas_price = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '1.23'}))
class MpgForm(forms.Form):
mpg = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '12'}))
class NumPeopleForm(forms.Form):
num_people = forms.IntegerField(validators=[MinValueValidator(1)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '1 (default is 1 if left blank)'}))
class DistanceForm(forms.Form):
distance = forms.FloatField(validators=[MinValueValidator(0.01)], widget=forms.TextInput(attrs={'class': 'form-control', 'id': 'inlineFormInputGroup', 'placeholder': '15.2'}))
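# Example (illustrative): validating user input with one of these forms.
#
# form = GasPriceForm(data={'gas_price': '2.50'})
# if form.is_valid():
#     price = form.cleaned_data['gas_price']  # -> 2.5 as a float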
| 70.047619 | 220 | 0.747791 | 1,361 | 0.925221 | 0 | 0 | 0 | 0 | 0 | 0 | 486 | 0.330387 |
6a86a5adbb68bba6dc2b067c07b59de722a8d5ca | 1,940 | py | Python | tasks/storm_raffle_handler.py | Ayouuuu/bili2.0 | 1108e39208e56f129fb5eb6605a5b3f1aadc0d8f | [
"MIT"
] | 2 | 2020-01-03T09:27:53.000Z | 2020-04-07T05:06:36.000Z | tasks/storm_raffle_handler.py | Ayouuuu/bili2.0 | 1108e39208e56f129fb5eb6605a5b3f1aadc0d8f | [
"MIT"
] | null | null | null | tasks/storm_raffle_handler.py | Ayouuuu/bili2.0 | 1108e39208e56f129fb5eb6605a5b3f1aadc0d8f | [
"MIT"
] | 1 | 2019-08-23T07:43:21.000Z | 2019-08-23T07:43:21.000Z | import bili_statistics
from reqs.storm_raffle_handler import StormRaffleHandlerReq
from tasks.utils import UtilsTask
from .base_class import Forced, DontWait, Multi
class StormRaffleJoinTask(Forced, DontWait, Multi):
TASK_NAME = 'join_storm_raffle'
    # For speed we sometimes join without waiting for room_id verification: with room_id set to 0, is_normal_room simply returns a fixed True.
@staticmethod
async def check(user, room_id, raffle_id=None):
if not await UtilsTask.is_normal_room(user, room_id):
return
if raffle_id is not None:
json_rsp = {'data': {'id': raffle_id}}
else:
json_rsp = await user.req_s(StormRaffleHandlerReq.check, user, room_id)
next_step_settings = []
data = json_rsp['data']
if data:
raffle_id = int(data['id'])
if not bili_statistics.is_raffleid_duplicate(raffle_id/1000000):
                user.info(f'Confirmed storm raffle {raffle_id}', with_userid=False)
next_step_setting = (-2, (1, 3), room_id, raffle_id)
next_step_settings.append(next_step_setting)
next_step_setting = (-2, (2, 4), room_id, raffle_id)
next_step_settings.append(next_step_setting)
bili_statistics.add2raffle_ids(raffle_id/1000000, 'STORM')
return next_step_settings
@staticmethod
async def work(user, room_id, raffle_id):
# await UtilsTask.enter_room(user, room_id)
json_rsp = await user.req_s(StormRaffleHandlerReq.join, user, raffle_id)
        bili_statistics.add2joined_raffles('storm raffle (total)', user.id)
if not json_rsp['code']:
data = json_rsp['data']
gift_name = data["gift_name"]
gift_num = data["gift_num"]
            user.info(f'Storm raffle ({raffle_id}) result: {gift_name} x {gift_num}')
bili_statistics.add2results(gift_name, user.id, gift_num)
return
print(json_rsp)
| 41.276596 | 83 | 0.635567 | 1,872 | 0.917647 | 0 | 0 | 1,642 | 0.804902 | 1,606 | 0.787255 | 367 | 0.179902 |
6a86acff1cb947e60b02c94c6dbdcc5c7b79e9bf | 4,767 | py | Python | u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 45 | 2015-04-26T04:45:51.000Z | 2022-01-24T15:03:55.000Z | u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 8 | 2018-07-20T20:54:51.000Z | 2020-06-12T05:36:04.000Z | u24_lymphocyte/third_party/treeano/sandbox/nodes/gradnet.py | ALSM-PhD/quip_classification | 7347bfaa5cf11ae2d7a528fbcc43322a12c795d3 | [
"BSD-3-Clause"
] | 22 | 2018-05-21T23:57:20.000Z | 2022-02-21T00:48:32.000Z | import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
@treeano.register_node("grad_net_interpolation")
class GradNetInterpolationNode(treeano.NodeImpl):
"""
interpolates outputs between 2 nodes
"""
hyperparameter_names = ("late_gate",)
children_container = treeano.core.DictChildrenContainerSchema(
early=treeano.core.ChildContainer,
late=treeano.core.ChildContainer,
)
input_keys = ("early", "late")
def init_state(self, network):
children = self.raw_children()
early = children["early"]
late = children["late"]
network.forward_input_to(early.name)
network.forward_input_to(late.name)
network.take_output_from(early.name, to_key="early")
network.take_output_from(late.name, to_key="late")
def compute_output(self, network, early_vw, late_vw):
late_gate = network.find_hyperparameter(["late_gate"], 1)
out_var = (early_vw.variable * (1 - late_gate)
+ late_vw.variable * late_gate)
out_shape = []
assert early_vw.ndim == late_vw.ndim
for e, l in zip(early_vw.shape, late_vw.shape):
if e is None and l is None:
out_shape.append(None)
elif e is None:
out_shape.append(l)
elif l is None:
out_shape.append(e)
else:
assert e == l
out_shape.append(e)
network.create_vw(
"default",
variable=out_var,
shape=tuple(out_shape),
tags={"output"},
)
@treeano.register_node("grad_net_optimizer_interpolation")
class _GradNetOptimizerInterpolationNode(treeano.Wrapper1NodeImpl):
hyperparameter_names = ("late_gate",
"gradnet_epsilon",
"epsilon",
"multiplicative_inverse_for_early_gate")
def init_state(self, network):
super(_GradNetOptimizerInterpolationNode, self).init_state(network)
epsilon = network.find_hyperparameter(["gradnet_epsilon",
"epsilon"],
1e-3)
late_gate = network.find_hyperparameter(["late_gate"], 1)
late_gate = treeano.utils.as_fX(late_gate)
        # NOTE: late gate cannot be 0 because the early gate is divided by it
# AND multiplied by it. Clipping only for the early gate will cause
# no updates to occur.
late_gate = T.clip(late_gate, epsilon, 1)
use_multiplicative_inverse = network.find_hyperparameter(
["multiplicative_inverse_for_early_gate"], False)
if use_multiplicative_inverse:
early_gate = epsilon / late_gate
else:
early_gate = 1 - late_gate
network.set_hyperparameter(self.name + "_late_update_scale",
"update_scale_factor",
late_gate)
network.set_hyperparameter(self.name + "_early_update_scale",
"update_scale_factor",
# these updates are also multiplied by
# late_gate later on, so rescale them
early_gate / late_gate)
def GradNetOptimizerInterpolationNode(name,
children,
early,
late,
**kwargs):
"""
interpolates updates from 2 optimizers nodes
NOTE: this is a hack to take in node constructors as arguments
"""
assert set(children.keys()) == {"subtree", "cost"}
subtree = children["subtree"]
cost = children["cost"]
cost_ref = tn.ReferenceNode(name + "_costref", reference=cost.name)
late_subtree = tn.UpdateScaleNode(name + "_late_update_scale", subtree)
late_node = late(name + "_late", {"subtree": late_subtree, "cost": cost})
early_subtree = tn.UpdateScaleNode(name + "_early_update_scale", late_node)
early_node = early(name + "_early",
{"subtree": early_subtree, "cost": cost_ref})
# NOTE: need separate node to forward hyperparameter
return _GradNetOptimizerInterpolationNode(name, early_node, **kwargs)
def GradualSimpleBatchNormalizationNode(name):
from treeano.sandbox.nodes import batch_normalization as bn
return GradNetInterpolationNode(
name,
{"early": bn.SimpleBatchNormalizationNode(name + "_bn"),
"late": tn.IdentityNode(name + "_identity")})
GradualBNNode = GradualSimpleBatchNormalizationNode
| 36.389313 | 79 | 0.594084 | 3,177 | 0.666457 | 0 | 0 | 3,285 | 0.689113 | 0 | 0 | 990 | 0.207678 |
6a88dea060f9ea00f0bb7c465137c71904b3c14f | 662 | py | Python | minus80/RawFile.py | brohammer/Minus80 | 3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb | [
"MIT"
] | null | null | null | minus80/RawFile.py | brohammer/Minus80 | 3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb | [
"MIT"
] | null | null | null | minus80/RawFile.py | brohammer/Minus80 | 3cd5b61a7349b9fa6d35ed192d8a4f38523f92bb | [
"MIT"
] | null | null | null | import gzip #pragma: no cover
import bz2 #pragma: no cover
import lzma #pragma: no cover
class RawFile(object):#pragma: no cover
def __init__(self,filename):
self.filename = filename
if filename.endswith('.gz'):
            self.handle = gzip.open(filename, 'rt')
        elif filename.endswith('bz2'):
            self.handle = bz2.open(filename, 'rt')
        elif filename.endswith('xz'):
            self.handle = lzma.open(filename, 'rt')
        else:
            self.handle = open(filename, 'r')
def __enter__(self):
return self.handle
def __exit__(self,dtype,value,traceback):
self.handle.close()
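# Example (illustrative; the path is an assumption): RawFile picks the right
# opener from the extension, so gzip/bz2/xz and plain text are read the same way.
#
# with RawFile("data/reads.fastq.gz") as handle:
#     for line in handle:
#         process(line)  # `process` is a hypothetical per-line handler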
| 33.1 | 50 | 0.601208 | 563 | 0.850453 | 0 | 0 | 0 | 0 | 0 | 0 | 97 | 0.146526 |
6a89b2893b587e6d66f6aa207ca89999bce84710 | 846 | py | Python | utils/config.py | jtr109/Alpha2kindle | a411d05cafa9036a732eeb75fa13f68963f254e3 | [
"MIT"
] | null | null | null | utils/config.py | jtr109/Alpha2kindle | a411d05cafa9036a732eeb75fa13f68963f254e3 | [
"MIT"
] | null | null | null | utils/config.py | jtr109/Alpha2kindle | a411d05cafa9036a732eeb75fa13f68963f254e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
class BaseConf(object):
HEADERS = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/55.0.2883.95 "
"Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;"
"q=0.9,image/webp,*/*;"
"q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
"Cache-Control": "max-age=0",
}
class TestConf(BaseConf):
REDIS_URL = "redis://:{password}@{hostname}:{port}/{db_number}".format(
password=os.environ.get("REDIS_PWD"),
hostname='127.0.0.1',
port=6379,
db_number=0
)
CURCONF = TestConf
| 27.290323 | 75 | 0.51773 | 784 | 0.926714 | 0 | 0 | 0 | 0 | 0 | 0 | 446 | 0.527187 |
6a89d65e11282b8c81495e2795c8364f65d2114c | 4,724 | py | Python | framework/database/__init__.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | framework/database/__init__.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | framework/database/__init__.py | fabmiz/osf.io | 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import functools
import httplib as http
import markupsafe
from django.core.paginator import Paginator
from django.db.models import Q, QuerySet
from framework.exceptions import HTTPError
def get_or_http_error(Model, pk_or_query, allow_deleted=False, display_name=None):
"""Load an instance of Model by primary key or modularodm.Q query. Raise an appropriate
HTTPError if no record is found or if the query fails to find a unique record
:param type Model: StoredObject subclass to query
:param pk_or_query:
:type pk_or_query: either
- a <basestring> representation of the record's primary key, e.g. 'abcdef'
- a <QueryBase> subclass query to uniquely select a record, e.g.
Q('title', 'eq', 'Entitled') & Q('version', 'eq', 1)
:param bool allow_deleted: allow deleleted records?
:param basestring display_name:
:raises: HTTPError(404) if the record does not exist
:raises: HTTPError(400) if no unique record is found
:raises: HTTPError(410) if the resource is deleted and allow_deleted = False
:return: Model instance
"""
display_name = display_name or ''
# FIXME: Not everything that uses this decorator needs to be markupsafe, but OsfWebRenderer error.mako does...
safe_name = markupsafe.escape(display_name)
if isinstance(pk_or_query, Q):
try:
instance = Model.objects.get(pk_or_query)
except Model.DoesNotExist:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record matching that query could be found'.format(name=safe_name)
))
except Model.MultipleObjectsReturned:
raise HTTPError(http.BAD_REQUEST, data=dict(
message_long='The query must match exactly one {name} record'.format(name=safe_name)
))
else:
instance = Model.load(pk_or_query)
if not instance:
raise HTTPError(http.NOT_FOUND, data=dict(
message_long='No {name} record with that primary key could be found'.format(name=safe_name)
))
if getattr(instance, 'is_deleted', False) and getattr(instance, 'suspended', False):
raise HTTPError(451, data=dict( # 451 - Unavailable For Legal Reasons
message_short='Content removed',
message_long='This content has been removed'
))
if not allow_deleted and getattr(instance, 'is_deleted', False):
raise HTTPError(http.GONE)
return instance
def autoload(Model, extract_key, inject_key, func):
"""Decorator to autoload a StoredObject instance by primary key and inject into kwargs. Raises
an appropriate HTTPError (see #get_or_http_error)
:param type Model: database collection model to query (should be a subclass of StoredObject)
:param basestring extract_key: named URL field containing the desired primary key to be fetched
from the database
:param basestring inject_key: name the instance will be accessible as when it's injected as an
argument to the function
Example usage: ::
def get_node(node_id):
node = Node.load(node_id)
...
becomes
import functools
autoload_node = functools.partial(autoload, Node, 'node_id', 'node')
@autoload_node
def get_node(node):
...
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
primary_key = kwargs.get(extract_key)
instance = get_or_http_error(Model, primary_key)
kwargs[inject_key] = instance
return func(*args, **kwargs)
return wrapper
def paginated(model, query=None, increment=200, each=True, include=None):
"""Paginate a MODM query.
:param StoredObject model: Model to query.
:param Q query: Optional query object.
:param int increment: Page size
:param bool each: If True, each record is yielded. If False, pages
are yielded.
"""
if include and query:
queryset = model.objects.filter(query).include(*include)
elif query:
queryset = model.objects.filter(query)
else:
queryset = model.objects.all()
# Pagination requires an order by clause, especially when using Postgres.
# see: https://docs.djangoproject.com/en/1.10/topics/pagination/#required-arguments
if isinstance(queryset, QuerySet) and not queryset.ordered:
queryset = queryset.order_by(queryset.model._meta.pk.name)
paginator = Paginator(queryset.all(), increment)
for page_num in paginator.page_range:
page = paginator.page(page_num)
if each:
for item in page.object_list:
yield item
else:
yield page.object_list
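# Example (illustrative sketch; `User` is an assumed Django model):
#
# for user in paginated(User, query=Q(is_active=True), increment=500):
#     index(user)        # hypothetical per-record work
#
# for page in paginated(User, each=False):
#     bulk_index(page)   # hypothetical per-page work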
| 38.406504 | 114 | 0.67591 | 0 | 0 | 1,102 | 0.233277 | 235 | 0.049746 | 0 | 0 | 2,422 | 0.512701 |
6a8b93de9ef615a88be0dad5abda769599f3cf01 | 2,886 | py | Python | neptune/internal/client_library/job_development_api/image.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/internal/client_library/job_development_api/image.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | neptune/internal/client_library/job_development_api/image.py | jiji-online/neptune-cli | 50cf680a80d141497f9331ab7cdaee49fcb90b0c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future import standard_library
standard_library.install_aliases()
# pylint: disable=wrong-import-position
from future.builtins import object
import base64
import io
import PIL.Image
from neptune.generated.swagger_client import InputImage
from neptune.internal.common.models.parameters_validation import (
of_type_validator,
text_conv,
validate
)
class Image(object):
"""
Represents information about images sent to image channels.
"""
@validate(name=text_conv, description=text_conv, data=of_type_validator(PIL.Image.Image))
def __init__(self, name, description, data):
"""
Creates a new Image.
:param name: Name of the image, displayed in the Channels tab on job's dashboard.
:param description: Description of the image displayed in the Channels tab
on job's dashboard.
:param data: Image data.
:type name: unicode
:type description: unicode
:type data: PIL.Image
"""
self._name = name
self._description = description
self._data = data
def to_input_image(self):
"""
Creates InputImage that can be sent to Neptune.
:return: input image in format appropriate to be sent to Neptune.
:rtype: InputImage
"""
image_buffer = io.BytesIO()
self.data.save(image_buffer, format='PNG')
contents = image_buffer.getvalue()
image_buffer.close()
input_image = InputImage()
input_image.name = self.name
input_image.description = self.description
input_image.data = base64.b64encode(contents).decode('utf-8')
return input_image
@property
def name(self):
"""
Gets name of this Image.
:return: The name of this Image.
:rtype: str
"""
return self._name
@property
def description(self):
"""
Gets description of this Image.
:return: The description of this Image.
:rtype: str
"""
return self._description
@property
def data(self):
"""
Gets data of this Image.
:return: The data of this Image.
:rtype: PIL.Image
"""
return self._data
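
# Hedged example (not part of the original module): building an Image from a
# tiny in-memory Pillow bitmap and converting it to the wire format. Assumes
# Pillow and the neptune internals imported above are available.
if __name__ == '__main__':
    pil_image = PIL.Image.new('RGB', (4, 4), 'red')  # minimal 4x4 bitmap
    image = Image(u'example', u'a 4x4 red square', pil_image)
    input_image = image.to_input_image()
    # input_image.data now holds the PNG bytes, base64-encoded as text
    print(input_image.name)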

# --- src/picome/hukeyboard.py (guibohnert91/picome, MIT) ---
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
from adafruit_hid.keycode import Keycode
import usb_hid
import time
class HumanKeyboard(object):
def __init__(self):
self.keyboard = Keyboard(usb_hid.devices)
self.keyboardLayout = KeyboardLayoutUS(self.keyboard)
def keyPress(self, keyCode):
"""Send a human like keypress.
Keyword arguments:
keyCode -- the real key to be pressed (example Keycode.SEVEN)
"""
self.keyboard.press(keyCode)
time.sleep(0.1)
        self.keyboard.release(keyCode)
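
# Hedged example (not part of the original module): on a CircuitPython board
# with USB HID enabled, this types the digit 7 with the human-like delay that
# keyPress() adds between press and release.
if __name__ == '__main__':
    keyboard = HumanKeyboard()
    keyboard.keyPress(Keycode.SEVEN)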

# --- src/solutions/part2/q104_max_bi_tree_depth.py (hychrisli/PyAlgorithms, Apache-2.0) ---
from src.base.solution import Solution
from src.tests.part2.q104_test_max_bi_tree_depth import MaxBiTreeDepthTestCases
class MaxBiTreeDepth(Solution):
def gen_test_cases(self):
return MaxBiTreeDepthTestCases()
def run_test(self, input):
return self.maxDepth(input)
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root: return 0
return max(self.maxDepth(root.left), self.maxDepth(root.right)) + 1
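
# Worked illustration: for a tree 1 -> (2 -> (4, None), 3) the recursion
# computes max(maxDepth(2), maxDepth(3)) + 1 = max(2, 1) + 1 = 3, i.e. the
# longest root-to-leaf path is 1 -> 2 -> 4. (A TreeNode with .left/.right
# attributes is assumed, matching the docstring.)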
if __name__ == '__main__':
sol = MaxBiTreeDepth()
    sol.run_tests()

# --- inventory/admin.py (shakyasaijal/businessAnalytics, BSD-3-Clause) ---
from django.contrib import admin
from . import models
class SupplierAdmin(admin.ModelAdmin):
list_display = ('supplier_name', 'contact', )
search_fields = ['supplier_name', 'contact', ]
admin.site.register(models.Suppliers, SupplierAdmin)
class InventoryUserAdmin(admin.ModelAdmin):
list_display = ('employee_name', 'user_type')
search_fields = ['employee_name', 'user_type']
list_filter = ("user_type",)
admin.site.register(models.InventoryUser, InventoryUserAdmin)
class ProductsAdmin(admin.ModelAdmin):
list_display = ('name', 'quantity', 'cost_price', 'selling_price',)
search_fields = ['name', 'quantity', 'cost_price', 'selling_price',]
list_filter = ("branch", "supplier",)
admin.site.register(models.Product, ProductsAdmin)

# --- python/testData/resolve/TryExceptElse.py (jnthn/intellij-community, Apache-2.0) ---
try:
name = ""
except:
pass
else:
    print na<ref>me

# --- pybyte/session.py (ms7m/py-byte, MIT) ---
import requests
class ByteSession(object):
def __init__(self, token, providedSession=False):
self._userToken = token
        if providedSession:
            self._session = providedSession
        else:
            self._session = requests.session()
self._session.headers = {
"Authorization": token,
"User-Agent": "byte/0.2 (co.byte.video; build:145; iOS 13.3.0) Alamofire/4.9.1"
}
def session(self):
        return self._session
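
# Hedged example (not part of the original module): the host below is a
# placeholder, not a documented byte.co endpoint.
if __name__ == '__main__':
    byte_session = ByteSession('my-token')
    response = byte_session.session().get('https://api.example.com/account/me')
    print(response.status_code)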

# --- pyqtgraph/examples/template.py (secantsquared/pyqtgraph, MIT) ---
# -*- coding: utf-8 -*-
"""
Description of example
"""
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui, mkQApp
import numpy as np
app = mkQApp()
# win.setWindowTitle('pyqtgraph example: ____')
if __name__ == '__main__':
pg.exec()

# --- ambari-server/src/test/python/stacks/2.0.6/common/test_stack_advisor.py (cas-packone/ambari-chs, Apache-2.0) ---
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
from unittest import TestCase
from mock.mock import patch, MagicMock
class TestHDP206StackAdvisor(TestCase):
def setUp(self):
import imp
import os
testDirectory = os.path.dirname(os.path.abspath(__file__))
stackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/stack_advisor.py')
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
hdp206StackAdvisorClassName = 'HDP206StackAdvisor'
with open(stackAdvisorPath, 'rb') as fp:
stack_advisor = imp.load_module( 'stack_advisor', fp, stackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE) )
with open(hdp206StackAdvisorPath, 'rb') as fp:
self.stack_advisor_impl = imp.load_module('stack_advisor_impl', fp, hdp206StackAdvisorPath, ('.py', 'rb', imp.PY_SOURCE))
clazz = getattr(self.stack_advisor_impl, hdp206StackAdvisorClassName)
self.stackAdvisor = clazz()
self.maxDiff = None
# substitute method in the instance
self.get_system_min_uid_real = self.stackAdvisor.get_system_min_uid
self.stackAdvisor.get_system_min_uid = self.get_system_min_uid_magic
@patch('__builtin__.open')
@patch('os.path.exists')
def get_system_min_uid_magic(self, exists_mock, open_mock):
class MagicFile(object):
def read(self):
return """
#test line UID_MIN 200
UID_MIN 500
"""
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __enter__(self):
return self
exists_mock.return_value = True
open_mock.return_value = MagicFile()
return self.get_system_min_uid_real()
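  # The MagicFile stub above stands in for /etc/login.defs: the commented
  # "#test line UID_MIN 200" entry must be skipped so that the real
  # get_system_min_uid() resolves to 500, which is what the YARN
  # recommendation tests below expect for min_user_id.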
def test_recommendationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendOnAllHosts(self):
""" Recommend on all hosts for cardinality ALL even if the component has been installed in the cluster before """
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_MONITOR": ["host1", "host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_recommendationIsNotPreferableOnAmbariServer(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [{"name": "GANGLIA_SERVER", "cardinality": "ALL", "category": "MASTER", "is_master": True}]
}
]
services = self.prepareServices(servicesInfo)
localhost = socket.getfqdn()
hosts = self.prepareHosts([localhost, "host2"])
result = self.stackAdvisor.recommendComponentLayout(services, hosts)
expectedComponentsHostsMap = {
"GANGLIA_SERVER": ["host2"]
}
self.assertHostLayout(expectedComponentsHostsMap, result)
def test_validationNamenodeAndSecondaryNamenode2Hosts_noMessagesForSameHost(self):
servicesInfo = [
{
"name": "HDFS",
"components": [
{"name": "NAMENODE", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host1"]},
{"name": "SECONDARY_NAMENODE", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "level": "ERROR", "host": "host2"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityALL(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "1-2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Ganglia Monitor component should be installed on all hosts in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityExactAmount(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "2", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "2", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Exactly 2 Ganglia Monitor components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinalityAtLeast(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "display_name": "Ganglia Monitor", "cardinality": "1+", "category": "SLAVE", "is_master": False, "hostnames": ["host1"]},
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "3+", "category": "MASTER", "is_master": True, "hostnames": ["host2", "host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "At least 3 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationWarnMessagesIfLessThanDefault(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-site":{"properties":{"yarn.nodemanager.resource.memory-mb": "0",
"yarn.scheduler.minimum-allocation-mb": "str"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{"message": "Value is less than the recommended default of 512", "level": "WARN"},
{'message': 'Value should be set for yarn.nodemanager.linux-container-executor.group', 'level': 'ERROR'},
{"message": "Value should be integer", "level": "ERROR"},
{"message": "Value should be set", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationYARNServicecheckQueueName(self):
servicesInfo = [
{
"name": "YARN",
"components": []
}
]
services = self.prepareServices(servicesInfo)
services["configurations"] = {"yarn-env":{"properties":{"service_check.queue.name": "default"}},
"capacity-scheduler":{"properties":{"capacity-scheduler": "yarn.scheduler.capacity.root.queues=ndfqueue\n"}}}
hosts = self.prepareHosts([])
result = self.stackAdvisor.validateConfigurations(services, hosts)
expectedItems = [
{'message': 'Queue is not exist, or not corresponds to existing YARN leaf queue', 'level': 'ERROR'}
]
self.assertValidationResult(expectedItems, result)
services["configurations"]["yarn-env"]["properties"]["service_check.queue.name"] = "ndfqueue"
expectedItems = []
result = self.stackAdvisor.validateConfigurations(services, hosts)
self.assertValidationResult(expectedItems, result)
def test_validationMinMax(self):
configurations = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "4096",
"some_float_value": "0.5",
"no_min_or_max_attribute_property": "STRING_VALUE"
}
}
}
recommendedDefaults = {
"mapred-site": {
"properties": {
"mapreduce.task.io.sort.mb": "2047",
"some_float_value": "0.8",
"no_min_or_max_attribute_property": "STRING_VALUE"
},
"property_attributes": {
'mapreduce.task.io.sort.mb': {'maximum': '2047'},
'some_float_value': {'minimum': '0.8'}
}
}
}
items = []
self.stackAdvisor.validateMinMax(items, recommendedDefaults, configurations)
expectedItems = [
{
'message': 'Value is greater than the recommended maximum of 2047 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'mapreduce.task.io.sort.mb',
'type': 'configuration'
},
{
'message': 'Value is less than the recommended minimum of 0.8 ',
'level': 'WARN',
'config-type': 'mapred-site',
'config-name': 'some_float_value',
'type': 'configuration'
}
]
self.assertEquals(expectedItems, items)
def test_validationHostIsNotUsedForNonValuableComponent(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_MONITOR", "cardinality": "ALL", "category": "SLAVE", "is_master": False, "hostnames": ["host1", "host2"]},
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host1", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationCardinality01TwoHostsAssigned(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "display_name": "Ganglia Server", "cardinality": "0-1", "category": "MASTER", "is_master": True, "hostnames": ["host1", "host2"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Between 0 and 1 Ganglia Server components should be installed in cluster.", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_validationHostIsNotUsed(self):
servicesInfo = [
{
"name": "GANGLIA",
"components": [
{"name": "GANGLIA_SERVER", "cardinality": "1", "category": "MASTER", "is_master": True, "hostnames": ["host1"]}
]
}
]
services = self.prepareServices(servicesInfo)
hosts = self.prepareHosts(["host1", "host2"])
result = self.stackAdvisor.validateComponentLayout(services, hosts)
expectedItems = [
{"message": "Host is not used", "host": "host2", "level": "ERROR"}
]
self.assertValidationResult(expectedItems, result)
def test_getConfigurationClusterSummary_withHBaseAnd6gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 8,
"total_mem" : 6291456,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 8,
"disk": 8,
"ram": 6,
"reservedRam": 2,
"hbaseRam": 1,
"minContainerSize": 512,
"totalAvailableRam": 3072,
"containers": 6,
"ramPerContainer": 512,
"mapMemory": 512,
"reduceMemory": 512,
"amMemory": 512,
"referenceHost": hosts["items"][0]["Hosts"]
}
# Test - Cluster data with 1 host
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
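    # Sanity check on the expected fixture above: 6 GB of RAM minus 2 GB
    # reserved for the OS and 1 GB for HBase leaves (6 - 2 - 1) * 1024 =
    # 3072 MB; at the 512 MB minimum container size that is 3072 / 512 = 6
    # containers of 512 MB each.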
# Test - Cluster data with 2 hosts - pick minimum memory
servicesList.append("YARN")
    services = {"services":
[{"StackServices":
{"service_name" : "YARN",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"NODEMANAGER",
"custom_commands":[
],
"display_name":"NodeManager",
"is_client":"false",
"is_master":"false",
"service_name":"YARN",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
}
],
}],
"configurations": {}
}
hosts["items"][0]["Hosts"]["host_name"] = "host1"
hosts["items"].append({
"Hosts": {
"cpu_count" : 4,
"total_mem" : 500000,
"host_name" : "host2",
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
})
expected["referenceHost"] = hosts["items"][1]["Hosts"]
expected["referenceNodeManagerHost"] = hosts["items"][1]["Hosts"]
expected["amMemory"] = 170.66666666666666
expected["containers"] = 3.0
expected["cpu"] = 4
expected["totalAvailableRam"] = 512
expected["mapMemory"] = 170
expected["minContainerSize"] = 256
expected["reduceMemory"] = 170.66666666666666
expected["ram"] = 0
expected["ramPerContainer"] = 170.66666666666666
expected["reservedRam"] = 1
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, services)
self.assertEquals(result, expected)
def test_getConfigurationClusterSummary_withHBaseAnd48gbRam(self):
servicesList = ["HBASE"]
components = []
hosts = {
"items" : [
{
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
]
}
expected = {
"hBaseInstalled": True,
"components": components,
"cpu": 6,
"disk": 6,
"ram": 48,
"reservedRam": 6,
"hbaseRam": 8,
"minContainerSize": 2048,
"totalAvailableRam": 34816,
"containers": 11,
"ramPerContainer": 3072,
"mapMemory": 3072,
"reduceMemory": 3072,
"amMemory": 3072,
"referenceHost": hosts["items"][0]["Hosts"]
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(result, expected)
def test_recommendStormConfigurations(self):
# no AMS
configurations = {}
services = {
"services": [
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
# with AMS
configurations = {}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
expected = {
"storm-site": {
"properties": {
"metrics.reporter.register": "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
},
}
self.stackAdvisor.recommendStormConfigurations(configurations, None, services, None)
self.assertEquals(configurations, expected)
def test_recommendYARNConfigurations(self):
configurations = {}
services = {"configurations": configurations, "services": []}
clusterData = {
"containers" : 5,
"ramPerContainer": 256
}
expected = {
"yarn-env": {
"properties": {
"min_user_id": "500",
'service_check.queue.name': 'default'
}
},
"yarn-site": {
"properties": {
"yarn.nodemanager.linux-container-executor.group": "hadoop",
"yarn.nodemanager.resource.memory-mb": "1280",
"yarn.scheduler.minimum-allocation-mb": "256",
"yarn.scheduler.maximum-allocation-mb": "1280"
}
}
}
self.stackAdvisor.recommendYARNConfigurations(configurations, clusterData, services, None)
self.assertEquals(configurations, expected)
def test_recommendMapReduce2Configurations_mapMemoryLessThan2560(self):
configurations = {}
clusterData = {
"mapMemory": 567,
"reduceMemory": 345.6666666666666,
"amMemory": 123.54
}
expected = {
"mapred-site": {
"properties": {
'mapreduce.job.queuename': 'default',
"yarn.app.mapreduce.am.resource.mb": "123",
"yarn.app.mapreduce.am.command-opts": "-Xmx99m",
"mapreduce.map.memory.mb": "567",
"mapreduce.reduce.memory.mb": "345",
"mapreduce.map.java.opts": "-Xmx454m",
"mapreduce.reduce.java.opts": "-Xmx277m",
"mapreduce.task.io.sort.mb": "227"
}
}
}
self.stackAdvisor.recommendMapReduce2Configurations(configurations, clusterData, None, None)
self.assertEquals(configurations, expected)
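    # The expected values encode the advisor's sizing rules: each -Xmx is
    # roughly 0.8 of its container (0.8 * 567 = 453.6 -> 454m) and
    # mapreduce.task.io.sort.mb is roughly 0.4 of the map container
    # (0.4 * 567 = 226.8 -> 227), with container sizes floored to integers.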
def test_getConfigurationClusterSummary_noHostsWithoutHBase(self):
servicesList = []
components = []
hosts = {
"items" : []
}
result = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
expected = {
"hBaseInstalled": False,
"components": components,
"cpu": 0,
"disk": 0,
"ram": 0,
"reservedRam": 1,
"hbaseRam": 1,
"minContainerSize": 256,
"totalAvailableRam": 512,
"containers": 3,
"ramPerContainer": 170.66666666666666,
"mapMemory": 170,
"reduceMemory": 170.66666666666666,
"amMemory": 170.66666666666666
}
self.assertEquals(result, expected)
def prepareHosts(self, hostsNames):
hosts = { "items": [] }
for hostName in hostsNames:
nextHost = {"Hosts":{"host_name" : hostName}}
hosts["items"].append(nextHost)
return hosts
def prepareServices(self, servicesInfo):
services = { "Versions" : { "stack_name" : "HDP", "stack_version" : "2.0.6" } }
services["services"] = []
for serviceInfo in servicesInfo:
nextService = {"StackServices":{"service_name" : serviceInfo["name"]}}
nextService["components"] = []
for component in serviceInfo["components"]:
nextComponent = {
"StackServiceComponents": {
"component_name": component["name"],
"cardinality": component["cardinality"],
"component_category": component["category"],
"is_master": component["is_master"]
}
}
try:
nextComponent["StackServiceComponents"]["hostnames"] = component["hostnames"]
except KeyError:
nextComponent["StackServiceComponents"]["hostnames"] = []
try:
nextComponent["StackServiceComponents"]["display_name"] = component["display_name"]
except KeyError:
nextComponent["StackServiceComponents"]["display_name"] = component["name"]
nextService["components"].append(nextComponent)
services["services"].append(nextService)
return services
def assertHostLayout(self, componentsHostsMap, recommendation):
blueprintMapping = recommendation["recommendations"]["blueprint"]["host_groups"]
bindings = recommendation["recommendations"]["blueprint_cluster_binding"]["host_groups"]
actualComponentHostsMap = {}
for hostGroup in blueprintMapping:
hostGroupName = hostGroup["name"]
hostsInfos = [binding["hosts"] for binding in bindings if binding["name"] == hostGroupName][0]
hosts = [info["fqdn"] for info in hostsInfos]
for component in hostGroup["components"]:
componentName = component["name"]
try:
actualComponentHostsMap[componentName]
except KeyError, err:
actualComponentHostsMap[componentName] = []
for host in hosts:
if host not in actualComponentHostsMap[componentName]:
actualComponentHostsMap[componentName].append(host)
for componentName in componentsHostsMap.keys():
expectedHosts = componentsHostsMap[componentName]
actualHosts = actualComponentHostsMap[componentName]
self.checkEqual(expectedHosts, actualHosts)
def checkEqual(self, l1, l2):
if not len(l1) == len(l2) or not sorted(l1) == sorted(l2):
raise AssertionError("list1={0}, list2={1}".format(l1, l2))
def assertValidationResult(self, expectedItems, result):
actualItems = []
for item in result["items"]:
next = {"message": item["message"], "level": item["level"]}
try:
next["host"] = item["host"]
except KeyError, err:
pass
actualItems.append(next)
self.checkEqual(expectedItems, actualItems)
def test_recommendHbaseConfigurations(self):
servicesList = ["HBASE"]
configurations = {}
components = []
host_item = {
"Hosts" : {
"cpu_count" : 6,
"total_mem" : 50331648,
"disk_info" : [
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"},
{"mountpoint" : "/"},
{"mountpoint" : "/dev/shm"},
{"mountpoint" : "/vagrant"}
]
}
}
hosts = {
"items" : [host_item for i in range(1, 300)]
}
services = {
"services" : [
],
"configurations": {
"hbase-site": {
"properties": {
"hbase.superuser": "hbase"
}
},
"hbase-env": {
"properties": {
"hbase_user": "hbase123"
}
}
}
}
expected = {
'hbase-site': {
'properties': {
'hbase.superuser': 'hbase123'
}
},
"hbase-env": {
"properties": {
"hbase_master_heapsize": "4096",
"hbase_regionserver_heapsize": "8192",
}
}
}
clusterData = self.stackAdvisor.getConfigurationClusterSummary(servicesList, hosts, components, None)
self.assertEquals(clusterData['hbaseRam'], 8)
self.stackAdvisor.recommendHbaseConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_recommendRangerConfigurations(self):
clusterData = {}
# Recommend for not existing DB_FLAVOR and http enabled, HDP-2.3
services = {
"Versions" : {
"stack_version" : "2.3",
},
"services": [
{
"StackServices": {
"service_name": "RANGER",
"service_version": "0.5.0"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "NAMENODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "http://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for not existing DB_FLAVOR and http enabled, HDP-2.3")
# Recommend for DB_FLAVOR POSTGRES and https enabled, HDP-2.3
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "POSTGRES",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.https.port": "7777",
"ranger.service.http.enabled": "false",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:7777"
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR POSTGRES and https enabled, HDP-2.3")
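    # Pattern under test: policymgr_external_url is assembled from the
    # RANGER_ADMIN host plus the scheme and port taken from ranger-admin-site,
    # i.e. http with ranger.service.http.port when HTTP is enabled, otherwise
    # https with ranger.service.https.port.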
# Recommend for DB_FLAVOR ORACLE and https enabled, HDP-2.2
configurations = {
"admin-properties": {
"properties": {
"DB_FLAVOR": "ORACLE",
}
},
"ranger-site": {
"properties": {
"http.enabled": "false",
"https.service.port": "8888",
}
}
}
services['configurations'] = configurations
expected = {
"admin-properties": {
"properties": {
"policymgr_external_url": "https://host1:8888"
}
},
"ranger-env": {"properties": {}}
}
recommendedConfigurations = {}
services['services'][0]['StackServices']['service_version'] = "0.4.0"
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test for DB_FLAVOR ORACLE and https enabled, HDP-2.2")
# Test Recommend LDAP values
services["ambari-server-properties"] = {
"ambari.ldap.isConfigured" : "true",
"authentication.ldap.bindAnonymously" : "false",
"authentication.ldap.baseDn" : "dc=apache,dc=org",
"authentication.ldap.groupNamingAttr" : "cn",
"authentication.ldap.primaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.userObjectClass" : "posixAccount",
"authentication.ldap.secondaryUrl" : "c6403.ambari.apache.org:636",
"authentication.ldap.usernameAttribute" : "uid",
"authentication.ldap.dnAttribute" : "dn",
"authentication.ldap.useSSL" : "true",
"authentication.ldap.managerPassword" : "/etc/ambari-server/conf/ldap-password.dat",
"authentication.ldap.groupMembershipAttr" : "memberUid",
"authentication.ldap.groupObjectClass" : "posixGroup",
"authentication.ldap.managerDn" : "uid=hdfs,ou=people,ou=dev,dc=apache,dc=org"
}
services["configurations"] = {}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080',
}
},
'ranger-env': {'properties': {}},
'usersync-properties': {
'properties': {
'SYNC_LDAP_URL': 'ldaps://c6403.ambari.apache.org:636',
'SYNC_LDAP_BIND_DN': 'uid=hdfs,ou=people,ou=dev,dc=apache,dc=org',
'SYNC_LDAP_USER_OBJECT_CLASS': 'posixAccount',
'SYNC_LDAP_USER_NAME_ATTRIBUTE': 'uid'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Recommend LDAP values")
# Test Ranger Audit properties
del services["ambari-server-properties"]
services["configurations"] = {
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://host1:8080",
}
},
"ranger-env": {
"properties": {
"xasecure.audit.destination.db": "true",
"xasecure.audit.destination.hdfs":"false",
"xasecure.audit.destination.hdfs.dir":"hdfs://localhost:8020/ranger/audit/%app-type%/%time:yyyyMMdd%"
}
},
"ranger-hdfs-plugin-properties": {
"properties": {}
}
}
expected = {
'admin-properties': {
'properties': {
'policymgr_external_url': 'http://host1:6080'
}
},
'ranger-hdfs-plugin-properties': {
'properties': {
'XAAUDIT.HDFS.IS_ENABLED': 'false',
'XAAUDIT.HDFS.DESTINATION_DIRECTORY': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%',
'XAAUDIT.DB.IS_ENABLED': 'true'
}
},
'ranger-env': {
'properties': {
'xasecure.audit.destination.hdfs.dir': 'hdfs://host1:8080/ranger/audit/%app-type%/%time:yyyyMMdd%'
}
}
}
recommendedConfigurations = {}
self.stackAdvisor.recommendRangerConfigurations(recommendedConfigurations, clusterData, services, None)
self.assertEquals(recommendedConfigurations, expected, "Test Ranger Audit properties")
def test_recommendHDFSConfigurations(self):
configurations = {
"hadoop-env": {
"properties": {
"hdfs_user": "hdfs",
"proxyuser_group": "users"
}
},
"hive-env": {
"properties": {
"webhcat_user": "webhcat",
"hive_user": "hive"
}
},
"oozie-env": {
"properties": {
"oozie_user": "oozie"
}
},
"falcon-env": {
"properties": {
"falcon_user": "falcon"
}
}
}
hosts = {
"items": [
{
"href": "/api/v1/hosts/host1",
"Hosts": {
"cpu_count": 1,
"host_name": "c6401.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6401.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 2097152,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
{
"href": "/api/v1/hosts/host2",
"Hosts": {
"cpu_count": 1,
"host_name": "c6402.ambari.apache.org",
"os_arch": "x86_64",
"os_type": "centos6",
"ph_cpu_count": 1,
"public_host_name": "c6402.ambari.apache.org",
"rack_info": "/default-rack",
"total_mem": 1048576,
"disk_info": [{
"size": '8',
"mountpoint": "/"
}]
}
},
]}
services = {
"services": [
{
"StackServices": {
"service_name": "HDFS"
}, "components": []
},
{
"StackServices": {
"service_name": "FALCON"
}, "components": []
},
{
"StackServices": {
"service_name": "HIVE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/HIVE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "HIVE_SERVER",
"custom_commands": [],
"display_name": "Hive Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org","c6402.ambari.apache.org"]
}},
{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/WEBHCAT_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "WEBHCAT_SERVER",
"custom_commands": [],
"display_name": "WebHCat Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}}]
},
{
"StackServices": {
"service_name": "OOZIE"
}, "components": [{
"href": "/api/v1/stacks/HDP/versions/2.0.6/services/HIVE/components/OOZIE_SERVER",
"StackServiceComponents": {
"advertise_version": "true",
"cardinality": "1",
"component_category": "MASTER",
"component_name": "OOZIE_SERVER",
"custom_commands": [],
"display_name": "Oozie Server",
"is_client": "false",
"is_master": "true",
"service_name": "HIVE",
"stack_name": "HDP",
"stack_version": "2.0.6",
"hostnames": ["c6401.ambari.apache.org", "c6402.ambari.apache.org"]
}, }]
}],
"configurations": configurations,
"ambari-server-properties": {"ambari-server.user":"ambari_user"}
}
clusterData = {
"totalAvailableRam": 2048
}
ambariHostName = socket.getfqdn()
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site':
{'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
configurations["hadoop-env"]["properties"]['hdfs_user'] = "hdfs1"
changedConfigurations = [{"type":"hadoop-env",
"name":"hdfs_user",
"old_value":"hdfs"}]
services["changed-configurations"] = changedConfigurations
services['configurations'] = configurations
expected = {'oozie-env':
{'properties':
{'oozie_user': 'oozie'}},
'core-site': {'properties':
{'hadoop.proxyuser.ambari_user.groups': '*',
'hadoop.proxyuser.ambari_user.hosts': ambariHostName,
'hadoop.proxyuser.oozie.groups': '*',
'hadoop.proxyuser.hive.groups': '*',
'hadoop.proxyuser.hdfs1.groups': '*',
'hadoop.proxyuser.hdfs1.hosts': '*',
'hadoop.proxyuser.webhcat.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.hosts': '*',
'hadoop.proxyuser.webhcat.groups': '*',
'hadoop.proxyuser.hdfs.groups': '*',
'hadoop.proxyuser.hdfs.hosts': '*',
'hadoop.proxyuser.hive.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.oozie.hosts': 'c6401.ambari.apache.org,c6402.ambari.apache.org',
'hadoop.proxyuser.falcon.groups': '*'},
'property_attributes':
{'hadoop.proxyuser.hdfs.groups': {'delete': 'true'},
'hadoop.proxyuser.hdfs.hosts': {'delete': 'true'}}},
'falcon-env':
{'properties':
{'falcon_user': 'falcon'}},
'hive-env':
{'properties':
{'hive_user': 'hive',
'webhcat_user': 'webhcat'}},
'hdfs-site':
{'properties':
{'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024'}},
'hadoop-env':
{'properties':
{'hdfs_user': 'hdfs1',
'namenode_heapsize': '1024',
'proxyuser_group': 'users',
'namenode_opt_maxnewsize': '256',
'namenode_opt_newsize': '256'}}}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
# Verify dfs.namenode.rpc-address is recommended to be deleted when NN HA
configurations["hdfs-site"]["properties"]['dfs.internal.nameservices'] = "mycluster"
configurations["hdfs-site"]["properties"]['dfs.ha.namenodes.mycluster'] = "nn1,nn2"
services['configurations'] = configurations
expected["hdfs-site"] = {
'properties': {
'dfs.datanode.data.dir': '/hadoop/hdfs/data',
'dfs.datanode.du.reserved': '1024',
'dfs.internal.nameservices': 'mycluster',
'dfs.ha.namenodes.mycluster': 'nn1,nn2'
},
'property_attributes': {
'dfs.namenode.rpc-address': {
'delete': 'true'
}
}
}
self.stackAdvisor.recommendHDFSConfigurations(configurations, clusterData, services, hosts)
self.assertEquals(configurations, expected)
def test_getHostNamesWithComponent(self):
services = {
"services": [
{
"StackServices": {
"service_name": "SERVICE"
},
"components": [
{
"StackServiceComponents": {
"component_name": "COMPONENT",
"hostnames": ["host1","host2","host3"]
}
}
]
}
],
"configurations": {}
}
result = self.stackAdvisor.getHostNamesWithComponent("SERVICE","COMPONENT", services)
expected = ["host1","host2","host3"]
self.assertEquals(result, expected)
def test_getZKHostPortString(self):
configurations = {
"zoo.cfg": {
"properties": {
'clientPort': "2183"
}
}
}
services = {
"services": [
{
"StackServices": {
"service_name": "ZOOKEEPER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "ZOOKEEPER_SERVER",
"hostnames": ["zk.host1","zk.host2","zk.host3"]
}
}, {
"StackServiceComponents": {
"component_name": "ZOOKEEPER_CLIENT",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
result = self.stackAdvisor.getZKHostPortString(services)
expected = "zk.host1:2183,zk.host2:2183,zk.host3:2183"
self.assertEquals(result, expected)
def test_validateHDFSConfigurations(self):
configurations = {}
services = ''
hosts = ''
    # Default configuration
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '1024'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
    # Value is less than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '512'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertTrue(res)
    # Value is bigger than expected
recommendedDefaults = {'dfs.datanode.du.reserved': '1024'}
properties = {'dfs.datanode.du.reserved': '2048'}
res = self.stackAdvisor.validateHDFSConfigurations(properties,
recommendedDefaults, configurations, services, hosts)
self.assertFalse(res)
def test_validateHDFSConfigurationsEnv(self):
configurations = {}
# 1) ok: namenode_heapsize > recommended
recommendedDefaults = {'namenode_heapsize': '1024',
'namenode_opt_newsize' : '256',
'namenode_opt_maxnewsize' : '256'}
properties = {'namenode_heapsize': '2048',
'namenode_opt_newsize' : '300',
'namenode_opt_maxnewsize' : '300'}
res_expected = []
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
# 2) fail: namenode_heapsize, namenode_opt_maxnewsize < recommended
properties['namenode_heapsize'] = '1022'
properties['namenode_opt_maxnewsize'] = '255'
res_expected = [{'config-type': 'hadoop-env',
'message': 'Value is less than the recommended default of 1024',
'type': 'configuration',
'config-name': 'namenode_heapsize',
'level': 'WARN'},
{'config-name': 'namenode_opt_maxnewsize',
'config-type': 'hadoop-env',
'level': 'WARN',
'message': 'Value is less than the recommended default of 256',
'type': 'configuration'}]
res = self.stackAdvisor.validateHDFSConfigurationsEnv(properties, recommendedDefaults, configurations, '', '')
self.assertEquals(res, res_expected)
def test_validateAmsHbaseSiteConfigurations(self):
configurations = {
"hdfs-site": {
"properties": {
'dfs.datanode.data.dir': "/hadoop/data"
}
},
"core-site": {
"properties": {
"fs.defaultFS": "hdfs://c6401.ambari.apache.org:8020"
}
},
"ams-site": {
"properties": {
"timeline.metrics.service.operation.mode": "embedded"
}
}
}
recommendedDefaults = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
host = {
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152,
"disk_info": [
{
"available": str(15<<30), # 15 GB
"type": "ext4",
"mountpoint": "/"
}
]
}
}
hosts = {
"items" : [
host
]
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "METRICS_COLLECTOR",
"hostnames": ["host1"]
}
}, {
"StackServiceComponents": {
"component_name": "METRICS_MONITOR",
"hostnames": ["host1"]
}
}
]
},
{
"StackServices": {
"service_name": "HDFS"
},
"components": [
{
"StackServiceComponents": {
"component_name": "DATANODE",
"hostnames": ["host1"]
}
}
]
}
],
"configurations": configurations
}
# only 1 partition, enough disk space, no warnings
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
    # 1 partition, not enough disk space
host['Hosts']['disk_info'] = [
{
"available" : '1',
"type" : "ext4",
"mountpoint" : "/"
}
]
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Ambari Metrics disk space requirements not met. '
'\nRecommended disk space for partition / is 10G',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# 2 partitions
host['Hosts']['disk_info'] = [
{
"available": str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/grid/0"
},
{
"available" : str(15<<30), # 15 GB
"type" : "ext4",
"mountpoint" : "/"
}
]
recommendedDefaults = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir': '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = []
self.assertEquals(res, expected)
# dfs.dir & hbase.rootdir crosscheck + root partition + hbase.rootdir == hbase.tmp.dir warnings
properties = {
'hbase.rootdir': 'file:///var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'It is not recommended to use root partition for hbase.rootdir',
'type': 'configuration'
},
{
'config-name': 'hbase.tmp.dir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics temporary data. '
'/ partition is already used as hbase.rootdir to store metrics data',
'type': 'configuration'
},
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'Consider not using / partition for storing metrics data. '
'/ is already used by datanode to store HDFS data',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
# incorrect hbase.rootdir in distributed mode
properties = {
'hbase.rootdir': 'file:///grid/0/var/lib/ambari-metrics-collector/hbase',
'hbase.tmp.dir' : '/var/lib/ambari-metrics-collector/hbase',
'hbase.cluster.distributed': 'false'
}
configurations['ams-site']['properties']['timeline.metrics.service.operation.mode'] = 'distributed'
res = self.stackAdvisor.validateAmsHbaseSiteConfigurations(properties, recommendedDefaults, configurations, services, hosts)
expected = [
{
'config-name': 'hbase.rootdir',
'config-type': 'ams-hbase-site',
'level': 'WARN',
'message': 'In distributed mode hbase.rootdir should point to HDFS.',
'type': 'configuration'
},
{
'config-name': 'hbase.cluster.distributed',
'config-type': 'ams-hbase-site',
'level': 'ERROR',
'message': 'hbase.cluster.distributed property should be set to true for distributed mode',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_validateStormSiteConfigurations(self):
configurations = {
"storm-site": {
"properties": {
'metrics.reporter.register': "org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter"
}
}
}
recommendedDefaults = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
properties = {
'metrics.reporter.register': 'org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter',
}
services = {
"services": [
{
"StackServices": {
"service_name": "AMBARI_METRICS"
}
}
],
"configurations": configurations
}
# positive
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = []
self.assertEquals(res, expected)
properties['metrics.reporter.register'] = ''
res = self.stackAdvisor.validateStormConfigurations(properties, recommendedDefaults, configurations, services, None)
expected = [
{'config-name': 'metrics.reporter.register',
'config-type': 'storm-site',
'level': 'WARN',
'message': 'Should be set to org.apache.hadoop.metrics2.sink.storm.StormTimelineMetricsReporter '
'to report the metrics to Ambari Metrics service.',
'type': 'configuration'
}
]
self.assertEquals(res, expected)
def test_getHostsWithComponent(self):
services = {"services":
[{"StackServices":
{"service_name" : "HDFS",
"service_version" : "2.6.0.2.2"
},
"components":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/DATANODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1+",
"component_category":"SLAVE",
"component_name":"DATANODE",
"custom_commands":[
],
"display_name":"DataNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1",
"host2"
]
},
"dependencies":[
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"0+",
"component_category":"SLAVE",
"component_name":"JOURNALNODE",
"custom_commands":[
],
"display_name":"JournalNode",
"is_client":"false",
"is_master":"false",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host1"
]
},
"dependencies":[
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/JOURNALNODE/dependencies/HDFS_CLIENT",
"Dependencies":{
"component_name":"HDFS_CLIENT",
"dependent_component_name":"JOURNALNODE",
"dependent_service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2"
}
}
]
},
{
"href":"/api/v1/stacks/HDP/versions/2.2/services/HDFS/components/NAMENODE",
"StackServiceComponents":{
"advertise_version":"true",
"cardinality":"1-2",
"component_category":"MASTER",
"component_name":"NAMENODE",
"custom_commands":[
"DECOMMISSION",
"REBALANCEHDFS"
],
"display_name":"NameNode",
"is_client":"false",
"is_master":"true",
"service_name":"HDFS",
"stack_name":"HDP",
"stack_version":"2.2",
"hostnames":[
"host2"
]
},
"dependencies":[
]
},
],
}],
"configurations": {}
}
hosts = {
"items" : [
{
"href" : "/api/v1/hosts/host1",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host1",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host1",
"rack_info" : "/default-rack",
"total_mem" : 2097152
}
},
{
"href" : "/api/v1/hosts/host2",
"Hosts" : {
"cpu_count" : 1,
"host_name" : "host2",
"os_arch" : "x86_64",
"os_type" : "centos6",
"ph_cpu_count" : 1,
"public_host_name" : "host2",
"rack_info" : "/default-rack",
"total_mem" : 1048576
}
},
]
}
datanodes = self.stackAdvisor.getHostsWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(len(datanodes), 2)
self.assertEquals(datanodes, hosts["items"])
datanode = self.stackAdvisor.getHostWithComponent("HDFS", "DATANODE", services, hosts)
self.assertEquals(datanode, hosts["items"][0])
namenodes = self.stackAdvisor.getHostsWithComponent("HDFS", "NAMENODE", services, hosts)
self.assertEquals(len(namenodes), 1)
# [host2]
self.assertEquals(namenodes, [hosts["items"][1]])
namenode = self.stackAdvisor.getHostWithComponent("HDFS", "NAMENODE", services, hosts)
# host2
self.assertEquals(namenode, hosts["items"][1])
# not installed
nodemanager = self.stackAdvisor.getHostWithComponent("YARN", "NODEMANAGER", services, hosts)
self.assertEquals(nodemanager, None)
# unknown component
unknown_component = self.stackAdvisor.getHostWithComponent("YARN", "UNKNOWN", services, hosts)
self.assertEquals(nodemanager, None)
# unknown service
unknown_component = self.stackAdvisor.getHostWithComponent("UNKNOWN", "NODEMANAGER", services, hosts)
self.assertEquals(nodemanager, None)
def test_mergeValidators(self):
childValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"newconf": "new2.3"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
parentValidators = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.2",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.2",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.2",
"hbase-env": "validateHBASEEnvConfigurations2.2"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"}
}
expected = {
"HDFS": {"hdfs-site": "validateHDFSConfigurations2.3",
"hadoop-env": "validateHDFSConfigurationsEnv2.2"},
"YARN": {"yarn-env": "validateYARNEnvConfigurations2.2"},
"HIVE": {"hiveserver2-site": "validateHiveServer2Configurations2.3",
"hive-site": "validateHiveConfigurations2.2",
"hive-env": "validateHiveConfigurationsEnv2.2"},
"HBASE": {"hbase-site": "validateHBASEConfigurations2.3",
"hbase-env": "validateHBASEEnvConfigurations2.2",
"newconf": "new2.3"},
"MAPREDUCE2": {"mapred-site": "validateMapReduce2Configurations2.2"},
"TEZ": {"tez-site": "validateTezConfigurations2.2"},
"NEWSERVICE" : {"newserviceconf": "abc2.3"}
}
self.stackAdvisor.mergeValidators(parentValidators, childValidators)
self.assertEquals(expected, parentValidators)
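    # As the expected map shows, mergeValidators lets child entries override
    # the parent per config file (hdfs-site picks up the 2.3 validator),
    # keeps parent entries the child omits, and appends brand new services
    # such as NEWSERVICE.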
def test_getProperMountPoint(self):
hostInfo = None
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo = {"some_key": []}
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
hostInfo["disk_info"] = []
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# root mountpoint with low space available
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# tmpfs with more space available
hostInfo["disk_info"].append(
{
"available" : "2",
"type" : "tmpfs",
"mountpoint" : "/dev/shm"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# /boot with more space available
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "tmpfs",
"mountpoint" : "/boot/grub"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
    # /mnt/external_hdd with more space available
hostInfo["disk_info"].append(
{
"available" : "4",
"type" : "tmpfs",
"mountpoint" : "/mnt/external_hdd"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# virtualbox fs with more space available
hostInfo["disk_info"].append(
{
"available" : "5",
"type" : "vboxsf",
"mountpoint" : "/vagrant"
}
)
self.assertEquals(["/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "6",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
self.assertEquals(["/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
# proper mountpoint with more space available
hostInfo["disk_info"].append(
{
"available" : "7",
"type" : "ext4",
"mountpoint" : "/grid/1"
}
)
self.assertEquals(["/grid/1", "/grid/0", "/"], self.stackAdvisor.getPreferredMountPoints(hostInfo))
def test_validateNonRootFs(self):
hostInfo = {"disk_info": [
{
"available" : "2",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
recommendedDefaults = {"property1": "file:///var/dir"}
# only / mountpoint - no warning
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
# More preferable /grid/0 mountpoint - warning
hostInfo["disk_info"].append(
{
"available" : "3",
"type" : "ext4",
"mountpoint" : "/grid/0"
}
)
recommendedDefaults = {"property1": "file:///grid/0/var/dir"}
warn = self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo)
self.assertTrue(warn != None)
self.assertEquals({'message': 'It is not recommended to use root partition for property1', 'level': 'WARN'}, warn)
    # Set by user /var mountpoint, which is non-root, but not preferable - no warning
hostInfo["disk_info"].append(
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/var"
}
)
self.assertTrue(self.stackAdvisor.validatorNotRootFs(properties, recommendedDefaults, 'property1', hostInfo) == None)
def test_validatorEnoughDiskSpace(self):
    requiredDiskSpace = 1048576
errorMsg = "Ambari Metrics disk space requirements not met. \n" \
"Recommended disk space for partition / is 1G"
# local FS, enough space
hostInfo = {"disk_info": [
{
"available" : "1048578",
"type" : "ext4",
"mountpoint" : "/"
}
]}
properties = {"property1": "file:///var/dir"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
    # local FS, not enough space
hostInfo = {"disk_info": [
{
"available" : "1",
"type" : "ext4",
"mountpoint" : "/"
}
]}
    warn = self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace)
self.assertTrue(warn != None)
self.assertEquals({'message': errorMsg, 'level': 'WARN'}, warn)
# non-local FS, HDFS
properties = {"property1": "hdfs://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
# non-local FS, WASB
properties = {"property1": "wasb://h1"}
    self.assertTrue(self.stackAdvisor.validatorEnoughDiskSpace(properties, 'property1', hostInfo, requiredDiskSpace) == None)
def test_round_to_n(self):
self.assertEquals(self.stack_advisor_impl.round_to_n(0), 0)
self.assertEquals(self.stack_advisor_impl.round_to_n(1000), 1024)
self.assertEquals(self.stack_advisor_impl.round_to_n(2000), 2048)
self.assertEquals(self.stack_advisor_impl.round_to_n(4097), 4096)
def test_getMountPointForDir(self):
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/"]), "/")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("/var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("file:///var/log", ["/var", "/"]), "/var")
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("hdfs:///hdfs_path", ["/var", "/"]), None)
self.assertEquals(self.stack_advisor_impl.getMountPointForDir("relative/path", ["/var", "/"]), None)
def test_getValidatorEqualsToRecommendedItem(self):
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value1"}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), None)
properties = {"property1": "value1"}
recommendedDefaults = {"property1": "value2"}
expected = {'message': 'It is recommended to set value value2 for property property1', 'level': 'WARN'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {}
recommendedDefaults = {"property1": "value2"}
expected = {'level': 'ERROR', 'message': 'Value should be set for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
properties = {"property1": "value1"}
recommendedDefaults = {}
expected = {'level': 'ERROR', 'message': 'Value should be recommended for property1'}
self.assertEquals(self.stackAdvisor.validatorEqualsToRecommendedItem(properties, recommendedDefaults, "property1"), expected)
def test_getServicesSiteProperties(self):
import imp, os
testDirectory = os.path.dirname(os.path.abspath(__file__))
hdp206StackAdvisorPath = os.path.join(testDirectory, '../../../../../main/resources/stacks/HDP/2.0.6/services/stack_advisor.py')
stack_advisor = imp.load_source('stack_advisor', hdp206StackAdvisorPath)
services = {
"services": [
{
"StackServices": {
"service_name": "RANGER"
},
"components": [
{
"StackServiceComponents": {
"component_name": "RANGER_ADMIN",
"hostnames": ["host1"]
}
}
]
},
],
"configurations": {
"admin-properties": {
"properties": {
"DB_FLAVOR": "NOT_EXISTING",
}
},
"ranger-admin-site": {
"properties": {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
}
}
}
expected = {
"ranger.service.http.port": "7777",
"ranger.service.http.enabled": "true",
}
siteProperties = stack_advisor.getServicesSiteProperties(services, "ranger-admin-site")
self.assertEquals(siteProperties, expected)
def test_createComponentLayoutRecommendations_addService_1freeHost(self):
"""
Test that already installed slaves are not added to any free hosts (not having any component installed)
as part of recommendation received during Add service operation.
For already installed services, recommendation for installed components should match the existing layout
"""
services = {
"services" : [
{
"StackServices" : {
"service_name" : "HDFS"
},
"components" : [ {
"StackServiceComponents" : {
"cardinality" : "1+",
"component_category" : "SLAVE",
"component_name" : "DATANODE",
"hostnames" : [ "c6401.ambari.apache.org" ]
}
} ]
} ]
}
hosts = self.prepareHosts(["c6401.ambari.apache.org", "c6402.ambari.apache.org"])
recommendations = self.stackAdvisor.createComponentLayoutRecommendations(services, hosts)
"""
Recommendation received should be as below:
{
'blueprint': {
'host_groups': [{
'name': 'host-group-1',
'components': []
}, {
'name': 'host-group-2',
'components': [{
'name': 'DATANODE'
}]
}]
},
'blueprint_cluster_binding': {
'host_groups': [{
'hosts': [{
'fqdn': 'c6402.ambari.apache.org'
}],
'name': 'host-group-1'
}, {
'hosts': [{
'fqdn': 'c6401.ambari.apache.org'
}],
'name': 'host-group-2'
}]
}
}
"""
# Assert that the list is empty for host-group-1
self.assertFalse(recommendations['blueprint']['host_groups'][0]['components'])
# Assert that DATANODE is placed on host-group-2
self.assertEquals(recommendations['blueprint']['host_groups'][1]['components'][0]['name'], 'DATANODE')
| 36.13997 | 166 | 0.551836 | 72,478 | 0.988408 | 0 | 0 | 483 | 0.006587 | 0 | 0 | 31,198 | 0.425458 |
6a8f6efd4560f4b302cef1f8c07a3c86f509c35d | 642 | py | Python | debugging/code/multiprocess_main.py | awesome-archive/python-debugging-skills | 69af455302a805d6f198a06ea934f79d5913cb3e | ["MIT"] | null | null | null | debugging/code/multiprocess_main.py | awesome-archive/python-debugging-skills | 69af455302a805d6f198a06ea934f79d5913cb3e | ["MIT"] | null | null | null | debugging/code/multiprocess_main.py | awesome-archive/python-debugging-skills | 69af455302a805d6f198a06ea934f79d5913cb3e | ["MIT"] | null | null | null |
# -*- encoding: utf-8 -*-
import multiprocessing as mp
import time
from pudb.remote import set_trace
def worker(worker_id):
""" Simple worker process"""
i = 0
while i < 10:
if worker_id == 1: # debug process with id 1
set_trace(term_size=(80, 24))
time.sleep(1) # represents some work
print('In Process {}, i:{}'.format(worker_id, i))
i = i + 1
if __name__ == '__main__':
processes = []
for p_id in range(2): # 2 worker processes
p = mp.Process(target=worker, args=(p_id,))
p.start()
processes.append(p)
for p in processes:
p.join()
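# Editor's note (added; not in the original file): pudb.remote.set_trace()
# opens a telnet-accessible debugger instead of using the local terminal.
# By default it binds to localhost on a port starting at 6899, so a session
# can typically be attached with:
#
#   telnet 127.0.0.1 6899
#
# pudb prints the actual host/port when the breakpoint is hit; treat the
# values above as assumptions, not guarantees.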
| 22.928571 | 57 | 0.573209 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.235202 |
6a8fb5db5439c528b72c62e081f71f595115b9ad | 1,317 | py | Python | lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | ["Apache-2.0"] | null | null | null | lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | ["Apache-2.0"] | null | null | null | lldb/packages/Python/lldbsuite/test/expression_command/anonymous-struct/TestCallUserAnonTypedef.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | ["Apache-2.0"] | null | null | null |
"""
Test calling user defined functions using expression evaluation.
This test checks that typesystem lookup works correctly for typedefs of
untagged structures.
Ticket: https://llvm.org/bugs/show_bug.cgi?id=26790
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestExprLookupAnonStructTypedef(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
# Find the breakpoint
self.line = line_number('main.cpp', '// lldb testsuite break')
@expectedFailureAll(oslist=["windows"])
@expectedFailureAll(
oslist=['linux'],
archs=['arm'],
bugnumber="llvm.org/pr27868")
def test(self):
"""Test typedeffed untagged struct arguments for function call expressions"""
self.build()
self.runCmd("file "+self.getBuildArtifact("a.out"),
CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self,
"main.cpp",
self.line,
num_expected_locations=-1,
loc_exact=True
)
self.runCmd("run", RUN_SUCCEEDED)
self.expect("expr multiply(&s)", substrs=['$0 = 1'])
| 28.021277 | 85 | 0.658314 | 928 | 0.704632 | 0 | 0 | 677 | 0.514047 | 0 | 0 | 446 | 0.338648 |
6a8fddf8511ca7d429d8644119f475536d5dae17 | 2,486 | py | Python | main.py | ThomasDLi/simple-photo-editor | f8b3f1025155e2542b93b94c12d607b9b5e45731 | ["MIT"] | 1 | 2021-05-21T19:21:26.000Z | 2021-05-21T19:21:26.000Z | main.py | ThomasDLi/simple-photo-editor | f8b3f1025155e2542b93b94c12d607b9b5e45731 | ["MIT"] | null | null | null | main.py | ThomasDLi/simple-photo-editor | f8b3f1025155e2542b93b94c12d607b9b5e45731 | ["MIT"] | null | null | null |
from PIL import Image, ImageEnhance
user_account_name = "Thomas.Li26"
def main():
mode = input("Specify image editing mode. Type DEEPFRY, STRETCH, BRIGHTNESS, SHARPEN, or INVERT: ")
if mode == "DEEPFRY":
DEEPFRY()
if mode == "STRETCH":
STRETCH()
if mode == "INVERT":
INVERT()
if mode == "BRIGHTNESS":
BRIGHTNESS()
if mode == "SHARPEN":
SHARPEN()
def DEEPFRY():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
factor = float(input("Specify deepfry amount (0-100): "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def STRETCH():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
factor = int(input("Specify width: "))
factor2 = int(input("Specify height: "))
im_output = im.resize((factor,factor2))
im_output.save('more-contrast-image.png')
im_output.show()
def INVERT():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Contrast(im)
im_output = enhancer.enhance(-1)
im_output.save('more-contrast-image.png')
im_output.show()
def BRIGHTNESS():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Brightness(im)
factor = float(input("Specify brightness amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
def SHARPEN():
img = input("Insert the name of an image found in the Downloads folder (for example: Image.png): ")
im = Image.open(r"C:\Users\{}\Downloads\{}".format(user_account_name, img))
enhancer = ImageEnhance.Sharpness(im)
factor = float(input("Specify sharpening amount: "))
im_output = enhancer.enhance(factor)
im_output.save('more-contrast-image.png')
im_output.show()
if __name__ == "__main__":
main()
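# Editor's note (added; not in the original script): for all PIL ImageEnhance
# classes used above, enhance(1.0) returns the original image, enhance(0.0) a
# fully degraded one (solid gray for Contrast, black for Brightness), and
# factors > 1.0 amplify the effect. A minimal standalone sketch:
#
#   from PIL import Image, ImageEnhance
#   im = Image.open("example.png")            # hypothetical input file
#   ImageEnhance.Contrast(im).enhance(2.0)    # double the contrast
#   ImageEnhance.Brightness(im).enhance(0.5)  # halve the brightness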
| 38.84375 | 104 | 0.650442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 972 | 0.39099 |
6a90540c04aa6d7b6d0419ad6f3c05ff1c4aa797 | 377 | py | Python | scripts/field/Curbrock_Summon1.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 54 | 2019-04-16T23:24:48.000Z | 2021-12-18T11:41:50.000Z | scripts/field/Curbrock_Summon1.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 3 | 2019-05-19T15:19:41.000Z | 2020-04-27T16:29:16.000Z | scripts/field/Curbrock_Summon1.py | G00dBye/YYMS | 1de816fc842b6598d5b4b7896b6ab0ee8f7cdcfb | ["MIT"] | 49 | 2020-11-25T23:29:16.000Z | 2022-03-26T16:20:24.000Z |
# Curbrock Summon 2
CURBROCK2 = 9400930 # MOD ID
CURBROCKS_ESCAPE_ROUTE_VER2 = 600050040 # MAP ID
CURBROCKS_ESCAPE_ROUTE_VER3 = 600050050 # MAP ID 2
sm.spawnMob(CURBROCK2, 190, -208, False)
sm.createClock(1800)
sm.addEvent(sm.invokeAfterDelay(1800000, "warp", CURBROCKS_ESCAPE_ROUTE_VER3, 0))
sm.waitForMobDeath(CURBROCK2)
sm.warp(CURBROCKS_ESCAPE_ROUTE_VER2)
sm.stopEvents()
| 31.416667 | 81 | 0.806366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.135279 |
6a9096da46f0e3f90b72b1a819bf62ce11ddc6bd | 786 | py | Python | trainloops/listeners/cluster_killswitch.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | ["MIT"] | null | null | null | trainloops/listeners/cluster_killswitch.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | ["MIT"] | null | null | null | trainloops/listeners/cluster_killswitch.py | Gerryflap/master_thesis | 5dc16e21b23837fee8a4532679bb5cb961af0b7c | ["MIT"] | null | null | null |
"""
Cancelling jobs on the University cluster forces programs to instantly quit,
which sometimes crashes cluster nodes.
As a remedy, this killswitch listener will stop the experiment in a nicer way to prevent this from happening.
The experiment will be stopped if a file named "stop" is encountered in the results folder of the experiment.
The existence of this file is checked after each epoch.
"""
import os
from trainloops.listeners.listener import Listener
class KillSwitchListener(Listener):
def __init__(self, experiment_path):
super().__init__()
self.path = os.path.join(experiment_path, "stop")
def initialize(self):
pass
def report(self, state_dict):
if os.path.exists(self.path):
exit()
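# Usage sketch (added for illustration; the train-loop wiring below is an
# assumption based on the listener interface, not code from this repository):
#
#   listener = KillSwitchListener("results/experiment_1")
#   listener.initialize()
#   # ... after each epoch the train loop would call:
#   listener.report(state_dict)
#
# Creating the killswitch file then stops the run after the current epoch:
#   touch results/experiment_1/stop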
| 32.75 | 113 | 0.704835 | 293 | 0.372774 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.550891 |
6a920dacf31156e85a0fcebb52765a1d1ca683fe | 2,255 | py | Python | authors/apps/notifications/views.py | andela/ah-backend-spaces- | 58e031a96a6b9555f1a4133cf8cb688c236d3f3b | [
"BSD-3-Clause"
] | 2 | 2018-08-17T15:47:36.000Z | 2018-09-13T13:58:34.000Z | authors/apps/notifications/views.py | andela/ah-backend-spaces- | 58e031a96a6b9555f1a4133cf8cb688c236d3f3b | [
"BSD-3-Clause"
] | 35 | 2018-07-24T11:42:53.000Z | 2021-06-10T20:34:41.000Z | authors/apps/notifications/views.py | andela/ah-backend-spaces- | 58e031a96a6b9555f1a4133cf8cb688c236d3f3b | [
"BSD-3-Clause"
] | 3 | 2018-07-17T13:05:35.000Z | 2018-09-06T16:03:52.000Z | from rest_framework import status
from rest_framework.generics import (
RetrieveUpdateAPIView, CreateAPIView,
RetrieveUpdateDestroyAPIView
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from ..authentication.backends import JWTAuthentication
from ..authentication.models import User
from .models import Notifications
from .renderers import (
NotificationsJSONRenderer
)
from .serializers import (
NotificationsAPIViewSerializer, GetNotificationsAPIViewSerializer
)
class NotificationsAPIView(RetrieveUpdateAPIView):
permission_classes = (IsAuthenticated,)
renderer_classes = (NotificationsJSONRenderer,)
def put(self, request):
"""
This class method is used to update a users article
"""
serializer_class = NotificationsAPIViewSerializer
notification = request.data.get('notification', {})
user_data = JWTAuthentication().authenticate(request)
# append user_id from token to article variable for later validations in serializers
notification["user_id"] = user_data[1]
serializer = serializer_class(data=notification)
serializer.is_valid(raise_exception=True)
# update the notification statue to True
serializer.update_read_status(serializer.data["notifications"])
return Response(serializer.data, status=status.HTTP_201_CREATED)
def get(self, request):
"""
retrieve all notifications of a user
"""
# decode users authentication token
user_data = JWTAuthentication().authenticate(request)
# get user notifications details from the Notifications table in the database
notifications = Notifications.objects.filter(notification_owner=user_data[1]).values(
"id", "article_id", "notification_title", "notification_body",
"notification_owner", "read_status"
)
# create a list of notifications
# the action below is done by use of list comprehension
list_of_notifications = [i for i in notifications]
return Response({"notifications": list_of_notifications}, status=status.HTTP_200_OK)
| 34.692308 | 93 | 0.73082 | 1,663 | 0.737472 | 0 | 0 | 0 | 0 | 0 | 0 | 599 | 0.265632 |
6a921ec9df90e9d0bc4821cbf3d19c03f4f29792 | 1,882 | py | Python | scripts/common/frozendict.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/common/frozendict.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | scripts/common/frozendict.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Implements a frozen dictionary-like object"""
import collections
import copy
import common.memo as memo
class frozendict(collections.Mapping):
"""A frozen dictionary class"""
def __init__(self, *args, **kwargs):
self._data = dict(*args, **kwargs)
def __iter__(self):
return iter(self._data)
def __len__(self):
return len(self._data)
def __getitem__(self, key):
return self._data[key]
@memo.memo_i()
def __hash__(self):
return hash(self.itemtuple())
def __str__(self):
return str(self._data)
def __repr__(self):
return '%s(%s)' % (type(self).__name__, str(self))
def __eq__(self, other):
return self._data == other
def __ne__(self, other):
return not self == other
def __deepcopy__(self, _memo):
return copy.deepcopy(self._data)
@memo.memo_i()
def itemtuple(self):
return tuple(sorted(self.iteritems()))
def mutableDict(self):
"""
Returns a mutable dictionary copy, replacing 'frozendict' with 'dict's.
This function uses the 'copy.deepcopy' method to create a mutable deep copy
of the dictionary.
Note that due to the one-size-fits-all behavior of 'deepcopy', the result
can be anything from heavyhanded to incorrect depending on the contents of
the dictionary. The caller should make sure they understand the operation
and its behavior on all of the dictionary's subtypes before using it.
Returns: (dict) A mutable clone of the dictionary and its members.
"""
return copy.deepcopy(self)
def extend(self, **kwargs):
"""Returns a copy of this object with the 'kwargs' fields updated."""
ndata = self.mutableDict()
ndata.update(kwargs)
return type(self)(**ndata)
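# Illustrative usage (added; not part of the original module). Because
# frozendict is hashable, instances can be used as dict keys or set members:
#
#   d = frozendict({'a': 1, 'b': 2})
#   d2 = d.extend(b=3)           # frozendict({'a': 1, 'b': 3})
#   cache = {d: 'original', d2: 'extended'}
#   m = d.mutableDict()          # plain dict copy, safe to mutate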
| 26.507042 | 79 | 0.698193 | 1,608 | 0.85441 | 0 | 0 | 150 | 0.079702 | 0 | 0 | 888 | 0.471838 |
6a92b244776a352d3c8cb2387f8e203d0ce669c3 | 22 | py | Python | avatar/__init__.py | yogeshkheri/geonode-avatar | 293474f814117ae680278223c8cdf8d59c67862d | ["BSD-3-Clause"] | 3 | 2021-10-17T20:37:40.000Z | 2022-03-17T10:29:14.000Z | avatar/__init__.py | yogeshkheri/geonode-avatar | 293474f814117ae680278223c8cdf8d59c67862d | ["BSD-3-Clause"] | 4 | 2021-09-02T13:26:11.000Z | 2022-03-16T12:26:36.000Z | avatar/__init__.py | yogeshkheri/geonode-avatar | 293474f814117ae680278223c8cdf8d59c67862d | ["BSD-3-Clause"] | null | null | null |
__version__ = '5.0.2'
| 11 | 21 | 0.636364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.318182 |
6a9450d0c04cca0024a14e15346193d320a70e58 | 50 | py | Python | __init__.py | mmanganel/neurecon | 5e86324675985f1fedfc4d5d3ac2e750f480538f | ["MIT"] | null | null | null | __init__.py | mmanganel/neurecon | 5e86324675985f1fedfc4d5d3ac2e750f480538f | ["MIT"] | null | null | null | __init__.py | mmanganel/neurecon | 5e86324675985f1fedfc4d5d3ac2e750f480538f | ["MIT"] | null | null | null |
from neurecon.reconstruction import reconstruct
| 12.5 | 47 | 0.86 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a94a7cd3e89c26bee4c47c7741e5a37358da6ff | 46 | py | Python | Fundamentals/Reversed Strings.py | gnvidal/Codewars | 117a83bd949a1503f31f1f915641e96e7bf7a04c | ["MIT"] | 49 | 2018-04-30T06:42:45.000Z | 2021-07-22T16:39:02.000Z | Fundamentals/Reversed Strings.py | gnvidal/Codewars | 117a83bd949a1503f31f1f915641e96e7bf7a04c | ["MIT"] | 1 | 2020-08-31T02:36:53.000Z | 2020-08-31T10:14:00.000Z | Fundamentals/Reversed Strings.py | gnvidal/Codewars | 117a83bd949a1503f31f1f915641e96e7bf7a04c | ["MIT"] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z |
def solution(string):
return string[::-1]
| 15.333333 | 23 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a95707999c6dd8d718ea3549fc898fb5e496ed8 | 965 | py | Python | python/flexflow/keras/datasets/cifar.py | zmxdream/FlexFlow | 7ea50d71a02e853af7ae573d88c911511b3e82e0 | ["Apache-2.0"] | 455 | 2018-12-09T01:57:46.000Z | 2022-03-22T01:56:47.000Z | python/flexflow/keras/datasets/cifar.py | zmxdream/FlexFlow | 7ea50d71a02e853af7ae573d88c911511b3e82e0 | ["Apache-2.0"] | 136 | 2019-04-19T08:24:27.000Z | 2022-03-28T01:39:19.000Z | python/flexflow/keras/datasets/cifar.py | zmxdream/FlexFlow | 7ea50d71a02e853af7ae573d88c911511b3e82e0 | ["Apache-2.0"] | 102 | 2018-12-22T07:38:05.000Z | 2022-03-30T06:04:39.000Z |
# -*- coding: utf-8 -*-
"""Utilities common to CIFAR10 and CIFAR100 datasets.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from six.moves import cPickle
def load_batch(fpath, label_key='labels'):
"""Internal utility for parsing CIFAR data.
# Arguments
fpath: path the file to parse.
label_key: key for label data in the retrieve
dictionary.
# Returns
A tuple `(data, labels)`.
"""
with open(fpath, 'rb') as f:
if sys.version_info < (3,):
d = cPickle.load(f)
else:
d = cPickle.load(f, encoding='bytes')
# decode utf8
d_decoded = {}
for k, v in d.items():
d_decoded[k.decode('utf8')] = v
d = d_decoded
data = d['data']
labels = d[label_key]
data = data.reshape(data.shape[0], 3, 32, 32)
    return data, labels
| 26.081081 | 53 | 0.586528 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.370984 |
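# Usage sketch for load_batch (added; file paths are assumptions):
#
#   data, labels = load_batch("cifar-10-batches-py/data_batch_1")
#   # data: uint8 array of shape (10000, 3, 32, 32); labels: list of 10000 ints
#   test_data, test_labels = load_batch("cifar-10-batches-py/test_batch")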
6a96000d55f22510def511958af1f99b01dba806 | 523 | py | Python | day7/p2.py | Seralpa/Advent-of-code | 9633624e4ff48c50d8be3deac54c83059e9c3b04 | ["MIT"] | 1 | 2020-12-18T16:06:25.000Z | 2020-12-18T16:06:25.000Z | day7/p2.py | Seralpa/Advent-of-code | 9633624e4ff48c50d8be3deac54c83059e9c3b04 | ["MIT"] | null | null | null | day7/p2.py | Seralpa/Advent-of-code | 9633624e4ff48c50d8be3deac54c83059e9c3b04 | ["MIT"] | null | null | null |
def getNumBags(color):
if color=='':
return 0
numBags=1
for bag in rules[color]:
numBags+=bag[1]*getNumBags(bag[0])
return numBags
with open('day7/input.txt') as f:
rules=dict([l.split(' contain') for l in f.read().replace(' bags', '').replace(' bag', '').replace('.', '').replace(' no other', '0 ').splitlines()])
for key in rules:
rules[key]=[(d[2:].strip(), int(d[:2].strip())) for d in rules[key].split(', ')]
print(getNumBags('shiny gold')-1)  # -1 because the shiny gold bag itself is not counted
| 40.230769 | 153 | 0.596558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 113 | 0.216061 |
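# Shape of the parsed rules dict, for illustration (example colors assumed):
#
#   rules = {'light red': [('bright white', 1), ('muted yellow', 2)],
#            'dotted black': [('', 0)]}   # "no other bags" becomes ('', 0)
#   getNumBags('light red')  # 1 + 1*getNumBags('bright white') + 2*getNumBags('muted yellow')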
6a9611abd97c536f926ca250cbefadb44ebcbbc2 | 471 | py | Python | adv/luther.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | ["Apache-2.0"] | null | null | null | adv/luther.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | ["Apache-2.0"] | null | null | null | adv/luther.py | 6tennis/dl | 69eb7e71da9fabe9e7ec40c461b525b4f967f345 | ["Apache-2.0"] | null | null | null |
from core.advbase import *
from slot.d import *
def module():
return Luther
class Luther(Adv):
a1 = ('cc',0.10,'hit15')
conf = {}
conf ['slots.d'] = Leviathan()
conf['acl'] = """
`dragon
`s1
`s2, seq=5 and cancel
`s3, seq=5 and cancel or fsc
`fs, seq=5
"""
coab = ['Blade', 'Xander', 'Tiki']
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | 20.478261 | 44 | 0.552017 | 280 | 0.59448 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.384289 |
6a9615693970b5561756763d7533ebc0f325ce0c | 21,646 | py | Python | wandb/sdk/data_types/image.py | macio232/client | 295380c99b1a0946470672d40348b17a674ad17f | [
"MIT"
] | null | null | null | wandb/sdk/data_types/image.py | macio232/client | 295380c99b1a0946470672d40348b17a674ad17f | [
"MIT"
] | null | null | null | wandb/sdk/data_types/image.py | macio232/client | 295380c99b1a0946470672d40348b17a674ad17f | [
"MIT"
] | null | null | null | import hashlib
from io import BytesIO
import logging
import os
from typing import Any, cast, Dict, List, Optional, Sequence, Type, TYPE_CHECKING, Union
from pkg_resources import parse_version
import wandb
from wandb import util
from ._private import MEDIA_TMP
from .base_types.media import BatchableMedia, Media
from .helper_types.bounding_boxes_2d import BoundingBoxes2D
from .helper_types.classes import Classes
from .helper_types.image_mask import ImageMask
if TYPE_CHECKING: # pragma: no cover
import matplotlib # type: ignore
import numpy as np # type: ignore
import PIL # type: ignore
import torch # type: ignore
from wandb.apis.public import Artifact as PublicArtifact
from ..wandb_artifacts import Artifact as LocalArtifact
from ..wandb_run import Run as LocalRun
ImageDataType = Union[
"matplotlib.artist.Artist", "PIL.Image", "TorchTensorType", "np.ndarray"
]
ImageDataOrPathType = Union[str, "Image", ImageDataType]
TorchTensorType = Union["torch.Tensor", "torch.Variable"]
def _server_accepts_image_filenames() -> bool:
# Newer versions of wandb accept large image filenames arrays
# but older versions would have issues with this.
max_cli_version = util._get_max_cli_version()
if max_cli_version is None:
return False
return parse_version("0.12.10") <= parse_version(max_cli_version)
class Image(BatchableMedia):
"""Format images for logging to W&B.
Arguments:
data_or_path: (numpy array, string, io) Accepts numpy array of
image data, or a PIL image. The class attempts to infer
the data format and converts it.
mode: (string) The PIL mode for an image. Most common are "L", "RGB",
"RGBA". Full explanation at https://pillow.readthedocs.io/en/4.2.x/handbook/concepts.html#concept-modes.
caption: (string) Label for display of image.
Examples:
### Create a wandb.Image from a numpy array
        <!--yeadoc-test:log-image-numpy-->
```python
import numpy as np
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3))
image = wandb.Image(pixels, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
### Create a wandb.Image from a PILImage
        <!--yeadoc-test:log-image-pil-->
```python
import numpy as np
from PIL import Image as PILImage
import wandb
wandb.init()
examples = []
for i in range(3):
pixels = np.random.randint(low=0, high=256, size=(100, 100, 3), dtype=np.uint8)
pil_image = PILImage.fromarray(pixels, mode="RGB")
image = wandb.Image(pil_image, caption=f"random field {i}")
examples.append(image)
wandb.log({"examples": examples})
```
"""
MAX_ITEMS = 108
# PIL limit
MAX_DIMENSION = 65500
_log_type = "image-file"
format: Optional[str]
_grouping: Optional[int]
_caption: Optional[str]
_width: Optional[int]
_height: Optional[int]
_image: Optional["PIL.Image"]
_classes: Optional["Classes"]
_boxes: Optional[Dict[str, "BoundingBoxes2D"]]
_masks: Optional[Dict[str, "ImageMask"]]
def __init__(
self,
data_or_path: "ImageDataOrPathType",
mode: Optional[str] = None,
caption: Optional[str] = None,
grouping: Optional[int] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
super(Image, self).__init__()
# TODO: We should remove grouping, it's a terrible name and I don't
# think anyone uses it.
self._grouping = None
self._caption = None
self._width = None
self._height = None
self._image = None
self._classes = None
self._boxes = None
self._masks = None
# Allows the user to pass an Image object as the first parameter and have a perfect copy,
        # only overriding additional metadata passed in. If this pattern is compelling, we can generalize.
if isinstance(data_or_path, Image):
self._initialize_from_wbimage(data_or_path)
elif isinstance(data_or_path, str):
self._initialize_from_path(data_or_path)
else:
self._initialize_from_data(data_or_path, mode)
self._set_initialization_meta(grouping, caption, classes, boxes, masks)
def _set_initialization_meta(
self,
grouping: Optional[int] = None,
caption: Optional[str] = None,
classes: Optional[Union["Classes", Sequence[dict]]] = None,
boxes: Optional[Union[Dict[str, "BoundingBoxes2D"], Dict[str, dict]]] = None,
masks: Optional[Union[Dict[str, "ImageMask"], Dict[str, dict]]] = None,
) -> None:
if grouping is not None:
self._grouping = grouping
if caption is not None:
self._caption = caption
total_classes = {}
if boxes:
if not isinstance(boxes, dict):
raise ValueError('Images "boxes" argument must be a dictionary')
boxes_final: Dict[str, BoundingBoxes2D] = {}
for key in boxes:
box_item = boxes[key]
if isinstance(box_item, BoundingBoxes2D):
boxes_final[key] = box_item
elif isinstance(box_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
boxes_final[key] = BoundingBoxes2D(box_item, key)
total_classes.update(boxes_final[key]._class_labels)
self._boxes = boxes_final
if masks:
if not isinstance(masks, dict):
raise ValueError('Images "masks" argument must be a dictionary')
masks_final: Dict[str, ImageMask] = {}
for key in masks:
mask_item = masks[key]
if isinstance(mask_item, ImageMask):
masks_final[key] = mask_item
elif isinstance(mask_item, dict):
# TODO: Consider injecting top-level classes if user-provided is empty
masks_final[key] = ImageMask(mask_item, key)
if hasattr(masks_final[key], "_val"):
total_classes.update(masks_final[key]._val["class_labels"])
self._masks = masks_final
if classes is not None:
if isinstance(classes, Classes):
total_classes.update(
{val["id"]: val["name"] for val in classes._class_set}
)
else:
total_classes.update({val["id"]: val["name"] for val in classes})
if len(total_classes.keys()) > 0:
self._classes = Classes(
[
{"id": key, "name": total_classes[key]}
for key in total_classes.keys()
]
)
self._width, self._height = self.image.size # type: ignore
self._free_ram()
def _initialize_from_wbimage(self, wbimage: "Image") -> None:
self._grouping = wbimage._grouping
self._caption = wbimage._caption
self._width = wbimage._width
self._height = wbimage._height
self._image = wbimage._image
self._classes = wbimage._classes
self._path = wbimage._path
self._is_tmp = wbimage._is_tmp
self._extension = wbimage._extension
self._sha256 = wbimage._sha256
self._size = wbimage._size
self.format = wbimage.format
self._artifact_source = wbimage._artifact_source
self._artifact_target = wbimage._artifact_target
# We do not want to implicitly copy boxes or masks, just the image-related data.
# self._boxes = wbimage._boxes
# self._masks = wbimage._masks
def _initialize_from_path(self, path: str) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._set_file(path, is_tmp=False)
self._image = pil_image.open(path)
self._image.load()
ext = os.path.splitext(path)[1][1:]
self.format = ext
    def _initialize_from_data(self, data: "ImageDataType", mode: Optional[str] = None,) -> None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
if util.is_matplotlib_typename(util.get_full_typename(data)):
buf = BytesIO()
util.ensure_matplotlib_figure(data).savefig(buf)
self._image = pil_image.open(buf)
elif isinstance(data, pil_image.Image):
self._image = data
elif util.is_pytorch_tensor_typename(util.get_full_typename(data)):
vis_util = util.get_module(
"torchvision.utils", "torchvision is required to render images"
)
if hasattr(data, "requires_grad") and data.requires_grad:
data = data.detach()
data = vis_util.make_grid(data, normalize=True)
self._image = pil_image.fromarray(
data.mul(255).clamp(0, 255).byte().permute(1, 2, 0).cpu().numpy()
)
else:
if hasattr(data, "numpy"): # TF data eager tensors
data = data.numpy()
if data.ndim > 2:
data = data.squeeze() # get rid of trivial dimensions as a convenience
self._image = pil_image.fromarray(
self.to_uint8(data), mode=mode or self.guess_mode(data)
)
tmp_path = os.path.join(MEDIA_TMP.name, str(util.generate_id()) + ".png")
self.format = "png"
self._image.save(tmp_path, transparency=None)
self._set_file(tmp_path, is_tmp=True)
@classmethod
def from_json(
cls: Type["Image"], json_obj: dict, source_artifact: "PublicArtifact"
) -> "Image":
classes = None
if json_obj.get("classes") is not None:
classes = source_artifact.get(json_obj["classes"]["path"])
masks = json_obj.get("masks")
_masks: Optional[Dict[str, ImageMask]] = None
if masks:
_masks = {}
for key in masks:
_masks[key] = ImageMask.from_json(masks[key], source_artifact)
_masks[key]._set_artifact_source(source_artifact)
_masks[key]._key = key
boxes = json_obj.get("boxes")
_boxes: Optional[Dict[str, BoundingBoxes2D]] = None
if boxes:
_boxes = {}
for key in boxes:
_boxes[key] = BoundingBoxes2D.from_json(boxes[key], source_artifact)
_boxes[key]._key = key
return cls(
source_artifact.get_path(json_obj["path"]).download(),
caption=json_obj.get("caption"),
grouping=json_obj.get("grouping"),
classes=classes,
boxes=_boxes,
masks=_masks,
)
@classmethod
def get_media_subdir(cls: Type["Image"]) -> str:
return os.path.join("media", "images")
def bind_to_run(
self,
run: "LocalRun",
key: Union[int, str],
step: Union[int, str],
id_: Optional[Union[int, str]] = None,
ignore_copy_err: Optional[bool] = None,
) -> None:
super().bind_to_run(run, key, step, id_, ignore_copy_err=ignore_copy_err)
if self._boxes is not None:
for i, k in enumerate(self._boxes):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._boxes[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
if self._masks is not None:
for i, k in enumerate(self._masks):
id_ = "{}{}".format(id_, i) if id_ is not None else None
self._masks[k].bind_to_run(
run, key, step, id_, ignore_copy_err=ignore_copy_err
)
def to_json(self, run_or_artifact: Union["LocalRun", "LocalArtifact"]) -> dict:
json_dict = super(Image, self).to_json(run_or_artifact)
json_dict["_type"] = Image._log_type
json_dict["format"] = self.format
if self._width is not None:
json_dict["width"] = self._width
if self._height is not None:
json_dict["height"] = self._height
if self._grouping:
json_dict["grouping"] = self._grouping
if self._caption:
json_dict["caption"] = self._caption
if isinstance(run_or_artifact, wandb.wandb_sdk.wandb_artifacts.Artifact):
artifact = run_or_artifact
if (
self._masks is not None or self._boxes is not None
) and self._classes is None:
raise ValueError(
"classes must be passed to wandb.Image which have masks or bounding boxes when adding to artifacts"
)
if self._classes is not None:
class_id = hashlib.md5(
str(self._classes._class_set).encode("utf-8")
).hexdigest()
class_name = os.path.join("media", "classes", class_id + "_cls",)
classes_entry = artifact.add(self._classes, class_name)
json_dict["classes"] = {
"type": "classes-file",
"path": classes_entry.path,
"digest": classes_entry.digest,
}
elif not isinstance(run_or_artifact, wandb.wandb_sdk.wandb_run.Run):
raise ValueError("to_json accepts wandb_run.Run or wandb_artifact.Artifact")
if self._boxes:
json_dict["boxes"] = {
k: box.to_json(run_or_artifact) for (k, box) in self._boxes.items()
}
if self._masks:
json_dict["masks"] = {
k: mask.to_json(run_or_artifact) for (k, mask) in self._masks.items()
}
return json_dict
def guess_mode(self, data: "np.ndarray") -> str:
"""
Guess what type of image the np.array is representing
"""
# TODO: do we want to support dimensions being at the beginning of the array?
if data.ndim == 2:
return "L"
elif data.shape[-1] == 3:
return "RGB"
elif data.shape[-1] == 4:
return "RGBA"
else:
raise ValueError(
"Un-supported shape for image conversion %s" % list(data.shape)
)
@classmethod
def to_uint8(cls, data: "np.ndarray") -> "np.ndarray":
"""
Converts floating point image on the range [0,1] and integer images
on the range [0,255] to uint8, clipping if necessary.
"""
np = util.get_module(
"numpy",
required="wandb.Image requires numpy if not supplying PIL Images: pip install numpy",
)
# I think it's better to check the image range vs the data type, since many
# image libraries will return floats between 0 and 255
# some images have range -1...1 or 0-1
dmin = np.min(data)
if dmin < 0:
data = (data - np.min(data)) / np.ptp(data)
if np.max(data) <= 1.0:
data = (data * 255).astype(np.int32)
# assert issubclass(data.dtype.type, np.integer), 'Illegal image format.'
return data.clip(0, 255).astype(np.uint8)
@classmethod
def seq_to_json(
cls: Type["Image"],
seq: Sequence["BatchableMedia"],
run: "LocalRun",
key: str,
step: Union[int, str],
) -> dict:
"""
Combines a list of images into a meta dictionary object describing the child images.
"""
if TYPE_CHECKING:
seq = cast(Sequence["Image"], seq)
jsons = [obj.to_json(run) for obj in seq]
media_dir = cls.get_media_subdir()
for obj in jsons:
expected = util.to_forward_slash_path(media_dir)
if not obj["path"].startswith(expected):
raise ValueError(
"Files in an array of Image's must be in the {} directory, not {}".format(
cls.get_media_subdir(), obj["path"]
)
)
num_images_to_log = len(seq)
width, height = seq[0].image.size # type: ignore
format = jsons[0]["format"]
def size_equals_image(image: "Image") -> bool:
img_width, img_height = image.image.size # type: ignore
return img_width == width and img_height == height # type: ignore
sizes_match = all(size_equals_image(img) for img in seq)
if not sizes_match:
logging.warning(
"Images sizes do not match. This will causes images to be display incorrectly in the UI."
)
meta = {
"_type": "images/separated",
"width": width,
"height": height,
"format": format,
"count": num_images_to_log,
}
if _server_accepts_image_filenames():
meta["filenames"] = [obj["path"] for obj in jsons]
else:
wandb.termwarn(
"Unable to log image array filenames. In some cases, this can prevent images from being"
"viewed in the UI. Please upgrade your wandb server",
repeat=False,
)
captions = Image.all_captions(seq)
if captions:
meta["captions"] = captions
all_masks = Image.all_masks(seq, run, key, step)
if all_masks:
meta["all_masks"] = all_masks
all_boxes = Image.all_boxes(seq, run, key, step)
if all_boxes:
meta["all_boxes"] = all_boxes
return meta
@classmethod
def all_masks(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_mask_groups: List[Optional[dict]] = []
for image in images:
if image._masks:
mask_group = {}
for k in image._masks:
mask = image._masks[k]
mask_group[k] = mask.to_json(run)
all_mask_groups.append(mask_group)
else:
all_mask_groups.append(None)
if all_mask_groups and not all(x is None for x in all_mask_groups):
return all_mask_groups
else:
return False
@classmethod
def all_boxes(
cls: Type["Image"],
images: Sequence["Image"],
run: "LocalRun",
run_key: str,
step: Union[int, str],
) -> Union[List[Optional[dict]], bool]:
all_box_groups: List[Optional[dict]] = []
for image in images:
if image._boxes:
box_group = {}
for k in image._boxes:
box = image._boxes[k]
box_group[k] = box.to_json(run)
all_box_groups.append(box_group)
else:
all_box_groups.append(None)
if all_box_groups and not all(x is None for x in all_box_groups):
return all_box_groups
else:
return False
@classmethod
def all_captions(
cls: Type["Image"], images: Sequence["Media"]
) -> Union[bool, Sequence[Optional[str]]]:
return cls.captions(images)
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Image):
return False
else:
self_image = self.image
other_image = other.image
if self_image is not None:
self_image = list(self_image.getdata())
if other_image is not None:
other_image = list(other_image.getdata())
return (
self._grouping == other._grouping
and self._caption == other._caption
and self._width == other._width
and self._height == other._height
and self_image == other_image
and self._classes == other._classes
)
def to_data_array(self) -> List[Any]:
res = []
if self.image is not None:
data = list(self.image.getdata())
for i in range(self.image.height):
res.append(data[i * self.image.width : (i + 1) * self.image.width])
self._free_ram()
return res
def _free_ram(self) -> None:
if self._path is not None:
self._image = None
@property
def image(self) -> Optional["PIL.Image"]:
if self._image is None:
if self._path is not None:
pil_image = util.get_module(
"PIL.Image",
required='wandb.Image needs the PIL package. To get it, run "pip install pillow".',
)
self._image = pil_image.open(self._path)
self._image.load()
return self._image
| 36.688136 | 119 | 0.572438 | 20,254 | 0.935693 | 0 | 0 | 6,687 | 0.308925 | 0 | 0 | 5,037 | 0.232699 |
6a961a7708b268c3d81ea73ab8b93515bd578d6c | 669 | py | Python | src/ACC_Backend_Utils.py | skostic14/isda-racing-backend | 41b5f9760dc17a29aa8ab5e4cc1894a27496a72c | [
"Apache-2.0"
] | 1 | 2021-07-29T05:29:06.000Z | 2021-07-29T05:29:06.000Z | src/ACC_Backend_Utils.py | skostic14/isda-racing-backend | 41b5f9760dc17a29aa8ab5e4cc1894a27496a72c | [
"Apache-2.0"
] | null | null | null | src/ACC_Backend_Utils.py | skostic14/isda-racing-backend | 41b5f9760dc17a29aa8ab5e4cc1894a27496a72c | [
"Apache-2.0"
] | null | null | null | import datetime
# Gets time from milliseconds
# Returns string formatted as HH:MM:SS:mmm, MM:SS:mmm or S:mmm, depending on the time.
def get_time_from_milliseconds(milli):
milliseconds = milli % 1000
seconds= (milli//1000)%60
minutes= (milli//(1000*60))%60
hours= (milli//(1000*60*60))%24
if hours == 0:
if minutes == 0:
return '%d.%03d' % (seconds, milliseconds)
return '%02d:%02d.%03d' % (minutes, seconds, milliseconds)
return '%02d:%02d:%02d.%03d' % (hours, minutes, seconds, milliseconds)
# Returns a string formatted as YYYY-MM-DD
def get_date_today():
    return datetime.date.today().strftime("%Y-%m-%d")
| 35.210526 | 87 | 0.651719 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 214 | 0.31988 |
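# Example outputs (added for illustration):
#
#   get_time_from_milliseconds(5123)     -> '5.123'
#   get_time_from_milliseconds(61123)    -> '01:01.123'
#   get_time_from_milliseconds(3661123)  -> '01:01:01.123'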
6a961b6b72524b941aa7777c8c1e4c9ea87f76f0 | 2,721 | py | Python | examples/advanced/pidigits.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | ["BSD-3-Clause"] | 3 | 2015-01-17T23:15:04.000Z | 2015-05-26T14:11:44.000Z | examples/advanced/pidigits.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | ["BSD-3-Clause"] | 7 | 2015-03-23T23:33:02.000Z | 2019-02-09T00:19:41.000Z | examples/advanced/pidigits.py | ovolve/sympy | 0a15782f20505673466b940454b33b8014a25c13 | ["BSD-3-Clause"] | 1 | 2019-10-18T12:39:41.000Z | 2019-10-18T12:39:41.000Z |
#!/usr/bin/env python
"""Pi digits example
Example shows arbitrary precision using mpmath with the
computation of the digits of pi.
"""
from mpmath import libmp, pi
from mpmath import functions as mpf_funs
import math
from time import clock
import sys
def display_fraction(digits, skip=0, colwidth=10, columns=5):
"""Pretty printer for first n digits of a fraction"""
perline = colwidth * columns
printed = 0
for linecount in range((len(digits) - skip) // (colwidth * columns)):
line = digits[skip + linecount*perline:skip + (linecount + 1)*perline]
for i in range(columns):
print(line[i*colwidth: (i + 1)*colwidth],)
print(":", (linecount + 1)*perline)
if (linecount + 1) % 10 == 0:
print
            print()
rem = (len(digits) - skip) % (colwidth * columns)
if rem:
buf = digits[-rem:]
s = ""
for i in range(columns):
s += buf[:colwidth].ljust(colwidth + 1, " ")
buf = buf[colwidth:]
print(s + ":", printed + colwidth*columns)
def calculateit(func, base, n, tofile):
"""Writes first n base-digits of a mpmath function to file"""
prec = 100
intpart = libmp.numeral(3, base)
if intpart == 0:
skip = 0
else:
skip = len(intpart)
print("Step 1 of 2: calculating binary value...")
prec = int(n*math.log(base, 2)) + 10
t = clock()
a = func(prec)
step1_time = clock() - t
print("Step 2 of 2: converting to specified base...")
t = clock()
d = libmp.bin_to_radix(a.man, -a.exp, base, n)
d = libmp.numeral(d, base, n)
step2_time = clock() - t
print("\nWriting output...\n")
if tofile:
out_ = sys.stdout
sys.stdout = tofile
print("%i base-%i digits of pi:\n" % (n, base))
print(intpart, ".\n")
display_fraction(d, skip, colwidth=10, columns=5)
if tofile:
sys.stdout = out_
print("\nFinished in %f seconds (%f calc, %f convert)" % \
((step1_time + step2_time), step1_time, step2_time))
def interactive():
"""Simple function to interact with user"""
print("Compute digits of pi with SymPy\n")
base = input("Which base? (2-36, 10 for decimal) \n> ")
digits = input("How many digits? (enter a big number, say, 10000)\n> ")
tofile = raw_input("Output to file? (enter a filename, or just press enter\nto print directly to the screen) \n> ")
if tofile:
tofile = open(tofile, "w")
calculateit(pi, base, digits, tofile)
def main():
"""A non-interactive runner"""
base = 16
digits = 500
tofile = None
calculateit(pi, base, digits, tofile)
if __name__ == "__main__":
interactive()
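# Editor's note (added): for a non-interactive run, call main() instead of
# interactive(), or invoke calculateit directly, e.g.:
#
#   calculateit(pi, 10, 1000, None)   # print 1000 decimal digits of pi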
| 30.233333 | 119 | 0.602352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 764 | 0.280779 |
6a975a412e5d4fce44f6c3c23326742e66d78cbe | 1,272 | py | Python | authserver/mailauth/migrations/0011_mnserviceuser.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | ["MIT"] | 8 | 2017-07-04T10:07:32.000Z | 2022-01-02T10:31:43.000Z | authserver/mailauth/migrations/0011_mnserviceuser.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | ["MIT"] | 14 | 2020-02-11T21:42:38.000Z | 2022-03-28T16:00:55.000Z | authserver/mailauth/migrations/0011_mnserviceuser.py | yopiti/authserver | 0a1f7f5a83d03963d1ecfb5199be8e05d3068dfd | ["MIT"] | 1 | 2020-03-01T10:39:28.000Z | 2020-03-01T10:39:28.000Z |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-03-13 00:16
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import mailauth.models
import uuid
class Migration(migrations.Migration):
dependencies = [
('mailauth', '0010_domain_redirect_to'),
]
operations = [
migrations.CreateModel(
name='MNServiceUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(default=uuid.uuid4, max_length=64, verbose_name='Username')),
('password', mailauth.models.PretendHasherPasswordField(max_length=128, verbose_name='Password')),
('description', models.CharField(blank=True, default='', max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'Service User',
'verbose_name_plural': 'Service Users',
},
bases=(mailauth.models.PasswordMaskMixin, models.Model),
),
]
| 36.342857 | 118 | 0.633648 | 1,012 | 0.795597 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.200472 |
6a97a004a7c418b0d32aaf5764a1c6b24a50f26a | 10,580 | py | Python | tempest/hacking/checks.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | ["Apache-2.0"] | 2 | 2015-08-13T00:07:49.000Z | 2020-08-07T06:38:44.000Z | tempest/hacking/checks.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | ["Apache-2.0"] | null | null | null | tempest/hacking/checks.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | ["Apache-2.0"] | 3 | 2016-08-30T06:53:54.000Z | 2021-03-22T16:54:39.000Z |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
from hacking import core
import pycodestyle
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
'ironic', 'heat', 'sahara']
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
TEST_DEFINITION = re.compile(r'^\s*def test.*')
SETUP_TEARDOWN_CLASS_DEFINITION = re.compile(r'^\s+def (setUp|tearDown)Class')
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
RAND_NAME_HYPHEN_RE = re.compile(r".*rand_name\(.+[\-\_][\"\']\)")
mutable_default_args = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])")
TESTTOOLS_SKIP_DECORATOR = re.compile(r'\s*@testtools\.skip\((.*)\)')
METHOD = re.compile(r"^ def .+")
METHOD_GET_RESOURCE = re.compile(r"^\s*def (list|show)\_.+")
METHOD_DELETE_RESOURCE = re.compile(r"^\s*def delete_.+")
CLASS = re.compile(r"^class .+")
EX_ATTRIBUTE = re.compile(r'(\s+|\()(e|ex|exc|exception).message(\s+|\))')
NEGATIVE_TEST_DECORATOR = re.compile(
r'\s*@decorators\.attr\(type=.*negative.*\)')
_HAVE_NEGATIVE_DECORATOR = False
@core.flake8ext
def import_no_clients_in_api_and_scenario_tests(physical_line, filename):
"""Check for client imports from tempest/api & tempest/scenario tests
T102: Cannot import OpenStack python clients
"""
if "tempest/api" in filename or "tempest/scenario" in filename:
res = PYTHON_CLIENT_RE.match(physical_line)
if res:
return (physical_line.find(res.group(1)),
("T102: python clients import not allowed"
" in tempest/api/* or tempest/scenario/* tests"))
@core.flake8ext
def scenario_tests_need_service_tags(physical_line, filename,
previous_logical):
"""Check that scenario tests have service tags
T104: Scenario tests require a services decorator
"""
if 'tempest/scenario/' in filename and '/test_' in filename:
if TEST_DEFINITION.match(physical_line):
if not SCENARIO_DECORATOR.match(previous_logical):
return (physical_line.find('def'),
"T104: Scenario tests require a service decorator")
@core.flake8ext
def no_setup_teardown_class_for_tests(physical_line, filename):
if pycodestyle.noqa(physical_line):
return
if 'tempest/test.py' in filename or 'tempest/lib/' in filename:
return
if SETUP_TEARDOWN_CLASS_DEFINITION.match(physical_line):
return (physical_line.find('def'),
"T105: (setUp|tearDown)Class can not be used in tests")
@core.flake8ext
def service_tags_not_in_module_path(physical_line, filename):
"""Check that a service tag isn't in the module path
A service tag should only be added if the service name isn't already in
the module path.
T107
"""
# NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
# created for services like heat which would cause false negatives for
# those tests, so just exclude the scenario tests.
if 'tempest/scenario' not in filename:
matches = SCENARIO_DECORATOR.match(physical_line)
if matches:
services = matches.group(1).split(',')
for service in services:
service_name = service.strip().strip("'")
modulepath = os.path.split(filename)[0]
if service_name in modulepath:
return (physical_line.find(service_name),
"T107: service tag should not be in path")
@core.flake8ext
def no_hyphen_at_end_of_rand_name(logical_line, filename):
"""Check no hyphen at the end of rand_name() argument
T108
"""
msg = "T108: hyphen should not be specified at the end of rand_name()"
if RAND_NAME_HYPHEN_RE.match(logical_line):
return 0, msg
@core.flake8ext
def no_mutable_default_args(logical_line):
"""Check that mutable object isn't used as default argument
N322: Method's default argument shouldn't be mutable
"""
msg = "N322: Method's default argument shouldn't be mutable!"
if mutable_default_args.match(logical_line):
yield (0, msg)
@core.flake8ext
def no_testtools_skip_decorator(logical_line):
"""Check that methods do not have the testtools.skip decorator
T109
"""
if TESTTOOLS_SKIP_DECORATOR.match(logical_line):
yield (0, "T109: Cannot use testtools.skip decorator; instead use "
"decorators.skip_because from tempest.lib")
def _common_service_clients_check(logical_line, physical_line, filename,
ignored_list_file=None):
if not re.match('tempest/(lib/)?services/.*', filename):
return False
if ignored_list_file is not None:
ignored_list = []
with open('tempest/hacking/' + ignored_list_file) as f:
for line in f:
ignored_list.append(line.strip())
if filename in ignored_list:
return False
if not METHOD.match(physical_line):
return False
if pycodestyle.noqa(physical_line):
return False
return True
@core.flake8ext
def get_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of GET should be consistent
T110
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T110.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.get(' not in line and ('self.show_resource(' not in line and
'self.list_resources(' not in line):
continue
if METHOD_GET_RESOURCE.match(logical_line):
return
msg = ("T110: [GET /resources] methods should be list_<resource name>s"
" or show_<resource name>")
yield (0, msg)
@core.flake8ext
def delete_resources_on_service_clients(physical_line, logical_line, filename,
line_number, lines):
"""Check that service client names of DELETE should be consistent
T111
"""
if not _common_service_clients_check(logical_line, physical_line,
filename, 'ignored_list_T111.txt'):
return
for line in lines[line_number:]:
if METHOD.match(line) or CLASS.match(line):
# the end of a method
return
if 'self.delete(' not in line and 'self.delete_resource(' not in line:
continue
if METHOD_DELETE_RESOURCE.match(logical_line):
return
msg = ("T111: [DELETE /resources/<id>] methods should be "
"delete_<resource name>")
yield (0, msg)
@core.flake8ext
def dont_import_local_tempest_into_lib(logical_line, filename):
"""Check that tempest.lib should not import local tempest code
T112
"""
if 'tempest/lib/' not in filename:
return
if not ('from tempest' in logical_line or
'import tempest' in logical_line):
return
if ('from tempest.lib' in logical_line or
'import tempest.lib' in logical_line):
return
msg = ("T112: tempest.lib should not import local tempest code to avoid "
"circular dependency")
yield (0, msg)
@core.flake8ext
def use_rand_uuid_instead_of_uuid4(logical_line, filename):
"""Check that tests use data_utils.rand_uuid() instead of uuid.uuid4()
T113
"""
if 'tempest/lib/' in filename:
return
if 'uuid.uuid4()' not in logical_line:
return
msg = ("T113: Tests should use data_utils.rand_uuid()/rand_uuid_hex() "
"instead of uuid.uuid4()/uuid.uuid4().hex")
yield (0, msg)
@core.flake8ext
def dont_use_config_in_tempest_lib(logical_line, filename):
"""Check that tempest.lib doesn't use tempest config
T114
"""
if 'tempest/lib/' not in filename:
return
if ('tempest.config' in logical_line or
'from tempest import config' in logical_line or
'oslo_config' in logical_line):
msg = ('T114: tempest.lib can not have any dependency on tempest '
'config.')
        yield (0, msg)
@core.flake8ext
def dont_put_admin_tests_on_nonadmin_path(logical_line,
filename):
"""Check admin tests should exist under admin path
T115
"""
if 'tempest/api/' not in filename:
return
if not re.match(r'class .*Test.*\(.*Admin.*\):', logical_line):
return
if not re.match(r'.\/tempest\/api\/.*\/admin\/.*', filename):
msg = 'T115: All admin tests should exist under admin path.'
        yield (0, msg)
@core.flake8ext
def unsupported_exception_attribute_PY3(logical_line):
"""Check Unsupported 'message' exception attribute in PY3
T116
"""
result = EX_ATTRIBUTE.search(logical_line)
msg = ("[T116] Unsupported 'message' Exception attribute in PY3")
if result:
        yield (0, msg)
@core.flake8ext
def negative_test_attribute_always_applied_to_negative_tests(physical_line,
filename):
"""Check ``@decorators.attr(type=['negative'])`` applied to negative tests.
T117
"""
global _HAVE_NEGATIVE_DECORATOR
if re.match(r'.\/tempest\/api\/.*_negative.*', filename):
if NEGATIVE_TEST_DECORATOR.match(physical_line):
_HAVE_NEGATIVE_DECORATOR = True
return
if TEST_DEFINITION.match(physical_line):
if not _HAVE_NEGATIVE_DECORATOR:
return (
0, "T117: Must apply `@decorators.attr(type=['negative'])`"
" to all negative API tests"
)
_HAVE_NEGATIVE_DECORATOR = False
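# Illustrative T117 case (assumed tests in a *_negative.py module):
#   @decorators.attr(type=['negative'])
#   def test_delete_server_negative(self): ...   # OK
#   def test_rebuild_server_negative(self): ...  # flagged: attr missing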
| 31.963746 | 79 | 0.636578 | 0 | 0 | 4,583 | 0.433176 | 8,274 | 0.782042 | 0 | 0 | 4,222 | 0.399055 |
6a98547230e4cc83fa248137ca0fde09ebb67dcf | 1,018 | py | Python | data/train/python/6a98547230e4cc83fa248137ca0fde09ebb67dcfController.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/6a98547230e4cc83fa248137ca0fde09ebb67dcfController.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/6a98547230e4cc83fa248137ca0fde09ebb67dcfController.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z | import SimpleXMLRPCServer
import sys
import logging
from K8055Controller import K8055Controller
logging.basicConfig()
controller_log = logging.getLogger("Controller")
class Controller:
def __init__(self):
self.k8055 = K8055Controller()
controller_log.debug("initialized")
def reset(self):
self.k8055.reset()
controller_log.debug("reset")
return 0
def turn_on(self, i):
self.k8055.turn_on(i)
controller_log.debug('turned on %i' % (i))
return 0
def turn_off(self, i):
self.k8055.turn_off(i)
controller_log.debug('turned off %i' % (i))
return 0
def set_analog(self, i, level):
if (i == 1):
self.k8055.set_analog1(level)
else:
self.k8055.set_analog2(level)
return 0
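# Illustrative XML-RPC client for this server (host/port taken from the bind
# address below; xmlrpclib is the Python 2 client module):
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy("http://d6349.mysql.zone.ee:7000")
#   proxy.turn_on(1)
#   proxy.set_analog(1, 128)
#   proxy.reset()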
controller = Controller()
server = SimpleXMLRPCServer.SimpleXMLRPCServer(("d6349.mysql.zone.ee", 7000))
server.register_instance(controller)
server.serve_forever() | 24.829268 | 77 | 0.634578 | 674 | 0.662083 | 0 | 0 | 0 | 0 | 0 | 0 | 82 | 0.08055 |
6a98f5590ec68008681144a3cad2ee8a6d9f0359 | 55 | py | Python | model/__init__.py | sun1638650145/CRNN | 485157e5803b9be861a63ebb04f04fccb16ef5f1 | [
"Apache-2.0"
] | 11 | 2020-09-18T02:35:48.000Z | 2022-02-26T21:31:55.000Z | model/__init__.py | sun1638650145/CRNN | 485157e5803b9be861a63ebb04f04fccb16ef5f1 | [
"Apache-2.0"
] | null | null | null | model/__init__.py | sun1638650145/CRNN | 485157e5803b9be861a63ebb04f04fccb16ef5f1 | [
"Apache-2.0"
] | null | null | null | from .crnn import CRNN
from .crnn import CRNN_Attention | 27.5 | 32 | 0.836364 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a9907c6e19624e9a00da0b3cff99ba87e746680 | 3,206 | py | Python | models2.py | Lydia-Tan/MindLife | 644f1a3834f337d51c99650c3924df99c5200d06 | [
"MIT"
] | 1 | 2020-01-20T19:49:07.000Z | 2020-01-20T19:49:07.000Z | models2.py | lindaweng/Mindlife | 30be070b39728fb3fe149d4c95e5bce280a3b6a7 | [
"MIT"
] | null | null | null | models2.py | lindaweng/Mindlife | 30be070b39728fb3fe149d4c95e5bce280a3b6a7 | [
"MIT"
] | null | null | null | import nltk
import re
import sys
from sys import argv
from nltk.sentiment.vader import SentimentIntensityAnalyzer
def ajay(ans):
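    # Input shape (assumed from the split below): 'ans' is ten answers joined
    # by '$', e.g. ajay("I sleep badly$I lost interest in my hobbies$...")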
    analyzer = SentimentIntensityAnalyzer()  # local name avoids shadowing the enclosing function 'ajay'
    completeScore = 0
    questionWeights = [0.05, 0.20, 0.05, 0.05, 0.05, 0.20, 0.05, 0.05, 0.20, 0.10]
    print(ans)
    ansList = ans.split("$")
    for j in range(10):
        print(ansList[j])
    for i in range(10):
        score = 0
        count = 0
        # Split answer i into sentences on '.', '?' or '!' and average the
        # VADER compound scores over its non-empty sentences.
        for sentence in re.split(r"\.|\?|\!", ansList[i]):
            if not sentence.strip():
                continue
            ss = analyzer.polarity_scores(sentence)
            score += ss['compound']
            count += 1
        if count:
            # Each question's mean sentence score contributes by its weight.
            completeScore += (score / count) * questionWeights[i]
if (completeScore >= 0.1):
return "False Alarm! You don't have Depression."
elif (completeScore >= -0.1):
return ("Seasonal affective disorder (SAD). This type of depression " +
"emerges as days get shorter in the fall and winter. The mood "
+ "change may result from alterations in the body's natural daily "
+ "rhythms, in the eyes' sensitivity to light, or in how chemical "
+ "messengers like serotonin and melatonin function. The leading "
+ "treatment is light therapy, which involves daily sessions sitting "
+ "close to an especially intense light source. The usual treatments "
+ "for depression, such as psychotherapy and medication, may also be "
+ "effective.");
elif (completeScore >= -0.4):
return ("Persistent depressive disorder. Formerly called dysthymia, this "
+ "type of depression refers to low mood that has lasted for at least "
+ "two years but may not reach the intensity of major depression. Many "
+ "people with this type of depression type are able to function day to "
+ "but feel low or joyless much of the time. Some depressive symptoms, "
+ "such as appetite and sleep changes, low energy, low self-esteem, or "
+ "hopelessness, are usually part of the picture.")
else:
return ("The classic depression type, major depression is a state where a dark "
+ "mood is all-consuming and one loses interest in activities, even ones "
+ "that are usually pleasurable. Symptoms of this type of depression "
+ "include trouble sleeping, changes in appetite or weight, loss of energy, "
+ "and feeling worthless. Thoughts of death or suicide may occur. It is "
+ "usually treated with psychotherapy and medication. For some people with "
+ "severe depression that isn't alleviated with psychotherapy or antidepressant "
+ "medications, electroconvulsive therapy may be effective.") | 51.709677 | 98 | 0.585153 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,752 | 0.546475 |
6a9a4141ccd8a77a2a296371f9b8eb6510494db4 | 1,487 | py | Python | tokendito/tool.py | pcmxgti/tokendito | c1672917b1b95e463c5bdf8e9c3c039189da8e42 | [
"Apache-2.0"
] | 40 | 2019-07-31T03:21:03.000Z | 2022-03-29T23:57:19.000Z | tokendito/tool.py | pcmxgti/tokendito | c1672917b1b95e463c5bdf8e9c3c039189da8e42 | [
"Apache-2.0"
] | 27 | 2019-08-07T06:40:15.000Z | 2022-03-21T18:46:49.000Z | tokendito/tool.py | pcmxgti/tokendito | c1672917b1b95e463c5bdf8e9c3c039189da8e42 | [
"Apache-2.0"
] | 16 | 2019-07-31T14:22:04.000Z | 2022-02-16T12:55:27.000Z | # vim: set filetype=python ts=4 sw=4
# -*- coding: utf-8 -*-
"""This module retrieves AWS credentials after authenticating with Okta."""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from future import standard_library
from tokendito import aws_helpers
from tokendito import helpers
from tokendito import okta_helpers
from tokendito import settings
standard_library.install_aliases()
def cli(args):
"""Tokendito retrieves AWS credentials after authenticating with Okta."""
# Set some required initial values
args = helpers.setup(args)
logging.debug("tokendito retrieves AWS credentials after authenticating with Okta.")
# Collect and organize user specific information
helpers.process_options(args)
# Authenticate okta and AWS also use assumerole to assign the role
logging.debug("Authenticate user with Okta and AWS.")
secret_session_token = okta_helpers.authenticate_user(
settings.okta_org, settings.okta_username, settings.okta_password
)
saml_response_string, saml_xml = aws_helpers.authenticate_to_roles(
secret_session_token, settings.okta_aws_app_url
)
assume_role_response, role_name = aws_helpers.select_assumeable_role(
saml_response_string, saml_xml
)
aws_helpers.ensure_keys_work(assume_role_response)
helpers.set_local_credentials(
assume_role_response, role_name, settings.aws_region, settings.aws_output
)
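# Illustrative invocation (the shape of 'parsed_args' is an assumption; it
# normally comes from the package's argument parser):
#   cli(parsed_args)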
| 31.638298 | 88 | 0.774042 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 462 | 0.310693 |
6a9b47904f23c1124cf4fbc27654a8fe5f3b7493 | 42 | py | Python | resources/__init__.py | Boryslavq/UHMI_Chalenge | 4b7df902c0a0901c727a6fb26347dabca1067494 | [
"MIT"
] | null | null | null | resources/__init__.py | Boryslavq/UHMI_Chalenge | 4b7df902c0a0901c727a6fb26347dabca1067494 | [
"MIT"
] | null | null | null | resources/__init__.py | Boryslavq/UHMI_Chalenge | 4b7df902c0a0901c727a6fb26347dabca1067494 | [
"MIT"
] | null | null | null | from . import rest
from . import helpers
| 14 | 22 | 0.738095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
6a9c07074d315021100d6322a18c6bc3087be1db | 15,833 | py | Python | ir_datasets/formats/trec.py | cakiki/ir_datasets | 7f9f8e9ff62e49d40383220ecc2daa250695d267 | [
"Apache-2.0"
] | null | null | null | ir_datasets/formats/trec.py | cakiki/ir_datasets | 7f9f8e9ff62e49d40383220ecc2daa250695d267 | [
"Apache-2.0"
] | null | null | null | ir_datasets/formats/trec.py | cakiki/ir_datasets | 7f9f8e9ff62e49d40383220ecc2daa250695d267 | [
"Apache-2.0"
] | null | null | null | import io
import codecs
import tarfile
import re
import gzip
import xml.etree.ElementTree as ET
from fnmatch import fnmatch
from pathlib import Path
from typing import NamedTuple
import ir_datasets
from ir_datasets.indices import PickleLz4FullStore
from .base import GenericDoc, GenericQuery, GenericScoredDoc, BaseDocs, BaseQueries, BaseScoredDocs, BaseQrels
class TrecDoc(NamedTuple):
doc_id: str
text: str
marked_up_doc: str
class TitleUrlTextDoc(NamedTuple):
doc_id: str
title: str
url: str
text: str
class TrecQuery(NamedTuple):
query_id: str
title: str
description: str
narrative: str
class TrecSubtopic(NamedTuple):
number: str
text: str
type: str
class TrecQrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
iteration: str
class TrecPrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
method: int
iprob: float
# Default content tags from Anserini's TrecCollection
CONTENT_TAGS = 'TEXT HEADLINE TITLE HL HEAD TTL DD DATE LP LEADPARA'.split()
class TrecDocs(BaseDocs):
def __init__(self, docs_dlc, encoding=None, path_globs=None, content_tags=CONTENT_TAGS, parser='BS4', namespace=None, lang=None, expected_file_count=None, docstore_size_hint=None, count_hint=None):
self._docs_dlc = docs_dlc
self._encoding = encoding
self._path_globs = path_globs
self._content_tags = content_tags
self._parser = {
'BS4': self._parser_bs,
'text': self._parser_text,
'tut': self._parser_tut,
}[parser]
self._doc = {
'BS4': TrecDoc,
'text': GenericDoc,
'tut': TitleUrlTextDoc,
}[parser]
self._docs_namespace = namespace
self._docs_lang = lang
self._expected_file_count = expected_file_count
self._docstore_size_hint = docstore_size_hint
self._count_hint = count_hint
if expected_file_count is not None:
assert self._path_globs is not None, "expected_file_count only supported with path_globs"
def docs_path(self, force=True):
return self._docs_dlc.path(force)
@ir_datasets.util.use_docstore
def docs_iter(self):
if Path(self._docs_dlc.path()).is_dir():
if self._path_globs:
file_count = 0
for glob in sorted(self._path_globs):
for path in sorted(Path(self._docs_dlc.path()).glob(glob)):
file_count += 1
yield from self._docs_iter(path)
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
yield from self._docs_iter(self._docs_dlc.path())
else:
if self._path_globs:
file_count = 0
# tarfile, find globs, open in streaming mode (r|)
with self._docs_dlc.stream() as stream:
with tarfile.open(fileobj=stream, mode='r|gz') as tarf:
for block in tarf:
if any(fnmatch(block.name, g) for g in self._path_globs):
file = tarf.extractfile(block)
if block.name.endswith('.gz'):
file = gzip.GzipFile(fileobj=file)
yield from self._parser(file)
file_count += 1
if self._expected_file_count is not None:
if file_count != self._expected_file_count:
raise RuntimeError(f'found {file_count} files of the expected {self._expected_file_count} matching the following: {self._path_globs} under {self._docs_dlc.path()}. Make sure that directories are linked such that these globs match the correct number of files.')
else:
with self._docs_dlc.stream() as f:
yield from self._parser(f)
def _docs_iter(self, path):
if Path(path).is_file():
if str(path).endswith('.gz'):
with gzip.open(path, 'rb') as f:
yield from self._parser(f)
else:
with path.open('rb') as f:
yield from self._parser(f)
elif Path(path).is_dir():
for child in path.iterdir():
yield from self._docs_iter(child)
def _parser_bs(self, stream):
BeautifulSoup = ir_datasets.lazy_libs.bs4().BeautifulSoup
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_markup = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
soup = BeautifulSoup(f'<OUTER>\n{doc_markup}\n</OUTER>', 'lxml')
text = soup.get_text()
yield TrecDoc(doc_id, text, doc_markup)
doc_id, doc_markup = None, ''
else:
if in_tag:
doc_markup += line
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag -= 1
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag += 1
if in_tag == 1:
doc_markup += line
def _parser_text(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_text = None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
elif line == '</DOC>\n':
yield GenericDoc(doc_id, doc_text)
doc_id, doc_text = None, ''
else:
if line.startswith('</'):
if any(line.startswith(f'</{tag}>') for tag in self._content_tags):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<'):
if any(line.startswith(f'<{tag}>') for tag in self._content_tags):
in_tag = True
def _parser_tut(self, stream):
f = codecs.getreader(self._encoding or 'utf8')(stream, errors='replace')
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
in_tag = False
for line in f:
if line.startswith('<DOCNO>'):
doc_id = line.replace('<DOCNO>', '').replace('</DOCNO>\n', '').strip()
if line.startswith('<TITLE>'):
doc_title = line.replace('<TITLE>', '').replace('</TITLE>\n', '').strip()
if line.startswith('<URL>'):
doc_url = line.replace('<URL>', '').replace('</URL>\n', '').strip()
elif line == '</DOC>\n':
yield TitleUrlTextDoc(doc_id, doc_title, doc_url, doc_text)
doc_id, doc_title, doc_url, doc_text = None, None, None, ''
else:
if line.startswith('</TEXT>'):
in_tag = False
if in_tag:
doc_text += line
if line.startswith('<TEXT>'):
in_tag = True
def docs_cls(self):
return self._doc
def docs_store(self, field='doc_id'):
return PickleLz4FullStore(
path=f'{self.docs_path(force=False)}.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field=field,
index_fields=['doc_id'],
size_hint=self._docstore_size_hint,
count_hint=self._count_hint,
)
def docs_count(self):
if self.docs_store().built():
return self.docs_store().count()
def docs_namespace(self):
return self._docs_namespace
def docs_lang(self):
return self._docs_lang
DEFAULT_QTYPE_MAP = {
'<num> *(Number:)?': 'query_id',
'<title> *(Topic:)?': 'title',
'<desc> *(Description:)?': 'description',
'<narr> *(Narrative:)?': 'narrative'
}
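# The map above matches the classic TREC topic markup, e.g. (illustrative
# sample):
#   <top>
#   <num> Number: 301
#   <title> Topic: International Organized Crime
#   <desc> Description: ...
#   <narr> Narrative: ...
#   </top>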
class TrecQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, namespace=None, lang=None, remove_tags=('</title>',)):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or DEFAULT_QTYPE_MAP
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
self._remove_tags = remove_tags
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
fields, reading = {}, None
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
if line.startswith('</top>'):
assert len(fields) == len(self._qtype._fields), fields
for tag in self._remove_tags:
fields = {k: v.replace(tag, '') for k, v in fields.items()}
yield self._qtype(*(fields[f].strip() for f in self._qtype._fields))
fields, reading = {}, None
match_any = False
for tag, target in self._qtype_map.items():
match = re.match(tag, line)
if match:
fields[target] = line[match.end():]
reading = target
match_any = True
break
if not match_any and reading and not line.startswith('<'):
fields[reading] += line
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecXmlQueries(BaseQueries):
def __init__(self, queries_dlc, qtype=TrecQuery, qtype_map=None, encoding=None, subtopics_key='subtopics', namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._qtype = qtype
self._qtype_map = qtype_map or {f: f for f in qtype._fields}
self._encoding = encoding
self._subtopics_key = subtopics_key
self._queries_namespace = namespace
self._queries_lang = lang
def queries_path(self):
return self._queries_dlc.path()
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for topic_el in ET.fromstring(f.read()):
item = [None for _ in self._qtype._fields]
if 'number' in topic_el.attrib:
item[self._qtype._fields.index('query_id')] = topic_el.attrib['number']
subtopics = []
for attr in topic_el.attrib:
if attr in self._qtype_map:
text = topic_el.attrib[attr]
field = self._qtype_map[attr]
item[self._qtype._fields.index(field)] = text
if topic_el.tag in self._qtype_map:
text = ''.join(topic_el.itertext())
field = self._qtype_map[topic_el.tag]
item[self._qtype._fields.index(field)] = text
for field_el in topic_el:
if field_el.tag in self._qtype_map:
text = ''.join(field_el.itertext())
field = self._qtype_map[field_el.tag]
item[self._qtype._fields.index(field)] = text
if field_el.tag == 'subtopic':
text = ''.join(field_el.itertext())
subtopics.append(TrecSubtopic(field_el.attrib['number'], text, field_el.attrib['type']))
if self._subtopics_key in self._qtype._fields:
item[self._qtype._fields.index('subtopics')] = tuple(subtopics)
qid_field = self._qtype._fields.index('query_id')
item[qid_field] = item[qid_field].strip() # remove whitespace from query_ids
yield self._qtype(*item)
def queries_cls(self):
return self._qtype
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecColonQueries(BaseQueries):
def __init__(self, queries_dlc, encoding=None, namespace=None, lang=None):
self._queries_dlc = queries_dlc
self._encoding = encoding
self._queries_namespace = namespace
self._queries_lang = lang
def queries_iter(self):
with self._queries_dlc.stream() as f:
f = codecs.getreader(self._encoding or 'utf8')(f)
for line in f:
query_id, text = line.split(':', 1)
text = text.rstrip('\n')
yield GenericQuery(query_id, text)
def queries_path(self):
return self._queries_dlc.path()
def queries_cls(self):
return GenericQuery
def queries_namespace(self):
return self._queries_namespace
def queries_lang(self):
return self._queries_lang
class TrecQrels(BaseQrels):
def __init__(self, qrels_dlc, qrels_defs):
self._qrels_dlc = qrels_dlc
self._qrels_defs = qrels_defs
def qrels_path(self):
return self._qrels_dlc.path()
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 4:
raise RuntimeError(f'expected 4 columns, got {len(cols)}')
qid, it, did, score = cols
yield TrecQrel(qid, did, int(score), it)
def qrels_cls(self):
return TrecQrel
def qrels_defs(self):
return self._qrels_defs
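# Qrels lines parsed above are whitespace-separated 4-column TREC records:
#   <query_id> <iteration> <doc_id> <relevance>
# e.g. (illustrative): 301 0 FBIS3-10082 1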
class TrecPrels(TrecQrels):
def qrels_iter(self):
with self._qrels_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
if line == '\n':
continue # ignore blank lines
cols = line.rstrip().split()
if len(cols) != 5:
raise RuntimeError(f'expected 5 columns, got {len(cols)}')
qid, did, rel, method, iprob = cols
yield TrecPrel(qid, did, int(rel), int(method), float(iprob))
def qrels_cls(self):
return TrecPrel
class TrecScoredDocs(BaseScoredDocs):
def __init__(self, scoreddocs_dlc):
self._scoreddocs_dlc = scoreddocs_dlc
def scoreddocs_path(self):
return self._scoreddocs_dlc.path()
def scoreddocs_iter(self):
with self._scoreddocs_dlc.stream() as f:
f = codecs.getreader('utf8')(f)
for line in f:
cols = line.rstrip().split()
if len(cols) == 6:
qid, _, did, _, score, _ = cols
elif len(cols) == 2:
qid, did, score = *cols, '0'
yield GenericScoredDoc(qid, did, float(score))
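# Scored-doc lines handled above are either the 6-column TREC run format
#   <query_id> Q0 <doc_id> <rank> <score> <run_tag>
# or a bare 2-column '<query_id> <doc_id>' form, which is assigned score 0.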
| 38.429612 | 284 | 0.55978 | 15,125 | 0.955283 | 10,130 | 0.639803 | 2,126 | 0.134277 | 0 | 0 | 1,610 | 0.101686 |
6a9c552700ad0a75cac33278ee8dc5a5139c2432 | 844 | py | Python | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | 3 | 2021-12-10T21:13:47.000Z | 2021-12-10T23:36:18.000Z | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | 1 | 2022-01-06T20:59:07.000Z | 2022-01-06T20:59:07.000Z | textpand/download.py | caufieldjh/textpand-for-kgs | 42853c53c5a4cc06fbd745c147d02fe7916690fa | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .utils import download_from_yaml
def download(output_dir: str, snippet_only: bool, ignore_cache: bool = False) -> None:
"""Downloads data files from list of URLs (default: download.yaml) into data directory (default: data/).
Args:
output_dir: A string pointing to the location to download data to.
snippet_only: Downloads only the first 5 kB of the source, for testing and file checks.
ignore_cache: Ignore cache and download files even if they exist [false]
Returns:
None.
"""
download_from_yaml(yaml_file="download.yaml",
output_dir=output_dir,
snippet_only=snippet_only,
ignore_cache=ignore_cache,
verbose=True)
return None
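# Illustrative call (argument values are assumptions):
#   download(output_dir="data/", snippet_only=True)  # 5 kB heads for a quick check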
| 31.259259 | 108 | 0.625592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 466 | 0.552133 |
6a9cb003c79f63e5985173912dffc928314248d4 | 6,770 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 1 | 2020-01-22T13:11:23.000Z | 2020-01-22T13:11:23.000Z | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/netapp/ontap/plugins/modules/na_ontap_autosupport_invoke.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | null | null | null | #!/usr/bin/python
# (c) 2020, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
'''
na_ontap_autosupport_invoke
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'
}
DOCUMENTATION = '''
module: na_ontap_autosupport_invoke
author: NetApp Ansible Team (@carchi8py) <[email protected]>
short_description: NetApp ONTAP send AutoSupport message
extends_documentation_fragment:
- netapp.ontap.netapp.na_ontap
version_added: '20.4.0'
description:
- Send an AutoSupport message from a node
options:
name:
description:
- The name of the node to send the message to.
- Not specifying this option invokes AutoSupport on all nodes in the cluster.
type: str
autosupport_message:
description:
- Text sent in the subject line of the AutoSupport message.
type: str
aliases:
- message
version_added: 20.8.0
type:
description:
- Type of AutoSupport Collection to Issue.
choices: ['test', 'performance', 'all']
default: 'all'
type: str
uri:
description:
- send the AutoSupport message to the destination you specify instead of the configured destination.
type: str
'''
EXAMPLES = '''
- name: Send message
na_ontap_autosupport_invoke:
name: node1
message: invoked test autosupport rest
uri: http://1.2.3.4/delivery_uri
type: test
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible_collections.netapp.ontap.plugins.module_utils.netapp_module import NetAppModule
from ansible_collections.netapp.ontap.plugins.module_utils.netapp import OntapRestAPI
import ansible_collections.netapp.ontap.plugins.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPasupInvoke(object):
''' send ASUP message '''
def __init__(self):
self.use_rest = False
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=False, type='str'),
autosupport_message=dict(required=False, type='str', aliases=["message"]),
type=dict(required=False, choices=[
'test', 'performance', 'all'], default='all'),
uri=dict(required=False, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
# REST API should be used for ONTAP 9.6 or higher.
self.rest_api = OntapRestAPI(self.module)
if self.rest_api.is_rest():
self.use_rest = True
else:
if not HAS_NETAPP_LIB:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
def get_nodes(self):
nodes = list()
node_obj = netapp_utils.zapi.NaElement('system-node-get-iter')
desired_attributes = netapp_utils.zapi.NaElement('desired-attributes')
node_details_info = netapp_utils.zapi.NaElement('node-details-info')
node_details_info.add_new_child('node', '')
desired_attributes.add_child_elem(node_details_info)
node_obj.add_child_elem(desired_attributes)
try:
result = self.server.invoke_successfully(node_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg=to_native(error), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and \
int(result.get_child_content('num-records')) > 0:
node_info = result.get_child_by_name('attributes-list')
if node_info is not None:
nodes = [node_details.get_child_content('node') for node_details in node_info.get_children()]
return nodes
def send_zapi_message(self, params, node_name):
params['node-name'] = node_name
send_message = netapp_utils.zapi.NaElement.create_node_with_children('autosupport-invoke', **params)
try:
self.server.invoke_successfully(send_message, enable_tunneling=False)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, to_native(error)),
exception=traceback.format_exc())
def send_message(self):
params = dict()
if self.parameters.get('autosupport_message'):
params['message'] = self.parameters['autosupport_message']
if self.parameters.get('type'):
params['type'] = self.parameters['type']
if self.parameters.get('uri'):
params['uri'] = self.parameters['uri']
if self.use_rest:
if self.parameters.get('name'):
params['node.name'] = self.parameters['name']
node_name = params['node.name']
else:
node_name = '*'
api = 'support/autosupport/messages'
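            # Illustrative REST payload built above, e.g.:
            #   {'message': 'weekly check', 'type': 'test', 'node.name': 'node1'}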
dummy, error = self.rest_api.post(api, params)
if error is not None:
self.module.fail_json(msg="Error on sending autosupport message to node %s: %s."
% (node_name, error))
else:
if self.parameters.get('name'):
node_names = [self.parameters['name']]
else:
# simulate REST behavior by sending to all nodes in the cluster
node_names = self.get_nodes()
for name in node_names:
self.send_zapi_message(params, name)
def ems_log_event(self):
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
return netapp_utils.ems_log_event("na_ontap_autosupport_invoke", cserver)
def apply(self):
if not self.use_rest:
self.ems_log_event()
if self.module.check_mode:
pass
else:
self.send_message()
self.module.exit_json(changed=True)
def main():
message = NetAppONTAPasupInvoke()
message.apply()
if __name__ == '__main__':
main()
| 34.365482 | 109 | 0.644018 | 4,530 | 0.669129 | 0 | 0 | 0 | 0 | 0 | 0 | 2,200 | 0.324963 |
6a9cd0c545ed5aa451bbc0bc26a2e800d471ecd0 | 304 | py | Python | tests/api/serializer/test_user.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 562 | 2015-02-20T08:25:24.000Z | 2021-11-12T19:58:44.000Z | tests/api/serializer/test_user.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 129 | 2015-02-20T07:41:14.000Z | 2022-02-17T21:14:40.000Z | tests/api/serializer/test_user.py | armandomeeuwenoord/freight | 31ae2fa9252ab0b25385abd04742475e6671e3b1 | [
"Apache-2.0"
] | 54 | 2015-02-28T01:12:23.000Z | 2021-03-02T11:14:52.000Z | from freight.api.serializer import serialize
from freight.testutils import TestCase
class UserSerializerTest(TestCase):
def test_simple(self):
user = self.create_user()
result = serialize(user)
assert result["id"] == str(user.id)
assert result["name"] == user.name
| 25.333333 | 44 | 0.680921 | 217 | 0.713816 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.032895 |
6a9cdc6c74a18d65dd44c9480dd5e3953a78dd18 | 1,639 | py | Python | binning/pozo_5m_class_dem.py | UP-RS-ESP/GEW-DAP04-WS201819 | 18341620d9168e1eec476af1d8f568cf0017bf56 | [
"MIT"
] | 2 | 2020-10-12T11:33:00.000Z | 2021-12-20T06:33:54.000Z | binning/pozo_5m_class_dem.py | UP-RS-ESP/GEW-DAP04-WS201819 | 18341620d9168e1eec476af1d8f568cf0017bf56 | [
"MIT"
] | null | null | null | binning/pozo_5m_class_dem.py | UP-RS-ESP/GEW-DAP04-WS201819 | 18341620d9168e1eec476af1d8f568cf0017bf56 | [
"MIT"
] | null | null | null | import sys
import numpy as np
from matplotlib import pyplot as pl
from rw import WriteGTiff
fn = '../pozo-steep-vegetated-pcl.npy'
pts = np.load(fn)
x, y, z, c = pts[:, 0], pts[:, 1], pts[:, 2], pts[:, 5]
ix = (0.2 * (x - x.min())).astype('int')
iy = (0.2 * (y - y.min())).astype('int')
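# 0.2 = 1/5, so ix/iy index 5 m bins over the point cloud extent (hence the 5 m DEM).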
shape = (100, 100)
xb = np.arange(shape[1]+1)
yb = np.arange(shape[0]+1)
fg, ax = pl.subplots(ncols = 2, nrows = 2,
figsize = (10.24, 10.24),
sharex = True, sharey = True)
uc = (2, 5)
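# LAS point classification codes (assumed per the ASPRS LAS spec): 2 = ground, 5 = high vegetation.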
for j in range(len(uc)):
print('Class %i' % uc[j])
b = c == uc[j]
cx, cy, cz = ix[b], iy[b], z[b]
mean = np.zeros(shape)
stdr = np.zeros(shape)
for i in range(shape[0]):
print('% 3d%%' % i)
for k in range(shape[1]):
b = (cy == i) * (cx == k)
mean[i, k] = cz[b].mean()
stdr[i, k] = cz[b].std()
fname = 'pozo_5m_dem_mean_cl%i.tif' % uc[j]
WriteGTiff(fname, mean, x.min(), y.min()+500, step = 5)
np.save('pozo_5m_dem_mean_cl%i.npy' % uc[j], mean)
np.save('pozo_5m_dem_stdr_cl%i.npy' % uc[j], stdr)
ax[0, j].set_title('Class %i' % uc[j])
im = ax[0, j].pcolormesh(xb, yb,
np.ma.masked_invalid(mean),
cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[0, j])
cb.set_label('Mean elevation [m]')
im = ax[1, j].pcolormesh(xb, yb,
np.ma.masked_invalid(stdr),
cmap = pl.cm.magma_r)
cb = fg.colorbar(im, ax = ax[1, j])
cb.set_label('Elevation STD')
ax[0, j].set_aspect('equal')
ax[1, j].set_aspect('equal')
pl.savefig('%s.png' % sys.argv[0][:-3])
| 30.351852 | 59 | 0.52349 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.127517 |
6a9cfc593e93acc1f1c0f3afda04be08e714940c | 2,228 | py | Python | comtypes/_meta.py | phuslu/pyMSAA | 611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0 | [
"MIT"
] | 23 | 2015-05-28T15:31:35.000Z | 2022-02-16T07:51:34.000Z | comtypes/_meta.py | kar98kar/pyMSAA | 611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0 | [
"MIT"
] | 3 | 2020-05-19T03:00:52.000Z | 2020-11-03T09:22:51.000Z | comtypes/_meta.py | kar98kar/pyMSAA | 611bc4c31e0d6ba36f0f0bebdc6e6be14b994eb0 | [
"MIT"
] | 13 | 2016-08-26T23:00:40.000Z | 2022-03-03T09:58:36.000Z | # comtypes._meta helper module
from ctypes import POINTER, c_void_p, cast
import comtypes
################################################################
# metaclass for CoClass (in comtypes/__init__.py)
def _wrap_coclass(self):
# We are an IUnknown pointer, represented as a c_void_p instance,
# but we really want this interface:
itf = self._com_interfaces_[0]
punk = cast(self, POINTER(itf))
result = punk.QueryInterface(itf)
result.__dict__["__clsid"] = str(self._reg_clsid_)
return result
def _coclass_from_param(cls, obj):
if isinstance(obj, (cls._com_interfaces_[0], cls)):
return obj
raise TypeError(obj)
#
# The mro() of a POINTER(App) type, where class App is a subclass of CoClass:
#
# POINTER(App)
# App
# CoClass
# c_void_p
# _SimpleCData
# _CData
# object
class _coclass_meta(type):
# metaclass for CoClass
#
# When a CoClass subclass is created, create a POINTER(...) type
# for that class, with bases <coclass> and c_void_p. Also, the
# POINTER(...) type gets a __ctypes_from_outparam__ method which
# will QueryInterface for the default interface: the first one on
# the coclass' _com_interfaces_ list.
def __new__(cls, name, bases, namespace):
klass = type.__new__(cls, name, bases, namespace)
if bases == (object,):
return klass
# XXX We should insist that a _reg_clsid_ is present.
if "_reg_clsid_" in namespace:
clsid = namespace["_reg_clsid_"]
comtypes.com_coclass_registry[str(clsid)] = klass
PTR = _coclass_pointer_meta("POINTER(%s)" % klass.__name__,
(klass, c_void_p),
{"__ctypes_from_outparam__": _wrap_coclass,
"from_param": classmethod(_coclass_from_param),
})
from ctypes import _pointer_type_cache
_pointer_type_cache[klass] = PTR
return klass
# will not work if we change the order of the two base classes!
class _coclass_pointer_meta(type(c_void_p), _coclass_meta):
pass
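# Illustrative effect of the metaclass (names are assumptions, not comtypes API):
#   class App(CoClass):
#       _com_interfaces_ = [ISomeInterface]
#       _reg_clsid_ = GUID('{...}')
# Creating App also creates POINTER(App), whose __ctypes_from_outparam__
# QueryInterfaces the raw IUnknown pointer for ISomeInterface, the default
# (first) interface in _com_interfaces_.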
| 35.935484 | 85 | 0.601436 | 1,277 | 0.57316 | 0 | 0 | 0 | 0 | 0 | 0 | 962 | 0.431777 |
6a9d299ac035789dcfbdc5b67b56e5ebe19176e2 | 33,321 | py | Python | bin/ADFRsuite/CCSBpckgs/mglutil/gui/BasicWidgets/Tk/Dial.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/mglutil/gui/BasicWidgets/Tk/Dial.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | null | null | null | bin/ADFRsuite/CCSBpckgs/mglutil/gui/BasicWidgets/Tk/Dial.py | AngelRuizMoreno/Jupyter_Dock_devel | 6d23bc174d5294d1e9909a0a1f9da0713042339e | [
"MIT"
] | 1 | 2021-11-04T21:48:14.000Z | 2021-11-04T21:48:14.000Z | ################################################################################
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this library; if not, write to the Free Software
## Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
##
## (C) Copyrights Dr. Michel F. Sanner and TSRI 2016
##
################################################################################
#########################################################################
#
# Date: Mai 2001 Authors: Michel Sanner, Daniel Stoffler
#
# [email protected]
# [email protected]
#
# Copyright: Michel Sanner, Daniel Stoffler and TSRI
#
#########################################################################
import Tkinter
import math
import types
import sys
import os
from mglutil.util.callback import CallbackManager
from mglutil.util.misc import ensureFontCase
from optionsPanel import OptionsPanel
from KeyboardEntry import KeyboardEntry
class Dial(Tkinter.Frame, KeyboardEntry):
"""This class implements a Dial widget.
The widget has a pointer that can be moved around a circle.
The range corresponding to one full turn can be specified as well as the min
and max values that are allowed. By default these are set to None, meaning that
there is no min and no max. One turn corresponds to 360 units by default.
A dial can also operate in discrete mode (if self.increment is set to x). In
this mode the values will be restrained to be multiples of self.increment.
The widget has a CallbackManager. Callback functions get called at every value
change if self.continuous is set to 1, else they get called when the mouse
button is released. They always get called with the current value as an
argument.
An optional label can be displayed at the center of the Dial widget.
The size of the dial has to be specified at instantiation. Other parameters
can be set after the widget has been created.
The widget automatically adjusts the size of the arrow according to
the size of the dial.
The widget has a configure() method: type, min, max, increment, precision,
showLabel, value, continuous, oneTurn can be set this way.
master, labCfg and size can be passed only to the constructor.
A lock() method is used to disable the various GUI components of the
options panel. Usage: <instance>.lock(<component>=<value>), where
<component> is one of the configure() options and <value> is 0 or 1
(1 disables, 0 enables).
Setting values with increment enabled:
if using the method set(), the actual value will 'snap' to the next increment.
i.e., if the value is set to 3, and the increment is set to 2, setting the
value to 6 will actually result in 7 (3,5,7,9,.....)
To set the value while disregarding the currently active increment,
the set method understands the optional keyword force=True, i.e.
dial.set(<value>, force=True), which will set the value to <value>. The
increment will then be added to this new <value>.
"""
def __init__(self, master=None, type='float',
labCfg={'fg':'black','side':'left', 'text':None},
min=None, max=None, increment=.0, precision=2,
showLabel=1, value=0.0, continuous=1, oneTurn=360.,
size=50, callback=None,
lockMin=0, lockBMin=0, lockMax=0, lockBMax=0,
lockIncrement=0, lockBIncrement=0,
lockPrecision=0, lockShowLabel=0, lockValue=0,
lockType=0, lockContinuous=0, lockOneTurn=0, **kw):
Tkinter.Frame.__init__(self, master)
Tkinter.Pack.config(self)
self.callbacks = CallbackManager() # object to manage callback
# functions. They get called with the
# current value as an argument
# initialize various attributes with default values
self.precision = 2 # decimal places
self.min = None # minimum value
self.max = None # maximum value
self.increment = increment # value increment
self.minOld = 0. # used to store old values
self.maxOld = 0.
self.incrementOld = increment
self.size = 50 # defines widget size
self.offsetValue = 0. # used to set increment correctly
self.lab = None # label
self.callback = None # user specified callback
self.opPanel = None # option panel widget
self.oneTurn = 360. # value increment for 1 full turn
self.value = 0.0 # current value of widget
self.oldValue = 0.0 # old value of widget
self.showLabel = 1 # turn on to display label on
self.continuous = 1 # set to 1 to call callbacks at
# each value change, else gets called
# on button release event
self.angle = 0. # angle corresponding to value
self.labCfg = labCfg # Tkinter Label options
self.labelFont = (
ensureFontCase('helvetica'), 14, 'bold') # label font
self.labelColor = 'yellow' # label color
self.canvas = None # the canvas to create the widget in
self.usedArcColor = '#aaaaaa' # filled arc color of used portion
self.unusedArcColor = '#cccccc' # filled arc color of unused portion
self.pyOver180 = math.pi/180.0 # constants used in various places
self.threeSixtyOver1turn = 1
self.piOver1turn = math.pi/360.
self.lockMin = lockMin # lock<X> vars are used in self.lock()
self.lockMax = lockMax # to lock/unlock entries in optionpanel
self.lockIncrement = lockIncrement
self.lockBMin = lockBMin
self.lockBMax = lockBMax
self.lockBIncrement = lockBIncrement
self.lockPrecision = lockPrecision
self.lockShowLabel = lockShowLabel
self.lockValue = lockValue
self.lockType = lockType
self.lockContinuous = lockContinuous
self.lockOneTurn = lockOneTurn
self.setArrow()
# configure with user-defined values
self.setSize(size)
self.setCallback(callback)
self.setContinuous(continuous)
self.setType(type)
self.setPrecision(precision)
self.setOneTurn(oneTurn)
self.setMin(min)
self.setMax(max)
self.setIncrement(increment)
self.setShowLabel(showLabel)
self.setValue(value)
self.setLabel(self.labCfg)
self.createCanvas(master)
canvas = self.canvas
canvas.bind("<ButtonPress-1>", self.mouseDown)
canvas.bind("<ButtonRelease-1>", self.mouseUp)
canvas.bind("<B1-Motion>", self.mouseMove)
canvas.bind("<Button-3>", self.toggleOptPanel)
if os.name == 'nt': #sys.platform == 'win32':
canvas.bind("<MouseWheel>", self.mouseWheel)
else:
canvas.bind("<Button-4>", self.mouseWheel)
canvas.bind("<Button-5>", self.mouseWheel)
KeyboardEntry.__init__(self, (canvas,), self.setFromEntry)
self.opPanel = OptionsPanel(master = self, title="Dial Options")
## if self.callback:
## self.callbacks.AddCallback(self.callback)
def setFromEntry(self, valueString):
try:
self.set(self.type(valueString))
except ValueError:
# fixme we would like to pop this up in a window maybe
import traceback
traceback.print_stack()
traceback.print_exc()
def handleKeyStroke(self, event):
# handle key strokes for numbers only in widget keyboard entry label
key = event.keysym
if key.isdigit() or key=='period' or key=='minus' or key=='plus':
if key == 'period':
key = '.'
elif key == 'minus':
key = '-'
elif key == 'plus':
key = '+'
self.typedValue += key
self.typedValueTK.configure(text=self.typedValue)
else:
KeyboardEntry.handleKeyStroke(self, event)
def setSize(self, size):
"""Set widget size. Size must be of type int and greater than 0"""
assert isinstance(size, types.IntType),\
"Illegal size: expected type %s, got %s"%(type(1), type(size) )
assert size > 0, "Illegal size: must be > 0, got %s"%size
self.size = size
def setCallback(self, cb):
"""Set widget callback. Must be callable function. Callback is called
every time the widget value is set/modified"""
assert cb is None or callable(cb) or type(cb) is types.ListType,\
"Illegal callback: must be either None or callable, or list. Got %s"%cb
if cb is None: return
elif type(cb) is types.ListType:
for func in cb:
assert callable(func), "Illegal callback must be callable. Got %s"%func
self.callbacks.AddCallback(func)
else:
self.callbacks.AddCallback(cb)
self.callback = cb
def toggleOptPanel(self, event=None):
if self.opPanel.flag:
self.opPanel.Dismiss_cb()
else:
if not hasattr(self.opPanel, 'optionsForm'):
self.opPanel.displayPanel(create=1)
else:
self.opPanel.displayPanel(create=0)
def setArrow(self, size=None):
if size is not None:
self.setSize(size)
aS = self.size/40
self.arrowLength = max(3, 3*aS) # arrow head length
self.arrowWidth = max(2, aS) # half the arrow body width
self.arrowBorderwidth = max(1, self.arrowWidth/2) # width of arrow
# shadow lines
self.arrowHeadWidth = 2*self.arrowWidth # width of arrow head base
def mouseDown(self, event):
# remember where the mouse went down
self.lastx = event.x
self.lasty = event.y
def mouseUp(self, event):
# call callbacks if not in continuous mode
if not self.continuous:
self.callbacks.CallCallbacks(self.opPanel.valInput.get())
if self.showLabel == 2:
# no widget labels on mouse release
self.canvas.itemconfigure(self.labelId2, text='')
self.canvas.itemconfigure(self.labelId, text='')
def mouseMove(self, event):
dx = event.x-self.xm
dy = self.ym-event.y
n = math.sqrt(dx*dx+dy*dy)
if n == 0.0: v = [0.0, 0.0]
else: v = [dx/n, dy/n]
# find the cosine of the angle between new hand position and previous
# hand position
ma = v[0]*self.vector[0] + v[1]*self.vector[1]
# assure no rounding errors
if ma > 1.0: ma = 1.0
elif ma < -1.0: ma = -1.0
# compute angle increment compared to current vector
ang = math.acos(ma)
# find the sign of the rotation, sign of z component of vector prod.
oldv = self.vector
normz = oldv[0]*v[1] - oldv[1]*v[0]
if normz>0: ang = -1. * ang
# compute the new value
val = self.value + ang*self.oneTurnOver2pi
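        # ang is in radians; oneTurnOver2pi (= oneTurn/(2*pi), see setOneTurn)
        # converts radians to value units, so a full 2*pi sweep changes the
        # value by exactly oneTurn.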
self.set(val)
self.lastx = event.x
self.lasty = event.y
def mouseWheel(self, event):
#print "mouseWheel", event, event.num
if os.name == 'nt': #sys.platform == 'win32':
if event.delta > 0:
lEventNum = 4
else:
lEventNum = 5
else:
lEventNum = event.num
if lEventNum == 4:
self.set(self.value+self.oneTurn)
else:
self.set(self.value-self.oneTurn)
def get(self):
return self.type(self.value)
def printLabel(self):
if self.canvas is None:
return
self.canvas.itemconfigure(self.labelId2,
text=self.labelFormat%self.value)#newVal)
self.canvas.itemconfigure(self.labelId,
text=self.labelFormat%self.value)#newVal)
def set(self, val, update=1, force=0):
# if force is set to 1, we call this method regardless of the
# widget configuration. This is for example the case if the dial
# is set to continuous=0, but the value is set in the options panel
# snap to closest increment
if self.increment is not None and self.increment != 0. and not force:
offset = self.offsetValue%self.increment
dval = round(val/self.increment) * self.increment
if val < dval:
dval = dval + offset - self.increment
else:
dval = dval + offset
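            # Worked example matching the class docstring: offsetValue=3,
            # increment=2 gives offset=1; val=6 yields round(6/2)*2=6, and
            # since 6 is not < 6, dval=6+1=7 (the 3, 5, 7, ... series).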
if self.min is not None and dval < self.min:
dval = self.min
elif self.max is not None and dval > self.max:
dval = self.max
# recompute vector and angle corresponding to val
self.angle = (dval%self.oneTurn)*self.threeSixtyOver1turn
if dval <0.0:
self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = dval
self.offsetValue = dval
else:
# 'regular' mode, i.e. no step-wise increment
if self.min is not None and val < self.min: val = self.min
elif self.max is not None and val > self.max: val = self.max
# recompute vector and angle corresponding to val
self.angle = (val%self.oneTurn)*self.threeSixtyOver1turn
if val <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.value = val
self.offsetValue = val
#update arrow in display
self.drawArrow()
newVal = self.get()
if self.continuous or force:
if update and self.oldValue != newVal or force:
self.oldValue = newVal
self.callbacks.CallCallbacks(newVal)
if self.showLabel==2:
self.printLabel()
else:
if self.showLabel==2:
self.printLabel()
if self.showLabel==1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%newVal)
def drawArrow(self):
if self.canvas is None:
return
# end point
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym - self.vector[1]*self.rad
# point at arrow head base
xb = self.xm + self.vector[0]*self.radNoArrow
        yb = self.ym - self.vector[1]*self.radNoArrow
# vector orthogonal to arrow
n = [-self.vector[1], -self.vector[0]]
pts1 = [ self.xm+n[0]*self.arrowWidth, self.ym+n[1]*self.arrowWidth,
xb+n[0]*self.arrowWidth, yb+n[1]*self.arrowWidth,
xb+n[0]*self.arrowHeadWidth, yb+n[1]*self.arrowHeadWidth,
x1, y1 ]
pts2 = [ x1, y1,
xb-n[0]*self.arrowHeadWidth, yb-n[1]*self.arrowHeadWidth,
xb-n[0]*self.arrowWidth, yb-n[1]*self.arrowWidth,
self.xm-n[0]*self.arrowWidth, self.ym-n[1]*self.arrowWidth ]
canvas = self.canvas
if self.vector[0] > 0.0:
col1 = '#DDDDDD'
col2 = 'black'
else:
col1 = 'black'
col2 = '#DDDDDD'
apply( canvas.coords, (self.arrowPolId,) + tuple(pts1+pts2) )
apply( canvas.coords, (self.arrowPolborder1,) + tuple(pts1) )
canvas.itemconfigure( self.arrowPolborder1, fill=col1 )
apply( canvas.coords, (self.arrowPolborder2,) + tuple(pts2) )
canvas.itemconfigure( self.arrowPolborder2, fill=col2 )
canvas.itemconfigure(self.arcId, extent = 0.0-self.angle)
def createCanvas(self, master):
size = self.size
self.frame = Tkinter.Frame(self, borderwidth=3, relief='sunken')
self.canvas = Tkinter.Canvas(self.frame, width=size+2, height=size+2)
self.xm = self.ym = size/2+2
self.rad = size/2
self.radNoArrow = self.rad-self.arrowLength
self.vector = [0, 1]
x1 = self.xm + self.vector[0]*self.rad
y1 = self.ym + self.vector[1]*self.rad
canvas = self.canvas
self.circleId = canvas.create_oval(2,2,size,size, width=1,
fill=self.unusedArcColor)
self.arcId = canvas.create_arc(2,2,size,size, start=90.,
extent=0, fill=self.usedArcColor)
canvas.create_line(2, self.ym, size+2, self.ym)
canvas.create_line(self.xm, 2, self.ym, size+2)
self.arrowPolId = canvas.create_polygon( 0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,
fill='gray75' )
self.arrowPolborder1 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='black',
width = self.arrowBorderwidth)
self.arrowPolborder2 = canvas.create_line( 0,0,0,0,0,0,0,0,
fill='white',
width = self.arrowBorderwidth )
r = size/20
off = self.arrowBorderwidth
canvas.create_oval(self.xm-r,self.ym-r-off/2,self.xm+r,self.ym+r-off/2,
fill='#DDDDDD', outline='white')
canvas.create_oval(self.xm-r,self.ym-r+off,self.xm+r,self.ym+r+off,
fill='black', outline='black')
canvas.create_oval(self.xm-r,self.ym-r,self.xm+r,self.ym+r,
fill='gray70', outline='#DDDDDD')
self.labelId2 = canvas.create_text(self.xm+2, self.ym+2,
fill='black',
justify='center', text='',
font = self.labelFont)
self.labelId = canvas.create_text(self.xm, self.ym,
fill=self.labelColor,
justify='center', text='',
font = self.labelFont)
self.drawArrow()
self.opPanel = OptionsPanel(master = self, title="Dial Options")
# pack em up
self.canvas.pack(side=Tkinter.TOP)
self.frame.pack(expand=1, fill='x')
self.toggleWidgetLabel(self.showLabel)
def toggleWidgetLabel(self, val):
if val == 0:
# no widget labels
self.showLabel=0
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
if val == 1:
# show always widget labels
self.showLabel=1
self.printLabel()
if val == 2:
# show widget labels only when mouse moves
self.showLabel=2
self.canvas.itemconfigure(self.labelId2,
text='')
self.canvas.itemconfigure(self.labelId,
text='')
def setValue(self, val):
if type(val) == types.StringType:
val = float(val)
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for value: expected %s or %s, got %s"%(
type(1), type(1.0), type(val) )
# setValue does NOT call a callback!
if self.min is not None and val < self.min: val = self.min
if self.max is not None and val > self.max: val = self.max
self.value = self.type(val)
self.offsetValue=self.value
self.oldValue = self.value
#update arrow in display
self.angle = (self.value%self.oneTurn)*self.threeSixtyOver1turn
if self.value <0.0: self.angle = self.angle - 360.0
a = self.angle*self.pyOver180
self.vector = [math.sin(a), math.cos(a)]
self.drawArrow()
if self.showLabel == 1:
self.printLabel()
if self.opPanel:
self.opPanel.valInput.set(self.labelFormat%self.value)
def setLabel(self, labCfg):
self.labCfg = labCfg
text = labCfg.get('text', None)
if text is None or text=='':
return
d={}
for k, w in self.labCfg.items():
if k == 'side': continue
else: d[k] = w
if not 'side' in self.labCfg.keys():
self.labCfg['side'] = 'left'
if not self.lab:
self.lab = Tkinter.Label(self, d)
self.lab.pack(side=self.labCfg['side'])
self.lab.bind("<Button-3>", self.toggleOptPanel)
else:
            self.lab.configure(text=text)
#####################################################################
# the 'configure' methods:
#####################################################################
def configure(self, **kw):
for key,value in kw.items():
# the 'set' parameter callbacks
if key=='labCfg': self.setLabel(value)
elif key=='type': self.setType(value)
elif key=='min': self.setMin(value)
elif key=='max': self.setMax(value)
elif key=='increment': self.setIncrement(value)
elif key=='precision': self.setPrecision(value)
elif key=='showLabel': self.setShowLabel(value)
elif key=='continuous': self.setContinuous(value)
elif key=='oneTurn': self.setOneTurn(value)
# the 'lock' entries callbacks
elif key=='lockType': self.lockTypeCB(value)
elif key=='lockMin': self.lockMinCB(value)
elif key=='lockBMin': self.lockBMinCB(value)
elif key=='lockMax': self.lockMaxCB(value)
elif key=='lockBMax': self.lockBMaxCB(value)
elif key=='lockIncrement': self.lockIncrementCB(value)
elif key=='lockBIncrement': self.lockBIncrementCB(value)
elif key=='lockPrecision': self.lockPrecisionCB(value)
elif key=='lockShowLabel': self.lockShowLabelCB(value)
elif key=='lockValue': self.lockValueCB(value)
elif key=='lockContinuous': self.lockContinuousCB(value)
elif key=='lockOneTurn': self.lockOneTurnCB(value)
def setType(self, Type):
assert type(Type) in [types.StringType, types.TypeType],\
"Illegal type for datatype. Expected %s or %s, got %s"%(
type('a'), type(type), type(Type) )
if type(Type) == type(""): # type str
assert Type in ('int','float'),\
"Illegal type descriptor. Expected 'int' or 'float', got '%s'"%Type
self.type = eval(Type)
else:
self.type = Type
if self.type == int:
self.labelFormat = "%d"
self.int_value = self.value
else:
self.labelFormat = "%."+str(self.precision)+"f"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togIntFloat']['widget']
if self.type == int:
w.setvalue('int')
            elif self.type == float:
w.setvalue('float')
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setMin(self, min):
if min is not None:
assert type(min) in [types.IntType, types.FloatType],\
"Illegal type for minimum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(min) )
if self.max and min > self.max:
min = self.max
self.min = self.type(min)
if self.showLabel == 1:
self.printLabel()
if self.value < self.min:
self.set(self.min)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.minInput.set(self.labelFormat%self.min)
self.opPanel.toggleMin.set(1)
self.opPanel.min_entry.configure(state='normal', fg='gray0')
self.minOld = self.min
else:
self.min = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMin.set(0)
self.opPanel.min_entry.configure(state='disabled',
fg='gray40')
def setMax(self, max):
if max is not None:
assert type(max) in [types.IntType, types.FloatType],\
"Illegal type for maximum. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(max) )
if self.min and max < self.min:
max = self.min
self.max = self.type(max)
if self.showLabel == 1:
self.printLabel()
if self.value > self.max:
self.set(self.max)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.maxInput.set(self.labelFormat%self.max)
self.opPanel.toggleMax.set(1)
self.opPanel.max_entry.configure(state='normal', fg='gray0')
self.maxOld = self.max
else:
self.max = None
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleMax.set(0)
self.opPanel.max_entry.configure(state='disabled', fg='gray40')
def setIncrement(self, incr):
if incr is not None:
assert type(incr) in [types.IntType, types.FloatType],\
"Illegal type for increment. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(incr) )
self.increment = self.type(incr)
self.offsetValue = self.value
self.incrementOld = self.increment
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.incrInput.set(self.labelFormat%self.increment)
self.opPanel.toggleIncr.set(1)
self.opPanel.incr_entry.configure(state='normal', fg='gray0')
else:
self.increment = self.type(0)
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.toggleIncr.set(0)
self.opPanel.incrInput.set(self.labelFormat%0)
self.opPanel.incr_entry.configure(state='disabled',
fg='gray40')
def setPrecision(self, val):
assert type(val) in [types.IntType, types.FloatType],\
"Illegal type for precision. Expected type %s or %s, got %s"%(
type(0), type(0.0), type(val) )
val = int(val)
if val > 10:
val = 10
if val < 1:
val = 1
self.precision = val
if self.type == float:
self.labelFormat = "%."+str(self.precision)+"f"
else:
self.labelFormat = "%d"
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['selPrec']['widget']
w.setvalue(val)
if self.opPanel:
self.opPanel.updateDisplay()
# and update the printed label
if self.canvas and self.showLabel == 1:
self.printLabel()
def setContinuous(self, cont):
""" cont can be None, 0 or 1 """
assert cont in [None, 0, 1],\
"Illegal value for continuous: expected None, 0 or 1, got %s"%cont
if cont != 1:
cont = None
self.continuous = cont
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togCont']['widget']
if cont:
w.setvalue('on')#i=1
else:
w.setvalue('off')#i=0
if self.opPanel:
self.opPanel.updateDisplay()
def setShowLabel(self, val):
"""Show label can be 0, 1 or 2
0: no label
1: label is always shown
2: show label only when value changes"""
assert val in [0,1,2],\
"Illegal value for showLabel. Expected 0, 1 or 2, got %s"%val
if val != 0 and val != 1 and val != 2:
print "Illegal value. Must be 0, 1 or 2"
return
self.showLabel = val
self.toggleWidgetLabel(val)
if hasattr(self.opPanel, 'optionsForm'):
w = self.opPanel.idf.entryByName['togLabel']['widget']
if self.showLabel == 0:
label = 'never'
elif self.showLabel == 1:
label = 'always'
elif self.showLabel == 2:
label = 'move'
w.setvalue(label)
if self.opPanel:
self.opPanel.updateDisplay()
def setOneTurn(self, oneTurn):
assert type(oneTurn) in [types.IntType, types.FloatType],\
"Illegal type for oneTurn. Expected %s or %s, got %s"%(
type(0), type(0.0), type(oneTurn) )
self.oneTurn = oneTurn
self.threeSixtyOver1turn = 360./oneTurn
self.piOver1turn = math.pi/oneTurn
self.oneTurnOver2pi = oneTurn / (2*math.pi)
if self.opPanel:
self.opPanel.updateDisplay()
#####################################################################
# the 'lock' methods:
#####################################################################
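    # Each lockXxxCB below normalizes its mode argument to 0/1, stores it, and
    # asks the options panel (if built) to grey out / re-enable the matching widget.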
def lockTypeCB(self, mode):
if mode != 0: mode = 1
self.lockType = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMinCB(self, mode): #min entry field
if mode != 0: mode = 1
self.lockMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMinCB(self, mode): # min checkbutton
if mode != 0: mode = 1
self.lockBMin = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockMaxCB(self, mode): # max entry field
if mode != 0: mode = 1
self.lockMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBMaxCB(self, mode): # max checkbutton
if mode != 0: mode = 1
self.lockBMax = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockIncrementCB(self, mode): # increment entry field
if mode != 0: mode = 1
self.lockIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockBIncrementCB(self, mode): # increment checkbutton
if mode != 0: mode = 1
self.lockBIncrement = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockPrecisionCB(self, mode):
if mode != 0: mode = 1
self.lockPrecision = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockShowLabelCB(self, mode):
if mode != 0: mode = 1
self.lockShowLabel = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockValueCB(self, mode):
if mode != 0: mode = 1
self.lockValue = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockContinuousCB(self, mode):
if mode != 0: mode = 1
self.lockContinuous = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
def lockOneTurnCB(self, mode):
if mode != 0: mode = 1
self.lockOneTurn = mode
if hasattr(self.opPanel, 'optionsForm'):
self.opPanel.lockUnlockDisplay()
if __name__ == '__main__':
def foo(val):
print val
d = Dial(size=50)
d.configure(showLabel=1)
d.callbacks.AddCallback(foo)
| 36.376638 | 87 | 0.547643 | 31,646 | 0.949731 | 0 | 0 | 0 | 0 | 0 | 0 | 8,394 | 0.251913 |
6a9d42bd307c1507375c76e403f46b3901bbf76d | 3,560 | py | Python | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 5 | 2018-12-22T14:49:13.000Z | 2022-01-13T07:21:46.000Z | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | null | null | null | qt-creator-opensource-src-4.6.1/scripts/checkInstalledFiles.py | kevinlq/Qt-Creator-Opensource-Study | b8cadff1f33f25a5d4ef33ed93f661b788b1ba0f | [
"MIT"
] | 8 | 2018-07-17T03:55:48.000Z | 2021-12-22T06:37:53.000Z | #!/usr/bin/env python
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
import os
import sys
import stat
import difflib
import inspect
import getopt
def referenceFile():
if sys.platform.startswith('linux'):
filename = 'makeinstall.linux'
elif sys.platform.startswith('win'):
filename = 'makeinstall.windows'
elif sys.platform == 'darwin':
filename = 'makeinstall.darwin'
else:
print "Unsupported platform: ", sys.platform
sys.exit(-1)
scriptDir = os.path.dirname(inspect.getfile(inspect.currentframe()))
return os.path.join(scriptDir,'..','tests', 'reference', filename)
def readReferenceFile():
# read file with old diff
f = open(referenceFile(), 'r');
filelist = []
for line in f:
filelist.append(line)
f.close()
return filelist
def generateReference(rootdir):
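    # Walk rootdir and record the permission bits of every file and directory
    # (keyed by path relative to rootdir), warning about zero-length files.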
fileDict = {}
for root, subFolders, files in os.walk(rootdir):
for file in (subFolders + files):
f = os.path.join(root,file)
perm = os.stat(f).st_mode & 0777
if os.path.getsize(f) == 0:
print "'%s' is empty!" % f
fileDict[f[len(rootdir)+1:]] = perm
# generate new list
formattedlist = []
for name, perm in sorted(fileDict.iteritems()):
formattedlist.append("%o %s\n"% (perm, name))
    return formattedlist
def usage():
print "Usage: %s [-g | --generate] <dir>" % os.path.basename(sys.argv[0])
def main():
generateMode = False
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], 'hg', ['help', 'generate'])
    except getopt.GetoptError as err:
        print str(err)
usage()
sys.exit(2)
for o, a in opts:
if o in ('-h', '--help'):
usage()
sys.exit(0)
if o in ('-g', '--generate'):
generateMode = True
if len(args) != 1:
usage()
sys.exit(2)
rootdir = args[0]
if generateMode:
f = open(referenceFile(), 'w')
for item in generateReference(rootdir):
f.write(item)
f.close()
print "Do not forget to commit", referenceFile()
else:
hasDiff = False
for line in difflib.unified_diff(readReferenceFile(), generateReference(rootdir), fromfile=referenceFile(), tofile="generated"):
sys.stdout.write(line)
hasDiff = True
if hasDiff:
sys.exit(1)
if __name__ == "__main__":
main()
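# Typical usage (hypothetical paths): run with -g to (re)generate the platform
# reference list, e.g. `checkInstalledFiles.py -g /tmp/qtc-install`; without -g
# the script diffs an install tree against the stored reference.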
| 31.504425 | 136 | 0.608989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,497 | 0.420506 |
6a9da9d2fe8534cba2998ec7d3c2190abe55abec | 5,190 | py | Python | deep_sdf/workspace.py | huajian1069/non-convex_optimisation | cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db | [
"MIT"
] | 2 | 2020-10-12T19:22:50.000Z | 2021-08-21T21:48:27.000Z | deep_sdf/workspace.py | huajian1069/non-convex_optimisation | cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db | [
"MIT"
] | 13 | 2020-04-17T09:07:06.000Z | 2020-07-25T19:43:44.000Z | deep_sdf/workspace.py | huajian1069/non-convex-optimisation | cf4cd5070524c3f7e6b814fe9b85a15a06e7b8db | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2004-present Facebook. All Rights Reserved.
import json
import os
import torch
model_params_subdir = "ModelParameters"
optimizer_params_subdir = "OptimizerParameters"
latent_codes_subdir = "LatentCodes"
logs_filename = "Logs.pth"
reconstructions_subdir = "Reconstructions"
reconstruction_meshes_subdir = "Meshes"
reconstruction_codes_subdir = "Codes"
optimizations_subdir = "Optimizations"
optimizations_meshes_subdir = "Meshes"
optimizations_codes_subdir = "Codes"
specifications_filename = "specs.json"
data_source_map_filename = ".datasources.json"
evaluation_subdir = "Evaluation"
sdf_samples_subdir = "SdfSamples"
renders_subdir = "Renders"
surface_samples_subdir = "SurfaceSamples"
normalization_param_subdir = "NormalizationParameters"
training_meshes_subdir = "TrainingMeshes"
def load_experiment_specifications(experiment_directory):
filename = os.path.join(experiment_directory, specifications_filename)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include specifications file "
+ '"specs.json"'.format(experiment_directory)
)
return json.load(open(filename))
def load_model_parameters(experiment_directory, checkpoint, decoder):
filename = os.path.join(
experiment_directory, model_params_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception('model state dict "{}" does not exist'.format(filename))
data = torch.load(filename)
decoder.load_state_dict(data["model_state_dict"])
return data["epoch"]
def build_decoder(experiment_directory, experiment_specs):
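    # Dynamically import networks.<NetworkArch> named in the experiment specs
    # and instantiate its Decoder on the GPU with the configured latent size.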
arch = __import__(
"networks." + experiment_specs["NetworkArch"], fromlist=["Decoder"]
)
latent_size = experiment_specs["CodeLength"]
decoder = arch.Decoder(latent_size, **experiment_specs["NetworkSpecs"]).cuda()
return decoder
def load_decoder(
experiment_directory, experiment_specs, checkpoint, data_parallel=True
):
decoder = build_decoder(experiment_directory, experiment_specs)
if data_parallel:
decoder = torch.nn.DataParallel(decoder)
epoch = load_model_parameters(experiment_directory, checkpoint, decoder)
return (decoder, epoch)
def load_latent_vectors(experiment_directory, checkpoint):
filename = os.path.join(
experiment_directory, latent_codes_subdir, checkpoint + ".pth"
)
if not os.path.isfile(filename):
raise Exception(
"The experiment directory ({}) does not include a latent code file"
+ " for checkpoint '{}'".format(experiment_directory, checkpoint)
)
data = torch.load(filename)
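    # Two storage layouts are supported: a raw tensor of codes (older
    # checkpoints) or an nn.Embedding state dict (newer checkpoints).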
if isinstance(data["latent_codes"], torch.Tensor):
num_vecs = data["latent_codes"].size()[0]
lat_vecs = []
for i in range(num_vecs):
lat_vecs.append(data["latent_codes"][i].cuda())
return lat_vecs
else:
num_embeddings, embedding_dim = data["latent_codes"]["weight"].shape
lat_vecs = torch.nn.Embedding(num_embeddings, embedding_dim)
lat_vecs.load_state_dict(data["latent_codes"])
return lat_vecs.weight.data.detach()
def get_data_source_map_filename(data_dir):
return os.path.join(data_dir, data_source_map_filename)
def get_reconstructed_mesh_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_meshes_subdir,
dataset,
class_name,
instance_name + ".ply",
)
def get_reconstructed_code_filename(
experiment_dir, epoch, dataset, class_name, instance_name
):
return os.path.join(
experiment_dir,
reconstructions_subdir,
str(epoch),
reconstruction_codes_subdir,
dataset,
class_name,
instance_name + ".pth",
)
def get_evaluation_dir(experiment_dir, checkpoint, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, evaluation_subdir, checkpoint)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_model_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, model_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_optimizer_params_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, optimizer_params_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_latent_codes_dir(experiment_dir, create_if_nonexistent=False):
dir = os.path.join(experiment_dir, latent_codes_subdir)
if create_if_nonexistent and not os.path.isdir(dir):
os.makedirs(dir)
return dir
def get_normalization_params_filename(
data_dir, dataset_name, class_name, instance_name
):
return os.path.join(
data_dir,
normalization_param_subdir,
dataset_name,
class_name,
instance_name + ".npz",
)
| 25.566502 | 82 | 0.7158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 723 | 0.139306 |
6a9e779e59feb114fa5c597307d0f0ba536c3a82 | 1,571 | py | Python | EmoPy/EmoPy/examples/convolutional_dropout_model.py | Rahmatullina/FinalYearProject | 326f521b9f600dbbc7ace2223bd5aafc79b2267c | [
"Apache-2.0"
] | null | null | null | EmoPy/EmoPy/examples/convolutional_dropout_model.py | Rahmatullina/FinalYearProject | 326f521b9f600dbbc7ace2223bd5aafc79b2267c | [
"Apache-2.0"
] | 9 | 2020-09-26T01:09:35.000Z | 2022-02-10T01:32:30.000Z | EmoPy/EmoPy/examples/convolutional_dropout_model.py | Rahmatullina/FinalYearProject | 326f521b9f600dbbc7ace2223bd5aafc79b2267c | [
"Apache-2.0"
] | null | null | null | from EmoPy.src.fermodel import FERModel
from EmoPy.src.directory_data_loader import DirectoryDataLoader
from EmoPy.src.csv_data_loader import CSVDataLoader
from EmoPy.src.data_generator import DataGenerator
from EmoPy.src.neuralnets import ConvolutionalNNDropout
from sklearn.model_selection import train_test_split
import numpy as np
from pkg_resources import resource_filename,resource_exists
validation_split = 0.15
target_dimensions = (48, 48)
channels = 1
verbose = True
print('--------------- Convolutional Dropout Model -------------------')
print('Loading data...')
directory_path = resource_filename('EmoPy.examples','image_data/sample_image_directory')
data_loader = DirectoryDataLoader(datapath=directory_path, validation_split=validation_split)
dataset = data_loader.load_data()
if verbose:
dataset.print_data_details()
print('Preparing training/testing data...')
train_images, train_labels = dataset.get_training_data()
train_gen = DataGenerator().fit(train_images, train_labels)
test_images, test_labels = dataset.get_test_data()
test_gen = DataGenerator().fit(test_images, test_labels)
print('Training net...')
model = ConvolutionalNNDropout(target_dimensions, channels, dataset.get_emotion_index_map(), verbose=True)
model.fit_generator(train_gen.generate(target_dimensions, batch_size=5),
test_gen.generate(target_dimensions, batch_size=5),
epochs=15)
# Save model configuration
# model.export_model('output/conv2d_model.json','output/conv2d_weights.h5',"output/conv2d_emotion_map.json", emotion_map)
| 38.317073 | 121 | 0.789306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 333 | 0.211967 |
6a9f3ae293c23801e109a6d38cb1d3e6cf12238a | 189 | py | Python | ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | ENV/lib/python3.6/site-packages/pyramid_jinja2/tests/extensions.py | captain-c00keys/pyramid-stocks | 0acf3363a6a7ee61cd41b855f43c9d6f9582ae6a | [
"MIT"
] | null | null | null | from jinja2 import nodes
from jinja2.ext import Extension
class TestExtension(Extension):
tags = {'test_ext'}
def parse(self, parser): return nodes.Const("This is test extension")
| 27 | 73 | 0.746032 | 129 | 0.68254 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.179894 |
6a9f71d63576d36e576c5ed1a561ba09b6a33e88 | 45,622 | py | Python | deepstream_ignition_usb_yolo.py | valdivj/Deepstream-IGN-Maker-YOLO | f38ece731e9797a525da932c3da2de77e48f45af | [
"Unlicense"
] | 18 | 2021-02-09T11:07:57.000Z | 2022-03-16T12:35:34.000Z | deepstream_ignition_usb_yolo.py | valdivj/Deepstream-IGN-Maker-YOLO | f38ece731e9797a525da932c3da2de77e48f45af | [
"Unlicense"
] | null | null | null | deepstream_ignition_usb_yolo.py | valdivj/Deepstream-IGN-Maker-YOLO | f38ece731e9797a525da932c3da2de77e48f45af | [
"Unlicense"
] | 3 | 2021-02-11T00:23:56.000Z | 2021-11-16T02:15:37.000Z | #!/usr/bin/env python3
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
import sys
sys.path.append('../')
sys.path.insert(0, "../../../client_libraries/python/")
import paho.mqtt.client as mqtt
import sparkplug_b as sparkplug
import time
import time, threading
import random
import string
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call
from sparkplug_b import *
import pyds
# Application Variables
serverUrl = "localhost"
myGroupId = "Sparkplug B Devices"
myNodeName = "NVIDIA"
myDeviceName = "XavierNX"
publishPeriod = 5000
myUsername = "admin"
myPassword = "changeme"
# Note: mqtt.Client's first positional argument is a client id, not a broker
# address; the broker host/port are supplied later in client.connect().
client = mqtt.Client()
WAIT_SECONDS = 1
frame_numberx = 0
num_rectsx = 0
counter1 = 0
counter2 = 0
Object1 = 0
Object2 = 0
Object3 = 0
Object4 = 0
Object5 = 0
Object6 = 0
Object7 = 0
Object8 = 0
Object9 = 0
Object10 = 0
newValue1 = 0
newValue2 = 0
newValue3 = 0
newValue4 = 0
newValue5 = 0
newValue6 = 0
newValue7 = 0
newValue8 = 0
newValue9 = 0
newValue10 = 0
class AliasMap:
Next_Server = 0
Rebirth = 1
Reboot = 2
Device_frame_numberx = 3
Device_num_rectsx = 4
Device_Metric0 = 5
Device_Metric1 = 6
Device_Metric2 = 7
Device_Metric3 = 8
Device_Metric4 = 9
Device_counter1 = 10
Device_counter2 = 11
Device_Input1 = 12
Device_Input2 = 13
Device_Input3 = 14
Device_Input4 = 15
Device_Input5 = 16
Device_Input6 = 17
Device_Input7 = 18
Device_Input8 = 19
Device_Input9 = 20
Device_Input10 = 21
Device_Output1 = 22
Device_Output2 = 23
Device_Output3 = 24
Device_Output4 = 25
Device_Output5 = 26
Device_Output6 = 27
Device_Output7 = 28
Device_Output8 = 29
Device_Output9 = 30
Device_Output10 = 31
MAX_DISPLAY_LEN=64
PGIE_CLASS_ID_TOOTHBRUSH = 79
PGIE_CLASS_ID_HAIR_DRYER = 78
PGIE_CLASS_ID_TEDDY_BEAR = 77
PGIE_CLASS_ID_SCISSORS = 76
PGIE_CLASS_ID_VASE = 75
PGIE_CLASS_ID_CLOCK = 74
PGIE_CLASS_ID_BOOK = 73
PGIE_CLASS_ID_REFRIGERATOR = 72
PGIE_CLASS_ID_SINK = 71
PGIE_CLASS_ID_TOASTER = 70
PGIE_CLASS_ID_OVEN = 69
PGIE_CLASS_ID_MICROWAVE = 68
PGIE_CLASS_ID_CELL_PHONE = 67
PGIE_CLASS_ID_KEYBOARD = 66
PGIE_CLASS_ID_REMOTE = 65
PGIE_CLASS_ID_MOUSE = 64
PGIE_CLASS_ID_LAPTOP = 63
PGIE_CLASS_ID_TVMONITOR = 62
PGIE_CLASS_ID_TOILET = 61
PGIE_CLASS_ID_DININGTABLE= 60
PGIE_CLASS_ID_BED = 59
PGIE_CLASS_ID_POTTEDPLANT = 58
PGIE_CLASS_ID_SOFA = 57
PGIE_CLASS_ID_CHAIR = 56
PGIE_CLASS_ID_CAKE = 55
PGIE_CLASS_ID_DONUT = 54
PGIE_CLASS_ID_PIZZA = 53
PGIE_CLASS_ID_HOT_DOG = 52
PGIE_CLASS_ID_CARROT = 51
PGIE_CLASS_ID_BROCCOLI = 50
PGIE_CLASS_ID_ORANGE = 49
PGIE_CLASS_ID_SANDWICH = 48
PGIE_CLASS_ID_APPLE = 47
PGIE_CLASS_ID_BANANA = 46
PGIE_CLASS_ID_BOWL = 45
PGIE_CLASS_ID_SPOON = 44
PGIE_CLASS_ID_KNIFE = 43
PGIE_CLASS_ID_FORK = 42
PGIE_CLASS_ID_CUP = 41
PGIE_CLASS_ID_WINE_GLASS = 40
PGIE_CLASS_ID_BOTTLE = 39
PGIE_CLASS_ID_TENNIS_RACKET = 38
PGIE_CLASS_ID_SURFBOARD = 37
PGIE_CLASS_ID_SKATEBOARD = 36
PGIE_CLASS_ID_BASEBALL_GLOVE = 35
PGIE_CLASS_ID_BASEBALL_BAT = 34
PGIE_CLASS_ID_KITE = 33
PGIE_CLASS_ID_SPORTS_BALL = 32
PGIE_CLASS_ID_SNOWBOARD = 31
PGIE_CLASS_ID_SKIS = 30
PGIE_CLASS_ID_FRISBEE = 29
PGIE_CLASS_ID_SUITCASE = 28
PGIE_CLASS_ID_TIE = 27
PGIE_CLASS_ID_HANDBAG = 26
PGIE_CLASS_ID_UMBRELLA = 25
PGIE_CLASS_ID_BACKPACK = 24
PGIE_CLASS_ID_GIRAFFE = 23
PGIE_CLASS_ID_ZEBRA = 22
PGIE_CLASS_ID_BEAR = 21
PGIE_CLASS_ID_ELEPHANT = 20
PGIE_CLASS_ID_COW = 19
PGIE_CLASS_ID_SHEEP = 18
PGIE_CLASS_ID_HORSE = 17
PGIE_CLASS_ID_DOG = 16
PGIE_CLASS_ID_CAT = 15
PGIE_CLASS_ID_BIRD = 14
PGIE_CLASS_ID_BENCH = 13
PGIE_CLASS_ID_PARKING_METER = 12
PGIE_CLASS_ID_STOP_SIGN = 11
PGIE_CLASS_ID_FIRE_HYDRANT = 10
PGIE_CLASS_ID_TRAFFIC_LIGHT = 9
PGIE_CLASS_ID_BOAT = 8
PGIE_CLASS_ID_TRUCK = 7
PGIE_CLASS_ID_TRAIN = 6
PGIE_CLASS_ID_BUS = 5
PGIE_CLASS_ID_AEROPLANE = 4
PGIE_CLASS_ID_MOTORBIKE = 3
PGIE_CLASS_ID_VEHICLE = 2
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 0
pgie_classes_str = ["Toothbrush", "Hair dryer", "Teddy bear", "Scissors", "Vase", "Clock", "Book", "Refrigerator", "Sink", "Toaster", "Oven", "Microwave", "Cell phone", "Keyboard", "Remote", "Mouse", "Laptop", "Tvmonitor", "Toilet", "Diningtable", "Bed", "Pottedplant", "Sofa", "Chair", "Cake", "Donut", "Pizza", "Hot dog", "Carrot", "Broccoli", "Orange", "Sandwich", "Apple", "Banana", "Bowl", "Spoon", "Knife", "Fork", "Cup", "Wine glass", "Bottle", "Tennis racket", "Surfboard", "Skateboard", "Baseball glove", "Baseball bat", "Kite", "Sports ball", "Snowboard", "Skis", "Frisbee", "Suitcase", "Tie", "Handbag", "Umbrella", "Backpack", "Giraffe", "Zebra", "Bear", "Elephant", "Cow", "Sheep", "Horse", "Dog", "Cat", "Bird", "Bench", "Parking meter", "Stop sign", "Fire hydrant", "Traffic light", "Boat", "Truck", "Train", "Bus", "Aeroplane", "Motorbike", "Car", "Bicycle", "Person"]
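# Note: the labels above are listed in descending class-id order, i.e. index 0
# corresponds to class id 79 (Toothbrush) and the last entry to id 0 (Person).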
######################################################################
# The callback for when the client receives a CONNACK response from the server.
######################################################################
def on_connect(client, userdata, flags, rc):
if rc == 0:
print("Connected with result code "+str(rc))
else:
print("Failed to connect with result code "+str(rc))
sys.exit()
global myGroupId
global myNodeName
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("spBv1.0/" + myGroupId + "/NCMD/" + myNodeName + "/#")
client.subscribe("spBv1.0/" + myGroupId + "/DCMD/" + myNodeName + "/#")
######################################################################
######################################################################
# The callback for when a PUBLISH message is received from the server.
######################################################################
def on_message(client, userdata, msg):
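    # Dispatch inbound NCMD/DCMD messages: decode the Sparkplug B payload and
    # act on each metric by name or alias, echoing accepted writes back as DDATA.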
print("Message arrived: " + msg.topic)
tokens = msg.topic.split("/")
global newValue1
global newValue2
global newValue3
global newValue4
global newValue5
global newValue6
global newValue7
global newValue8
global newValue9
global newValue10
if tokens[0] == "spBv1.0" and tokens[1] == myGroupId and (tokens[2] == "NCMD" or tokens[2] == "DCMD") and tokens[3] == myNodeName:
inboundPayload = sparkplug_b_pb2.Payload()
inboundPayload.ParseFromString(msg.payload)
for metric in inboundPayload.metrics:
if metric.name == "Node Control/Next Server" or metric.alias == AliasMap.Next_Server:
# 'Node Control/Next Server' is an NCMD used to tell the device/client application to
# disconnect from the current MQTT server and connect to the next MQTT server in the
# list of available servers. This is used for clients that have a pool of MQTT servers
# to connect to.
print ("'Node Control/Next Server' is not implemented in this example")
elif metric.name == "Node Control/Rebirth" or metric.alias == AliasMap.Rebirth:
# 'Node Control/Rebirth' is an NCMD used to tell the device/client application to resend
# its full NBIRTH and DBIRTH again. MQTT Engine will send this NCMD to a device/client
# application if it receives an NDATA or DDATA with a metric that was not published in the
# original NBIRTH or DBIRTH. This is why the application must send all known metrics in
# its original NBIRTH and DBIRTH messages.
publishBirth()
elif metric.name == "Node Control/Reboot" or metric.alias == AliasMap.Reboot:
# 'Node Control/Reboot' is an NCMD used to tell a device/client application to reboot
# This can be used for devices that need a full application reset via a soft reboot.
# In this case, we fake a full reboot with a republishing of the NBIRTH and DBIRTH
# messages.
publishBirth()
elif metric.name == "output/Device Metric2" or metric.alias == AliasMap.Device_Metric2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue = metric.int_value
print ("CMD message for output/Device Metric2 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric2, MetricDataType.Int16, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 1
#publishBirth()
elif metric.name == "output/Device Input1" or metric.alias == AliasMap.Device_Input1:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue1 = metric.int_value
print ("CMD message for output/Device Input1 - New Value: {}".format(newValue1))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input1, MetricDataType.Int16, newValue1)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 2
#publishBirth()
elif metric.name == "output/Device Input2" or metric.alias == AliasMap.Device_Input2:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue2 = metric.int_value
print ("CMD message for output/Device Input2 - New Value: {}".format(newValue2))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input2, MetricDataType.Int16, newValue2)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 3
#publishBirth()
elif metric.name == "output/Device Input3" or metric.alias == AliasMap.Device_Input3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue3 = metric.int_value
print ("CMD message for output/Device Input3 - New Value: {}".format(newValue3))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input3, MetricDataType.Int16, newValue3)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 4
#publishBirth()
elif metric.name == "output/Device Input4" or metric.alias == AliasMap.Device_Input4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue4 = metric.int_value
print ("CMD message for output/Device Input4 - New Value: {}".format(newValue4))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input4, MetricDataType.Int16, newValue4)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 5
#publishBirth()
elif metric.name == "output/Device Input5" or metric.alias == AliasMap.Device_Input5:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue5 = metric.int_value
print ("CMD message for output/Device Input5 - New Value: {}".format(newValue5))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input5, MetricDataType.Int16, newValue5)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 6
#publishBirth()
elif metric.name == "output/Device Input6" or metric.alias == AliasMap.Device_Input6:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue6 = metric.int_value
print ("CMD message for output/Device Input6 - New Value: {}".format(newValue6))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input6, MetricDataType.Int16, newValue6)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 7
#publishBirth()
elif metric.name == "output/Device Input7" or metric.alias == AliasMap.Device_Input7:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue7 = metric.int_value
print ("CMD message for output/Device Input7 - New Value: {}".format(newValue7))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input7, MetricDataType.Int16, newValue7)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 8
#publishBirth()
elif metric.name == "output/Device Input8" or metric.alias == AliasMap.Device_Input8:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue8 = metric.int_value
print ("CMD message for output/Device Input8 - New Value: {}".format(newValue8))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input8, MetricDataType.Int16, newValue8)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 9
#publishBirth()
elif metric.name == "output/Device Input9" or metric.alias == AliasMap.Device_Input9:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue9 = metric.int_value
print ("CMD message for output/Device Input9 - New Value: {}".format(newValue9))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input9, MetricDataType.Int16, newValue9)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Publish a message Input 10
#publishBirth()
elif metric.name == "output/Device Input10" or metric.alias == AliasMap.Device_Input10:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue10 = metric.int_value
print ("CMD message for output/Device Input10 - New Value: {}".format(newValue10))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Input10, MetricDataType.Int16, newValue10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#global newValue4
#publishBirth()
elif metric.name == "output/Device Metric4" or metric.alias == AliasMap.Device_Metric4:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Int16 because of how we declated it in the DBIRTH
newValue = metric.string_value
print ("CMD message for output/Device Metric4 - New Value: {}".format(newValue))
# Create the DDATA payload - Use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric4, MetricDataType.String, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
#publishBirth()
elif metric.name == "output/Device Metric3" or metric.alias == AliasMap.Device_Metric3:
# This is a metric we declared in our DBIRTH message and we're emulating an output.
# So, on incoming 'writes' to the output we must publish a DDATA with the new output
# value. If this were a real output we'd write to the output and then read it back
# before publishing a DDATA message.
# We know this is an Boolean because of how we declated it in the DBIRTH
newValue = metric.boolean_value
print ("CMD message for output/Device Metric3 - New Value: %r" % newValue)
# Create the DDATA payload - use the alias because this isn't the DBIRTH
payload = sparkplug.getDdataPayload()
addMetric(payload, None, AliasMap.Device_Metric3, MetricDataType.Boolean, newValue)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
else:
print ("Unknown command: " + metric.name)
else:
print ("Unknown command...")
print ("Done publishing")
#####################################################################
######################################################################
######################################################################
# Publish the BIRTH certificates
######################################################################
def publishBirth():
publishNodeBirth()
publishDeviceBirth()
######################################################################
######################################################################
# Publish the NBIRTH certificate
######################################################################
def publishNodeBirth():
print ("Publishing Node Birth")
# Create the node birth payload
payload = sparkplug.getNodeBirthPayload()
# Set up the Node Controls
addMetric(payload, "Node Control/Next Server", AliasMap.Next_Server, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Rebirth", AliasMap.Rebirth, MetricDataType.Boolean, False)
addMetric(payload, "Node Control/Reboot", AliasMap.Reboot, MetricDataType.Boolean, False)
# Publish the node birth certificate
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/NBIRTH/" + myNodeName, byteArray, 0, False)
######################################################################
######################################################################
# Publish the DBIRTH certificate
######################################################################
def publishDeviceBirth():
print ("Publishing Device Birth")
# Get the payload
payload = sparkplug.getDeviceBirthPayload()
# Add some device metrics
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload, "input/Device Metric0", AliasMap.Device_Metric0, MetricDataType.String, "hello device")
addMetric(payload, "input/Device Metric1", AliasMap.Device_Metric1, MetricDataType.Boolean, True)
addMetric(payload, "input/Number of Objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "output/Device Metric2", AliasMap.Device_Metric2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input1", AliasMap.Device_Input1, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input2", AliasMap.Device_Input2, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input3", AliasMap.Device_Input3, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input4", AliasMap.Device_Input4, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input5", AliasMap.Device_Input5, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input6", AliasMap.Device_Input6, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input7", AliasMap.Device_Input7, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input8", AliasMap.Device_Input8, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input9", AliasMap.Device_Input9, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Input10", AliasMap.Device_Input10, MetricDataType.Int16, 0)
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, 0)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, 0)
addMetric(payload, "output/Device Metric3", AliasMap.Device_Metric3, MetricDataType.Boolean, True)
addMetric(payload, "output/Device Metric4", AliasMap.Device_Metric4, MetricDataType.String, "start")
# Publish the initial data with the Device BIRTH certificate
totalByteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DBIRTH/" + myNodeName + "/" + myDeviceName, totalByteArray, 0, False)
######################################################################
######################################################################
def osd_sink_pad_buffer_probe(pad,info,u_data):
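    # Buffer probe on the OSD sink pad: walks the batch metadata attached by
    # nvinfer, tallies detections per class, and exposes the counts selected by
    # the Ignition inputs (newValue1..10) through Object1..Object10.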
global frame_numberx
global num_rectsx
global Object1
global Object2
global Object3
global Object4
global Object5
global Object6
global Object7
global Object8
global Object9
global Object10
#Intiallizing object counter with 0.
obj_counter = {
PGIE_CLASS_ID_TOOTHBRUSH:0,
PGIE_CLASS_ID_HAIR_DRYER:0,
PGIE_CLASS_ID_TEDDY_BEAR:0,
PGIE_CLASS_ID_SCISSORS:0,
PGIE_CLASS_ID_VASE:0,
PGIE_CLASS_ID_CLOCK:0,
PGIE_CLASS_ID_BOOK:0,
PGIE_CLASS_ID_REFRIGERATOR:0,
PGIE_CLASS_ID_SINK:0,
PGIE_CLASS_ID_TOASTER:0,
PGIE_CLASS_ID_OVEN:0,
PGIE_CLASS_ID_MICROWAVE:0,
PGIE_CLASS_ID_CELL_PHONE:0,
PGIE_CLASS_ID_KEYBOARD:0,
PGIE_CLASS_ID_REMOTE:0,
PGIE_CLASS_ID_MOUSE:0,
PGIE_CLASS_ID_LAPTOP:0,
PGIE_CLASS_ID_TVMONITOR:0,
PGIE_CLASS_ID_TOILET:0,
PGIE_CLASS_ID_DININGTABLE:0,
PGIE_CLASS_ID_BED:0,
PGIE_CLASS_ID_POTTEDPLANT:0,
PGIE_CLASS_ID_SOFA:0,
PGIE_CLASS_ID_CHAIR:0,
PGIE_CLASS_ID_CAKE:0,
PGIE_CLASS_ID_DONUT:0,
PGIE_CLASS_ID_PIZZA:0,
PGIE_CLASS_ID_HOT_DOG:0,
PGIE_CLASS_ID_CARROT:0,
PGIE_CLASS_ID_BROCCOLI:0,
PGIE_CLASS_ID_ORANGE:0,
PGIE_CLASS_ID_SANDWICH:0,
PGIE_CLASS_ID_APPLE:0,
PGIE_CLASS_ID_BANANA:0,
PGIE_CLASS_ID_BOWL:0,
PGIE_CLASS_ID_SPOON:0,
PGIE_CLASS_ID_KNIFE:0,
PGIE_CLASS_ID_FORK:0,
PGIE_CLASS_ID_CUP:0,
PGIE_CLASS_ID_WINE_GLASS:0,
PGIE_CLASS_ID_BOTTLE:0,
PGIE_CLASS_ID_TENNIS_RACKET:0,
PGIE_CLASS_ID_SURFBOARD:0,
PGIE_CLASS_ID_SKATEBOARD:0,
PGIE_CLASS_ID_BASEBALL_GLOVE:0,
PGIE_CLASS_ID_BASEBALL_BAT:0,
PGIE_CLASS_ID_KITE:0,
PGIE_CLASS_ID_SPORTS_BALL:0,
PGIE_CLASS_ID_SNOWBOARD:0,
PGIE_CLASS_ID_SKIS:0,
PGIE_CLASS_ID_FRISBEE:0,
PGIE_CLASS_ID_SUITCASE:0,
PGIE_CLASS_ID_TIE:0,
PGIE_CLASS_ID_HANDBAG:0,
PGIE_CLASS_ID_UMBRELLA:0,
PGIE_CLASS_ID_BACKPACK:0,
PGIE_CLASS_ID_GIRAFFE:0,
PGIE_CLASS_ID_ZEBRA:0,
PGIE_CLASS_ID_BEAR:0,
PGIE_CLASS_ID_ELEPHANT:0,
PGIE_CLASS_ID_COW:0,
PGIE_CLASS_ID_SHEEP:0,
PGIE_CLASS_ID_HORSE:0,
PGIE_CLASS_ID_DOG:0,
PGIE_CLASS_ID_CAT:0,
PGIE_CLASS_ID_BIRD:0,
PGIE_CLASS_ID_BENCH:0,
PGIE_CLASS_ID_PARKING_METER:0,
PGIE_CLASS_ID_STOP_SIGN:0,
PGIE_CLASS_ID_FIRE_HYDRANT:0,
PGIE_CLASS_ID_TRAFFIC_LIGHT:0,
PGIE_CLASS_ID_BOAT:0,
PGIE_CLASS_ID_TRUCK:0,
PGIE_CLASS_ID_TRAIN:0,
PGIE_CLASS_ID_BUS:0,
PGIE_CLASS_ID_AEROPLANE:0,
PGIE_CLASS_ID_MOTORBIKE:0,
PGIE_CLASS_ID_VEHICLE:0,
PGIE_CLASS_ID_BICYCLE:0,
PGIE_CLASS_ID_PERSON:0
}
num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.NvDsFrameMeta.cast()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number=frame_meta.frame_num
frame_numberx=frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
num_rectsx = frame_meta.num_obj_meta
l_obj=frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
obj_meta=pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
try:
l_obj=l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta=pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
        py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Cup_count={} Bottle_count={}".format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_CUP], obj_counter[PGIE_CLASS_ID_BOTTLE])
Object1 = obj_counter[newValue1]
Object2 = obj_counter[newValue2]
Object3 = obj_counter[newValue3]
Object4 = obj_counter[newValue4]
Object5 = obj_counter[newValue5]
Object6 = obj_counter[newValue6]
Object7 = obj_counter[newValue7]
Object8 = obj_counter[newValue8]
Object9 = obj_counter[newValue9]
Object10 = obj_counter[newValue10]
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
# print(pyds.get_string(py_nvosd_text_params.display_text))
#pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
try:
l_frame=l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
######################################################################
def main(args):
# Check input arguments
if len(args) != 2:
sys.stderr.write("usage: %s <v4l2-device-path>\n" % args[0])
sys.exit(1)
# Standard GStreamer initialization
GObject.threads_init()
Gst.init(None)
# Create gstreamer elements
# Create Pipeline element that will form a connection of other elements
print("Creating Pipeline \n ")
pipeline = Gst.Pipeline()
if not pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
# Source element for reading from the file
print("Creating Source \n ")
source = Gst.ElementFactory.make("v4l2src", "usb-cam-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
caps_v4l2src = Gst.ElementFactory.make("capsfilter", "v4l2src_caps")
if not caps_v4l2src:
sys.stderr.write(" Unable to create v4l2src capsfilter \n")
print("Creating Video Converter \n")
# Adding videoconvert -> nvvideoconvert as not all
# raw formats are supported by nvvideoconvert;
# Say YUYV is unsupported - which is the common
# raw format for many logi usb cams
# In case we have a camera with raw format supported in
# nvvideoconvert, GStreamer plugins' capability negotiation
# shall be intelligent enough to reduce compute by
# videoconvert doing passthrough (TODO we need to confirm this)
# videoconvert to make sure a superset of raw formats are supported
vidconvsrc = Gst.ElementFactory.make("videoconvert", "convertor_src1")
if not vidconvsrc:
sys.stderr.write(" Unable to create videoconvert \n")
# nvvideoconvert to convert incoming raw buffers to NVMM Mem (NvBufSurface API)
nvvidconvsrc = Gst.ElementFactory.make("nvvideoconvert", "convertor_src2")
if not nvvidconvsrc:
sys.stderr.write(" Unable to create Nvvideoconvert \n")
caps_vidconvsrc = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_vidconvsrc:
sys.stderr.write(" Unable to create capsfilter \n")
# Create nvstreammux instance to form batches from one or more sources.
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on camera's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
# Finally render the osd output
if is_aarch64():
transform = Gst.ElementFactory.make("nvegltransform", "nvegl-transform")
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", "nvvideo-renderer")
if not sink:
sys.stderr.write(" Unable to create egl sink \n")
print("Playing cam %s " %args[1])
caps_v4l2src.set_property('caps', Gst.Caps.from_string("video/x-raw, framerate=30/1"))
caps_vidconvsrc.set_property('caps', Gst.Caps.from_string("video/x-raw(memory:NVMM)"))
source.set_property('device', args[1])
streammux.set_property('width', 640)
streammux.set_property('height', 480)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "config_infer_primary_yoloV3.txt")
# Set sync = false to avoid late frame drops at the display-sink
sink.set_property('sync', False)
print("Adding elements to Pipeline \n")
pipeline.add(source)
pipeline.add(caps_v4l2src)
pipeline.add(vidconvsrc)
pipeline.add(nvvidconvsrc)
pipeline.add(caps_vidconvsrc)
pipeline.add(streammux)
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
if is_aarch64():
pipeline.add(transform)
# we link the elements together
# v4l2src -> nvvideoconvert -> mux ->
# nvinfer -> nvvideoconvert -> nvosd -> video-renderer
print("Linking elements in the Pipeline \n")
source.link(caps_v4l2src)
caps_v4l2src.link(vidconvsrc)
vidconvsrc.link(nvvidconvsrc)
nvvidconvsrc.link(caps_vidconvsrc)
sinkpad = streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = caps_vidconvsrc.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of caps_vidconvsrc \n")
srcpad.link(sinkpad)
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
if is_aarch64():
nvosd.link(transform)
transform.link(sink)
else:
nvosd.link(sink)
# create an event loop and feed gstreamer bus mesages to it
loop = GObject.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect ("message", bus_call, loop)
# Lets add probe to get informed of the meta data generated, we add probe to
# the sink pad of the osd element, since by that time, the buffer would have
# had got all the metadata.
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
######################################################################
# Create the node death payload
deathPayload = sparkplug.getNodeDeathPayload()
# Start of main program - Set up the MQTT client connection
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(myUsername, myPassword)
deathByteArray = bytearray(deathPayload.SerializeToString())
client.will_set("spBv1.0/" + myGroupId + "/NDEATH/" + myNodeName, deathByteArray, 0, False)
client.connect(serverUrl, 1883, 60)
# Publish the birth certificates
publishBirth()
def foo():
# Periodically publish some new data
payload = sparkplug.getDdataPayload()
# Add some random data to the inputs
addMetric(payload, "input/number of objects", AliasMap.Device_num_rectsx, MetricDataType.Int16, num_rectsx )
addMetric(payload, "input/Frame Number", AliasMap.Device_frame_numberx, MetricDataType.Int16, frame_numberx )
addMetric(payload,"input/Device Output1", AliasMap.Device_Output1, MetricDataType.Int16, Object1)
addMetric(payload, "input/Device Output2", AliasMap.Device_Output2, MetricDataType.Int16, Object2)
addMetric(payload, "input/Device Output3", AliasMap.Device_Output3, MetricDataType.Int16, Object3)
addMetric(payload, "input/Device Output4", AliasMap.Device_Output4, MetricDataType.Int16, Object4)
addMetric(payload, "input/Device Output5", AliasMap.Device_Output5, MetricDataType.Int16, Object5)
addMetric(payload, "input/Device Output6", AliasMap.Device_Output6, MetricDataType.Int16, Object6)
addMetric(payload, "input/Device Output7", AliasMap.Device_Output7, MetricDataType.Int16, Object7)
addMetric(payload, "input/Device Output8", AliasMap.Device_Output8, MetricDataType.Int16, Object8)
addMetric(payload, "input/Device Output9", AliasMap.Device_Output9, MetricDataType.Int16, Object9)
addMetric(payload, "input/Device Output10", AliasMap.Device_Output10, MetricDataType.Int16, Object10)
# Publish a message data
byteArray = bytearray(payload.SerializeToString())
client.publish("spBv1.0/" + myGroupId + "/DDATA/" + myNodeName + "/" + myDeviceName, byteArray, 0, False)
# Sit and wait for inbound or outbound events
for _ in range(1):
time.sleep(1)
client.loop()
threading.Timer(WAIT_SECONDS, foo).start()
foo()
######################################################################
print("Starting pipeline \n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except:
pass
#cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 46.410987 | 849 | 0.645829 | 761 | 0.016681 | 0 | 0 | 0 | 0 | 0 | 0 | 18,586 | 0.407391 |
6a9fcd8ecc089595a2ffc3a48b4ee67000ac218d | 799 | py | Python | src/pyams_i18n/tests/__init__.py | Py-AMS/pyams-i18n | dbb3953302311977653145385af02e4d1ae41431 | [
"ZPL-2.1"
] | null | null | null | src/pyams_i18n/tests/__init__.py | Py-AMS/pyams-i18n | dbb3953302311977653145385af02e4d1ae41431 | [
"ZPL-2.1"
] | null | null | null | src/pyams_i18n/tests/__init__.py | Py-AMS/pyams-i18n | dbb3953302311977653145385af02e4d1ae41431 | [
"ZPL-2.1"
] | null | null | null | #
# Copyright (c) 2015-2019 Thierry Florac <tflorac AT ulthar.net>
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
"""
Generic test cases for pyams_i18n doctests
"""
__docformat__ = 'restructuredtext'
import os
import sys
def get_package_dir(value):
"""Get package directory"""
package_dir = os.path.split(value)[0]
if package_dir not in sys.path:
sys.path.append(package_dir)
return package_dir
| 26.633333 | 75 | 0.740926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 572 | 0.715895 |
6a9fd697438b33b3c4d1fb77bd83291ab92dfcca | 1,811 | py | Python | tests/test_custom_experts.py | protagohhz/hivemind | 487fb68feea4d27ede0afdef211f6edc889b1a9e | [
"MIT"
] | 1,026 | 2020-06-04T05:02:30.000Z | 2022-03-29T13:05:04.000Z | tests/test_custom_experts.py | mryab/hivemind | 595b831bcaac6b4d8da215de70b8138ac548c562 | [
"MIT"
] | 301 | 2020-04-04T14:26:49.000Z | 2022-03-19T15:25:39.000Z | tests/test_custom_experts.py | learning-at-home/tesseract | c6b2b2d84ccfc890314a2bfece8eef238372d410 | [
"MIT"
] | 89 | 2020-04-16T19:39:29.000Z | 2022-03-25T17:32:43.000Z | import os
import pytest
import torch
from hivemind import RemoteExpert
from hivemind.moe.server import background_server
CUSTOM_EXPERTS_PATH = os.path.join(os.path.dirname(__file__), "test_utils", "custom_networks.py")
@pytest.mark.forked
def test_custom_expert(hid_dim=16):
with background_server(
expert_cls="perceptron",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = torch.randn(batch_size, hid_dim)
output0 = expert0(batch)
output1 = expert1(batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
@pytest.mark.forked
def test_multihead_expert(hid_dim=16):
with background_server(
expert_cls="multihead",
num_experts=2,
device="cpu",
hidden_dim=hid_dim,
num_handlers=2,
no_dht=True,
custom_module_path=CUSTOM_EXPERTS_PATH,
) as (server_endpoint, _):
expert0 = RemoteExpert("expert.0", server_endpoint)
expert1 = RemoteExpert("expert.1", server_endpoint)
for batch_size in (1, 4):
batch = (
torch.randn(batch_size, hid_dim),
torch.randn(batch_size, 2 * hid_dim),
torch.randn(batch_size, 3 * hid_dim),
)
output0 = expert0(*batch)
output1 = expert1(*batch)
loss = output0.sum()
loss.backward()
loss = output1.sum()
loss.backward()
| 27.439394 | 97 | 0.600773 | 0 | 0 | 0 | 0 | 1,583 | 0.874103 | 0 | 0 | 105 | 0.057979 |
6aa02482ee4345f8d62c98b8785e029ed85945dd | 1,639 | py | Python | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | null | null | null | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | null | null | null | tqsdk/demo/example/momentum.py | boyscout2008/tqsdk-python | 79496a938a44f79ea9164569637509d0cc7db70a | [
"Apache-2.0"
] | 1 | 2020-11-20T01:19:11.000Z | 2020-11-20T01:19:11.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
Price momentum strategy (difficulty: beginner)
Reference: https://www.shinnytech.com/blog/momentum-strategy/
Note: this example strategy is for feature demonstration only; adapt it to your own strategy/experience before trading live.
'''
from tqsdk import TqAccount, TqApi, TargetPosTask
# Set the target contract and fetch N K-lines to compute the price momentum
SYMBOL = "SHFE.au1912"
N = 15
api = TqApi()
klines = api.get_kline_serial(SYMBOL, 60*60*24, N)
quote = api.get_quote(SYMBOL)
target_pos = TargetPosTask(api, SYMBOL)
position = api.get_position(SYMBOL)
# Price momentum function AR: compute the momentum ar from the first N-1 daily K-lines
def AR(kline1):
spread_ho = sum(kline1.high[:-1] - kline1.open[:-1])
spread_oc = sum(kline1.open[:-1] - kline1.low[:-1])
    # if spread_oc is 0, fall back to the minimum price tick
if spread_oc == 0:
spread_oc = quote.price_tick
ar = (spread_ho/spread_oc)*100
return ar
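# Hand-worked illustration of AR (hypothetical numbers, not market data):
# if sum(high - open) = 30 and sum(open - low) = 20 over the first N-1 bars,
# AR returns (30 / 20) * 100 = 150.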
ar = AR(klines)
print("策略开始启动")
while True:
api.wait_update()
    # recompute the momentum value ar whenever a new K-line is generated
if api.is_changing(klines.iloc[-1], "datetime"):
ar = AR(klines)
print("价格动量是:", ar)
    # re-evaluate whenever the latest price changes
if api.is_changing(quote, "last_price"):
        # entry logic
if position.pos_long == 0 and position.pos_short == 0:
            # if ar is above 110 and below 150, open a long position
if 110 < ar < 150:
print("价值动量超过110,小于150,做多")
target_pos.set_target_volume(100)
            # if ar is above 50 and below 90, open a short position
elif 50 < ar < 90:
print("价值动量大于50,小于90,做空")
target_pos.set_target_volume(-100)
        # stop-loss: close a long position when ar falls below 90; close a short position when ar rises above 110
elif (position.pos_long > 0 and ar < 90) or (position.pos_short > 0 and ar > 110):
print("止损平仓")
target_pos.set_target_volume(0)
| 26.015873 | 90 | 0.621721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 910 | 0.43687 |
6aa1d7c9f54267d6e42717a153600f7e111a7f9f | 10,323 | py | Python | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | null | null | null | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | null | null | null | color_transfer/__init__.py | AdamSpannbauer/color_transfer | 155e0134615f35bf19bf32f4cacf056603604914 | [
"MIT"
] | 1 | 2020-11-05T17:35:14.000Z | 2020-11-05T17:35:14.000Z | # import the necessary packages
import numpy as np
import cv2
import imutils
def color_transfer(source, target, clip=True, preserve_paper=True):
"""
Transfers the color distribution from the source to the target
image using the mean and standard deviations of the L*a*b*
color space.
    This implementation is (loosely) based on the "Color Transfer
between Images" paper by Reinhard et al., 2001.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
clip: Should components of L*a*b* image be scaled by np.clip before
converting back to BGR color space?
If False then components will be min-max scaled appropriately.
Clipping will keep target image brightness truer to the input.
Scaling will adjust image brightness to avoid washed out portions
in the resulting color transfer that can be caused by clipping.
preserve_paper: Should color transfer strictly follow methodology
laid out in original paper? The method does not always produce
aesthetically pleasing results.
        If False then L*a*b* components will be scaled using the reciprocal of
the scaling factor proposed in the paper. This method seems to produce
more consistently aesthetically pleasing results
Returns:
-------
transfer: NumPy array
OpenCV image (w, h, 3) NumPy array (uint8)
"""
    # convert the images from the BGR to the L*a*b* color space, being
    # sure to utilize the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv2.cvtColor(source, cv2.COLOR_BGR2LAB).astype("float32")
target = cv2.cvtColor(target, cv2.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
(lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = image_stats(source)
(lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = image_stats(target)
# subtract the means from the target image
(l, a, b) = cv2.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
if preserve_paper:
# scale by the standard deviations using paper proposed factor
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
else:
# scale by the standard deviations using reciprocal of paper proposed factor
l = (lStdSrc / lStdTar) * l
a = (aStdSrc / aStdTar) * a
b = (bStdSrc / bStdTar) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip/scale the pixel intensities to [0, 255] if they fall
# outside this range
l = _scale_array(l, clip=clip)
a = _scale_array(a, clip=clip)
b = _scale_array(b, clip=clip)
    # merge the channels together and convert back to the BGR color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv2.merge([l, a, b])
transfer = cv2.cvtColor(transfer.astype("uint8"), cv2.COLOR_LAB2BGR)
# return the color transferred image
return transfer
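# Minimal usage sketch (the file names below are illustrative, not part of
# this module):
#
#   import cv2
#   source = cv2.imread("source.jpg")
#   target = cv2.imread("target.jpg")
#   result = color_transfer(source, target)
#   cv2.imwrite("result.jpg", result)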
def auto_color_transfer(source, target):
"""Pick color_transfer result truest to source image color
Applies color_transfer with all possible combinations of the clip & preserve_paper arguments.
    A chi-squared distance is computed between the HSV color histograms of each result and the source image.
    The best_result that minimizes this distance is returned, along with a montage of all candidate results.
Parameters:
-------
source: NumPy array
OpenCV image in BGR color space (the source image)
target: NumPy array
OpenCV image in BGR color space (the target image)
Returns:
-------
tuple: (best_result, comparison)
best_result: NumPy array
            result that minimizes the chi-squared histogram distance to the source image in HSV color space
comparison: NumPy array
image showing the results of all combinations of color_transfer options
"""
    # get HSV color histogram from the source image for comparison
hsv_source = cv2.cvtColor(source, cv2.COLOR_BGR2HSV)
hsv_hist_src = cv2.calcHist([hsv_source], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# iterate through all 4 options for toggling color transfer
bools = [True, False]
candidates = []
best_result = None
best_dist = float('inf')
for clip in bools:
for preserve_paper in bools:
# create candidate image from options of this iteration
candidate = color_transfer(source, target, clip, preserve_paper)
            # get HSV color histogram from the candidate image for comparison
hsv_candidate = cv2.cvtColor(candidate, cv2.COLOR_BGR2HSV)
hsv_hist_cand = cv2.calcHist([hsv_candidate], [0, 1, 2], None,
[8, 8, 8], [0, 256, 0, 256, 0, 256])
# calc chi square dist
chi2_dist = chi2_distance(hsv_hist_src, hsv_hist_cand)
            # keep this candidate as the truest result if it has the smallest distance so far
            if chi2_dist < best_dist:
                best_dist = chi2_dist
                best_result = candidate[:]
candidates.append(candidate)
# build 2 by 2 image matrix of all candidates for comparison
comparison = np.hstack((np.vstack(candidates[:2]),
np.vstack(candidates[2:])))
# add border annotations showing values of params for each output
comparison = _bool_matrix_border(comparison)
return best_result, comparison
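# Chi-squared histogram distance: 0.5 * sum((a - b)^2 / (a + b + eps));
# eps guards against division by zero for empty histogram bins.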
def chi2_distance(hist_a, hist_b, eps=1e-10):
return 0.5 * np.sum(((hist_a - hist_b) ** 2) / (hist_a + hist_b + eps))
def _bool_matrix_border(comparison_image):
"""Apply table formatting for comparison of color_transfer options
Parameters:
-------
target: NumPy array
OpenCV image in BGR color space (the comparison image produced in auto_color_transfer)
Returns:
-------
comparison: NumPy array
OpenCV image in BGR color space with borders applied to easily compare the different
results of the auto_color_transfer
"""
# 200 seems to work well as border size
border_size = 200
# put black border on top and left of input image
h, w = comparison_image.shape[:2]
top = np.zeros(w * border_size, dtype='uint8').reshape(border_size, w)
left = np.zeros((h + border_size) * border_size, dtype='uint8').reshape(h + border_size, border_size)
top = cv2.cvtColor(top, cv2.COLOR_GRAY2BGR)
left = cv2.cvtColor(left, cv2.COLOR_GRAY2BGR)
bordered_comparison_image = np.vstack((top, comparison_image))
bordered_comparison_image = np.hstack((left, bordered_comparison_image))
# add text for clip arg options to top border
top_title_loc = (border_size, 75)
top_true_loc = (border_size, 190)
top_false_loc = (int(border_size + w / 2), 190)
cv2.putText(bordered_comparison_image, 'Clip', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate 90 degrees for writing text to left border
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, 90)
# add text for preserve paper arg options to left border
top_title_loc = (5, 75)
top_true_loc = (5 + int(h / 2), 190)
top_false_loc = (5, 190)
cv2.putText(bordered_comparison_image, 'Preserve Paper', top_title_loc,
cv2.FONT_HERSHEY_SIMPLEX, 3, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'True', top_true_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
cv2.putText(bordered_comparison_image, 'False', top_false_loc,
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
# rotate -90 degrees to return image in correct orientation
bordered_comparison_image = imutils.rotate_bound(bordered_comparison_image, -90)
return bordered_comparison_image
def image_stats(image):
"""
Parameters:
-------
image: NumPy array
OpenCV image in L*a*b* color space
Returns:
-------
Tuple of mean and standard deviations for the L*, a*, and b*
channels, respectively
"""
# compute the mean and standard deviation of each channel
(l, a, b) = cv2.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return lMean, lStd, aMean, aStd, bMean, bStd
def _min_max_scale(arr, new_range=(0, 255)):
"""
Perform min-max scaling to a NumPy array
Parameters:
-------
arr: NumPy array to be scaled to [new_min, new_max] range
new_range: tuple of form (min, max) specifying range of
transformed array
Returns:
-------
NumPy array that has been scaled to be in
[new_range[0], new_range[1]] range
"""
# get array's current min and max
mn = arr.min()
mx = arr.max()
# check if scaling needs to be done to be in new_range
if mn < new_range[0] or mx > new_range[1]:
# perform min-max scaling
scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]
else:
# return array if already in range
scaled = arr
return scaled
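# Hand-checked example: _min_max_scale(np.array([-10., 310.])) maps the input
# endpoints onto the default range, returning array([0., 255.]).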
def _scale_array(arr, clip=True):
"""
Trim NumPy array values to be in [0, 255] range with option of
clipping or scaling.
Parameters:
-------
arr: array to be trimmed to [0, 255] range
clip: should array be scaled by np.clip? if False then input
array will be min-max scaled to range
[max([arr.min(), 0]), min([arr.max(), 255])]
Returns:
-------
NumPy array that has been scaled to be in [0, 255] range
"""
if clip:
scaled = np.clip(arr, 0, 255)
else:
scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))
scaled = _min_max_scale(arr, new_range=scale_range)
return scaled
| 36.477032 | 105 | 0.657173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,400 | 0.523104 |
6aa21222a53d441e6c157bad6965004f0771b6e4 | 250 | py | Python | Python/Tree/TestCreateTreeLibraryImport.py | zseen/hackerrank-challenges | c154f039f58073ee3d94d012462c7055e68784b2 | [
"MIT"
] | null | null | null | Python/Tree/TestCreateTreeLibraryImport.py | zseen/hackerrank-challenges | c154f039f58073ee3d94d012462c7055e68784b2 | [
"MIT"
] | null | null | null | Python/Tree/TestCreateTreeLibraryImport.py | zseen/hackerrank-challenges | c154f039f58073ee3d94d012462c7055e68784b2 | [
"MIT"
] | null | null | null | from Library.CreateATree import CreateATree
tree = CreateATree.BinarySearchTree()
nodesList = list((4, 5, 1, 3, 2))
for i in range(0, len(nodesList)):
tree.insert(nodesList[i])
#tree.printInorder()
tree.printPreorder()
#tree.printPostorder()
| 19.230769 | 43 | 0.732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.168 |
6aa25f7e4d64679c81ca1e60dffb6ddf922f9c4c | 522 | py | Python | application/siteApp/urls.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | application/siteApp/urls.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | application/siteApp/urls.py | Marcelotsvaz/vaz-projects | 8ccc0bf8d25f9276714e1e5ecb0a4e80f07442b4 | [
"Unlicense"
] | null | null | null | #
# VAZ Projects
#
#
# Author: Marcelo Tellier Sartori Vaz <[email protected]>
from django.urls import path
from . import views
app_name = 'siteApp'
urlpatterns = [
path( '', views.Home.as_view(), name = 'home' ),
path( 'about-me', views.About_me.as_view(), name = 'about_me' ),
path( 'search', views.Search.as_view(), name = 'search' ),
path( 'search/page/<int:page>', views.Search.as_view(), name = 'search' ),
path( 'sitemap.xml', views.Sitemap.as_view(), name = 'sitemap' ),
] | 23.727273 | 77 | 0.628352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.362069 |
6aa2b3de5b891e225cac65fc5b3ebe31165e5ef6 | 63 | py | Python | svd/core/exc.py | epicosy/svd | baa91f57ee5bd51b0140d9d0b1b97ce39f18acc4 | [
"MIT"
] | null | null | null | svd/core/exc.py | epicosy/svd | baa91f57ee5bd51b0140d9d0b1b97ce39f18acc4 | [
"MIT"
] | null | null | null | svd/core/exc.py | epicosy/svd | baa91f57ee5bd51b0140d9d0b1b97ce39f18acc4 | [
"MIT"
] | null | null | null |
class SVDError(Exception):
"""Generic errors."""
pass
| 12.6 | 26 | 0.619048 | 61 | 0.968254 | 0 | 0 | 0 | 0 | 0 | 0 | 21 | 0.333333 |
6aa2cafc9ca0f9283336142e3b81fea44a3587b3 | 1,286 | py | Python | Classes/ServiceBase.py | tkeske/SMS-Fetcher | 7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2 | [
"BSD-3-Clause"
] | null | null | null | Classes/ServiceBase.py | tkeske/SMS-Fetcher | 7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2 | [
"BSD-3-Clause"
] | null | null | null | Classes/ServiceBase.py | tkeske/SMS-Fetcher | 7b3ec0ea4517ad11164b8e2d7ee2c60d2a9f0ed2 | [
"BSD-3-Clause"
] | null | null | null | '''
@author Tomáš Keske
@since 10.8.2019
'''
import sys
from jnius import autoclass
from Conf.Conf import *
class ServiceBase():
def __init__(self):
PythonServiceClass = autoclass('org.kivy.android.PythonService')
self.Context = autoclass('android.content.Context')
self.Service = PythonServiceClass.mService
        #set autorestart to be immune to task swiping on Android 9
self.Service.setAutoRestartService(True)
self.confDict = {k: v for k,v in globals().items() if k.isupper() and k.startswith("SMS")}
        for k, v in self.confDict.items():
setattr(self, k, v)
def killGeneric(self, error):
print(repr(error))
        self.Service.setAutoRestartService(False)
print("Autorestart of the service disabled.")
print("Attempting to kill service permanently.")
        self.Service.stop()
#service takes time to stop. flow thus continues to next block of code
#sys.exit() is to prevent subsequent code from execution
        #both calls are necessary to avoid "Scheduling restart of crashed service process"
#in case we called only sys.exit()
#this applies even if we have setAutoRestartService(False)
print("Exiting python script")
sys.exit() | 32.15 | 98 | 0.667963 | 1,176 | 0.913043 | 0 | 0 | 0 | 0 | 0 | 0 | 567 | 0.440217 |
6aa359b860399eb8f9859835f9d9ac0f53b4de56 | 723 | py | Python | api/queue/__init__.py | sofia008/api-redis-queue | 8d65665c8a9f44990565baa8c7ba43d7f01425d3 | [
"Apache-2.0"
] | null | null | null | api/queue/__init__.py | sofia008/api-redis-queue | 8d65665c8a9f44990565baa8c7ba43d7f01425d3 | [
"Apache-2.0"
] | null | null | null | api/queue/__init__.py | sofia008/api-redis-queue | 8d65665c8a9f44990565baa8c7ba43d7f01425d3 | [
"Apache-2.0"
] | null | null | null | # api/queue/__init__.py
import os
from flask import Flask
from flask_bootstrap import Bootstrap
# instantiate the extensions
bootstrap = Bootstrap()
def create_app(script_info=None):
# instantiate the app
app = Flask(
__name__,
template_folder="../client/templates",
static_folder="../client/static",
)
# set config
app_settings = os.getenv("APP_SETTINGS")
app.config.from_object(app_settings)
# set up extensions
bootstrap.init_app(app)
# register blueprints
from api.queue.push.views import main_blueprint
app.register_blueprint(main_blueprint)
# shell context for flask cli
app.shell_context_processor({"app": app})
return app
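# A factory like this is typically discovered by the Flask CLI; assuming this
# package is importable as "api.queue", setting FLASK_APP=api.queue lets
# "flask run" locate and call create_app() automatically.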
| 19.026316 | 51 | 0.697095 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.29184 |
6aa447f55a379751c7664d4eb5818450b99462c4 | 2,183 | py | Python | tests/test_engine.py | Foxboron/python-adblock | 50b2ddba9f7b237b38c848c7d4a1637917444924 | [
"Apache-2.0",
"MIT"
] | 35 | 2020-06-26T21:06:13.000Z | 2022-03-19T10:50:35.000Z | tests/test_engine.py | Foxboron/python-adblock | 50b2ddba9f7b237b38c848c7d4a1637917444924 | [
"Apache-2.0",
"MIT"
] | 34 | 2020-04-27T02:59:40.000Z | 2022-03-06T20:55:00.000Z | tests/test_engine.py | Foxboron/python-adblock | 50b2ddba9f7b237b38c848c7d4a1637917444924 | [
"Apache-2.0",
"MIT"
] | 6 | 2020-12-22T21:56:02.000Z | 2022-02-16T02:13:21.000Z | import adblock
import pytest
SMALL_FILTER_LIST = """
||wikipedia.org^
||old.reddit.com^
||lobste.rs^
"""
def empty_engine():
return adblock.Engine(adblock.FilterSet())
def test_engine_creation_and_blocking():
filter_set = adblock.FilterSet(debug=True)
filter_set.add_filter_list(SMALL_FILTER_LIST)
engine = adblock.Engine(filter_set=filter_set)
blocker_result_wikipedia = engine.check_network_urls(
url="https://wikipedia.org/img.png",
source_url="https://google.com/",
request_type="image",
)
assert isinstance(blocker_result_wikipedia, adblock.BlockerResult)
assert blocker_result_wikipedia.matched
blocker_result_facebook = engine.check_network_urls(
"https://facebook.com/directory/img.png",
"https://old.reddit.com/r/all",
"image",
)
assert isinstance(blocker_result_facebook, adblock.BlockerResult)
assert not blocker_result_facebook.matched
def test_serde_file(tmpdir):
path = str(tmpdir / "cache.dat")
engine0 = empty_engine()
with pytest.raises(FileNotFoundError):
# We haven't created the cache.dat file, so we should get an exception
# when attempting to deserialize.
engine0.deserialize_from_file(path)
engine1 = empty_engine()
serialization_result = engine1.serialize_to_file(path)
assert serialization_result is None
engine2 = empty_engine()
deserialization_result = engine2.deserialize_from_file(path)
assert deserialization_result is None
def test_deserialize_corrupt(tmpdir):
path = str(tmpdir / "corrupt_cache.dat")
with open(path, "w", encoding="utf-8") as f:
f.write("abc")
engine = empty_engine()
with pytest.raises(adblock.DeserializationError):
engine.deserialize_from_file(path)
with pytest.raises(adblock.DeserializationError):
engine.deserialize(b"abc")
def test_serde():
engine = empty_engine()
serialization_result = engine.serialize()
assert isinstance(serialization_result, bytes)
engine2 = empty_engine()
deserialization_result = engine2.deserialize(serialization_result)
assert deserialization_result is None
| 29.106667 | 78 | 0.724233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.158039 |
6aa4b1f3d6675f767aaa7329c04a4c62bcde0e63 | 232 | py | Python | v1/status_updates/urls.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 94 | 2020-07-12T23:08:47.000Z | 2022-03-05T14:00:01.000Z | v1/status_updates/urls.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 84 | 2020-07-13T23:30:50.000Z | 2022-03-15T15:47:46.000Z | v1/status_updates/urls.py | DucPhamTV/Bank | 4905ec7d63ef4daafe2119bf6b32928d4db2d4f2 | [
"MIT"
] | 63 | 2020-07-13T02:46:51.000Z | 2021-11-26T09:29:29.000Z | from rest_framework.routers import SimpleRouter
from .views.upgrade_notice import UpgradeNoticeViewSet
router = SimpleRouter(trailing_slash=False)
router.register('upgrade_notice', UpgradeNoticeViewSet, basename='upgrade_notice')
| 33.142857 | 82 | 0.857759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.137931 |
6aa55947380a65f7c24093ff3b3feee2ac3b5948 | 1,048 | py | Python | data_structures/stack/largest_rectangle_area_in_histogram.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 1,723 | 2019-07-30T07:06:22.000Z | 2022-03-31T15:22:22.000Z | data_structures/stack/largest_rectangle_area_in_histogram.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 213 | 2019-10-06T08:07:47.000Z | 2021-10-04T15:38:36.000Z | data_structures/stack/largest_rectangle_area_in_histogram.py | ruler30cm/python-ds | f84605c5b746ea1d46de3d00b86f5fba399445c7 | [
"MIT"
] | 628 | 2019-10-06T10:26:25.000Z | 2022-03-31T01:41:00.000Z | '''
Largest rectangle area in a histogram::
Find the largest rectangular area possible in a given histogram where the largest rectangle can be made of a number of contiguous bars.
For simplicity, assume that all bars have the same width and that the width is 1 unit.
'''
def max_area_histogram(histogram):
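    # Classic stack-based O(n) approach: maintain indices of bars with
    # non-decreasing heights; when a shorter bar arrives, pop and compute the
    # area for which the popped bar is the limiting height.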
stack = list()
max_area = 0 # Initialize max area
index = 0
while index < len(histogram):
if (not stack) or (histogram[stack[-1]] <= histogram[index]):
stack.append(index)
index += 1
else:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
while stack:
top_of_stack = stack.pop()
area = (histogram[top_of_stack] * ((index - stack[-1] - 1) if stack else index))
max_area = max(max_area, area)
return max_area
hist = [4, 7, 1, 8, 4, 9, 5]
print("Maximum area is",
max_area_histogram(hist))
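# Hand-checked: for the sample histogram above this prints 16
# (bars 8, 4, 9, 5 -> limiting height 4 over width 4).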
| 28.324324 | 136 | 0.603053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 301 | 0.287214 |
6aa62343269180c72e1026d8bfdc9d3fa9196b1e | 7,448 | py | Python | gluon/contrib/pbkdf2_ctypes.py | Cwlowe/web2py | 6ae4c3c274be1026cbc45b0fcd8d1180c74b9070 | [
"BSD-3-Clause"
] | 9 | 2018-04-19T05:08:30.000Z | 2021-11-23T07:36:58.000Z | gluon/contrib/pbkdf2_ctypes.py | mohit3011/Quiz-Mate | 17988a623abde439aef2b43fc8dc3162b5cae15e | [
"BSD-3-Clause"
] | 98 | 2017-11-02T19:00:44.000Z | 2022-03-22T16:15:39.000Z | gluon/contrib/pbkdf2_ctypes.py | mohit3011/Quiz-Mate | 17988a623abde439aef2b43fc8dc3162b5cae15e | [
"BSD-3-Clause"
] | 9 | 2017-10-24T21:53:36.000Z | 2021-11-23T07:36:59.000Z | # -*- coding: utf-8 -*-
"""
pbkdf2_ctypes
~~~~~~
Fast pbkdf2.
This module implements pbkdf2 for Python using crypto lib from
openssl or commoncrypto.
Note: This module is intended as a plugin replacement of pbkdf2.py
by Armin Ronacher.
Git repository:
$ git clone https://github.com/michele-comitini/pbkdf2_ctypes.git
:copyright: Copyright (c) 2013: Michele Comitini <[email protected]>
:license: LGPLv3
"""
import ctypes
import ctypes.util
import hashlib
import platform
import os.path
import binascii
import sys
__all__ = ['pkcs5_pbkdf2_hmac', 'pbkdf2_bin', 'pbkdf2_hex']
__version__ = '0.99.3'
def _commoncrypto_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.sha1: 1,
hashlib.sha224: 2,
hashlib.sha256: 3,
hashlib.sha384: 4,
hashlib.sha512: 5}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
        raise ValueError('Unknown digest %s' % hashfunc)
return crypto_hashfunc
def _commoncrypto_pbkdf2(data, salt, iterations, digest, keylen):
"""Common Crypto compatibile wrapper
"""
c_hashfunc = ctypes.c_uint32(_commoncrypto_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_size_t(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_size_t(len(salt))
c_iter = ctypes.c_uint(iterations)
c_keylen = ctypes.c_size_t(keylen)
c_buff = ctypes.create_string_buffer(keylen)
crypto.CCKeyDerivationPBKDF.restype = ctypes.c_int
crypto.CCKeyDerivationPBKDF.argtypes = [ctypes.c_uint32,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_char_p,
ctypes.c_size_t,
ctypes.c_uint32,
ctypes.c_uint,
ctypes.c_char_p,
ctypes.c_size_t]
ret = crypto.CCKeyDerivationPBKDF(2, # hardcoded 2-> PBKDF2
c_pass, c_passlen,
c_salt, c_saltlen,
c_hashfunc,
c_iter,
c_buff,
c_keylen)
return (1 - ret, c_buff)
def _openssl_hashlib_to_crypto_map_get(hashfunc):
hashlib_to_crypto_map = {hashlib.md5: crypto.EVP_md5,
hashlib.sha1: crypto.EVP_sha1,
hashlib.sha256: crypto.EVP_sha256,
hashlib.sha224: crypto.EVP_sha224,
hashlib.sha384: crypto.EVP_sha384,
hashlib.sha512: crypto.EVP_sha512}
crypto_hashfunc = hashlib_to_crypto_map.get(hashfunc)
if crypto_hashfunc is None:
        raise ValueError('Unknown digest %s' % hashfunc)
crypto_hashfunc.restype = ctypes.c_void_p
return crypto_hashfunc()
def _openssl_pbkdf2(data, salt, iterations, digest, keylen):
"""OpenSSL compatibile wrapper
"""
c_hashfunc = ctypes.c_void_p(_openssl_hashlib_to_crypto_map_get(digest))
c_pass = ctypes.c_char_p(data)
c_passlen = ctypes.c_int(len(data))
c_salt = ctypes.c_char_p(salt)
c_saltlen = ctypes.c_int(len(salt))
c_iter = ctypes.c_int(iterations)
c_keylen = ctypes.c_int(keylen)
c_buff = ctypes.create_string_buffer(keylen)
# PKCS5_PBKDF2_HMAC(const char *pass, int passlen,
# const unsigned char *salt, int saltlen, int iter,
# const EVP_MD *digest,
# int keylen, unsigned char *out);
crypto.PKCS5_PBKDF2_HMAC.argtypes = [ctypes.c_char_p, ctypes.c_int,
ctypes.c_char_p, ctypes.c_int,
ctypes.c_int, ctypes.c_void_p,
ctypes.c_int, ctypes.c_char_p]
crypto.PKCS5_PBKDF2_HMAC.restype = ctypes.c_int
err = crypto.PKCS5_PBKDF2_HMAC(c_pass, c_passlen,
c_salt, c_saltlen,
c_iter,
c_hashfunc,
c_keylen,
c_buff)
return (err, c_buff)
try: # check that we have proper OpenSSL or Common Crypto on the system.
system = platform.system()
if system == 'Windows':
if platform.architecture()[0] == '64bit':
libname = ctypes.util.find_library('libeay64')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(libname)
else:
libname = ctypes.util.find_library('libeay32')
if not libname:
raise OSError('Library libeay32 not found.')
crypto = ctypes.CDLL(libname)
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
elif system == 'Darwin': # think different(TM)! i.e. break things!
if [int(x) for x in platform.mac_ver()[0].split('.')] < [10, 7, 0]:
raise OSError('OS X Version too old %s < 10.7.0' % platform.mac_ver()[0])
libname = ctypes.util.find_library('System')
if not libname:
raise OSError('Library not found')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _commoncrypto_pbkdf2
else:
libname = ctypes.util.find_library('crypto')
if not libname:
raise OSError('Library crypto not found.')
crypto = ctypes.CDLL(os.path.basename(libname))
_pbkdf2_hmac = _openssl_pbkdf2
crypto.PKCS5_PBKDF2_HMAC # test compatibility
except (OSError, AttributeError):
_, e, _ = sys.exc_info()
raise ImportError('Cannot find a compatible cryptographic library '
'on your system. %s' % e)
def pkcs5_pbkdf2_hmac(data, salt, iterations=1000, keylen=24, hashfunc=None):
if hashfunc is None:
hashfunc = hashlib.sha1
err, c_buff = _pbkdf2_hmac(data, salt, iterations, hashfunc, keylen)
if err == 0:
raise ValueError('wrong parameters')
return c_buff.raw[:keylen]
def pbkdf2_hex(data, salt, iterations=1000, keylen=24, hashfunc=None):
return binascii.hexlify(pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc))
def pbkdf2_bin(data, salt, iterations=1000, keylen=24, hashfunc=None):
return pkcs5_pbkdf2_hmac(data, salt, iterations, keylen, hashfunc)
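# Minimal usage sketch (bytes inputs assumed; a 24-byte derived key hexlifies
# to 48 characters):
#   pbkdf2_hex(b'secret', b'salt', iterations=1000, keylen=24)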
if __name__ == '__main__':
try:
crypto.SSLeay_version.restype = ctypes.c_char_p
print(crypto.SSLeay_version(0))
except:
pass
import platform
if platform.python_version_tuple() < ('3', '0', '0'):
def bytes(*args):
return str(args[0])
for h in [hashlib.sha1, hashlib.sha224, hashlib.sha256,
hashlib.sha384, hashlib.sha512]:
print(binascii.hexlify(pkcs5_pbkdf2_hmac(bytes('secret', 'utf-8') * 11,
bytes('salt', 'utf-8'),
hashfunc=h)))
| 38.194872 | 88 | 0.569683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,354 | 0.181794 |
6aa72ed7ab8eb40be3928ae652b97a0368992b42 | 2,389 | py | Python | auth_backend/src/key_op.py | cispa/bitahoy | ffc2004930a033cfb94d13671bc6068b473ce226 | [
"MIT"
] | null | null | null | auth_backend/src/key_op.py | cispa/bitahoy | ffc2004930a033cfb94d13671bc6068b473ce226 | [
"MIT"
] | null | null | null | auth_backend/src/key_op.py | cispa/bitahoy | ffc2004930a033cfb94d13671bc6068b473ce226 | [
"MIT"
] | 2 | 2021-12-30T16:48:15.000Z | 2022-01-14T14:21:15.000Z | import sys
import os
import psycopg2
import base64
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa
from cryptography.hazmat.backends import default_backend
import time
if len(sys.argv) < 2:
print("Please enter either create or remove as a argv[1]")
sys.exit(0)
with psycopg2.connect("dbname='auth_db' user='auth_db' host='authdb' [redacted-2]") as conn:
with conn.cursor() as cursor:
if sys.argv[1] == "generate":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated!")
elif sys.argv[1] == "generate_if_needed":
#Load the key or generate a new one:
cursor.execute("CREATE TABLE IF NOT EXISTS key (key varchar(4096),time bigint UNIQUE PRIMARY KEY)")
cursor.execute("SELECT * FROM key")
res = cursor.fetchall()
if len(res) == 0:
privkey = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
pem = privkey.private_bytes(encoding=serialization.Encoding.PEM,format=serialization.PrivateFormat.TraditionalOpenSSL,encryption_algorithm=serialization.NoEncryption())
cursor.execute("INSERT INTO key (key,time) VALUES('"+str(pem.decode("utf-8"))+"',"+str(int(time.time()))+")")
conn.commit()
print("New key generated, as database was empty!")
else:
print("Database has key ready!")
elif sys.argv[1] == "drop":
cursor.execute("DROP TABLE key")
conn.commit()
print("Dropped old keys")
else:
print("Invalid option! Try 'drop', 'generate' or 'generate_if_needed'...") | 45.942308 | 184 | 0.637505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 695 | 0.290917 |
6aa73c2ad6102f4cce8d4e90a98090c3f6fe5363 | 342 | py | Python | src/tools/types/obj.py | loongson-zn/build | d4bedebfa046b763c316e31c98b48ed2779741b9 | [
"BSL-1.0"
] | 215 | 2015-01-10T17:16:34.000Z | 2022-02-23T15:22:08.000Z | src/tools/types/obj.py | loongson-zn/build | d4bedebfa046b763c316e31c98b48ed2779741b9 | [
"BSL-1.0"
] | 594 | 2015-01-22T16:17:55.000Z | 2022-02-26T22:11:01.000Z | src/tools/types/obj.py | loongson-zn/build | d4bedebfa046b763c316e31c98b48ed2779741b9 | [
"BSL-1.0"
] | 302 | 2015-02-03T01:20:29.000Z | 2022-02-12T07:01:28.000Z | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE.txt or copy at https://www.bfgroup.xyz/b2/LICENSE.txt)
from b2.build import type
def register ():
type.register_type ('OBJ', ['obj'], None, ['NT', 'CYGWIN'])
type.register_type ('OBJ', ['o'])
register ()
| 28.5 | 69 | 0.692982 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 209 | 0.611111 |
6aa7fd8436efabe5593a8174e9772f897fb7aec0 | 4,465 | py | Python | sympy/polys/tests/test_sqfreetools.py | eriknw/sympy | b7544e2bb74c011f6098a7e886fd77f41776c2c4 | [
"BSD-3-Clause"
] | 7 | 2015-01-14T06:55:33.000Z | 2018-08-11T14:43:52.000Z | sympy/polys/tests/test_sqfreetools.py | pbeltran/sympy-1 | 94f92b36731c2bebe6de1037c063c2a258a8a399 | [
"BSD-3-Clause"
] | 1 | 2018-02-19T04:56:04.000Z | 2018-02-19T04:56:04.000Z | sympy/polys/tests/test_sqfreetools.py | pbeltran/sympy-1 | 94f92b36731c2bebe6de1037c063c2a258a8a399 | [
"BSD-3-Clause"
] | 1 | 2016-04-24T14:39:22.000Z | 2016-04-24T14:39:22.000Z | """Tests for square-free decomposition algorithms and related tools. """
from sympy.polys.rings import ring
from sympy.polys.domains import FF, ZZ, QQ
from sympy.polys.polyclasses import DMP
from sympy.polys.specialpolys import f_polys
from sympy.utilities.pytest import raises
f_0, f_1, f_2, f_3, f_4, f_5, f_6 = f_polys()
def test_dup_sqf():
R, x = ring("x", ZZ)
assert R.dup_sqf_part(0) == 0
assert R.dup_sqf_p(0) is True
assert R.dup_sqf_part(7) == 1
assert R.dup_sqf_p(7) is True
assert R.dup_sqf_part(2*x + 2) == x + 1
assert R.dup_sqf_p(2*x + 2) is True
assert R.dup_sqf_part(x**3 + x + 1) == x**3 + x + 1
assert R.dup_sqf_p(x**3 + x + 1) is True
assert R.dup_sqf_part(-x**3 + x + 1) == x**3 - x - 1
assert R.dup_sqf_p(-x**3 + x + 1) is True
assert R.dup_sqf_part(2*x**3 + 3*x**2) == 2*x**2 + 3*x
assert R.dup_sqf_p(2*x**3 + 3*x**2) is False
assert R.dup_sqf_part(-2*x**3 + 3*x**2) == 2*x**2 - 3*x
assert R.dup_sqf_p(-2*x**3 + 3*x**2) is False
assert R.dup_sqf_list(0) == (0, [])
assert R.dup_sqf_list(1) == (1, [])
assert R.dup_sqf_list(x) == (1, [(x, 1)])
assert R.dup_sqf_list(2*x**2) == (2, [(x, 2)])
assert R.dup_sqf_list(3*x**3) == (3, [(x, 3)])
assert R.dup_sqf_list(-x**5 + x**4 + x - 1) == \
(-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dup_sqf_list(x**8 + 6*x**6 + 12*x**4 + 8*x**2) == \
( 1, [(x, 2), (x**2 + 2, 3)])
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", QQ)
assert R.dup_sqf_list(2*x**2 + 4*x + 2) == (2, [(x + 1, 2)])
R, x = ring("x", FF(2))
assert R.dup_sqf_list(x**2 + 1) == (1, [(x + 1, 2)])
R, x = ring("x", FF(3))
assert R.dup_sqf_list(x**10 + 2*x**7 + 2*x**4 + x) == \
(1, [(x, 1),
(x + 1, 3),
(x + 2, 6)])
R1, x = ring("x", ZZ)
R2, y = ring("y", FF(3))
f = x**3 + 1
g = y**3 + 1
assert R1.dup_sqf_part(f) == f
assert R2.dup_sqf_part(g) == y + 1
assert R1.dup_sqf_p(f) is True
assert R2.dup_sqf_p(g) is False
R, x, y = ring("x,y", ZZ)
A = x**4 - 3*x**2 + 6
D = x**6 - 5*x**4 + 5*x**2 + 4
f, g = D, R.dmp_sub(A, R.dmp_mul(R.dmp_diff(D, 1), y))
res = R.dmp_resultant(f, g)
h = (4*y**2 + 1).drop(x)
assert R.drop(x).dup_sqf_list(res) == (45796, [(h, 3)])
R, x = ring("x", ZZ["t"])
assert R.dup_sqf_list_include(DMP([1, 0, 0, 0], ZZ)*x**2) == \
[(DMP([1, 0, 0, 0], ZZ), 1), (DMP([1], ZZ)*x, 2)]
def test_dmp_sqf():
R, x, y = ring("x,y", ZZ)
assert R.dmp_sqf_part(0) == 0
assert R.dmp_sqf_p(0) is True
assert R.dmp_sqf_part(7) == 1
assert R.dmp_sqf_p(7) is True
assert R.dmp_sqf_list(3) == (3, [])
assert R.dmp_sqf_list_include(3) == [(3, 1)]
R, x, y, z = ring("x,y,z", ZZ)
assert R.dmp_sqf_p(f_0) is True
assert R.dmp_sqf_p(f_0**2) is False
assert R.dmp_sqf_p(f_1) is True
assert R.dmp_sqf_p(f_1**2) is False
assert R.dmp_sqf_p(f_2) is True
assert R.dmp_sqf_p(f_2**2) is False
assert R.dmp_sqf_p(f_3) is True
assert R.dmp_sqf_p(f_3**2) is False
assert R.dmp_sqf_p(f_5) is False
assert R.dmp_sqf_p(f_5**2) is False
assert R.dmp_sqf_p(f_4) is True
assert R.dmp_sqf_part(f_4) == -f_4
assert R.dmp_sqf_part(f_5) == x + y - z
R, x, y, z, t = ring("x,y,z,t", ZZ)
assert R.dmp_sqf_p(f_6) is True
assert R.dmp_sqf_part(f_6) == f_6
R, x = ring("x", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
R, x, y = ring("x,y", ZZ)
f = -x**5 + x**4 + x - 1
assert R.dmp_sqf_list(f) == (-1, [(x**3 + x**2 + x + 1, 1), (x - 1, 2)])
assert R.dmp_sqf_list_include(f) == [(-x**3 - x**2 - x - 1, 1), (x - 1, 2)]
f = -x**2 + 2*x - 1
assert R.dmp_sqf_list_include(f) == [(-1, 1), (x - 1, 2)]
R, x, y = ring("x,y", FF(2))
raises(NotImplementedError, lambda: R.dmp_sqf_list(y**2 + 1))
def test_dup_gff_list():
R, x = ring("x", ZZ)
f = x**5 + 2*x**4 - x**3 - 2*x**2
assert R.dup_gff_list(f) == [(x, 1), (x + 2, 4)]
g = x**9 - 20*x**8 + 166*x**7 - 744*x**6 + 1965*x**5 - 3132*x**4 + 2948*x**3 - 1504*x**2 + 320*x
assert R.dup_gff_list(g) == [(x**2 - 5*x + 4, 1), (x**2 - 5*x + 4, 2), (x, 3)]
raises(ValueError, lambda: R.dup_gff_list(0))
| 29.569536 | 100 | 0.519821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 138 | 0.030907 |
6aa848925fe885025486d711e7226e473656a954 | 1,377 | py | Python | ezno_convert/enums.py | ofersadan85/ezno_convert | 4c5cf7d41c72698e5486068673f170d968a9de27 | [
"MIT"
] | 2 | 2021-02-07T21:27:04.000Z | 2021-03-13T06:47:25.000Z | ezno_convert/enums.py | ofersadan85/ezno_convert | 4c5cf7d41c72698e5486068673f170d968a9de27 | [
"MIT"
] | 1 | 2021-02-10T05:45:00.000Z | 2021-02-10T05:45:00.000Z | ezno_convert/enums.py | ofersadan85/ezno_convert | 4c5cf7d41c72698e5486068673f170d968a9de27 | [
"MIT"
] | null | null | null | import enum
from typing import Union
@enum.unique
class PPT(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/powerpoint.ppsaveasfiletype
AnimatedGIF = 40
BMP = 19
Default = 11
EMF = 23
External = 64000
GIF = 16
JPG = 17
META = 15
MP4 = 39
OpenPresentation = 35
PDF = 32
PNG = 18
Presentation = 1
RTF = 6
SHOW = 7
Template = 5
TIF = 21
WMV = 37
XPS = 33
app = 'Powerpoint.Application'
extensions = ('.ppt', '.pptx')
@enum.unique
class WORD(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/word.wdsaveformat
DosText = 4
DosTextLineBreaks = 5
FilteredHTML = 10
FlatXML = 19
OpenDocumentText = 23
HTML = 8
RTF = 6
Template = 1
Text = 2
TextLineBreaks = 3
UnicodeText = 7
WebArchive = 9
XML = 11
Document97 = 0
DocumentDefault = 16
PDF = 17
XPS = 18
app = 'Word.Application'
extensions = ('.doc', '.docx')
@enum.unique
class XL(enum.Enum):
# Source: https://docs.microsoft.com/en-us/office/vba/api/excel.xlfixedformattype
# TODO: Implement "SaveAs" methods, see: https://docs.microsoft.com/en-us/office/vba/api/excel.workbook.saveas
PDF = 0
XPS = 1
app = 'Excel.Application'
extensions = ('.xls', '.xlsx')
enum_types = Union[PPT, WORD, XL]
| 20.863636 | 114 | 0.611474 | 1,256 | 0.912128 | 0 | 0 | 1,295 | 0.94045 | 0 | 0 | 451 | 0.327524 |
6aa897704d8b8b96376b6c78aa9de27ecec18071 | 378 | py | Python | app/django_first/news/migrations/0002_movies_year.py | vvuri/flask_pipeline | d3f283b8a6a6239e56d85e67dbe3edce55bcb980 | [
"MIT"
] | null | null | null | app/django_first/news/migrations/0002_movies_year.py | vvuri/flask_pipeline | d3f283b8a6a6239e56d85e67dbe3edce55bcb980 | [
"MIT"
] | null | null | null | app/django_first/news/migrations/0002_movies_year.py | vvuri/flask_pipeline | d3f283b8a6a6239e56d85e67dbe3edce55bcb980 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.1 on 2022-01-19 23:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='movies',
name='year',
field=models.CharField(max_length=4, null=True),
),
]
| 19.894737 | 60 | 0.582011 | 285 | 0.753968 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.214286 |
6aa93bd0cfbc5bae7eaa0365dd95b7de863c0e17 | 653 | py | Python | scripts/issue_param_value.py | Jhsmit/awesome-panel-extensions | 41eba7cf84caa911be4ed0df2a96e16fc1e70263 | [
"CC-BY-4.0"
] | 3 | 2020-07-16T07:28:45.000Z | 2020-07-17T12:53:56.000Z | scripts/issue_param_value.py | MarcSkovMadsen/panel-extensions-template | f41ad8d8fb8502f87de3a4992917cbffb6299012 | [
"CC-BY-4.0"
] | null | null | null | scripts/issue_param_value.py | MarcSkovMadsen/panel-extensions-template | f41ad8d8fb8502f87de3a4992917cbffb6299012 | [
"CC-BY-4.0"
] | null | null | null | import panel as pn
import param
from awesome_panel_extensions.frameworks.fast import FastTemplate, FastTextInput
WIDGETS = {
"some_text": {"type": FastTextInput, "readonly": True, "sizing_mode": "fixed", "width": 400}
}
class ParameterizedApp(param.Parameterized):
some_text = param.String(default="This is some text")
view = param.Parameter()
def __init__(self, **params):
super().__init__(**params)
self.view = pn.Param(self, parameters=["some_text"], widgets=WIDGETS)
parameterized_app = ParameterizedApp()
paremeterized_template = FastTemplate(main=[parameterized_app.view])
paremeterized_template.servable()
| 27.208333 | 96 | 0.735069 | 280 | 0.42879 | 0 | 0 | 0 | 0 | 0 | 0 | 84 | 0.128637 |
6aaa29259fb6e01655aa91ee60654bb2eceee036 | 1,271 | py | Python | gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | [
"Apache-2.0"
] | null | null | null | gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | [
"Apache-2.0"
] | null | null | null | gjqyxyxxcxxt/gjqyxyxxcxxt/queue_companies.py | AisinoPythonTeam/PythonAiniso | 983a29962752679d8cc26a2c3cdb0ba8fcfa3f02 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import pymysql
import sys, os, json, time, pymongo
app_dir = os.path.abspath("../")
sys.path.append(app_dir)
from gjqyxyxxcxxt import settings
from gjqyxyxxcxxt.database.my_redis import QueueRedis
conn = None
def connect_db():
global conn
conn = pymysql.connect(host="172.16.16.15",port=3306,user="root",passwd="A1s1n0@zxyc#3",db="ixinnuo_sjcj",charset="utf8")
return
def get_req_from_db():
global conn
cursor = conn.cursor()
cursor.execute('select id, entname from req where status=0 order by id limit 10')
results = cursor.fetchall()
companies = []
for res in results:
company = {}
company['id'] = res[0]
company['name'] = res[1]
companies.append(company)
return companies
def main():
my_queue = QueueRedis()
result = my_queue.get_queue_length(settings.COMPANIES)
print result
#mq 里存在数据则,3秒后退出
if result:
time.sleep(3)
exit()
time.sleep(3)
global conn
connect_db()
source = get_req_from_db()
for id_name in source:
message = json.dumps(id_name)
my_queue.send_to_queue(settings.COMPANIES, message)
conn.close()
print '成功添加队列%s条数据!!!' % len(source)
if __name__ == '__main__':
main()
| 24.921569 | 125 | 0.650669 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 240 | 0.183066 |
6aab26683b9b2a063b1ca8928d6b0655775e0f6b | 86,132 | py | Python | model/entity_quotes.py | tkuculo/QuoteKG | a7b7d323679624a9cd3805e866028fad0a5a5408 | [
"MIT"
] | null | null | null | model/entity_quotes.py | tkuculo/QuoteKG | a7b7d323679624a9cd3805e866028fad0a5a5408 | [
"MIT"
] | null | null | null | model/entity_quotes.py | tkuculo/QuoteKG | a7b7d323679624a9cd3805e866028fad0a5a5408 | [
"MIT"
] | null | null | null | #main_section > lines > line > text
#main_section > lines > line > sub_line > text
#main_section > sub_sections
#main_section > templates > type
#main_section > templates > empty_values
#main_section > templates > values
#main_section > templates > sub_templates
#main_section > title > line > text
from transformers.models.auto import configuration_auto
from model.quote import *
import collections
languages_with_templates = ["fr","da","nl","be","is","ca","bg","da","ka"]
hybrid_languages = ["uk","ru","sv","et"] + ["ko","fa","cs","fi", "hy"]
misattributed = {
'ar': ['ضعيف', 'متنازع عليه', 'بشكل غير صحيح', 'قائلا نعزى خطأ', 'يعزى خطأ إلى', 'ونقلت تم تعيينها', 'إساءة', 'نعزى بشكل غير صحيح', 'متصل بشكل غير صحيح', 'يعزى بشكل غير صحيح إلى', 'مثيرة للجدل', 'تم تعيينه بشكل غير صحيح', 'تم تعيينه بشكل غير صحيح', 'الفضل بشكل غير صحيح', 'مشكوك فيه', 'سوء المعاملة', 'سيئة', 'خاطئ', 'الفضل بشكل خاطئ', 'لم يتم التحقق منه', 'مرفقة بشكل غير صحيح', 'الفضل بشكل غير صحيح', 'غير صحيح', 'يعزى إلى الخطأ', 'مشبوه أو مشكوك فيه'],\
'az': ['zəif', 'mübahisəli', 'yanlış', 'yanlış şəkildə aid olduğunu söyləmək', 'səhv yanına aiddir', 'Təyin olunmuş sitatlar', 'yanılsaq', 'səhv aiddir', 'səhv bağlıdır', 'səhv aiddir', 'mübahisəli', 'səhv təyin olunur', 'səhv təyin olunmuşdur', 'səhv hesablanır', 'şübhəli', 'zəif', 'səhv', 'səhv hesablanır', 'təsdiqlənməmiş', 'səhv əlavə olunur', 'səhv hesablanır', 'yanlış', 'səhvən aiddir', 'şübhəli'],\
'be': ['слабы', 'спрэчны', 'няправільна', 'кажучы няправільна прыпісаны', 'памылкова звязаны з', 'Цытаты прызначаныя', 'misatributed', 'няправільна прыпісваецца', 'няправільна падлучаны', 'няправільна прыпісваецца', 'супярэчлівы', 'няправільна прызначаны', 'няправільна прызначаны', 'залічваецца няправільна', 'няпэўны', 'адварочваў', 'кепска', 'памылковы', 'памылкова залічана', 'неўверыў', 'няправільна прыкладаецца', 'няправільна залічаны', 'няправільны', 'прыпісваецца памылкова', 'падазроны'],\
'bg': ['слаб', 'оспорван', 'неправилно', 'казвайки погрешно приписване', 'погрешно се приписва', 'Misattributed.', 'неправилно приписано', 'неправилно свързани', 'неправилно', 'противоречиви', 'е неправилно назначен', 'неправилно зададен', 'кредитирани неправилно', 'съмнително', 'Млъкни', 'лошо', 'погрешно', 'неправилно кредитирани', 'Несъвършен', 'неправилно прикрепени', 'неправилно кредитирани', 'неправилен', 'се приписва на погрешно', 'подозрителен'],\
'bs': ['slab', 'sporan', 'pogrešno', 'govoreći pogrešno pripisano', 'pogrešno se pripisuje', 'Citati dodijeljene', 'misao', 'Netačno pripisan', 'Nepravilno povezani', 'pogrešno pripisan', 'kontroverzan', 'pogrešno je dodeljen', 'pogrešno dodijeljeno', 'pripisuju pogrešno', 'sumnjiv', 'maltretiran', 'slabo', 'pogrešno', 'pogrešno pripisan', 'neprovjeren', 'pogrešno priložen', 'pogrešno pripisan', 'netačan', 'pripisuje se pogrešno', 'sumnjiv'], \
'ca': ['feble', 'en disputa', 'incorrectament', 'dient incorrectament atribuït', "s'atribueix incorrectament a", 'Cotitzacions assignades', 'Misattributed', 'atribuïts incorrectament', 'connectat incorrectament', 'atribuït incorrectament a', 'controvertit', 'està assignat incorrectament', 'assignat incorrectament', 'acreditat incorrectament', 'dubtós', 'maltractat', 'pobrament', 'mal', 'acreditat incorrectament', 'no verificat', 'incorrectament adjunt', 'acreditat incorrectament', 'incorrecte', "s'atribueix a erròniament", 'sospitós'], \
'co': ['debuli', 'disputa', 'sbagliatu', 'dicendu attribuitu sbagliatu', 'sbagliatu hè attribuita à', 'Quotes assignati', 'misattribuitu', 'attribuitu sbagliatu', 'cunnessu sbagliatu', 'attribuitu sbagliatu à', 'cuntruversuale', 'hè incorrectamente assignatu', 'assignatu sbagliatu', 'creditu sbagliatu', 'dubbitu', 'MISTORATU', 'Poviru', 'sbagliatu', 'sbagliatu creditu', 'Unvererazionatu', 'sbagliatu attaccatu', 'incorrectamente creditu', 'sbagliatu', 'hè attribuita à sbaglià', 'suspicosu'],\
"cs": ['pochybný', 'nesprávně je připisován', 'je přičítán omylem', 'neosgejavané.', 'říká se nesprávně přiřazené', 'sporný', 'je nesprávně přiřazen', 'špatně', 'nesprávně připojeno', 'nesprávně', 'nezbytný', 'nesprávně přiřazeno', 'nesprávně přisuzováno', 'špatně zacházený', 'slabý', 'nesprávný', 'nesprávně připsány', 'nesprávně připsaný', 'přidělené nabídky', 'podezřelý', 'neověřené'],\
'da': ['svag', 'bestridt', 'forkert', 'siger fejlagtigt tilskrevet', 'fejlagtigt tilskrives', 'citater tildelt', 'misattributed.', 'forkert tilskrevet', 'forkert forbundet', 'forkert tilskrives', 'kontroversielt', 'er forkert tildelt', 'forkert tildelt', 'krediteret forkert', 'tvivlsom', 'mishandlet', 'Dårlig', 'forkert', 'fejlagtigt krediteret', 'unverified.', 'forkert vedhæftet', 'forkert krediteret', 'ukorrekt', 'er tilskrevet fejlagtigt', 'mistænksom'], \
"de": ['falsch verbunden', 'falsch angebracht', 'falsch zugewiesen', 'wird fehlerhaft zurückgeführt', 'schwach', 'fälschlich zugeschrieben', 'falsch zugerechnet', 'falsch wird zugeschrieben', 'falsch', 'falsch angeschlossen', 'misshandelt', 'unrecht zugeschrieben werden', 'misstrauisch', 'falsch gutgeschrieben', 'zweifelhaft', 'ist falsch zugewiesen', 'notwendig', 'zitate zugewiesen', 'nicht verifiziert'],\
'el': ['αδύναμος', 'αμφισβητούμενος', 'εσφαλμένα', 'λέγοντας εσφαλμένα αποδόσεις', 'λανθασμένα αποδίδεται σε', 'αποσπάσματα', 'απροσδόκητος', 'που αποδίδονται εσφαλμένα', 'εσφαλμένα συνδεδεμένο', 'που αποδοθεί εσφαλμένα', 'αμφιλεγόμενος', 'έχει ανατεθεί εσφαλμένα', 'εσφαλμένα αποδίδεται', 'πιστώθηκε λανθασμένα', 'αμφίβολος', 'κακομεταχειρίζομαι', 'πτωχώς', 'λανθασμένος', 'λάθος πιστώθηκε', 'ανεπιβεβαίωτος', 'Επισυνάπτεται εσφαλμένα', 'εσφαλμένα πιστώνεται', 'ανακριβής', 'αποδίδεται λανθασμένα', 'ύποπτος'],\
"en": ['weak', 'disputed', 'incorrectly', 'saying wrongly attributed', 'wrongly is attributed to', 'quotes assigned', 'misattributed', 'incorrectly attributed', 'incorrectly connected', 'incorrectly attributed to', 'controversial', 'is incorrectly assigned', 'incorrectly assigned', 'credited incorrectly', 'doubtful', 'mistreated', 'poorly', 'wrong', 'wrongly credited', 'unverified', 'incorrectly attached', 'incorrectly credited', 'incorrect', 'is attributed to mistakenly', 'suspicious'],\
"es": ['débil', 'disputado', 'incorrectamente', 'decir atribuido incorrectamente', 'atribuido incorrectamente a', 'citas asignadas', 'atribuido incorrectamente', 'atribuido incorrectamente', 'conectado incorrectamente', ' atribuido incorrectamente a ',' controvertido ',' asignado incorrectamente ',' asignado incorrectamente ',' acreditado incorrectamente ',' dudoso ',' maltratado ',' mal ',' incorrecto ',' acreditado incorrectamente ',' no verificado ', 'adjunto incorrectamente', 'acreditado incorrectamente', 'incorrecto', 'atribuido erróneamente', 'sospechoso'],\
'et': ['nõrk', 'vaidlustatud', 'valesti', 'öeldes valesti omistatud', 'valesti omistatakse', 'määratud hinnapakkumisi', 'eksima', 'valesti omistatud', 'valesti ühendatud', 'valesti omistatud', 'vastuoluline', 'on valesti määratud', 'valesti määratud', 'krediteeritud valesti', 'kahtlane', 'väärkohtlemine', 'halvasti', 'vale', 'valesti krediteeritud', 'vastamata jätmine', 'valesti kinnitatud', 'valesti krediteeritud', 'vale', 'omistatakse ekslikult', 'kahtlane'],\
'eu': ['ahul', 'jokatu', 'gaizki', 'gaizki egozten esanda', 'gaizki egozten zaio', 'esleitutako aipuak', 'Misattributatua', 'oker egotzi', 'Gaizki konektatuta', 'oker egotzita', 'Polemika', 'gaizki esleitzen da', 'gaizki esleituta', 'oker kreditua', 'zalantzazko', 'tratu txarrak', 'txarto', 'okerreko', 'gaizki kreditatu', 'irentetu gabe', 'oker erantsita', 'Gaizki kreditatu', 'ez zuzen', 'oker egozten zaio', 'goganbehartsu'],\
'fa': ['ضعیف', 'متضاد', 'نادرست', 'گفتن اشتباه است', 'اشتباه به آن نسبت داده می شود', 'نقل قول اختصاص داده شده', 'سوء تفاهم', 'نادرست نسبت داده شده است', 'نادرست متصل است', 'نادرست به', 'بحث برانگیز', 'نادرست اختصاص داده شده است', 'اشتباه اختصاص داده شده است', 'اعتبار نادرست', 'مشکوک', 'بدرفتاری', 'ضعیف', 'اشتباه', 'اشتباه اعتبار', 'غیر قابل تایید', 'اشتباه متصل شده', 'اشتباه اعتبار', 'غلط', 'به اشتباه نسبت داده شده است', 'مشکوک'],\
'fi': ['heikko', 'kiistanalainen', 'väärin', 'sanomalla väärin', 'virheellisesti johtuu', 'Lainaukset', 'huonosti', 'virheellisesti', 'Väärin kytketty', 'virheellisesti', 'kiistanalainen', 'on asetettu virheellisesti', 'Virheellisesti määritetty', 'hyvitetään väärin', 'epäilyttävä', 'kohteliaisuus', 'huonosti', 'väärä', 'Väärin hyvitetty', 'vahvistettu', 'Virheellisesti kiinnitetty', 'Virheellisesti hyvitetty', 'väärä', 'johtuu virheellisesti', 'epäilyttävä'],\
'fr': ['faible', 'contesté', 'incorrectement', 'dire attribué à tort', 'est attribué à tort à', 'citations attribuées', 'mal attribué', 'mal attribué', 'incorrectement connecté', ' attribué à tort à', 'controversé', 'est attribué de manière incorrecte', 'attribué de manière incorrecte', 'crédité de manière incorrecte', 'douteux', 'maltraité', 'mal', 'mauvais', 'crédité à tort', 'non vérifié', 'incorrectement joint', 'mal crédité', 'incorrect', 'est attribué à tort', 'suspect'],\
'he': ['חלש', 'משווקת', 'לא נכון', 'אומר מיוחסת בטעות', 'בטעות מיוחסת', 'ציטוטים שהוקצו', 'misattributed', 'המיוחס בצורה שגויה', 'קשור באופן שגוי', 'המיוחס לא נכון', 'שנוי במחלוקת', 'מוקצה באופן שגוי', 'שהוקצו באופן שגוי', 'זוכה באופן שגוי', 'מוטל בספק', 'התעללות', 'גרוע', 'שגוי', 'שזוכו בטעות', 'unverified', 'המצורפת באופן שגוי', 'זוכה לא נכון', 'לֹא נָכוֹן', 'מיוחסת לטעות בטעות', 'חָשׁוּד'],\
'hi': ['कमज़ोर', 'विवादित', 'गलत तरीके से', 'गलत तरीके से कहना', 'गलत तरीके से जिम्मेदार है', 'उद्धरण सौंपा', 'गलत', 'गलत तरीके से जिम्मेदार', 'गलत तरीके से जुड़ा हुआ', 'गलत तरीके से जिम्मेदार ठहराया', 'विवादास्पद', 'गलत तरीके से सौंपा गया है', 'गलत तरीके से असाइन किया गया', 'गलत तरीके से श्रेय दिया गया', 'संदिग्ध', 'दुराचारित', 'बीमार', 'गलत', 'गलत तरीके से श्रेय दिया गया', 'असत्यापित', 'गलत तरीके से संलग्न', 'गलत तरीके से श्रेय दिया गया', 'ग़लत', 'गलती से जिम्मेदार है', 'संदेहजनक'],\
'hr': ['slab', 'osporen', 'nepravilno', 'govoreći pogrešno pripisuje se', 'pogrešno se pripisuje', 'dodijeljeni citati', 'pogrešan', 'Neispravno se pripisuje', 'pogrešno povezan', 'pogrešno pripisuje', 'kontroverzno', 'je pogrešno dodijeljen', 'pogrešno dodijeljen', 'pogrešno pripisano', 'sumnjiv', 'maltretiran', 'slabo', 'pogrešno', 'pogrešno pripisano', 'neveritičan', 'pogrešno pričvršćen', 'pogrešno pripisano', 'netočno', 'se pripisuje pogrešno', 'sumnjičav'],\
'hu': ['gyenge', 'vitatott', 'tévesen', 'rosszul mondván', 'helytelenül tulajdonítható', 'Idézetek hozzárendeltek', 'félreérthetetlen', 'helytelenül tulajdonítható', 'Helytelenül csatlakoztatva van', 'helytelenül tulajdonítható', 'vitatott', 'helytelenül hozzárendelt', 'Helytelenül hozzárendelt', 'helytelenül jóváírják', 'kétséges', 'rosszul kezelt', 'rosszul', 'rossz', 'tévesen jóváírta', 'ellenőrizetlen', 'Helytelenül csatolt', 'helytelenül jóváírta', 'helytelen', 'tévesen tulajdonítható', 'gyanús'],\
'hy': ['թույլ', 'վիճված', 'սխալ', 'սխալ ասելով, վերագրվում է', 'սխալ է վերագրվում', 'Նշված մեջբերումները', 'Մատսել է', 'Սխալ կերպով վերագրվում է', 'Սխալ միացված', 'սխալ է վերագրվել', 'վիճաբանական', 'սխալ է նշանակվել', 'Սխալ նշանակված', 'սխալվել է սխալ', 'կասկածելի', 'չարամտել', 'վատ', 'սխալ', 'սխալվել է', 'անավարտ', 'Սխալորեն կցված', 'սխալ է գնահատվել', 'սխալ', 'վերագրվում է սխալմամբ', 'կասկածելի'],\
'id': ['lemah', 'diperdebatkan', 'salah', 'mengatakan salah dikaitkan.', 'salah dikaitkan dengan', 'Kutipan ditugaskan', 'salah penyibaran', 'salah dikaitkan', 'salah terhubung', 'salah dikaitkan dengannya', 'kontroversial', 'salah ditugaskan', 'salah ditugaskan', 'dikreditkan secara salah', 'diragukan lagi', 'Dianiaya', 'buruk', 'salah', 'salah dikreditkan', 'tidak diverifikasi', 'salah melekat', 'salah dikreditkan', 'salah', 'dikaitkan dengan keliru', 'mencurigakan'],\
'is': ['veik', 'umdeildur', 'rangt', 'segja að ranglega rekja til', 'rangt stafar af', 'Tilvitnanir úthlutað', 'misertributed.', 'rangt rekja má', 'rangt tengt', 'rangt rekja til', 'umdeild', 'er rangt úthlutað', 'rangt úthlutað', 'lögð rangt', 'efast', 'mistreated.', 'illa', 'rangt', 'ranglega lögð inn', 'unverfied.', 'rangt fylgir', 'Rangt viðurkennt', 'rangt', 'er rekja til ranglega', 'grunsamlegt'],\
'it': ['debole', 'disputato', 'erroneamente', 'detto erroneamente attribuito', 'erroneamente attribuito a', 'virgolette assegnate', 'erroneamente attribuito', 'erroneamente attribuito', 'erroneamente connesso', ' erroneamente attribuito a', 'controverso', 'è assegnato in modo errato', 'assegnato in modo errato', 'accreditato in modo errato', 'dubbio', 'maltrattato', 'male', 'sbagliato', 'accreditato erroneamente', 'non verificato', 'erroneamente allegato', 'erroneamente accreditato', 'errato', 'è attribuito a erroneamente', 'sospetto'],\
'ja': ['弱い', '議論した', '誤って', '間違って帰ったことを言っています', '間違って帰属しています', '割り当てられた引用符', '誤動作しました', '間違って帰属しました', '誤って接続されています', '誤って帰属しました', '物議を醸す', '間違って割り当てられています', '間違って割り当てられています', '誤って入金されました', '疑わしい', '虐待された', '不完全に', '間違い', '間違ってクレジットされました', '未検証', '誤って添付されています', '誤ってクレジットされました', '正しくない', '誤って帰属されています', '疑わしい'],\
'ka': ['სუსტი', 'სადავო', 'არასწორად', 'არასწორად მიეკუთვნება', 'არასწორად მიეკუთვნება', 'შეთავაზებები', 'misattributed', 'არასწორად მიეკუთვნება', 'არასწორად უკავშირდება', 'არასწორად მიეკუთვნება', 'დროებითი', 'არასწორად არის მინიჭებული', 'არასწორად მინიჭებული', 'არასწორად დაკრედიტდება', 'საეჭვო', 'mistreated', 'ღარიბად', 'მცდარი', 'არასწორად დაკრედიტდება', 'გადაუსებული', 'არასწორად ერთვის', 'არასწორად დაკრედიტდება', 'არასწორი', 'შეცდომით მიეკუთვნება', 'საეჭვო'],\
'ko': ['약한', '분쟁', '틀리게', '잘못된 것으로 말하고있다', '잘못된 것은', '할당 된 따옴표', '미해시', '잘못된 것으로 잘못된 것입니다', '잘못 연결되었습니다', '잘못된 것으로 잘못된 것입니다', '논란이 많은', '잘못 지정됩니다', '잘못 지정되었습니다', '잘못 적립되었습니다', '불안한', '학대하다', '신통치 않게', '잘못된', '잘못된 적립 된 것', '확인되지 않았습니다', '잘못 첨부되었습니다', '잘못 적립되었습니다', '잘못된', '실수로 기인합니다', '의심스러운'],\
'lt': ['Silpnas', 'ginčijama', 'Neteisingai', 'sakydamas neteisingai priskirtas', 'neteisingai priskiriama', 'Citatos', 'nesuderinta', 'neteisingai priskiriama', 'neteisingai prijungta', 'neteisingai priskirta', 'prieštaringas', 'yra neteisingai priskirtas', 'neteisingai priskirtas', 'neteisingai įskaityta', 'abejotina', 'netinkamai elgiamasi', 'blogai', 'neteisingas', 'neteisingai įskaityta', 'nepatvirtinta', 'neteisingai prijungtas', 'neteisingai įskaityta', 'Neteisinga', 'priskiriama klaidingai', 'įtartinas'],\
'nl': ['zwak', 'twijfelachtig', 'onjuist', 'Samenstellen ten onrechte toegeschreven', 'ten onrechte wordt toegeschreven aan', 'Citaten toegewezen', 'verkeerd ingesteld', 'Onjuist toegeschreven', 'Onjuist aangesloten', 'onjuist toegeschreven aan', 'controverseel', 'is verkeerd toegewezen', 'Onjuist toegewezen', 'verkeerd gecrediteerd', 'twijfelachtig', 'mishandeld', 'slecht', 'mis', 'ten onrechte gecrediteerd', 'ongehroken', 'verkeerd bevestigd', 'onjuist gecrediteerd', 'niet correct', 'wordt toegeschreven aan ten onrechte', 'verdacht'],\
'no': ['svak', 'omstridt', 'feil', 'sier feilaktig tilskrives det', 'feil er tilskrevet', 'Sitater tildelt', 'misattributed.', 'feilaktig tilskrives det', 'feil tilkoblet', 'feilaktig tilskrives', 'kontroversiell', 'er feil tildelt', 'feilaktig tildelt', 'krediteres feil', 'tvilsom', 'feilbehandlet', 'dårlig', 'feil', 'feil kreditert', 'unverified.', 'feil festet', 'feil kreditert', 'stemmer ikke', 'er tilskrevet feilaktig', 'mistenkelig'],\
'ro': ['slab', 'contestată', 'incorect', 'spunând atribuit greșit', 'este atribuit în mod greșit', 'Citate atribuite', 'misattribuit', 'incorect atribuită', 'incorect conectat', 'incorect atribuită', 'controversat', 'este atribuită incorect', 'incorect atribuite', 'creditat incorect', 'îndoielnic', 'maltratat', 'slab', 'gresit', 'creditat greșit', 'neveriectificat', 'În mod incorect atașat', 'incorect creditate', 'incorect', 'este atribuită în mod eronat', 'suspicios'],\
'ru': ['слабый', 'оспариваемый', 'неправильно', 'говорить неправильно приписанным', 'неправильно объясняется', 'цитаты назначены', 'несущественно', 'неправильно приписан', 'неправильно подключен', 'неправильно приписан', 'спорный', 'неверно назначен', 'неверно назначен', 'зачислен неправильно', 'сомнительный', 'плохо обращаться', 'плохо', 'неправильный', 'неправильно приписывать', 'неверно', 'неправильно прилагается', 'неправильно зачислено', 'неверный', 'приписывается по ошибке', 'подозрительный'],\
'sk': ['slabý', 'sporný', 'nesprávne', 'hovorí nesprávne pripisované', 'nesprávne sa pripisuje', 'Pridelené citácie', 'nesprávny', 'Nesprávne pripísané', 'Nesprávne pripojené', 'nesprávne pripísané', 'kontroverzný', 'je nesprávne priradený', 'Nesprávne priradené', 'nesprávne pripísané', 'pochybný', 'nespokojný', 'úboho', 'vhodný', 'nesprávne pripísané', 'neoverený', 'Nesprávne pripojené', 'Nesprávne pripísané', 'nesprávny', 'sa pripisuje mylne', 'podozrivý'],\
"sl": ["neozdrojované"'napačno prijavljeno', 'rekel napačno pripisano', 'napačno nakazana', 'napačno povezan', 'slabo', 'sumljivega', 'nepravilno dodeljena', 'neosgejavan.', 'dodeljeni citati', 'sporno', 'nepravilno pritrjena', 'nepreverjeno', 'napačno', 'je nepravilno dodeljen', 'nepravilno', 'napačno pripisano', 'se pripisuje pomotoma', 'in pavipe.', 'napačno pripisuje', 'dvomljiv', 'šibko', 'narobe', 'nepravilno pripisana'],\
"sq": ['i diskutueshëm', 'atribuohet gabimisht', 'i keqtrajtuar', 'i atribuohet gabimisht', 'i pasaktë', 'kredituar gabimisht', 'caktohet gabimisht', 'i lidhur gabimisht', 'i dyshimtë', 'i pavepi', 'i gabuar', 'thënie të atribuara gabimisht', 'bashkangjitur gabimisht', 'dobet'],\
"pl": ['zło', 'błędny', 'misattriruted.', 'źle traktować', 'słabo', 'wątpliwy', 'nieprawidłowo przymocowany', 'nieprawidłowo przypisany do', 'niepoprawnie przypisany', 'niepoprawnie połączony', 'mówiąc błędnie przypisany', 'kwestionować', 'cytaty przypisywane', 'niesprawdzony', 'błędnie przypisany', 'nieprawidłowo przypisany'], \
'pt': ['fraca', 'contestada', 'incorretamente', 'dizendo atribuída incorretamente', 'atribuída incorretamente a', 'citações atribuídas', 'atribuída incorretamente', 'atribuída incorretamente', 'conectada incorretamente', 'atribuído incorretamente a', 'controverso', 'atribuído incorretamente', 'atribuído incorretamente', 'creditado incorretamente', 'duvidoso', 'maltratado', 'mal', 'errado', 'creditado incorretamente', 'não verificado', 'incorretamente anexado', 'incorretamente creditado', 'incorreto', 'atribuído a incorretamente', 'suspeito'],\
'ta': ['பலவீனமான', 'விவாதத்திற்குரியது', 'தவறாக', 'தவறாக சொல்லப்பட்டது', 'தவறாக காரணம்', 'மேற்கோள் ஒதுக்கப்படும்', 'misattributed.', 'தவறாக காரணம்', 'தவறாக இணைக்கப்பட்டுள்ளது', 'தவறாக காரணம்', 'சர்ச்சைக்குரிய', 'தவறாக ஒதுக்கப்பட்டுள்ளது', 'தவறாக ஒதுக்கப்படும்', 'தவறாக வழங்கப்பட்டது', 'சந்தேகம்', 'தவறாக நடத்தப்பட்டது', 'மோசமாக', 'தவறு', 'தவறாக வரவு', 'சரிபார்க்கப்படவில்லை', 'தவறாக இணைக்கப்பட்டுள்ளது', 'தவறாக நம்பப்படுகிறது', 'தவறானது', 'தவறுதலாக காரணம்', 'சந்தேகத்திற்கிடமான'],\
'te': ['బలహీనమైన', 'వివాదాస్పదంగా', 'తప్పుగా', 'తప్పుగా ఆపాదించబడినది', 'తప్పుగా ఆపాదించబడినది', 'కేటాయించిన కోట్స్', 'myatattributed', 'తప్పుగా ఆపాదించబడినది', 'తప్పుగా కనెక్ట్ చేయబడింది', 'తప్పుగా ఆపాదించబడినది', 'వివాదాస్పద', 'తప్పుగా కేటాయించబడుతుంది', 'తప్పుగా కేటాయించబడింది', 'తప్పుగా జమ చేయబడుతుంది', 'అనుమానాస్పద', 'బాధితుడు', 'పేలవంగా', 'తప్పు', 'తప్పుగా ఘనత పొందింది', 'ధృవీకరించనిది', 'తప్పుగా జతచేయబడింది', 'తప్పుగా ఘనత పొందింది', 'తప్పు', 'తప్పుగా ఆపాదించబడింది', 'అనుమానాస్పద'],\
'uk': ['слабкий', 'спірний', 'неправильно', 'кажучи неправильно віднесено', 'неправильно пояснюється', 'Призначені цитати', 'мізерний', 'неправильно віднесено', 'неправильно підключено', 'неправильно віднесено', 'суперечливий', 'неправильно призначено', 'неправильно призначено', 'неправильно приписується', 'сумнівний', 'погано', 'погано', 'неправильний', 'неправильно зарахований', 'неперевірений', 'неправильно прикріплені', 'неправильно зараховано', 'неправильний', 'пояснюється помилково', 'підозрілий'],\
'ur': ['کمزور', 'متنازعہ', 'غلط طور پر', 'غلط طور پر منسوب کیا گیا ہے', 'غلط طور پر منسوب کیا جاتا ہے', 'حوالہ جات', 'غلط استعمال کی اطلاع دیتے ہوئے ایرر آ گیا ہے', 'غلط طور پر منسوب', 'غلط طور پر منسلک', 'غلط طور پر منسوب', 'متضاد', 'غلط طور پر تفویض کیا جاتا ہے', 'غلط طور پر تفویض', 'غلط طریقے سے کریڈٹ', 'شکست', 'غلطی', 'غریب', 'غلط', 'غلط طور پر کریڈٹ', 'غیر تصدیق شدہ', 'غلط طریقے سے منسلک', 'غلط طریقے سے کریڈٹ', 'غلط', 'غلطی سے منسوب کیا جاتا ہے', 'مشکوک'],\
'vi': ['Yếu', 'tranh chấp', 'không chính xác', 'nói sai quy kết', 'sai được quy cho', 'Báo giá được giao', 'sai lệch', 'quy cho không chính xác', 'kết nối không chính xác', 'quy cho không chính xác cho.', 'gây tranh cãi', 'được giao không chính xác', 'chỉ định không chính xác', 'ghi có không chính xác', 'nghi ngờ', 'ngược đãi', 'kém', 'Sai lầm', 'Tín dụng sai', 'chưa được xác minh', 'đính kèm không chính xác', 'Credited không chính xác', 'không đúng', 'được quy cho nhầm', 'khả nghi'],\
'zh': ['弱', '有争议', '不正确', '错误归因', '错误归因于', '引用分配', '错误归因', '错误归因', '错误连接', '错误地归因于', '有争议的', '被错误地分配', '错误地分配', '记入错误', '可疑', '虐待', '差', '错误', '错误记入', '未验证', '错误附加', '错误记入', '错误', '归因于错误', '可疑']
}
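
# Usage sketch (illustrative only, not part of the original pipeline): the
# table above maps a language code to heading markers for dubious or
# misattributed quotes. Assuming simple case-insensitive substring matching,
# a helper along these lines could flag a section heading; `keyword_table`,
# `heading` and `lang` are hypothetical names, and the table is passed in as
# an argument because this sketch does not fix a binding for it.
def heading_has_marker(heading, lang, keyword_table):
    """Return True if any marker for `lang` occurs in `heading`."""
    heading = heading.strip().lower()
    return any(marker.lower() in heading for marker in keyword_table.get(lang, ()))
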
# Still to check as markers: 'attributed'?, 'Neověřené' (Czech: 'unverified'), 'disputed'
# to be checked: Djela, Obras, Povedali o
forbidden_by_language = {
"ar" : ["قالوا عنه","قالوا عنه","أشهر مؤلفاتها","الوصلات الخارجية"],\
"az" : ["İstinadlar","Mənbə","Xarici keçidlər","Haqqında deyilənlər","istinadlar"],\
"be":["Выказванні пра", "зноскі","спасылкі"],\
"bg":["За нея","за него","Източници","Бележки","Външни препратки","литература"],\
"bs":["Drugi o njemu","Djela","Također pogledajte","Vanjski linkovi","Izdanja"],\
"ca":["citacions sobre","Referències","Bibliografia","Enllaços externs","referències"],\
"co":["daveoù"],\
"cs":["ve výrocích","Reference","Externí odkazy","Související"],\
"da":["Eksterne henvisninger","Kilder"],\
"de":["zitate mit bezug auf", ],\
"el":["εξωτερικοί σύνδεσμοι"],\
"es":["sobre", "Obras", "Véase también", "Bibliografía","referencias"],\
"et":["välislingid"],\
"en":["quotes about", "filmography", "footnote", "sources", "resources", "other projects","external links","links",\
"notes", "note", "weblinks", "bibliogprahy", "related items","works", "references","literature","see","see also",\
"footnote","other projects"],\
"eu":["Kanpo loturak","Erreferentziak"],\
"fa":["دربارهٔ او","پیوند به بیرون","جستارهای وابسته","منبعدار", "منابع","پیوند بهبیرون"],\
"fi":["sanottua","lähteet"],\
"fr":["sur "],\
"he":["על עצמה", "נאמר עליה","מקורות","קישורים חיצוניים","נאמר עליו","ראו גם"],\
"hi":["बाहरी कडियाँ"],\
"hr":["vanjske poveznice"],\
"hu":["róla mondták","külső hivatkozások","Művei"],\
"hy":["Աղբյուրներ","Ծանոթագրություններ","ծանոթագրություններ"],\
"is":["tenglar"],\
"id":["pranala luar"],\
"it":["citazioni su","Doppiaggio","film","filmografia","altri progetti","voci correlate"], \
"ja":["外部リンク"],\
"ka":["რესურსები ინტერნეტში"],\
"ko":["각주","관련 어록"],\
"lt":["nuorodos"],\
"nl":["over "], \
"no":["eksterne lenker","referanser"],\
"pl":["zobacz też","o "],\
"pt":["obras", "sobre","Ligações externas"],\
"ro":["legături externe","despre"],\
"ru":["Об","Фильмография","примечания","ссылки", "см. также"],\
"sk":["Povedali o","iné projekty","referencie"],\
"sl":["viri","sklici"],\
"sq":["Thënie për të","Referimet","Shiko edhe","lidhje të jashtme","referime"],\
"ta":["வெளி இணைப்புகள்","சான்றுகள்"],\
"te":["మూలాలు"],\
"tr":["Hakkında","kaynakça"],\
"uk":["Про","Джерела","примітки","література"],\
"ur":["حوالہ جات"],\
"vi":["Liên kết ngoài","notennoù"],\
"zh":["外部链接","参见","参考文献"]
}
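
# Sketch of how `forbidden_by_language` might gate section parsing (an
# assumption for illustration, not the module's actual matching logic):
# titles are normalised and compared prefix-wise, so entries that keep a
# trailing space, such as "sur " or "o ", still act as word-boundary
# prefixes. The per-language assignments below then overwrite several of
# the short hand-curated lists with expanded machine-translated ones.
def section_is_forbidden(title, lang):
    """Return True if `title` starts with any forbidden heading for `lang`."""
    normalised = title.strip().lower()
    return any(normalised.startswith(entry.lower())
               for entry in forbidden_by_language.get(lang, ()))
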
forbidden_by_language["ar"] = ["قالوا عنه", "قالوا عنه", "أشهر مؤلفاتها", "الوصلات الخارجية", "انظر أيضا إلى", "فهرس", "ويعمل", "ملحوظة", "المرجعي", "آخر حول هذا الموضوع", "في الذكرى", "قالوا عن ذلك", "فيلموجرافيا.", "قوله", "روابط", "قالوا عنه", "يقال عن", "يقتبس", "رابط ل", "الإحالات", "الأكثر شهرة الكتب", "الفمود الخارجي", "وصلات خارجية", "مصادر:", "عنه", "استفسارات الاعتماد", "المرجعي", "دبلجة", "فيلموجرافيا.", "له", "في", "روابط خارجية", "يلعب", "هامش", "قيل عنه", "ويعمل", "يلعب", "على", "مشاريع أخرى", "عن", "عنها", "موارد", "رابط خارجي", "المراجع", "مصادر", "قيل عنها", "الحواشي", "المراجع الخارجية", "الأصناف ذات الصلة", "مصدر", "ملحوظات:", "روابط", "لها", "إطلاق", "الشهادات - التوصيات", "ملحوظات", "قل", "الموارد في الإنترنت", "أنظر أيضا", "daveoù.", "رابط إلى الخارج", "عنه", "أنظر أيضا", "فيلم", "تشغيل", "مراجع", "قالوا O.", "متعلق ب", "رابط خارجي", "بيانات حول", "حول", "الاستشهادات المذكورة أعلاه", "مصادر", "سفير", "يقال له", "المؤلفات", "حول نفسها", "روابط خارجية", "التطبيقات ذات الصلة", "ونقلت فيما يتعلق", "ارى", "على", "الروابط الزائدة", "ونقلت حول", "فيلموجرافيا.", "هامش", "مصادر", "مصادر", "مشاريع أخرى", "روابط خارجية", "روابط", "ملحوظات", "ملاحظة", "روابط انترنت", "فهرس", "الأصناف ذات الصلة", "ويعمل", "المراجع", "المؤلفات", "ارى", "أنظر أيضا", "هامش", "مشاريع أخرى"]
forbidden_by_language["az"] = ["İstinadlar", "Mənbə", "Xarici keçidlər", "Haqqında deyilənlər", "istinadlar", "Də baxmaq", "Biblioqrafiya", "əsər", "Əlamətdar", "istinad", "Bu barədə başqa bir şey", "Yubileydə", "Bu barədə dedilər", "Filmoqrafiya", "Deyən", "linklər", "Onun haqqında dedilər", "Haqqında deyilir", "sitat gətirən", "Birləşdirmək", "refertral", "Ən məşhur kitablar", "Xarici TADS", "Xarici əlaqələr", "Mənbələr:", "onun haqqında", "sualları asılı idi", "İstinad", "Dublying", "filmoqrafiya", "onun üçün", "O", "xarici linklər", "pyes", "izdihamlı", "Onun haqqında deyildi", "Əsər", "Pyes", "üstünə", "Digər layihələr", "Haqqında", "onun haqqında", "Resurslar", "Xarici əlaqə", "arayışlar", "Mənbələr", "Onun haqqında deyildi", "izahat", "Xarici İstinadlar", "Oxşar əşyalar", "Mənbəyi", "Qeydlər:", "Linklər", "Onun üçün", "Buraxılış", "Şöhrətli", "Qeydlər", "ərz etmək", "İnternetdəki mənbələr", "Həmçinin bax", "daveoùù", "Xarici ilə əlaqə", "Onun haqqında", "həmçinin bax", "filmə", "yan", "Arayışlar", "Dedilər.", "Bahalı", "xarici əlaqə", "Haqqında ifadələr", "haqqında", "Yuxarıdakı sitatlar", "mənbələr", "Səfir", "Ona deyilir", "ədəbiyyat", "haqqında özü haqqında", "xarici linklər", "Əlaqədar tətbiqlər", "Hörmətlə sitatlar", "Görmək", "artıq", "Həddindən artıq bağlantılar", "haqqında sitatlar", "filmoqrafiya", "izdihamlı", "mənbələr", "resurslar", "Digər layihələr", "xarici linklər", "linklər", "qeydlər", "Qeyd", "vaklar", "biblioqrafiya", "Oxşar əşyalar", "əsər", "arayışlar", "ədəbiyyat", "görmək", "həmçinin bax", "izdihamlı", "Digər layihələr"]
forbidden_by_language["be"] = ["Выказванні пра", "зноскі", "спасылкі", "Таксама глядзець на", "Бібліяграфія", "работы", "нотенно", "спасылка", "Іншы пра гэта", "У гадавіне", "Яны сказалі пра гэта", "Фільмаграфія", "Кажучы", "спасылкі", "Яны сказалі пра яго", "Кажуць пра", "каціроўкі", "Спасылка на", "рэфералы", "Самыя вядомыя кнігі", "Знешнія казалі", "Знешнія злучэння", "Крыніцы:", "пра яго", "Залежныя запыты", "Спасылка", "Нядбайнны", "фільмаграфія", "для яго", "Аб", "Знешнія спасылкі", "п'есы", "знос", "было сказана пра яго", "Работы", "П'есы", "на", "Іншыя праекты", "Пра", "пра яе", "Рэсурсы", "Знешняя спасылка", "рэкамендацыі", "Крыніцы", "Было сказана пра яе", "знос", "Знешнія спасылкі", "Звязаныя элементы", "Крыніца", "Нататкі:", "Спасылкі", "Для яе", "Верхі", "Водгукі", "Нататкі", "гаварыць", "Рэсурсы ў Інтэрнэце", "См таксама", "Спасылка на вонкавым боку", "Пра яго", "см таксама", "плёнка", "на", "Рэкамендацыі", "Яны сказалі, што О.", "Звязаны", "знешняя спасылка", "Заявы аб", "пра", "Цытаты вышэй", "крыніцы", "літаратура", "знешнія спасылкі", "Звязаныя з прыкладаннямі", "Каціроўкі ў адносінах да", "Бачыць", "больш", "Лішак спасылкі", "цытаты аб", "фільмаграфія", "крыніцы", "рэсурсы", "іншыя праекты", "знешнія спасылкі", "спасылкі", "нататкі", "нататка", "weblinks", "бібліяграфія", "Звязаныя элементы", "работы", "рэкамендацыі", "літаратура", "бачыць", "см таксама", "іншыя праекты"]
forbidden_by_language["bg"] = ["За нея", "за него", "Източници", "Бележки", "Външни препратки", "литература", "Също погледнете", "Библиография", "върши работа", "Бележит", "справка", "Друг за него", "в годишнината", "Те казаха за това", "Филмография", "Да се каже", "Връзки", "Те казаха за него", "Се казва", "Връзка към", "Реферали", "Най-известните книги", "Външни тади", "Външни връзки", "Източници:", "за него", "Запитвания", "Справка", "Дублиране", "Филмография", "за него", "О", "външни връзки", "Играе", "Бележка под линия", "Беше казано за него", "Върши работа", "Играе", "в", "Други проекти", "относно", "за нея", "Ресурси", "Външен линк", "препратки", "Източници", "Беше казано за нея", "Бележки под линия", "Външни препратки", "Подобни продукти", "Източник", "Забележки:", "Връзки", "За нея", "Освобождаване", "Отзиви", "Бележки", "казвам", "Ресурси в Интернет", "Вижте също", "Дъстина", "Връзка с външната страна", "За него", "Вижте също", "филм", "На", "Препратки", "Те казаха О.", "Свързани", "външен линк", "Изявления за", "относно", "Цитати над", "Източници", "Посланик", "Му се казва", "Литература", "за себе си", "външни връзки", "Свързани приложения", "Цитати по отношение на", "Вж", "над", "Излишните връзки", "цитати за", "Филмография", "Бележка под линия", "Източници", "Ресурси", "Други проекти", "външни връзки", "Връзки", "Бележки", "Забележка", "WeBlinks.", "Библиография", "подобни продукти", "върши работа", "препратки", "Литература", "вж", "Вижте също", "Бележка под линия", "Други проекти"]
forbidden_by_language["bs"] = ["Drugi o njemu", "Djela", "Također pogledajte", "Vanjski linkovi", "Izdanja", "Takođe pogledajte", "Bibliografija", "radovi", "Primijetan", "referenca", "Još jedan o tome", "u godišnjici", "Rekli su o tome", "Filmografija", "Govoreći", "linkove", "Rekli su o njemu", "Su rekli o", "citati", "Link na", "preporuke", "Najpoznatije knjige", "Vanjski tads", "Vanjske veze", "Izvori:", "o njemu", "Zavito upiti", "Referenca", "Presnimav", "Filmografija", "za njega", "O", "Vanjske veze", "igra", "fusnota", "Rečeno je o njemu", "Radovi", "Igra", "na", "Ostali projekti", "O", "o njoj", "Resursi", "Vanjska veza", "reference", "Izvori", "Rečeno je o njoj", "fusnote", "Vanjske reference", "Srodni predmeti", "Izvor", "Napomene:", "Linkove", "Za nju", "Izdanja", "Testimonials", "Bilješke", "izgovoriti", "Resursi na Internetu", "Vidjeti i", "Daveoù", "Veza sa spolja", "O njemu", "vidjeti i", "film", "na", "Reference", "Rekli su O.", "Povezani", "Vanjska veza", "Izjave o", "o", "Citati gore", "izvori", "Ambasador", "Kaže mu", "literatura", "o sebi", "Vanjske veze", "Srodne aplikacije", "Citati u odnosu na", "Vidjeti", "preko", "Višak veze", "citati o", "Filmografija", "fusnota", "izvori", "resursi", "Ostali projekti", "Vanjske veze", "linkove", "bilješke", "Bilješka", "Webliks", "bibliografija", "Srodni predmeti", "radovi", "reference", "literatura", "vidjeti", "vidjeti i", "fusnota", "Ostali projekti"]
forbidden_by_language["ca"] = ["citacions sobre", "Referències", "Bibliografia", "Enllaços externs", "referències", "També mireu", "Bibliografia", "treballa", "Notable", "referència", "Un altre sobre això", "En l'aniversari", "Van dir sobre això", "Filtrografia", "Dient", "enllaç", "Van dir sobre ell", "Es diu sobre", "cites", "Enllaç a", "referències", "Els llibres més famosos", "Tads exteriors", "Connexions externes", "Fonts:", "sobre ell", "Consultes dependents", "Referència", "% De comportament", "filtrografia", "per ell", "O a", "Enllaços externs", "obert", "Nota al peu", "Es va dir sobre ell", "Treballa", "Obert", "a sobre de", "Altres projectes", "Sobre", "sobre ella", "Recursos", "Enllaç extern", "referències", "Fonts", "Es va dir sobre ella", "Notes al peu de pàgina", "Referències externes", "Articles relacionats", "Font", "NOTES:", "Enllaç", "Per ella", "Llançaments", "Testimonis", "Notes", "dir", "Recursos a Internet", "Vegeu també", "daveoù", "Enllaç a l'exterior", "Sobre ell", "Vegeu també", "pel·lícula", "conectada", "Referències", "Van dir O.", "Relacionada", "Enllaç extern", "Declaracions sobre", "Sobre", "Cites anteriors", "fonts", "Ambaixador", "Se li diu", "literatura", "sobre ella mateixa", "Enllaços externs", "Aplicacions relacionades", "Cites respecte a", "Veure", "sobrar", "Enllaços d'excés", "cites sobre", "filtrografia", "Nota al peu", "fonts", "recursos", "Altres projectes", "Enllaços externs", "enllaç", "notes", "nota", "Weblinks", "bibliografia", "Articles relacionats", "treballa", "referències", "literatura", "veure", "Vegeu també", "Nota al peu", "Altres projectes"]
forbidden_by_language["co"] = ["daveoù", "Fighjà ancu", "Bibliografia", "FUNZIONI", "Notabile", "Riferimentu", "Un altru nantu à questu", "In l'anniversariu", "Anu dettu di questu", "Filmografia", "Dicendu à", "Ligami", "Anu dettu di ellu", "Sò dettu di circa", "Ligame cù", "I referenze", "I libri più famosi", "Tadri esterni", "Cunnessioni esterni", "FONTI:", "circa ellu", "Quistioni dipendenti", "Riferimentu", "Dubaghju", "Filmografia", "per ellu", "O", "Ligami esterni", "Ghjucà", "nota di nota", "si dicia di ellu", "FUNZIONI", "Ghjucà", "à", "Altri prughjetti", "Circa à", "circa ella", "Risorse", "Link esternu", "Riferimenti", "Fonti", "Si dicia di ella", "Testrootes", "Riferimenti esterni", "Oggetti Relativi", "Fonte", "NOTI:", "Ligami", "Per ella", "Release", "Testimonianza", "Note", "dì", "Risorse in Internet", "Vede ancu", "daveoù", "Ligame à l'esterno", "Circa ellu", "vede ancu", "film", "avanti", "Riferimenti", "Anu dettu O.", "Ligatu", "Link esternu", "Dichjarazioni circa", "circa à", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice à ellu", "Letteratura", "circa ella stessu", "ligami esterni", "Applicazioni ligate", "Quotes cun rispettu à", "Vede", "finitu", "Ligami d'uccasioni", "citazioni circa", "Filmografia", "nota di nota", "fonti", "Risorse", "altri prughjetti", "ligami esterni", "Ligami", "Note", "Nota", "weblinks", "bibliografia", "Oggetti Relativi", "FUNZIONI", "Riferimenti", "Letteratura", "vede", "vede ancu", "nota di nota", "altri prughjetti"]
forbidden_by_language["cs"] = ["ve výrocích", "Reference", "Externí odkazy", "Související", "Také se podívejte na", "Bibliografie", "práce", "Pozoruhodný", "odkaz", "Další o tom", "v výročí", "Řekli o tom", "Filmografie", "Říkat", "Odkazy", "Řekli o něm", "Říkají se asi", "citáty", "Odkaz na", "odkazy", "Nejznámější knihy", "Vnější Tads.", "Externí připojení", "Prameny:", "o něm", "Závislé dotazy", "Odkaz", "Dabing", "filmografie", "pro něj", "Ó", "externí odkazy", "hra", "poznámka pod čarou", "Řekl to o něm", "Práce", "Hra", "na", "Další projekty", "O", "o ní", "Zdroje", "Externí odkaz", "Reference", "Prameny", "Řekl to o ní", "poznámky pod čarou", "Externí odkazy", "Související zboží", "Zdroj", "Poznámky:", "Odkazy", "Pro ni", "Releases", "Svědectví", "Poznámky", "říci", "Zdroje v Internetu", "Viz také", "daveoù.", "Odkaz na vnější stranu", "O něm", "viz také", "film", "na", "Reference", "Řekli O.", "Příbuzný", "Externí odkaz", "Výkazy", "o", "Citace výše", "prameny", "Velvyslanec", "Říká se mu", "literatura", "o sobě", "externí odkazy", "Související aplikace", "S ohledem na", "Vidět", "přes", "Přebytečné odkazy", "cituje", "filmografie", "poznámka pod čarou", "prameny", "zdroje", "Další projekty", "externí odkazy", "Odkazy", "poznámky", "Poznámka", "webové odkazy", "bibliografie", "Související zboží", "práce", "Reference", "literatura", "vidět", "viz také", "poznámka pod čarou", "Další projekty"]
forbidden_by_language["da"] = ["Eksterne henvisninger", "Kilder", "Se også på", "Bibliografi.", "arbejder", "Bemærkelsesværdig", "reference", "En anden om det", "i jubilæet.", "de sagde om det", "Filmografi.", "Siger til", "links.", "De sagde om ham", "Er sagt omkring", "citater", "Link til", "henvisninger.", "De mest berømte bøger", "Ydre tads.", "Eksterne forbindelser", "Kilder:", "om ham", "Afhængige forespørgsler", "Reference", "Dubbing.", "Filmografi.", "For ham", "O.", "eksterne links", "spiller.", "fodnote.", "Det blev sagt om ham", "Arbejder", "Spiller.", "på", "Andre projekter", "Om", "om hende", "Ressourcer.", "Eksternt link", "Referencer.", "Kilder.", "Det blev sagt om hende", "fodnoter.", "Eksterne referencer.", "Relaterede elementer.", "Kilde", "Noter:", "Links.", "For hende", "Udgivelser.", "Testimonials.", "Noter.", "sige", "Ressourcer på internettet", "Se også", "daveoù.", "Link til ydersiden", "Om ham", "se også", "film", "på", "Referencer.", "De sagde O.", "Relaterede", "Eksternt link", "Udsagn om", "om", "Citater ovenfor", "Kilder.", "Ambassadør", "Det siges til ham", "litteratur", "om sig selv.", "eksterne links", "Relaterede applikationer", "Citater med hensyn til", "Se", "over", "Overskydende links.", "citater om", "Filmografi.", "fodnote.", "Kilder.", "ressourcer.", "andre projekter", "eksterne links", "links.", "noter.", "Bemærk", "Weblinks.", "bibliografi", "relaterede elementer.", "arbejder", "Referencer.", "litteratur", "se", "se også", "fodnote.", "andre projekter"]
forbidden_by_language["de"] = ["Zitate über", "Filmografie", "Fußnote", "Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Literaturverzeichnis", "verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Fußnote", "andere Projekte", "Auch anschauen", "Bibliographie", "Werke", "Bemerkenswert", "Referenz", "Noch einer darüber", "im Jubiläum", "Sie sagten darüber", "Filmografie", "Sagen zu", "Links", "Sie sagten über ihn", "Sind sagte über", "Zitate", "Link zu", "Empfehlungen", "Die berühmtesten Bücher", "Outer tads", "Externe Verbindungen", "Quellen:", "über ihn", "Abhängige Anfragen", " Referenz", "Synchronisation", "Filmografie", "für ihn", "O", "Externe Links", "Spiele", "Fußnote", "es wurde über ihn gesagt", "Werke", "Spiele", " auf", "Andere Projekte", "Über", "Über sie", "Ressourcen", "Externer Link", "Referenzen", "Quellen", "Es wurde über sie gesagt", "Fußnoten", "Externe Verweise", "Verwandte Artikel", "Quelle", "Notizen:", "Links", "Für sie", "Veröffentlichungen", "Testimonials", "Nicht es", "sagen", "Ressourcen im Internet", "Siehe auch", "daveoù", "Link nach außen", "Über ihn", "Siehe auch", "Film", "on", "Referenzen", "Sie sagten O.", "Verwandte", "externer Link", "Aussagen über", "über", "Zitate oben", "Quellen", "Botschafter", "Es wird ihm gesagt", "Literatur", "über sich selbst", "externe Links", "Verwandte Anwendungen", "Zitate in Bezug auf", "Siehe", "über", "Überzählige Links", "Zitate über", "Filmografie", "Fußnote", " Quellen", "Ressourcen", "andere Projekte", "externe Links", "Links", "Notizen", "Hinweis", "Weblinks", "Bibliographie", "Verwandte Artikel", "Werke", "Referenzen", "Literatur", "sehen", "siehe auch", "Fußnote", "andere Projekte"]
forbidden_by_language["el"] = ["εξωτερικοί σύνδεσμοι", "Επίσης κοιτάξτε", "Βιβλιογραφία", "έργα", "Αξιοσημείωτος", "αναφορά", "Ένα άλλο για αυτό", "Στην επέτειο", "είπαν γι 'αυτό", "Φωτοτυπογραφία", "Λέγοντας", "συνδέσεις", "Είπαν γι 'αυτόν", "Λέγονται", "αποσπάσματα", "Συνδέω με", "παραπομπές", "Τα πιο διάσημα βιβλία", "Εξωτερικά μαύρα", "Εξωτερικές συνδέσεις", "Πηγές:", "για αυτόν", "εξαρτώμενα ερωτήματα", "Αναφορά", "Μεταγλώ", "φωτοτυπογραφία", "για εκείνον", "O", "εξωτερικοί σύνδεσμοι", "παίζει", "υποσημείωση", "Είχε ειπωθεί γι 'αυτόν", "Εργα", "Παίζει", "επάνω σε", "Άλλα έργα", "Σχετικά με", "σχετικά με αυτήν", "Πόροι", "Εξωτερικός σύνδεσμος", "βιβλιογραφικές αναφορές", "Πηγές", "Είχε ειπωθεί γι 'αυτήν", "υποσημειώσεις", "Εξωτερικές αναφορές", "Σχετικά Αντικείμενα", "Πηγή", "Σημειώσεις:", "Συνδέσεις", "Για εκείνη", "Απελευθερώνει", "Μαρτυρίες", "Σημειώνει", "λένε", "Πόροι στο Διαδίκτυο", "Δείτε επίσης", "daveoù", "Σύνδεσμος προς το εξωτερικό", "Για αυτόν", "δείτε επίσης", "ταινία", "επί", "βιβλιογραφικές αναφορές", "Είπαν Ο.", "Σχετίζεται με", "εξωτερικός σύνδεσμος", "Δηλώσεις σχετικά με", "σχετικά με", "Παραπάνω αναφορές", "πηγές", "Πρεσβευτής", "Του λέγεται", "λογοτεχνία", "Σχετικά με τον εαυτό της", "εξωτερικοί σύνδεσμοι", "Σχετικές εφαρμογές", "Αποσπάσματα σε σχέση με", "Βλέπω", "πάνω από", "Υπερβολικοί σύνδεσμοι", "αποσπάσματα περίπου", "φωτοτυπογραφία", "υποσημείωση", "πηγές", "πόροι", "Άλλα έργα", "εξωτερικοί σύνδεσμοι", "συνδέσεις", "σημειώνει", "Σημείωση", "διαδικτυακοί σύνδεσμοι", "βιβλιογραφία", "Σχετικά Αντικείμενα", "έργα", "βιβλιογραφικές αναφορές", "λογοτεχνία", "βλέπω", "δείτε επίσης", "υποσημείωση", "Άλλα έργα"]
forbidden_by_language["et"] = ["välislingid", "Vaata ka", "Bibliograafia", "töötama", "Märkimisväärne", "viide", "Teine sellest", "aastapäeval", "Nad ütlesid sellest", "Filmograafia", "Öeldes", "lingid", "Nad ütlesid temast", "Öeldakse", "tsitaat", "Link", "viited", "Kõige kuulsamad raamatud", "Outer Tads", "Välised ühendused", "Allikad:", "temast", "sõltus päringutest", "Viide", "Dubleerimine", "filmograafia", "tema jaoks", "O", "Välised lingid", "mängima", "joonealune märkus", "Ta ütles temast", "Töötama", "Mängima", "peale", "Muud projektid", "Umbes", "temast", "Vahendid", "Väline link", "viited", "Allikad", "Tema kohta öeldi", "joonealused märkused", "Välised viited", "Seotud üksused", "Allikas", "Märkused:", "Lingid", "Temale", "Väljaanded", "Iseloomustused", "Märgib", "ütlema", "Ressursid Internetis", "Vaata ka", "daveoù", "Link väljastpoolt", "Temast", "Vaata ka", "film", "peal", "Viited", "Nad ütlesid O.", "Seotud", "Väline link", "Avaldused", "umbes", "Valitud tsitaadid", "allikad", "Suursaadik", "See on talle öeldud", "kirjandus", "ennast", "Välised lingid", "Seotud rakendused", "Hinnapakkumisi", "Nägema", "üle", "Liigne lingid", "hinnapakkumisi", "filmograafia", "joonealune märkus", "allikad", "vahendid", "Muud projektid", "Välised lingid", "lingid", "märgib", "Märge", "weblinks", "bibliograafia", "Seotud üksused", "töötama", "viited", "kirjandus", "nägema", "Vaata ka", "joonealune märkus", "Muud projektid"]
forbidden_by_language["en"] = ["quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects", "Also look at", "Bibliography", "works", "Notable", "reference", "Another about it", "in the anniversary", "they said about it", "Filmography", "Saying to", "links", "They said about him", "Are said about", "Link to", "referrals", "The most famous books", "Outer tads", "External connections", "Sources:", "about him", "depended queries", "Reference", "Dubbing", "filmography", "for him", "O", "External links", "plays", "footnote", "it was said about him", "Works", "Plays", "upon", "Other projects", "About", "about her", "Resources", "External link", "references", "Sources", "It was said about her", "footnotes", "External references", "Related items", "Source", "Notes:", "Links", "For her", "Releases", "Testimonials", "Notes", "say", "resources in Internet", "See also", "daveoù", "Link to the outside", "About him", "see also", "film", "on", "References", "They said O.", "Related", "external link", "Statements about", "about", "Citations above", "sources", "Ambassador", "It is said to him", "literature", "about herself", "external links", "Related Applications", "Quotes with respect to", "See", "over", "Excess links", "quotes about", "filmography", "footnote", "sources", "resources", "other projects", "external links", "links", "notes", "note", "weblinks", "bibliography", "related items", "works", "references", "literature", "see", "see also", "footnote", "other projects"]
forbidden_by_language["eu"] = ["Kanpo loturak", "Erreferentziak", "Begira ere", "Bibliografia", "zeregin", "Nabarmen", "kontsulta", "Horri buruz", "Urteurrenean", "Esan zuten", "Filmografia", "Esanda", "estekak", "Berari buruz esan zuten", "Esaten da", "aipamen", "Esteka", "ikuskapen", "Liburu ospetsuenak", "Kanpoko Tads", "Kanpoko konexioak", "Iturriak:", "Berari buruz", "Dependatutako kontsultak", "Kontsulta", "Bosbing", "Filmografia", "harentzat", "O", "Kanpoko estekak", "Plays", "oharra", "Berari buruz esan zen", "Zeregin", "Plays", "-en gainean", "Beste proiektu batzuk", "Ei buruz", "haren inguruan", "Baliabide", "Kanpoko esteka", "erreferentziak", "Iturriak", "Berari buruz esan zen", "Oharrak", "Kanpoko erreferentziak", "Lotutako elementuak", "Iturri", "Oharrak:", "Estekak", "Berarentzat", "Oheratu", "Testigantzak", "Ohar", "esan", "Baliabideak Interneten", "Ikusi ere", "Daveoù", "Kanpotik estekatu", "Berari buruz", "ikusi ere", "mintz", "-en gainean", "Erreferentziak", "Esan zuten O.", "Lotinduta", "Kanpoko esteka", "Adierazpenak", "ei buruz", "Goiko aipuak", "iturriak", "Enbaxadore", "Esan dio", "literatura", "bere buruari buruz", "Kanpoko estekak", "Lotutako aplikazioak", "Aipamenak", "Ikusi", "-en gainetik", "Gehiegizko estekak", "aipamenak buruz", "Filmografia", "oharra", "iturriak", "baliabide", "Beste proiektu batzuk", "Kanpoko estekak", "estekak", "ohar", "ohar", "Weblinkak", "Bibliografia", "Lotutako elementuak", "zeregin", "erreferentziak", "literatura", "ikusi", "ikusi ere", "oharra", "Beste proiektu batzuk"]
forbidden_by_language["fa"] = ["دربارهٔ او", "پیوند به بیرون", "جستارهای وابسته", "منبع\u200cدار", "منابع", "پیوند به\u200cبیرون", "همچنین نگاه کن", "کتابشناسی - فهرست کتب", "آثار", "قابل توجه", "مرجع", "یکی دیگر در مورد آن", "در سالگرد", "آنها درباره آن گفتند", "فیلمنامه نویسی", "گفتن به", "پیوندها", "آنها درباره او گفتند", "در مورد آنها گفته شده است", "نقل قول", "پیوند به", "ارجاع", "مشهورترین کتاب ها", "بیرونی", "اتصالات خارجی", "منابع:", "درباره ی او", "پرسش های وابسته", "ارجاع", "دوبله", "فیلمنامه نویسی", "برای او", "o", "لینک های خارجی", "نمایشنامه", "پاورقی", "در مورد او گفته شد", "آثار", "نمایشنامه", "بر", "پروژه های دیگر", "در باره", "در مورد او", "منابع", "لینک خارجی", "منابع", "منابع", "در مورد او گفته شد", "پانویسها و منابع", "منابع خارجی", "آیتم های مرتبط", "منبع", "یادداشت:", "پیوندها", "برای او", "منتشر شده", "توصیفات", "یادداشت", "گفتن", "منابع در اینترنت", "همچنین ببینید", "daveoù", "پیوند به خارج", "درباره ی او", "همچنین ببینید", "فیلم", "بر", "منابع", "آنها گفتند O.", "مربوط", "لینک خارجی", "اظهارات در مورد", "در باره", "نقل قول بالا", "منابع", "سفیر", "به او گفته شده است", "ادبیات", "درباره خودش", "لینک های خارجی", "برنامه های مرتبط", "نقل قول با توجه به", "دیدن", "بر فراز", "لینک های اضافی", "نقل قول در مورد", "فیلمنامه نویسی", "پاورقی", "منابع", "منابع", "پروژه های دیگر", "لینک های خارجی", "پیوندها", "یادداشت", "توجه داشته باشید", "weblinks", "کتابشناسی - فهرست کتب", "آیتم های مرتبط", "آثار", "منابع", "ادبیات", "دیدن", "همچنین ببینید", "پاورقی", "پروژه های دیگر"]
forbidden_by_language["es"] = ["citas sobre", "filmografía", "nota al pie", "fuentes", "recursos", "otros proyectos", "enlaces externos", "enlaces", "notas", "nota", "enlaces web", "bibliografía"," artículos relacionados"," obras"," referencias"," literatura"," ver"," ver también"," nota al pie"," otros proyectos"," Mirar también"," Bibliografía"," obras", "Notable", "referencia", "Otro sobre eso", "en el aniversario", "Ellos dijeron al respecto", "Filmografía", "Diciendo a", "Enlaces", "Ellos dijeron sobre él", "Son dijo sobre"," citas"," Enlace a"," referencias"," Los libros más famosos"," Tads externos"," Conexiones externas"," Fuentes:"," sobre él"," consultas dependientes"," Referencia"," Doblaje"," filmografía"," para él"," O"," Enlaces externos"," obras de teatro"," nota al pie"," se dijo sobre él"," Obras"," Obras de teatro"," sobre"," Otros proyectos"," Acerca de"," Acerca de ella"," Recursos"," Enlace externo"," Referencias"," Fuentes"," Se dijo sobre ella"," Notas al pie"," Referencias externas", "Artículos relacionados", "Fuente", "Notas:", "Enlaces", "Para ella", "Lanzamientos", "Testimonios", "No es"," decir"," recursos en Internet"," Ver también"," daveoù"," Enlace con el exterior"," Acerca de él"," ver también"," película"," sobre"," Referencias", "Dijeron O.", "Relacionado", "Enlace externo", "Declaraciones sobre", "Sobre", "Citas arriba", "Fuentes", "Embajador", "Se le dice a él", "Literatura", "sobre ella", "enlaces externos", "Aplicaciones relacionadas", "Citas con respecto a", "Ver", "sobre", "Enlaces en exceso", "Citas sobre", "filmografía", "nota al pie", " fuentes"," recursos"," otros proyectos"," enlaces externos"," enlaces"," notas"," nota"," enlaces web"," bibliografía"," artículos relacionados"," obras"," referencias", "literatura", "ver", "ver también", "nota al pie", "otros proyectos"]
forbidden_by_language["fi"] = ["lainaukset aiheesta","Aiheesta muualla" , "filmografia", "alaviite", "lähteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "weblinks", "bibliografia", "liittyvät kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso myös", "alaviite", "muut projektit", "katso myös", "Bibliografia", "teokset", "Huomattava", "viite", "Toinen siitä", "juhlapäivänä", "he sanoivat siitä", "Filmografia", "Sanominen", "linkit", "He sanoivat hänestä", "Ovatko sanoi aiheesta", "lainaukset", "Linkki", "viittaukset", "kuuluisimmat kirjat", "Ulkoiset", "Ulkoiset yhteydet", "Lähteet:", "Hänestä", "riippuvaiset kyselyt", " Viite", "Kopiointi", "filmografia", "hänelle", "O", "ulkoiset linkit", "näytelmät", "alaviite", "hänestä sanottiin", "teokset", "näytelmät", " upon", "Muut projektit", "Tietoja", "Hänestä", "Resurssit", "Ulkoinen linkki", "viitteet", "Lähteet", "Hänestä sanottiin", "alaviitteet", "Ulkoiset viitteet", "Aiheeseen liittyvät kohteet", "Lähde", "Huomautukset:", "Linkit", "Hänelle", "Julkaisut", "Lausunnot", "Ei es", "sano", "resurssit Internetissä", "Katso myös", "daveoù", "Linkki ulkopuolelta", "Tietoa hänestä", "katso myös", "elokuva", "päällä", "viitteet", "He sanoivat O.", "Aiheeseen liittyvä", "ulkoinen linkki", "Lausunnot aiheesta", "Tietoja", "Yllä olevat lainaukset", "lähteet", "suurlähettiläs", "Hänelle sanotaan", "kirjallisuus", "itsestään", "ulkoiset linkit", "Aiheeseen liittyvät sovellukset", "Lainaukset suhteessa", "Katso", "yli", "Ylimääräiset linkit", "lainauksia", "filmografia", "alaviite", " lähteet", "resurssit", "muut projektit", "ulkoiset linkit", "linkit", "muistiinpanot", "huomautus", "verkkolinkit", "bibliografia", "liittyvät kohteet", "teokset", "viitteet", "kirjallisuus", "katso", "katso myös", "alaviite", "muut hankkeet"]
forbidden_by_language["fr"] = ["citations sur", "filmographie", "note de bas de page", "sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliogprahie", "éléments liés", "œuvres", "références", "littérature", "voir", "voir aussi", "note de bas de page", "autres projets", "Regarder aussi", "Bibliographie", "œuvres", "Remarquable", "référence", "Un autre à ce sujet", "à l'anniversaire", "ils en ont dit", "Filmographie", "En disant à", "liens", "Ils ont dit à propos de lui", "Sont dit à propos de", "citations", "Lien vers", "références", "Les livres les plus célèbres", "Tads externes", "Connexions externes", "Sources :", "à propos de lui", "requêtes dépendantes", " Référence", "Doublage", "filmographie", "pour lui", "O", "Liens externes", "pièces", "note de bas de page", "on a dit de lui", "Travaux", "Joues", " sur", "Autres projets", "À propos", "à propos d'elle", "Ressources", "Lien externe", "Références", "Sources", "On a dit d'elle", "Notes de bas de page", "Références externes", "Articles associés", "Source", "Notes :", "Liens", "Pour elle", "Releases", "Témoignages", "Non es", "dire", "ressources sur Internet", "Voir aussi", "daveoù", "Lien vers l'extérieur", "A propos de lui", "voir aussi", "film", "sur", "Références", "Ils ont dit O.", "Connexe", "lien externe", "Déclarations sur", "à propos", "Citations ci-dessus", "sources", "Ambassadeur", "On lui dit", "littérature", "à propos d'elle-même", "liens externes", "Applications associées", "Citations concernant", "Voir", "over", "Liens excédentaires", "Citations sur", "filmographie", "note de bas de page", " sources", "ressources", "autres projets", "liens externes", "liens", "notes", "note", "liens web", "bibliographie", "éléments associés", "ouvrages", "références", "littérature", "voir", "voir aussi", "note de bas de page", "autres projets"]
forbidden_by_language["he"] = ["ציטוטים על", "פילמוגרפיה", "הערת שוליים", "מקורות", "משאבים", "פרויקטים אחרים", "קישורים חיצוניים", "קישורים", "הערות", "הערה", "קישורי אינטרנט", "ביבליוגפרה'", "פריטים קשורים", "עבודות", "הפניות", "ספרות", "ראה", "ראה גם", "הערת שוליים", "פרויקטים אחרים", "הסתכל גם על", "ביבליוגרפיה", "עבודות", "ראוי לציון", "התייחסות", "עוד על זה", "ביום השנה", "אמרו על זה", "פילמוגרפיה", "אומרים ל", "קישורים", "אמרו עליו", "האם אמר על", "ציטוטים", "קישור אל", "הפניות", "'הספרים המפורסמים ביותר", "תקשורים חיצוניים", "חיבורים חיצוניים", "מקורות:", "עליו", "שאילתות תלויות", " הפניה", "דיבוב", "פילמוגרפיה", "בשבילו", "O", "קישורים חיצוניים", "הצגות", "הערת שוליים", "אמרו עליו", "עבודות", "מחזות", " על", "פרויקטים אחרים", "אודות", "עליה", "משאבים", "קישור חיצוני", "הפניות", "מקורות", "נאמר עליה", "הערות שוליים", "הפניות חיצוניות", "פריטים קשורים", "מקור", "הערות:", "קישורים", "בשבילה", "פרסומים", "המלצות", "לא es", "אמר", "משאבים באינטרנט", "ראה גם", "daveoù", "קישור אל החוץ", "אודותיו", "ראה גם", "סרט", "על", "הפניות", "הם אמרו O", "קשורים", "קישור חיצוני", "הצהרות על", "על", "ציטוטים למעלה", "מקורות", "שגריר", "נאמר לו", "ספרות", "על עצמה", "קישורים חיצוניים", "יישומים קשורים", "ציטוטים ביחס ל", "ראה", "מעל", "קישורים עודפים", "ציטוטים על", "פילמוגרפיה", "הערת שוליים", " מקורות", "משאבים", "פרויקטים אחרים", "קישורים חיצוניים", "קישורים", "הערות", "הערה", "קישורי אינטרנט", "ביבליוגרפיה", "פריטים קשורים", "עבודות", "הפניות", "ספרות", "ראה", "ראה גם", "הערת שוליים", "פרויקטים אחרים"]
forbidden_by_language["hi"] = ["के बारे में उद्धरण", "फिल्मोग्राफी", "फुटनोट", "स्रोत", "संसाधन", "अन्य परियोजनाएं", "बाहरी लिंक", "लिंक", "नोट्स", "नोट", "वेबलिंक", "ग्रंथ सूची", "संबंधित आइटम", "कार्य", "संदर्भ", "साहित्य", "देखें", "यह भी देखें", "फुटनोट", "अन्य परियोजनाएं", "भी देखें", "ग्रंथ सूची", "काम करता है", "उल्लेखनीय", "संदर्भ", "इसके बारे में एक और", "वर्षगांठ में", "उन्होंने इसके बारे में कहा", "फिल्मोग्राफी", "सेइंग टू", "लिंक्स", "उन्होंने उसके बारे में कहा", "हैं के बारे में कहा", "उद्धरण", "लिंक टू", "रेफ़रल", "सबसे प्रसिद्ध किताबें", "बाहरी बच्चे", "बाहरी कनेक्शन", "स्रोत:", "उसके बारे में", "आश्रित प्रश्न", " संदर्भ", "डबिंग", "फिल्मोग्राफी", "उसके लिए", "ओ", "बाहरी लिंक", "नाटक", "फुटनोट", "उसके बारे में कहा गया", "काम करता है", "नाटक", "अन्य प्रोजेक्ट", "अबाउट", "उसके बारे में", "संसाधन", "बाहरी लिंक", "संदर्भ", "स्रोत", "उसके बारे में कहा गया", "फुटनोट", "बाहरी संदर्भ", "संबंधित आइटम", "स्रोत", "नोट्स:", "लिंक", "उसके लिए", "रिलीज़", "प्रशंसापत्र", "नहीं es", "कहते हैं", "इंटरनेट में संसाधन", "यह भी देखें", "डेवो", "बाहर से लिंक करें", "उसके बारे में", "यह भी देखें", "फिल्म", "पर", "संदर्भ", "उन्होंने कहा ओ", "संबंधित", "बाहरी लिंक", "बयानों के बारे में", "के बारे में", "उपरोक्त उद्धरण", "स्रोत", "राजदूत", "यह उसे कहा जाता है", "साहित्य", "अपने बारे में", "बाहरी लिंक", "संबंधित अनुप्रयोग", "के संबंध में उद्धरण", "देखें", "ओवर", "अतिरिक्त लिंक", "उद्धरण के बारे में", "फिल्मोग्राफी", "फुटनोट", " स्रोत", "संसाधन", "अन्य परियोजनाएं", "बाहरी लिंक", "लिंक", "नोट्स", "नोट", "वेबलिंक", "ग्रंथ सूची", "संबंधित आइटम", "कार्य", "संदर्भ", "साहित्य", "देखें", "यह भी देखें", "फुटनोट", "अन्य परियोजनाएं"]
forbidden_by_language["hr"] = ["navodnici o", "filmografija", "fusnota", "izvori", "izvori", "drugi projekti", "vanjske veze", "veze", "bilješke", "napomena", "weblinks", "bibliografija", "srodne stavke", "radovi", "reference", "literatura", "vidi", "vidi također", "fusnota", "drugi projekti", "također pogledajte", "Bibliografija", "radovi", "Zapaženo", "referenca", "Još jedan o tome", "u obljetnici", "rekli su o tome", "Filmografija", "Kaže se", "linkovi", "Rekli su o njemu", "Jesu li rekao o", "citati", "Veza na", "preporuke", "Najpoznatije knjige", "Vanjski tad", "Vanjske veze", "Izvori:", "o njemu", "ovisni upiti", " Referenca", "Sinhronizacija", "filmografija", "za njega", "O", "Vanjske veze", "predstave", "fusnota", "rečeno je o njemu", "Djela", "Predstave", " na", "Drugi projekti", "O njoj", "O njoj", "Resursi", "Vanjski link", "reference", "Izvori", "Rečeno je o njoj", "fusnote", "Vanjske reference", "Povezane stavke", "Izvor", "Napomene:", "Veze", "Za nju", "Izdanja", "Izjave", "Ne es", "recimo", "resursi na Internetu", "Vidi također", "daveoù", "Veza prema van", "O njemu", "vidi također", "film", "on", "Reference", "Rekli su O.", "Povezano", "vanjska veza", "Izjave o", "o", "Navodi gore", "izvori", "Ambasador", "Rečeno mu je", "književnost", "o sebi", "vanjske veze", "Povezane aplikacije", "Citati s obzirom na", "Vidi", "preko", "Višak veza", "citati o", "filmografija", "fusnota", " izvori", "resursi", "ostali projekti", "vanjske veze", "veze", "bilješke", "bilješka", "web-veze", "bibliografija", "srodne stavke", "radovi", "reference", "književnost", "vidi", "vidi također", "fusnota", "drugi projekti"]
forbidden_by_language["is"] = ["tilvitnanir um", "kvikmyndafræði", "neðanmálsgrein", "heimildir", "auðlindir", "önnur verkefni", "ytri tenglar", "tenglar", "aths", "ath", "weblinks", "heimildaskrá", "tengd atriði", "verk", "tilvísanir", "bókmenntir", "sjá", "sjá einnig", "neðanmálsgrein", "önnur verkefni", "Skoðaðu líka", "Heimildaskrá", "verk", "Athyglisvert", "tilvísun", "Annað um það", "í afmælinu", "þeir sögðu um það", "Kvikmyndataka", "Seggja við", "tenglar", "Þeir sögðu um hann", "Eru sagði um", "tilvitnanir", "Tengill á", "tilvísanir", "Frægustu bækurnar", "Ytri tads", "Ytri tengingar", "Heimildir:", "um hann", "háðar fyrirspurnir", " Tilvísun", "talsetning", "kvikmyndataka", "fyrir hann", "O", "Ytri hlekkir", "leikrit", "neðanmálsgrein", "það var sagt um hann", "verk", "leikrit", " á", "Önnur verkefni", "Um", "um hana", "Auðlindir", "Ytri tengill", "tilvísanir", "Heimildir", "Það var sagt um hana", "neðanmálsgrein", "Ytri tilvísanir", "Tengd atriði", "Heimild", "Athugasemdir:", "Tenglar", "Fyrir hana", "Útgáfur", "Vitnisburður", "Ekki es", "segja", "tilföng á internetinu", "Sjá líka", "daveoù", "Tengill að utan", "Um hann", "sjá líka", "kvikmynd", "on", "Tilvísanir", "Þeir sögðu O.", "Tengd", "ytri tengill", "Yfirlýsingar um", "um", "Tilvitnanir að ofan", "heimildir", "sendiherra", "það er sagt við hann", "bókmenntir", "um sjálfa sig", "ytri tenglar", "Tengd forrit", "Tilvitnanir með tilliti til", "Sjá", "yfir", "Umframtenglar", "tilvitnanir um", "kvikmyndafræði", "neðanmáls", " heimildir", "tilföng", "önnur verkefni", "ytri hlekkir", "tenglar", "athugasemdir", "aths", "veftenglar", "heimildaskrá", "tengd atriði", "verk", "tilvísanir", "bókmenntir", "sjá", "sjá einnig", "neðanmálsgrein", "önnur verkefni"]
forbidden_by_language["it"] = ["citazioni su", "filmografia", "nota", "fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli correlati", "opere", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a piè di pagina", "altri progetti", "guarda anche", "bibliografia", "lavori", "Notevole", "riferimento", "Un altro a riguardo", "nell'anniversario", "hanno detto a riguardo", "Filmografia", "Detto a", "link", "Hanno detto di lui", "Sono ha detto su", "citazioni", "Link a", "riferimenti", "I libri più famosi", "Schede esterne", "Connessioni esterne", "Fonti:", "su di lui", "domande dipendenti", " Riferimento", "Doppiaggio", "filmografia", "per lui", "O", "Link esterni", "ascolta", "nota a piè di pagina", "si diceva di lui", "Lavori", "Riproduzioni", " su", "Altri progetti", "Su", "su di lei", "Risorse", "Link esterno", "riferimenti", "Fonti", "Si diceva di lei", "note a piè di pagina", "Riferimenti esterni", "Articoli correlati", "Fonte", "Note:", "Link", "Per lei", "Pubblicazioni", "Testimonianze", "Non es", "say", "risorse in Internet", "Vedi anche", "daveoù", "Link all'esterno", "Su di lui", "vedi anche", "film", "on", "Riferimenti", "Hanno detto O.", "Correlato", "link esterno", "Dichiarazioni su", "su", "Citazioni sopra", "fonti", "Ambasciatore", "Si dice a lui", "letteratura", "su di sé", "link esterni", "applicazioni correlate", "citazioni rispetto a", "vedere", "sopra", "collegamenti in eccesso", "citazioni su", "filmografia", "nota a piè di pagina", " fonti", "risorse", "altri progetti", "link esterni", "link", "note", "nota", "link web", "bibliografia", "articoli correlati", "lavori", "riferimenti", "letteratura", "vedi", "vedi anche", "nota a piè di pagina", "altri progetti"]
forbidden_by_language["ja"] = ["引用'、 '脚注'、 'ソース'、 'リソース'、 'その他のプロジェクト'、 '外部リンク'、 'リンク'、 'ノート'、 '注'、 'ウェブリンク'、 '参考文献' ' '、'作品 '、'参考文献 '、'文学 '、'参照 '、'関連項目 '、'脚注 '、'その他のプロジェクト '、'関連項目 '、'参考文献 '、'作品 ' 、 '注目すべき'、 '参照'、 'それについての別の'、 '記念日'、 '彼らはそれについて言った'、 '映画誌'、 '言っている'、 'リンク'、 '彼らは彼について言った'、 ' '、'引用符 '、'リンク先 '、'参照 '、'最も有名な本 '、'外部接続 '、'外部接続 '、'出典: '、'彼について '、'依存クエリ '、'参照 '、'ダビング '、'フィルムグラフィー '、'彼のために '、' O '、'外部リンク '、'演劇 '、'脚注 '、'彼について言われた '、'作品 '、'演劇 '、' '、'その他のプロジェクト '、'について '、'彼女について '、'リソース '、'外部リンク '、'参照 '、'ソース '、'彼女について言われた '、'脚注 '、'外部参照 ' 、 '関連項目'、 'ソース'、 '注:'、 'リンク'、 '彼女のために'、 'リリース'、 '証言","脚注'、 '言う'、 'インターネットのリソース'、 '関連項目' 、、 '外部へのリンク'、 '彼について'、 '関連項目'、 '映画'、 '参考文献'、 '参照はO '、'リソース'、 '映画リンク'、 'リンク'、 '注'、 '注'、 'レリンク'、 '参照参照'、 '化参照'、 '作品 '、'参照 '、'文献 '、'参照 '、'関連項目 '、'脚注 '、'その他のプロジェクト "]
forbidden_by_language["ka"] = ["ციტატები", "ფილმოგრაფია", "სქოლიო", "წყაროები", "რესურსები", "სხვა პროექტები", "გარე ბმულები", "ბმულები", "შენიშვნები", "შენიშვნა", "ვებლინკები", "ბიბლიოგრაფია'", "დაკავშირებული ერთეულები", "ნამუშევრები", "ცნობები", "ლიტერატურა", "იხილეთ", "ასევე იხილეთ", "სქოლიო", "სხვა პროექტები", "ასევე შეხედე", "ბიბლიოგრაფია", "ნამუშევრები' , „აღსანიშნავი“, „მინიშნება“, „კიდევ ერთი ამის შესახებ“, „იუბილეზე“, „ამის შესახებ თქვეს“, „ფილმოგრაფია“, „ამბობენ“, „ბმულები“, „მასზე თქვეს“, „არის. ნათქვამია შესახებ", "ციტატები", "ბმული", "რეფერატები", "ყველაზე ცნობილი წიგნები", "გარე ბავშვები", "გარე კავშირები", "წყაროები:", "მის შესახებ", "დამოკიდებული შეკითხვები", " მითითება“, „დუბლირება“, „ფილმოგრაფია“, „მისთვის“, „ო“, „გარე ბმულები“, „სპექტაკლები“, „სქოლიო“, „მასზე ითქვა“, „ნამუშევრები“, „სპექტაკლები“, „ საფუძველზე", "სხვა პროექტები", "შესახებ", "მის შესახებ", "რესურსები", "გარე ბმული", "ცნობები", "წყაროები", "ითქვა მის შესახებ", "სქოლიოები", "გარე ცნობები", "დაკავშირებული ნივთები", "წყარო", "შენიშვნები:", "ბმულები", "მისთვის", "გამოშვებები", "ჩვენებები", "N ოტესები", "ვთქვათ", "რესურსები ინტერნეტში", "ასევე იხილეთ", "დავეოუ", "გარედან ბმული", "მის შესახებ", "ასევე იხილეთ", "ფილმი", "ჩართული", "ცნობები", "მათ თქვეს ო.", "დაკავშირებული", "გარე ბმული", "განცხადებები", "შესახებ", "ციტატები ზემოთ", "წყაროები", "ელჩი", "მას უთხრეს", "ლიტერატურა", "თავის შესახებ", "გარე ბმულები", "დაკავშირებული აპლიკაციები", "ციტატები დაკავშირებით", "იხილეთ", "ზედა", "ჭარბი ბმულები", "ციტატები", "ფილმოგრაფია", "სქოლიო", " წყაროები", "რესურსები", "სხვა პროექტები", "გარე ბმულები", "ბმულები", "შენიშვნები", "შენიშვნა", "ვებლინკები", "ბიბლიოგრაფია", "დაკავშირებული ერთეულები", "ნამუშევრები", "ცნობები", "ლიტერატურა", "იხილეთ", "ასევე იხილეთ", "სქოლიო", "სხვა პროექტები"]
forbidden_by_language["pl"] = ['zobacz też', 'o ', 'Zasoby', 'Wydanie', 'o nim', 'Link do zewnątrz', 'Cytaty w odniesieniu do', 'Bibliografia', 'Najbardziej znane książki', 'powiedzieli o tym', 'Powiedziane są o', 'Powiązane przedmioty', 'na', 'spinki do mankietów', 'Powiązane zastosowania', 'referencja', 'Powiedzieli o nim', 'Również patrzeć', 'Pracuje', 'literatura', 'Link zewnętrzny', 'Referencje.', 'Bibliografia', 'zależało zapytania', 'Daveoù.', 'Powiedział o niej', 'Spinki do mankietów', 'Pracuje', 'Uwagi:', 'Dubbing.', 'przypisy', 'Widzieć', 'Mówiono o nim', 'o niej', 'Ambasador', 'cytaty', 'bawić się', 'film', 'O.', 'Filmografia', 'O nim', 'Związane z', 'Zewnętrzne odniesienia', 'Cytaty powyżej', 'link zewnętrzny', 'Bibliografia', 'Inne projekty', 'Filmografia', 'Outer Tads.', 'Źródło', 'Zewnętrzne linki', 'Zasoby w Internecie.', 'notatka', 'Zobacz też', 'Referencja', 'Powiedzieli O.', 'Notatki', 'Dla niej', 'Znaczny', 'nad', 'Mówi się mu', 'Nadmiarowe linki', 'o', 'O sobie', 'Bawić się', 'ŹRÓDŁA', 'mowić', 'Inny o tym', 'Mówiąc do', 'Połączenia zewnętrzne', 'Zobacz też', 'od', 'O', 'w rocznicy.', 'Łączyć z', 'skierowania', 'dla niego', 'Źródła:', 'Oświadczenia o', 'ŹRÓDŁA', 'Zewnętrzne linki', 'cytaty', 'Filmografia', 'notatka', 'ŹRÓDŁA', 'Surowce', 'inne projekty', 'Zewnętrzne linki', 'spinki do mankietów', 'notatki', 'Notatka', 'linki internetowe', 'bibliografia', 'powiązane przedmioty', 'Pracuje', 'Bibliografia', 'literatura', 'zobaczyć', 'Zobacz też', 'notatka', 'inne projekty']
forbidden_by_language["pt"] = ["Ligações externas","citações sobre ele", "citações sobre ela", "filmografia", "nota de rodapé", "fontes", "recursos", "outros projetos", "links externos", "links", "notas", "nota", "links da web", "bibliografia", "itens relacionados", "obras", "referências", "literatura", "ver", "ver também", "nota de rodapé", "outros projetos" , "Veja também", "Bibliografia", "obras", "Notável", "Referência", "Outra sobre isso", "no aniversário", "foi dito sobre ela", "Filmografia", "Dizendo a "," links "," Disseram sobre ele "," Dizem sobre "," Link para "," referências "," Os livros mais famosos "," Meninos de fora "," Conexões externas "," Fontes: ", "sobre ele", "consultas dependentes", "Referência", "Dublagem", "filmografia", "para ele", "O", "Ligações externas", "peças", "nota de rodapé", "foi falado sobre ele "," Funciona "," Joga "," sobre "," Outros projetos "," Sobre "," sobre ela "," Recursos "," Link externo "," Referências "," Fontes "," Foi dito sobre ela "," notas de rodapé "," Referências externas "," Itens relacionados "," Fonte "," Notas: "," Link s "," Releases "," Notes "," resources in Internet "," See also "," daveoù "," Link to the outside "," About him "," see also "," film ", "Referências", "Disseram sobre ele", "Relacionadas", "link externo", "Declarações sobre" , "Citações acima", "fontes", "Embaixador", "Diz-se sobre ele", "literatura "," Disseram sobre ela "," links externos "," Aplicativos relacionados "," Citações a respeito de "," Ver ", " sobre "," Excesso de links "," citações sobre "," filmografia "," nota de rodapé "," fontes "," recursos "," outros projetos "," links externos "," links "," notas "," nota "," links da web "," bibliografia "," itens relacionados "," trabalhos "," referências "," literatura "," ver "," ver também "," nota de rodapé "," outros projetos "]
forbidden_by_language["ro"] = ['legături externe', 'despre', 'NOTE:', 'literatură', 'sa spus despre el', 'despre el', 'Dobbing.', 'Pentru ea', 'Se spune despre', 'Articole conexe', 'Notabil', 'Notele de subsol', 'Aplicații înrudite', 'Filmografie', 'Surse:', 'depinde de interogări', 'Referințe externe', 'Au spus despre el', 'Alte proiecte', 'Vedea', 'Uitați de asemenea la', 'Filmografie', 'Despre', 'pe', 'Legate de', 'O.', 'Ambasador', 'joacă', 'referinţă', 'pentru el', 'TADS OUTER.', 'Bibliografie', 'linkuri externe', 'În aniversare', 'Link-uri', 'Releases.', 'despre ea însăși', 'Link-uri', 'lucrări', 'Referinţă', 'Declarații despre', 'Vezi si', 'Cele mai cunoscute cărți', 'Lucrări', 'Sa spus despre ea', 'Link-uri excesive', 'citate', 'Link-ul la exterior', 'Sursă', 'Altul despre el', 'Spunând', 'film', 'Citate cu privire la', 'Spune', 'Daveoù.', 'Link extern', 'Citări de mai sus', 'Vezi si', 'peste', 'Surse.', 'Îi se spune', 'Au spus O.', 'Referințe', 'despre', 'peste', 'Legătura cu', 'Joacă', 'Referințe', 'despre ea', 'Surse.', 'linkuri externe', 'Au spus despre asta', 'Link extern', 'Mărturii', 'notă de subsol', 'Referințe', 'Note', 'Resurse pe Internet', 'Despre el', 'Resurse', 'Conexiuni externe', 'Citate despre', 'Filmografie', 'notă de subsol', 'Surse.', 'resurse', 'Alte proiecte', 'linkuri externe', 'Link-uri', 'note', 'Notă', 'Link-uri web', 'bibliografie', 'Articole conexe', 'lucrări', 'Referințe', 'literatură', 'vedea', 'Vezi si', 'notă de subsol', 'Alte proiecte']
forbidden_by_language["ru"] = ['Об ', 'Фильмография', 'примечания', 'ссылки ', 'см. также', 'Примечания:', 'литература', 'Было сказано о нем', 'о нем', 'Дублировка', 'Для нее', 'Говорится о', 'Похожие материалы', 'нотенно', 'сноски', 'Похожие приложения', 'Фильмография', 'Источники:', 'Взял запросы', 'Внешние ссылки', 'Они сказали о нем', 'Другие проекты', 'Видеть', 'Также смотрите', 'фильмография', 'О', 'на', 'Связанный', 'О', 'Посол', 'пьесы', 'ссылка', 'для него', 'Внешние тады', 'Библиография', 'внешние ссылки', 'в годовщине', 'Ссылки', 'Релизы', 'о себе', 'ссылки', 'работает', 'Ссылка', 'Утверждение о', 'смотрите также', 'Самые известные книги', 'Работает', 'Было сказано о ней', 'Избыточные ссылки', 'Ссылка на улицу', 'Источник', 'Другой об этом', 'Говорить', 'пленка', 'Цитаты по отношению к', 'сказать', 'Daveoù.', 'Внешняя ссылка', 'Цитаты выше', 'Смотрите также', 'над', 'Источники', 'Это сказано ему', 'Они сказали О.', 'использованная литература', 'о', 'на', 'Ссылка на', 'Пьесы', 'рефералы', 'о ней', 'источники', 'внешние ссылки', 'Они сказали об этом', 'внешняя ссылка', 'Отзывы', 'сноска', 'использованная литература', 'Примечания', 'Ресурсы в интернете', 'О нем', 'Ресурсы', 'Внешние соединения', 'цитаты о', 'фильмография', 'сноска', 'источники', 'Ресурсы', 'другие проекты', 'внешние ссылки', 'ссылки', 'Примечания', 'Примечание', 'веб ссылки', 'Библиография', 'Похожие материалы', 'работает', 'использованная литература', 'литература', 'видеть', 'смотрите также', 'сноска', 'другие проекты']
forbidden_by_language["sk"] = ['Povedali o', 'iné projekty', 'referencie', 'Poznámky:', 'literatúra', 'Hovorilo sa o ňom', 'o ňom', 'Dabovanie', 'Pre ňu', 'Hovoria', 'Súvisiace položky', 'Pozoruhodný', 'poznámky pod čiarou', 'Súvisiace aplikácie', 'Filmograf', 'Zdroje:', 'závislé dotazy', 'Externé referencie', 'Povedali o ňom', 'Ostatné projekty', 'Pozrieť sa', 'Pozrite sa aj na', 'filmograf', 'O', 'zapnutý', 'Súvisiaci', 'O', 'Veľvyslanec', 'hrať', 'referencia', 'pre neho', 'Vonkajšie tads', 'Bibliografia', 'vonkajšie odkazy', 'v výročnom', 'Spojenie', 'Vydania', 'o sebe', 'spojenie', 'Tvorba', 'Referencia', 'Vyhlásenia', 'pozri tiež', 'Najznámejšie knihy', 'Tvorba', 'Povedala sa o ňom', 'Prebytočné odkazy', 'citácie', 'Odkaz na vonkajšiu stranu', 'Zdroj', 'O tom', 'Hovoriť', 'film', 'Citáty s ohľadom na', 'povedať', 'daveoù', 'Externý odkaz', 'Vyššie uvedené citácie', 'Pozri tiež', 'nad', 'Zdroje', 'Hovorí sa mu', 'Povedali o.', 'Referencie', 'o', 'na', 'Odkaz na', 'Hrať', 'referencie', 'o nej', 'zdroje', 'vonkajšie odkazy', 'Povedali o tom', 'externý odkaz', 'Referencie', 'poznámka pod čiarou', 'referencie', 'Poznámky', 'Zdroje na internete', 'O ňom', 'Prostriedky', 'Externé pripojenia', 'cituje', 'filmograf', 'poznámka pod čiarou', 'zdroje', 'prostriedky', 'Ostatné projekty', 'vonkajšie odkazy', 'spojenie', 'poznámky', 'Poznámka', 'weblinks', 'Bibliografia', 'Súvisiace položky', 'Tvorba', 'referencie', 'literatúra', 'pozrieť sa', 'pozri tiež', 'poznámka pod čiarou', 'Ostatné projekty']
forbidden_by_language["sl"] = ['viri', 'sklici', 'Opombe:', 'Literatura.', 'Rečeno je bilo o njem', 'o njem', 'Dubbing.', 'Za njo', 'Rečeno', 'Podobni elementi', 'Opazno', 'Opombe', 'Povezane aplikacije', 'Filmografija', 'Viri:', 'odvisne poizvedbe', 'Zunanje reference', 'Rekli so o njem', 'Drugi projekti', 'Glejte', 'Oglejte si tudi', 'filmografija', 'Približno', 'On.', 'Povezano', 'O.', 'Veleposlanik', 'igra', 'Referenca', 'zanj', 'Zunanji tads.', 'Bibliografija', 'Zunanje povezave', 'V obletnici', 'Povezave', 'Sprosti', 'o sebi', 'Povezave', 'dela', 'Referenca', 'Izjave', 'Poglej tudi', 'Najbolj znane knjige', 'Dela', 'Rečeno je bilo o njej', 'Presežne povezave', 'citate', 'Povezava na zunanjost', 'Vir.', 'Drugo o tem', 'Rekel', 'film', 'Citati v zvezi s tem', 'reči.', 'daveoù.', 'Zunanja povezava', 'Zgoraj', 'Poglej tudi', 'nad', 'Viri', 'Rečeno mu je', 'Rekli so O.', 'Reference', 'približno', 'AN.', 'Povezava do', 'Igra', 'napotitve', 'o njej', 'Viri', 'Zunanje povezave', 'Rekli so o tem', 'Zunanja povezava', 'Pričevanja', 'opomba', 'Reference', 'Opombe', 'Viri na internetu', 'O njem', 'Viri', 'Zunanje povezave', 'navaja', 'filmografija', 'opomba', 'Viri', 'Viri', 'Drugi projekti', 'Zunanje povezave', 'Povezave', 'Opombe', 'Opomba', 'weblinks.', 'Bibliografija', 'Podobni elementi', 'dela', 'Reference', 'Literatura.', 'Glejte', 'Poglej tudi', 'opomba', 'Drugi projekti']
forbidden_by_language["sq"] = ['Thënie për të', 'Referimet', 'Shiko edhe', 'lidhje të jashtme', 'referime', 'Shënime:', 'letërsi', 'U tha për të', 'për të', 'Dublim', 'Për të', 'Janë thënë', 'Artikuj të ngjashëm', 'I dukshëm', 'fusnotat', 'Aplikime të ngjashme', 'Film', 'Burimet:', 'Pyetje të varura', 'Referencat e jashtme', 'Ata thanë për të', 'Projekte të tjera', 'Shiko', 'Gjithashtu shikoni', 'film', 'Rreth', 'në', 'I lidhur', 'O', 'Ambasador', 'luaj', 'referim', 'per atë', 'Tads e jashtme', 'Bibliografi', 'Linqe te jashtme', 'Në përvjetorin', 'Lidhje', 'Liron', 'për veten', 'lidhje', 'vepron', 'Referim', 'Deklaratat rreth', 'Shiko gjithashtu', 'Librat më të famshëm', 'Vepron', 'U tha për të', 'Lidhje të tepërta', 'kuotat', 'Lidhje me pjesën e jashtme', 'Burim', 'Një tjetër për këtë', 'Duke thënë', 'film', 'Kuotat në lidhje me', 'thua', 'daveoù', 'Lidhje e jashtme', 'Citimet e mësipërme', 'Shiko gjithashtu', 'mbi', 'Burime', 'Është thënë atij', 'Ata thanë O.', 'Referencat', 'rreth', 'në', 'Lidh me', 'Luaj', 'referime', 'për të', 'burime', 'Linqe te jashtme', 'ata thanë për këtë', 'lidhje e jashtme', 'Dëshmi', 'shënim shënim', 'referencat', 'Shënim', 'Burimet në Internet', 'Për të', 'Burime', 'Lidhjet e jashtme', 'citon rreth', 'film', 'shënim shënim', 'burime', 'burime', 'Projekte të tjera', 'Linqe te jashtme', 'lidhje', 'shënim', 'shënim', 'weblinks', 'bibliografi', 'Artikuj të ngjashëm', 'vepron', 'referencat', 'letërsi', 'Shiko', 'Shiko gjithashtu', 'shënim shënim', 'Projekte të tjera']
forbidden_by_language["ta"] = ['வெளி இணைப்புகள்', 'சான்றுகள்', 'குறிப்புகள்:', 'இலக்கியம்', 'அது அவரைப் பற்றி கூறப்பட்டது', 'அவரை பற்றி', 'டுபிங்', 'அவளுக்கு', 'பற்றி கூறப்படுகிறது', 'தொடர்புடைய பொருட்கள்', 'குறிப்பிடத்தக்கது', 'அடிக்குறிப்புகள்', 'தொடர்புடைய பயன்பாடுகள்', 'திரைப்படவியல்', 'ஆதாரங்கள்:', 'சார்ந்த கேள்விகள்', 'வெளிப்புற குறிப்புகள்', 'அவர்கள் அவரைப் பற்றி சொன்னார்கள்', 'பிற திட்டங்கள்', 'பார்க்க', 'மேலும் பாருங்கள்', 'திரைப்படவியல்', 'பற்றி', 'மீது', 'தொடர்புடைய', 'ஓ', 'தூதர்', 'நாடகம்', 'குறிப்பு', 'அவருக்கு', 'வெளிப்புற tads.', 'நூலகம்', 'வெளி இணைப்புகள்', 'ஆண்டு விழாவில்', 'இணைப்புகள்', 'வெளியீடுகள்', 'தன்னை பற்றி', 'இணைப்புகள்', 'வேலை', 'குறிப்பு', 'பற்றி அறிக்கைகள்', 'மேலும் காண்க', 'மிகவும் பிரபலமான புத்தகங்கள்', 'வேலை', 'அது அவளைப் பற்றி கூறப்பட்டது', 'அதிக இணைப்புகள்', 'மேற்கோள்கள்', 'வெளியே இணைப்பு', 'மூல', 'அது பற்றி மற்றொரு', 'சொல்லுங்கள்', 'திரைப்படம்', 'மரியாதையுடன் மேற்கோள்கள்', 'சொல்', 'daveoù.', 'வெளிப்புற இணைப்பு', 'மேலே மேற்கோள்கள்', 'மேலும் காண்க', 'மேல்', 'ஆதாரங்கள்', 'அது அவரிடம் கூறப்படுகிறது', 'அவர்கள் ஓ என்று சொன்னார்கள்.', 'குறிப்புகள்', 'பற்றி', 'மீது', 'இணைப்பு', 'நாடகம்', 'பரிந்துரைகளை', 'அவளை பற்றி', 'ஆதாரங்கள்', 'வெளி இணைப்புகள்', 'அவர்கள் அதைப் பற்றி சொன்னார்கள்', 'வெளிப்புற இணைப்பு', 'சான்றுகள்', 'அடிகுறிப்பு', 'குறிப்புகள்', 'குறிப்புகள்', 'இணையத்தில் வளங்கள்', 'அவரை பற்றி', 'வளங்கள்', 'வெளிப்புற இணைப்புகள்', 'மேற்கோள்கள் பற்றி', 'திரைப்படவியல்', 'அடிகுறிப்பு', 'ஆதாரங்கள்', 'வளங்கள்', 'பிற திட்டங்கள்', 'வெளி இணைப்புகள்', 'இணைப்புகள்', 'குறிப்புகள்', 'குறிப்பு', 'weblinks.', 'நூலகம்', 'தொடர்புடைய பொருட்கள்', 'வேலை', 'குறிப்புகள்', 'இலக்கியம்', 'பார்க்க', 'மேலும் காண்க', 'அடிகுறிப்பு', 'பிற திட்டங்கள்']
forbidden_by_language["te"] = ['మూలాలు', 'గమనికలు:', 'సాహిత్యం', 'ఇది అతని గురించి చెప్పబడింది', 'అతని గురించి', 'డబ్బింగ్', 'ఆమె కోసం', 'గురించి చెప్పారు', 'సంబంధిత అంశాలు', 'గుర్తించదగినది', 'ఫుట్నోట్స్', 'సంబంధిత అనువర్తనాలు', 'ఫిల్మోగ్రఫీ', 'సోర్సెస్:', 'వివరించిన ప్రశ్నలు', 'బాహ్య సూచనలు', 'వారు అతని గురించి చెప్పారు', 'ఇతర ప్రాజెక్టులు', 'చూడండి', 'కూడా చూడండి', 'ఫిల్మోగ్రఫీ', 'గురించి', 'పై', 'సంబంధిత', 'O.', 'రాయబారి', 'ప్లేస్', 'సూచన', 'అతనికి', 'ఔటర్ tads.', 'బిబ్లియోగ్రఫీ', 'బాహ్య లింకులు', 'వార్షికోత్సవంలో', 'లింకులు', 'విడుదలలు', 'ఆమె గురించి', 'లింకులు', 'పనిచేస్తుంది', 'సూచన', 'గురించి ప్రకటనలు', 'ఇది కూడ చూడు', 'అత్యంత ప్రసిద్ధ పుస్తకాలు', 'పనిచేస్తుంది', 'ఆమె గురించి చెప్పబడింది', 'అదనపు లింకులు', 'కోట్స్', 'వెలుపల లింక్', 'మూల', 'దాని గురించి మరొకటి', 'చెప్పడం', 'సినిమా', 'సంబంధించి కోట్స్', 'చెప్పండి', 'daveoù.', 'బాహ్య లింక్', 'పైన ఉన్న అనులేఖనాలు', 'ఇది కూడ చూడు', 'పైగా', 'సోర్సెస్', 'అది అతనికి చెప్పబడింది', 'వారు ఓ అన్నారు', 'ప్రస్తావనలు', 'గురించి', 'దీని తరువాత', 'లింక్', 'ప్లేస్', 'రెఫరల్స్', 'ఆమె గురించి', 'సోర్సెస్', 'బాహ్య లింకులు', 'వారు దాని గురించి చెప్పారు', 'బాహ్య లింక్', 'టెస్టిమోనియల్స్', 'ఫుట్నోట్', 'ప్రస్తావనలు', 'గమనికలు', 'ఇంటర్నెట్లో వనరులు', 'అతని గురించి', 'వనరులు', 'బాహ్య కనెక్షన్లు', 'కోట్స్ గురించి', 'ఫిల్మోగ్రఫీ', 'ఫుట్నోట్', 'సోర్సెస్', 'వనరులు', 'ఇతర ప్రాజెక్టులు', 'బాహ్య లింకులు', 'లింకులు', 'గమనికలు', 'గమనిక', 'weblinks.', 'బిబ్లియోగ్రఫీ', 'సంబంధిత అంశాలు', 'పనిచేస్తుంది', 'ప్రస్తావనలు', 'సాహిత్యం', 'చూడండి', 'ఇది కూడ చూడు', 'ఫుట్నోట్', 'ఇతర ప్రాజెక్టులు']
forbidden_by_language["tr"] = ['Hakkında', 'kaynakça', 'Notlar:', 'Edebiyat', 'Onun hakkında söylendi', 'onun hakkında', 'Dublaj', 'Onun için', 'Hakkında söyleniyor', 'İlgili öğeler', 'Dikkate değer', 'dipnotlar', 'İlgili uygulamalar', 'Filmografi', 'Kaynaklar:', 'SORUMLULUKLAR', 'Dış referanslar', 'Onun hakkında söylediler', 'Diğer projeler', 'Görmek', 'Ayrıca bak', 'filmografi', 'Hakkında', 'üzerinde', 'İlgili', 'Ö', 'Büyükelçi', 'oynar', 'referans', 'onun için', 'Dış tads', 'Bibliyografya', 'Dış bağlantılar', 'yıldönümünde', 'Linkler', 'Salıverme', 'kendisi hakkında', 'linkler', 'İşler', 'Referans', 'Hakkında açıklamalar', 'Ayrıca bakınız', 'En ünlü kitaplar', 'İşler', 'Onun hakkında söylendi', 'Aşırı bağlantılar', 'alıntı', 'Dışa bağlantı', 'Kaynak', 'Bunun hakkında başka', 'Söyleyerek', 'film', 'İle ilgili alıntılar', 'söylemek', 'Daveoù', 'Harici bağlantı', 'Yukarıdaki alıntılar', 'Ayrıca bakınız', 'üzerinde', 'Kaynaklar', 'Ona söyleniyor', 'O dediler.', 'Referanslar', 'hakkında', 'üzerine', 'Bağlamak', 'Oynar', 'yönlendirmeler', 'Onun hakkında', 'kaynaklar', 'Dış bağlantılar', 'Bunun hakkında söylediler', 'harici bağlantı', 'Tanıklık', 'dipnot', 'Referanslar', 'Notlar', 'İnternetteki kaynaklar', 'Onun hakkında', 'Kaynaklar', 'Harici Bağlantılar', 'hakkında alıntılar', 'filmografi', 'dipnot', 'kaynaklar', 'Kaynaklar', 'diğer projeler', 'Dış bağlantılar', 'linkler', 'notalar', 'Not', 'İnternet linkleri', 'bibliyografya', 'ilgili öğeler', 'İşler', 'Referanslar', 'Edebiyat', 'görmek', 'Ayrıca bakınız', 'dipnot', 'diğer projeler']
forbidden_by_language["uk"] = ['Про ', 'Джерела', 'примітки', 'література', 'Примітки:', 'література', 'Про це сказано', 'про нього', 'Дублювання', 'Для неї', 'Сказані', "Пов'язані елементи", 'Нотен', 'виноски', "Пов'язані заявки", 'Фільмографія', 'Джерела:', 'залежати від запитів', 'Зовнішні посилання', 'Вони сказали про нього', 'Інші проекти', 'Побачити', 'Також подивіться', 'фільмографія', 'Про', 'на', 'Споріднений', 'O', 'Посла', 'грає', 'довідник', 'для нього', 'Зовнішні tads', 'Бібліографія', 'зовнішні посилання', 'у річницю', 'Посилання', 'Релізи', 'про себе', 'посилання', 'робіт', 'Довідник', 'Заяви про', 'Дивись також', 'Найвідоміші книги', 'Робіт', 'Це було сказано про неї', "Надлишкові зв'язки", 'котирування', 'Посилання назовні', 'Джерело', 'Інше про це', 'Кажуть', 'плівка', 'Цитати по відношенню до', 'казати', 'дав', 'Зовнішня посилання', 'Цитати вище', 'Дивись також', 'надмірно', 'Джерела', 'Йому сказано', 'Вони сказали О.', 'Посилання', 'про', 'на', 'Посилання на', 'Грає', 'рефераль', 'про неї', 'джерела', 'зовнішні посилання', 'вони сказали про це', 'Зовнішня посилання', 'Відгуки', 'виноска', 'посилання', 'Ноти', 'Ресурси в Інтернеті', 'Про нього', 'Ресурси', "Зовнішні з'єднання", 'фільмографія', 'виноска', 'джерела', 'ресурси', 'Інші проекти', 'зовнішні посилання', 'посилання', 'ноти', 'Примітка', 'weblinks', 'бібліографія', "Пов'язані елементи", 'робіт', 'посилання', 'література', 'побачити', 'Дивись також', 'виноска', 'Інші проекти']
forbidden_by_language["ur"] = ['حوالہ جات', 'نوٹ:', 'ادب', 'اس کے بارے میں یہ کہا گیا تھا', 'اس کے بارے میں', 'ڈوبنگ', 'اس لڑکی کے لئے', 'کے بارے میں کہا جاتا ہے', 'متعلقہ اشیاء', 'قابل ذکر', 'فوٹیاں', 'متعلقہ ایپلی کیشنز', 'فلمگرافی', 'ذرائع:', 'منحصر سوالات', 'بیرونی حوالہ جات', 'انہوں نے اس کے بارے میں کہا', 'دیگر منصوبوں', 'دیکھو', 'بھی دیکھو', 'فلمگرافی', 'کے بارے میں', 'پر', 'متعلقہ', 'اے', 'سفیر', 'ادا کرتا ہے', 'حوالہ', 'اس کے لیے', 'بیرونی ٹاد', 'بائبلگرافی', 'بیرونی روابط', 'سالگرہ میں', 'روابط', 'ریلیز', 'خود کے بارے میں', 'روابط', 'کام', 'حوالہ', 'کے بارے میں بیانات', 'بھی دیکھو', 'سب سے مشہور کتابیں', 'کام', 'اس کے بارے میں یہ کہا گیا تھا', 'اضافی لنکس', 'حوالہ جات', 'باہر سے رابطہ کریں', 'ذریعہ', 'اس کے بارے میں ایک اور', 'کہہ رہا ہے', 'فلم', 'احترام کے ساتھ حوالہ جات', 'کہہ دو', 'ڈیویو', 'بیرونی لنک', 'حوالہ اوپر', 'بھی دیکھو', 'زیادہ', 'ذرائع', 'اس سے کہا جاتا ہے', 'انہوں نے کہا اے', 'حوالہ جات', 'کے بارے میں', 'پر', 'سے رابطہ کریں', 'ادا کرتا ہے', 'حوالہ جات', 'اس کے بارے میں', 'ذرائع', 'بیرونی روابط', 'انہوں نے اس کے بارے میں کہا', 'بیرونی لنک', 'تعریف', 'فوٹیوٹ', 'حوالہ جات', 'نوٹس', 'انٹرنیٹ میں وسائل', 'اس کے بارے میں', 'حوالہ جات', 'بیرونی کنکشن', 'کے بارے میں حوالہ جات', 'فلمگرافی', 'فوٹیوٹ', 'ذرائع', 'حوالہ جات', 'دیگر منصوبوں', 'بیرونی روابط', 'روابط', 'نوٹس', 'نوٹ', 'ویب لنکس', 'بائبلگرافی', 'متعلقہ اشیاء', 'کام', 'حوالہ جات', 'ادب', 'دیکھو', 'بھی دیکھو', 'فوٹیوٹ', 'دیگر منصوبوں']
forbidden_by_language["zh"] = ["引用","片目","脚注","来源","资源","其他项目","外部链接","链接","注释","注释","网络链接","参考书目","相关项目","作品","参考文献","文献","参见","另见","脚注","其他项目","另看","参考书目","作品", "著名","参考","他们说的","电影","关于他","相关查询","参考","配音","电影","为他","外部链接","戏剧","脚注","有人说他","作品","戏剧","其他项目"]
forbidden = [f.lower() for l in list(forbidden_by_language.values()) for f in l]
class EntityWithQuotes:
    """Extracts and stores all quotes of one Wikiquote entity for one language."""
    def __init__(self, entity, id, language):
def getQuotesFromUnstructuredText(section, id, wikiquote_id):
def getQ(section, id):
nonlocal quotes
nonlocal n
nonlocal level
nonlocal section_titles
section_titles = section_titles[:level]
section_titles.append(section.title.text)
for line in section.lines:
n+=1
quote = untemplatedQuote(section_titles, line, id, n, language, wikiquote_id)
quotes.update({quote.id:quote})
for sec in section.sub_sections:
if sec.title.text.lower() in forbidden+[i+" "+wikiquote_id.lower() for i in forbidden]:
continue
level=level+1
getQ(sec, id)
level=level-1
# filtering for empty Quotes using __bool__
temp_quotes = dict(quotes)
for quote_id in temp_quotes:
if not quotes[quote_id]:
del quotes[quote_id]
quotes = {}
n = 1
level = 0
section_titles = []
getQ(section, id)
return quotes
def getQuotesFromTemplates(section, id, wikiquote_id):
def getTempQ(section, id):
nonlocal quotes
nonlocal n
nonlocal level
nonlocal section_titles
section_titles = section_titles[:level]
section_titles.append(section.title.text)
for template in section.templates:
n+=1
templ = template.values
quote = templatedQuote(id, n, language, section_titles, wikiquote_id, **templ)
quotes.update({quote.id:quote})
for sec in section.sub_sections:
if sec.title.text.lower() in forbidden+[i+" "+wikiquote_id.lower() for i in forbidden]:
continue
level=level+1
getTempQ(sec, id)
level=level-1
# filtering for empty Quotes using __bool__
temp_quotes = dict(quotes)
for quote_id in temp_quotes:
if not quotes[quote_id]:
del quotes[quote_id]
quotes = {}
n = 1
level = 0
section_titles = []
getTempQ(section, id)
return quotes
self.lang=language
self.entity = entity
self.wikiquote_id = entity.wikiquote_id
self.wikiquote_page_id = entity.wikiquote_page_id
self.wikidata_id = entity.wikidata_id
self.wikipedia_id = entity.wikipedia_id
self.types = []
self.id = id
self.quotes = dict()
if self.lang in languages_with_templates:
self.quotes = getQuotesFromTemplates(entity.main_section, id, self.wikiquote_id)
elif self.lang in hybrid_languages:
self.quotes = getQuotesFromTemplates(entity.main_section, id, self.wikiquote_id)
self.quotes.update(getQuotesFromUnstructuredText(entity.main_section, self.id, self.wikiquote_id))
else:
self.quotes = getQuotesFromUnstructuredText(entity.main_section, self.id, self.wikiquote_id)
self.quotes = collections.OrderedDict(sorted(self.quotes.items()))
class CompleteEntity:
    """Groups the per-language EntityWithQuotes objects of one Wikidata entity."""
def __init__(self, id, entities):
self.entities = entities
self.wikiquoteIds = dict()
self.wikiquotePageIds= dict()
self.wikipediaIds= dict()
for language in self.entities:
self.wikiquoteIds.update({language:self.entities[language][0].entity.wikiquote_id})
self.wikiquotePageIds.update({language:self.entities[language][0].entity.wikiquote_page_id})
self.wikipediaIds.update({language:self.entities[language][0].entity.wikipedia_id})
self.wikidata_id = id
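# Illustrative usage sketch (not part of the original file). The parsed
# `entity` objects, the `untemplatedQuote`/`templatedQuote` classes, and the
# `languages_with_templates`/`hybrid_languages` sets are defined elsewhere in
# this project, so the input names below are assumptions about that code:
#
# en = EntityWithQuotes(parsed_en_entity, id="Q42", language="en")
# de = EntityWithQuotes(parsed_de_entity, id="Q42", language="de")
# complete = CompleteEntity("Q42", {"en": [en], "de": [de]})
# for quote_id, quote in en.quotes.items():
#     print(quote_id, quote)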
| 337.772549 | 1,878 | 0.652603 | 4,117 | 0.036174 | 0 | 0 | 0 | 0 | 0 | 0 | 97,659 | 0.858073 |
6aab6b3ba732d64220b4fb1bf6b4cc739254d1fe | 1,019 | py | Python | tests/pm/update_sla.py | supsi-dacd-isaac/parity-sidechain-interface | b64a5fb724955332afb4998344081d1b93ac216a | [
"MIT"
] | null | null | null | tests/pm/update_sla.py | supsi-dacd-isaac/parity-sidechain-interface | b64a5fb724955332afb4998344081d1b93ac216a | ["MIT"] | null | null | null | tests/pm/update_sla.py | supsi-dacd-isaac/parity-sidechain-interface | b64a5fb724955332afb4998344081d1b93ac216a | ["MIT"] | null | null | null |
# Importing section
import json
import requests
import argparse
import hashlib
import time
from http import HTTPStatus
# Main: submit an updateSla transaction, then check its status on the sidechain.
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
args = arg_parser.parse_args()
set_cmd = 'updateSla'
params = {
'idx': 'sla04',
'start': 3000,
'end': 3900
}
cmd_url = 'http://localhost:9119/%s' % set_cmd
headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
print('COMMAND: %s' % cmd_url)
print('PARAMS: %s' % params)
r = requests.post(cmd_url, headers=headers, json=params)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
# Wait some seconds to be sure that the transaction has been handled
time.sleep(5)
check_tx_url = 'http://localhost:9119/checkTx/%s' % data['tx_hash']
print('CHECK TX: %s' % check_tx_url)
r = requests.get(check_tx_url)
data = json.loads(r.text)
print('RESPONSE: %s\n' % data)
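    # Possible refinement (illustrative only, not part of the original script):
    # poll checkTx with retries instead of sleeping a fixed 5 seconds. The
    # 'status' key tested below is an assumed response field, not one shown above.
    #
    # def wait_for_tx(tx_hash, max_tries=10, delay=1.0):
    #     for _ in range(max_tries):
    #         r = requests.get('http://localhost:9119/checkTx/%s' % tx_hash)
    #         data = json.loads(r.text)
    #         if data.get('status') != 'pending':
    #             return data
    #         time.sleep(delay)
    #     raise TimeoutError('tx %s not confirmed' % tx_hash)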
| 24.261905 | 80 | 0.617272 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 337 | 0.330716 |
6aac1f4d092634d65b03e7c6699787370a84bac7 | 498 | py | Python | array/python3/5_move_all_negative_elements.py | jitendragangwar123/cp | 8d9da1abd841784da8304e7ebb64a6b94cb804bb | [
"MIT"
] | null | null | null | array/python3/5_move_all_negative_elements.py | jitendragangwar123/cp | 8d9da1abd841784da8304e7ebb64a6b94cb804bb | ["MIT"] | 1 | 2020-12-12T19:09:01.000Z | 2020-12-12T19:09:01.000Z | array/python3/5_move_all_negative_elements.py | jitendragangwar123/cp | 8d9da1abd841784da8304e7ebb64a6b94cb804bb | ["MIT"] | 1 | 2020-12-12T18:36:24.000Z | 2020-12-12T19:09:01.000Z |
def sort(arr):
    """Partition arr in place so that all negative values come before all non-negative ones."""
    # Index of the first unprocessed element.
start = 0
# End index
end = len(arr)-1
while start <= end:
        # arr[start] is non-negative: swap it to the tail and shrink the window.
if arr[start] >= 0:
arr[start], arr[end] = arr[end], arr[start]
end -= 1
else:
            # arr[start] is negative: it is already in place, so advance start.
start += 1
if __name__ == "__main__":
arr = [-1, 2, -3, 4, 5, 6, -7, 8, 9]
sort(arr)
print(arr)
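    # Quick self-check (illustrative addition): after sort(), every negative
    # element precedes every non-negative one; order within each group is not
    # preserved.
    # boundary = next((i for i, v in enumerate(arr) if v >= 0), len(arr))
    # assert all(v < 0 for v in arr[:boundary])
    # assert all(v >= 0 for v in arr[boundary:])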
| 23.714286 | 74 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.321285 |
6aac551e77cffa8d22df81867eace49a7797fd1d | 1,199 | py | Python | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | [
"MIT"
] | null | null | null | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | ["MIT"] | null | null | null | misc.py | hldai/wikiprocesspy | 788ccb6f0e0e54a7322863d5a13332635afc240d | ["MIT"] | null | null | null |
import json
def __text_from_anchor_sents_file(anchor_sents_file, output_file):
f = open(anchor_sents_file, encoding='utf-8')
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for i, line in enumerate(f):
sent = json.loads(line)
fout.write('{}\n'.format(sent['tokens']))
# if i > 5:
# break
f.close()
fout.close()
def merge_files(filenames, output_file):
fout = open(output_file, 'w', encoding='utf-8', newline='\n')
for filename in filenames:
print(filename)
f = open(filename, encoding='utf-8')
for line in f:
fout.write(line)
f.close()
fout.close()
wiki19_anchor_sents_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents.txt'
anchor_sent_texts_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts.txt'
# __text_from_anchor_sents_file(wiki19_anchor_sents_file, anchor_sent_texts_file)
part_pos_tag_files = [f'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos-{i}.txt' for i in range(4)]
pos_tag_file = 'd:/data/res/wiki/anchor/enwiki-20190101-anchor-sents-tok-texts-pos.txt'
# merge_files(part_pos_tag_files, pos_tag_file)
| 35.264706 | 118 | 0.686405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 481 | 0.401168 |
6aad34cdb7b95e79e68448f602b65f3d09cae50a | 1,106 | py | Python | test/smptest.py | myrtam/CANNR | b966a873ec60264b0fd42b81edadb5495237d7ea | [
"Apache-2.0"
] | null | null | null | test/smptest.py | myrtam/CANNR | b966a873ec60264b0fd42b81edadb5495237d7ea | ["Apache-2.0"] | null | null | null | test/smptest.py | myrtam/CANNR | b966a873ec60264b0fd42b81edadb5495237d7ea | ["Apache-2.0"] | 1 | 2021-05-14T08:37:37.000Z | 2021-05-14T08:37:37.000Z |
"""
Test harness for smp.py
"""
import sys
import os
sys.path.append('/Users/ptendick/open-source-workspace/cannr Image/source/cannr/lib')
os.environ['PATH'] = '/Library/Frameworks/Python.framework/Versions/3.7/bin:' + os.environ['PATH']
import cannr
import smp
# Test openProcess by opening a Flask process
def test_openProcess1():
return smp.openProcess(
{"processInfo": "processInfo"},
['python', '/Users/ptendick/open-source-workspace/cannr Image/test/flaskSample.py', '5000', '1'])
# Test openProcess by opening a Plumber process
def test_openProcess2():
return smp.openProcess(
{"processInfo": "processInfo"},
['Rscript', '--vanilla', '/Users/ptendick/open-source-workspace/cannr Image/source/cannr/runApp.R',
'/Users/ptendick/open-source-workspace/cannr Image/test/hello.R', '5001', '2'])
# Test countPorts
def test_countPorts():
projectFilePath = '/Users/ptendick/open-source-workspace/MyRTAM Service/working/project1/project.json'
project = cannr.readJSONFile(projectFilePath)
return smp.countPorts(project)
| 32.529412 | 108 | 0.705244 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.60217 |
6aad4ce5dfa92a930b5b7dfb6e85c80cb8498743 | 2,833 | py | Python | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | [
"Apache-2.0"
] | null | null | null | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | ["Apache-2.0"] | null | null | null | neural_toolbox/inception.py | ibrahimSouleiman/GuessWhat | 60d140de1aae5ccda27e7d3eef2b9fb9548f0854 | ["Apache-2.0"] | null | null | null |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tensorflow.contrib.slim.python.slim.nets.resnet_v1 as resnet_v1
import tensorflow.contrib.slim.python.slim.nets.inception_v1 as inception_v1
import tensorflow.contrib.slim.python.slim.nets.resnet_utils as slim_utils
from tensorflow.contrib import layers as layers_lib
from tensorflow.contrib.framework.python.ops import arg_scope
import os
def get_resnet_arg_scope(bn_fn):
"""
    Trick to apply CBN to a pretrained TF network: it overrides the batch-norm constructor with the cbn factory.
:param bn_fn: cbn factory
:return: tensorflow scope
"""
with arg_scope(
[layers_lib.conv2d],
activation_fn=tf.nn.relu,
normalizer_fn=bn_fn,
normalizer_params=None) as arg_sc:
return arg_sc
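# Illustrative sketch (not in the original file) of a `bn_fn` that could be
# passed to get_resnet_arg_scope: any callable usable as a slim `normalizer_fn`.
# Plain batch norm stands in for a real conditional batch norm, whose scale and
# offset deltas would be predicted from an extra conditioning input.
#
# def make_bn_fn(is_training):
#     def bn_fn(net, scope=None):
#         return slim.batch_norm(net, is_training=is_training, scope=scope)
#     return bn_fn
#
# conv_arg_scope = get_resnet_arg_scope(make_bn_fn(is_training=True))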
def create_inception(image_input, is_training, scope="", inception_out="Mixed_5c", resnet_version=50, cbn=None):
"""
    Create an Inception v1 network, overriding the classic batch norm with conditional batch norm.
    :param image_input: placeholder with image
    :param is_training: are you using the network at training time or test time
    :param scope: tensorflow scope
    :param inception_out: name of the Inception end point to return (e.g. "Mixed_5c")
    :param resnet_version: unused here; kept for interface compatibility with the resnet factory
    :param cbn: the cbn factory
    :return: the requested inception end point and the full end_points dict
"""
# assert False, "\n" \
# "There is a bug with classic batchnorm with slim networks (https://github.com/tensorflow/tensorflow/issues/4887). \n" \
# "Please use the following config -> 'cbn': {'use_cbn':true, 'excluded_scope_names': ['*']}"
# arg_sc = slim_utils.resnet_arg_scope(is_training=is_training)
# print("--- 1")
arg_sc = inception_v1.inception_v1_arg_scope()
# Pick the correct version of the resnet
# if resnet_version == 50:
# current_resnet = resnet_v1.resnet_v1_50
# elif resnet_version == 101:
# current_resnet = resnet_v1.resnet_v1_101
# elif resnet_version == 152:
# current_resnet = resnet_v1.resnet_v1_152
# else:
# raise ValueError("Unsupported resnet version")
# inception_scope = os.path.join('InceptionV1/InceptionV1', inception_out)
# print("--- 2")
inception_scope = inception_out
# print(" resnet_out = {} , resnet_scope = {}".format(resnet_out,resnet_scope))
# print("--- 3")
with slim.arg_scope(arg_sc):
        net, end_points = inception_v1.inception_v1(image_input, 1001)  # 1001 = 1000 ImageNet classes + background class
print("Net = ",net)
# print("--- 4")
if len(scope) > 0 and not scope.endswith("/"):
scope += "/"
# print("--- 5")
# print(end_points)
print(" Batch ",inception_scope)
out = end_points[scope + inception_scope]
print("-- out Use: {},output = {}".format(inception_scope,out))
return out,end_points
| 36.320513 | 143 | 0.676668 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,527 | 0.539005 |
6aad74ee52655f68220f799efaffcbccdd0748ad | 6,133 | py | Python | timm/utils/checkpoint_saver.py | Robert-JunWang/pytorch-image-models | 7c67d6aca992f039eece0af5f7c29a43d48c00e4 | [
"Apache-2.0"
] | 17,769 | 2019-05-02T08:08:25.000Z | 2022-03-31T22:14:44.000Z | timm/utils/checkpoint_saver.py | jonychoi/pytorch-image-models | e4360e6125bb0bb4279785810c8eb33b40af3ebd | ["Apache-2.0"] | 556 | 2019-05-26T16:31:37.000Z | 2022-03-30T04:21:07.000Z | timm/utils/checkpoint_saver.py | jonychoi/pytorch-image-models | e4360e6125bb0bb4279785810c8eb33b40af3ebd | ["Apache-2.0"] | 3,029 | 2019-05-14T01:18:28.000Z | 2022-03-31T20:09:50.000Z |
""" Checkpoint Saver
Track top-n training checkpoints and maintain recovery checkpoints on specified intervals.
Hacked together by / Copyright 2020 Ross Wightman
"""
import glob
import operator
import os
import logging
import torch
from .model import unwrap_model, get_state_dict
_logger = logging.getLogger(__name__)
class CheckpointSaver:
def __init__(
self,
model,
optimizer,
args=None,
model_ema=None,
amp_scaler=None,
checkpoint_prefix='checkpoint',
recovery_prefix='recovery',
checkpoint_dir='',
recovery_dir='',
decreasing=False,
max_history=10,
unwrap_fn=unwrap_model):
# objects to save state_dicts of
self.model = model
self.optimizer = optimizer
self.args = args
self.model_ema = model_ema
self.amp_scaler = amp_scaler
# state
self.checkpoint_files = [] # (filename, metric) tuples in order of decreasing betterness
self.best_epoch = None
self.best_metric = None
self.curr_recovery_file = ''
self.last_recovery_file = ''
# config
self.checkpoint_dir = checkpoint_dir
self.recovery_dir = recovery_dir
self.save_prefix = checkpoint_prefix
self.recovery_prefix = recovery_prefix
self.extension = '.pth.tar'
self.decreasing = decreasing # a lower metric is better if True
self.cmp = operator.lt if decreasing else operator.gt # True if lhs better than rhs
self.max_history = max_history
self.unwrap_fn = unwrap_fn
assert self.max_history >= 1
def save_checkpoint(self, epoch, metric=None):
assert epoch >= 0
tmp_save_path = os.path.join(self.checkpoint_dir, 'tmp' + self.extension)
last_save_path = os.path.join(self.checkpoint_dir, 'last' + self.extension)
self._save(tmp_save_path, epoch, metric)
if os.path.exists(last_save_path):
os.unlink(last_save_path) # required for Windows support.
os.rename(tmp_save_path, last_save_path)
worst_file = self.checkpoint_files[-1] if self.checkpoint_files else None
if (len(self.checkpoint_files) < self.max_history
or metric is None or self.cmp(metric, worst_file[1])):
if len(self.checkpoint_files) >= self.max_history:
self._cleanup_checkpoints(1)
filename = '-'.join([self.save_prefix, str(epoch)]) + self.extension
save_path = os.path.join(self.checkpoint_dir, filename)
os.link(last_save_path, save_path)
self.checkpoint_files.append((save_path, metric))
self.checkpoint_files = sorted(
self.checkpoint_files, key=lambda x: x[1],
reverse=not self.decreasing) # sort in descending order if a lower metric is not better
checkpoints_str = "Current checkpoints:\n"
for c in self.checkpoint_files:
checkpoints_str += ' {}\n'.format(c)
_logger.info(checkpoints_str)
if metric is not None and (self.best_metric is None or self.cmp(metric, self.best_metric)):
self.best_epoch = epoch
self.best_metric = metric
best_save_path = os.path.join(self.checkpoint_dir, 'model_best' + self.extension)
if os.path.exists(best_save_path):
os.unlink(best_save_path)
os.link(last_save_path, best_save_path)
return (None, None) if self.best_metric is None else (self.best_metric, self.best_epoch)
def _save(self, save_path, epoch, metric=None):
save_state = {
'epoch': epoch,
'arch': type(self.model).__name__.lower(),
'state_dict': get_state_dict(self.model, self.unwrap_fn),
'optimizer': self.optimizer.state_dict(),
'version': 2, # version < 2 increments epoch before save
}
if self.args is not None:
save_state['arch'] = self.args.model
save_state['args'] = self.args
if self.amp_scaler is not None:
save_state[self.amp_scaler.state_dict_key] = self.amp_scaler.state_dict()
if self.model_ema is not None:
save_state['state_dict_ema'] = get_state_dict(self.model_ema, self.unwrap_fn)
if metric is not None:
save_state['metric'] = metric
torch.save(save_state, save_path)
def _cleanup_checkpoints(self, trim=0):
trim = min(len(self.checkpoint_files), trim)
delete_index = self.max_history - trim
if delete_index < 0 or len(self.checkpoint_files) <= delete_index:
return
to_delete = self.checkpoint_files[delete_index:]
for d in to_delete:
try:
_logger.debug("Cleaning checkpoint: {}".format(d))
os.remove(d[0])
except Exception as e:
_logger.error("Exception '{}' while deleting checkpoint".format(e))
self.checkpoint_files = self.checkpoint_files[:delete_index]
def save_recovery(self, epoch, batch_idx=0):
assert epoch >= 0
filename = '-'.join([self.recovery_prefix, str(epoch), str(batch_idx)]) + self.extension
save_path = os.path.join(self.recovery_dir, filename)
self._save(save_path, epoch)
if os.path.exists(self.last_recovery_file):
try:
_logger.debug("Cleaning recovery: {}".format(self.last_recovery_file))
os.remove(self.last_recovery_file)
except Exception as e:
_logger.error("Exception '{}' while removing {}".format(e, self.last_recovery_file))
self.last_recovery_file = self.curr_recovery_file
self.curr_recovery_file = save_path
def find_recovery(self):
recovery_path = os.path.join(self.recovery_dir, self.recovery_prefix)
files = glob.glob(recovery_path + '*' + self.extension)
files = sorted(files)
return files[0] if len(files) else ''
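# Minimal usage sketch (illustrative; `model`, `optimizer`, `num_epochs`,
# `train_one_epoch`, and `validate` are assumed to exist in the surrounding
# training script):
#
# saver = CheckpointSaver(model, optimizer, checkpoint_dir='./output', max_history=3)
# for epoch in range(num_epochs):
#     train_one_epoch(model, optimizer)
#     metric = validate(model)
#     best_metric, best_epoch = saver.save_checkpoint(epoch, metric=metric)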
| 40.615894 | 104 | 0.626121 | 5,805 | 0.946519 | 0 | 0 | 0 | 0 | 0 | 0 | 779 | 0.127018 |
6aad9dd74183fdbafeb45c7c06a4bb4ab92534aa | 292 | py | Python | AGC004/AGC004a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | AGC004/AGC004a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | ["Unlicense"] | null | null | null | AGC004/AGC004a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | ["Unlicense"] | null | null | null |
# AGC004a
def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a, b, c = map(int, input().split())
if a % 2 == 0 or b % 2 == 0 or c % 2 == 0:
print(0)
exit(0)
print(min(a*b, b*c, c*a))
if __name__ == '__main__':
main()
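# Why this works (AGC004 A): the block must be cut into two cuboids. If any
# edge is even, it can be halved exactly, so the volume difference is 0;
# otherwise the best cut peels off a single 1-unit layer, leaving a
# difference of min(a*b, b*c, c*a).
# Worked examples: "3 3 3" -> 9; "2 2 4" -> 0; "5 3 5" -> min(15, 15, 25) = 15.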
| 18.25 | 46 | 0.506849 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.065068 |
6aae27568c85842fa9dbea1ace5c81d9190ab20e | 12,603 | py | Python | glance/tests/functional/test_api.py | arvindn05/glance | 055d15a6ba5d132f649156eac0fc91f4cd2813e4 | [
"Apache-2.0"
] | null | null | null | glance/tests/functional/test_api.py | arvindn05/glance | 055d15a6ba5d132f649156eac0fc91f4cd2813e4 | ["Apache-2.0"] | null | null | null | glance/tests/functional/test_api.py | arvindn05/glance | 055d15a6ba5d132f649156eac0fc91f4cd2813e4 | ["Apache-2.0"] | null | null | null |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from oslo_serialization import jsonutils
from six.moves import http_client
from glance.tests import functional
# TODO(rosmaita): all the EXPERIMENTAL stuff in this file can be ripped out
# when v2.6 becomes CURRENT in Queens
def _generate_v1_versions(url):
v1_versions = {'versions': [
{
'id': 'v1.1',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'DEPRECATED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
return v1_versions
def _generate_v2_versions(url):
version_list = []
version_list.extend([
{
'id': 'v2.6',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.5',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.4',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.3',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
}
])
v2_versions = {'versions': version_list}
return v2_versions
def _generate_all_versions(url):
v1 = _generate_v1_versions(url)
v2 = _generate_v2_versions(url)
all_versions = {'versions': v2['versions'] + v1['versions']}
return all_versions
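# For reference (illustrative comment, not in the original test module), the
# merged payload built above has the shape:
# {'versions': [{'id': 'v2.6', 'status': 'CURRENT', 'links': [...]},
#               ...,
#               {'id': 'v1.0', 'status': 'DEPRECATED', 'links': [...]}]}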
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
# v1 and v2 api enabled
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_all_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v2_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = _generate_v1_versions(url)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(versions, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
self.versions = _generate_all_versions(url)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
"""Assert GET / with `no Accept:` header.
Verify version choices returned.
Bug lp:803260 no Accept header causes a 500 in glance-api
"""
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path(self):
"""Assert GET /images with `no Accept:` header.
Verify version choices returned.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_images_path(self):
"""GET /v1/images with `no Accept:` header.
Verify empty images list returned.
"""
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
def test_get_root_path_with_unknown_header(self):
"""Assert GET / with Accept: unknown header
Verify version choices returned. Verify message in API log about
unknown accept header.
"""
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_root_path_with_openstack_header(self):
"""Assert GET / with an Accept: application/vnd.openstack.images-v1
Verify empty image list returned
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
self.assertEqual(self.images_json, content.decode())
def test_get_images_path_with_openstack_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v1` header.
Verify version choices returned. Verify message in API log
about unknown accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
    def test_get_v10_images_path(self):
        """Assert GET /v1.a/images (a malformed version) with no Accept: header
        Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_v1a_images_path(self):
"""Assert GET /v1.a/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
def test_get_va1_images_path(self):
"""Assert GET /va.1/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path(self):
"""Assert GET /versions with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_versions_path_with_openstack_header(self):
"""Assert GET /versions with the
`Accept: application/vnd.openstack.images-v1` header.
Verify version choices returned.
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.OK, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v1_versions_path(self):
"""Assert GET /v1/versions with `no Accept:` header
Verify 404 returned
"""
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(http_client.NOT_FOUND, response.status)
def test_get_versions_choices(self):
"""Verify version choices returned"""
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_images_path_with_openstack_v2_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v2` header.
Verify version choices returned. Verify message in API log
about unknown version in accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content_json = http.request(path, 'GET', headers=headers)
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
def test_get_v12_images_path(self):
"""Assert GET /v1.2/images with `no Accept:` header
Verify version choices returned
"""
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content_json = http.request(path, 'GET')
self.assertEqual(http_client.MULTIPLE_CHOICES, response.status)
content = jsonutils.loads(content_json.decode())
self.assertEqual(self.versions, content)
| 39.261682 | 78 | 0.614933 | 9,962 | 0.790447 | 0 | 0 | 0 | 0 | 0 | 0 | 4,332 | 0.343728 |
6ab135f81cd0354b89240b44a37bacfa732bfab3 | 13,664 | py | Python | qcore/asserts.py | corey-sobel/qcore | 719a44617789e3cc384ce860031d9479ee0877e4 | [
"Apache-2.0"
] | 1 | 2022-01-31T23:15:48.000Z | 2022-01-31T23:15:48.000Z | qcore/asserts.py | corey-sobel/qcore | 719a44617789e3cc384ce860031d9479ee0877e4 | ["Apache-2.0"] | null | null | null | qcore/asserts.py | corey-sobel/qcore | 719a44617789e3cc384ce860031d9479ee0877e4 | ["Apache-2.0"] | null | null | null |
# Copyright 2016 Quora, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module with assertion helpers.
The advantages of using a method like
assert_eq(expected, actual)
instead of
assert expected == actual
include:
1 - On failures, assert_eq prints an informative message of the actual
values compared (e.g. AssertionError: 1 != 2) for free, which makes it
faster and easier to iterate on tests.
2 - In the context of refactors, basic asserts incorrectly shift the burden of
adding printouts and writing good test code to people refactoring code
rather than the person who initially wrote the code.
"""
__all__ = [
"assert_is",
"assert_is_not",
"assert_is_instance",
"assert_eq",
"assert_dict_eq",
"assert_ne",
"assert_gt",
"assert_ge",
"assert_lt",
"assert_le",
"assert_in",
"assert_not_in",
"assert_in_with_tolerance",
"assert_unordered_list_eq",
"assert_raises",
"AssertRaises",
# Strings
"assert_is_substring",
"assert_is_not_substring",
"assert_startswith",
"assert_endswith",
]
# The unittest.py testing framework checks for this variable in a module to
# filter out stack frames from that module from the test output, in order to
# make the output more concise.
# __unittest = 1
import traceback
from .inspection import get_full_name
_number_types = (int, float, complex)
def _assert_fail_message(message, expected, actual, comparison_str, extra):
if message:
return message
if extra:
return "%a %s %a (%s)" % (expected, comparison_str, actual, extra)
return "%a %s %a" % (expected, comparison_str, actual)
def assert_is(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is not actual."""
assert expected is actual, _assert_fail_message(
message, expected, actual, "is not", extra
)
def assert_is_not(expected, actual, message=None, extra=None):
"""Raises an AssertionError if expected is actual."""
assert expected is not actual, _assert_fail_message(
message, expected, actual, "is", extra
)
def assert_is_instance(value, types, message=None, extra=None):
"""Raises an AssertionError if value is not an instance of type(s)."""
assert isinstance(value, types), _assert_fail_message(
message, value, types, "is not an instance of", extra
)
def assert_eq(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected != actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is larger than the tolerance.
"""
if tolerance is None:
assert expected == actual, _assert_fail_message(
message, expected, actual, "!=", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff <= tolerance, _assert_fail_message(
message, expected, actual, "is more than %a away from" % tolerance, extra
)
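# Usage sketch (illustrative, not part of the original module):
#
# assert_eq(4, 2 + 2)                        # passes
# assert_eq(0.333, 1.0 / 3, tolerance=1e-2)  # passes: difference is ~3.3e-4
# assert_eq(0.333, 1.0 / 3, tolerance=1e-6)  # raises AssertionError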
def _dict_path_string(path):
if len(path) == 0:
return "(root)"
return "->".join(map(ascii, path))
def assert_dict_eq(expected, actual, number_tolerance=None, dict_path=[]):
"""Asserts that two dictionaries are equal, producing a custom message if they are not."""
assert_is_instance(expected, dict)
assert_is_instance(actual, dict)
expected_keys = set(expected.keys())
actual_keys = set(actual.keys())
assert expected_keys <= actual_keys, "Actual dict at %s is missing keys: %a" % (
_dict_path_string(dict_path),
expected_keys - actual_keys,
)
assert actual_keys <= expected_keys, "Actual dict at %s has extra keys: %a" % (
_dict_path_string(dict_path),
actual_keys - expected_keys,
)
for k in expected_keys:
key_path = dict_path + [k]
assert_is_instance(
actual[k],
type(expected[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
assert_is_instance(
expected[k],
type(actual[k]),
extra="Types don't match for %s" % _dict_path_string(key_path),
)
if isinstance(actual[k], dict):
assert_dict_eq(
expected[k],
actual[k],
number_tolerance=number_tolerance,
dict_path=key_path,
)
elif isinstance(actual[k], _number_types):
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
tolerance=number_tolerance,
)
else:
assert_eq(
expected[k],
actual[k],
extra="Value doesn't match for %s" % _dict_path_string(key_path),
)
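# Usage sketch (illustrative): nested dicts are walked key by key, and a
# failure message names the offending path, e.g. "'a'->'b'".
#
# assert_dict_eq({'a': {'b': 1.0}}, {'a': {'b': 1.001}}, number_tolerance=0.01)  # passes
# assert_dict_eq({'a': {'b': 1}}, {'a': {'b': 2}})  # AssertionError at 'a'->'b'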
def assert_ne(expected, actual, message=None, tolerance=None, extra=None):
"""Raises an AssertionError if expected == actual.
If tolerance is specified, raises an AssertionError if either
- expected or actual isn't a number, or
- the difference between expected and actual is smaller than the tolerance.
"""
if tolerance is None:
assert expected != actual, _assert_fail_message(
message, expected, actual, "==", extra
)
else:
assert isinstance(tolerance, _number_types), (
"tolerance parameter to assert_eq must be a number: %a" % tolerance
)
assert isinstance(expected, _number_types) and isinstance(
actual, _number_types
), "parameters must be numbers when tolerance is specified: %a, %a" % (
expected,
actual,
)
diff = abs(expected - actual)
assert diff > tolerance, _assert_fail_message(
message, expected, actual, "is less than %a away from" % tolerance, extra
)
def assert_gt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand <= right_hand."""
assert left > right, _assert_fail_message(message, left, right, "<=", extra)
def assert_ge(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand < right_hand."""
assert left >= right, _assert_fail_message(message, left, right, "<", extra)
def assert_lt(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand >= right_hand."""
assert left < right, _assert_fail_message(message, left, right, ">=", extra)
def assert_le(left, right, message=None, extra=None):
"""Raises an AssertionError if left_hand > right_hand."""
assert left <= right, _assert_fail_message(message, left, right, ">", extra)
def assert_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq."""
assert obj in seq, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_not_in(obj, seq, message=None, extra=None):
"""Raises an AssertionError if obj is in iter."""
# for very long strings, provide a truncated error
if isinstance(seq, str) and obj in seq and len(seq) > 200:
index = seq.find(obj)
start_index = index - 50
if start_index > 0:
truncated = "(truncated) ..."
else:
truncated = ""
start_index = 0
end_index = index + len(obj) + 50
truncated += seq[start_index:end_index]
if end_index < len(seq):
truncated += "... (truncated)"
assert False, _assert_fail_message(message, obj, truncated, "is in", extra)
assert obj not in seq, _assert_fail_message(message, obj, seq, "is in", extra)
def assert_in_with_tolerance(obj, seq, tolerance, message=None, extra=None):
"""Raises an AssertionError if obj is not in seq using assert_eq cmp."""
for i in seq:
try:
assert_eq(obj, i, tolerance=tolerance, message=message, extra=extra)
return
except AssertionError:
pass
assert False, _assert_fail_message(message, obj, seq, "is not in", extra)
def assert_unordered_list_eq(expected, actual, message=None):
"""Raises an AssertionError if the objects contained
in expected are not equal to the objects contained
in actual without regard to their order.
    This takes quadratic time in the number of elements in actual; don't use it for very long lists.
"""
missing_in_actual = []
missing_in_expected = list(actual)
for x in expected:
try:
missing_in_expected.remove(x)
except ValueError:
missing_in_actual.append(x)
if missing_in_actual or missing_in_expected:
if not message:
message = (
"%a not equal to %a; missing items: %a in expected, %a in actual."
% (expected, actual, missing_in_expected, missing_in_actual)
)
assert False, message
def assert_raises(fn, *expected_exception_types):
"""Raises an AssertionError if calling fn does not raise one of the expected_exception-types."""
with AssertRaises(*expected_exception_types):
fn()
class AssertRaises(object):
"""With-context that asserts that the code within the context raises the specified exception."""
def __init__(self, *expected_exception_types, **kwargs):
# when you don't specify the exception expected, it's easy to write buggy tests that appear
# to pass but actually throw an exception different from the expected one
assert (
len(expected_exception_types) >= 1
), "You must specify the exception type when using AssertRaises"
self.expected_exception_types = set(expected_exception_types)
self.expected_exception_found = None
self.extra = kwargs.pop("extra", None)
assert_eq({}, kwargs)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type in self.expected_exception_types:
# Return True to suppress the Exception if the type matches. For details,
# see: http://docs.python.org/release/2.5.2/lib/typecontextmanager.html
self.expected_exception_found = exc_val
return True
for t in self.expected_exception_types:
if isinstance(exc_val, t):
self.expected_exception_found = exc_val
return True
expected = ", ".join(map(get_full_name, self.expected_exception_types))
if exc_type is None:
message = "No exception raised, but expected: %s" % expected
if self.extra is not None:
message += " (%s)" % self.extra
else:
template = (
"{TYPE}: {VAL} is raised, but expected:"
" {EXPECTED}{EXTRA_STR}\n\n{STACK}"
)
message = template.format(
TYPE=get_full_name(exc_type),
VAL=exc_val,
EXPECTED=expected,
STACK="".join(traceback.format_tb(exc_tb)),
EXTRA_STR=(" (%s)" % self.extra) if self.extra is not None else "",
)
raise AssertionError(message)
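# Usage sketch (illustrative, not part of the original module):
#
# with AssertRaises(ValueError) as ctx:
#     int('not a number')
# # the caught exception is available as ctx.expected_exception_found
#
# assert_raises(lambda: int('nope'), ValueError)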
# ===================================================
# Strings
# ===================================================
def assert_is_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is not a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) != -1)
), _assert_fail_message(message, substring, subject, "is not in", extra)
def assert_is_not_substring(substring, subject, message=None, extra=None):
"""Raises an AssertionError if substring is a substring of subject."""
assert (
(subject is not None)
and (substring is not None)
and (subject.find(substring) == -1)
), _assert_fail_message(message, substring, subject, "is in", extra)
def assert_startswith(prefix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not start with prefix."""
assert (
(type(subject) is str)
and (type(prefix) is str)
and (subject.startswith(prefix))
), _assert_fail_message(message, subject, prefix, "does not start with", extra)
def assert_endswith(suffix, subject, message=None, extra=None):
"""Raises an AssertionError if the subject string does not end with suffix."""
assert (
(type(subject) is str) and (type(suffix) is str) and (subject.endswith(suffix))
), _assert_fail_message(message, subject, suffix, "does not end with", extra)
| 34.592405 | 100 | 0.640222 | 2,050 | 0.150029 | 0 | 0 | 0 | 0 | 0 | 0 | 5,021 | 0.367462 |
6ab1bd9218aece261b575574072df1d919112085 | 1,108 | py | Python | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | [
"CC-BY-3.0"
] | 4 | 2015-05-12T20:36:41.000Z | 2017-06-26T15:34:02.000Z | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 52 | 2015-03-16T14:02:14.000Z | 2021-12-24T09:50:23.000Z | lib/galaxy/web/__init__.py | rikeshi/galaxy | c536a877e4a9b3d12aa0d00fd4d5e705109a0d0a | ["CC-BY-3.0"] | 1 | 2016-03-21T12:54:06.000Z | 2016-03-21T12:54:06.000Z |
"""
The Galaxy web application framework
"""
from .framework import url_for
from .framework.base import httpexceptions
from .framework.decorators import (
do_not_cache,
error,
expose,
expose_api,
expose_api_anonymous,
expose_api_anonymous_and_sessionless,
expose_api_raw,
expose_api_raw_anonymous,
expose_api_raw_anonymous_and_sessionless,
format_return_as_json,
json,
json_pretty,
legacy_expose_api,
legacy_expose_api_anonymous,
legacy_expose_api_raw,
legacy_expose_api_raw_anonymous,
require_admin,
require_login,
)
__all__ = ('FormBuilder', 'do_not_cache', 'error', 'expose', 'expose_api',
'expose_api_anonymous', 'expose_api_anonymous_and_sessionless',
'expose_api_raw', 'expose_api_raw_anonymous',
'expose_api_raw_anonymous_and_sessionless', 'form',
'format_return_as_json', 'httpexceptions', 'json', 'json_pretty',
'legacy_expose_api', 'legacy_expose_api_anonymous',
'legacy_expose_api_raw', 'legacy_expose_api_raw_anonymous',
'require_admin', 'require_login', 'url_for')
| 30.777778 | 74 | 0.737365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 449 | 0.405235 |
6ab21446cecd0d46b1a47275470353f326cec4d7 | 6,318 | py | Python | src/python/pants/core/goals/check_test.py | yoav-orca/pants | 995448e9add343975844c7a43d5d64618fc4e4d9 | [
"Apache-2.0"
] | 1,806 | 2015-01-05T07:31:00.000Z | 2022-03-31T11:35:41.000Z | src/python/pants/core/goals/check_test.py | yoav-orca/pants | 995448e9add343975844c7a43d5d64618fc4e4d9 | ["Apache-2.0"] | 9,565 | 2015-01-02T19:01:59.000Z | 2022-03-31T23:25:16.000Z | src/python/pants/core/goals/check_test.py | riisi/pants | b33327389fab67c47b919710ea32f20ca284b1a6 | ["Apache-2.0"] | 443 | 2015-01-06T20:17:57.000Z | 2022-03-31T05:28:17.000Z |
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from abc import ABCMeta, abstractmethod
from pathlib import Path
from textwrap import dedent
from typing import ClassVar, Iterable, List, Optional, Tuple, Type
from pants.core.goals.check import Check, CheckRequest, CheckResult, CheckResults, check
from pants.core.util_rules.distdir import DistDir
from pants.engine.addresses import Address
from pants.engine.fs import Workspace
from pants.engine.target import FieldSet, MultipleSourcesField, Target, Targets
from pants.engine.unions import UnionMembership
from pants.testutil.option_util import create_options_bootstrapper
from pants.testutil.rule_runner import MockGet, RuleRunner, mock_console, run_rule_with_mocks
from pants.util.logging import LogLevel
class MockTarget(Target):
alias = "mock_target"
core_fields = (MultipleSourcesField,)
class MockCheckFieldSet(FieldSet):
required_fields = (MultipleSourcesField,)
class MockCheckRequest(CheckRequest, metaclass=ABCMeta):
field_set_type = MockCheckFieldSet
checker_name: ClassVar[str]
@staticmethod
@abstractmethod
def exit_code(_: Iterable[Address]) -> int:
pass
@property
def check_results(self) -> CheckResults:
addresses = [config.address for config in self.field_sets]
return CheckResults(
[
CheckResult(
self.exit_code(addresses),
"",
"",
)
],
checker_name=self.checker_name,
)
class SuccessfulRequest(MockCheckRequest):
checker_name = "SuccessfulChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 0
class FailingRequest(MockCheckRequest):
checker_name = "FailingChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return 1
class ConditionallySucceedsRequest(MockCheckRequest):
checker_name = "ConditionallySucceedsChecker"
@staticmethod
def exit_code(addresses: Iterable[Address]) -> int:
if any(address.target_name == "bad" for address in addresses):
return 127
return 0
class SkippedRequest(MockCheckRequest):
@staticmethod
def exit_code(_) -> int:
return 0
@property
def check_results(self) -> CheckResults:
return CheckResults([], checker_name="SkippedChecker")
class InvalidField(MultipleSourcesField):
pass
class InvalidFieldSet(MockCheckFieldSet):
required_fields = (InvalidField,)
class InvalidRequest(MockCheckRequest):
field_set_type = InvalidFieldSet
checker_name = "InvalidChecker"
@staticmethod
def exit_code(_: Iterable[Address]) -> int:
return -1
def make_target(address: Optional[Address] = None) -> Target:
if address is None:
address = Address("", target_name="tests")
return MockTarget({}, address)
def run_typecheck_rule(
*, request_types: List[Type[CheckRequest]], targets: List[Target]
) -> Tuple[int, str]:
union_membership = UnionMembership({CheckRequest: request_types})
with mock_console(create_options_bootstrapper()) as (console, stdio_reader):
rule_runner = RuleRunner()
result: Check = run_rule_with_mocks(
check,
rule_args=[
console,
Workspace(rule_runner.scheduler, _enforce_effects=False),
Targets(targets),
DistDir(relpath=Path("dist")),
union_membership,
],
mock_gets=[
MockGet(
output_type=CheckResults,
input_type=CheckRequest,
mock=lambda field_set_collection: field_set_collection.check_results,
),
],
union_membership=union_membership,
)
assert not stdio_reader.get_stdout()
return result.exit_code, stdio_reader.get_stderr()
def test_invalid_target_noops() -> None:
exit_code, stderr = run_typecheck_rule(request_types=[InvalidRequest], targets=[make_target()])
assert exit_code == 0
assert stderr == ""
def test_summary() -> None:
good_address = Address("", target_name="good")
bad_address = Address("", target_name="bad")
exit_code, stderr = run_typecheck_rule(
request_types=[
ConditionallySucceedsRequest,
FailingRequest,
SkippedRequest,
SuccessfulRequest,
],
targets=[make_target(good_address), make_target(bad_address)],
)
assert exit_code == FailingRequest.exit_code([bad_address])
assert stderr == dedent(
"""\
𐄂 ConditionallySucceedsChecker failed.
𐄂 FailingChecker failed.
- SkippedChecker skipped.
✓ SuccessfulChecker succeeded.
"""
)
def test_streaming_output_skip() -> None:
results = CheckResults([], checker_name="typechecker")
assert results.level() == LogLevel.DEBUG
assert results.message() == "typechecker skipped."
def test_streaming_output_success() -> None:
results = CheckResults([CheckResult(0, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.INFO
assert results.message() == dedent(
"""\
typechecker succeeded.
stdout
stderr
"""
)
def test_streaming_output_failure() -> None:
results = CheckResults([CheckResult(18, "stdout", "stderr")], checker_name="typechecker")
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 18).
stdout
stderr
"""
)
def test_streaming_output_partitions() -> None:
results = CheckResults(
[
CheckResult(21, "", "", partition_description="ghc8.1"),
CheckResult(0, "stdout", "stderr", partition_description="ghc9.2"),
],
checker_name="typechecker",
)
assert results.level() == LogLevel.ERROR
assert results.message() == dedent(
"""\
typechecker failed (exit code 21).
Partition #1 - ghc8.1:
Partition #2 - ghc9.2:
stdout
stderr
"""
)
| 28.459459 | 99 | 0.650681 | 1,946 | 0.307619 | 0 | 0 | 1,060 | 0.167562 | 0 | 0 | 921 | 0.14559 |
6ab219191a7ea6ce5d831e0b7655a8775e4ac26e | 9,851 | py | Python | data-processing/entities/definitions/model/utils.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | null | null | null | data-processing/entities/definitions/model/utils.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | null | null | null | data-processing/entities/definitions/model/utils.py | alexkreidler/scholarphi | 86d26d0bfa5ded00760fba1a9c6891a94a3dd6d2 | [
"Apache-2.0"
] | 1 | 2020-10-23T12:36:11.000Z | 2020-10-23T12:36:11.000Z | import os
import random
from typing import Any, Dict, List, Union
import numpy as np
import torch
from colorama import Fore, Style
from sklearn.metrics import f1_score
from sklearn.metrics import precision_recall_fscore_support as score
from sklearn.metrics import precision_score, recall_score
def highlight(input_: Any) -> str:
input_ = str(input_)
return str(Fore.YELLOW + str(input_) + Style.RESET_ALL)
def get_intent_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.intent_label_file), "r", encoding="utf-8"
)
]
def get_slot_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.slot_label_file), "r", encoding="utf-8"
)
]
def get_pos_labels(args: Any) -> List[str]:
return [
label.strip()
for label in open(
os.path.join(args.data_dir, args.pos_label_file), "r", encoding="utf-8"
)
]
def set_torch_seed(seed: Any, no_cuda: bool) -> None:
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed) # type: ignore
if not no_cuda and torch.cuda.is_available():
torch.cuda.manual_seed_all(seed) # type: ignore
def compute_metrics(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
assert (
len(intent_preds) == len(intent_labels) == len(slot_preds) == len(slot_labels)
)
results: Dict[Any, Any] = {}
intent_result = get_intent_acc(intent_preds, intent_labels)
slot_result = get_slot_metrics(slot_preds, slot_labels)
    semantic_result = get_sentence_frame_acc(
intent_preds, intent_labels, slot_preds, slot_labels
)
# New metrics added following Dan's request.
slot_simple_result = get_slot_simple_metrics(slot_preds, slot_labels)
partial_match_result = get_partial_match_metrics(slot_preds, slot_labels)
results.update(intent_result)
results.update(slot_result)
    results.update(semantic_result)
results.update(slot_simple_result)
results.update(partial_match_result)
return results
def simplify_tokens(preds: List[str]) -> List[str]:
simple_preds = []
for p in preds:
if p.endswith("TERM"):
simple_preds.append("TERM")
elif p.endswith("DEF"):
simple_preds.append("DEF")
else:
simple_preds.append(p)
return simple_preds
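# Illustrative example (not in the original module): simplify_tokens
# collapses the BIO prefixes into the two 'virtual tags' used below.
# >>> simplify_tokens(["B-TERM", "I-TERM", "O", "B-DEF", "I-DEF"])
# ['TERM', 'TERM', 'O', 'DEF', 'DEF']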
def get_partial_match_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
    Suppose there are N such pairs in the gold data and the system predicts M
    such pairs. A 'partial match' happens when the system predicts a pair
    <term, defn> and there is some overlap (at least one token) between the
    predicted and gold term spans AND some overlap between the predicted and
    gold definition spans. Let P be the number of partial matches. Then:
    Partial match precision = P/M
    Partial match recall = P/N
"""
assert len(preds) == len(labels)
both_in_preds, both_in_labels = [], []
partial_matches, exact_matches = [], []
for pred_sent, label_sent in zip(preds, labels):
simple_pred_sent = simplify_tokens(pred_sent)
simple_label_sent = simplify_tokens(label_sent)
# check whether term/def exist together
both_in_pred = "TERM" in simple_pred_sent and "DEF" in simple_pred_sent
both_in_label = "TERM" in simple_label_sent and "DEF" in simple_label_sent
both_in_preds.append(both_in_pred)
both_in_labels.append(both_in_label)
partial_match = False
exact_match = False
match: List[Union[str, bool]] = []
if both_in_pred and both_in_label:
for p, l in zip(simple_pred_sent, simple_label_sent):
if p == l:
match.append(p)
else:
match.append(False)
if "TERM" in match and "DEF" in match:
partial_match = True
if False not in match:
exact_match = True
partial_matches.append(partial_match)
exact_matches.append(exact_match)
    count_both_in_preds = sum(both_in_preds)  # M (pairs predicted)
    count_both_in_labels = sum(both_in_labels)  # N (pairs in gold)
    count_partial_matches = sum(partial_matches)  # P
    count_exact_matches = sum(exact_matches)  # E
    partial_precision = count_partial_matches / count_both_in_preds
    partial_recall = count_partial_matches / count_both_in_labels
    # Guard against ZeroDivisionError when there are no partial matches.
    partial_denom = partial_precision + partial_recall
    partial_fscore = (
        2 * partial_precision * partial_recall / partial_denom
        if partial_denom
        else 0.0
    )
    exact_precision = count_exact_matches / count_both_in_preds
    exact_recall = count_exact_matches / count_both_in_labels
    # Same guard for the exact-match F-score.
    exact_denom = exact_precision + exact_recall
    exact_fscore = (
        2 * exact_precision * exact_recall / exact_denom if exact_denom else 0.0
    )
return {
"partial_match_precision": partial_precision,
"partial_match_recall": partial_recall,
"partial_match_f1": partial_fscore,
"exact_match_precision": exact_precision,
"excat_match_recall": exact_recall,
"excat_match_f1": exact_fscore,
}
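# Worked example (hypothetical inputs, not from the original code): one
# predicted pair that overlaps the gold pair on both spans counts as a
# partial match but not an exact match; with the zero-division guard above,
# the exact-match F-score is then 0.0 rather than an error.
# >>> m = get_partial_match_metrics(
# ...     preds=[["B-TERM", "B-DEF", "I-DEF"]],
# ...     labels=[["B-TERM", "B-DEF", "O"]])
# >>> m["partial_match_f1"], m["exact_match_f1"]
# (1.0, 0.0)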
def get_slot_simple_metrics(
preds: List[List[str]], labels: List[List[str]]
) -> Dict[Any, Any]:
"""
Conceptually, define the following new types of ‘virtual tags’
TERM = B-term OR I-Term (ie the union of those two tags)
DEF = B-Def OR I-Def
    Now, what are the P, R & F1 numbers for TERM and DEF? These matter because
    users may just care about the accuracy of term and definition matching,
    and the macro-averaged scores conflate other things, like recall on these
    metrics and precision on O. Likewise, the current macro average treats
    missing the first word in a definition differently from skipping the last
    word.
"""
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
# simplify by replacing {B,I}-TERM to TERM and {B,I}-DEF to DEF
simple_preds = simplify_tokens(preds_flattened)
simple_labels = simplify_tokens(labels_flattened)
assert len(simple_preds) == len(simple_labels)
label_names = ["O", "TERM", "DEF"]
p, r, f, s = score(simple_labels, simple_preds, average=None, labels=label_names)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# pprint(per_class)
return {
"slot_merged_TERM_precision": per_class["p"][1],
"slot_merged_TERM_recall": per_class["r"][1],
"slot_merged_TERM_f1": per_class["f"][1],
"slot_merged_DEFINITION_precision": per_class["p"][2],
"slot_merged_DEFINITION_recall": per_class["r"][2],
"slot_merged_DEFINITION_f1": per_class["f"][2],
}
def get_slot_metrics(preds: List[List[str]], labels: List[List[str]]) -> Dict[Any, Any]:
assert len(preds) == len(labels)
# flatten
preds_flattened = [p for ps in preds for p in ps]
labels_flattened = [l for ls in labels for l in ls]
macro_f1 = f1_score(labels_flattened, preds_flattened, average="macro")
micro_f1 = f1_score(labels_flattened, preds_flattened, average="micro")
macro_p = precision_score(labels_flattened, preds_flattened, average="macro")
micro_p = precision_score(labels_flattened, preds_flattened, average="micro")
macro_r = recall_score(labels_flattened, preds_flattened, average="macro")
micro_r = recall_score(labels_flattened, preds_flattened, average="micro")
label_names = ["O", "B-TERM", "I-TERM", "B-DEF", "I-DEF"]
p, r, f, s = score(
labels_flattened, preds_flattened, average=None, labels=label_names
)
s = [int(si) for si in s]
p = [round(float(pi), 3) for pi in p]
r = [round(float(pi), 3) for pi in r]
f = [round(float(pi), 3) for pi in f]
per_class = {"p": list(p), "r": list(r), "f": list(f), "s": list(s)}
# print(per_class)
return {
"slot_precision_macro": macro_p,
"slot_recall_macro": macro_r,
"slot_f1_macro": macro_f1,
"slot_precision_micro": micro_p,
"slot_recall_micro": micro_r,
"slot_f1_micro": micro_f1,
"slot_precision_per_label": per_class["p"],
"slot_recal_per_label": per_class["r"],
"slot_f1_per_label": per_class["f"],
"slot_num_per_label": per_class["s"],
}
def get_intent_acc(preds: List[str], labels: List[str]) -> Dict[Any, Any]:
acc = (preds == labels).mean()
return {"intent_acc": acc}
def read_prediction_text(args: Any) -> List[str]:
return [
text.strip()
for text in open(
os.path.join(args.pred_dir, args.pred_input_file), "r", encoding="utf-8"
)
]
def get_sentence_frame_acc(
intent_preds: List[str],
intent_labels: List[str],
slot_preds: List[List[str]],
slot_labels: List[List[str]],
) -> Dict[Any, Any]:
"""For the cases that intent and all the slots are correct (in one sentence)"""
# Get the intent comparison result
intent_result = intent_preds == intent_labels
# Get the slot comparision result
slot_result = []
for preds, labels in zip(slot_preds, slot_labels):
assert len(preds) == len(labels)
one_sent_result = True
for p, l in zip(preds, labels):
if p != l:
one_sent_result = False
break
slot_result.append(one_sent_result)
    slot_result = np.array(slot_result)
    semantic_acc = np.multiply(intent_result, slot_result).mean()
    return {"semantic_frame_acc": semantic_acc}
| 35.952555 | 376 | 0.66308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,143 | 0.217365 |
6ab2ef53d9a0815c477ae2435981a3a0029d019b | 11,463 | py | Python | fire/trace.py | nvhoang55/python-fire | b78287f6d68208732ca4d91e57f4678e6c4747c7 | [
"Apache-2.0"
] | null | null | null | fire/trace.py | nvhoang55/python-fire | b78287f6d68208732ca4d91e57f4678e6c4747c7 | [
"Apache-2.0"
] | null | null | null | fire/trace.py | nvhoang55/python-fire | b78287f6d68208732ca4d91e57f4678e6c4747c7 | [
"Apache-2.0"
] | 1 | 2022-01-17T08:35:09.000Z | 2022-01-17T08:35:09.000Z | # Copyright (C) 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module has classes for tracing the execution of a Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action may
be instantiating a class, calling a routine, or accessing a property.
Each action consumes args and results in a new component. The final component
is serialized to stdout by Fire as well as returned by the Fire method. If
a Fire usage error occurs, such as insufficient arguments being provided to call
a function, then that error will be captured in the trace and the final
component will be None.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pipes
from fire import inspectutils
INITIAL_COMPONENT = 'Initial component'
INSTANTIATED_CLASS = 'Instantiated class'
CALLED_ROUTINE = 'Called routine'
CALLED_CALLABLE = 'Called callable'
ACCESSED_PROPERTY = 'Accessed property'
COMPLETION_SCRIPT = 'Generated completion script'
INTERACTIVE_MODE = 'Entered interactive mode'
class FireTrace(object):
"""A FireTrace represents the steps taken during a single Fire execution.
A FireTrace consists of a sequence of FireTraceElement objects. Each element
represents an action taken by Fire during a single Fire execution. An action
may be instantiating a class, calling a routine, or accessing a property.
"""
def __init__(self, initial_component, name=None, separator='-', verbose=False,
show_help=False, show_trace=False):
initial_trace_element = FireTraceElement(
component=initial_component,
action=INITIAL_COMPONENT,
)
self.name = name
self.separator = separator
self.elements = [initial_trace_element]
self.verbose = verbose
self.show_help = show_help
self.show_trace = show_trace
def GetResult(self):
"""Returns the component from the last element of the trace."""
# pytype: disable=attribute-error
return self.GetLastHealthyElement().component
# pytype: enable=attribute-error
def GetLastHealthyElement(self):
"""Returns the last element of the trace that is not an error.
This element will contain the final component indicated by the trace.
Returns:
The last element of the trace that is not an error.
"""
for element in reversed(self.elements):
if not element.HasError():
return element
return None
def HasError(self):
"""Returns whether the Fire execution encountered a Fire usage error."""
return self.elements[-1].HasError()
def AddAccessedProperty(self, component, target, args, filename, lineno):
element = FireTraceElement(
component=component,
action=ACCESSED_PROPERTY,
target=target,
args=args,
filename=filename,
lineno=lineno,
)
self.elements.append(element)
def AddCalledComponent(self, component, target, args, filename, lineno,
capacity, action=CALLED_CALLABLE):
"""Adds an element to the trace indicating that a component was called.
Also applies to instantiating a class.
Args:
component: The result of calling the callable.
target: The name of the callable.
args: The args consumed in order to call this callable.
filename: The file in which the callable is defined, or None if N/A.
lineno: The line number on which the callable is defined, or None if N/A.
capacity: (bool) Whether the callable could have accepted additional args.
action: The value to include as the action in the FireTraceElement.
"""
element = FireTraceElement(
component=component,
action=action,
target=target,
args=args,
filename=filename,
lineno=lineno,
capacity=capacity,
)
self.elements.append(element)
def AddCompletionScript(self, script):
element = FireTraceElement(
component=script,
action=COMPLETION_SCRIPT,
)
self.elements.append(element)
def AddInteractiveMode(self):
element = FireTraceElement(action=INTERACTIVE_MODE)
self.elements.append(element)
def AddError(self, error, args):
element = FireTraceElement(error=error, args=args)
self.elements.append(element)
def AddSeparator(self):
"""Marks that the most recent element of the trace used a separator.
A separator is an argument you can pass to a Fire CLI to separate args left
of the separator from args right of the separator.
Here's an example to demonstrate the separator. Let's say you have a
function that takes a variable number of args, and you want to call that
function, and then upper case the result. Here's how to do it:
# in Python
def display(arg1, arg2='!'):
return arg1 + arg2
# from Bash (the default separator is the hyphen -)
display hello # hello!
display hello upper # helloupper
display hello - upper # HELLO!
Note how the separator caused the display function to be called with the
default value for arg2.
"""
self.elements[-1].AddSeparator()
def _Quote(self, arg):
if arg.startswith('--') and '=' in arg:
prefix, value = arg.split('=', 1)
return pipes.quote(prefix) + '=' + pipes.quote(value)
return pipes.quote(arg)
def GetCommand(self, include_separators=True):
"""Returns the command representing the trace up to this point.
Args:
include_separators: Whether or not to include separators in the command.
Returns:
A string representing a Fire CLI command that would produce this trace.
"""
args = []
if self.name:
args.append(self.name)
for element in self.elements:
if element.HasError():
continue
if element.args:
args.extend(element.args)
if element.HasSeparator() and include_separators:
args.append(self.separator)
if self.NeedsSeparator() and include_separators:
args.append(self.separator)
return ' '.join(self._Quote(arg) for arg in args)
def NeedsSeparator(self):
"""Returns whether a separator should be added to the command.
If the command is a function call, then adding an additional argument to the
command sometimes would add an extra arg to the function call, and sometimes
would add an arg acting on the result of the function call.
This function tells us whether we should add a separator to the command
before adding additional arguments in order to make sure the arg is applied
to the result of the function call, and not the function call itself.
Returns:
Whether a separator should be added to the command if order to keep the
component referred to by the command the same when adding additional args.
"""
element = self.GetLastHealthyElement()
return element.HasCapacity() and not element.HasSeparator()
def __str__(self):
lines = []
for index, element in enumerate(self.elements):
line = '{index}. {trace_string}'.format(
index=index + 1,
trace_string=element,
)
lines.append(line)
return '\n'.join(lines)
def NeedsSeparatingHyphenHyphen(self, flag='help'):
"""Returns whether a the trace need '--' before '--help'.
'--' is needed when the component takes keyword arguments, when the value of
flag matches one of the argument of the component, or the component takes in
keyword-only arguments(e.g. argument with default value).
Args:
flag: the flag available for the trace
Returns:
True for needed '--', False otherwise.
"""
element = self.GetLastHealthyElement()
component = element.component
spec = inspectutils.GetFullArgSpec(component)
return (spec.varkw is not None
or flag in spec.args
or flag in spec.kwonlyargs)
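# Illustrative sketch (not part of the original module): building a trace and
# reading back the equivalent CLI command. The component and args here are
# hypothetical.
# >>> trace = FireTrace(initial_component=dict, name='tool')
# >>> trace.AddCalledComponent({}, 'dict', ['--foo=1'], None, None, False,
# ...                          action=INSTANTIATED_CLASS)
# >>> trace.GetCommand()
# 'tool --foo=1'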
class FireTraceElement(object):
"""A FireTraceElement represents a single step taken by a Fire execution.
Examples of a FireTraceElement are the instantiation of a class or the
accessing of an object member.
"""
def __init__(self,
component=None,
action=None,
target=None,
args=None,
filename=None,
lineno=None,
error=None,
capacity=None):
"""Instantiates a FireTraceElement.
Args:
component: The result of this element of the trace.
action: The type of action (eg instantiating a class) taking place.
target: (string) The name of the component being acted upon.
args: The args consumed by the represented action.
filename: The file in which the action is defined, or None if N/A.
lineno: The line number on which the action is defined, or None if N/A.
error: The error represented by the action, or None if N/A.
capacity: (bool) Whether the action could have accepted additional args.
"""
self.component = component
self._action = action
self._target = target
self.args = args
self._filename = filename
self._lineno = lineno
self._error = error
self._separator = False
self._capacity = capacity
def HasError(self):
return self._error is not None
def HasCapacity(self):
return self._capacity
def HasSeparator(self):
return self._separator
def AddSeparator(self):
self._separator = True
def ErrorAsStr(self):
return ' '.join(str(arg) for arg in self._error.args)
def __str__(self):
if self.HasError():
return self.ErrorAsStr()
else:
# Format is: {action} "{target}" ({filename}:{lineno})
string = self._action
if self._target is not None:
string += ' "{target}"'.format(target=self._target)
if self._filename is not None:
path = self._filename
if self._lineno is not None:
path += ':{lineno}'.format(lineno=self._lineno)
string += ' ({path})'.format(path=path)
return string
| 36.275316 | 84 | 0.640932 | 9,799 | 0.854837 | 0 | 0 | 0 | 0 | 0 | 0 | 6,005 | 0.523859 |
6ab36187809db9e1ba202abbdfa4e21a0d5b6dfb | 33,549 | py | Python | test/unit/__init__.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | test/unit/__init__.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | test/unit/__init__.py | thiagodasilva/swift | 0553d9333ed0045c4d209065b315533a33e5d7d7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Swift tests """
from __future__ import print_function
import os
import copy
import logging
import errno
from six.moves import range
import sys
from contextlib import contextmanager, closing
from collections import defaultdict, Iterable
import itertools
from numbers import Number
from tempfile import NamedTemporaryFile
import time
import eventlet
from eventlet.green import socket
from tempfile import mkdtemp
from shutil import rmtree
from swift.common.utils import Timestamp, NOTICE
from test import get_config
from swift.common import swob, utils
from swift.common.ring import Ring, RingData
from hashlib import md5
import logging.handlers
from six.moves.http_client import HTTPException
from swift.common import storage_policy
from swift.common.storage_policy import (StoragePolicy, ECStoragePolicy,
VALID_EC_TYPES)
import functools
import six.moves.cPickle as pickle
from gzip import GzipFile
import mock as mocklib
import inspect
EMPTY_ETAG = md5().hexdigest()
# try not to import this module from swift
if not os.path.basename(sys.argv[0]).startswith('swift'):
# never patch HASH_PATH_SUFFIX AGAIN!
utils.HASH_PATH_SUFFIX = 'endcap'
EC_TYPE_PREFERENCE = [
'liberasurecode_rs_vand',
'jerasure_rs_vand',
]
for eclib_name in EC_TYPE_PREFERENCE:
if eclib_name in VALID_EC_TYPES:
break
else:
raise SystemExit('ERROR: unable to find suitable PyECLib type'
' (none of %r found in %r)' % (
EC_TYPE_PREFERENCE,
VALID_EC_TYPES,
))
DEFAULT_TEST_EC_TYPE = eclib_name
def patch_policies(thing_or_policies=None, legacy_only=False,
with_ec_default=False, fake_ring_args=None):
if isinstance(thing_or_policies, (
Iterable, storage_policy.StoragePolicyCollection)):
return PatchPolicies(thing_or_policies, fake_ring_args=fake_ring_args)
if legacy_only:
default_policies = [
StoragePolicy(0, name='legacy', is_default=True),
]
default_ring_args = [{}]
elif with_ec_default:
default_policies = [
ECStoragePolicy(0, name='ec', is_default=True,
ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10,
ec_nparity=4, ec_segment_size=4096),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{'replicas': 14}, {}]
else:
default_policies = [
StoragePolicy(0, name='nulo', is_default=True),
StoragePolicy(1, name='unu'),
]
default_ring_args = [{}, {}]
fake_ring_args = fake_ring_args or default_ring_args
decorator = PatchPolicies(default_policies, fake_ring_args=fake_ring_args)
if not thing_or_policies:
return decorator
else:
# it's a thing, we return the wrapped thing instead of the decorator
return decorator(thing_or_policies)
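# Illustrative usage sketches (the test classes and method are hypothetical,
# not part of the original module):
#
# @patch_policies                        # default two replicated policies
# class TestFoo(unittest.TestCase):
#     ...
#
# @patch_policies(with_ec_default=True)  # EC default policy + one replicated
# class TestBar(unittest.TestCase):
#     ...
#
# @patch_policies([StoragePolicy(0, name='zero', is_default=True)])
# def test_baz(self):
#     ...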
class PatchPolicies(object):
"""
Why not mock.patch? In my case, when used as a decorator on the class it
seemed to patch setUp at the wrong time (i.e. in setup the global wasn't
patched yet)
"""
def __init__(self, policies, fake_ring_args=None):
if isinstance(policies, storage_policy.StoragePolicyCollection):
self.policies = policies
else:
self.policies = storage_policy.StoragePolicyCollection(policies)
self.fake_ring_args = fake_ring_args or [None] * len(self.policies)
def _setup_rings(self):
"""
Our tests tend to use the policies rings like their own personal
playground - which can be a problem in the particular case of a
patched TestCase class where the FakeRing objects are scoped in the
call to the patch_policies wrapper outside of the TestCase instance
which can lead to some bled state.
To help tests get better isolation without having to think about it,
here we're capturing the args required to *build* a new FakeRing
instances so we can ensure each test method gets a clean ring setup.
The TestCase can always "tweak" these fresh rings in setUp - or if
they'd prefer to get the same "reset" behavior with custom FakeRing's
they can pass in their own fake_ring_args to patch_policies instead of
setting the object_ring on the policy definitions.
"""
for policy, fake_ring_arg in zip(self.policies, self.fake_ring_args):
if fake_ring_arg is not None:
policy.object_ring = FakeRing(**fake_ring_arg)
def __call__(self, thing):
if isinstance(thing, type):
return self._patch_class(thing)
else:
return self._patch_method(thing)
def _patch_class(self, cls):
"""
Creating a new class that inherits from decorated class is the more
common way I've seen class decorators done - but it seems to cause
infinite recursion when super is called from inside methods in the
decorated class.
"""
orig_setUp = cls.setUp
orig_tearDown = cls.tearDown
def setUp(cls_self):
self._orig_POLICIES = storage_policy._POLICIES
if not getattr(cls_self, '_policies_patched', False):
storage_policy._POLICIES = self.policies
self._setup_rings()
cls_self._policies_patched = True
orig_setUp(cls_self)
def tearDown(cls_self):
orig_tearDown(cls_self)
storage_policy._POLICIES = self._orig_POLICIES
cls.setUp = setUp
cls.tearDown = tearDown
return cls
def _patch_method(self, f):
@functools.wraps(f)
def mywrapper(*args, **kwargs):
self._orig_POLICIES = storage_policy._POLICIES
try:
storage_policy._POLICIES = self.policies
self._setup_rings()
return f(*args, **kwargs)
finally:
storage_policy._POLICIES = self._orig_POLICIES
return mywrapper
def __enter__(self):
self._orig_POLICIES = storage_policy._POLICIES
storage_policy._POLICIES = self.policies
def __exit__(self, *args):
storage_policy._POLICIES = self._orig_POLICIES
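# PatchPolicies also works as a plain context manager (illustrative sketch,
# hypothetical policy collection):
# >>> with PatchPolicies([StoragePolicy(0, name='zero', is_default=True)]):
# ...     pass  # storage_policy._POLICIES is patched inside the block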
class FakeRing(Ring):
def __init__(self, replicas=3, max_more_nodes=0, part_power=0,
base_port=1000):
"""
:param part_power: make part calculation based on the path
If you set a part_power when you setup your FakeRing the parts you get
out of ring methods will actually be based on the path - otherwise we
exercise the real ring code, but ignore the result and return 1.
"""
self._base_port = base_port
self.max_more_nodes = max_more_nodes
self._part_shift = 32 - part_power
# 9 total nodes (6 more past the initial 3) is the cap, no matter if
# this is set higher, or R^2 for R replicas
self.set_replicas(replicas)
self._reload()
def _reload(self):
self._rtime = time.time()
def set_replicas(self, replicas):
self.replicas = replicas
self._devs = []
for x in range(self.replicas):
ip = '10.0.0.%s' % x
port = self._base_port + x
self._devs.append({
'ip': ip,
'replication_ip': ip,
'port': port,
'replication_port': port,
'device': 'sd' + (chr(ord('a') + x)),
'zone': x % 3,
'region': x % 2,
'id': x,
})
@property
def replica_count(self):
return self.replicas
def _get_part_nodes(self, part):
return [dict(node, index=i) for i, node in enumerate(list(self._devs))]
def get_more_nodes(self, part):
for x in range(self.replicas, (self.replicas + self.max_more_nodes)):
yield {'ip': '10.0.0.%s' % x,
'replication_ip': '10.0.0.%s' % x,
'port': self._base_port + x,
'replication_port': self._base_port + x,
'device': 'sda',
'zone': x % 3,
'region': x % 2,
'id': x}
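# Illustrative sketch (hypothetical values): with the default part_power=0
# every path maps to partition 0, and get_part_nodes returns one device dict
# per replica.
# >>> ring = FakeRing(replicas=3)
# >>> [d['device'] for d in ring.get_part_nodes(0)]
# ['sda', 'sdb', 'sdc']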
def write_fake_ring(path, *devs):
"""
Pretty much just a two node, two replica, 2 part power ring...
"""
dev1 = {'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
'port': 6000}
dev2 = {'id': 0, 'zone': 0, 'device': 'sdb1', 'ip': '127.0.0.1',
'port': 6000}
dev1_updates, dev2_updates = devs or ({}, {})
dev1.update(dev1_updates)
dev2.update(dev2_updates)
replica2part2dev_id = [[0, 1, 0, 1], [1, 0, 1, 0]]
devs = [dev1, dev2]
part_shift = 30
with closing(GzipFile(path, 'wb')) as f:
pickle.dump(RingData(replica2part2dev_id, devs, part_shift), f)
class FabricatedRing(Ring):
"""
When a FakeRing just won't do - you can fabricate one to meet
your tests needs.
"""
def __init__(self, replicas=6, devices=8, nodes=4, port=6000,
part_power=4):
self.devices = devices
self.nodes = nodes
self.port = port
        self.replicas = replicas
self.part_power = part_power
self._part_shift = 32 - self.part_power
self._reload()
def _reload(self, *args, **kwargs):
self._rtime = time.time() * 2
if hasattr(self, '_replica2part2dev_id'):
return
self._devs = [{
'region': 1,
'zone': 1,
'weight': 1.0,
'id': i,
'device': 'sda%d' % i,
'ip': '10.0.0.%d' % (i % self.nodes),
'replication_ip': '10.0.0.%d' % (i % self.nodes),
'port': self.port,
'replication_port': self.port,
} for i in range(self.devices)]
self._replica2part2dev_id = [
[None] * 2 ** self.part_power
for i in range(self.replicas)
]
dev_ids = itertools.cycle(range(self.devices))
for p in range(2 ** self.part_power):
for r in range(self.replicas):
self._replica2part2dev_id[r][p] = next(dev_ids)
class FakeMemcache(object):
def __init__(self):
self.store = {}
def get(self, key):
return self.store.get(key)
def keys(self):
return self.store.keys()
def set(self, key, value, time=0):
self.store[key] = value
return True
def incr(self, key, time=0):
self.store[key] = self.store.setdefault(key, 0) + 1
return self.store[key]
@contextmanager
def soft_lock(self, key, timeout=0, retries=5):
yield True
def delete(self, key):
try:
del self.store[key]
except Exception:
pass
return True
def readuntil2crlfs(fd):
rv = ''
lc = ''
crlfs = 0
while crlfs < 2:
c = fd.read(1)
if not c:
raise ValueError("didn't get two CRLFs; just got %r" % rv)
rv = rv + c
if c == '\r' and lc != '\n':
crlfs = 0
if lc == '\r' and c == '\n':
crlfs += 1
lc = c
return rv
def connect_tcp(hostport):
rv = socket.socket()
rv.connect(hostport)
return rv
@contextmanager
def tmpfile(content):
with NamedTemporaryFile('w', delete=False) as f:
file_name = f.name
f.write(str(content))
try:
yield file_name
finally:
os.unlink(file_name)
xattr_data = {}
def _get_inode(fd):
if not isinstance(fd, int):
try:
fd = fd.fileno()
except AttributeError:
return os.stat(fd).st_ino
return os.fstat(fd).st_ino
def _setxattr(fd, k, v):
inode = _get_inode(fd)
data = xattr_data.get(inode, {})
data[k] = v
xattr_data[inode] = data
def _getxattr(fd, k):
inode = _get_inode(fd)
data = xattr_data.get(inode, {}).get(k)
if not data:
raise IOError(errno.ENODATA, "Fake IOError")
return data
import xattr
xattr.setxattr = _setxattr
xattr.getxattr = _getxattr
@contextmanager
def temptree(files, contents=''):
# generate enough contents to fill the files
c = len(files)
contents = (list(contents) + [''] * c)[:c]
tempdir = mkdtemp()
for path, content in zip(files, contents):
if os.path.isabs(path):
path = '.' + path
new_path = os.path.join(tempdir, path)
subdir = os.path.dirname(new_path)
if not os.path.exists(subdir):
os.makedirs(subdir)
with open(new_path, 'w') as f:
f.write(str(content))
try:
yield tempdir
finally:
rmtree(tempdir)
def with_tempdir(f):
"""
Decorator to give a single test a tempdir as argument to test method.
"""
@functools.wraps(f)
def wrapped(*args, **kwargs):
tempdir = mkdtemp()
args = list(args)
args.append(tempdir)
try:
return f(*args, **kwargs)
finally:
rmtree(tempdir)
return wrapped
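# Illustrative sketch (hypothetical test method): the decorated test receives
# a fresh temporary directory as its last positional argument, and the
# directory is removed again after the test returns.
# >>> @with_tempdir
# ... def test_writes_file(self, tempdir):
# ...     open(os.path.join(tempdir, 'x'), 'w').close()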
class NullLoggingHandler(logging.Handler):
def emit(self, record):
pass
class UnmockTimeModule(object):
"""
Even if a test mocks time.time - you can restore unmolested behavior in a
another module who imports time directly by monkey patching it's imported
reference to the module with an instance of this class
"""
_orig_time = time.time
def __getattribute__(self, name):
if name == 'time':
return UnmockTimeModule._orig_time
return getattr(time, name)
# logging.LogRecord.__init__ calls time.time
logging.time = UnmockTimeModule()
class FakeLogger(logging.Logger, object):
# a thread safe fake logger
def __init__(self, *args, **kwargs):
self._clear()
self.name = 'swift.unit.fake_logger'
self.level = logging.NOTSET
if 'facility' in kwargs:
self.facility = kwargs['facility']
self.statsd_client = None
self.thread_locals = None
self.parent = None
store_in = {
logging.ERROR: 'error',
logging.WARNING: 'warning',
logging.INFO: 'info',
logging.DEBUG: 'debug',
logging.CRITICAL: 'critical',
NOTICE: 'notice',
}
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The python
logging lvl is set to 25, just above info. SysLogHandler is
monkey patched to map this log lvl to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _log(self, level, msg, *args, **kwargs):
store_name = self.store_in[level]
cargs = [msg]
if any(args):
cargs.extend(args)
captured = dict(kwargs)
if 'exc_info' in kwargs and \
not isinstance(kwargs['exc_info'], tuple):
captured['exc_info'] = sys.exc_info()
self.log_dict[store_name].append((tuple(cargs), captured))
super(FakeLogger, self)._log(level, msg, *args, **kwargs)
def _clear(self):
self.log_dict = defaultdict(list)
self.lines_dict = {'critical': [], 'error': [], 'info': [],
'warning': [], 'debug': [], 'notice': []}
clear = _clear # this is a public interface
def get_lines_for_level(self, level):
if level not in self.lines_dict:
raise KeyError(
"Invalid log level '%s'; valid levels are %s" %
(level,
', '.join("'%s'" % lvl for lvl in sorted(self.lines_dict))))
return self.lines_dict[level]
def all_log_lines(self):
return dict((level, msgs) for level, msgs in self.lines_dict.items()
if len(msgs) > 0)
def _store_in(store_name):
def stub_fn(self, *args, **kwargs):
self.log_dict[store_name].append((args, kwargs))
return stub_fn
# mock out the StatsD logging methods:
update_stats = _store_in('update_stats')
increment = _store_in('increment')
decrement = _store_in('decrement')
timing = _store_in('timing')
timing_since = _store_in('timing_since')
transfer_rate = _store_in('transfer_rate')
set_statsd_prefix = _store_in('set_statsd_prefix')
def get_increments(self):
return [call[0][0] for call in self.log_dict['increment']]
def get_increment_counts(self):
counts = {}
for metric in self.get_increments():
if metric not in counts:
counts[metric] = 0
counts[metric] += 1
return counts
def setFormatter(self, obj):
self.formatter = obj
def close(self):
self._clear()
def set_name(self, name):
# don't touch _handlers
self._name = name
def acquire(self):
pass
def release(self):
pass
def createLock(self):
pass
def emit(self, record):
pass
def _handle(self, record):
try:
line = record.getMessage()
except TypeError:
print('WARNING: unable to format log message %r %% %r' % (
record.msg, record.args))
raise
self.lines_dict[record.levelname.lower()].append(line)
def handle(self, record):
self._handle(record)
def flush(self):
pass
def handleError(self, record):
pass
class DebugLogger(FakeLogger):
"""A simple stdout logging version of FakeLogger"""
def __init__(self, *args, **kwargs):
FakeLogger.__init__(self, *args, **kwargs)
self.formatter = logging.Formatter(
"%(server)s %(levelname)s: %(message)s")
def handle(self, record):
self._handle(record)
print(self.formatter.format(record))
class DebugLogAdapter(utils.LogAdapter):
def _send_to_logger(name):
def stub_fn(self, *args, **kwargs):
return getattr(self.logger, name)(*args, **kwargs)
return stub_fn
# delegate to FakeLogger's mocks
update_stats = _send_to_logger('update_stats')
increment = _send_to_logger('increment')
decrement = _send_to_logger('decrement')
timing = _send_to_logger('timing')
timing_since = _send_to_logger('timing_since')
transfer_rate = _send_to_logger('transfer_rate')
set_statsd_prefix = _send_to_logger('set_statsd_prefix')
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
return getattr(self.__dict__['logger'], name)
def debug_logger(name='test'):
"""get a named adapted debug logger"""
return DebugLogAdapter(DebugLogger(), name)
original_syslog_handler = logging.handlers.SysLogHandler
def fake_syslog_handler():
for attr in dir(original_syslog_handler):
if attr.startswith('LOG'):
setattr(FakeLogger, attr,
copy.copy(getattr(logging.handlers.SysLogHandler, attr)))
FakeLogger.priority_map = \
copy.deepcopy(logging.handlers.SysLogHandler.priority_map)
logging.handlers.SysLogHandler = FakeLogger
if utils.config_true_value(
get_config('unit_test').get('fake_syslog', 'False')):
fake_syslog_handler()
class MockTrue(object):
"""
Instances of MockTrue evaluate like True
Any attr accessed on an instance of MockTrue will return a MockTrue
instance. Any method called on an instance of MockTrue will return
a MockTrue instance.
>>> thing = MockTrue()
>>> thing
True
>>> thing == True # True == True
True
>>> thing == False # True == False
False
>>> thing != True # True != True
False
>>> thing != False # True != False
True
>>> thing.attribute
True
>>> thing.method()
True
>>> thing.attribute.method()
True
>>> thing.method().attribute
True
"""
def __getattribute__(self, *args, **kwargs):
return self
def __call__(self, *args, **kwargs):
return self
def __repr__(*args, **kwargs):
return repr(True)
def __eq__(self, other):
return other is True
def __ne__(self, other):
return other is not True
@contextmanager
def mock(update):
returns = []
deletes = []
for key, value in update.items():
imports = key.split('.')
attr = imports.pop(-1)
module = __import__(imports[0], fromlist=imports[1:])
for modname in imports[1:]:
module = getattr(module, modname)
if hasattr(module, attr):
returns.append((module, attr, getattr(module, attr)))
else:
deletes.append((module, attr))
setattr(module, attr, value)
try:
yield True
finally:
for module, attr, value in returns:
setattr(module, attr, value)
for module, attr in deletes:
delattr(module, attr)
class FakeStatus(object):
"""
This will work with our fake_http_connect, if you hand in one of these
instead of a status int or status int tuple to the "codes" iter you can
add some eventlet sleep to the expect and response stages of the
connection.
"""
def __init__(self, status, expect_sleep=None, response_sleep=None):
"""
:param status: the response status int, or a tuple of
([expect_status, ...], response_status)
:param expect_sleep: float, time to eventlet sleep during expect, can
be a iter of floats
:param response_sleep: float, time to eventlet sleep during response
"""
# connect exception
if isinstance(status, (Exception, eventlet.Timeout)):
raise status
if isinstance(status, tuple):
self.expect_status = list(status[:-1])
self.status = status[-1]
self.explicit_expect_list = True
else:
self.expect_status, self.status = ([], status)
self.explicit_expect_list = False
if not self.expect_status:
# when a swift backend service returns a status before reading
# from the body (mostly an error response) eventlet.wsgi will
# respond with that status line immediately instead of 100
# Continue, even if the client sent the Expect 100 header.
# BufferedHttp and the proxy both see these error statuses
# when they call getexpect, so our FakeConn tries to act like
# our backend services and return certain types of responses
# as expect statuses just like a real backend server would do.
if self.status in (507, 412, 409):
self.expect_status = [status]
else:
self.expect_status = [100, 100]
# setup sleep attributes
if not isinstance(expect_sleep, (list, tuple)):
expect_sleep = [expect_sleep] * len(self.expect_status)
self.expect_sleep_list = list(expect_sleep)
while len(self.expect_sleep_list) < len(self.expect_status):
self.expect_sleep_list.append(None)
self.response_sleep = response_sleep
def get_response_status(self):
if self.response_sleep is not None:
eventlet.sleep(self.response_sleep)
if self.expect_status and self.explicit_expect_list:
raise Exception('Test did not consume all fake '
'expect status: %r' % (self.expect_status,))
if isinstance(self.status, (Exception, eventlet.Timeout)):
raise self.status
return self.status
def get_expect_status(self):
expect_sleep = self.expect_sleep_list.pop(0)
if expect_sleep is not None:
eventlet.sleep(expect_sleep)
expect_status = self.expect_status.pop(0)
if isinstance(expect_status, (Exception, eventlet.Timeout)):
raise expect_status
return expect_status
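# Illustrative sketch (hypothetical values): a FakeStatus can be handed to
# fake_http_connect (defined below) wherever a plain status int is expected,
# e.g. to delay the expect phase of one backend connection.
# >>> codes = (201, FakeStatus(201, expect_sleep=0.5), 201)
# >>> conn_factory = fake_http_connect(*codes)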
class SlowBody(object):
"""
This will work with our fake_http_connect, if you hand in these
instead of strings it will make reads take longer by the given
amount. It should be a little bit easier to extend than the
current slow kwarg - which inserts whitespace in the response.
Also it should be easy to detect if you have one of these (or a
subclass) for the body inside of FakeConn if we wanted to do
something smarter than just duck-type the str/buffer api
enough to get by.
"""
def __init__(self, body, slowness):
self.body = body
self.slowness = slowness
def slowdown(self):
eventlet.sleep(self.slowness)
def __getitem__(self, s):
return SlowBody(self.body[s], self.slowness)
def __len__(self):
return len(self.body)
def __radd__(self, other):
self.slowdown()
return other + self.body
def fake_http_connect(*code_iter, **kwargs):
class FakeConn(object):
def __init__(self, status, etag=None, body='', timestamp='1',
headers=None, expect_headers=None, connection_id=None,
give_send=None):
if not isinstance(status, FakeStatus):
status = FakeStatus(status)
self._status = status
self.reason = 'Fake'
self.host = '1.2.3.4'
self.port = '1234'
self.sent = 0
self.received = 0
self.etag = etag
self.body = body
self.headers = headers or {}
self.expect_headers = expect_headers or {}
self.timestamp = timestamp
self.connection_id = connection_id
self.give_send = give_send
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
try:
self._next_sleep = kwargs['slow'].pop(0)
except IndexError:
self._next_sleep = None
# be nice to trixy bits with node_iter's
eventlet.sleep()
def getresponse(self):
exc = kwargs.get('raise_exc')
if exc:
if isinstance(exc, (Exception, eventlet.Timeout)):
raise exc
raise Exception('test')
if kwargs.get('raise_timeout_exc'):
raise eventlet.Timeout()
self.status = self._status.get_response_status()
return self
def getexpect(self):
expect_status = self._status.get_expect_status()
headers = dict(self.expect_headers)
if expect_status == 409:
headers['X-Backend-Timestamp'] = self.timestamp
response = FakeConn(expect_status,
timestamp=self.timestamp,
headers=headers)
response.status = expect_status
return response
def getheaders(self):
etag = self.etag
if not etag:
if isinstance(self.body, str):
etag = '"' + md5(self.body).hexdigest() + '"'
else:
etag = '"68b329da9893e34099c7d8ad5cb9c940"'
headers = swob.HeaderKeyDict({
'content-length': len(self.body),
'content-type': 'x-application/test',
'x-timestamp': self.timestamp,
'x-backend-timestamp': self.timestamp,
'last-modified': self.timestamp,
'x-object-meta-test': 'testing',
'x-delete-at': '9876543210',
'etag': etag,
'x-works': 'yes',
})
if self.status // 100 == 2:
headers['x-account-container-count'] = \
kwargs.get('count', 12345)
if not self.timestamp:
# when timestamp is None, HeaderKeyDict raises KeyError
headers.pop('x-timestamp', None)
try:
if next(container_ts_iter) is False:
headers['x-container-timestamp'] = '1'
except StopIteration:
pass
am_slow, value = self.get_slow()
if am_slow:
headers['content-length'] = '4'
headers.update(self.headers)
return headers.items()
def get_slow(self):
if 'slow' in kwargs and isinstance(kwargs['slow'], list):
if self._next_sleep is not None:
return True, self._next_sleep
else:
return False, 0.01
if kwargs.get('slow') and isinstance(kwargs['slow'], Number):
return True, kwargs['slow']
return bool(kwargs.get('slow')), 0.1
def read(self, amt=None):
am_slow, value = self.get_slow()
if am_slow:
if self.sent < 4:
self.sent += 1
eventlet.sleep(value)
return ' '
rv = self.body[:amt]
self.body = self.body[amt:]
return rv
def send(self, amt=None):
if self.give_send:
self.give_send(self.connection_id, amt)
am_slow, value = self.get_slow()
if am_slow:
if self.received < 4:
self.received += 1
eventlet.sleep(value)
def getheader(self, name, default=None):
return swob.HeaderKeyDict(self.getheaders()).get(name, default)
def close(self):
pass
timestamps_iter = iter(kwargs.get('timestamps') or ['1'] * len(code_iter))
etag_iter = iter(kwargs.get('etags') or [None] * len(code_iter))
if isinstance(kwargs.get('headers'), (list, tuple)):
headers_iter = iter(kwargs['headers'])
else:
headers_iter = iter([kwargs.get('headers', {})] * len(code_iter))
if isinstance(kwargs.get('expect_headers'), (list, tuple)):
expect_headers_iter = iter(kwargs['expect_headers'])
else:
expect_headers_iter = iter([kwargs.get('expect_headers', {})] *
len(code_iter))
x = kwargs.get('missing_container', [False] * len(code_iter))
if not isinstance(x, (tuple, list)):
x = [x] * len(code_iter)
container_ts_iter = iter(x)
code_iter = iter(code_iter)
conn_id_and_code_iter = enumerate(code_iter)
static_body = kwargs.get('body', None)
body_iter = kwargs.get('body_iter', None)
if body_iter:
body_iter = iter(body_iter)
def connect(*args, **ckwargs):
if kwargs.get('slow_connect', False):
eventlet.sleep(0.1)
if 'give_content_type' in kwargs:
if len(args) >= 7 and 'Content-Type' in args[6]:
kwargs['give_content_type'](args[6]['Content-Type'])
else:
kwargs['give_content_type']('')
i, status = next(conn_id_and_code_iter)
if 'give_connect' in kwargs:
give_conn_fn = kwargs['give_connect']
argspec = inspect.getargspec(give_conn_fn)
if argspec.keywords or 'connection_id' in argspec.args:
ckwargs['connection_id'] = i
give_conn_fn(*args, **ckwargs)
etag = next(etag_iter)
headers = next(headers_iter)
expect_headers = next(expect_headers_iter)
timestamp = next(timestamps_iter)
if status <= 0:
raise HTTPException()
if body_iter is None:
body = static_body or ''
else:
body = next(body_iter)
return FakeConn(status, etag, body=body, timestamp=timestamp,
headers=headers, expect_headers=expect_headers,
connection_id=i, give_send=kwargs.get('give_send'))
connect.code_iter = code_iter
return connect
@contextmanager
def mocked_http_conn(*args, **kwargs):
requests = []
def capture_requests(ip, port, method, path, headers, qs, ssl):
req = {
'ip': ip,
'port': port,
'method': method,
'path': path,
'headers': headers,
'qs': qs,
'ssl': ssl,
}
requests.append(req)
kwargs.setdefault('give_connect', capture_requests)
fake_conn = fake_http_connect(*args, **kwargs)
fake_conn.requests = requests
with mocklib.patch('swift.common.bufferedhttp.http_connect_raw',
new=fake_conn):
yield fake_conn
left_over_status = list(fake_conn.code_iter)
if left_over_status:
raise AssertionError('left over status %r' % left_over_status)
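# Illustrative usage sketch (the request shown is hypothetical): patch the
# backend HTTP layer, exercise the code under test inside the block, then
# inspect the captured requests.
# >>> with mocked_http_conn(200, body='ok') as fake_conn:
# ...     ...  # code under test issues exactly one backend request
# >>> fake_conn.requests[0]['method']
# 'GET'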
def make_timestamp_iter():
return iter(Timestamp(t) for t in itertools.count(int(time.time())))
| 32.104306 | 79 | 0.592924 | 22,331 | 0.665623 | 2,782 | 0.082923 | 3,072 | 0.091568 | 0 | 0 | 8,031 | 0.239381 |
6ab3a62e50f821717cc617bcae69096621bae1d3 | 10,138 | py | Python | fairseq/models/bart/model.py | samsontmr/fairseq | 1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87 | [
"MIT"
] | 172 | 2019-08-22T14:20:25.000Z | 2022-02-16T07:38:12.000Z | fairseq/models/bart/model.py | samsontmr/fairseq | 1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87 | [
"MIT"
] | 3 | 2019-08-30T11:56:15.000Z | 2020-10-02T13:57:49.000Z | fairseq/models/bart/model.py | samsontmr/fairseq | 1d50b6dcd961faaa74ee32e9d7a02ff76f16ab87 | [
"MIT"
] | 8 | 2019-10-15T04:36:43.000Z | 2020-10-21T01:50:09.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BART: Denoising Sequence-to-Sequence Pre-training for
Natural Language Generation, Translation, and Comprehension
"""
import torch.nn as nn
from fairseq import utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.transformer import TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from .hub_interface import BARTHubInterface
@register_model('bart')
class BARTModel(TransformerModel):
@classmethod
def hub_models(cls):
return {
'bart.large': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.tar.gz',
'bart.large.mnli': 'http://dl.fbaipublicfiles.com/fairseq/models/bart.large.mnli.tar.gz',
}
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
# We follow BERT's random weight initialization
self.apply(init_bert_params)
self.classification_heads = nn.ModuleDict()
@staticmethod
def add_args(parser):
super(BARTModel, BARTModel).add_args(parser)
parser.add_argument(
'--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence'
)
parser.add_argument(
'--pooler-dropout', type=float, metavar='D',
help='dropout probability in the masked_lm pooler layers'
)
parser.add_argument(
'--pooler-activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use for pooler layer'
)
@property
def supported_targets(self):
return {'self'}
def forward(
self, src_tokens, src_lengths, prev_output_tokens,
features_only=False, classification_head_name=None, **kwargs
):
if classification_head_name is not None:
features_only = True
encoder_out = self.encoder(
src_tokens,
src_lengths=src_lengths,
**kwargs,
)
x, extra = self.decoder(
prev_output_tokens,
encoder_out=encoder_out,
features_only=features_only,
**kwargs,
)
if classification_head_name is not None:
sentence_representation = x[
src_tokens.eq(self.encoder.dictionary.eos()), :
].view(x.size(0), -1, x.size(-1))[:, -1, :]
x = self.classification_heads[classification_head_name](
sentence_representation
)
return x, extra
@classmethod
def from_pretrained(
cls,
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
bpe='gpt2',
**kwargs,
):
from fairseq import hub_utils
x = hub_utils.from_pretrained(
model_name_or_path,
checkpoint_file,
data_name_or_path,
archive_map=cls.hub_models(),
bpe=bpe,
load_checkpoint_heads=True,
**kwargs,
)
return BARTHubInterface(x['args'], x['task'], x['models'][0])
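    # Illustrative usage (a sketch; the archive download and the returned
    # hub-interface methods are provided by the fairseq hub utilities):
    # >>> bart = BARTModel.from_pretrained('bart.large',
    # ...                                  checkpoint_file='model.pt')
    # >>> bart.eval()  # disable dropout for evaluation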
def register_classification_head(self, name, num_classes=None, inner_dim=None, **kwargs):
"""Register a classification head."""
print("Registering classification head: {0}".format(name))
if name in self.classification_heads:
prev_num_classes = self.classification_heads[name].out_proj.out_features
prev_inner_dim = self.classification_heads[name].dense.out_features
if num_classes != prev_num_classes or inner_dim != prev_inner_dim:
print(
'WARNING: re-registering head "{}" with num_classes {} (prev: {}) '
'and inner_dim {} (prev: {})'.format(
name, num_classes, prev_num_classes, inner_dim, prev_inner_dim
)
)
self.classification_heads[name] = BARTClassificationHead(
self.args.encoder_embed_dim,
inner_dim or self.args.encoder_embed_dim,
num_classes,
self.args.pooler_activation_fn,
self.args.pooler_dropout,
)
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
prefix = name + '.' if name != '' else ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
num_classes = state_dict[prefix + 'classification_heads.' + head_name + '.out_proj.weight'].size(0)
inner_dim = state_dict[prefix + 'classification_heads.' + head_name + '.dense.weight'].size(0)
if getattr(self.args, 'load_checkpoint_heads', False):
if head_name not in current_head_names:
self.register_classification_head(head_name, num_classes, inner_dim)
else:
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
elif (
num_classes != self.classification_heads[head_name].out_proj.out_features
or inner_dim != self.classification_heads[head_name].dense.out_features
):
print(
'WARNING: deleting classification head ({}) from checkpoint '
'with different dimensions than current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class BARTClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self,
input_dim,
inner_dim,
num_classes,
activation_fn,
pooler_dropout,
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.activation_fn = utils.get_activation_fn(activation_fn)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, features, **kwargs):
x = features
x = self.dropout(x)
x = self.dense(x)
x = self.activation_fn(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@register_model_architecture('bart', 'bart_large')
def bart_large_architecture(args):
    args.encoder_embed_path = getattr(args, 'encoder_embed_path', None)
    args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024)
    args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4*1024)
    args.encoder_layers = getattr(args, 'encoder_layers', 12)
    args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16)
    args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False)
    args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', True)
    args.decoder_embed_path = getattr(args, 'decoder_embed_path', None)
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', args.encoder_ffn_embed_dim)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16)
    args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', False)
    args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.)
    args.relu_dropout = getattr(args, 'relu_dropout', 0.)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.max_target_positions = getattr(args, 'max_target_positions', 1024)
    args.max_source_positions = getattr(args, 'max_source_positions', 1024)
    args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None)
    args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0)
    args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', True)
    args.share_all_embeddings = getattr(args, 'share_all_embeddings', True)
    args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim)
    args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim)
    args.no_scale_embedding = getattr(args, 'no_scale_embedding', True)
    args.layernorm_embedding = getattr(args, 'layernorm_embedding', True)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh')
    args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0)
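
# Added note (not part of the original file): every getattr(args, name, default)
# call above only fills in attributes the user did not set, so command-line
# overrides survive while unspecified hyperparameters fall back to the
# BART-large defaults. A hedged sketch:
#
#     from argparse import Namespace
#     args = Namespace(encoder_layers=6)
#     bart_large_architecture(args)
#     assert args.encoder_layers == 6          # user override kept
#     assert args.encoder_embed_dim == 1024    # default filled in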
| 41.044534 | 111 | 0.641251 | 7,130 | 0.703295 | 0 | 0 | 8,794 | 0.867429 | 0 | 0 | 2,360 | 0.232788 |
6ab43a62d7fe9c3851fb93216f017948e9264012 | 4,665 | py | Python | vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py | gpierre42/optraj | 53beb81c669093b866a786f2c1df9c663bbd7224 | [
"Apache-2.0"
] | null | null | null | vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py | gpierre42/optraj | 53beb81c669093b866a786f2c1df9c663bbd7224 | [
"Apache-2.0"
] | null | null | null | vagrant/optraj.istic.univ-rennes1.fr/src/system/Assignment.py | gpierre42/optraj | 53beb81c669093b866a786f2c1df9c663bbd7224 | [
"Apache-2.0"
] | null | null | null | # coding=utf8
'''
Created on 29 Oct 2013

@author: Nicolas Poirey
'''
from Worker import Worker
from Phase import Phase


class Assignment(object):
    '''
    Assignment class defining the assignment of a worker to a phase

    public attributes:
        - num : the assignment id (int)
        - worker : the worker of the assignment (Worker.Worker)
        - phase : the phase of the assignment (Phase.Phase)
    '''

    def __init__(self, num=-1, worker=Worker(), phase=Phase()):
        '''
        Constructor of an assignment of a worker to a work site

        Args:
            num : the unique number identifying the assignment. (int)
            worker : the worker concerned. (Worker.Worker)
            phase : the phase of the assignment (Phase.Phase)
        '''
        self._num = num
        self._worker = worker
        self._phase = phase

    def __str__(self):
        '''
        Returns the assignment in a human-readable form

        Returns:
            the assignment as a string.

        Examples:
            >>> p.__str__()
            >>> "Assignment, with id 3, of worker (id 5) Doe John, on the phase with id 4"
        '''
        return "Assignment, with id {}, of worker (id {}) {} {}, on the phase with id {}".format(self.num, self.worker.num, self.worker.firstName, self.worker.name,
                                                                                                 self.phase.num) + " " + str(self._phase)

    '''
    /// @cond
    ========================= Setters / accessors ==============================
    '''
    #ifndef DOXYGEN_SHOULD_SKIP_THIS

    @property
    def num(self):
        """
        Getter for num
        """
        return self._num

    @num.setter
    def num(self, value):
        """
        Setter for num
        """
        self._num = value

    @property
    def worker(self):
        """
        Getter for worker
        """
        return self._worker

    @worker.setter
    def worker(self, value):
        """
        Setter for worker
        """
        self._worker = value

    @property
    def phase(self):
        """
        Getter for phase
        """
        return self._phase

    @phase.setter
    def phase(self, value):
        """
        Setter for phase
        """
        self._phase = value

    #endif /* DOXYGEN_SHOULD_SKIP_THIS */
    '''
    /// @endcond
    ================ Public methods ================
    '''

    def serial(self):
        '''
        Serializes an assignment

        Returns:
            a dict containing the serialized assignment

        Example:
            >>> {'phase': {'needs': {33: {'num': 33, '__class__': 'Need', 'need': 10, 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 4, '__class__': 'Qualification', 'name': u'N3P2'}, 'phase': 19}, 34: {'num': 34, '__class__': 'Need', 'need': 20, 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 5, '__class__': 'Qualification', 'name': u'N3P1'}, 'phase': 19}, 92: {'num': 92, '__class__': 'Need', 'need': 2, 'craft': {'num': 7, '__class__': 'Craft', 'name': u"Agent d'entretien"}, 'qualification': {'num': 6, '__class__': 'Qualification', 'name': u'N2'}, 'phase': 19}, 79: {'num': 79, '__class__': 'Need', 'need': 2, 'craft': {'num': 10, '__class__': 'Craft', 'name': u"Chef d'\xe9quipe"}, 'qualification': {'num': 2, '__class__': 'Qualification', 'name': u'N4P2'}, 'phase': 19}}, 'num': 19, 'numYear': 2014, 'numWeek': 15, 'totalWorkers': 0, 'nbWorkers': 0, '__class__': 'Phase', 'numSite': 4}, 'num': 391, 'worker': {'num': 101, 'licence': u' ', 'name': u'JOUSSEAUME', 'firstName': u'MICKAEL', 'birthdateY': '1972', '__class__': 'Worker', 'birthdateM': '11', 'craft': {'num': 2, '__class__': 'Craft', 'name': u'Macon'}, 'qualification': {'num': 4, '__class__': 'Qualification', 'name': u'N3P2'}, 'position': {'latitude': 47.9292, '__class__': 'Position', 'longitude': -1.94175, 'address': u'6 RUE DE RENNES 35330 LA CHAPELLE BOUEXIC'}, 'birthdateD': '26'}, '__class__': 'Assignment'}
        '''
        return {"__class__": "Assignment",
                "num": self.num,
                "worker": self.worker.serial(),
                "phase": self.phase.serial()
                }

    def phaseNumber(self):
        '''
        Returns the number of the associated phase

        Returns:
            numPhase (int).
        '''
        return self._phase.num

    def workerNumber(self):
        '''
        Returns the number of the associated worker

        Returns:
            numWorker (int).
        '''
        return self._worker.num
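
# Added example (not part of the original file): a brief usage sketch, assuming
# Worker and Phase expose the serial() methods relied on above:
#
#     a = Assignment(num=1, worker=Worker(), phase=Phase())
#     d = a.serial()                       # {'__class__': 'Assignment', 'num': 1, ...}
#     a.phaseNumber(), a.workerNumber()    # ids of the associated phase and worker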
| 34.555556 | 1,463 | 0.520043 | 4,553 | 0.973904 | 0 | 0 | 665 | 0.142246 | 0 | 0 | 3,351 | 0.716791 |
6ab44cf57a381d44d95045fd0ee5b06ba9aa8eed | 2,257 | py | Python | tracker/view/error.py | cmm1107/arch-security-tracker | 7d5f7d69f2b02c056c3888a9b70132c29432a468 | [
"MIT"
] | null | null | null | tracker/view/error.py | cmm1107/arch-security-tracker | 7d5f7d69f2b02c056c3888a9b70132c29432a468 | [
"MIT"
] | null | null | null | tracker/view/error.py | cmm1107/arch-security-tracker | 7d5f7d69f2b02c056c3888a9b70132c29432a468 | [
"MIT"
] | null | null | null | from binascii import hexlify
from functools import wraps
from logging import error
from os import urandom
from random import randint

from flask import make_response
from flask import render_template
from werkzeug.exceptions import BadRequest
from werkzeug.exceptions import Forbidden
from werkzeug.exceptions import Gone
from werkzeug.exceptions import InternalServerError
from werkzeug.exceptions import MethodNotAllowed
from werkzeug.exceptions import NotFound

from config import get_debug_flag
from tracker import tracker
from tracker.symbol import smileys_sad

error_handlers = []


def errorhandler(code_or_exception):
    def decorator(func):
        error_handlers.append({'func': func, 'code_or_exception': code_or_exception})

        @wraps(func)
        def wrapped(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapped
    return decorator


def handle_error(e, code, json=False):
    if json:
        return {'message': e}, code
    return make_response(render_template('error.html',
                                         smiley=smileys_sad[randint(0, len(smileys_sad) - 1)],
                                         text=e,
                                         title='{}'.format(code)), code)


@errorhandler(NotFound.code)
def not_found(e='404: Not Found', json=False):
    return handle_error(e if 'check your spelling' not in '{}'.format(e) else '404: Not Found', NotFound.code, json)


@errorhandler(Forbidden.code)
def forbidden(e='403: Forbidden', json=False):
    return handle_error(e, Forbidden.code, json)


@errorhandler(MethodNotAllowed.code)
def method_not_allowed(e='405: Method Not Allowed', json=False):
    return handle_error(e, MethodNotAllowed.code, json)


@errorhandler(Gone.code)
def gone(e='410: Gone', json=False):
    return handle_error(e, Gone.code, json)


@errorhandler(BadRequest.code)
def bad_request(e='400: Bad Request', json=False):
    return handle_error(e, BadRequest.code, json)


@errorhandler(Exception)
@errorhandler(InternalServerError.code)
def internal_error(e):
    if get_debug_flag():
        raise e
    code = hexlify(urandom(4)).decode()
    error(Exception("Code: {}".format(code), e), exc_info=True)
    text = '500: Deep Shit\n{}'.format(code)
    return handle_error(text, InternalServerError.code)
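
# Added note (not part of the original file): the error_handlers list built by
# the @errorhandler decorator is presumably consumed at application-creation
# time; a minimal sketch of that wiring, assuming a Flask app object:
#
#     for handler in error_handlers:
#         app.register_error_handler(handler['code_or_exception'], handler['func'])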
| 29.311688 | 116 | 0.717767 | 0 | 0 | 0 | 0 | 1,134 | 0.502437 | 0 | 0 | 207 | 0.091715 |